ext | sha | content
---|---|---|
py | b40705c62bc0bff83d4a2c5f2b307ac5e4655def | from collections import OrderedDict
expected = [
OrderedDict(
[
(
"content",
[
OrderedDict(
[
("type", "box"),
("doi", u"10.7554/eLife.00013.009"),
("id", u"box1"),
("label", u"Box 1"),
("title", u"Box title"),
(
"content",
[
OrderedDict(
[("type", "paragraph"), ("text", u"content 1")]
),
OrderedDict(
[("type", "paragraph"), ("text", u"content 2")]
),
],
),
]
)
],
)
]
)
]
|
py | b407061a688139723971a74ccfb61f97847bb7d8 | """
Weighted mini-bucket elimination for graphical models
Computes upper or lower bounds on the partition function or MAP/MPE configurations, depending on weights
Supports incremental construction
Supports TRW-based importance sampling
class WMB:
# attributes:
# elimOrder[i]
# priority[Xj] = i if elimOrder[i]=Xj
# bucket[i] = [nodei1 nodei2 ... ]
# matchlist[i] = [matchi1 matchi2 ...]
#
# class Node:
# clique = VarSet
# theta = factor (or list of factors?)
# weight = float
# parent = ref, children = [refs...] or index?
# msgFwd, msgBwd = factor
"""
from .factor import *
from .graphmodel import *
from builtins import range
from functools import reduce          # needed below by gdd_update()'s line search (Python 3)
import numpy as np                    # np.* is used throughout; make the dependency explicit
try:
    from itertools import izip        # Python 2
except ImportError:                   # Python 3: zip already returns an iterator
    izip = zip
reverse_enumerate = lambda l: izip(range(len(l)-1, -1, -1), reversed(l))
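# e.g. (illustrative only) list(reverse_enumerate(['a','b','c'])) == [(2,'c'), (1,'b'), (0,'a')]
# -- enumerate run from the last index down.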
class WMB(object):
'''Class implementing weighted mini-bucket elimination inference'''
# Internal object / structure for representing a mini-bucket
class Node:
"""Internal container object for mini-bucket nodes"""
def __init__(self):
self.clique = VarSet()
self.theta = Factor().log()
self.weight = 1.0
self.parent = None
self.children = []
self.msgFwd = Factor().log()
self.msgBwd = Factor().log()
self.originals = []
def __repr__(self):
return "{}^{}".format(self.clique,self.weight)
def __str__(self):
return "{}".format(self.clique)
def __lt__(self, other):
return False # don't care about ordering nodes
class ConstantList:
def __init__(self, val):
self.val = val
def __getitem__(self, loc):
return self.val
def __init__(self, model, elimOrder=None, iBound=0, sBound=0, weights=1.0, attach=True, **kwargs):
# TODO: check if model isLog() true
# save a reference to our model
self.model = model
self.X = model.X
self.logValue = model.logValue
# create & process elimination ordering of the model:
if elimOrder is None: elimOrder = 'wtminfill'
if type(elimOrder) is str: # auto elim order: check that weights is string or float
if not type(weights) in {float, str}:
raise ValueError("Must specify elimination order or use all-equal weights (float or string)");
elimOrder = eliminationOrder(self.model, orderMethod=elimOrder)[0];
self.elimOrder = elimOrder
self.priority = [-1 for i in range(model.nvar)] # build priority of each var
for i,x in enumerate(elimOrder): self.priority[x] = i
# now build the mini-bucket data structure
self.buckets = [ [] for x in range(model.nvar) ] # bucket for each var: list of minibuckets
self.matches = [ [] for x in range(model.nvar) ] # matching sets for each bucket
self.setWeights(weights) # TODO: duplicate to initialize (!)
for f in model.factors:
if len(f.vars)==0: continue; #TODO: should add anyway (somewhere)
n = self.addClique(f.vars)
if attach: n.theta += f.log() # include log f(x) in node's log-factor
n.originals.append(f) # append (pointer to) original f for later reference
# and set the weights of the buckets:
self.setWeights(weights)
def setWeights(self,weights):
"""Set the weights of the inference problem.
weights = 'max+' or 0.0 => upper bound the MAP configuration
'sum+' or 1.0 => upper bound the partition function
'sum-' or -1.0 => lower bound the partition function
For more general bounds, weights = list of floats (one per variable)
"""
if type(weights) is str:
if weights == 'sum+': weights = 1.0;
elif weights=='sum-': weights = -1.0;
elif weights=='max+': weights = 1e-8;
else: raise ValueError("Unknown weight / task type; must be max+, sum+, sum-, or float / float list")
if type(weights) is float: weights = WMB.ConstantList(weights)
self.weights = weights
for i,xi in enumerate(self.elimOrder): # (TODO?) set mini-bucket weights uniformly
ni = len(self.buckets[i])
for j in range(ni): # set uniformly
self.buckets[i][j].weight = self.weights[xi]/ni
if self.weights[xi] < 0 and ni > 0: # lower bound: make one weight positive so the weights still sum to one
self.buckets[i][0].weight = 1.0 - self.weights[xi]*(ni-1)/ni
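# Worked example (illustrative): with weights='sum-' (weights[xi] = -1.0) and
# ni = 3 mini-buckets, each weight is first set to -1/3 and the line above then
# resets the first to 1.0 - (-1.0)*(2/3) = 5/3, giving (5/3, -1/3, -1/3):
# one positive weight, the rest negative, summing to one.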
def __nodeID(self,node):
"""Helper function: get identifier (bucket & location) of a given node"""
if not isinstance(node, WMB.Node): return None,None
i = min([self.priority[x] for x in node.clique]) # get bucket
j = self.buckets[i].index(node)
return i,j
def __repr__(self):
to_return = ""
for i,b in enumerate(self.buckets):
to_return += "{:03d}: ".format(int(self.elimOrder[i]))
for j,mb in enumerate(b):
to_return += "{!s}^{:.2f} => {}; ".format(mb,mb.weight, self.__nodeID(mb.parent))
to_return += "\n"
return to_return
# def draw(self):
# import pygraphviz
# G = pygraphviz.AGraph()
# for i,b in enumerate(self.buckets):
# for j,mb in enumerate(b):
# G.add_node(self.__nodeID(mb))
# for i,b in enumerate(self.buckets):
# for j,mb in enumerate(b):
# G.add_edge(self.__nodeID(mb),self.__nodeID(mb.parent))
# G.layout() # layout with default (neato)
# G.draw('wmb.png') # draw png
def draw(self):
import networkx as nx
pos,labels = {},{}
G = nx.DiGraph()
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
G.add_node(str(mb))
pos[str(mb)] = (j,-i)
labels[str(mb)] = str(mb)
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
if mb.parent is not None: G.add_edge(str(mb),str(mb.parent))
nx.draw(G, pos=pos, labels=labels)
return G
def addClique(self,vars):
"""Add a clique with scope "vars", fixing up structure to be a valid MB tree"""
vs = VarSet(vars)
corder = np.argsort( [self.priority[x] for x in vars] ) # get order in which eliminated
corder = [vars[c] for c in corder]
added = []
found = False
for x in corder:
if found: break
#print "bucket ",x
b = self.buckets[self.priority[x]]
to_remove = []
for mb in b:
#print " check ",mb
if mb.clique < vs:
to_remove.append(mb)
if mb.clique >= vs: # if we found a minibucket we can just join, do:
if len(added) > 0: # if we've added nodes, connect them as descendants
mb.children.append( added[-1] ) # of the found node, and found node as parent of last
added[-1].parent = mb
found = True # now, we don't need to keep generating parents
#print " Found!"
added.append(mb) # not really added, but the end of the added chain
break
# if we didn't find any mini-buckets we can join, we need to add one:
if not found: #
n = WMB.Node()
n.clique = VarSet(vs)
n.weight = -1e-3 if self.weights[x] < 0 else 1e-3; # TODO: small non-zero weights
#print "adding ",n," to ",self.priority[x]
b.append(n)
if len(added) > 0: # then, last added node is the child of this one
n.children.append(added[-1])
added[-1].parent = n
added.append(n) # put in added list
vs -= [x] # next bucket is what's left after x is eliminated
for mb in to_remove:
for c in mb.children: c.parent = n # change children to point to new node, and
n.children.extend(mb.children) # merge with current child list
n.weight += mb.weight # join weights into new node
if mb.parent is not None: # if mb has a parent, shift factors around to preserve bound
mb.theta -= mb.msgFwd
mb.parent.theta += mb.msgFwd
mb.parent.children.remove(mb)
n.theta += mb.theta # join log-factors into new node
n.originals.extend(mb.originals) # move original factor pointers to new node
b.remove(mb)
#n.theta += Factor(n.clique,0.0); # need to do this at some point to ensure correct elim
# TODO: fix up match structure?
# done adding all required cliques; return 1st
return added[0]
def detachFactors(self):
"""Remove factor tables from their associated cliques; speeds up scope-based merging"""
for b in self.buckets:
for mb in b:
mb.theta = Factor([],0.)
def attachFactors(self):
"""Re-attach factor tables to their associated cliques for evaluation"""
for b in self.buckets:
for mb in b:
mb.theta = Factor([],0.)
for f in mb.originals: mb.theta += f.log()
# TODO: check if already in log form???
def memory(self, bucket=None, use_backward=True):
"""Compute the total memory (in MB) required for this mini-bucket approximation"""
mem = 0.
use_buckets = self.buckets if bucket is None else [self.buckets[bucket]]
for b in use_buckets:
for mb in b:
mem += mb.clique.nrStatesDouble() * mb.theta.table.itemsize
# TODO: add forward & backward message costs here also
return mem / 1024. / 1024.
# TODO: convert to external function? pass variable in; check if refinement of another?
# Is score correct, or inverted? check
def scoreByScope(self, ibound=None, sbound=None):
"""Returns a scope-based scoring function for use in merge()"""
def score(m1,m2):
jt = m1.clique | m2.clique
if ibound is not None and len(jt) > ibound: return -1
if sbound is not None and jt.nrStates() > sbound: return -1
# TODO: also disallow if not consistent with some specified scope sets?
mx,mn = max([len(m1.clique),len(m2.clique)]), min([len(m1.clique),len(m2.clique)])
return 1.0/(float(mx)+float(mn)/mx)
# return the scoring function
return score
# score = 1 / (len(max) + len(min)/len(max)) if the union fits within iBound/sBound, else -1
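# Illustrative numbers: merging cliques of sizes 4 and 2 (union within the bounds)
# scores 1/(4 + 2/4) = 2/9 ~ 0.22, while two size-3 cliques score 1/(3 + 3/3) = 0.25,
# so merge() below slightly prefers joining similar-sized cliques first.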
def merge(self, score):
from heapq import heappush,heappop
try:
from itertools import count
tiebreak = count().__next__ # need tiebreaker value for priority queue (?)
except:
tiebreak = lambda: 0
for b in self.buckets:
priority = []
lookup = {}
# see not-too-efficient-looking remove_task https://docs.python.org/2/library/heapq.html
# build scores:
for i,m1 in enumerate(b):
for m2 in b[i+1:]:
s = score(m1,m2)
if s >= 0.0:
entry = [-s,tiebreak(),m1,m2]
lookup[(m1,m2)] = entry
heappush(priority, entry)
while len(priority):
entry = heappop(priority)
s,_,m1,m2 = entry[0],entry[1],entry[2],entry[3]
#s,m1,m2 = priority.pop()
if m1 is None or m2 is None: continue
if m1 not in b or m2 not in b: continue ## check for removed minibuckets?
#print b
#print "Merging ",m1,"+",m2
for m in b:
for ma,mb in [(m1,m),(m,m1),(m2,m),(m,m2)]:
s = -lookup.get( (ma,mb), [1,None,None] )[0]
if s >= 0.0:
entry = lookup.pop( (ma,mb) )
entry[2],entry[3] = None,None # mark as "removed"
#priority.remove( [s,ma,mb] )
m12 = self.addClique( m1.clique | m2.clique )
# what if others removed? (bad check above?)
#print b
for m in b:
if m is m12: continue
s = score(m12,m)
if s >= 0.0:
entry = [-s,tiebreak(),m12,m]
lookup[ (m12,m) ] = entry
heappush(priority,entry)
return None
def mergeScope(self, iBound=0, sBound=0):
for Bi in self.buckets:
# TODO: sort bucket by size (or store sorted)
for mb in Bi:
# TODO: merge into largest clique that can fit
pass
#@profile
def msgForward(self, stepTheta=0.5, stepWeights=0.1):
"""Compute a forward pass through all nodes and return the resulting bound"""
bound = 0.0
for i,b in enumerate(self.buckets):
X = self.model.vars[ self.elimOrder[i] ]
nNodes = len(b)
beliefs = [ None for mb in b ]
if nNodes > 1: # if more than one mini-bucket partition, pre-compute beliefs:
if stepTheta or stepWeights:
for j,mb in enumerate(b):
beliefs[j] = mb.theta + mb.msgBwd
for c in mb.children: beliefs[j] += c.msgFwd
if beliefs[j].nvar < len(mb.clique): beliefs[j] += Factor(mb.clique - beliefs[j].vars,0.0);
beliefs[j] *= 1.0/mb.weight
beliefs[j] -= beliefs[j].lse()
# Then, update theta (parameterization) on each set in "matches"
if stepTheta: # TODO: fix; delta / belief indexing issue
#for match in self.matches[i]: # TODO: this is a bit dangerous if not kept up to date!
if True: # TODO: simple version: just match all minibuckets
match = b
wTotal = sum([mb.weight for mb in match])
vAll = VarSet(match[0].clique)
for mb in match[1:]: vAll &= mb.clique
delta = [None for mb in match]
avg_belief = Factor().log()
#print "Bucket",b
#print match
#print vAll
for j,mb in enumerate(match):
delta[j] = beliefs[j].lse( mb.clique - vAll ) # TODO: belief[j] incorrect if match != b
#print mb.theta.table #delta[j].table
avg_belief += delta[j] * mb.weight / wTotal
#print avg_belief.table
#print "==="
for j,mb in enumerate(match):
delta[j] = avg_belief - delta[j]
beliefs[j] += delta[j] * stepTheta
beliefs[j] -= beliefs[j].lse()
mb.theta += delta[j] * mb.weight * stepTheta
#print mb.theta.table
# Last, update weights if desired:
if stepWeights:
isLower=(self.weights[i] == -1) # TODO: a bit difficult; needs to know weight constraints (+1/0/-1, etc.)
H = [0.0 for mb in b]
Havg = 0.0
totalWeight = 0.0
positive_node = None
for j,mb in enumerate(b):
H[j] = - (beliefs[j].exp() * (beliefs[j] - beliefs[j].lse([X]))).sum()
if not isLower:
Havg += mb.weight * H[j]
elif mb.weight > 0:
Havg = H[j]
positive_node = mb
for j,mb in enumerate(b):
if not isLower:
mb.weight *= np.exp( -stepWeights * mb.weight * (H[j]-Havg) )
totalWeight += mb.weight
elif mb.weight < 0:
mb.weight *= np.exp( stepWeights * mb.weight * (H[j]-Havg) )
totalWeight += mb.weight
if not isLower:
for j,mb in enumerate(b): mb.weight /= totalWeight
else:
positive_node.weight = 1.0 - totalWeight
# now, compute the forward messages:
for j,mb in enumerate(b):
beliefs[j] = mb.theta.copy() # Alternative? do by re-subtracting msgBwd?
for c in mb.children: beliefs[j] += c.msgFwd
if beliefs[j].nvar < len(mb.clique): beliefs[j] += Factor(mb.clique - beliefs[j].vars,0.0);
mb.msgFwd = beliefs[j].lsePower([X], 1.0/mb.weight)
beliefs[j] = Factor().log() # clear reference & memory?
if mb.parent is None: # add roots to overall bound
bound += mb.msgFwd
return float(bound)
#@profile
def msgBackward(self, stepTheta=0.0, stepWeights=0.0, beliefs=None):
"""Compute a backward pass through all nodes
If beliefs is a list of cliques, returns the estimated beliefs on those cliques
"""
to_save = [[] for i in range(len(self.buckets))]
if beliefs is None:
return_beliefs = {}
else:
return_beliefs = { clique: None for clique in beliefs }
# map cliques to buckets for checking
for clique in beliefs:
to_save[ min([self.priority[x] for x in clique]) ].append(VarSet(clique))
for i,b in reverse_enumerate(self.buckets): #reversed(list(enumerate(self.buckets))):
X = self.model.vars[ self.elimOrder[i] ]
nNodes = len(b)
beliefs_b = [ None for mb in b ]
if nNodes > 1: # if more than one mini-bucket partition, pre-compute beliefs:
if stepTheta or stepWeights:
for j,mb in enumerate(b):
beliefs_b[j] = mb.theta + mb.msgBwd
for c in mb.children: beliefs_b[j] += c.msgFwd
beliefs_b[j] *= 1.0/mb.weight
beliefs_b[j] -= beliefs_b[j].lse()
# Then, update theta (parameterization) on each set in "matches"
if stepTheta:
pass
if stepWeights:
pass
# now, compute the backward messages:
for j,mb in enumerate(b):
beliefs_b[j] = mb.theta + mb.msgBwd
for c in mb.children: beliefs_b[j] += c.msgFwd
#beliefs_b[j] -= beliefs_b[j].lse()
#if mb.weight > 0:
beliefs_b[j] -= beliefs_b[j].max()
#else:
# beliefs_b[j] -= beliefs_b[j].min() # invert if negative? TODO?
beliefs_b[j] *= 1.0 / mb.weight
for c in mb.children:
c.msgBwd = beliefs_b[j].lse( mb.clique - c.clique )*c.weight - c.msgFwd
#c.msgBwd -= c.msgBwd.max() # TODO normalize for convenience?
#c.msgBwd = (beliefs_b[j]*(1.0/mb.weight)).lse( mb.clique - c.clique )*c.weight - c.msgFwd
# TODO: compute marginal of any to_save[i] cliques that fit & not done
for c in to_save[i]:
if c <= mb.clique and return_beliefs[c] is None: return_beliefs[c] = beliefs_b[j].lse( mb.clique - c )
beliefs_b[j] = Factor().log() # clear out belief
for c,f in return_beliefs.items():
f -= f.lse()
f.expIP() # exponentiate and normalize beliefs before returning
#f /= f.sum()
return return_beliefs
def reparameterize(self):
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
if mb.parent is not None:
mb.theta -= mb.msgFwd
mb.parent.theta += mb.msgFwd
mb.msgFwd *= 0.0
def gdd_update(self,maxstep=1.0,threshold=0.01):
def wt_elim(f,w,pri):
elim_ord = np.argsort( [pri[x] for x in f.vars] )
tmp = f.copy();
for i in elim_ord:
tmp = tmp.lsePower([f.v[i]],1.0/w[i])
return tmp
def calc_bound( thetas, weights, pri):
return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
def mu(th,w,pri):
elim_ord = np.argsort( [pri[x] for x in th.vars] )
lnZ0 = th
lnmu = 0.0
for i in elim_ord:
lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
lnmu = lnmu + (lnZ0 - lnZ1)*(1.0/w[i])
lnZ0 = lnZ1
return lnmu.expIP()
def armijo(thetas,weights,pri,steps,threshold=1e-4,direction=+1):
f1 = calc_bound(thetas,weights,pri)
match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
for s in range(steps):
mus = [mu(th,wt,pri).marginal(match) for th,wt in zip(thetas,weights)]
dL = [mus[0]-mus[i] for i in range(len(mus))]
dL[0] = -sum(dL)
gradmag = sum([ df.abs().sum() for df in dL ])
gradmax = max([ df.abs().max() for df in dL ])
gradnorm = sum([ (df**2.0).sum() for df in dL ])
if gradmax < 1e-8: return # "optTol" : gradient small => local optimum (use max(abs(g))?)
stepsize = min(1.0, 1.0/gradmag) if s==0 else min(1.0, direction*(f0-f1)/gradmag)
stepsize = stepsize if stepsize > 0 else 1.0
f0 = f1; # update "old" objective value
for j in range(10):
newthetas = [th+(direction*stepsize*df) for th,df in zip(thetas,dL)] # redo; modify df directly
f1 = calc_bound(newthetas,weights,pri)
#print " ",f0," => ",f1, " (",f0-f1,' ~ ',stepsize*threshold*gradnorm,")"
if (f0 - f1)*direction > stepsize*threshold*gradnorm:
for th,nth in zip(thetas,newthetas): th.t[:]=nth.t # rewrite tables
break;
else:
stepsize *= 0.5
if stepsize*gradmax < 1e-8: return # < progTol => no progress possible
#def armijo(thetas,weights,pri,maxstep,threshold,direction):
# f0 = calc_bound(thetas,weights,pri)
# #print [th for th in thetas], f0
# match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
# mus = [mu(th,wt,pri).marginal(match) for th,wt in zip(thetas,weights)]
# dL = [mus[0]-mus[i] for i in range(len(mus))]
# dL[0] = -sum(dL)
# gradnorm = sum([ (df**2.0).sum() for df in dL ])
# for j in range(10):
# newthetas = [th+(direction*maxstep*df) for th,df in zip(thetas,dL)] # redo; modify df directly
# f1 = calc_bound(newthetas,weights,pri)
# #print " ",f0," => ",f1, " (",f0-f1,' ~ ',maxstep*threshold*gradnorm,")"
# if (f0 - f1)*direction > maxstep*threshold*gradnorm:
# for th,nth in zip(thetas,newthetas): th.t[:]=nth.t # rewrite tables
# return
# else:
# maxstep *= 0.5
# return # give up?
######
bound = 0.0
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b): # make sure has the correct scope (TODO: required?)
if mb.theta.nvar < len(mb.clique): mb.theta += Factor(mb.clique - mb.theta.vars,0.0)
for i,b in enumerate(self.buckets):
X = self.model.vars[ self.elimOrder[i] ]
nNodes = len(b)
thetas = [mb.theta for mb in b]
eps = 1e-3 * self.weights[i] # TODO: doesn't work with mixed weight signs
weights = [ [eps for x in mb.theta.vars] for mb in b ]
for j,mb in enumerate(b): weights[j][mb.theta.vars.index(X)] = mb.weight
armijo(thetas,weights,self.priority,5,threshold,np.sign(eps))
for j,mb in enumerate(b):
if mb.parent is not None:
thetas2 = [mb.theta, mb.parent.theta]
pi,pj = self.__nodeID(mb.parent)
weights2 = [ weights[j], [1e-3*self.weights[pi] for x in mb.parent.theta.vars] ]
weights2[1][mb.parent.theta.vars.index(self.model.vars[self.elimOrder[pi]])] = mb.parent.weight
armijo(thetas2,weights2,self.priority,5,threshold,np.sign(eps)) # TODO: mixed?
bound += calc_bound([mb.theta],[weights[j]],self.priority)
return float(bound)
# TODO: rename greedy-assign? Add optional partial config?
def assignBackward(self):
"""Perform a backward pass through all nodes, assigning the most likely value"""
# TODO check & test ; check for zero weights? (don't care?)
x = {}
for i,b in reverse_enumerate(self.buckets): #reversed(list(enumerate(self.buckets))):
X = self.model.vars[ self.elimOrder[i] ]
bel = Factor([X],0.0)
for j,mb in enumerate(b):
bel += mb.theta.condition(x)
for c in mb.children: bel += c.msgFwd.condition(x)
x[X] = bel.argmax()[0]
return x
def initHeuristic(self):
"""TODO: make this function unnecessary; make work for and/or pseudotree (currently or only)"""
self.atElim = [ [] for b in self.buckets ]
for i,b in enumerate(self.buckets):
for j,mb in enumerate(b):
if mb.parent is not None:
pi,pj = self.__nodeID(mb.parent)
for ii in range(i+1,pi+1): self.atElim[ii].append(mb);
def heuristic(self,X,config):
"""Evaluate the bound given partial assignment 'config' (including variable X and all later)"""
return sum([mb.msgFwd.valueMap(config) for mb in self.atElim[X]])
#raise NotImplementedError # TODO: fix
# need desired pseudo-tree & track messages passing between earlier & later buckets
def resolved(self,X,config):
"""Evaluate the resolved value of a partial assignment 'config' (including variable X and all later)"""
return sum([mb.theta.valueMap(config) for b in self.buckets[self.priority[X]:] for mb in b])
def newly_resolved(self,X,config):
"""Evaluate the change in resolved value of a partial assignment 'config' after clamping X"""
return sum([mb.theta.valueMap(config) for mb in self.buckets[self.priority[X]]])
def sample(self):
"""Draw a sample from the WMB-IS mixture proposal (assumes sum+ task)"""
# TODO: add argument "x" for partial conditioning? (return of configuration? or return tuple?)
# TODO check for positive, unit sum weights? (don't care?)
x = {}
logQx = 0.0
for i,b in reverse_enumerate(self.buckets): #reversed(list(enumerate(self.buckets))):
X = self.model.vars[ self.elimOrder[i] ]
qi = Factor([X],0.0)
for j,mb in enumerate(b):
qij = mb.theta.condition(x)
qij -= mb.msgFwd if mb.parent is None else mb.msgFwd.condition(x)
for c in mb.children: qij += c.msgFwd.condition(x)
qij -= qij.max()
qij *= 1.0 / mb.weight
qij.expIP()
qij *= mb.weight
qi += qij
qi /= qi.sum() # normalize (should be already)
xval = qi.sample(Z=1.0)[0]
x[X] = xval
logQx += np.log( qi[xval] )
return logQx,x
# functions:
# addClique(vs) : add clique etc and [fix up structure beneath]
# => build = add each clique in turn => valid structure?
#
# (0) basic merge op: join cliques & theta; fix parent; fix merges; fix msgFwd
# (1) "" + fix forward also
# (1) merge by score (score f'n: upper bd, scope, etc.)
#
# msgFwd(...)
#
# msgBwd(...)
#
# reparameterize()? -> write into model?
#
# heuristic?
#
# bound / value
#
# __str__ : wmbPrint
#
# sample() : draw from mixture proposal (if positive weights?)
#
#
class JTree(WMB):
"""Junction tree object for exact inference"""
def __init__(self, model, elimOrder=None, weights=1.0):
super(JTree,self).__init__(model,elimOrder=elimOrder,weights=weights)
self.merge(lambda a,b: 1.0) # merge until exact
self.forwardDone = False
self.setWeights(weights)
def msgForward(self):
return_value = super(JTree,self).msgForward()
self.forwardDone = True
return return_value
def beliefs(self, beliefs=None):
if beliefs is None: return {}
if not self.forwardDone: self.msgForward() # or raise valueerror?
return super(JTree,self).msgBackward(beliefs=beliefs)
def argmax(self):
if not self.forwardDone: self.msgForward() # or raise valueerror?
return super(JTree,self).assignBackward()
def sample(self):
if not self.forwardDone: self.msgForward() # or raise valueerror?
return super(JTree,self).sample()
# Smartly accommodate changes to the model?
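# Usage sketch (illustrative only; GraphModel / Factor construction comes from
# this package's graphmodel and factor modules, and the calls below just chain
# the methods defined above):
#
#   model = GraphModel(list_of_factors)             # discrete graphical model
#   wmb   = WMB(model, weights='sum+')              # upper bound on log Z
#   wmb.merge( wmb.scoreByScope(ibound=5) )         # merge mini-buckets up to iBound 5
#   for it in range(10):
#       logZ_ub = wmb.msgForward(0.5, 0.1)          # tighten & return the bound
#       wmb.msgBackward()
#   logq, x = wmb.sample()                          # draw from the WMB-IS proposal
#
#   jt    = JTree(model)                            # exact inference (merges until exact)
#   logZ  = jt.msgForward()
#   x_map = JTree(model, weights='max+').argmax()   # exact MAP / MPE configuration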
|
py | b407072ad1c40db98e03548b5764d635b6d6b555 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras import optimizers
from keras.mixed_precision import loss_scale_optimizer
from keras.mixed_precision import test_util as mp_test_util
from keras.optimizer_v2 import adam
from keras.optimizer_v2 import gradient_descent
from keras.optimizer_v2 import optimizer_v2
# Disable not-callable lint error, as the linter is unable to detect that
# LossScale instances are callable.
# pylint: disable=not-callable
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = tf.distribute.get_strategy
def create_mirrored_strategy():
if tf.config.list_logical_devices('GPU'):
return tf.distribute.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return tf.distribute.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'Base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'Distribute',
'strategy_fn': create_mirrored_strategy
})
@test_util.with_control_flow_v2
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LossScaleOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
# Running only in graph mode is useful, because optimizers sometimes return
# a value that, in Graph mode, is runnable with self.evaluate. But in Eager
# mode, the optimizer already does the computations and the return value
# cannot be run.
if not tf.executing_eagerly():
self.evaluate(val)
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
expected_grad)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
@parameterized.named_parameters(*TESTCASES)
def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = tf.Variable([5.0])
opt = gradient_descent.SGD(2.0)
loss_scale = 10.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=loss_scale)
self.assertEqual(self.evaluate(opt.loss_scale), loss_scale)
self.assertIsInstance(opt.loss_scale, tf.Tensor)
# We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
# / strategy.num_replicas_in_sync will not be exact, which could lead to
# assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
def testFixedLossScaleAppliedToLossWithGetGradients(self):
with tf.Graph().as_default():
var = tf.Variable([2.0])
opt = gradient_descent.SGD(1.0)
loss_scale = 10.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=loss_scale)
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
loss_scale)
loss = grad_check_fn(var)
run_op = opt.get_gradients(loss, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
# This will cause an assertion to run, as
# mp_test_util.create_identity_with_grad_check_fn added an assertion op.
self.evaluate(run_op)
def testDynamicAttrsWithFixedLossScale(self):
opt = gradient_descent.SGD()
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2.)
self.assertFalse(opt.dynamic)
self.assertIsNone(opt.dynamic_counter)
self.assertIsNone(opt.dynamic_growth_steps)
def testGetScaledLoss(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2.)
loss = tf.convert_to_tensor(5.)
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
loss = tf.convert_to_tensor(5., dtype='float16')
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
def testGetUnscaledGradients(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2)
scaled_grads = [
tf.convert_to_tensor(3.), None,
tf.convert_to_tensor(-4., dtype='float16')
]
grads = opt.get_unscaled_gradients(scaled_grads)
grads = [self.evaluate(g) if g is not None else g for g in grads]
self.assertEqual([1.5, None, -2.], grads)
def testGetUnscaledSparseGradients(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=2)
sparse_scaled_grad = tf.IndexedSlices(
tf.convert_to_tensor([[4., 2.], [8., 5.]]),
tf.convert_to_tensor([1, 3], dtype='int32'),
dense_shape=tf.convert_to_tensor([5, 2],
dtype='int32'))
sparse_grad = opt.get_unscaled_gradients([sparse_scaled_grad])[0]
self.assertIsInstance(sparse_grad, tf.IndexedSlices)
self.assertAllEqual([[2., 1.], [4., 2.5]],
self.evaluate(sparse_grad.values))
@parameterized.named_parameters(*TESTCASES)
def testDynamicLossScale(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
expected_gradient = tf.Variable(learning_rate /
strategy.num_replicas_in_sync)
with strategy.scope():
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=1)
self.assertEqual(opt.initial_scale, 2.)
self.assertIsInstance(opt.initial_scale, float)
self.assertEqual(opt.dynamic_growth_steps, 1)
self.assertIsInstance(opt.dynamic_growth_steps, int)
self.assertEqual(opt.initial_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(strategy, var, opt,
expected_gradient)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
# The loss scale will have doubled, so the expected gradient is also doubled.
self.evaluate(expected_gradient.assign(
2 * learning_rate / strategy.num_replicas_in_sync))
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# As before, the 2 is subtracted from the variable, making its new value
# 1.
self.assertAllClose([1.], self.evaluate(var))
def testDynamicLossScaleDefaultValues(self):
opt = gradient_descent.SGD()
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
self.assertEqual(opt.initial_scale, 2 ** 15)
self.assertEqual(opt.dynamic_growth_steps, 2000)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 15)
# pylint: disable=cell-var-from-loop
@parameterized.named_parameters(*TESTCASES)
def testClipping(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
for clip_type in ('clipnorm', 'global_clipnorm', 'clipvalue'):
with strategy.scope(), self.subTest(clip_type=clip_type):
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate, **{clip_type: 2.0})
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=1)
self.assertEqual(getattr(opt, clip_type), 2.0)
self.assertEqual(opt.initial_scale % strategy.num_replicas_in_sync, 0)
loss = lambda: var * 4 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
# Test running with clipped gradients
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The gradient is 4 but is clipped to 2, so the variable will be
# init_val - clipped_grad * lr == 5 - 2 * 2 == 1
self.assertAllClose([1.], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), 4)
# Test changing the clip amount and running again
setattr(opt, clip_type, 3.0)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# The gradient is 4 but is clipped to 3, so the variable will be
# prev_var - clipped_grad * lr == 1 - 3 * 2 == -5
self.assertAllClose([-5.], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), 8)
# Test Inf gradients are still skipped instead of being clipped
loss = lambda: var * float('Inf')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertAllClose([-5.], self.evaluate(var)) # Var does not change
self.assertEqual(self.evaluate(opt.loss_scale), 4)
# pylint: enable=cell-var-from-loop
@parameterized.named_parameters(*TESTCASES)
def testDynamicUpdate(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=1)
# Test optimizer with finite gradients
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Gradient is 2, so variable will have 2 subtracted from it
self.assertAllClose([-1.0, 0.0], self.evaluate(var))
# Loss scale has doubled from 2 to 4
self.assertEqual(4., self.evaluate(opt.loss_scale))
# Test optimizer with NaN gradients
loss = lambda: var * float('NaN')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [-1.0, 0.0])
# Loss scale should halve due to NaN gradients.
self.assertEqual(2., self.evaluate(opt.loss_scale))
@parameterized.named_parameters(*TESTCASES)
def testDynamicLossScaleWithFloat16Loss(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
with strategy.scope():
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=1)
def loss():
return tf.cast(var / strategy.num_replicas_in_sync, 'float16')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
def testNanOnOneReplicaOnly(self):
if not tf.test.is_gpu_available():
self.skipTest('Test requires GPU')
if (not tf.executing_eagerly() and
not tf.compat.v1.control_flow_v2_enabled()):
self.skipTest('b/181283011: GradientTape does not work properly with '
'V1 control flow, and opt.minimize uses GradientTape')
with create_mirrored_strategy().scope() as strategy:
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=2)
def loss():
rep_id = (tf.distribute.get_replica_context()
.replica_id_in_sync_group)
# The last element of last replica's gradient is NaN.
return tf.compat.v1.cond(
tf.constant(rep_id == 0), lambda: var * 2.,
lambda: var * tf.constant([1., float('NaN')]))
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [1.0, 2.0])
# Loss scale should halve due to NaN gradients.
self.assertEqual(1., self.evaluate(opt.loss_scale))
def testCustomAggregator(self):
def gradient_aggregator(grads_and_vars):
# Simulate an all-reduce where a replica has a NaN gradient by setting
# the last gradient to NaN
grads_and_vars = list(grads_and_vars)
last_grad, last_var = grads_and_vars[-1]
grads_and_vars[-1] = (last_grad * float('NaN'), last_var)
return grads_and_vars
var = tf.Variable([1.0, 2.0])
opt = gradient_descent.SGD(1.0, gradient_aggregator=gradient_aggregator)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=2)
loss = lambda: var * 2
run_op = opt.minimize(loss, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [1.0, 2.0])
# Loss scale should halve due to NaN gradients.
self.assertEqual(1., self.evaluate(opt.loss_scale))
@parameterized.named_parameters(*TESTCASES)
def testDynamicLossScaleWithSlots(self, strategy_fn):
strategy_obj = strategy_fn()
if (isinstance(strategy_obj, tf.distribute.MirroredStrategy) and
tf.compat.v1.control_flow_v2_enabled() and
not tf.executing_eagerly()):
self.skipTest('b/138667997')
with strategy_obj.scope() as strategy:
var = tf.Variable([1.0, 2.0])
# An SGD optimizer with momentum has slot variables.
opt = gradient_descent.SGD(1.0, momentum=1.)
initial_scale = 2.
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, initial_scale=initial_scale, dynamic_growth_steps=1)
loss = lambda: var / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The momentum accumulator starts at 0 and the gradient is 1. The
# accumulator is incremented by the gradient, so it is now 1. Then the
# accumulator is subtracted from the variable, so the variable is
# decremented by 1.
self.assertAllClose([0.0, 1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 2)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# The momentum accumulator was 1 before this step and the gradient is 1.
# The accumulator is incremented by the gradient, so it is now 2. Then the
# accumulator is subtracted from the variable, so the variable is
# decremented by 2.
self.assertAllClose([-2., -1.], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), initial_scale * 4)
self.assertEqual(opt.get_slot_names(), ['momentum'])
def testIterations(self):
opt = gradient_descent.SGD(2.0)
lso = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=10.)
lso.iterations = 7
self.assertEqual(lso.iterations, 7)
self.assertEqual(opt.iterations, 7)
@parameterized.named_parameters(*TESTCASES)
def testIterationsIncremented(self, strategy_fn):
with strategy_fn().scope() as strategy:
# Test iterations is incremented in opt.minimize.
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
var = tf.Variable([5.0])
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, [var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), 3.0) # Grad is 2, so var is 5 - 2
self.assertEqual(self.evaluate(opt.iterations), 1)
# Test iterations is incremented in opt.minimize even if gradients aren't
# applied to variables due to NaN gradients.
loss = lambda: var * float('NaN')
run_fn = lambda: opt.minimize(loss, [var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), 3.0)
self.assertEqual(self.evaluate(opt.iterations), 2)
def testWeightMethods(self):
with self.test_session():
var = tf.Variable([1.0])
opt = gradient_descent.SGD(1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
dynamic_growth_steps=1)
run_op = opt.minimize(lambda: var * 2, [var])
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertLen(opt.weights, 1) # The 'iterations' weight
self.assertEqual(self.evaluate(opt.weights[0]), 1)
self.assertEqual(opt.get_weights()[0], 1)
self.assertEqual(self.evaluate(opt.variables()[0]), 1)
opt.set_weights([np.array(2.)])
self.assertEqual(self.evaluate(opt.variables()[0]), 2)
def testHyperParametersExposed(self):
with self.cached_session():
opt = adam.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(lso.beta_1), 0.5)
self.assertIsInstance(lso.beta_1, tf.Variable)
self.assertEqual(self.evaluate(lso.lr), 1.0)
self.assertIs(lso.lr, opt.lr)
self.assertIs(lso.lr, lso.learning_rate)
lso.beta_1 = 0.25
self.assertEqual(self.evaluate(lso.beta_1), 0.25)
self.assertEqual(self.evaluate(opt.beta_1), 0.25)
self.assertIs(lso.beta_1, opt.beta_1)
opt.beta_1 = 0.75
self.assertEqual(self.evaluate(lso.beta_1), 0.75)
self.assertEqual(self.evaluate(opt.beta_1), 0.75)
self.assertIs(lso.beta_1, opt.beta_1)
lso.lr = 2.0
self.assertEqual(self.evaluate(lso.lr), 2.0)
self.assertEqual(self.evaluate(lso.learning_rate), 2.0)
self.assertEqual(self.evaluate(opt.lr), 2.0)
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertIs(lso.lr, opt.lr)
# Test setting attribute that is both attribute on LossScaleOptimizer and
# hyperparameter on wrapped optimizer.
class MyOpt(gradient_descent.SGD):
def __init__(self):
super().__init__()
self._set_hyper('loss_scale', 123.)
opt = MyOpt()
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
with self.assertRaises(AttributeError):
lso.loss_scale = 2.
def testArbitraryAttributesNotExposed(self):
opt = gradient_descent.SGD()
lso = loss_scale_optimizer.LossScaleOptimizer(opt)
self.assertFalse(opt.nesterov)
with self.assertRaisesRegex(
AttributeError,
"'LossScaleOptimizer' object has no attribute 'nesterov'"):
lso.nesterov # pylint: disable=pointless-statement
lso.nesterov = True
self.assertTrue(lso.nesterov)
self.assertFalse(opt.nesterov)
def testDir(self):
lso = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
dir_result = dir(lso)
self.assertIn('learning_rate', dir_result) # Hyperparameter
self.assertIn('lr', dir_result) # Hyperparameter
self.assertIn('minimize', dir_result) # Attribute
self.assertIn('loss_scale', dir_result) # Attribute
self.assertNotIn('nesterov', dir_result) # Attribute on inner optimizer
self.assertIn('nesterov', dir(lso.inner_optimizer))
def testApplyGradientsGetsUnwrappedTensors(self):
# Tests that gradients passed to apply_gradients are not wrapped in a
# DistributionStrategy wrapper, such as PerReplica, but instead are raw
# Tensors. Optimizer subclasses that override apply_gradients() expect raw
# Tensors, even though the base Optimizer can handle PerReplica gradients.
outer_self = self
class MyOptimizer(gradient_descent.SGD):
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
for grad, _ in grads_and_vars:
outer_self.assertIsInstance(grad, tf.Tensor)
return super(MyOptimizer,
self).apply_gradients(grads_and_vars, name,
experimental_aggregate_gradients)
with create_mirrored_strategy().scope() as strategy:
var = tf.Variable([5.0])
opt = MyOptimizer(learning_rate=1.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,
initial_scale=1)
loss = lambda: var * 2.0
run_fn = lambda: opt.minimize(loss, [var])
strategy.experimental_run(run_fn)
@parameterized.named_parameters(*TESTCASES)
def testV1Optimizer(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
with strategy.scope():
# Test FixedLossScale
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale=2)
self.assertIsInstance(opt.loss_scale, tf.Tensor)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.loss_scale), 2)
self.assertEqual(opt.initial_scale, 2)
self.assertIsNone(opt.dynamic_growth_steps)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, 2 / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
# Test DynamicLossScale
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, 'dynamic')
self.assertEqual(opt.initial_scale, 2 ** 15)
self.assertEqual(opt.dynamic_growth_steps, 2000)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 15)
for s in strategy.experimental_local_results(opt.dynamic_counter):
self.assertEqual(self.evaluate(s), 0)
loss = lambda: var * float('NaN')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertAllClose([5.], self.evaluate(var))
self.assertEqual(self.evaluate(opt.loss_scale), 2 ** 14)
for s in strategy.experimental_local_results(opt.dynamic_counter):
self.assertEqual(self.evaluate(s), 0)
@parameterized.named_parameters(*TESTCASES)
def testPassingV1LossScale(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
with strategy.scope():
# Test FixedLossScale
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
loss_scale = tf.mixed_precision.experimental.FixedLossScale(2.)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
self.assertIsInstance(opt.loss_scale, tf.Tensor)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.loss_scale), 2)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, 2 / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
# Test DynamicLossScale
var = tf.Variable([5.0])
opt = gradient_descent.SGD(learning_rate)
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=4, increment_period=1, multiplier=2)
loss_scale._current_loss_scale.assign(2)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
self.assertEqual(opt.initial_scale, 4)
self.assertEqual(opt.dynamic_growth_steps, 1)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Current loss scale is not copied so loss scale is reinitialized to 4
self.assertEqual(self.evaluate(opt.loss_scale), 4)
for s in strategy.experimental_local_results(opt.dynamic_counter):
self.assertEqual(self.evaluate(s), 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, 4 / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertAllClose([3.], self.evaluate(var))
def testPassingV1LossScaleErrors(self):
opt = gradient_descent.SGD()
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(multiplier=4)
with self.assertRaisesRegex(
ValueError, 'When passing a DynamicLossScale to "loss_scale", '
'DynamicLossScale.multiplier must be 2. Got: '
'DynamicLossScale'):
loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
class MyLossScale(tf.mixed_precision.experimental.LossScale):
def __call__(self):
return 1.
def update(self, grads):
return None, True
def get_config(self):
return {}
with self.assertRaisesRegex(
TypeError, 'Passing a LossScale that is not a FixedLossScale or a '
'DynamicLossScale is no longer supported. Got:'):
loss_scale_optimizer.LossScaleOptimizerV1(opt, MyLossScale())
def testLossScaleDelegationWithWrapper(self):
# Test learning_rate is exposed when LossScaleOptimizer wraps another
# wrapper.
class MyOptimizer(optimizer_v2.OptimizerV2):
def __init__(self):
super().__init__('MyOptimizer')
self.inner_optimizer = adam.Adam(learning_rate=1.0)
@property
def learning_rate(self):
return self.inner_optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, value):
self.inner_optimizer.learning_rate = value
def get_config(self):
return {}
with self.cached_session():
opt = MyOptimizer()
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
# Force hyperparameters to be created
opt.learning_rate # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.learning_rate), 1.0)
self.assertEqual(
self.evaluate(opt.inner_optimizer.inner_optimizer.learning_rate), 1.0)
opt.learning_rate = 2.0
self.assertEqual(self.evaluate(opt.learning_rate), 2.0)
self.assertEqual(self.evaluate(
opt.inner_optimizer.inner_optimizer.learning_rate), 2.0)
@parameterized.named_parameters({
'testcase_name': 'SaveAndRestoreBase',
'strategy_fn': default_strategy_fn,
'save_with_ls': True,
'restore_with_ls': True,
}, {
'testcase_name': 'SaveAndRestoreDistribute',
'strategy_fn': create_mirrored_strategy,
'save_with_ls': True,
'restore_with_ls': True,
}, {
'testcase_name': 'SaveBase',
'strategy_fn': default_strategy_fn,
'save_with_ls': True,
'restore_with_ls': False,
}, {
'testcase_name': 'SaveDistribute',
'strategy_fn': create_mirrored_strategy,
'save_with_ls': True,
'restore_with_ls': False,
}, {
'testcase_name': 'RestoreBase',
'strategy_fn': default_strategy_fn,
'save_with_ls': False,
'restore_with_ls': True,
}, {
'testcase_name': 'RestoreDistribute',
'strategy_fn': create_mirrored_strategy,
'save_with_ls': False,
'restore_with_ls': True,
})
def testCheckpoint(self, strategy_fn, save_with_ls, restore_with_ls):
class MySGD(gradient_descent.SGD):
"""A custom optimizer that tracks an extra variable."""
def __init__(self, *args, **kwargs):
super(MySGD, self).__init__(*args, **kwargs)
self.my_var = tf.Variable(0.)
self._track_trackable(self.my_var, 'my_var')
strategy = strategy_fn()
replicas = strategy.num_replicas_in_sync
if (isinstance(strategy, tf.distribute.MirroredStrategy) and
not tf.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
with self.test_session(), strategy.scope():
# Build and run a simple model.
var = tf.Variable([2.0])
opt = inner_opt = MySGD(1., momentum=1.)
if save_with_ls:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,
dynamic_growth_steps=2.)
run_fn = lambda: opt.minimize(lambda: var / replicas + 1., var_list=[var])
opt_op = strategy.experimental_run(run_fn)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(strategy.experimental_local_results(opt_op))
# Assert values.
self.assertEqual(self.evaluate(var), 1.)
if save_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
slot_var = opt.get_slot(var, 'momentum')
self.assertEqual(self.evaluate(slot_var).item(), -1)
self.assertEqual(self.evaluate(opt.iterations), 1)
# Set optimizer variable to check arbitrary optimizer attributes can be
# saved/restored
self.evaluate(inner_opt.my_var.assign(1.))
# Save a checkpoint.
checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Create new model
var = tf.Variable([2.0])
opt = inner_opt = MySGD(1., momentum=1.)
if restore_with_ls:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,
dynamic_growth_steps=2.)
# Restore new model.
checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
status = checkpoint.restore(save_path)
if save_with_ls:
status.assert_existing_objects_matched()
else:
status.assert_nontrivial_match()
# Assert restored values. We can only assert in eager mode since the
# variables are uninitialized in graph mode
if tf.executing_eagerly():
self.assertEqual(self.evaluate(var), 1.)
if save_with_ls and restore_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.)
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
elif restore_with_ls:
self.assertEqual(self.evaluate(opt.loss_scale), 1.)
self.assertEqual(self.evaluate(opt.dynamic_counter), 0)
self.assertEqual(self.evaluate(opt.iterations), 1)
# Run the model again.
run_fn = lambda: opt.minimize(lambda: var / replicas + 1., var_list=[var])
opt_op = strategy.experimental_run(run_fn)
# Assert new values.
self.evaluate(tf.compat.v1.global_variables_initializer())
status.run_restore_ops()
self.evaluate(strategy.experimental_local_results(opt_op))
self.assertEqual(self.evaluate(var), -1)
slot_var = opt.get_slot(var, 'momentum')
self.assertEqual(self.evaluate(slot_var).item(), -2)
self.assertEqual(self.evaluate(opt.iterations), 2)
self.assertEqual(self.evaluate(inner_opt.my_var), 1)
# Restore model again to test restoring after slots are created
status = checkpoint.restore(save_path)
if save_with_ls and restore_with_ls:
status.assert_consumed()
elif save_with_ls:
status.assert_existing_objects_matched()
elif restore_with_ls:
status.assert_nontrivial_match()
status.run_restore_ops()
self.assertEqual(self.evaluate(var), 1)
self.assertEqual(self.evaluate(slot_var).item(), -1)
@combinations.generate(combinations.combine(
get_config=['v1', 'v2', 'tf2_3'], from_config=['v1', 'v2']))
def testGetConfigFixed(self, get_config, from_config):
# Get a config from LossScaleOptimizerV1, LossScaleOptimizer, or the
# LossScaleOptimizer from TF 2.3. Then restore the config into a
# LossScaleOptimizerV1 or LossScaleOptimizer
opt = gradient_descent.SGD(2., momentum=0.5)
if get_config == 'v1':
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, 2)
config = opt.get_config()
elif get_config == 'v2':
opt = loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=2)
config = opt.get_config()
else:
self.assertEqual(get_config, 'tf2_3')
config = {
'optimizer': {
'class_name': 'SGD',
'config': {
'learning_rate': 2.0,
'momentum': 0.5,
'decay': 0.0,
'nesterov': False,
'name': 'SGD',
}
},
'loss_scale': {
'class_name': 'FixedLossScale',
'config': {'loss_scale_value': 2.0}
},
}
if from_config == 'v1':
opt = loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
else:
self.assertEqual(from_config, 'v2')
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
# Test attributes on the optimizer
self.assertEqual(self.evaluate(opt.lr), 2.)
self.assertEqual(self.evaluate(opt.inner_optimizer.lr), 2.)
self.assertEqual(self.evaluate(opt.momentum), 0.5)
self.assertEqual(self.evaluate(opt.loss_scale), 2.)
self.assertEqual(opt.initial_scale, 2.)
self.assertIsNone(opt.dynamic_growth_steps)
self.assertIsNone(opt.dynamic_counter)
self.assertFalse(opt.dynamic)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.])
@combinations.generate(combinations.combine(
get_config=['v1', 'v2', 'tf2_3'], from_config=['v1', 'v2']))
def testGetConfigDynamic(self, get_config, from_config):
# Get a config from LossScaleOptimizerV1, LossScaleOptimizer, or the
# LossScaleOptimizer from TF 2.3. Then restore the config into a
# LossScaleOptimizerV1 or LossScaleOptimizer
opt = gradient_descent.SGD(2., momentum=0.5)
if get_config == 'v1':
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=2, increment_period=3)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
config = opt.get_config()
elif get_config == 'v2':
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2,
dynamic_growth_steps=3)
config = opt.get_config()
else:
self.assertEqual(get_config, 'tf2_3')
config = {
'optimizer': {
'class_name': 'SGD',
'config': {
'learning_rate': 2.0,
'momentum': 0.5,
'decay': 0.0,
'nesterov': False,
'name': 'SGD',
}
},
'loss_scale': {
'class_name': 'DynamicLossScale',
'config': {
'initial_loss_scale': 2.0,
'increment_period': 3,
'multiplier': 2.0,
}
},
}
if from_config == 'v1':
opt = loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
else:
self.assertEqual(from_config, 'v2')
opt = loss_scale_optimizer.LossScaleOptimizer.from_config(config)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
# Test attributes on the optimizer
self.assertEqual(self.evaluate(opt.lr), 2.)
self.assertEqual(self.evaluate(opt.inner_optimizer.lr), 2.)
self.assertEqual(self.evaluate(opt.momentum), 0.5)
self.assertEqual(self.evaluate(opt.loss_scale), 2.)
self.assertEqual(opt.initial_scale, 2.)
self.assertEqual(opt.dynamic_growth_steps, 3.)
self.assertTrue(opt.dynamic)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.])
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
def test_from_config_with_invalid_multiplier(self):
config = {
'optimizer': {
'class_name': 'SGD',
'config': {
'learning_rate': 2.0,
'momentum': 0.5,
'decay': 0.0,
'nesterov': False,
'name': 'SGD',
}
},
'loss_scale': {
'class_name': 'DynamicLossScale',
'config': {
'initial_loss_scale': 2.0,
'increment_period': 3,
'multiplier': 4.0,
}
},
}
expected_error = ('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: DynamicLossScale\\(')
with self.assertRaisesRegex(ValueError, expected_error):
loss_scale_optimizer.LossScaleOptimizer.from_config(config)
with self.assertRaisesRegex(ValueError, expected_error):
loss_scale_optimizer.LossScaleOptimizerV1.from_config(config)
@parameterized.named_parameters({
'testcase_name': 'V2',
'use_v1': False,
}, {
'testcase_name': 'V1',
'use_v1': True,
},)
def testSerializationWithBuiltInOptimizer(self, use_v1):
opt = gradient_descent.SGD(2., momentum=0.5)
if use_v1:
loss_scale = tf.mixed_precision.experimental.DynamicLossScale(
initial_loss_scale=2., increment_period=3.)
opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
else:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
dynamic_growth_steps=3.)
config = optimizers.serialize(opt)
opt = optimizers.deserialize(config)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.lr), 2.)
self.assertEqual(self.evaluate(opt.inner_optimizer.momentum), 0.5)
self.assertEqual(self.evaluate(opt.loss_scale), 2.)
self.assertEqual(opt.dynamic_growth_steps, 3.)
    self.assertTrue(opt.dynamic)
    # Deserializing a LossScaleOptimizer always results in a V2
# LossScaleOptimizer, even if serialized with a LossScaleOptimizerV1.
self.assertAllEqual(type(opt), loss_scale_optimizer.LossScaleOptimizer)
# Ensure the optimizer can be used
var = tf.Variable([5.0])
run_op = self._run_fn_with_grad_check(
tf.distribute.get_strategy(), var, opt, 2)()
self.evaluate(tf.compat.v1.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
self.assertEqual(self.evaluate(var), [3.])
self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
def testSerializationWithCustomOptimizer(self):
class MySGD(gradient_descent.SGD):
def __init__(self, *args, **kwargs):
super(MySGD, self).__init__(*args, **kwargs)
self.my_attribute = 123
opt = MySGD(2., momentum=0.5)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=2.,
dynamic_growth_steps=3.)
config = optimizers.serialize(opt)
custom_objects = {'MySGD': MySGD}
opt = optimizers.deserialize(config, custom_objects=custom_objects)
# Force hyperparameters to be created
opt.lr # pylint: disable=pointless-statement
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(self.evaluate(opt.lr), 2.)
self.assertEqual(self.evaluate(opt.inner_optimizer.momentum), 0.5)
self.assertEqual(self.evaluate(opt.loss_scale), 2.)
self.assertEqual(opt.dynamic_growth_steps, 3.)
self.assertEqual(opt.inner_optimizer.my_attribute, 123)
def testUnsupportedStrategy(self):
strategy = tf.distribute.experimental.CentralStorageStrategy()
expected_error = (
'Loss scaling is not supported with the tf.distribute.Strategy: '
'CentralStorageStrategy. Try using a different Strategy, e.g. a '
'MirroredStrategy')
with strategy.scope(), self.assertRaisesRegex(ValueError, expected_error):
loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
opt = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD())
with strategy.scope():
var = tf.Variable(1.0)
loss = lambda: var * 2.0
run_fn = lambda: opt.minimize(loss, [var])
with self.assertRaisesRegex(ValueError, expected_error):
strategy.experimental_run(run_fn)
def testInvalidArgsWithFixedLossScale(self):
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
ValueError, '"initial_scale" must be specified if "dynamic" is False'):
loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False)
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
ValueError, '"dynamic_growth_steps" must be None if "dynamic" is '
'False, but got: 2'):
loss_scale_optimizer.LossScaleOptimizer(
opt, dynamic=False, initial_scale=1, dynamic_growth_steps=2)
def testDynamicMustBeBool(self):
opt = gradient_descent.SGD()
with self.assertRaisesRegex(
TypeError, '"dynamic" argument to LossScaleOptimizer.__init__ must be '
"a bool, but got: 'dynamic'"):
loss_scale_optimizer.LossScaleOptimizer(opt, 'dynamic')
def testErrorWhenNesting(self):
opt = gradient_descent.SGD()
opt = loss_scale_optimizer.LossScaleOptimizer(opt)
with self.assertRaisesRegex(
TypeError, 'LossScaleOptimizer cannot wrap another LossScaleOptimizer'):
loss_scale_optimizer.LossScaleOptimizer(opt)
def testErrorWrappingSameOptimizerMultipleTimes(self):
inner_opt = gradient_descent.SGD()
loss_scale_optimizer.LossScaleOptimizer(inner_opt)
with self.assertRaisesRegex(
ValueError,
'"inner_optimizer" is already wrapped by a LossScaleOptimizer.'):
loss_scale_optimizer.LossScaleOptimizer(inner_opt)
if __name__ == '__main__':
tf.test.main()
|
py | b407084189abb7e33455c7e31f1f03b914c78d4c | import pycuda.driver as drv
drv.init()
print "%d device(s) found." % drv.Device.count()
for ordinal in range(drv.Device.count()):
dev = drv.Device(ordinal)
print "Device #%d: %s" % (ordinal, dev.name())
print " Compute Capability: %d.%d" % dev.compute_capability()
print " Total Memory: %s KB" % (dev.total_memory()//(1024))
|
py | b40709b90c6492e5839c2371325fe9e9b119ef41 | #! -*- coding:utf-8 -*-
import os
import sys
def zero_or_not(target):
#return 1 if target > 0 else 0
return target/127.5 - 1.0
def line2label_fig(l):
#zeros = [0.0] * 10
zeros = [0.0] * 11
tmp = l.strip().split(",")
zeros[int(tmp[0])] = 1.0
fig = [zero_or_not(int(i)) for i in tmp[1:]]
#tmp2 = [[zero_or_not(int(i))] for i in tmp[1:]]
#fig = [tmp2[28 * i : 28 * i + 28] for i in range(28)]
return zeros, fig
def get_batch(f_obj, num_batch):
label_ret = []
fig_ret = []
for i, l in enumerate(f_obj):
label, fig = line2label_fig(l)
label_ret.append(label)
fig_ret.append(fig)
if i == num_batch - 1:
break
return label_ret, fig_ret
if __name__ == u'__main__':
with open('mnist_test.csv', 'r') as f:
labels, figs = get_batch(f, 2)
print(figs)
|
py | b40709e228f47dedb24bef32b4fd76d046de9132 | from __future__ import unicode_literals
import os
import sys
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase
from mongoengine import Document, fields
from rest_framework_mongoengine.serializers import DocumentSerializer
from . import mockpil
from .utils import dedent
pwd = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
try:
from unittest import mock # NOQA
except ImportError:
import mock # NOQA
sys.modules['PIL'] = mockpil
fields.Image = mockpil.Image
fields.ImageOps = mockpil.ImageOps
class FileDoc(Document):
image = fields.ImageField(collection_name='images')
class TestSerializer(DocumentSerializer):
class Meta:
model = FileDoc
fields = '__all__'
class TestFilesMapping(TestCase):
def test_mapping(self):
class FileDoc(Document):
f = fields.FileField(collection_name='files')
i = fields.ImageField(collection_name='images')
class TestSerializer(DocumentSerializer):
class Meta:
model = FileDoc
fields = '__all__'
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
f = FileField(required=False)
i = ImageField(required=False)
""")
assert repr(TestSerializer()) == expected
class TestFilesIntegration(TestCase):
""" operational test
Test if primary methods work.
"""
def setUp(self):
self.files = [open(pwd + "cat1.jpg", "rb"), open(pwd + "cat2.jpg", "rb")]
self.uploads = [UploadedFile(f, f.name, "image/jpeg", os.path.getsize(f.name)) for f in self.files]
def doCleanups(self):
FileDoc.drop_collection()
FileDoc._get_db().drop_collection('files')
for f in self.files:
f.close()
def test_parse(self):
data = {'image': self.uploads[0]}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
expected = {
'image': None
}
assert serializer.data == expected
def test_retrieve(self):
instance = FileDoc.objects.create(image=self.files[0])
serializer = TestSerializer(instance)
expected = {
'id': str(instance.id),
'image': str(instance.image.grid_id),
}
assert serializer.data == expected
def test_create(self):
data = {'image': self.uploads[0]}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.image, fields.GridFSProxy)
assert instance.image.length == self.uploads[0].size
expected = {
'id': str(instance.id),
'image': str(instance.image.grid_id)
}
assert serializer.data == expected
def test_update(self):
instance = FileDoc.objects.create(image=self.files[0])
data = {'image': self.uploads[1]}
serializer = TestSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.image, fields.GridFSProxy)
assert instance.image.length == self.uploads[1].size
expected = {
'id': str(instance.id),
'image': str(instance.image.grid_id)
}
assert serializer.data == expected
|
py | b4070a13a921a75b286e3d47ec7c183232603cac | from ._util import refs_equal
class TestChain:
@refs_equal
def test_attr_call_doesnt_contain_call(self):
s = 'import a\na.attr()'
refs = [('a', 'a'), ('a.attr', 'a.attr')]
return s, refs
@refs_equal
def test_attr_call_attr_split_in_two(self):
s = 'import a\na.attr().b'
refs = [('a', 'a'), ('a.attr', 'a.attr'), ('a.attr.().b', 'b')]
return s, refs
@refs_equal
def test_attr_call_attr_call_attr_split_in_three(self):
s = 'import a\na.attr().b().c'
refs = [
('a', 'a'),
('a.attr', 'a.attr'),
('a.attr.().b', 'b'),
('a.attr.().b.().c', 'c'),
]
return s, refs
|
py | b4070af07a76a6619faef1bbc4fc66c274c0e2ed | from __future__ import print_function
from django.conf import settings
from suds.client import Client
from suds.plugin import MessagePlugin
import sys
def get_client(product_code):
url = "https://www.uc.se/UCSoapWeb/services/ucOrders2"
client = Client(url + "?wsdl", plugins=[VersionPlugin(product_code)])
client.sd[0].service.setlocation(url)
return client
class VersionPlugin(MessagePlugin):
def __init__(self, product_code):
self.product_code = product_code
def marshalled(self, context):
body = context.envelope.getChild('Body')
company_report = body[0]
company_report.set('ns1:product', self.product_code)
company_report.set('ns1:version', '2.1')
print(str(context.envelope.getChild('Body')))
def get_customer(client):
customer = client.factory.create("ns0:customer")
customer.userId = settings.UC_USER_ID
customer.password = settings.UC_PASSWORD
return customer
def get_report_query(client, organization_number):
reportQuery = client.factory.create("ns0:reportQuery")
reportQuery.object = organization_number
reportQuery._xmlReply = "true"
reportQuery._htmlReply = "false"
reportQuery._reviewReply = "false"
reportQuery._lang = "eng"
return reportQuery
def get_company_report(client, organization_number):
customer = get_customer(client)
report_query = get_report_query(client, organization_number)
return client.service.companyReport(
customer=customer, companyReportQuery=report_query)
def get_company_full_report(organization_number):
return get_company_report(get_client("410"), organization_number)
def get_company_risk_report(organization_number):
return get_company_report(get_client("4"), organization_number)
def get_credit_rating_group_term_indices(report):
""" Returns a tuple (group, term) where `group` is the index of the
    Credit Rating info provided in the report and `term` is the index of the
    term containing the Risk Rating value.
"""
try:
# Group W110 = Credit Rating
# Term W11001 = Risk Rating
for index_, group in enumerate(report.ucReport[0].xmlReply.reports[0].report[0].group):
if group._id == "W110":
for index__, term in enumerate(report.ucReport[0].xmlReply.reports[0].report[0].group[index_].term):
if term._id == "W11001":
return (index_, index__)
except AttributeError:
raise Exception(
"Provided UC report doesn't include sufficient data to get Group/Term index."), None, sys.exc_info()[2]
|
py | b4070b5e00b34104cb4c0f58809c759849b79f5b | import logging
from forge_sdk import did as forge_did, rpc as forge_rpc
from forge_sdk import utils as forge_utils
from event_chain import protos
from event_chain.app import models
logger = logging.getLogger('controller-ticket')
def buy_tickets_general(event_address, num, wallet, token=None):
spec_datas = []
for i in range(0, num):
spec_datas.append({'id': forge_did.AbtDid(role_type='asset').new()})
res, tickets = forge_rpc.acquire_asset(to=event_address,
spec_datas=spec_datas,
type_url='ec:s:general_ticket',
proto_lib=protos, wallet=wallet,
token=token)
if forge_utils.is_response_ok(res):
return tickets
else:
logger.error(f"Fail to buy tickets for event {event_address}")
def buy_ticket(event_address, user):
logger.debug(f'user wallet: {user.get_wallet()}')
ticket_address = buy_tickets_general(event_address, 1,
user.get_wallet(), user.wallet.token)
logger.debug(f"Buy ticket process is completed. ticket {ticket_address}")
return ticket_address
def consume(ticket_address, user):
logger.debug("Consuming ticket {}".format(ticket_address))
ticket = models.get_ticket_state(ticket_address)
    consume_tx = models.get_event_factory(ticket.parent).consume_tx
if not consume_tx:
return None
logger.debug("consume tx: {}".format(consume_tx))
tx = forge_rpc.finalize_consume_asset(consume_tx, user.get_wallet(), user.wallet.token,
forge_utils.encode_to_any('fg:x:address',
ticket_address.encode()))
res = forge_rpc.send_tx(tx)
if res.code != 0 or res.hash is None:
logger.error(res)
logger.error('Fail to consume ticket {}'.format(ticket_address))
else:
logger.info(
"ConsumeTx has been sent by tx: {}!".format(res.hash),
)
return res.hash
def verify_ticket_address(ticket_address):
try:
state = models.get_ticket_state(ticket_address)
except Exception:
logger.error('Error in checking ticket')
raise TypeError("{} is not an ticket address.".format(ticket_address))
if not state:
logger.error(u'Ticket {} does not exist.'.format(ticket_address))
raise ValueError('Ticket {} does not exist'.format(ticket_address))
def list_user_tickets(user_address):
assets = models.sql.AssetState.query.filter_by(owner=user_address).all()
tickets = []
for asset in assets:
asset_state = forge_rpc.get_single_asset_state(asset.address)
if asset_state and asset_state.data.type_url == 'ec:s:general_ticket':
tickets.append(asset_state)
return tickets
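# Hypothetical end-to-end sketch of the flow implemented above (assumes `user` is the
# app's user object exposing get_wallet() and wallet.token, and `event_address` is the
# address of an existing event asset):
#
#   ticket_addresses = buy_ticket(event_address, user)
#   verify_ticket_address(ticket_addresses[0])
#   tx_hash = consume(ticket_addresses[0], user)
#
# buy_ticket returns whatever forge_rpc.acquire_asset reports for the new tickets, so
# the exact shape of the return value depends on the forge SDK version in use.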
|
py | b4070b875251577df571dab0829be73e24530879 | #! /usr/bin/env python
import sys
import rospy
import numpy as np
import ros_numpy # convert msg to np.array : https://github.com/eric-wieser/ros_numpy
import camera_utils
import cv2
from cv_bridge import CvBridge, CvBridgeError
from darknet_ros_msgs.msg import BoundingBoxes
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
# lidar --> camera coordinate
# camera --> image coordinate
# image show point data
# if you want to run this code you need to get 3 kind of msg
# 1. Image
# 2. BoundingBoxes (darknet_ros)
# 3. PointCloud2
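# A minimal sketch of the "camera --> image" projection step described above. This is a
# hypothetical standalone helper (not part of camera_utils): a pinhole camera maps a
# camera-frame point (x, y, z) to pixels via u = fx * x / z + cx and v = fy * y / z + cy.
# The focal length and 640x480 image size mirror the values used in the class below; the
# principal point is assumed to be the image center.
def project_point_to_pixel(x, y, z,
                           fx=462.1379497504639, fy=462.1379497504639,
                           cx=640 / 2.0, cy=480 / 2.0):
    if z <= 0:
        return None  # points behind the camera cannot be projected
    u = fx * x / z + cx
    v = fy * y / z + cy
    return int(round(u)), int(round(v))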
class image_converter:
def __init__(self):
self.img_pub = rospy.Publisher('laser_img',Image, queue_size=1000)
self.bridge = CvBridge()
self.img_sub = rospy.Subscriber('/camera/color/image_raw',Image,self.callback_camera)
self.yolo_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',BoundingBoxes,self.callback_yolo)
self.laser_sub = rospy.Subscriber('/PointCloud2',PointCloud2, self.callback_lidar) #woon bong's --> rosrun cloud trans
self.camera_utils_ = camera_utils.CameraProjection()
self.image_width_ = 640
self.image_height_ = 480
        self.focal_length = 462.1379497504639
        # Initialize message caches so the image callback cannot fail if it runs
        # before the first bounding-box or point-cloud message has arrived.
        self.boxes = []
        self.points = np.zeros((0, 3))
def callback_yolo(self, data):
try:
self.boxes = data.bounding_boxes
except rospy.ROSInterruptException:
pass
def callback_lidar(self, data):
try:
self.pc2 = ros_numpy.numpify(data)
except rospy.ROSInterruptException:
pass
self.points = np.zeros((self.pc2.shape[0],3))
self.points[:,0] = self.pc2['x']
self.points[:,1] = self.pc2['y']
self.points[:,2] = self.pc2['z']
def callback_camera(self, data):
try:
cv_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(self.rows, self.cols, channels) = cv_img.shape
for box in self.boxes:
cv2.rectangle(cv_img, (box.xmin,box.ymin), (box.xmax, box.ymax), (0,255,0), 1)
self.x_center = int(((self.cols /2) - (box.xmin + box.xmax) / 2) + (self.cols/2)) # image to pixel (u,v)
self.y_center = int(((self.rows / 2) - (box.ymin + box.ymax) / 2) + (self.rows/2))
cv2.line(cv_img, (self.x_center,self.y_center),(self.x_center,self.y_center), (255,0,0), 5)
for point in self.points:
# 1. lidar to camera : need transformation from lidar to camera coordinate
# 2. camera to image
self.img_posi = self.camera_utils_.camera_to_image(point[0], point[1], point[2]) #* self.camera_utils_.ratio
#print(self.img_posi)
self.pixel_posi = (self.img_posi + [0, (self.cols /2 ), 0]) #* self.camera_utils_.ratio# ratio# [depth , pixel_v, pixel_u]
#print(self.camera_utils_.ratio)
#print(self.pixel_posi)
            cv2.line(cv_img, (int(self.pixel_posi[1]),int(self.pixel_posi[2]+240)), (int(self.pixel_posi[1]), int(self.pixel_posi[2]+240)), (255,0,0), 10) # 240 is half the 480-pixel image height (v offset); adjust it if the image size differs
cv2.imshow("Image window", cv_img)
cv2.waitKey(3)
try:
self.img_pub.publish(self.bridge.cv2_to_imgmsg(cv_img, "bgr8"))
except CvBridgeError as e:
print(e)
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutdown")
cv2.destroyAllWindows()
if __name__ == "__main__":
    main(sys.argv)
 |
py | b4070d39ec1dfd84505c6c309ffaa75b83ced04f | from __future__ import print_function
"""
Author:
Jack Duryea
Waterland Lab
Computational Epigenetics Section
Baylor College of Medicine
Created April 2018
Updated April 11 2019: use random forests as model
PReLIM: Preceise Read Level Imputation of Methylation
PReLIM imputes missing CpG methylation
states in CpG matrices.
"""
# standard imports
import numpy as np
from tqdm import tqdm
import copy
import time
from collections import defaultdict
import random
# sklearn imports
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# Pickle
try:
import cPickle as p
except ImportError:
import pickle as p
# TODO: most of these fields are redundant in our application
class CpGBin():
"""
Constructor for a bin
"""
def __init__(self,
matrix,
#relative_positions
binStartInc=None,
binEndInc=None,
cpgPositions=None,
sequence="",
encoding=None,
missingToken= -1,
chromosome=None,
binSize=100,
species="MM10",
verbose=True,
tag1=None,
tag2=None):
"""
:param matrix: numpy array, the bin's CpG matrix.
:param binStartInc: integer, the starting, inclusive, chromosomal index of the bin.
:param binEndInc: integer, the ending, inclusive, chromosomal index of the bin.
:param cpgPositions: array of integers, the chromosomal positions of the CpGs in the bin.
:param sequence: string, nucleotide sequence (A,C,G,T)
:param encoding: array, a reduced representation of the bin's CpG matrix
:param missingToken: integer, the token that represents missing data in the matrix.
:param chromosome: string, the chromosome this bin resides in.
:param binSize: integer, the number of base pairs this bin covers
:param species: string, the speices this bin belongs too.
:param verbose: boolean, print warnings, set to "false" for no error checking and faster speed
:param tag1: anything, for custom use.
:param tag2: anything, for custom use.
"""
self.cpgDensity = matrix.shape[1]
self.readDepth = matrix.shape[0]
self.matrix = np.array(matrix, dtype=float)
self.binStartInc = binStartInc
self.binEndInc = binEndInc
self.cpgPositions = cpgPositions
self.sequence = sequence
self.missingToken = missingToken
self.chromosome = chromosome
self.binSize = binSize
self.species = species
self.tag1 = tag1
self.tag2 = tag2
class PReLIM():
"""
PReLIM imputation class to handle training and predicting from models.
"""
def __init__(self, cpgDensity=2):
self.model = None
self.cpgDensity = cpgDensity
self.METHYLATED = 1
self.UNMETHYLATED = 0
self.MISSING = -1
self.methylated = 1
self.unmethylated = 0
self.unknown = -1
# Train a model
def train(self, bin_matrices, model_file="no", verbose=False):
"""
bin_matrices: list of cpg matrices
model_file, string, The name of the file to save the model to.
If None, then create a file name that includes a timestamp.
If you don't want to save a file, set this to "no"
"""
# bin_matrices: a list of cpg matrices
X,y = self.get_X_y(bin_matrices, model_file=model_file, verbose=False)
# Train the neural network model
self.fit(X,y, model_file=model_file, verbose=verbose)
def fit(self,
X_train,
y_train,
n_estimators = [10, 50, 100, 500, 1000],
cores = -1,
max_depths = [1, 5, 10, 20, 30],
model_file=None,
verbose=False
):
"""
Inputs:
1. X_train, numpy array, Contains feature vectors.
2. y_train, numpy array, Contains labels for training data.
3. n_estimators, list, the number of estimators to try during a grid search.
4. max_depths, list, the maximum depths of trees to try during a grid search.
5. cores, the number of cores to use during training, helpful for grid search.
6. model_file, string, The name of the file to save the model to.
If None, then create a file name that includes a timestamp.
If you don't want to save a file, set this to "no"
5-fold validation is built into the grid search
Outputs:
The trained model
Usage:
model.fit(X_train, y_train)
"""
grid_param = {
"n_estimators": n_estimators,
"max_depth": max_depths,
}
# Note: let the grid search use a lot of cores, but only use 1 for each forest
# since dispatching can take a lot of time
rf = RandomForestClassifier(n_jobs=1)
self.model = GridSearchCV(rf, grid_param, n_jobs=cores, cv=5, verbose=verbose)
self.model.fit(X_train, y_train)
# save the model
if model_file == "no":
return self.model
if not model_file:
model_file = "PReLIM_model" + str(time.time())
p.dump(self.model, open(model_file,"wb"))
return self.model
# Feature collection directly from bins
def get_X_y(self, bin_matrices, model_file=None, verbose=False):
bins = []
# convert to bin objects for ease of use
for matrix in bin_matrices:
mybin = CpGBin( matrix=matrix )
bins.append( mybin )
# find bins with no missing data
complete_bins = _filter_missing_data( bins )
random.shuffle( complete_bins )
# apply masks
masked_bins = _apply_masks( complete_bins, bins )
# extract features
X, y = self._collectFeatures( masked_bins )
return X, y
# Return a vector of predicted classes
def predict_classes(self, X):
"""
Inputs:
1. X, numpy array, contains feature vectors
Outputs:
            1. 1-d numpy array of predicted class labels
Usage:
y_pred = CpGNet.predict_classes(X)
"""
return self.model.predict(X)
# Return a vector of probabilities for methylation
def predict(self, X):
"""
Inputs:
1. X, numpy array, contains feature vectors
Outputs:
            1. 1-d numpy array of methylation probabilities (probability of class 1)
Usage:
y_pred = CpGNet.predict(X)
"""
return self.model.predict_proba(X)[:,1]
def predict_proba(self, X):
"""
Inputs:
1. X, numpy array, contains feature vectors
Outputs:
            1. 1-d numpy array of methylation probabilities
Usage:
y_pred = CpGNet.predict(X)
"""
        # Return the probability of the methylated class (column 1) for each feature vector.
        return self.model.predict_proba(X)[:, 1]
# Load a saved model
def loadWeights(self, model_file):
"""
Inputs:
1. model_file, string, name of file with a saved model
Outputs:
None
Effects:
self.model is loaded with the provided weights
"""
self.model = p.load(open(model_file,"rb"))
def _get_imputation_features(self,matrix):
'''
Returns a vector of features needed for the imputation of this matrix
Inputs:
1. matrix, a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
Outputs:
1. A feature vector for the matrix
'''
X = []
numReads = matrix.shape[0]
density = matrix.shape[1]
nan_copy = np.copy(matrix)
nan_copy[nan_copy == -1] = np.nan
column_means = np.nanmean(nan_copy, axis=0)
row_means = np.nanmean(nan_copy, axis=1)
encoding = self._encode_input_matrix(matrix)[0]
for i in range(numReads):
for j in range(density):
observed_state = matrix[i, j]
if observed_state != -1:
continue
row_mean = row_means[i]
col_mean = column_means[j]
row = np.copy(matrix[i])
row[j] = -1
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
X = np.array(X)
return X
# Imputes missing values in Bins
def impute(self, matrix):
"""
Inputs:
1. matrix, a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
Outputs:
1. A 2d numpy array with predicted probabilities of methylation
"""
X = self._get_imputation_features(matrix)
if len(X) == 0: # nothing to impute
return matrix
predictions = self.predict(X)
k = 0 # keep track of prediction index for missing states
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
return predicted_matrix
def impute_many(self, matrices):
'''
Imputes a bunch of matrices at the same time to help speed up imputation time.
Inputs:
1. matrices: array-like (i.e. list), where each element is
a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
Outputs:
1. A List of 2d numpy arrays with predicted probabilities of methylation for unknown values.
'''
# Extract all features for all matrices so we can predict in bulk, this is where the speedup comes from
X = np.array([features for matrix_features in [self._get_imputation_features(matrix) for matrix in matrices] for features in matrix_features])
if len(X) == 0:
return matrices
predictions = self.predict(X)
predicted_matrices = []
# TODO: lots of for-loops here, could be sped up?
k = 0 # keep track of prediction index for missing states, order is crucial!
for matrix in matrices:
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
predicted_matrices.append(predicted_matrix)
return predicted_matrices
### Helper functions, for private use only ###
# Returns a matrix encoding of a CpG matrix
def _encode_input_matrix(self, m):
matrix = np.copy(m)
n_cpgs = matrix.shape[1]
matrix += 1 # deal with -1s
base_3_vec = np.power(3, np.arange(n_cpgs - 1, -1, -1))
#
encodings = np.dot(base_3_vec, matrix.T)
#
encoded_vector_dim = np.power(3, n_cpgs)
encoded_vector = np.zeros(encoded_vector_dim)
#
for x in encodings:
encoded_vector[int(x)] += 1
#
num_reads = encodings.shape[0]
#
# Now we normalize
encoded_vector_norm = normalize([encoded_vector], norm="l1")
return encoded_vector_norm[0], num_reads
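    # Worked example of the encoding above (hypothetical 2-CpG matrix, not from the source):
    # for m = [[1, 0], [0, 0]], adding 1 gives [[2, 1], [1, 1]]; with base_3_vec = [3, 1]
    # the reads encode to 3*2 + 1 = 7 and 3*1 + 1 = 4, so positions 4 and 7 of the
    # 3**2 = 9-element vector are incremented and L1-normalization yields 0.5 at each,
    # with num_reads = 2.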
# finds the majority class of the given column, discounting the current cpg
def _get_column_mean(self, matrix, col_i, current_cpg_state):
sub = matrix[:, col_i]
return self._get_mean(sub, current_cpg_state)
# finds the majority class of the given read, discounting the current cpg
def _get_read_mean(self, matrix, read_i, current_cpg_state):
sub = matrix[read_i, :]
return self._get_mean(sub, current_cpg_state)
def _get_mean(self, sub_matrix, current_cpg_state):
num_methy = np.count_nonzero(sub_matrix == self.METHYLATED)
num_unmethy = np.count_nonzero(sub_matrix == self.UNMETHYLATED)
if current_cpg_state == self.METHYLATED:
num_methy -= 1
num_methy = max(0, num_methy)
if current_cpg_state == self.UNMETHYLATED:
num_unmethy -= 1
num_unmethy = max(0, num_unmethy)
if float(num_methy + num_unmethy) == 0:
return -2
return float(num_methy) / float(num_methy + num_unmethy)
# Returns X, y
# note: y can contain the labels 1,0, -1
def _collectFeatures(self, bins):
X = []
Y = []
for Bin in tqdm(bins):
observed_matrix = Bin.tag2["observed"]
truth_matrix = Bin.tag2["truth"]
encoding = self._encode_input_matrix(observed_matrix)[0]
numReads = observed_matrix.shape[0]
density = observed_matrix.shape[1]
#positions = Bin.cpgPositions
nan_copy = np.copy(observed_matrix)
nan_copy[nan_copy == -1] = np.nan
column_means = np.nanmean(nan_copy,axis=0)
row_means = np.nanmean(nan_copy,axis=1)
for i in range(numReads):
for j in range(density):
observed_state = observed_matrix[i,j]
if observed_state != -1:
continue
state = truth_matrix[i,j]
Y.append(state)
row_mean = row_means[i]
col_mean = column_means[j]
# j is the current index in the row
# encoding is the matrix encoding vector
# differences is the difference in positions of the cpgs
row = np.copy(observed_matrix[i])
row[j] = -1
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
X = np.array(X)
Y = np.array(Y)
Y.astype(int)
return X, Y
# Helper functions
# returns a list of bins similar to the input
# but matrix rows with missing values are removed
def _filter_bad_reads(bins):
filtered_bins = []
for Bin in bins:
newBin = copy.deepcopy(Bin)
matrix = newBin.matrix
# find rows with missing values
counts = np.count_nonzero(matrix == -1, axis=1)
idx = counts == 0
matrix_filtered = matrix[idx]
newBin.matrix = matrix_filtered
filtered_bins.append(newBin)
return filtered_bins
# returns a mapping of dimensions to list of masks that can be used on data
# of that size.
# the missing pattern is in matrix form.
# -1 is missing, 2 is known
def _extract_masks( bins):
masks = defaultdict(lambda: [])
for Bin in tqdm(bins):
matrix = np.copy(Bin.matrix)
matrix[matrix >= 0] = 2
#min_missing = 10
min_missing = 1 # must have at least 1 missing value
if np.count_nonzero(matrix == -1) >= min_missing:
masks[matrix.shape].append(matrix)
return masks
def _apply_masks( filtered_bins, all_bins ):
masks = _extract_masks( all_bins )
ready_bins = []
for Bin in filtered_bins:
truth_matrix = Bin.matrix
m_shape = truth_matrix.shape
if m_shape in masks:
if len( masks [ m_shape ] ) > 0:
mask = random.choice(masks[m_shape])
observed = np.minimum(truth_matrix, mask)
Bin.tag2 = {"truth":truth_matrix, "observed":observed, "mask":mask}
ready_bins.append(Bin)
return ready_bins
# get a set of bins with no missing data
def _filter_missing_data( bins, min_read_depth=1 ):
cpg_bins_complete = _filter_bad_reads(bins)
# secondary depth filter
cpg_bins_complete_depth = [bin_ for bin_ in cpg_bins_complete if bin_.matrix.shape[0] >= min_read_depth]
return cpg_bins_complete_depth
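# A minimal usage sketch of the class above (hypothetical data; rows are reads, columns
# are CpG sites, 1 = methylated, 0 = unmethylated, -1 = missing). Training needs both
# complete matrices (to supply ground truth) and matrices with missing entries (to
# supply realistic masks):
#
#   import numpy as np
#   complete = [np.array([[1, 0], [1, 1], [0, 0]], dtype=float) for _ in range(200)]
#   partial = [np.array([[1, -1], [0, 0], [-1, 1]], dtype=float) for _ in range(200)]
#   model = PReLIM(cpgDensity=2)
#   model.train(complete + partial, model_file="no")   # "no" skips saving a model file
#   imputed = model.impute(np.array([[1, -1], [0, 0]], dtype=float))
#
# impute() returns a copy of the matrix with each -1 replaced by the predicted
# probability of methylation.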
|
py | b4070d9a011ed21644426bb126ea3a3b861b1571 | from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from autosklearn.pipeline.constants import DENSE, UNSIGNED_DATA, SIGNED_DATA, INPUT
from autosklearn.pipeline.components.data_preprocessing.rescaling.abstract_rescaling \
import Rescaling
from autosklearn.pipeline.components.base import \
AutoSklearnPreprocessingAlgorithm
class RobustScalerComponent(Rescaling, AutoSklearnPreprocessingAlgorithm):
def __init__(self, q_min, q_max, random_state):
from sklearn.preprocessing import RobustScaler
self.q_min = q_min
self.q_max = q_max
self.preprocessor = RobustScaler(
quantile_range=(self.q_min, self.q_max), copy=False,
)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'RobustScaler',
'name': 'RobustScaler',
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
'is_deterministic': True,
# TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (INPUT, SIGNED_DATA),
'preferred_dtype': None}
    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
q_min = UniformFloatHyperparameter(
'q_min', 0.001, 0.3, default_value=0.25
)
q_max = UniformFloatHyperparameter(
'q_max', 0.7, 0.999, default_value=0.75
)
cs.add_hyperparameters((q_min, q_max))
return cs
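    # Hypothetical quick check of the space defined above (standard ConfigSpace API):
    #   cs = RobustScalerComponent.get_hyperparameter_search_space()
    #   sample = cs.sample_configuration()
    #   # sample['q_min'] lies in [0.001, 0.3] and sample['q_max'] in [0.7, 0.999]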
|
py | b4070e9b014eba02fe1663db0284c4a55ff68790 | #!/usr/bin/env python
# Copyright (c) 2015-2017 The Taler Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
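# Worked example (hypothetical value): a 64-bit PE whose DllCharacteristics field is
# 0x0160 has HIGH_ENTROPY_VA (0x0020), DYNAMIC_BASE (0x0040) and NX_COMPAT (0x0100) all
# set, so each check below computes bits & reqbits == reqbits as True; a value of 0x0100
# would pass only the NX check.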
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
with open(filename, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
|
py | b4071006fbb42174a39ec72e1bd987297695222a | #
# Complete the 'roverMove' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER matrixSize
# 2. STRING_ARRAY cmds
#
def roverMove(matrixSize, cmds):
# Write your code here
rover = 0
i, j = 0, 0
for cmd in cmds:
if cmd == "RIGHT":
if j < matrixSize - 1:
j += 1
if cmd == "LEFT":
if j > 0:
j -= 1
if cmd == "UP":
if i > 0:
i -= 1
if cmd == "DOWN":
if i < matrixSize - 1:
i += 1
return (i * matrixSize) + j
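# Quick illustrative check (hypothetical inputs, not part of the original stub): on a
# 3x3 grid the rover starts at row 0, column 0; "RIGHT" then "DOWN" leaves it at
# row 1, column 1, i.e. index 1 * 3 + 1 = 4, and moves against an edge are ignored.
if __name__ == '__main__':
    assert roverMove(3, ["RIGHT", "DOWN"]) == 4
    assert roverMove(3, ["UP", "LEFT"]) == 0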
|
py | b40711af5e3fad0e3b34dc79dc4550b4a3dc08e7 | import obj_file
import mesh
import sys
### Helper file to verify vertiex, normal, and triangle count ###
mesh = obj_file.ObjFile(sys.argv[1]).read()
mesh.compute_normals()
# mesh.tri_normals()
def print_lens(mesh=mesh):
print("num vertices: "+str(len(mesh.vertices)))
print("num triangles: "+str(len(mesh.triangles)))
print("num normals: "+str(len(mesh.normals)))
|
py | b40711ff85d1ca81be7a71f8ad9e6709a78945e1 | # -*- coding: utf-8 -*-
import json
import requests
import logging
from oauthlib.oauth2.rfc6749.errors import TokenExpiredError
from django.conf import settings
from django.utils.safestring import mark_safe
from dataloaderservices.views import CSVDataApi
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.http.response import HttpResponse, JsonResponse, HttpResponseServerError
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from django.views.generic.edit import UpdateView, DeleteView
from hydroshare.forms import HydroShareSettingsForm, HydroShareResourceDeleteForm
from dataloaderinterface.models import SiteRegistration, SiteSensor
from hydroshare.models import HydroShareAccount, HydroShareResource, OAuthToken
from hydroshare_util import HydroShareNotFound, HydroShareHTTPException
from hydroshare_util.adapter import HydroShareAdapter
from hydroshare_util.auth import AuthUtil
from hydroshare_util.resource import Resource
from hydroshare_util.coverage import PointCoverage
from leafpack.views import get_leafpack_csv
from leafpack.models import LeafPack
class LoginRequiredMixin(object):
@classmethod
def as_view(cls):
return login_required(super(LoginRequiredMixin, cls).as_view())
class HydroShareResourceViewMixin:
def __init__(self):
self.request = None
def get_hs_resource(self, resource): # type: (HydroShareResource) -> Resource
""" Creates a 'hydroshare_util.Resource' object """
account = self.request.user.hydroshare_account
token_json = account.get_token()
auth_util = AuthUtil.authorize(token=token_json)
# if the oauth access_token expires in less than a week, refresh the token
seconds_in_week = 60*60*24*7
if token_json.get('expires_in', seconds_in_week) < seconds_in_week:
try:
auth_util.refresh_token()
account.update_token(auth_util.get_token())
except Exception as e:
print(e)
hs_resource = Resource(auth_util.get_client())
hs_resource.resource_id = resource.ext_id
return hs_resource
class HydroShareResourceBaseView(UpdateView):
slug_field = 'sampling_feature_code'
def form_invalid(self, form):
response = super(HydroShareResourceBaseView, self).form_invalid(form)
if self.request.is_ajax():
return JsonResponse(form.errors, status=400)
else:
return response
def get_object(self, queryset=None, **kwargs):
site = SiteRegistration.objects.get(sampling_feature_code=self.kwargs[self.slug_field])
resource = None
try:
# Filter through resources that are visible; there should only be one, so pick the first
resource = HydroShareResource.objects.filter(site_registration=site.registration_id, visible=True).first()
except ObjectDoesNotExist:
pass
return resource
def get_context_data(self, **kwargs):
context = super(HydroShareResourceBaseView, self).get_context_data(**kwargs)
context['date_format'] = settings.DATETIME_FORMAT
return context
def upload_hydroshare_files(self, resource): # type: (Resource) -> None
hydroshare_resource = self.object or self.get_object()
site = SiteRegistration.objects.get(hydroshare_resource=hydroshare_resource)
# Grab files for uploading to hydroshare
file_uploads = []
# if 'TS' is a keyword, add sensor data to file_uploads
if 'TS' in hydroshare_resource.data_types:
try:
file_uploads += get_sensor_files(site)
except Exception as e:
logging.error(e)
# if 'LP' is a keyword, add leaf pack data to file_uploads
if 'LP' in hydroshare_resource.data_types:
try:
file_uploads += get_leafpack_files(site)
except Exception as e:
logging.error(e)
if len(file_uploads):
upload_hydroshare_resource_files(resource, file_uploads)
def update_keywords(self, resource, hydroshare_resource=None, site=None): # type: (Resource, HydroShareResource, SiteRegistration) -> None
# Clear the resources keywords
resource.keywords = set()
if site is None:
site = SiteRegistration.objects.get(sampling_feature_code=self.kwargs[self.slug_field])
if hydroshare_resource is None:
hydroshare_resource = HydroShareResource.objects.filter(site_registration=site.registration_id, visible=True).first() # type: HydroShareResource
# Add 'WikiWatershed' keyword to all resources
resource.keywords.add('WikiWatershed')
# Check if 'TS' (Time Series) is a selected data type, and add variable names as keywords if True
if 'TS' in hydroshare_resource.data_types:
# Add 'EnviroDIY' keyword to resource if sharing time series data
resource.keywords.add('EnviroDIY')
sensors = SiteSensor.objects.filter(registration=site)
if len(sensors):
# Add sensor variable names as keywords
for sensor in sensors:
output = sensor.sensor_output
if output is not None and output.variable_name is not None:
resource.keywords.add(output.variable_name)
# Add the 'Leaf Pack' keyword if sharing leaf pack experiment data
if 'LP' in hydroshare_resource.data_types and len(site.leafpack_set.all()) > 0:
resource.keywords.add('Leaf Pack')
if hydroshare_resource.pk is not None:
# if 'hydroshare_resource' has a primary key, then 'resource' has already been created
            # and its 'update_keywords()' method can be invoked.
resource.update_keywords()
def get(self, request, *args, **kwargs):
# # uncomment to force a hydroshare resource file update.
# # Only do this for debugging purposes!
# call_command('update_hydroshare_resource_files', '--force-update')
return super(HydroShareResourceBaseView, self).get(request, args, kwargs)
class HydroShareResourceCreateView(HydroShareResourceBaseView, HydroShareResourceViewMixin):
template_name = 'hydroshare/hs_site_details.html'
model = HydroShareResource
object = None
fields = '__all__'
ABSTRACT_PROTO = u"The data contained in this resource were uploaded from the WikiWatershed Data Sharing Portal " \
u"– http://data.wikiwatershed.org. They were collected at a site named {site_name}. The full URL to access " \
u"this site in the WikiWatershed Data Sharing portal is: http://data.wikiwatershed.org/sites/{site_code}/."
TITLE_PROTO = "Data from {site_name} uploaded from the WikiWatershed Data Sharing Portal"
def generate_abstract(self, site):
return self.ABSTRACT_PROTO.format(site_name=site.sampling_feature_name, site_code=site.sampling_feature_code)
def generate_title(self, site):
return self.TITLE_PROTO.format(site_name=site.sampling_feature_name)
def get_context_data(self, **kwargs):
context = super(HydroShareResourceCreateView, self).get_context_data(**kwargs)
site = SiteRegistration.objects.get(sampling_feature_code=self.kwargs[self.slug_field])
initial_datatype = HydroShareSettingsForm.data_type_choices[0][0]
# Cycle through resources to make sure they still exist on hydroshare.org
resources = HydroShareResource.objects.filter(site_registration=site.pk, visible=False)
for resource in resources:
hs_resource = self.get_hs_resource(resource)
try:
# Basically, just make request to see if the resource still exists. This request can be anything.
hs_resource.get_access_level()
except HydroShareNotFound:
resource.delete()
except TokenExpiredError:
pass
context['site'] = site
form = HydroShareSettingsForm(initial={'site_registration': site.pk,
'data_types': [initial_datatype],
'pause_sharing': False,
'title': self.generate_title(site),
'abstract': self.generate_abstract(site)})
form.fields['resources'].queryset = HydroShareResource.objects.filter(site_registration=site.pk, visible=False)
context['form'] = form
return context
def create_resource(self, site, form):
hs_account = self.request.user.hydroshare_account
hydroshare_resource = HydroShareResource(site_registration=site,
hs_account=hs_account,
data_types=",".join(form.cleaned_data['data_types']),
update_freq=form.cleaned_data['update_freq'],
sync_type=form.cleaned_data['schedule_type'],
is_enabled=True,
title=form.cleaned_data['title'],
last_sync_date=timezone.now())
token_json = hs_account.get_token()
client = AuthUtil.authorize(token=token_json).get_client()
resource = Resource(client)
resource.owner = Resource.DEFAULT_OWNER
resource.resource_type = Resource.COMPOSITE_RESOURCE
resource.creator = '{0} {1}'.format(self.request.user.first_name, self.request.user.last_name)
resource.abstract = form.cleaned_data['abstract']
resource.title = form.cleaned_data['title']
resource.public = True
coverage = PointCoverage(name=site.sampling_feature_name, latitude=site.latitude, longitude=site.longitude)
resource.add_coverage(coverage)
# Add keywords to the resource
self.update_keywords(resource, hydroshare_resource=hydroshare_resource, site=site)
try:
"""
NOTE: The UUID returned when creating a resource on hydroshare.org is externally generated and should only
be used as a reference to an external datasource that is not part of the ODM2DataSharingPortal ecosystem.
"""
hydroshare_resource.ext_id = resource.create()
hydroshare_resource.title = resource.title
except HydroShareHTTPException as e:
return JsonResponse({"error": e.message,
"message": "There was a problem with hydroshare.org and your resource was not created. You might want to see if www.hydroshare.org is working and try again later."},
status=e.status_code)
hydroshare_resource.save()
return resource
def post(self, request, *args, **kwargs):
"""
Creates a resource in hydroshare.org using form data.
"""
form = HydroShareSettingsForm(request.POST)
if form.is_valid():
site = SiteRegistration.objects.get(sampling_feature_code=self.kwargs[self.slug_field])
if form.cleaned_data['resources']:
resource = form.cleaned_data['resources']
resource.visible = True
resource.data_types = ",".join(form.cleaned_data['data_types'])
resource.update_freq = form.cleaned_data['update_freq']
resource.sync_type = form.cleaned_data['schedule_type']
resource.is_enabled = True
resource.last_sync_date = timezone.now()
resource.title = form.cleaned_data['title']
resource.save()
hs_resource = self.get_hs_resource(resource)
hs_resource.update({'title': form.cleaned_data['title'],
'description': form.cleaned_data['abstract']})
else:
hs_resource = self.create_resource(site, form)
try:
self.upload_hydroshare_files(hs_resource)
except Exception as e:
logging.error(e)
success_url = reverse('site_detail', kwargs={'sampling_feature_code': site.sampling_feature_code})
if self.request.is_ajax():
return JsonResponse({'redirect': success_url})
else:
return redirect(success_url)
else:
return self.form_invalid(form)
class HydroShareResourceUpdateView(HydroShareResourceViewMixin, HydroShareResourceBaseView):
template_name = 'hydroshare/hs_site_details.html'
model = HydroShareResource
slug_field = 'sampling_feature_code'
slug_url_kwarg = 'hydroshare_settings_id'
fields = '__all__'
object = None
def get_context_data(self, **kwargs):
context = super(HydroShareResourceUpdateView, self).get_context_data(**kwargs)
site = SiteRegistration.objects.get(sampling_feature_code=self.kwargs['sampling_feature_code'])
resource = self.get_object()
context['site'] = site
context['resource'] = resource
context['form'] = HydroShareSettingsForm(initial={
'site_registration': site.pk,
'update_freq': resource.update_freq,
'schedule_type': resource.sync_type,
'pause_sharing': not resource.is_enabled,
'data_types': resource.data_types.split(",")
})
context['delete_form'] = HydroShareResourceDeleteForm()
hs_resource = self.get_hs_resource(resource)
try:
metadata = hs_resource.get_system_metadata(timeout=10.0)
context['resource_is_published'] = metadata.get("published", False)
# Update the title in case the owner changed it
if 'resource_title' in metadata:
resource.title = metadata['resource_title']
resource.save()
except HydroShareNotFound:
context['resource_not_found'] = True
except requests.exceptions.Timeout:
context['request_timeout'] = True
finally:
# if the resource was not found or the resource is published, provide the 'delete_resource_url'
if context.get('resource_not_found', None) is True or context.get('resource_is_published', None):
context['delete_resource_url'] = reverse('hydroshare:delete',
kwargs={'sampling_feature_code': site.sampling_feature_code})
return context
def post(self, request, *args, **kwargs):
form = HydroShareSettingsForm(request.POST)
if form.is_valid():
site = SiteRegistration.objects.get(pk=form.cleaned_data['site_registration'])
hydroshare_resource = self.get_object() # type: HydroShareResource
if 'update_files' in request.POST and hydroshare_resource.is_enabled:
# get hydroshare resource info using hydroshare_util; this will get authentication info needed to
# upload files to the resource.
resource = self.get_hs_resource(hydroshare_resource) # type: Resource
# Upload the most recent resource files
try:
# update hs_resource's keywords
self.update_keywords(resource, hydroshare_resource=hydroshare_resource, site=site)
# update the files
self.upload_hydroshare_files(resource)
except Exception as e:
return JsonResponse({'error': e.message}, status=500)
# update last sync date on resource
hydroshare_resource.last_sync_date = timezone.now()
else:
hydroshare_resource.data_types = ",".join(form.cleaned_data['data_types'])
hydroshare_resource.update_freq = form.cleaned_data['update_freq']
hydroshare_resource.sync_type = form.cleaned_data['schedule_type']
hydroshare_resource.is_enabled = not form.cleaned_data["pause_sharing"]
hydroshare_resource.save()
success_url = reverse('site_detail', kwargs={'sampling_feature_code': site.sampling_feature_code})
if self.request.is_ajax():
return JsonResponse({'redirect': success_url})
else:
return redirect(success_url)
else:
response = self.form_invalid(form)
return response
class HydroShareResourceDeleteView(LoginRequiredMixin, HydroShareResourceViewMixin, DeleteView):
model = HydroShareResource
slug_field = 'sampling_feature_code'
slug_url_kwarg = 'sampling_feature_code'
def get_site_registration(self):
try:
code = self.kwargs.get('sampling_feature_code', '')
site = SiteRegistration.objects.get(sampling_feature_code=code)
except ObjectDoesNotExist:
return None
return site
def get_object(self, queryset=None, **kwargs):
site = kwargs['site']
resource = None
try:
# Find the resource that is currently visible; there should only be one.
resource = HydroShareResource.objects.filter(site_registration=site.registration_id, visible=True).first()
except ObjectDoesNotExist:
pass
return resource
def get(self, request, *arg, **kwargs):
site = self.get_site_registration()
return redirect(reverse('site_detail', kwargs={'sampling_feature_code': site.sampling_feature_code}))
def post(self, request, *args, **kwargs):
site = self.get_site_registration()
resource = self.get_object(site=site)
form = HydroShareResourceDeleteForm(request.POST)
if form.is_valid():
delete_external_resource = form.cleaned_data.get('delete_external_resource')
if delete_external_resource is True:
# delete resource in hydroshare.org if delete_external_resource is True
hs_resource = self.get_hs_resource(resource)
try:
hs_resource.delete()
except Exception as error:
print(error)
resource.delete()
else:
# Don't delete the resource, but instead turn visibility off. This is so the user can reconnect the
# resource after disconnecting it.
resource.visible = False
resource.save()
return redirect(reverse('site_detail', kwargs={'sampling_feature_code': site.sampling_feature_code}))
class OAuthAuthorize(TemplateView):
"""handles the OAuth 2.0 authorization workflow with hydroshare.org"""
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponseServerError('You are not logged in!')
if 'code' in request.GET:
try:
token_dict = AuthUtil.authorize_client_callback(request) # type: dict
auth_util = AuthUtil.authorize(token=token_dict) # type: AuthUtil
except Exception as e:
                print('Authorization failure: {}'.format(e))
return HttpResponse(mark_safe("<p>Error: Authorization failure!</p><p>{e}</p>".format(e=e)))
client = auth_util.get_client() # type: HydroShareAdapter
user_info = client.getUserInfo()
            print('\nuser_info: %s' % json.dumps(user_info, indent=3))
try:
# check if hydroshare account already exists
account = HydroShareAccount.objects.get(ext_id=user_info['id'])
except ObjectDoesNotExist:
# if account does not exist, create a new one
account = HydroShareAccount(is_enabled=True, ext_id=user_info['id'])
if account.token:
account.token.delete()
account.save()
            # Make updates to the database
account.token = OAuthToken.objects.create(**token_dict)
account.user = request.user
account.save()
return redirect('user_account')
elif 'error' in request.GET:
return HttpResponseServerError(request.GET['error'])
else:
return AuthUtil.authorize_client(request)
class OAuthRedirect(TemplateView):
"""
handles notifying a user they are being redirected, then handles the actual redirection
When a user comes to this view, 'self.get()' checks for a 'redirect' value in the url parameters.
- If the value is found, the user will be immediately redirected to www.hydroshare.org for client
authorization.
- If the value is NOT found, the user is sent to a page notifying them that they are about to be redirected.
After a couple of seconds, they are redirected back to this view with the 'redirect' parameter contained in the
url, and sent off to www.hydroshare.org.
"""
template_name = 'hydroshare/oauth_redirect.html'
def get_context_data(self, **kwargs):
context = super(OAuthRedirect, self).get_context_data(**kwargs)
# Get the current scheme (http or https)
scheme = self.request.is_secure() and "https" or "http"
# Need to get the host since the host name can be 'data.envirodiy.org' or 'data.wikiwatershed.org'
host = self.request.META.get('HTTP_HOST', None)
# Build the url and add 'redirect' into the url params
url = '{scheme}://{host}{url}?{params}'.format(scheme=scheme, host=host,
url=reverse('hydroshare:oauth_redirect'), params='redirect=true')
context['redirect_url'] = mark_safe(url)
return context
def get(self, request, *args, **kwargs):
if 'redirect' in request.GET and request.GET['redirect'] == 'true':
return AuthUtil.authorize_client(request)
return super(OAuthRedirect, self).get(request, *args, **kwargs)
# def get_site_files(site_registration):
# site_sensors = SiteSensor.objects.filter(registration=site_registration.pk)
# files = []
# for site_sensor in site_sensors:
# filename, csv_file = CSVDataApi.get_csv_file(site_sensor.result_id)
# files.append((filename, csv_file.getvalue()))
#
# leafpacks = LeafPack.objects.filter(site_registration=site_registration.pk)
# for leafpack in leafpacks:
# filename, csv_file = get_leafpack_csv(site_registration.sampling_feature_code, leafpack.id)
# files.append((filename, csv_file))
#
# return files
def get_sensor_files(site_registration):
queryset = SiteSensor.objects.filter(registration=site_registration.pk)
files = []
for site_sensor in queryset:
filename, csv_file = CSVDataApi.get_csv_file(site_sensor.result_id)
files.append((filename, csv_file.getvalue()))
return files
def get_leafpack_files(site_registration):
leafpacks = LeafPack.objects.filter(site_registration=site_registration.pk)
files = []
for leafpack in leafpacks:
filename, csv_file = get_leafpack_csv(site_registration.sampling_feature_code, leafpack.id)
files.append((filename, csv_file))
return files
def upload_hydroshare_resource_files(resource, files): # type: (Resource, [object]) -> None
if isinstance(resource, JsonResponse):
# This might happen if hydroshare isn't working...
raise Exception(resource.content)
for file_ in files:
file_name = file_[0]
content = file_[1]
if '.csv' not in file_name:
file_name += '.csv'
resource.upload_file(file_name, content)
|
py | b40712100ce7a00b9643f2e38bb1bafc66801f59 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to alps.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "alps.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
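#Note on the rpcauth format used above: each line is rpcauth=<username>:<salt>$<hash>,
#where <hash> is (as far as we understand the share/rpcuser tool) the hex-encoded
#HMAC-SHA256 of the password keyed with the salt; the plaintext passwords defined here
#are only sent by the client over HTTP Basic auth and never appear in the config file.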
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
py | b40712d790ee74768d4a23b38754e417c22b20eb | #!/usr/bin/python3 -B
# Copyright 2020 Josh Pieper, [email protected].
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import moteus.regression as regression
DUT = regression.linear_regression
class RouterTest(unittest.TestCase):
def test_basic1(self):
self.assertEqual(DUT([0, 2, 4], [0, 2, 4]), (0, 1))
self.assertEqual(DUT([0, 2, 4], [4, 2, 0]), (4, -1))
result = DUT([0, 2, 4], [0, 3, 5])
self.assertAlmostEqual(result[0], 0.16666666)
self.assertAlmostEqual(result[1], 1.25)
if __name__ == '__main__':
unittest.main()
|
py | b407133c119fe0a91e9559a17d8650f79387dce3 | # -*- coding: utf-8 -*-
"""
Constants with default values for plugin's configuration.
We try to stick to "the magical 7 ± 2 number".
https://en.wikipedia.org/wiki/The_Magical_Number_Seven,_Plus_or_Minus_Two
What does it mean? It means that we choose these values based on our mind
capacity. And it is really hard to keep in mind more than 9 objects
at the same time.
These values can be changed in the ``setup.cfg`` file on a per-project basis,
if you find them too strict or too permissive.
"""
from typing_extensions import Final
# ========
# General:
# ========
#: Minimum variable's name length.
MIN_NAME_LENGTH: Final = 2
#: Maximum variable and module name length:
MAX_NAME_LENGTH: Final = 45
#: Whether you control ones who use your code.
I_CONTROL_CODE: Final = True
#: Maximum amount of ``noqa`` comments per module.
MAX_NOQA_COMMENTS: Final = 10 # guessed
#: List of nested classes' names we allow to use.
NESTED_CLASSES_WHITELIST: Final = (
'Meta', # django forms, models, drf, etc
'Params', # factoryboy specific
)
#: Domain names that are removed from variable names' blacklist.
ALLOWED_DOMAIN_NAMES: Final = ()
#: Domain names that extends variable names' blacklist.
FORBIDDEN_DOMAIN_NAMES: Final = ()
# ===========
# Complexity:
# ===========
#: Maximum number of `return` statements allowed in a single function.
MAX_RETURNS: Final = 5 # 7-2
#: Maximum number of local variables in a function.
MAX_LOCAL_VARIABLES: Final = 5 # 7-2
#: Maximum number of expressions in a single function.
MAX_EXPRESSIONS: Final = 9 # 7+2
#: Maximum number of arguments for functions or methods.
MAX_ARGUMENTS: Final = 5 # 7-2
#: Maximum number of classes and functions in a single module.
MAX_MODULE_MEMBERS: Final = 7 # 7
#: Maximum number of methods in a single class.
MAX_METHODS: Final = 7 # the same as module members
#: Maximum line complexity.
MAX_LINE_COMPLEXITY: Final = 14 # 7 * 2, also almost guessed
#: Maximum median module Jones complexity.
MAX_JONES_SCORE: Final = 12 # guessed
#: Maximum number of imports in a single module.
MAX_IMPORTS: Final = 12 # guessed
#: Maximum number of imported names in a single module.
MAX_IMPORTED_NAMES: Final = 50 # guessed
#: Maximum number of base classes.
MAX_BASE_CLASSES: Final = 3 # guessed
#: Maximum number of decorators.
MAX_DECORATORS: Final = 5 # 7-2
#: Maximum number of same string usage in code.
MAX_STRING_USAGES: Final = 3 # guessed
#: Maximum number of ``await`` expressions for functions or methods.
MAX_AWAITS: Final = 5 # the same as returns
#: Maximum amount of ``try`` node body length.
MAX_TRY_BODY_LENGTH: Final = 1 # best practice
#: Maximum amount of same expressions per module.
MAX_MODULE_EXPRESSIONS: Final = 7 # the same as module elements
#: Maximum amount of same expressions per function.
MAX_FUNCTION_EXPRESSIONS: Final = 4 # guessed
#: Maximum number of ``assert`` statements in a function.
MAX_ASSERTS: Final = 5 # 7-2
#: Maximum number of access level in an expression.
MAX_ACCESS_LEVEL: Final = 4 # guessed
#: Maximum number of public attributes in a single class.
MAX_ATTRIBUTES: Final = 6 # guessed
#: Maximum amount of cognitive complexity per function.
MAX_COGNITIVE_SCORE: Final = 12 # based on this code statistics
#: Maximum amount of average cognitive complexity per module.
MAX_COGNITIVE_AVERAGE: Final = 8 # based on this code statistics
#: Maximum number of call chains.
MAX_CALL_LEVEL: Final = 3
#: Maximum number of nested annotations.
MAX_ANN_COMPLEXITY: Final = 3
#: Maximum number of names that can be imported from module.
MAX_IMPORT_FROM_MEMBERS: Final = 8 # guessed
|
py | b40713809f7d261b015fb60f8e65d24bfc992153 | import viola
import numpy as np
import os
HERE = os.path.abspath(os.path.dirname(__file__))
delly = viola.read_vcf(os.path.join(HERE, 'data', "test.merge.delly.vcf"), variant_caller="delly")
manta = viola.read_vcf(os.path.join(HERE, 'data', "test.merge.manta.vcf"), variant_caller="manta")
gridss = viola.read_vcf(os.path.join(HERE, 'data', "test.merge.gridss.vcf"), variant_caller="gridss")
lumpy = viola.read_vcf(os.path.join(HERE, 'data', "test.merge.lumpy.vcf"), variant_caller="lumpy")
def test_merge_to_vcf_like():
merged = viola.merge([manta, gridss, delly, lumpy], integration=True)
merged = merged.filter('supportingcallercount > 1')
merged.to_vcf(os.path.join(HERE, 'data/output_merged.vcf')) |
py | b407140a815b053a4ed75b7d3186eeefc6ffb990 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import time
import pandas as pd
import sqlite3
TR_REQ_TIME_INTERVAL = 0.2
class Kiwoom(QAxWidget):
def __init__(self):
super().__init__()
self._create_kiwoom_instance()
self._set_signal_slots()
def _create_kiwoom_instance(self):
self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
def _set_signal_slots(self):
self.OnEventConnect.connect(self._event_connect)
self.OnReceiveTrData.connect(self._receive_tr_data)
self.OnReceiveChejanData.connect(self._receive_chejan_data)
def comm_connect(self):
self.dynamicCall("CommConnect()")
self.login_event_loop = QEventLoop()
self.login_event_loop.exec_()
def _event_connect(self, err_code):
if err_code == 0:
print("connected")
else:
print("disconnected")
self.login_event_loop.exit()
def get_code_list_by_market(self, market):
code_list = self.dynamicCall("GetCodeListByMarket(QString)", market)
code_list = code_list.split(';')
return code_list[:-1]
def get_master_code_name(self, code):
code_name = self.dynamicCall("GetMasterCodeName(QString)", code)
return code_name
def get_connect_state(self):
ret = self.dynamicCall("GetConnectState()")
return ret
def get_login_info(self, tag):
ret = self.dynamicCall("GetLoginInfo(QString)", tag)
return ret
def set_input_value(self, id, value):
self.dynamicCall("SetInputValue(QString, QString)", id, value)
def comm_rq_data(self, rqname, trcode, next, screen_no):
self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
self.tr_event_loop = QEventLoop()
self.tr_event_loop.exec_()
def _comm_get_data(self, code, real_type, field_name, index, item_name):
ret = self.dynamicCall("CommGetData(QString, QString, QString, int, QString)", code,
real_type, field_name, index, item_name)
return ret.strip()
def _get_repeat_cnt(self, trcode, rqname):
ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
return ret
def send_order(self, rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no):
self.dynamicCall("SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
[rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no])
def get_chejan_data(self, fid):
ret = self.dynamicCall("GetChejanData(int)", fid)
return ret
def get_server_gubun(self):
ret = self.dynamicCall("KOA_Functions(QString, QString)", "GetServerGubun", "")
return ret
def _receive_chejan_data(self, gubun, item_cnt, fid_list):
print(gubun)
print(self.get_chejan_data(9203))
print(self.get_chejan_data(302))
print(self.get_chejan_data(900))
print(self.get_chejan_data(901))
def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
if next == '2':
self.remained_data = True
else:
self.remained_data = False
if rqname == "opt10081_req":
self._opt10081(rqname, trcode)
elif rqname == "opw00001_req":
self._opw00001(rqname, trcode)
elif rqname == "opw00018_req":
self._opw00018(rqname, trcode)
try:
self.tr_event_loop.exit()
except AttributeError:
pass
@staticmethod
def change_format(data):
strip_data = data.lstrip('-0')
if strip_data == '' or strip_data == '.00':
strip_data = '0'
format_data = format(int(strip_data), ',d')
if data.startswith('-'):
format_data = '-' + format_data
return format_data
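# Illustrative behaviour of change_format (example values chosen for this comment):
# change_format('000012345') -> '12,345'
# change_format('-000012345') -> '-12,345'
# change_format('') -> '0'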
@staticmethod
def change_format2(data):
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
if strip_data.startswith('.'):
strip_data = '0' + strip_data
if data.startswith('-'):
strip_data = '-' + strip_data
return strip_data
def _opw00001(self, rqname, trcode):
d2_deposit = self._comm_get_data(trcode, "", rqname, 0, "d+2추정예수금")
self.d2_deposit = Kiwoom.change_format(d2_deposit)
def _opt10081(self, rqname, trcode):
data_cnt = self._get_repeat_cnt(trcode, rqname)
for i in range(data_cnt):
date = self._comm_get_data(trcode, "", rqname, i, "일자")
open = self._comm_get_data(trcode, "", rqname, i, "시가")
high = self._comm_get_data(trcode, "", rqname, i, "고가")
low = self._comm_get_data(trcode, "", rqname, i, "저가")
close = self._comm_get_data(trcode, "", rqname, i, "현재가")
volume = self._comm_get_data(trcode, "", rqname, i, "거래량")
self.ohlcv['date'].append(date)
self.ohlcv['open'].append(int(open))
self.ohlcv['high'].append(int(high))
self.ohlcv['low'].append(int(low))
self.ohlcv['close'].append(int(close))
self.ohlcv['volume'].append(int(volume))
def reset_opw00018_output(self):
self.opw00018_output = {'single': [], 'multi': []}
def _opw00018(self, rqname, trcode):
# single data
total_purchase_price = self._comm_get_data(trcode, "", rqname, 0, "총매입금액")
total_eval_price = self._comm_get_data(trcode, "", rqname, 0, "총평가금액")
total_eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, 0, "총평가손익금액")
total_earning_rate = self._comm_get_data(trcode, "", rqname, 0, "총수익률(%)")
estimated_deposit = self._comm_get_data(trcode, "", rqname, 0, "추정예탁자산")
self.opw00018_output['single'].append(Kiwoom.change_format(total_purchase_price))
self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_price))
self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_profit_loss_price))
total_earning_rate = Kiwoom.change_format(total_earning_rate)
if self.get_server_gubun():
total_earning_rate = float(total_earning_rate) / 100
total_earning_rate = str(total_earning_rate)
self.opw00018_output['single'].append(total_earning_rate)
self.opw00018_output['single'].append(Kiwoom.change_format(estimated_deposit))
# multi data
rows = self._get_repeat_cnt(trcode, rqname)
for i in range(rows):
name = self._comm_get_data(trcode, "", rqname, i, "종목명")
quantity = self._comm_get_data(trcode, "", rqname, i, "보유수량")
purchase_price = self._comm_get_data(trcode, "", rqname, i, "매입가")
current_price = self._comm_get_data(trcode, "", rqname, i, "현재가")
eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, i, "평가손익")
earning_rate = self._comm_get_data(trcode, "", rqname, i, "수익률(%)")
quantity = Kiwoom.change_format(quantity)
purchase_price = Kiwoom.change_format(purchase_price)
current_price = Kiwoom.change_format(current_price)
eval_profit_loss_price = Kiwoom.change_format(eval_profit_loss_price)
earning_rate = Kiwoom.change_format2(earning_rate)
self.opw00018_output['multi'].append([name, quantity, purchase_price, current_price, eval_profit_loss_price,
earning_rate])
if __name__ == "__main__":
app = QApplication(sys.argv)
kiwoom = Kiwoom()
kiwoom.comm_connect()
kiwoom.reset_opw00018_output()
account_number = kiwoom.get_login_info("ACCNO")
account_number = account_number.split(';')[0]
kiwoom.set_input_value("계좌번호", account_number)
kiwoom.comm_rq_data("opw00018_req", "opw00018", 0, "2000")
print(kiwoom.opw00018_output['single'])
print(kiwoom.opw00018_output['multi'])
|
py | b4071575c4fee02f0a1b6c26881a7e0ee19ff355 | import mindspore
import numpy as np
from mindspore import Tensor, nn, ops
def compute_flops(module, inp, out):
if isinstance(module, (nn.Conv2d, nn.Conv1d, nn.Conv3d)):
return compute_Conv2d_flops(module, inp, out)
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm1d, nn.BatchNorm3d)):
return compute_BatchNorm2d_flops(module, inp, out)
elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d, nn.MaxPool1d, nn.AvgPool1d, mindspore.ops.AdaptiveAvgPool2D)):
return compute_Pool2d_flops(module, inp, out)
elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.Sigmoid)):
return compute_ReLU_flops(module, inp, out)
elif isinstance(module, nn.Dense):
return compute_Linear_flops(module, inp, out)
else:
print(f"[Flops]: {type(module).__name__} is not supported!")
return 0
pass
"""
Compute the FLOPs of a convolution layer.
"""
def compute_Conv2d_flops(module: nn.Cell, inp: Tensor, out: Tensor):
assert isinstance(module, nn.Conv2d)
assert len(inp.shape) == 4 and len(inp.shape) == len(out.shape)
batch_size = inp.shape[0]
in_c = inp.shape[1]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = out.shape[1:]
group = module.group
filters_per_channel = out_c // group
conv_per_position_flops = k_h * k_w * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return total_flops
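# Worked example (illustrative numbers, not taken from any model in this file):
# a 3x3 conv with in_c=64, out_c=128, group=1 and a single 56x56 output map gives
# conv_per_position_flops = 3*3*64*128 = 73,728 and active_elements_count = 1*56*56 = 3,136,
# so total_conv_flops = 73,728 * 3,136 = 231,211,008, plus bias_flops = 128 * 3,136 = 401,408
# when a bias is present.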
def compute_BatchNorm2d_flops(module: nn.Cell, inp: Tensor, out: Tensor):
assert isinstance(module, nn.BatchNorm2d)
assert len(inp.shape) == 4 and len(inp.shape) == len(out.shape)
# in_c, in_h, in_w = inp.shape[1:]
batch_flops = np.prod(inp.shape)
if module.requires_grad:
batch_flops *= 2
return batch_flops
def compute_ReLU_flops(module: nn.Cell, inp: Tensor, out: Tensor):
assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU, ops.ReLU))
batch_size = inp.shape[0]
active_elements_count = batch_size
for s in inp.shape[1:]:
active_elements_count *= s
return active_elements_count
def compute_Pool2d_flops(module: nn.Cell, inp: Tensor, out: Tensor):
assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)
assert len(inp.shape) == 4 and len(inp.shape) == len(out.shape)
return np.prod(inp.shape)
def compute_Linear_flops(module, inp, out):
assert isinstance(module, nn.Dense)
assert len(inp.shape) == 2 and len(out.shape) == 2
batch_size = inp.shape[0]
return batch_size * inp.shape[1] * out.shape[1]
if __name__ == '__main__':
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode="valid", has_bias=True):
return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding,
has_bias=has_bias, pad_mode=pad_mode)
channel = 3
# net = conv(channel, 64, 11, stride=4, pad_mode="same", has_bias=True)
net = conv(64, 128, 5, pad_mode="same", has_bias=True)
input = np.ones([3, 244, 244]).reshape((1, 3, 244, 244))
input = Tensor(np.array(input), dtype=mindspore.int32)
print(compute_Conv2d_flops(net, input, input))
|
py | b407163a8cc108690c2b6dc2673fd03d3a2bf0ac | ################################################################################
# Example : perform conversion from tflearn checkpoint format to TensorFlow
# protocol buffer (.pb) binary format files (for import into other tools)
# Copyright (c) 2019 Toby Breckon, Durham University, UK
# License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE
# Acknowledgements: some portions - tensorflow tutorial examples and URL below
################################################################################
import glob,os
################################################################################
# import tensorflow api
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from tensorflow.tools.graph_transforms import TransformGraph
from tensorflow.python.tools import optimize_for_inference_lib
################################################################################
# import tflearn api
import tflearn
from tflearn.layers.core import *
from tflearn.layers.conv import *
from tflearn.layers.normalization import *
from tflearn.layers.estimator import regression
################################################################################
# convert a loaded model definition by loading a checkpoint from a given path
# retaining the network between the specified input and output layers
# outputs to pbfilename as a binary .pb protocol buffer format files
# e.g. for FireNet
# model = construct_firenet (224, 224, False)
# path = "models/FireNet/firenet"; # path to tflearn checkpoint including filestem
# input_layer_name = 'InputData/X' # input layer of network
# output_layer_name= 'FullyConnected_2/Softmax' # output layer of network
# pbfilename = "firenet.pb" # output pb format filename
def convert_to_pb(model, path, input_layer_name, output_layer_name, pbfilename, verbose=False):
model.load(path,weights_only=True)
print("[INFO] Loaded CNN network weights from " + path + " ...")
print("[INFO] Re-export model ...")
del tf.get_collection_ref(tf.GraphKeys.TRAIN_OPS)[:]
model.save("model-tmp.tfl")
# taken from: https://stackoverflow.com/questions/34343259/is-there-an-example-on-how-to-generate-protobuf-files-holding-trained-tensorflow
print("[INFO] Re-import model ...")
input_checkpoint = "model-tmp.tfl"
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', True)
sess = tf.Session();
saver.restore(sess, input_checkpoint)
# print out all layers to find name of output
if (verbose):
op = sess.graph.get_operations()
[print(m.values()) for m in op][1]
print("[INFO] Freeze model to " + pbfilename + " ...")
# freeze and removes nodes which are not related to feedforward prediction
minimal_graph = convert_variables_to_constants(sess, sess.graph_def, [output_layer_name])
graph_def = optimize_for_inference_lib.optimize_for_inference(minimal_graph, [input_layer_name], [output_layer_name], tf.float32.as_datatype_enum)
graph_def = TransformGraph(graph_def, [input_layer_name], [output_layer_name], ["sort_by_execution_order"])
with tf.gfile.GFile(pbfilename, 'wb') as f:
f.write(graph_def.SerializeToString())
# write model to logs dir so we can visualize it as:
# tensorboard --logdir="logs"
if (verbose):
writer = tf.summary.FileWriter('logs', graph_def)
writer.close()
# tidy up tmp files
for f in glob.glob("model-tmp.tfl*"):
os.remove(f)
os.remove('checkpoint')
################################################################################
|
py | b407166447245ae638d6f199bbbebea820b4d6e3 | src = Split('''
vfs_test.c
''')
component = aos_component('vfs_test', src)
component.add_comp_deps('kernel/vfs')
component.add_cflags('-Wall')
component.add_cflags('-Werror') |
py | b4071680b8a174359da5ce6a4d605b2e8ae641d3 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thefile.me/plugins/mediaplayer/site/_embed.php?u=%s' % url
result = client.request(url, mobile=True)
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
|
py | b40716c18d659e28dc8e0f45a65b6ec852387514 | import subprocess
proc = subprocess.Popen(["nslookup", "-q=aaaa", "reddit.com"], stdout=subprocess.PIPE, bufsize=1)
print("\nBELOW\n")
# print (str(proc.communicate())[2:-1]) |
py | b40716e3ccea22c864c2938507e8322b7bc633da | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 15:56:23 2021
@author: anwar
"""
import json
import os
from instance.Node import Node
from instance.Container import Container
from instance.Instance import Instance
from extract_data import GetAllData
def createInstance(Instance):
if (os.path.getsize(r"instanceExamples/data.json") == 0):
GetAllData()
f = open(r"instanceExamples/data.json")
data = json.load(f)
for i in data['nodes']:
n=Node(i['id'],i['cluster_id'],i['Manager Status'],i['name'],i['activated'],i['max_power_consumption'],i['Maxmem'])
Instance.nodes.append(n)
for i in data['containers']:
container=Container(i['id'],i['name'],i['image'],i['dependencies'],i['placements'],i['power_consumption'],i['average_power_consumption_per_minute'],i['priority'],i['cpu_usage'],i['mem_usage'])
Instance.containers.append(container)
Instance.currentState=data['currentState']
Instance.objectives=data['objectives']
Instance.get_valid_nodes()
Instance.getImages()
Instance.get_alldependencies()
return Instance
# instance=Instance()
# Instance=createInstance(instance)
# objs=[]
# for ob in Instance.objectives:
# for key,value in enumerate((ob)):
# print(ob[value])
# objs.append(value)
|
py | b407173a018f15b1b06f11bb6ca2b06549215e43 | # Generated by Django 3.1.2 on 2021-08-10 06:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('video_app', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='name',
),
]
|
py | b407174c152647bdd015972c368ffe1b0ac51796 |
# Brian Henson
# this file defines a wrapper object around the Adafruit PWM hat that is designed to be thread-safe.
# I don't know exactly how long the "set pwm" operation takes; it's probably instant, but there should
# be zero downside and only upside to making it thread-safe, since it is shared hardware.
# debug mode switch that enables/disables actual hardware
PWM_WRAPPER_USE_HARDWARE = True
# debug mode switch that enables/disables print statements (will absolutely flood the logs tho, so dont)
PWM_WRAPPER_DEBUG_PRINTS = False
# TODO: add "logging" module?
import threading
if PWM_WRAPPER_USE_HARDWARE:
import Adafruit_PCA9685
# example: pwm_L = pwm_wrapper(PWM_ADDR_L, PWM_FREQ)
class Pwm_Wrapper(object):
def __init__(self, addr, freq):
self.address = addr
self.freq = freq
if PWM_WRAPPER_USE_HARDWARE:
self.pwm_adafruit = Adafruit_PCA9685.PCA9685(address=addr)
self.pwm_adafruit.set_pwm_freq(freq)
else:
self.pwm_adafruit = None
# the lock object: only one thread can "have" the lock at a time, others will wait till its free
self._lock = threading.Lock()
def set_pwm(self, channel, on, off):
# following info comes from the docs:
# channel: The channel that should be updated with the new values (0..15)
# on: The tick (between 0..4095) when the signal should transition from low to high
# off:The tick (between 0..4095) when the signal should transition from high to low
# we never have any use for changing the on-point, we only care about the duty cycle...
# TODO: eliminate the "on" arg?
# take the lock, set pwm, and release the lock
with self._lock:
if PWM_WRAPPER_DEBUG_PRINTS:
print("pwm=" + str(self.address) + " channel=" + str(channel) + " set to val=" + str(off))
if PWM_WRAPPER_USE_HARDWARE:
self.pwm_adafruit.set_pwm(channel, on, off)
pass
pass
pass
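# Minimal usage sketch (the address, frequency and channel values are illustrative only):
# pwm = Pwm_Wrapper(0x40, 60)
# duty = 0.25  # 25% duty cycle
# pwm.set_pwm(channel=0, on=0, off=int(duty * 4095))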
|
py | b40717b61f203753966eb81c38dda604537c45de | # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Any
from ...utils.api_model import APIModelBase
from ...utils.types import APINullable, UNDEFINED
from ...utils.snowflake import Snowflake
from ...utils.timestamp import Timestamp
@dataclass(repr=False)
class ThreadMetadata(APIModelBase):
"""
Represents a Discord Thread Metadata object
Attributes
----------
archived: :class:`bool`
Whether the thread is archived
auto_archive_duration: :class:`int`
Duration in minutes to automatically archive the thread after recent activity,
can be set to: 60, 1440, 4320, 10080
archive_timestamp: :class:`~melisa.utils.timestamp.Timestamp`
Timestamp when the thread's archive status was last changed,
used for calculating recent activity
locked: :class:`bool`
Whether the thread is locked; when a thread is locked,
only users with ``MANAGE_THREADS`` can unarchive it
invitable: Optional[:class:`bool`]
Whether non-moderators can add other non-moderators to a thread;
only available on private threads
create_timestamp: Optional[:class:`~melisa.utils.timestamp.Timestamp`]
Timestamp when the thread was created; only populated for threads created after 2022-01-09
"""
archived: bool
auto_archive_duration: int
archive_timestamp: Timestamp
locked: bool
invitable: APINullable[bool] = None
create_timestamp: APINullable[Timestamp] = None
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Generate a thread metadata object from the given data.
Parameters
----------
data: :class:`dict`
The dictionary to convert into thread metadata.
"""
self: ThreadMetadata = super().__new__(cls)
self.archived = data["archived"]
self.auto_archive_duration = data["auto_archive_duration"]
self.archive_timestamp = Timestamp.parse(data["archive_timestamp"])
self.locked = data["locked"]
self.invitable = data.get("invitable", None)
if data.get("create_timestamp"):
self.create_timestamp = Timestamp.parse(data["create_timestamp"])
else:
self.create_timestamp = None
return self
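# Minimal usage sketch (the payload values are illustrative assumptions; the timestamp
# string is assumed to be in whatever ISO-8601 form Timestamp.parse accepts):
# metadata = ThreadMetadata.from_dict({
#     "archived": False,
#     "auto_archive_duration": 1440,
#     "archive_timestamp": "2022-01-09T00:00:00+00:00",
#     "locked": False,
# })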
@dataclass(repr=False)
class ThreadMember(APIModelBase):
"""Represents a Discord Thread Member object
Attributes
----------
id: Optional[:class:`~melisa.utils.snowflake.Snowflake`]
The id of the thread
user_id: Optional[:class:`~melisa.utils.snowflake.Snowflake`]
The id of the user
join_timestamp: :class:`~melisa.utils.timestamp.Timestamp`
The time the current user last joined the thread
flags: :class:`int`
Any user-thread settings, currently only used for notifications
"""
join_timestamp: Timestamp
flags: int
id: APINullable[Snowflake] = None
user_id: APINullable[Snowflake] = None
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Generate a thread member object from the given data.
Parameters
----------
data: :class:`dict`
The dictionary to convert into thread member.
"""
self: ThreadMember = super().__new__(cls)
self.archived = data["flags"]
self.archive_timestamp = Timestamp.parse(data["join_timestamp"])
self.id = Snowflake(data["id"]) if data.get("id") is not None else None
self.user_id = (
Snowflake(data["user_id"]) if data.get("user_id") is not None else None
)
return self
|
py | b40717edd250bb38d4c23a2888247c94d4c1fdfa | import sys
import numpy as np
import pytest
import theano
import theano.tensor
from tests import unittest_tools as utt
from theano import config, gof
from theano.compile import debugmode
def test_debugmode_basic():
x = theano.tensor.dvector()
f = theano.function([x], ((2.0 * x) + 7) / 2.0, mode=debugmode.DebugMode())
f([1, 2])
class BROKEN_ON_PURPOSE_Add(gof.Op):
__props__ = ("py_offset",)
def __init__(self, py_offset):
gof.Op.__init__(self)
self.py_offset = py_offset
def make_node(self, a, b):
a = theano.tensor.as_tensor_variable(a)
b = theano.tensor.as_tensor_variable(b)
assert a.type.dtype == "float64"
assert a.type.dtype == b.type.dtype
assert a.type.ndim == 1
r = gof.Apply(self, [a, b], [a.type()])
return r
def perform(self, node, inp, out_):
a, b = inp
(out,) = out_
z = a + b
# ERROR TO ADD THIS CRAPPY OFFSET
if self.py_offset:
out[0] = z + 0.5
else:
out[0] = z
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
a, b = inp
(z,) = out
return """
if (PyArray_NDIM(%(a)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1"); %(fail)s;}
if (PyArray_DESCR(%(a)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_DOUBLE"); %(fail)s;}
if (PyArray_DESCR(%(b)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_DOUBLE"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different lengths"); %(fail)s;}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(b)s)[0];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, PyArray_DESCR(%(b)s)->type_num);
}
{
for (npy_intp m = 0; m < PyArray_DIMS(%(z)s)[0]; ++m)
{
((double*)PyArray_GETPTR1(%(z)s, m))[0]
= 0.5
+ ((double*)PyArray_GETPTR1(%(a)s, m))[0]
+ ((double*)PyArray_GETPTR1(%(b)s, m))[0] ;
}
}
""" % dict(
locals(), **sub
)
# inconsistent is an invalid op, whose perform and c_code do not match
inconsistent = BROKEN_ON_PURPOSE_Add(False)
# off_by_half is a good op, that is different from theano.sparse.sd_csc
off_by_half = BROKEN_ON_PURPOSE_Add(True)
class WeirdBrokenOp(gof.Op):
"""
This op can be inplace if behaviour is 'times1_inplace'
This op can be destructive if behaviour is 'times2_inplace'
In both cases, it does not set the destroy_map or view_map correctly so
it should raise an error in DebugMode.
"""
__props__ = ("behaviour",)
def __init__(self, behaviour):
gof.Op.__init__(self)
self.behaviour = behaviour
def make_node(self, a):
a_ = theano.tensor.as_tensor_variable(a)
r = gof.Apply(self, [a_], [a_.type()])
return r
def dontuse_perform(self, node, inp, out_):
(a,) = inp
(out,) = out_
if self.behaviour == "times2":
out[0] = a * 2
elif self.behaviour == "times2_inplace":
out[0] = a
out[0] *= 2
elif self.behaviour == "times1":
out[0] = a * 1
elif self.behaviour == "times1_inplace":
out[0] = a
else:
raise ValueError(self.behaviour)
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
(a,) = inp
(z,) = out
if "inplace" in self.behaviour:
z_code = """
{Py_XDECREF(%(z)s);}
Py_INCREF(%(a)s);
%(z)s = %(a)s;
"""
else:
z_code = """
{Py_XDECREF(%(z)s);}
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(a)s), PyArray_DESCR(%(a)s)->type_num);
"""
prep_vars = """
//the output array has size M x N
npy_intp M = PyArray_DIMS(%(a)s)[0];
npy_intp Sa = PyArray_STRIDES(%(a)s)[0] / PyArray_DESCR(%(a)s)->elsize;
npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_double * Da = (npy_double*)PyArray_BYTES(%(a)s);
npy_double * Dz = (npy_double*)PyArray_BYTES(%(z)s);
//clear the output array
for (npy_intp m = 0; m < M; ++m)
{
"""
if self.behaviour == "times2":
behaviour = " Dz[m * Sz] = 2 * Da[m * Sa]; "
# out[0] = a * 2
elif self.behaviour == "times2_inplace":
# out[0] = a
# out[0] *= 2
behaviour = " Dz[m * Sz] = 2 * Da[m * Sa]; "
elif self.behaviour == "times1":
# out[0] = a * 1
behaviour = " Dz[m * Sz] = Da[m * Sa]; "
elif self.behaviour == "times1_inplace":
# out[0] = a
behaviour = ""
else:
raise ValueError(self.behaviour)
prep_vars2 = """
}
"""
total = (z_code + prep_vars + behaviour + prep_vars2) % dict(locals(), **sub)
return total
wb2i = WeirdBrokenOp("times2_inplace")
wb2 = WeirdBrokenOp("times2")
wb1i = WeirdBrokenOp("times1_inplace")
wb1 = WeirdBrokenOp("times1")
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_badthunkoutput():
# Check if the c and python code is consistent.
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f_good = theano.function(
[a, b],
off_by_half(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx),
)
f_inconsistent = theano.function(
[a, b],
inconsistent(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx),
)
# this should evaluate with no error
f_good([1.0, 2.0, 3.0], [2, 3, 4])
with pytest.raises(debugmode.BadThunkOutput) as einfo:
f_inconsistent([1.0, 2.0, 3.0], [2, 3, 4])
assert einfo.value.r.owner.op is inconsistent
def test_badoptimization():
@gof.local_optimizer([theano.tensor.add])
def insert_broken_add(node):
if node.op == theano.tensor.add:
return [off_by_half(*node.inputs)]
return False
edb = gof.EquilibriumDB()
edb.register("insert_broken_add", insert_broken_add, "all")
opt = edb.query("+all")
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
with pytest.raises(debugmode.BadOptimization) as einfo:
f(
[1.0, 2.0, 3.0],
[2, 3, 4],
)
assert str(einfo.value.reason) == "insert_broken_add"
def test_badoptimization_opt_err():
# This variant of test_badoptimization() replaces the working code
# with a new apply node that will raise an error.
@gof.local_optimizer([theano.tensor.add])
def insert_bigger_b_add(node):
if node.op == theano.tensor.add:
inputs = list(node.inputs)
if inputs[-1].owner is None:
inputs[-1] = theano.tensor.concatenate((inputs[-1], inputs[-1]))
return [node.op(*inputs)]
return False
@gof.local_optimizer([theano.tensor.add])
def insert_bad_dtype(node):
if node.op == theano.tensor.add:
inputs = list(node.inputs)
if inputs[-1].owner is None:
return [node.outputs[0].astype("float32")]
return False
edb = gof.EquilibriumDB()
edb.register("insert_bigger_b_add", insert_bigger_b_add, "all")
opt = edb.query("+all")
edb2 = gof.EquilibriumDB()
edb2.register("insert_bad_dtype", insert_bad_dtype, "all")
opt2 = edb2.query("+all")
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f = theano.function([a, b], a + b, mode=debugmode.DebugMode(optimizer=opt))
with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
f(
[1.0, 2.0, 3.0],
[2, 3, 4],
)
# Test that opt that do an illegal change still get the error from gof.
with pytest.raises(
theano.gof.toolbox.BadOptimization, match=r"insert_bad_dtype"
) as einfo:
with theano.change_flags(on_opt_error="raise"):
f2 = theano.function(
[a, b],
a + b,
mode=debugmode.DebugMode(optimizer=opt2, stability_patience=1),
)
f2(
[1.0, 2.0, 3.0],
[2, 3, 4],
)
# Test that we can reraise the error with an extended message
with pytest.raises(theano.gof.toolbox.BadOptimization):
e = einfo.value
new_e = e.__class__("TTT" + str(e))
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = new_e
raise exc_value.with_traceback(exc_trace)
def test_stochasticoptimization():
# this optimization alternates between triggering and not triggering.
last_time_replaced = [False]
@gof.local_optimizer([theano.tensor.add])
def insert_broken_add_sometimes(node):
if node.op == theano.tensor.add:
last_time_replaced[0] = not last_time_replaced[0]
if last_time_replaced[0]:
return [off_by_half(*node.inputs)]
return False
edb = gof.EquilibriumDB()
edb.register("insert_broken_add_sometimes", insert_broken_add_sometimes, "all")
opt = edb.query("+all")
a = theano.tensor.dvector()
b = theano.tensor.dvector()
with pytest.raises(debugmode.StochasticOrder):
theano.function(
[a, b],
theano.tensor.add(a, b),
mode=debugmode.DebugMode(
optimizer=opt,
check_c_code=True,
stability_patience=max(2, config.DebugMode.patience),
),
)
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_just_c_code():
x = theano.tensor.dvector()
f = theano.function([x], wb2(x), mode=debugmode.DebugMode(check_py_code=False))
assert np.all(f([1, 2]) == [2, 4])
def test_baddestroymap():
class BadAdd(gof.Op):
def make_node(self, a, b):
c = a.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
(c,) = out
c[0] = a
c[0] += b
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], BadAdd()(x, y), mode="DEBUG_MODE")
with pytest.raises(debugmode.BadDestroyMap):
f([1, 2], [3, 4])
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_baddestroymap_c():
x = theano.tensor.dvector()
f = theano.function([x], wb2i(x), mode=debugmode.DebugMode(check_py_code=False))
with pytest.raises(debugmode.BadDestroyMap):
assert np.all(f([1, 2]) == [2, 4])
class TestViewMap:
class BadAddRef(gof.Op):
def make_node(self, a, b):
c = b.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
(c,) = out
c[0] = b
class BadAddSlice(gof.Op):
def make_node(self, a, b):
c = b.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
(c,) = out
c[0] = b[1:3]
def test_badviewmap_ref(self):
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], self.BadAddRef()(x, y), mode="DEBUG_MODE")
with pytest.raises(debugmode.BadViewMap):
f([1, 2], [3, 4])
def test_badviewmap_slice(self):
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], self.BadAddSlice()(x, y), mode="DEBUG_MODE")
with pytest.raises(debugmode.BadViewMap):
f([1, 2], [3, 4])
def test_goodviewmap(self):
goodop = self.BadAddRef()
goodop.view_map = {0: [1]}
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], goodop(x, y), mode="DEBUG_MODE")
# Shouldn't raise an error
f([1, 5, 1], [3, 4, 2, 1, 4])
@pytest.mark.skipif(
not theano.config.cxx, reason="G++ not available, so we need to skip this test."
)
def test_badviewmap_c(self):
x = theano.tensor.dvector()
f = theano.function([x], wb1i(x), mode=debugmode.DebugMode(check_py_code=False))
with pytest.raises(debugmode.BadViewMap):
f([1, 2])
def test_aliased_outputs_ok(self):
# here aliased outputs is ok because they are both aliased to an input
# as well
class CustomOp(gof.Op):
view_map = {0: [0], 1: [0]}
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
c[0] = a
d[0] = a[1:]
x = theano.tensor.dvector("x")
y = theano.tensor.dvector("y")
f = theano.function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert np.all(r0 == [1, 2, 3, 4])
assert np.all(r1 == [2, 3, 4])
def test_aliased_outputs_ok_output(self):
# here aliased outputs is ok because they are both outputs of the
# function as a whole and thus not destroy-able
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 2
c[0] = r
d[0] = r[1:]
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert np.all(r0 == [2, 4, 6, 8])
assert np.all(r1 == [4, 6, 8])
def test_aliased_outputs_ok_shadow(self):
# here the alias between outputs is ok because one of them is not used
# for subsequent computation. This is like the case where we use one
# output as a memory buffer to serve another output.
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 1
c[0] = r
d[0] = r[1:]
x = theano.tensor.dvector("x")
y = theano.tensor.dvector("y")
f = theano.function([x, y], CustomOp()(x, y)[0] * 2, mode="DEBUG_MODE")
r0 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert np.all(r0 == [2, 4, 6, 8])
def test_aliased_outputs_bad(self):
# here the alias between outputs is not ok because destroying one
# destroys the other, but there's no way to warn theano about it
# through the view_map mechanism.
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 1
c[0] = r[:-1]
d[0] = r[1:]
custom_op = CustomOp()
x = theano.tensor.dvector()
y = theano.tensor.dvector()
bad_xy0, bad_xy1 = custom_op(x, y)
out = bad_xy0 * 2 + bad_xy1 * 2
f = theano.function([x, y], out, mode="DEBUG_MODE")
with pytest.raises(debugmode.BadViewMap):
f([1, 2, 3, 4], [5, 6, 7, 8])
# the situation can be rescued by picking one of the inputs and
# pretending that it is aliased to both the outputs.
# This unfairly disables any destructive operations on the
# input, but guarantees correctness.
# custom_op.view_map = {0:[0], 1:[1]}
# f([1,2,3,4],[5,6,7,8])
class TestCheckIsfinite:
def setup_method(self):
self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
self.old_dm = theano.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite
def teardown_method(self):
theano.tensor.TensorType.filter_checks_isfinite = self.old_ts
theano.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite = self.old_dm
def test_check_isfinite(self):
x = theano.tensor.vector()
f = theano.function([x], (x + 2) * 5, mode="DEBUG_MODE")
g = theano.function([x], theano.tensor.log(x), mode="DEBUG_MODE")
# this should work
f(np.log([3, 4, 5]).astype(config.floatX))
# if TensorType.filter_checks_isfinite were true, these would raise
# ValueError
# if not, DebugMode will check internally, and raise InvalidValueError
# passing an invalid value as an input should trigger ValueError
with pytest.raises(debugmode.InvalidValueError):
f(np.log([3, -4, 5]).astype(config.floatX))
with pytest.raises(debugmode.InvalidValueError):
f((np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
with pytest.raises(debugmode.InvalidValueError):
f((np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
# generating an invalid value internally should trigger
# InvalidValueError
with pytest.raises(debugmode.InvalidValueError):
g(np.asarray([3, -4, 5], dtype=config.floatX))
# this should disable the exception
theano.tensor.TensorType.filter_checks_isfinite = False
theano.compile.mode.predefined_modes["DEBUG_MODE"].check_isfinite = False
# insert several Inf
f(np.asarray(np.asarray([1.0, 1.0, 1.0]) / 0, dtype=config.floatX))
def test_check_isfinite_disabled(self):
x = theano.tensor.dvector()
f = theano.function(
[x], (x + 2) * 5, mode=debugmode.DebugMode(check_isfinite=False)
)
# nan should go through
f(np.log([3, -4, 5]))
# inf should go through
infs = np.asarray([1.0, 1.0, 1.0]) / 0
# print infs
f(infs)
return
class BrokenCImplementationAdd(gof.Op):
__props__ = ()
def make_node(self, a, b):
a = theano.tensor.as_tensor_variable(a)
b = theano.tensor.as_tensor_variable(b)
assert a.type.dtype == "float32"
assert a.type.dtype == b.type.dtype
assert a.type.ndim == 2
r = gof.Apply(self, [a, b], [a.type()])
return r
def perform(self, node, inp, out_):
# print 'executing python perform'
a, b = inp
(out,) = out_
z = a + b
# print 'out[0] was:', out[0]
out[0] = z
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
a, b = inp
(z,) = out
debug = 0
return """
//printf("executing c_code\\n");
if (PyArray_NDIM(%(a)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 2"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (PyArray_DESCR(%(a)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_FLOAT"); %(fail)s;}
if (PyArray_DESCR(%(b)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_FLOAT"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(a)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "a is not square"); %(fail)s;}
if (PyArray_DIMS(%(b)s)[0] != PyArray_DIMS(%(b)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "b is not square"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different dimensions"); %(fail)s;}
// We do not check for c_contiguous property here
if (%(debug)s)
{
if (!%(z)s)
printf("%(z)s is not there, %%p \\n", %(z)s);
else if (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
printf("Dimension 0 mismatch for %(z)s and %(b)s\\n");
else if (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
printf("Dimension 1 mismatch for %(z)s and %(b)s\\n");
else
printf("Reusing %(z)s\\n");
}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
|| (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
)
{
Py_XDECREF(%(z)s);
npy_intp dims[] = {0, 0};
dims[0] = PyArray_DIMS(%(b)s)[0];
dims[1] = PyArray_DIMS(%(b)s)[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, PyArray_DESCR(%(b)s)->type_num);
}
// Let us assume that %(z)s is c_contiguous
{
dtype_%(z)s * z = ((dtype_%(z)s*)(PyArray_GETPTR2(%(z)s,0,0)));
for (int i=0; i<PyArray_DIMS(%(b)s)[0]; i++)
{
for (int j=0; j<PyArray_DIMS(%(b)s)[1]; j++)
{
*z = ((float*)PyArray_GETPTR2(%(a)s, i, j))[0] +
((float*)PyArray_GETPTR2(%(b)s, i, j))[0] ;
z++;
}
}
}
""" % dict(
locals(), **sub
)
class VecAsRowAndCol(gof.Op):
"""
Transforms a vector into a row and a column.
This Op exists to check everything is correct when an Op has
two outputs with different broadcasting patterns.
"""
__props__ = ()
def make_node(self, v):
if not isinstance(v, gof.Variable):
v = theano.tensor.as_tensor_variable(v)
assert v.type.ndim == 1
type_class = type(v.type)
out_r_type = type_class(dtype=v.dtype, broadcastable=(True, False))
out_c_type = type_class(dtype=v.dtype, broadcastable=(False, True))
return gof.Apply(self, [v], [out_r_type(), out_c_type()])
def perform(self, node, inp, out):
(v,) = inp
r, c = out
lv = v.shape[0]
if (r[0] is None) or (r[0].shape != (1, lv)):
r[0] = node.outputs[0].type.value_zeros((1, lv))
if (c[0] is None) or (c[0].shape != (lv, 1)):
c[0] = node.outputs[1].type.value_zeros((lv, 1))
for i in range(lv):
r[0][0, i] = v[i]
c[0][i, 0] = v[i]
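# For a vector v of shape (5,), perform() above fills the first output with shape (1, 5)
# (a row) and the second with shape (5, 1) (a column), both holding the same values as v.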
class TestPreallocatedOutput:
def setup_method(self):
self.rng = np.random.RandomState(seed=utt.fetch_seed())
def test_f_contiguous(self):
a = theano.tensor.fmatrix("a")
b = theano.tensor.fmatrix("b")
z = BrokenCImplementationAdd()(a, b)
# In this test, we do not want z to be an output of the graph.
out = theano.tensor.dot(z, np.eye(7))
a_val = self.rng.randn(7, 7).astype("float32")
b_val = self.rng.randn(7, 7).astype("float32")
# Should work
mode = debugmode.DebugMode(check_preallocated_output=["c_contiguous"])
f = theano.function([a, b], out, mode=mode)
f(a_val, b_val)
# print 'out_val =', out_val
# print out_val.strides
# Should raise an Exception, since the output buffer is
# used incorrectly.
mode = debugmode.DebugMode(check_preallocated_output=["f_contiguous"])
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
# The python code of this op is good.
f(a_val, b_val)
def test_f_contiguous_out(self):
# Same test as test_f_contiguous, but check that it works
# even if z _is_ the output of the graph
a = theano.tensor.fmatrix("a")
b = theano.tensor.fmatrix("b")
out = BrokenCImplementationAdd()(a, b)
a_val = self.rng.randn(7, 7).astype("float32")
b_val = self.rng.randn(7, 7).astype("float32")
# Should work
mode = debugmode.DebugMode(check_preallocated_output=["c_contiguous"])
f = theano.function([a, b], out, mode=mode)
f(a_val, b_val)
# print 'out_val =', out_val
# print out_val.strides
# Should raise an Exception, since the output buffer is
# used incorrectly.
mode = debugmode.DebugMode(check_preallocated_output=["f_contiguous"])
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
with pytest.raises(debugmode.BadThunkOutput):
f(a_val, b_val)
else:
# The python code of this op is good.
f(a_val, b_val)
def test_output_broadcast_tensor(self):
v = theano.tensor.fvector("v")
c, r = VecAsRowAndCol()(v)
f = theano.function([v], [c, r])
v_val = self.rng.randn(5).astype("float32")
f(v_val)
|
py | b4071b19aac8f3f393016bf187e2038a3616ef3c | from django.contrib import admin
from .models import Company, User
admin.site.register(Company)
admin.site.register(User) |
py | b4071d2d18e049d2ae95c42a38d3a813c375996c | """
Filter serve para filtrar dados de uma determinada coleção
"""
from statistics import mean
dados = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]
media = mean(dados)
print(media)
# Note: Like the map() function, filter() takes two parameters:
# a function and an iterable
res = filter(lambda x: x > media, dados)
print(list(res))
print(type(res))
print()
# Note: As with map(), the data here is also discarded after use
# Example 2
paises = ['', 'Argentina', '', 'Brasil', 'Chile', '', 'Colombia', '', 'Equador', '', '', 'Venezuela']
# paises = filter(lambda p: p != '', paises)
paises = filter(None, paises)
print(list(paises))
# Example 3
usuarios = [{"username": "samuel", "tweets": ["Eu adoro bolos", "Eu adoro pizza"]},
{"username": "carla", "tweets": ["Eu amo meu gato"]},
{"username": "jeff", "tweets": []},
{'username': 'bob123', 'tweets': []},
{'username': 'doggo', 'tweets': ['Eu gosto de cachorros', 'Vou sair hoje']},
{'username': 'gal', 'tweets': []}]
# Filter the users who are inactive on Twitter
activeUsers = filter(lambda user: user['tweets'] != [], usuarios)
inactiveUsers = filter(lambda user: not user['tweets'], usuarios)
print(list(activeUsers))
print(list(inactiveUsers))
print()
# Example 4: Combining filter() and map()
nomes = ['Vanessa', 'Ana', 'Maria']
# Create a list containing 'Sua instrutora é' + name, where the name cannot have more than 5 letters
lista = list(map(lambda nome: f'Sua instrutora é {nome}', filter(lambda nome: len(nome) < 5, nomes)))
print(lista)
|
py | b4071d8af50f543461c15654605fd50b3ef5acec | import pprint
from gw_bot.api.slack.API_Slack_Attachment import API_Slack_Attachment
from osbot_aws.helpers.Lambda_Helpers import slack_message, log_to_elk
from osbot_aws.apis.Lambda import Lambda
from osbot_jira.api.API_Issues import API_Issues
from osbot_jira.api.elk.Elk_To_Slack import ELK_to_Slack
from osbot_jira.api.graph.Lambda_Graph import Lambda_Graph
from osbot_jira.api.slack.views.Jira_Slack_Actions import Jira_Slack_Actions
from osbot_jira.api.slack.views.Jira_View_Issue import Jira_View_Issue
from osbot_utils.utils import Misc
from osbot_utils.utils.Misc import array_get, to_int
class GS_Bot_Jira:
def __init__(self):
self.version = "v0.44 (GSBot)"
def cmd_add_link(self, params, team_id=None, channel=None):
if len(params) < 4:
text = ":exclamation: Hi, to add a link, you must provide 3 params: `{from ID}` `{to ID}` `{link type}`"
return {"text": text, "attachments": []}
else:
params.pop(0) # position 0 is the 'issue' command
from_key = params.pop(0)
to_key = params.pop(0)
link_type = " ".join(params)
try:
from osbot_jira.api.jira_server.API_Jira import API_Jira
API_Jira().issue_add_link(from_key, link_type, to_key)
text = ':point_right: Added link: *{0}* `{1}` *{2}*'.format(from_key,link_type,to_key)
except Exception as error:
text = ':red_circle: Error in `add_link`: {0}'.format(error)
return {"text": text, "attachments": []}
def cmd_actions(self, params, team_id=None, channel=None):
text, attachments = Jira_Slack_Actions().get_actions_ui()
return {"text": text, "attachments": attachments}
def cmd_create(self, params, team_id=None, channel=None):
try:
if len(params) < 3:
text = ":exclamation: To create an issue you need to provide the `issue type` and `summary`. For example `jira create task abc`"
return {"text": text, "attachments": []}
else:
params.pop(0) # the create command
issue_type = params.pop(0) #.title() # todo: find a better solution for this
project = issue_type.upper() # todo: and to address the mapping of issue types to projects
summary = ' '.join(params)
slack_message(':point_right: Going to create an `{0}` issue, in project `{1}` with summary `{2}`'.format(issue_type, project,summary), [], channel,team_id)
#to do, move this feature to a separate lambda (which can be called to create issues
from osbot_aws.Dependencies import load_dependency
load_dependency('jira')
from osbot_jira.api.jira_server.API_Jira import API_Jira
# create issue
result = API_Jira().issue_create(project,summary,'',issue_type)
issue_id = "{0}".format(result)
# show issue screenshot
# payload = {'issue_id': issue_id,'channel': channel,'team_id': team_id, }
# Lambda('osbot_browser.lambdas.jira_web').invoke_async(payload)
# show issue UI
payload = {'params': ['issue', issue_id], "channel": channel}
Lambda('osbot_jira.lambdas.jira').invoke_async(payload)
# show link of new issue to user
jira_link = "https://glasswall.atlassian.net/browse/{0}".format(issue_id)
text = ':point_right: New issue created with id <{0}|{1}>'.format(jira_link, issue_id)
return {"text": text, "attachments": []}
except Exception as error:
log_to_elk('jira create error',f'{error}')
return {'text': f':red_circle: Issue could not be created, please make sure that: \n - issue type exists\n - issue type = project type\n - Issue type CamelCase is correctly entered (you want `Task` and not `task`)', "attachments": []}
def cmd_created_in_last(self, params, team_id=None, channel=None):
elk_to_slack = ELK_to_Slack()
if len(params) < 2:
text = ":exclamation: you must provide a start date. You can use `1d`,`1w`,`1y` (d=day, w=week, y=year)"
return {"text": text, "attachments": []}
from_date = params.pop()
issues = API_Issues().issues_created_in_last(from_date)
issues_text = elk_to_slack.get_text_with_issues_key_and_summary(issues)
graph_name = elk_to_slack.save_issues_as_new_graph(issues)
text = elk_to_slack.get_slack_message(issues, graph_name)
max_table = 100
if len(issues) < max_table:
text += "\n (Since there are less than {0} results also showing table with data)".format(max_table)
self.cmd_table(["table", graph_name], team_id, channel)
return {"text": text, "attachments": [{ 'text': issues_text , 'color':'good'}]}
def cmd_created_between(self, params, team_id=None, channel=None):
elk_to_slack = ELK_to_Slack()
if len(params) < 3:
text = ":exclamation: you must provide a start and an end date. You can use `1d`,`1w`,`1y` (d=day, w=week, y=year)"
text += "\nTry `now-1d` and `now`"
return {"text": text, "attachments": []}
to_date = params.pop()
from_date = params.pop()
try:
issues = API_Issues().elastic().get_data_between_dates("Created",from_date, to_date)
issues_text = elk_to_slack.get_text_with_issues_key_and_summary(issues)
graph_name = elk_to_slack.save_issues_as_new_graph(issues)
text = elk_to_slack.get_slack_message(issues, graph_name)
min_table = 100
max_table = 100
if min_table < len(issues) < max_table:
text += "\n (Since there are less than {0} (and more than {1}) results also showing table with data)".format(max_table,min_table)
self.cmd_table(["table", graph_name], team_id, channel)
return {"text": text, "attachments": [{ 'text': issues_text , 'color':'good'}]}
except Exception as error:
text ="Error in cmd_created_between: {0}".format(error)
return {"text": text, "attachments": []}
def cmd_updated_in_last(self, params, team_id=None, channel=None): # refactor with cmd_created_in_last since 99% of the code is the same
elk_to_slack = ELK_to_Slack()
if len(params) < 2:
text = ":exclamation: you must provide a start date. You can use `1d`,`1w`,`1y` (d=day, w=week, y=year)"
return {"text": text, "attachments": []}
from_date = params.pop()
issues = API_Issues().issues_updated_in_last(from_date)
issues_text = elk_to_slack.get_text_with_issues_key_and_summary(issues)
graph_name = elk_to_slack.save_issues_as_new_graph(issues)
text = elk_to_slack.get_slack_message(issues, graph_name)
max_table = 100
if len(issues) < max_table:
text += "\n (Since there are less than {0} results also showing table with data)".format(max_table)
self.cmd_table(["table", graph_name], team_id, channel)
return {"text": text, "attachments": [{ 'text': issues_text , 'color':'good'}]}
def cmd_issue(self, params, team_id=None, channel=None):
if len(params) < 2:
text = ":exclamation: You must provide an issue id"
return {"text": text, "attachments": []}
else:
issue_id = params.pop(1) # position 0 is the 'issue' command
jira_view_issue = Jira_View_Issue(issue_id,channel, team_id)
jira_view_issue.create_and_send()
if channel is None:
return jira_view_issue.issue
#text, attachments = Jira_View_Issue(issue_id).get_actions_ui()
#return {"text": text, "attachments": attachments}
def cmd_screenshot(self, params, team_id=None, channel=None):
attachments = []
if len(params) < 2:
text = ":exclamation: you must provide an issue id "
else:
params.pop(0) # remove 'issue' command
issue_id = params.pop(0).upper()
width = to_int(Misc.array_pop(params), None)
height = to_int(Misc.array_pop(params), None)
delay = to_int(Misc.array_pop(params), None)
text = ':point_right: Getting screenshot for issue `{0}`'.format(issue_id)
if width:
text += ' with width `{0}`'.format(width)
if height:
text += ' and height `{0}`'.format(height)
if delay:
text += ' and delay `{0}`'.format(delay)
payload = {
'issue_id': issue_id,
'channel' : channel ,
'team_id' : team_id ,
'width' : width ,
'height' : height ,
'delay' : delay
}
Lambda('osbot_browser.lambdas.jira_web').invoke_async(payload)
return {"text": text, "attachments": attachments}
def cmd_links(self, params, team_id=None, channel=None, user=None, only_create=False,save_graph=True):
if len(params) < 2:
text = ':point_right: Hi, here are the valid parameters for the `jira links` command: ' \
'\n\t\t - `jira key` ' \
'\n\t\t - `depth` (default to 1)' \
'\n\t\t - `view engine`: viva_graph (default), or plantuml' \
'\n\t\t - `width` (of graph)' \
'\n\t\t - `delay` (before screenshot)'
return {"text": text, "attachments": []}
target = array_get(params, 1 )
depth = to_int(array_get(params, 2), 1 ) # default to depth 1
view_engine = array_get(params, 3, 'viva_graph' )
width = to_int(array_get(params, 4), None )
delay = to_int(array_get(params, 5), None )
if depth > 5:
text = f':red_circle: sorry, depths bigger than 5 are not supported (a depth of 5 already gives you the full graph)'
return {"text": text, "attachments": []}
#direction = 'all' # change behaviour to only show all
graph = Lambda_Graph().graph_links(target, depth)
if graph is None:
text = f':red_circle: graph not created for target `{target}`'
return {"text": text, "attachments": []}
if len(graph.edges) == 0:
text = f':red_circle: no graph created from `{target}` (please double check that the issue ID exists)'
return {"text": text, "attachments": []}
graph_type = f"{target}___depth_{depth}"
if save_graph is False:
return graph
graph_name = graph.render_and_save_to_elk(None, graph_type, channel, user)
if only_create:
return graph, graph_name, depth, target
if channel:
message = f':point_right: Created graph with *name* `{graph_name}` *from* `{target}` *depth* `{depth}`'
slack_message(message,[],channel)
if view_engine =='plantuml':
params = ['show', graph_name, view_engine]
Lambda('osbot_jira.lambdas.graph').invoke_async({"params": params, 'data': {'team_id': team_id, 'channel': channel}})
else:
params = [view_engine, graph_name, 'default', width, delay]
Lambda('osbot_browser.lambdas.lambda_browser').invoke_async({"params": params, 'data': {'team_id': team_id, 'channel': channel}})
else:
return graph, graph_name, depth, target
# puml = graph.puml.puml
# max_size = 60000
# if channel and (not view) and len(puml) > max_size: # only do this check when there is a channel and no view (meaning that the graph will be generated)
# text = ':red_circle: for the graph `{0}` with `{1}` nodes and `{2}` edges, the PlantUML code generated from your query was too big `{3}` and rendering this type of large graphs doesn\'t work well in PlantUML (max allowed is `{4}`)'\
# .format(graph_name, len(graph.nodes), len(graph.edges), len(puml),max_size)
# else:
# if view: # if we have defined a view, render it here
# graph_view = Graph_View()
# graph_view.graph = graph
# graph_view.graph.reset_puml()
# graph_view.render_view(view,channel,team_id,graph_name)
# puml = graph_view.graph.puml.puml
# else:
# view = 'default'
#
# if channel: # if the channel value is provided return a user friendly message, if not, return the data
# text = ':point_right: Created graph with name `{4}`, based on _{0}_ in the direction `{1}`, with depth `{2}`, with plantuml size: `{3}`, with view `{5}`, with `{6}` nodes and `{7}` edges'\
# .format(target, direction, depth, len(puml), graph_name, view, len(graph.nodes), len(graph.edges))
# Lambda('gw_bot.lambdas.puml_to_slack').invoke_async({"puml": puml,"channel": channel, 'team_id' : team_id})
# else:
# data = {
# "target" : target ,
# "direction" : direction ,
# "depth" : depth ,
# "nodes" : graph.nodes,
# "edges" : graph.edges,
# "puml" : puml ,
# "graph_name": graph_name ,
# "view" : view
# }
# text = json.dumps(data, indent=4)
#else:
# text = ':red_circle: error: invalid value provided for depth `{0}`. It must be an number'.format(depth)
#return {"text": text, "attachments": attachments}
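# Illustrative usage sketch (added for clarity, not part of the original bot; the
# issue key below is just an example value):
#   "jira links RISK-424 2 viva_graph" -> cmd_links(['links', 'RISK-424', '2', 'viva_graph'], ...)
#   builds a depth-2 link graph around RISK-424, saves it to ELK, and renders it
#   with the viva_graph browser view.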
def cmd_help(self):
commands = [func for func in dir(GS_Bot_Jira) if
callable(getattr(GS_Bot_Jira, func)) and func.startswith("cmd")]
help_text = ""
for command in commands:
help_text += " • {0}\n".format(command.replace('cmd_',''))
attachments = API_Slack_Attachment(help_text, 'good').render()
text = "*Here are the `jira` commands available:*"
return {"text": text, "attachments": attachments}
def cmd_search(self, event):
Lambda('osbot_jira.lambdas.elk_to_slack').invoke_async(event)
return None
def cmd_table(self, params, team_id=None, channel=None):
attachments = []
if len(params) < 2:
text = ":exclamation: you must provide a graph_name to show in a table format"
else:
params.pop(0) # remove 1st command since it is 'server'
graph_name = params.pop()
text = ':point_right: Showing table with data created from graph `{0}`'.format(graph_name)
Lambda('osbot_browser.lambdas.lambda_browser').invoke_async({"params": [ "table", graph_name , 'graph_simple'], "data":{"channel" : channel, "team_id": team_id}})
return {"text": text, "attachments": attachments}
# def cmd_server(self, params, team_id=None, channel=None):
# attachments = []
# if len(params) < 2:
# text = ":exclamation: you must provide an server command"
# else:
# params.pop(0) # remove 1st command since it is 'server'
# command = params.pop(0)
# data = Secrets('sync-server-ngrok').value_from_json_string()
# username = data.get('username')
# password = data.get('password')
# if command[0] !='/':
# url = "https://gs-jira.ngrok.io/server/{0}".format(command)
# else:
# url = "https://gs-jira.ngrok.io{0}".format(command)
# result = requests.get(url, auth=(username, password)).text
# text = "{0}".format(result)
# #attachments = [{'text':url}]
#
# return {"text": text, "attachments": attachments}
# def cmd_down(self, params, team_id=None, channel=None, user=None):
# self.up_down("down", params, team_id, channel, user)
#
# def cmd_up(self, params, team_id=None, channel=None, user=None):
# self.up_down("up", params, team_id, channel, user)
# def cmd_load_sheet(self, params, team_id=None, channel=None):
# def send_slack_message(message):
# slack_message(message, [], channel, team_id)
#
# # def show_pdf(file_id,icon, when):
# # send_slack_message('{0} this is what the file currently looks `{1}` the sync'.format(icon, when))
# # Lambda('gsbot_gsuite.lambdas.gdocs').invoke({"params":['pdf', file_id], 'data':{'team_id':team_id,'channel': channel}})
#
#
# if len(params) < 2:
# text = ":exclamation: you must provide an gsuite `file_id` (you can find this on the url of the document you want to sync)"
# send_slack_message(text)
# else:
# params.pop(0) # remove 1st command since it is 'server'
# file_id = params.pop(0)
#
# #send_slack_message(':point_right: Staring syncing workflow for file `{0}`'.format(file_id))
# #show_pdf (file_id, ':one:', 'BEFORE')
# #send_slack_message(':two: syncing data ...')
#
# result = self.cmd_server(['server','/jira-sync/load-jira/{0}'.format(file_id)])
# #[trigger_sync_jira_sheets]
# status = json.loads(result.get('text')).get('status')
# send_slack_message('Execution result: `{0}`'.format(status))
#
# #show_pdf(file_id, ':three:','AFTER')
#
# return None,None
# def cmd_diff_sheet(self, params, team_id=None, channel=None):
# def send_slack_message(message):
# slack_message(message, [], channel, team_id)
#
# if len(params) < 2:
# text = ":exclamation: you must provide an gsuite `file_id` (you can find this on the url of the document you want to sync)"
# send_slack_message(text)
# else:
# params.pop(0) # remove 1st command since it is 'server'
# file_id = params.pop(0)
# send_slack_message(':one: diffing data ...')
# result = self.cmd_server(['server','/jira-sync/diff-sheet/{0}'.format(file_id)])
# send_slack_message(result)
#
# return None,None
# def cmd_sync_sheet(self, params, team_id=None, channel=None):
# def send_slack_message(message):
# slack_message(message, [], channel, team_id)
#
# if len(params) < 2:
# text = ":exclamation: you must provide an gsuite `file_id` (you can find this on the url of the document you want to sync)"
# send_slack_message(text)
# else:
# params.pop(0) # remove 1st command since it is 'server'
# file_id = params.pop(0)
# #send_slack_message(':one: diffing data ...')
# result = self.cmd_server(['server','/jira-sync/sync-sheet/{0}'.format(file_id)])
# status = Misc.get_value(Misc.json_load(result.get('text')),'status',result)
# send_slack_message('Execution result: `{0}`'.format(status))
#
# return None,None
def cmd_version(self, params, team_id=None, channel=None):
if channel:
slack_message(self.version, [], channel, team_id)
else:
return {"text": self.version, "attachments":[] }
# helpers
# def up_down(self, direction, params, team_id=None, channel=None, user=None):
# if len(params) != 3:
# text = ':red_circle: for the `jira {0}` command, you need to provide 2 parameters: ' \
# '\n\t\t - `jira key or graph` ' \
# '\n\t\t - `depth` '.format(direction)
# slack_message(text, [], channel, team_id)
# return
#
# target = params[1]
# depth = int(params[2])
# params = ['links', target, direction, depth]
#
#
# (graph, graph_name, depth, direction, target) = self.cmd_links(params, team_id, channel, user, only_create=True)
# if graph:
# text = ':point_right: Created graph for `{0}` in the direction `{1}`, with depth `{2}`, with name `{3}`, with `{4}` nodes and `{5}` edges' \
# .format(target, direction, depth, graph_name, len(graph.nodes), len(graph.edges))
# slack_message(text, [], channel, team_id)
# Lambda('lambdas.browser.lambda_browser').invoke_async({"data": {"team_id": team_id, "channel": channel}, "params": ['viva_graph', graph_name, 'default']})
#
# main method
# todo: refactor to use dynamic method generation (this is the legacy way to resolve methods)
def handle_request(self,event):
#log_to_elk('in handle_request', event)
params = event.get('params')
channel = event.get('channel')
team_id = event.get('team_id')
user = event.get('user')
attachments = []
if params is None or len(params) < 1:
text = ":point_right: no command received, see `jira help` for a list of available commands"
else:
command = params[0]
try:
if command == 'add_link' : return self.cmd_add_link (params, team_id, channel)
if command == 'help' : return self.cmd_help ()
if command == 'actions' : return self.cmd_actions (params, team_id, channel)
if command == 'create' : return self.cmd_create (params, team_id, channel)
if command == 'created_in_last' : return self.cmd_created_in_last(params, team_id, channel)
if command == 'created_between' : return self.cmd_created_between(params, team_id, channel)
if command == 'updated_in_last' : return self.cmd_updated_in_last(params, team_id, channel)
if command == 'screenshot' : return self.cmd_screenshot (params, team_id, channel)
if command == 'issue' : return self.cmd_issue (params, team_id, channel)
if command == 'links' : return self.cmd_links (params, team_id, channel, user)
#if command == 'up' : return self.cmd_up (params, team_id, channel, user)
#if command == 'down' : return self.cmd_down (params, team_id, channel, user)
if command == 'search' : return self.cmd_search (event )
#if command == 'server' : return self.cmd_server (params, team_id, channel)
#if command == 'load_sheet' : return self.cmd_load_sheet (params, team_id, channel)
if command == 'table' : return self.cmd_table (params, team_id, channel)
#if command == 'sync_sheet' : return self.cmd_sync_sheet (params, team_id, channel)
#if command == 'diff_sheet' : return self.cmd_diff_sheet (params, team_id, channel)
#if command == 'graph_sheet' : return self.cmd_graph_sheet (params, team_id, channel)
if command == 'version' : return self.cmd_version (params, team_id, channel)
if '-' in command and ' ' not in command and len(command) < 10: # if it looks like a Issue ID, call the cmd_issue function
params.insert(0,'issue')
return self.cmd_issue(params, team_id, channel)
#return self.cmd_issue(['issue'] + params, channel) # default to this one
text = ':red_circle: Not supported command `{0}` , see all available using `jira help`'.format(command)
except Exception as error:
text = ':red_circle: Error processing command `{0}`: _{1}_'.format(command, pprint.pformat(error))
return { "text": text, "attachments": attachments}
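# Illustrative sketch (added for clarity, not part of the original class; the field
# values below are assumptions for demonstration only) of the event dict that
# handle_request expects:
#   GS_Bot_Jira().handle_request({'params' : ['issue', 'RISK-424'],
#                                 'channel': 'C0123456789',
#                                 'team_id': 'T0123456789',
#                                 'user'   : 'U11111111'})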
# def resolve_es_index(self, key):
# if "SEC-" in key: return 'sec_project'
# return "jira"
# def send_test_button(self, params,channel):
# callback_id = 'view-jira-issue'
#
# message = {
# "text": "JIRA Helper (v0.1)",
# "attachments": [
# {
# "text": "What Jira ID you want to see",
# "fallback": "Not supported",
# "callback_id": "{0}".format(callback_id),
# "color": "#3AA3E3",
# "attachment_type": "default",
# "actions": [
# {
# "name": "key",
# "text": "RISK-424",
# "type": "button",
# "value": "RISK-424"
# },
# {
# "name": "key",
# "text": "SEC-9195",
# "type": "button",
# "value": "SEC-9195"
# },
# {
# "name": "key",
# "text": "GSP-42",
# "type": "button",
# "value": "GSP-42"
# }
# ]
# }
# ]
# }
# #from utils.API_Slack import API_Slack
# #API_Slack(channel).send_message(message['text'], message['attachments'])
# return { "text": message['text'], "attachments" : message['attachments']} |
py | b4071e98a885344d3110633ae206e41ee7d58339 | from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from django.http import HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
def store_details(request, slug, store_id):
pass
@login_required
def create(request):
pass
|
py | b4071fa0d8a5933f021415e128335de6bc7e8a58 | import os
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.display import HTML
from ipywidgets import widgets, Layout
from IPython.core.display import display
from NeuNorm.normalization import Normalization
from NeuNorm.roi import ROI
from __code import utilities, file_handler
from __code.config import gamma_filtering_coefficient
from __code.ipywe import fileselector
from __code._utilities.file import make_or_increment_folder_name
def close(w):
"recursively close a widget"
if hasattr(w, 'children'):
for c in w.children:
close(c)
continue
w.close()
return
class myFileSelectorPanel(fileselector.FileSelectorPanel):
def __init__(self, instruction, start_dir=".", type='file',
next=None,
multiple=False,
newdir_toolbar_button=False,
current_ui=None):
super(myFileSelectorPanel, self).__init__(instruction,
start_dir=start_dir,
type=type,
next=next,
multiple=multiple,
newdir_toolbar_button=newdir_toolbar_button)
self.current_ui = current_ui
def validate(self, s):
super(myFileSelectorPanel, self).validate(s)
try:
if self.current_ui.state == 'sample':
self.current_ui.files.sample = self.selected
elif self.current_ui.state == 'ob':
self.current_ui.files.ob = self.selected
else:
self.current_ui.files.df = self.selected
parent_folder = os.path.dirname(os.path.dirname(self.selected[0]))
self.current_ui.working_dir = parent_folder
self.current_ui.label.value = "{} files selected".format(len(self.selected))
self.current_ui.next_button_ui.disabled = False
self.current_ui.next_button_ui.button_style = 'success'
except AttributeError:
pass
class Data:
sample = []
ob = []
df = []
class Files:
sample = []
ob = []
df = []
class Panel:
layout = Layout(border='1px lightgray solid', margin='5px', padding='15px')
button_layout = Layout(margin='10px 5px 5px 5px')
label_layout = Layout(height='32px', padding='2px', width='300px')
current_state_label_ui = None
prev_button_ui = None
next_button_ui = None
o_norm_handler = None
df_panel = None
def __init__(self, prev_button=False, next_button=True, state='sample',
working_dir='',
top_object=None,
gamma_coefficient=0.9):
self.prev_button = prev_button
self.next_button = next_button
self.state = state
self.working_dir = working_dir
self.ipts_dir = working_dir
self.top_object = top_object
self.gamma_threshold = gamma_coefficient
def init_ui(self, files=None):
self.files = files
self.__top_panel()
self.__bottom_panel()
self.__file_selector()
self.__init_widgets()
def __init_widgets(self):
_list = self.files.sample
_label = "Sample"
_option = '(mandatory)'
if self.state == 'ob':
_list = self.files.ob
_label = 'Open Beam'
elif self.state == 'df':
_list = self.files.df
_label = 'Dark Field'
_option = '(optional)'
self.label.value = "{} files selected".format(len(_list))
self.title.value = "Select list of {} files {} ".format(_label, _option)
if len(_list) > 0:
self.next_button_ui.disabled = False
self.next_button_ui.button_style = 'success'
if self.state == 'df':
self.next_button_ui.disabled = False
self.next_button_ui.button_style = 'success'
def __top_panel(self):
title_ui = widgets.HBox([widgets.Label("Instructions:",
layout=widgets.Layout(width='20%')),
widgets.Label("Select Samples Images and click NEXT",
layout=widgets.Layout(width='50%')),
])
label_ui = widgets.HBox([widgets.Label("Sample selected:",
layout=widgets.Layout(width='20%')),
widgets.Label("None",
layout=widgets.Layout(width='50%')),
])
self.title = title_ui.children[1] # "Select [Samples/OB/DF] Images [and click NEXT]
self.label = label_ui.children[1] # number of samples selected
self.top_panel = widgets.VBox(children=[title_ui, label_ui],
layout=self.layout)
def prev_button_clicked(self, event):
raise NotImplementedError
def next_button_clicked(self, event):
raise NotImplementedError
def __bottom_panel(self):
list_ui = []
if self.prev_button:
self.prev_button_ui = widgets.Button(description="<< Previous Step",
tooltip='Click to move to previous step',
button_style='success',
disabled=False,
layout=widgets.Layout(width='20%'))
self.prev_button_ui.on_click(self.prev_button_clicked)
list_ui.append(self.prev_button_ui)
self.current_state_label_ui = widgets.Label(" ",
layout=widgets.Layout(width='70%'))
list_ui.append(self.current_state_label_ui)
if self.next_button:
self.next_button_ui = widgets.Button(description="Next Step>>",
tooltip='Click to move to next step',
button_style='warning',
disabled=True,
layout=widgets.Layout(width='20%'))
list_ui.append(self.next_button_ui)
self.next_button_ui.on_click(self.next_button_clicked)
self.bottom_panel = widgets.HBox(list_ui)
def __file_selector(self):
self.file_selector = myFileSelectorPanel(instruction='',
start_dir=self.working_dir,
multiple=True,
current_ui=self)
def show(self):
display(self.top_panel)
display(self.bottom_panel)
self.file_selector.show()
def remove(self):
close(self.top_panel)
close(self.bottom_panel)
self.file_selector.remove()
def nextStep(self):
raise NotImplementedError
class WizardPanel:
label_layout = Layout(border='1px lightgray solid', height='35px', padding='8px', width='300px')
sample_panel = None
def __init__(self, sample_panel=None):
display(widgets.Label("Selection of All Input Files",
layout=self.label_layout))
self.sample_panel = sample_panel
self.sample_panel.show()
return
class SampleSelectionPanel(Panel):
files = None
o_norm = None
def __init__(self, prev_button=False, next_button=True, working_dir='', top_object=None):
super(SampleSelectionPanel, self).__init__(prev_button=prev_button,
next_button=next_button,
working_dir=working_dir,
top_object=top_object)
# def __init__(self, prev_button=False, next_button=True, working_dir='', top_object=None, gamma_coefficient=None):
# super(SampleSelectionPanel, self).__init__(prev_button=prev_button,
# next_button=next_button,
# working_dir=working_dir,
# top_object=top_object,
# gamma_coefficient=gamma_coefficient)
def next_button_clicked(self, event):
self.remove()
_panel = OBSelectionPanel(working_dir=self.working_dir, top_object=self)
_panel.init_ui(files=self.files)
_panel.show()
class OBSelectionPanel(Panel):
def __init__(self, working_dir='', top_object=None):
super(OBSelectionPanel, self).__init__(prev_button=True, state='ob',
working_dir=working_dir,
top_object=top_object)
def next_button_clicked(self, event):
self.remove()
_panel = DFSelectionPanel(working_dir=self.working_dir,
top_object=self.top_object)
_panel.init_ui(files=self.files)
_panel.show()
def prev_button_clicked(self, event):
self.remove()
_panel = SampleSelectionPanel(working_dir=self.working_dir,
top_object=self.top_object)
_panel.init_ui(files=self.files)
_panel.show()
class DFSelectionPanel(Panel):
def __init__(self, working_dir='', top_object=None):
self.working_dir = working_dir
super(DFSelectionPanel, self).__init__(prev_button=True,
next_button=True,
state='df',
working_dir=working_dir,
top_object=top_object)
def prev_button_clicked(self, event):
self.remove()
_panel = OBSelectionPanel(working_dir=self.working_dir, top_object=self.top_object)
_panel.init_ui(files=self.files)
_panel.show()
def next_button_clicked(self, event):
self.remove()
o_norm_handler = NormalizationHandler(files=self.files,
working_dir=self.working_dir,
gamma_threshold=self.gamma_threshold)
o_norm_handler.load_data()
self.top_object.o_norm_handler = o_norm_handler
self.top_object.o_norm = o_norm_handler.o_norm
class NormalizationHandler(object):
data = None
integrated_sample = []
working_dir = ''
o_norm = None
normalized_data_array = []
def __init__(self, files=None, working_dir='', gamma_threshold=0.9):
self.files = files
self.working_dir = working_dir
self.data = Data()
self.gamma_threshold = gamma_threshold
def load_data(self):
self.o_norm = Normalization()
# sample
list_sample = self.files.sample
# self.o_norm.load(file=list_sample, notebook=True, auto_gamma_filter=False,
# manual_gamma_filter=True, manual_gamma_threshold=self.gamma_threshold)
self.o_norm.load(file=list_sample, notebook=True)
self.data.sample = self.o_norm.data['sample']['data']
self.list_file_names = list_sample
# ob
list_ob = self.files.ob
# self.o_norm.load(file=list_ob, data_type='ob', notebook=True, auto_gamma_filter=False,
# manual_gamma_filter=True, manual_gamma_threshold=self.gamma_threshold)
self.o_norm.load(file=list_ob, data_type='ob', notebook=True)
self.data.ob = self.o_norm.data['ob']['data']
# df
list_df = self.files.df
if list_df:
# self.o_norm.load(file=list_df, data_type='df', notebook=True, auto_gamma_filter=False,
# manual_gamma_filter=True, manual_gamma_threshold=self.gamma_threshold)
self.o_norm.load(file=list_df, data_type='df', notebook=True)
self.data.df = self.o_norm.data['df']['data']
def get_data(self, data_type='sample'):
if data_type == 'sample':
return self.data.sample
elif data_type == 'ob':
return self.data.ob
else:
return self.data.df
# def plot_images(self, data_type='sample'):
#
# sample_array = self.get_data(data_type=data_type)
#
# def _plot_images(index):
# _ = plt.figure(num=data_type, figsize=(5, 5))
# ax_img = plt.subplot(111)
# my_imshow= ax_img.imshow(sample_array[index], cmap='viridis')
# plt.colorbar(my_imshow)
#
# _ = widgets.interact(_plot_images,
# index=widgets.IntSlider(min=0,
# max=len(self.get_data(data_type=data_type)) - 1,
# step=1,
# value=0,
# description='{} Index'.format(data_type),
# continuous_update=False))
def calculate_integrated_sample(self):
if len(self.data.sample) > 1:
integrated_array = np.array([_array for _array in self.data.sample])
self.integrated_sample = integrated_array.mean(axis=0)
else:
self.integrated_sample = np.squeeze(self.data.sample)
# def with_or_without_roi(self):
# label1 = widgets.Label("Do you want to select a region of interest (ROI) that will make sure that the " +
# "sample background matches the OB background")
# label2 = widgets.Label("-> Make sure your selection do not overlap your sample!")
# box = widgets.HBox([widgets.Label("With or Without ROI?"),
# widgets.RadioButtons(options=['yes','no'],
# value='yes',
# layout=widgets.Layout(width='50%'))])
# self.with_or_without_radio_button = box.children[1]
# vertical = widgets.VBox([label1, label2, box])
# display(vertical)
# def select_sample_roi(self):
#
# if self.with_or_without_radio_button.value == 'no':
# label2 = widgets.Label("-> You chose not to select any ROI! Next step: Normalization")
# display(label2)
# return
#
# label2 = widgets.Label("-> Make sure your selection do not overlap your sample!")
# display(label2)
#
# if self.integrated_sample == []:
# self.calculate_integrated_sample()
#
# _integrated_sample = self.integrated_sample
# [height, width] = np.shape(_integrated_sample)
#
# def plot_roi(x_left, y_top, width, height):
# _ = plt.figure(figsize=(5, 5))
# ax_img = plt.subplot(111)
# ax_img.imshow(_integrated_sample,
# cmap='viridis',
# interpolation=None)
#
# _rectangle = patches.Rectangle((x_left, y_top),
# width,
# height,
# edgecolor='white',
# linewidth=2,
# fill=False)
# ax_img.add_patch(_rectangle)
#
# return [x_left, y_top, width, height]
#
# self.roi_selection = widgets.interact(plot_roi,
# x_left=widgets.IntSlider(min=0,
# max=width,
# step=1,
# value=0,
# description='X Left',
# continuous_update=False),
# y_top=widgets.IntSlider(min=0,
# max=height,
# value=0,
# step=1,
# description='Y Top',
# continuous_update=False),
# width=widgets.IntSlider(min=0,
# max=width - 1,
# step=1,
# value=60,
# description="Width",
# continuous_update=False),
# height=widgets.IntSlider(min=0,
# max=height - 1,
# step=1,
# value=100,
# description='Height',
# continuous_update=False))
def settings(self):
o_norm = self.o_norm
nbr_sample = len(o_norm.data['sample']['data'])
nbr_ob = len(o_norm.data['ob']['data'])
if o_norm.data['df']['data']:
nbr_df = len(o_norm.data['df']['data'])
else:
nbr_df = 0
table_title = "Summary Table"
how_to_combine_title = "How do you want to combine the OBs?"
force_combine_title = "Do you want to combine the OBs?"
def force_combining_changed(value):
widgets_changed()
def how_to_combine_changed(value):
widgets_changed()
def widgets_changed():
if self.force_ui.value == 'no':
accordion_children = [self.force_ui, table]
accordion_title = [force_combine_title, table_title]
self.how_to_ui.disabled = True
elif nbr_sample != nbr_ob:
accordion_children = [self.how_to_ui, table]
accordion_title = [how_to_combine_title, table_title]
self.how_to_ui.disabled = False
else:
accordion_children = [self.force_ui, self.how_to_ui, table]
accordion_title = [force_combine_title, how_to_combine_title, table_title]
self.how_to_ui.disabled = False
table.value = get_html_table()
accordion.children = accordion_children
for _index, _title in enumerate(accordion_title):
accordion.set_title(_index, _title)
accordion.selected_index = len(accordion_title) - 1
def get_html_table():
force_combine = self.force_ui.value
how_to_combine = self.how_to_ui.value
if force_combine == 'yes':
description = f"OBs <b>will be combined</b> using <b>{how_to_combine}</b>"
else:
description = f"OBs <b>won't be combined</b>! Each sample will use <b>1 OB</b>"
html_table = f"<table style='width:800px'>" \
"<tr>" \
"<th style='background-color: grey'>Nbr of Samples</th>" \
"<th style='background-color: grey'>Nbr of OBs</th>" \
"<th style='background-color: grey'>Nbr of DFs</th>" \
"<th style='background-color: grey; width:60%'>Description of Process</th>" \
"</tr>" \
"<tr>" \
f"<td>{nbr_sample}</td>" \
f"<td>{nbr_ob}</td>" \
f"<td>{nbr_df}</td>" \
f"<td>{description}</td>" \
"</tr>" \
"</table>"
return html_table
accordion_children = []
accordion_title = list()
self.force_ui = widgets.RadioButtons(options=['yes', 'no'],
value='yes',
disabled=False,
layout=widgets.Layout(width='200px'))
accordion_children.append(self.force_ui)
self.force_ui.observe(force_combining_changed, names='value')
self.how_to_ui = widgets.RadioButtons(options=['median', 'mean'],
value='median',
layout=widgets.Layout(width='200px'))
accordion_children.append(self.how_to_ui)
self.how_to_ui.observe(how_to_combine_changed, names='value')
html_table = ""
table = widgets.HTML(value=html_table)
accordion_children.append(table)
if nbr_sample != nbr_ob:
self.force_ui.value = 'yes'
accordion_children = [self.how_to_ui, table]
how_to_combine_title = "How to combine?"
accordion_title = [how_to_combine_title, table_title]
else:
accordion_title.append(force_combine_title)
accordion_title.append(how_to_combine_title)
accordion_title.append(table_title)
table.value = get_html_table()
accordion = widgets.Accordion(children=accordion_children,
title=accordion_title)
for _index, _title in enumerate(accordion_title):
accordion.set_title(_index, _title)
accordion.selected_index = len(accordion_title) - 1
display(accordion)
def run_normalization(self, dict_roi=None):
force_mean_ob = False
force_median_ob = False
force_combine = self.force_ui.value
if force_combine == 'yes':
how_to_combine = self.how_to_ui.value
if how_to_combine == 'mean':
force_mean_ob = True
elif how_to_combine == 'median':
force_median_ob = True
else:
raise NotImplementedError(f"How to combine OB algorithm ({how_to_combine}) not implemented!")
if dict_roi is None:
try:
self.o_norm.df_correction()
self.o_norm.normalization(notebook=True,
force_median_ob=force_median_ob,
force_mean_ob=force_mean_ob,
force=True)
self.normalized_data_array = self.o_norm.get_normalized_data()
self.normalized_metadata_array = self.o_norm.data['sample']['metadata']
except ValueError:
display(HTML('<span style="font-size: 20px; color:red">Data Size of Sample, OB and DF (if any) ' +
'do not Match!</span>'))
return
else:
_list_roi = []
for _key in dict_roi.keys():
_roi = dict_roi[_key]
x0 = _roi['x0']
y0 = _roi['y0']
x1 = _roi['x1']
y1 = _roi['y1']
x_left = np.min([x0, x1])
y_top = np.min([y0, y1])
width_roi = np.abs(x0 - x1)
height_roi = np.abs(y0 - y1)
_roi = ROI(x0=x_left, y0=y_top, width=width_roi, height=height_roi)
_list_roi.append(_roi)
self.debugging_roi = _list_roi
self.o_norm.df_correction()
if _list_roi:
try:
self.o_norm.normalization(roi=_list_roi[0],
notebook=True,
force_median_ob=force_median_ob,
force_mean_ob=force_mean_ob,
force=True)
except ValueError:
display(HTML('<span style="font-size: 20px; color:red">Data Size of Sample, OB and DF (if any) ' +
'do not Match!</span>'))
return
else:
self.o_norm.normalization(notebook=True)
self.normalized_data_array = self.o_norm.get_normalized_data()
self.normalized_metadata_array = self.o_norm.data['sample']['metadata']
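# Illustrative note (added for clarity; the key and values are assumptions for
# demonstration only): the dict_roi argument of run_normalization is expected to
# map arbitrary keys to corner coordinates, e.g.
#   dict_roi = {0: {'x0': 5, 'y0': 5, 'x1': 60, 'y1': 100}}
# Each entry is converted above into a NeuNorm ROI(x0, y0, width, height).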
def select_export_folder(self, ipts_folder='./'):
def display_file_selector_from_shared(ev):
start_dir = os.path.join(ipts_folder, 'shared')
self.output_folder_ui.remove()
self.display_file_selector(start_dir=start_dir)
def display_file_selector_from_home(ev):
import getpass
_user = getpass.getuser()
start_dir = os.path.join('/SNS/users', _user)
self.output_folder_ui.remove()
self.display_file_selector(start_dir=start_dir)
ipts = os.path.basename(self.working_dir)
button_layout = widgets.Layout(width='30%',
border='1px solid gray')
hbox = widgets.HBox([widgets.Button(description="Jump to {} Shared Folder".format(ipts),
button_style='success',
layout=button_layout),
widgets.Button(description="Jump to My Home Folder",
button_style='success',
layout=button_layout)])
go_to_shared_button_ui = hbox.children[0]
go_to_home_button_ui = hbox.children[1]
go_to_shared_button_ui.on_click(display_file_selector_from_shared)
go_to_home_button_ui.on_click(display_file_selector_from_home)
display(hbox)
self.display_file_selector()
def display_file_selector(self, start_dir=''):
self.output_folder_ui = fileselector.FileSelectorPanel(instruction='Select Output Folder',
start_dir=start_dir,
multiple=False,
type='directory')
self.output_folder_ui.show()
def export(self):
base_folder = os.path.basename(os.path.dirname(self.list_file_names[0])) + '_normalized'
output_folder = os.path.abspath(os.path.join(self.output_folder_ui.selected, base_folder))
output_folder = make_or_increment_folder_name(output_folder)
w = widgets.IntProgress()
w.max = len(self.files.sample)
display(w)
for _index, _file in enumerate(self.list_file_names):
basename = os.path.basename(_file)
_base, _ext = os.path.splitext(basename)
output_file_name = os.path.join(output_folder, _base + '.tiff')
file_handler.make_tiff(filename=output_file_name,
data=self.normalized_data_array[_index],
metadata=self.normalized_metadata_array[_index])
w.value = _index + 1
display(HTML('<span style="font-size: 20px; color:blue">The normalized images have been ' +
'created in ' + output_folder + '</span>'))
class GammaCoefficient(object):
def select_gamma_coefficient(self):
self.gamma_coeff_ui = widgets.HBox([widgets.Label("Gamma Coefficient:",
layout=widgets.Layout(width="20%")),
widgets.FloatSlider(value=gamma_filtering_coefficient,
min=0,
max=1,
layout=widgets.Layout(width="50%"))])
display(self.gamma_coeff_ui)
def get_coefficient(self):
return self.gamma_coeff_ui.children[1].value
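# Illustrative usage sketch (added for clarity, not part of the original notebook code):
#   o_gamma = GammaCoefficient()
#   o_gamma.select_gamma_coefficient()       # renders the label + slider in the notebook
#   coefficient = o_gamma.get_coefficient()  # read the chosen value back later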
|
py | b40720eb0d90f25986470eb622c2afbb153068b1 | #!/usr/bin/python
###
# Uses the 'md-to-toc' python script to generate and insert tables of contents for *.md
###
import codecs, os, os.path, sys, tempfile
try:
from urllib.request import urlopen # python 3
except ImportError:
from urllib2 import urlopen # python 2
TOC_GEN_FILENAME = "md-to-toc.py"
TOC_GEN_URL = "https://raw.githubusercontent.com/amaiorano/md-to-toc/master/" + TOC_GEN_FILENAME
MARKDOWN_FILE_ENDSWITH = ".md"
TOC_MARKER_START = "TOC START"
TOC_MARKER_END = "TOC END"
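# The markers are matched with str.find(), so any line containing them works.
# One possible convention (an assumption, not mandated by this script) inside a README.md:
#   <!-- TOC START -->
#   ...generated table of contents goes here...
#   <!-- TOC END -->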
def download(url):
print("Downloading %s..." % url)
r = urlopen(url)
if r.code == 200:
return r.read().decode("utf-8");
else:
raise RuntimeError("Got error=%d when downloading %s" % (r.code, url))
def get_parser_module():
if not os.path.isfile(TOC_GEN_FILENAME):
with open(TOC_GEN_FILENAME, 'w') as module_out:
module_out.write(download(TOC_GEN_URL))
module_out.close()
print("Created %s" % TOC_GEN_FILENAME)
return __import__(TOC_GEN_FILENAME.split(".")[0])
class Writer:
def __init__(self):
self.__chunks = []
def write(self, chunk):
self.__chunks.append(chunk)
def flush(self):
pass
def get(self):
return "".join(self.__chunks)
def print_toc(parser, filepath):
parser.main([sys.argv[0], filepath])
def insert_toc(parser, filepath):
# Create temp file which only contains content following the TOC
after_toc = False
tmpfilepath = tempfile.mkstemp()[1]
with codecs.open(tmpfilepath, 'w', "utf-8-sig") as tmpfile:
for line in open(filepath).readlines():
if line.find(TOC_MARKER_END) >= 0:
after_toc = True
continue
if after_toc:
tmpfile.write(line + "\n")
tmpfile.close()
# Hack: Intercept stdout produced by the parser
original_stdout = sys.stdout
parsed_toc = Writer()
sys.stdout = parsed_toc
print_toc(parser, tmpfilepath)
sys.stdout = original_stdout
new_lines = []
inside_toc = False
for line in open(filepath).readlines():
if line.find(TOC_MARKER_START) >= 0:
new_lines.append(line)
new_lines.append("\n")
new_lines.append(parsed_toc.get())
new_lines.append("\n")
inside_toc = True
if line.find(TOC_MARKER_END) >= 0:
inside_toc = False
if not inside_toc:
new_lines.append(line)
return new_lines
def update_mds_in_script_dir(parser):
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
print("Scanning %s:" % os.getcwd())
for filename in os.listdir(script_dir):
if not filename.endswith(MARKDOWN_FILE_ENDSWITH):
continue
print(" Updating %s" % filename)
new_lines = insert_toc(parser, filename)
open(filename, 'w').write("".join(new_lines))
if __name__ == "__main__":
if len(sys.argv) <= 1:
# Inject the TOC into each .md file in this script's directory
update_mds_in_script_dir(get_parser_module())
sys.exit(0)
if sys.argv[1] == "--help" or sys.argv[1] == "-h":
print("Syntax: %s [path or url]" % sys.argv[0])
print(" If path or url is provided: prints the TOC for that path or url")
print(" If no args: injects the TOC for each .md file in the script's directory")
sys.exit(1)
# Print the TOC for the provided file or URL
parser = get_parser_module()
if os.path.isfile(sys.argv[1]):
print_toc(parser, sys.argv[1])
else:
# Not an existent file, treat as a URL
tmpfilepath = tempfile.mkstemp()[1]
with codecs.open(tmpfilepath, 'w', "utf-8-sig") as tmpfile:
tmpfile.write(download(sys.argv[1]))
tmpfile.close()
print_toc(parser, tmpfilepath)
os.unlink(tmpfilepath)
|
py | b40720eebab30acaa6417852a83c7bdc768c7c63 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 05 13:41:23 2018
@author: DanielM
"""
from neuron import h, gui # gui necessary for some parameters to h namespace
import numpy as np
import net_ppdynamicstuned
from input_generator import inhom_poiss
import os
import argparse
import time
from analysis_main import time_stamps_to_signal
import pdb
tsts = time_stamps_to_signal
# Handle command line inputs with argparse
parser = argparse.ArgumentParser(description='Pattern separation paradigm')
parser.add_argument('-runs',
nargs=3,
type=int,
help='start stop range for the range of runs',
default=[0, 1, 1],
dest='runs')
parser.add_argument('-savedir',
type=str,
help='complete directory where data is saved',
default=os.getcwd(),
dest='savedir')
parser.add_argument('-seed',
nargs=3,
type=int,
help='the seed making the network reproducible',
default=[10000, 10001, 1],
dest='seed')
parser.add_argument('-pp_mod_rate',
type=int,
help='Frequency at which the input is modulated',
default=10,
dest='pp_mod_rate')
parser.add_argument('-pp_max_rate',
type=int,
help='The maximum frequency the input reaches',
default=50,
dest='pp_max_rate')
parser.add_argument('-W_pp_gc',
type=float,
help='the weight of the pp to gc connection',
default=1e-3,
dest='W_pp_gc')
parser.add_argument('-W_pp_bc',
type=float,
help='the weight of the pp to bc connection',
default=1e-3,
dest='W_pp_bc')
parser.add_argument('-rec_cond',
type=int,
help='number of hc to gc synapses',
default=0,
dest='rec_cond')
args = parser.parse_args()
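# Illustrative invocation (added for clarity; the script name and paths are assumptions,
# the option values mirror the defaults declared above):
#   python pattern_separation_paradigm.py -runs 0 1 1 -seed 10000 10001 1 \
#       -savedir /path/to/output -pp_mod_rate 10 -pp_max_rate 50 -W_pp_gc 1e-3 -W_pp_bc 1e-3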
# Where to search for nrnmech.dll file. Must be adjusted for your machine.
dll_files = [("/home/daniel/repos/pyDentate/mechs_7-6_linux/x86_64/.libs/libnrnmech.so"),
("C:\\Users\\Daniel\\repos\\pyDentate\\mechs_7-6_win\\nrnmech.dll")]
for x in dll_files:
if os.path.isfile(x):
dll_dir = x
print("DLL loaded from: " + str(dll_dir))
h.nrn_load_dll(dll_dir)
# Start the runs of the model
runs = range(args.runs[0], args.runs[1], args.runs[2])
initial_run = runs[0]
for seed in range(args.seed[0], args.seed[1], args.seed[2]):
# Generate temporal patterns for the 400 PP inputs
np.random.seed(seed)
temporal_patterns_full = inhom_poiss(mod_rate=args.pp_mod_rate,
max_rate=args.pp_max_rate,
n_inputs=400)
for run in runs:
start_proc_t = time.perf_counter()
print("Run: " + str(run) + ". Total time: " + str(start_proc_t))
temporal_patterns = temporal_patterns_full.copy()
for idx in range(run+24,temporal_patterns.shape[0]):
temporal_patterns[idx] = np.array([])
for idx in range(0,run):
temporal_patterns[idx] = np.array([])
nw = net_ppdynamicstuned.TunedNetwork(seed=seed,
W_pp_gc=args.W_pp_gc,
W_pp_bc=args.W_pp_bc,
temporal_patterns=temporal_patterns,
rec_cond=False)
# Run the model
"""Initialization for -2000 to -100"""
h.cvode.active(0)
dt = 0.1
h.steps_per_ms = 1.0/dt
h.finitialize(-60)
h.t = -2000
h.secondorder = 0
h.dt = 10
while h.t < -100:
h.fadvance()
h.secondorder = 2
h.t = 0
h.dt = 0.1
"""Setup run control for -100 to 1500"""
h.frecord_init() # Necessary after changing t to restart the vectors
while h.t < 600:
h.fadvance()
end_proc_t = time.perf_counter()
print("Done Running at " + str(end_proc_t) + " after " + str((end_proc_t - start_proc_t)/60) + " minutes")
save_data_name = (f"{str(nw)}_"
f"{seed:06d}_"
f"{run:03d}_"
f"{args.W_pp_gc:08.5f}_"
f"{args.W_pp_bc:08.5f}_"
f"{args.pp_mod_rate:04d}_"
f"{args.pp_max_rate:04d}_")
#if run == 0:
fig = nw.plot_aps(time=600)
tuned_fig_file_name = save_data_name
nw.save_ap_fig(fig, args.savedir, tuned_fig_file_name)
pp_lines = np.empty(400, dtype = np.object)
pp_lines[0+run:24+run] = temporal_patterns[0+run:24+run]
curr_pp_ts = np.array(tsts(pp_lines, dt_signal=0.1, t_start=0, t_stop=600), dtype = np.bool)
curr_gc_ts = np.array(tsts(nw.populations[0].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=600), dtype = np.bool)
curr_mc_ts = np.array(tsts(nw.populations[1].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=600), dtype = np.bool)
curr_hc_ts = np.array(tsts(nw.populations[2].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=600), dtype = np.bool)
curr_bc_ts = np.array(tsts(nw.populations[3].get_properties()['ap_time_stamps'], dt_signal=0.1, t_start=0, t_stop=600), dtype = np.bool)
np.savez(args.savedir + os.path.sep + "time-stamps_" + save_data_name,
pp_ts = np.array(curr_pp_ts),
gc_ts = np.array(curr_gc_ts),
mc_ts = np.array(curr_mc_ts),
bc_ts = np.array(curr_bc_ts),
hc_ts = np.array(curr_hc_ts))
#del curr_pp_ts, curr_gc_ts, curr_mc_ts, curr_hc_ts, curr_bc_ts
#del nw
|
py | b407212fe6a505c866542849743f7e0a86607ec4 | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import (
abc,
namedtuple,
)
import copy
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Dict,
FrozenSet,
Hashable,
Iterable,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
reduction as libreduction,
)
from pandas._typing import (
ArrayLike,
FrameOrSeries,
FrameOrSeriesUnion,
Manager,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.core.dtypes.cast import (
find_common_type,
maybe_cast_result_dtype,
maybe_downcast_numeric,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.aggregation import (
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.apply import GroupByApply
from pandas.core.arrays import Categorical
from pandas.core.base import (
DataError,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
get_groupby,
group_selection_context,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
import pandas.core.indexes.base as ibase
from pandas.core.internals import ArrayManager
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: Type[FrameOrSeries]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
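# Illustrative sketch (added comment, not pandas source): generate_property("dtype", Series)
# returns a property whose getter simply dispatches to self._make_wrapper("dtype"),
# while copying the docstring and name from Series.dtype.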
def pin_allowlisted_properties(klass: Type[FrameOrSeries], allowlist: FrozenSet[str]):
"""
Create GroupBy member defs for DataFrame/Series names in an allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
ret.columns = columns
else:
cyfunc = self._get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except (ValueError, KeyError):
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
if not self.as_index: # pragma: no cover
print("Warning, ignoring as_index=True")
if isinstance(ret, dict):
from pandas import concat
ret = concat(ret.values(), axis=1, keys=[key.label for key in ret.keys()])
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series):
raise SpecificationError("nested renamer is not supported")
columns = list(arg.keys())
arg = arg.items()
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: Dict[base.OutputKey, FrameOrSeriesUnion] = {}
for idx, (name, func) in enumerate(arg):
obj = self
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
# let higher level handle
return results
# Argument 1 to "_wrap_aggregated_output" of "SeriesGroupBy" has
# incompatible type "Dict[OutputKey, Union[DataFrame,
# Series]]";
# expected "Mapping[OutputKey, Union[Series, ndarray]]"
output = self._wrap_aggregated_output(
results, index=None # type: ignore[arg-type]
)
return self.obj._constructor_expanddim(output, columns=columns)
# TODO: index should not be Optional - see GH 35490
def _wrap_series_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy operation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
index : pd.Index or None
Index to apply to the output.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output and columns will only contain one
element. The exception is operations that expand dimensions, like ohlc.
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index(key.label for key in output)
result: FrameOrSeriesUnion
if len(output) > 1:
result = self.obj._constructor_expanddim(indexed_output, index=index)
result.columns = columns
elif not columns.empty:
result = self.obj._constructor(
indexed_output[0], index=index, name=columns[0]
)
else:
result = self.obj._constructor_expanddim()
return result
# TODO: Remove index argument, use self.grouper.result_index, see GH 35490
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output will only contain one element.
The exception is operations that expand dimensions, like ohlc.
"""
result = self._wrap_series_output(output=output, index=index)
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : dict[base.OutputKey, Union[Series, np.ndarray]]
Dict with a sole key of 0 and a value of the result values.
Returns
-------
Series
Notes
-----
output should always contain one element. It is specified as a dict
for consistency with DataFrame methods and _wrap_aggregated_output.
"""
assert len(output) == 1
result = self._wrap_series_output(output=output, index=self.obj.index)
# No transformations increase the ndim of the result
assert isinstance(result, Series)
return result
def _wrap_applied_output(
self,
data: Series,
keys: Index,
values: Optional[List[Any]],
not_indexed_same: bool = False,
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self._selection_name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result: FrameOrSeriesUnion = self._reindex_output(
self.obj._constructor_expanddim(values, index=index)
)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self._selection_name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
initialized = False
for name, group in self:
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
group.name = name # NB: libreduction does not pin name
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, 0)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(
result.ravel(), index=data.index, name=data.name
)
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
return self._transform_fast(result)
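    # Illustrative usage of the reduction-broadcast behaviour described above
    # (sketch only; the series and grouping below are hypothetical):
    # >>> ser = pd.Series([1, 2, 3, 4])
    # >>> ser.groupby([0, 0, 1, 1]).transform("mean")
    # 0    1.5
    # 1    1.5
    # 2    2.5
    # 3    2.5
    # dtype: float64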
def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
klass = type(self._selected_obj)
results = []
for name, group in self:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
if isinstance(res, (DataFrame, Series)):
res = res._values
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* user-defined funcs
# the cython take a different path (and casting)
if is_numeric_dtype(result.dtype):
common_dtype = find_common_type([self._selected_obj.dtype, result.dtype])
if common_dtype is result.dtype:
result = maybe_downcast_numeric(result, self._selected_obj.dtype)
result.name = self._selected_obj.name
return result
def _transform_fast(self, result) -> Series:
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
out = algorithms.take_nd(result._values, ids)
return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
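    # Sketch of the fast-transform broadcast (values are illustrative): with
    # group ids [0, 0, 1] and per-group aggregates [2.0, 5.0], the take step
    # above yields [2.0, 2.0, 5.0], i.e. every row receives its group's value.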
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self._selection_name)
return self._reindex_output(result, fill_value=0)
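    # Illustrative usage (hypothetical data):
    # >>> ser = pd.Series([1, 1, 2, 3], index=["a", "a", "a", "b"])
    # >>> ser.groupby(level=0).nunique()
    # a    2
    # b    1
    # dtype: int64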
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self,
normalize=False,
sort=True,
ascending=False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(lchanges):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray, Any]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
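    # Illustrative usage (hypothetical frame): counts of each value within
    # every group, keyed by a (group, value) MultiIndex.
    # >>> df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 1, 2]})
    # >>> df.groupby("A")["B"].value_counts()
    # A  B
    # x  1    2
    # y  2    1
    # Name: B, dtype: int64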
def count(self) -> Series:
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._values
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
result = self.obj._constructor(
out,
index=self.grouper.result_index,
name=self._selection_name,
dtype="int64",
)
return self._reindex_output(result, fill_value=0)
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
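    # Illustrative usage (hypothetical values): each entry is compared to the
    # previous entry within its own group, so the first entry of every group
    # is NaN.
    # >>> ser = pd.Series([1.0, 2.0, 4.0, 10.0])
    # >>> ser.groupby([0, 0, 0, 1]).pct_change()
    # 0    NaN
    # 1    1.0
    # 2    1.0
    # 3    NaN
    # dtype: float64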
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more."""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
result = self._aggregate_frame(func)
else:
# try to treat as if we are passing a list
try:
result = GroupByApply(self, [func], args=(), kwargs={}).agg()
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH 32040
result.columns = result.columns.rename(
[self._selected_obj.columns.name] * result.columns.nlevels
).droplevel(-1)
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
except AttributeError:
# catch exception from line 969
# (Series does not have attribute "columns"), see GH 35246
result = self._aggregate_frame(func)
if relabeling:
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> DataFrame:
agg_mgr = self._cython_agg_manager(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
return self._wrap_agged_manager(agg_mgr)
def _cython_agg_manager(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> Manager:
data: Manager = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
using_array_manager = isinstance(data, ArrayManager)
def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike:
# see if we can cast the values to the desired dtype
# this may not be the original dtype
assert not isinstance(result, DataFrame)
dtype = maybe_cast_result_dtype(values.dtype, how)
result = maybe_downcast_numeric(result, dtype)
if isinstance(values, Categorical) and isinstance(result, np.ndarray):
# If the Categorical op didn't raise, it is dtype-preserving
result = type(values)._from_sequence(result.ravel(), dtype=values.dtype)
# Note this will have result.dtype == dtype from above
elif (
not using_array_manager
and isinstance(result, np.ndarray)
and result.ndim == 1
):
# We went through a SeriesGroupByPath and need to reshape
# GH#32223 includes case with IntegerArray values
result = result.reshape(1, -1)
# test_groupby_duplicate_columns gets here with
# result.dtype == int64, values.dtype=object, how="min"
return result
def py_fallback(values: ArrayLike) -> ArrayLike:
# if self.grouper.aggregate fails, we fall back to a pure-python
# solution
# We get here with a) EADtypes and b) object dtype
obj: FrameOrSeriesUnion
# call our grouper again with only this block
if values.ndim == 1:
obj = Series(values)
else:
# TODO special case not needed with ArrayManager
obj = DataFrame(values.T)
if obj.shape[1] == 1:
# Avoid call to self.values that can occur in DataFrame
# reductions; see GH#28949
obj = obj.iloc[:, 0]
# Create SeriesGroupBy with observed=True so that it does
# not try to add missing categories if grouping over multiple
# Categoricals. This will done by later self._reindex_output()
# Doing it here creates an error. See GH#34951
sgb = get_groupby(obj, self.grouper, observed=True)
result = sgb.aggregate(lambda x: alt(x, axis=self.axis))
assert isinstance(result, (Series, DataFrame)) # for mypy
# In the case of object dtype block, it may have been split
# in the operation. We un-split here.
result = result._consolidate()
assert isinstance(result, (Series, DataFrame)) # for mypy
# unwrap DataFrame/Series to get array
mgr = result._mgr
arrays = mgr.arrays
if len(arrays) != 1:
# We've split an object block! Everything we've assumed
# about a single block input returning a single block output
# is a lie. See eg GH-39329
return mgr.as_array()
else:
# We are a single block from a BlockManager
# or one array from SingleArrayManager
return arrays[0]
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", values, how, axis=1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
assert how == "ohlc"
raise
# error: Incompatible types in assignment (expression has type
# "ExtensionArray", variable has type "ndarray")
result = py_fallback(values) # type: ignore[assignment]
return cast_agg_result(result, values, how)
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
# NotImplementedError -> "ohlc" with wrong dtype
new_mgr = data.grouped_reduce(array_func, ignore_failures=True)
if not len(new_mgr):
raise DataError("No numeric types to aggregate")
return new_mgr
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
axis = self.axis
obj = self._obj_with_exclusions
result: Dict[Hashable, Union[NDFrame, np.ndarray]] = {}
if axis != obj._info_axis_number:
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
for name in self.indices:
data = self.get_group(name, obj=obj)
fres = func(data, *args, **kwargs)
result[name] = fres
return self._wrap_frame_output(result, obj)
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
obj = self._obj_with_exclusions
result: Dict[Union[int, str], NDFrame] = {}
cannot_agg = []
for item in obj:
data = obj[item]
colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
try:
result[item] = colg.aggregate(func, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in _aggregate_named, handle at higher level
# see test_apply_with_mutated_index
raise
# otherwise we get here from an AttributeError in _make_wrapper
cannot_agg.append(item)
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
return self.obj._constructor(result, columns=result_columns)
def _wrap_applied_output(self, data, keys, values, not_indexed_same=False):
if len(keys) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.astype(data.dtypes.to_dict(), copy=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection_name
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_series(
self,
keys,
values: List[Series],
not_indexed_same: bool,
first_not_none,
key_index,
) -> FrameOrSeriesUnion:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
result = self.obj._constructor(stacked_values, index=index, columns=columns)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
result = result._convert(datetime=True)
else:
result = result._convert(datetime=True)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
for name, group in gen:
object.__setattr__(group, "name", name)
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=data.index, columns=data.columns)
# optimized transforms
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
# GH 30918
# Use _transform_fast only when we know func is an aggregation
if func in base.reduction_kernels:
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
if isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
):
return self._transform_fast(result)
return self._transform_general(func, *args, **kwargs)
def _transform_fast(self, result: DataFrame) -> DataFrame:
"""
Fast transform path for aggregations
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = [
algorithms.take_nd(result.iloc[:, i].values, ids)
for i, _ in enumerate(result.columns)
]
return self.obj._constructor._from_arrays(
output, columns=result.columns, index=obj.index
)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not isinstance(res_fast, DataFrame):
return path, res
if not res_fast.columns.equals(group.columns):
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
pass
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return self.obj._constructor(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
    Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key):
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=2,
)
return super().__getitem__(key)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_frame_output(self, result, obj: DataFrame) -> DataFrame:
result_index = self.grouper.levels[0]
if self.axis == 0:
return self.obj._constructor(
result, index=obj.columns, columns=result_index
).T
else:
return self.obj._constructor(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self) -> Manager:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy aggregations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
result.index = self.grouper.result_index
if self.axis == 1:
result = result.T
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy transformations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
result = self.obj._constructor(indexed_output)
if self.axis == 1:
result = result.T
result.columns = self.obj.columns
else:
columns = Index(key.label for key in output)
columns.name = self.obj.columns.name
result.columns = columns
result.index = self.obj.index
return result
def _wrap_agged_manager(self, mgr: Manager) -> DataFrame:
if not self.as_index:
index = np.arange(mgr.shape[1])
mgr.set_axis(1, ibase.Index(index), verify_integrity=False)
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr.set_axis(1, index, verify_integrity=False)
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(
self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func) -> DataFrame:
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()),
keys=self._selected_obj.columns,
axis=1,
)
def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
using_array_manager = isinstance(data, ArrayManager)
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(2DEA): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
if using_array_manager:
# count_level_2d return (1, N) array for single column
# -> extract 1D array
counted = counted[0, :]
return counted
new_mgr = data.grouped_reduce(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_manager() returns. GH 35028
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_manager(new_mgr)
return self._reindex_output(result, fill_value=0)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
from pandas.core.reshape.concat import concat
# TODO: this is duplicative of how GroupBy naturally works
# Try to consolidate with normal wrapping functions
obj = self._obj_with_exclusions
axis_number = obj._get_axis_number(self.axis)
other_axis = int(not axis_number)
if axis_number == 0:
iter_func = obj.items
else:
iter_func = obj.iterrows
results = concat(
[
SeriesGroupBy(content, selection=label, grouper=self.grouper).nunique(
dropna
)
for label, content in iter_func()
],
axis=1,
)
results = cast(DataFrame, results)
if axis_number == 1:
results = results.T
results._get_axis(other_axis).names = obj._get_axis(other_axis).names
if not self.as_index:
results.index = ibase.default_index(len(results))
self._insert_inaxis_grouper_inplace(results)
return results
@Appender(DataFrame.idxmax.__doc__)
def idxmax(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
@Appender(DataFrame.idxmin.__doc__)
def idxmin(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
|
py | b40721e0d9aa2619a5c6c0f9eb978f7f431ddf14 | # Generated by Django 2.2.13 on 2020-06-29 05:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Purr',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(max_length=140)),
('date_posted', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('in_reply_to', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='purr.Purr')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
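# Applying this migration happens outside this file; in a standard Django
# project layout one would typically run (app label assumed to be "purr"):
#   python manage.py migrate purr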
|
py | b407223fb30118cff51e6f5326c8cb4a924e4d5f | # -*- coding: utf-8 -*-
class UntrustedIdentityException(Exception):
def __init__(self, name, identityKey):
self.name = name
self.identityKey = identityKey
def getName(self):
return self.name
def getIdentityKey(self):
return self.identityKey
|
py | b40723909d0ed1749efd89e26284d9be69af7be0 | import factory
from users.models import User
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: "username{0:0=2d}".format(n + 1))
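# Illustrative usage (sketch; the sequence counter depends on factory state):
# UserFactory() creates and saves a User with username "username01", the next
# call yields "username02", and so on. UserFactory.build() returns an unsaved
# instance instead of persisting it.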
|
py | b407249919827574a991db935832c3cacd7f61f1 | """
Layout items for Foundation components.
Inherits from the default **crispy_forms** layout objects to force templates on
the right ``TEMPLATE_PACK`` (defined from ``settings.CRISPY_TEMPLATE_PACK``)
and implements Foundation components.
"""
from __future__ import absolute_import
from .base import Div, Callout, Layout, UneditableField, HTML
from .grid import Row, RowFluid, Column
from .fields import ( # noqa: F401
MultiWidgetField, Field, MultiField,
SplitDateTimeField, InlineField,
InlineJustifiedField, SwitchField,
InlineSwitchField, FakeField, Hidden
)
from .buttons import ( # noqa: F401
ButtonHolder, ButtonHolderPanel, ButtonHolderCallout, ButtonGroup,
Button, Submit, Reset,
InputButton, InputSubmit, InputReset,
ButtonElement, ButtonSubmit, ButtonReset
)
from .containers import ( # noqa: F401
Container, ContainerHolder,
Fieldset, TabItem, TabHolder,
VerticalTabHolder, AccordionItem,
AccordionHolder
)
__all__ = [
'Div', 'Callout', 'Layout', 'UneditableField', 'HTML',
'Row', 'RowFluid', 'Column',
'Field', 'FakeField', 'Hidden',
'MultiWidgetField', 'MultiField',
'SplitDateTimeField',
    'InlineField', 'InlineJustifiedField', 'SwitchField', 'InlineSwitchField',
'ButtonHolder', 'ButtonHolderCallout', 'ButtonGroup',
'Button', 'Submit', 'Reset',
'InputButton', 'InputSubmit', 'InputReset',
'ButtonElement', 'ButtonSubmit', 'ButtonReset',
'Container', 'ContainerHolder', 'Fieldset',
'TabItem', 'TabHolder', 'VerticalTabHolder',
'AccordionItem', 'AccordionHolder',
]
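# Illustrative form-helper usage (a sketch only; the field names and grid
# classes below are hypothetical and depend on the project's forms and on the
# Foundation version in use):
# helper.layout = Layout(
#     Row(
#         Column(Field('first_name'), css_class='large-6'),
#         Column(Field('last_name'), css_class='large-6'),
#     ),
#     ButtonHolder(Submit('save', 'Save')),
# )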
|
py | b407257c3657db7a38118445b0da3b1563355644 | import logging
from typing import Dict, List, Sequence, Union, Tuple
# TODO: align with main pypesto multiprocessing format
from multiprocess import Pool, Manager, Queue, Pipe
from tqdm import tqdm
import numpy as np
import copy
import queue
import time
from ..problem import Problem
from .sampler import Sampler, InternalSampler, InternalSample
from .result import McmcPtResult
logger = logging.getLogger(__name__)
# _q: Union[None, Queue] = None
# _r: Union[None, Queue] = None
# _idx: Union[None, int] = None
# _sampler: Union[None, InternalSampler] = None
# def worker_init(work_queue: Queue, return_queue: Queue,
# idx: int, sampler_obj: InternalSampler) -> bool:
# global _q, _r, _idx, _sampler
# _q = work_queue
# _r = return_queue
# _idx = idx
# _sampler = sampler_obj
# return True
# def worker_run() -> Tuple[int, InternalSampler]:
# global _q, _r, _idx, _sampler
# while True:
# try:
# logger.debug(f'sampler {_idx}: WAITING')
# idx, new_last_sample, beta, stop = _q.get(timeout=5)
# if _idx == idx:
# logger.debug(f'sampler {_idx}: new_last_sample={new_last_sample}, beta={beta}, stop={stop}')
# else:
# logger.debug(f'sampler {_idx}: encountered incorrect instruction')
# raise ProcessLookupError('received wrong instructions.')
# if stop is True:
# # logger.debug(f'sampler {_idx}: STOPPING trace_x: {len(_sampler.trace_x)}')
# _q.task_done()
# # logger.debug(f'sampler {_idx}: RETURNING')
# return _idx, _sampler
# if new_last_sample is not None:
# _sampler.set_last_sample(copy.deepcopy(new_last_sample))
# # logger.debug(f'sampler {_idx}: SAMPLING')
# _sampler.sample(n_samples=1, beta=beta)
# # logger.debug(f'sampler {idx} trace_x: {len(_sampler.trace_x)}')
# logger.debug(f'sampler {_idx}: RETURNING')
# _r.put((idx, copy.deepcopy(_sampler.get_last_sample()), beta))
# # logger.debug(f'sampler {_idx}: MARKING COMPLETE')
# _q.task_done()
# except (EOFError, queue.Empty):
# time.sleep(1)
# continue
def worker_run_combined(
work_queue: Queue, return_queue: Queue, idx: int, sampler_obj: InternalSampler
) -> Tuple[int, McmcPtResult]:
_q = work_queue
_r = return_queue
_idx = idx
_sampler = sampler_obj
while True:
try:
# logger.debug(f'sampler {_idx}: WAITING')
idx, new_last_sample, beta, stop = _q.get()
# if _idx == idx:
# logger.debug(f'sampler {_idx}: new_last_sample={new_last_sample}, beta={beta}, stop={stop}')
if _idx != idx:
# logger.debug(f'sampler {_idx}: encountered incorrect instruction')
raise ProcessLookupError('received wrong instructions.')
if stop is True:
# logger.debug(f'sampler {_idx}: STOPPING trace_x: {len(_sampler.trace_x)}')
_q.task_done()
# logger.debug(f'sampler {_idx}: RETURNING')
return _idx, _sampler.get_samples()
if new_last_sample is not None:
_sampler.set_last_sample(copy.deepcopy(new_last_sample))
logger.debug(f'sampler {_idx}: SAMPLING')
_sampler.sample(n_samples=1, beta=beta)
# logger.debug(f'sampler {idx} trace_x: {len(_sampler.trace_x)}')
# logger.debug(f'sampler {_idx}: RETURNING')
_r.put((idx, copy.deepcopy(_sampler.get_last_sample()), beta))
logger.debug(f'sampler {_idx}: MARKING COMPLETE')
_q.task_done()
except (EOFError, queue.Empty):
time.sleep(1)
continue
class ParallelTemperingSampler(Sampler):
"""Simple parallel tempering sampler."""
# TODO: use this as base class, roll parallelized into another class.
def __init__(
self,
internal_sampler: InternalSampler,
betas: Sequence[float] = None,
n_chains: int = None,
options: Dict = None):
super().__init__(options)
# set betas
if (betas is None) == (n_chains is None):
raise ValueError("Set either betas or n_chains.")
if betas is None:
betas = near_exponential_decay_betas(
n_chains=n_chains, exponent=self.options['exponent'],
max_temp=self.options['max_temp'])
if betas[0] != 1.:
raise ValueError("The first chain must have beta=1.0")
self.betas0 = np.array(betas)
self.betas = None
self.temper_lpost = self.options['temper_log_posterior']
self.samplers = [copy.deepcopy(internal_sampler)
for _ in range(len(self.betas0))]
# configure internal samplers
for sampler in self.samplers:
sampler.make_internal(temper_lpost=self.temper_lpost)
@classmethod
def default_options(cls) -> Dict:
return {
'max_temp': 5e4,
'exponent': 4,
'temper_log_posterior': False,
}
def initialize(self,
problem: Problem,
x0: Union[np.ndarray, List[np.ndarray]]):
# initialize all samplers
n_chains = len(self.samplers)
if isinstance(x0, list):
x0s = x0
else:
x0s = [x0 for _ in range(n_chains)]
for sampler, x0 in zip(self.samplers, x0s):
_problem = copy.deepcopy(problem)
sampler.initialize(_problem, x0)
self.betas = self.betas0
def sample(
self, n_samples: int, beta: float = 1.):
# loop over iterations
for i_sample in tqdm(range(int(n_samples))): # TODO test
# sample
for sampler, beta in zip(self.samplers, self.betas):
sampler.sample(n_samples=1, beta=beta)
# swap samples
swapped = self.swap_samples()
# adjust temperatures
self.adjust_betas(i_sample, swapped)
def get_samples(self) -> McmcPtResult:
"""Concatenate all chains."""
results = [sampler.get_samples() for sampler in self.samplers]
trace_x = np.array([result.trace_x[0] for result in results])
trace_neglogpost = np.array([result.trace_neglogpost[0]
for result in results])
trace_neglogprior = np.array([result.trace_neglogprior[0]
for result in results])
return McmcPtResult(
trace_x=trace_x,
trace_neglogpost=trace_neglogpost,
trace_neglogprior=trace_neglogprior,
betas=self.betas
)
def swap_samples(self) -> Sequence[bool]:
"""Swap samples as in Vousden2016."""
# for recording swaps
swapped = []
if len(self.betas) == 1:
# nothing to be done
return swapped
# beta differences
dbetas = self.betas[:-1] - self.betas[1:]
# loop over chains from highest temperature down
for dbeta, sampler1, sampler2 in reversed(
list(zip(dbetas, self.samplers[:-1], self.samplers[1:]))):
# extract samples
sample1 = sampler1.get_last_sample()
sample2 = sampler2.get_last_sample()
# extract log likelihood values
sample1_llh = sample1.lpost - sample1.lprior
sample2_llh = sample2.lpost - sample2.lprior
# swapping probability
p_acc_swap = dbeta * (sample2_llh - sample1_llh)
# flip a coin
u = np.random.uniform(0, 1)
# check acceptance
swap = np.log(u) < p_acc_swap
if swap:
# swap
sampler2.set_last_sample(sample1)
sampler1.set_last_sample(sample2)
# record
swapped.insert(0, swap)
return swapped
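    # The acceptance rule above is the usual parallel-tempering Metropolis
    # criterion: adjacent chains i (beta_i) and i+1 (beta_{i+1}) swap states
    # with probability min(1, exp((beta_i - beta_{i+1}) * (llh_{i+1} - llh_i))).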
def adjust_betas(self, i_sample: int, swapped: Sequence[bool]):
"""Adjust temperature values. Default: Do nothing."""
class PoolParallelTemperingSampler(ParallelTemperingSampler):
def __init__(self,
internal_sampler: InternalSampler,
betas: Sequence[float] = None,
n_chains: int = None,
options: Dict = None,
parallel_pool: Pool = None
):
super().__init__(internal_sampler, betas, n_chains, options)
self.num_chains = n_chains
# set betas
if (betas is None) == (n_chains is None):
raise ValueError("Set either betas or n_chains.")
if betas is None:
betas = near_exponential_decay_betas(
n_chains=n_chains, exponent=self.options['exponent'],
max_temp=self.options['max_temp'])
if betas[0] != 1.:
raise ValueError("The first chain must have beta=1.0")
self.betas0 = np.array(betas)
self.betas = None
self.temper_lpost = self.options['temper_log_posterior']
self.parallel_pool = parallel_pool if parallel_pool else Pool(processes=n_chains)
self.samplers = [copy.deepcopy(internal_sampler)
for _ in range(n_chains)]
# configure internal samplers
for sampler in self.samplers:
sampler.make_internal(temper_lpost=self.temper_lpost)
def initialize(self,
problem: Problem,
x0: Union[np.ndarray, List[np.ndarray]]):
# initialize all samplers
n_chains = len(self.samplers)
if isinstance(x0, list):
x0s = x0
else:
x0s = [x0 for _ in range(n_chains)]
for sampler, x0 in zip(self.samplers, x0s):
_problem = copy.deepcopy(problem)
sampler.initialize(_problem, x0)
self.betas = self.betas0
def sample(self, n_samples: int, beta: float = 1.):
with Manager() as mgr:
queues_work = [mgr.Queue(maxsize=2) for _ in range(self.num_chains)]
queues_return = [mgr.Queue(maxsize=2) for _ in range(self.num_chains)]
worker_results = self.parallel_pool.starmap_async(
func=worker_run_combined, # func=worker_init
iterable=[(queues_work[idx], queues_return[idx], idx, self.samplers[idx])
for idx in range(self.num_chains)])
time.sleep(3.0)
# worker_results = [self.parallel_pool.apply_async(func=worker_run) for _ in range(self.num_chains)]
# time.sleep(3.0)
swapped = [None for _ in self.samplers]
last_samples = [None for _ in self.samplers]
for i_sample in range(int(n_samples)): # tqdm(range(int(n_samples))):
print(f"!! Iteration {i_sample:5} / {int(n_samples):5} !! start time: {time.time()}")
logger.debug('MAIN PROCESS: deploying work...')
for idx, beta in enumerate(self.betas):
queues_work[idx].put((idx, copy.deepcopy(swapped[idx]), beta, False)) # sample
logger.debug('MAIN PROCESS: waiting for return...')
for idx in range(len(self.samplers)):
idx, last_sample, beta = queues_return[idx].get() # get sample
last_samples[idx] = last_sample
logger.debug('MAIN PROCESS: swapping samples...')
swapped = self.swap_samples(last_samples) # swap samples
# logger.debug('MAIN PROCESS: swapping samples...')
self.adjust_betas(i_sample, swapped, last_samples) # adjust temps
# logger.debug(f"swapped: {swapped}")
# logger.debug(f"last_sample: {last_samples}")
# # logger.debug('stopping workers...')
logger.debug('MAIN PROCESS: stopping workers...')
_ = [queues_work[idx].put((idx, None, 0.00, True)) for idx in range(self.num_chains)]
logger.debug('MAIN PROCESS: waiting for workers to stop...')
_ = [queues_work[idx].join() for idx in range(self.num_chains)]
# # logger.debug('reached getting from finalqueue')
# for worker_result in worker_results:
idxs_and_sampler_objs = {idx: sampler for idx, sampler in worker_results.get()}
# print(f"idxs_and_sampler_objs: {[key for key in idxs_and_sampler_objs.keys()]}")
# logger.debug(f'GATHERED sampler {idx} trace_x: {len(sampler_obj.trace_x)}')
for idx, sampler_result in idxs_and_sampler_objs.items():
self.samplers[idx] = sampler_result
# print(f"self.samplers: {[type(x) for x in self.samplers]}")
##### NOT SURE IF THIS IS NEEDED
# for qu in queues_work:
# qu.close()
# for qu in queues_return:
# qu.close()
##### END UNSURE BLOCK
self.parallel_pool.close()
self.parallel_pool.join()
# # logger.debug('joined all workers')
def get_samples(self) -> McmcPtResult:
"""Concatenate all chains."""
# results = [sampler.get_samples() for sampler in self.samplers]
results = self.samplers
# for idx, result in enumerate(results):
# print(f"{idx}: {result.trace_x.shape}")
trace_x = np.array([result.trace_x[0] for result in results])
trace_neglogpost = np.array([result.trace_neglogpost[0]
for result in results])
trace_neglogprior = np.array([result.trace_neglogprior[0]
for result in results])
return McmcPtResult(
trace_x=trace_x,
trace_neglogpost=trace_neglogpost,
trace_neglogprior=trace_neglogprior,
betas=self.betas
)
def swap_samples(self, last_samples: List[Union[InternalSample, None]]) -> List[Union[InternalSample, None]]:
"""Swap samples as in Vousden2016."""
# for recording swaps
swapped = copy.deepcopy(last_samples)
if len(self.betas) == 1:
# nothing to be done
return swapped
# beta differences
dbetas = self.betas[:-1] - self.betas[1:]
# loop over chains from highest temperature down
        for dbeta, sampler1_idx, sampler2_idx in reversed(list(zip(
                dbetas, range(len(self.samplers) - 1), range(1, len(self.samplers))))):
# extract samples
sample1 = last_samples[sampler1_idx]
sample2 = last_samples[sampler2_idx]
# extract log likelihood values
sample1_llh = sample1.lpost - sample1.lprior
sample2_llh = sample2.lpost - sample2.lprior
# swapping probability
p_acc_swap = dbeta * (sample2_llh - sample1_llh)
# flip a coin
u = np.random.uniform(0, 1)
# check acceptance
swap = np.log(u) < p_acc_swap
if swap:
# swap
# sampler2.set_last_sample(sample1)
# sampler1.set_last_sample(sample2)
swapped[sampler2_idx] = sample1
swapped[sampler1_idx] = sample2
else:
swapped[sampler2_idx] = sample2
swapped[sampler1_idx] = sample1
# record
# swapped.insert(0, swap)
return swapped
def adjust_betas(self, i_sample: int,
swapped: Sequence[Union[None, InternalSample]],
last_samples: Sequence[Union[None, InternalSample]]):
"""Adjust temperature values. Default: Do nothing."""
def near_exponential_decay_betas(
n_chains: int, exponent: float, max_temp: float) -> np.ndarray:
"""Initialize betas in a near-exponential decay scheme.
Parameters
----------
n_chains:
Number of chains to use.
exponent:
Decay exponent. The higher, the more small temperatures are used.
max_temp:
Maximum chain temperature.
"""
# special case of one chain
if n_chains == 1:
return np.array([1.])
temperatures = np.linspace(1, max_temp ** (1 / exponent), n_chains) \
** exponent
betas = 1 / temperatures
return betas
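# Illustrative sketch (parameter values assumed, not from the original module):
# near_exponential_decay_betas(n_chains=4, exponent=4, max_temp=1e4) gives
#   temperatures = np.linspace(1, 10, 4) ** 4 = [1, 256, 2401, 10000]
#   betas        = 1 / temperatures        ~= [1.0, 3.9e-3, 4.2e-4, 1.0e-4]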
|
py | b407262e3af7809f3e5930f5182a2e10b6b433e3 | """Model definitions for MNIST"""
# pylint: disable = C0301, C0103, R0914, C0111
import os
import sys
import tensorflow as tf
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from mnist_vae.src import model_def as mnist_vae_model_def
from mnist_e2e.model_def import end_to_end
from celebA_dcgan import model_def as celebA_dcgan_model_def
import channel_pggan_def as pggan_model_def
def pggan_gen(z, pilot, hparams):
assert hparams.batch_size in [1, 64], 'batch size should be either 64 or 1'
z_full = tf.zeros([64, hparams.z_dim]) + z
pilot_full = tf.tile(pilot,[64,1])
#z_full = tf.concat([z_full, pilot_full],1) # conditional
model_hparams = pggan_model_def.Hparams()
x_hat_full = pggan_model_def.generator(model_hparams, z_full, train=False, reuse=False)
x_hat_batch = x_hat_full[:hparams.batch_size]
restore_vars = pggan_model_def.gen_restore_vars()
restore_dict = {var.op.name: var for var in tf.global_variables() if var.op.name in restore_vars}
restore_path = tf.train.latest_checkpoint(hparams.pretrained_model_dir)
return x_hat_batch, restore_dict, restore_path
def dcgan_discrim(x_hat_batch, pilot, hparams):
assert hparams.batch_size in [1, 64], 'batch size should be either 64 or 1'
x_hat_image = tf.reshape(x_hat_batch, [-1, 64, 16, 2])
all_zeros = tf.zeros([64, 64, 16, 2])
discrim_input = all_zeros + x_hat_image
yb = tf.reshape(pilot, [hparams.batch_size, 1, 1, hparams.pilot_dim]) # conditional
discrim_input = conv_cond_concat(discrim_input, yb) # conditional
model_hparams = celebA_dcgan_model_def.Hparams()
prob, _ = celebA_dcgan_model_def.discriminator(model_hparams, discrim_input, train=False, reuse=False)
prob = tf.reshape(prob, [-1])
prob = prob[:hparams.batch_size]
restore_vars = celebA_dcgan_model_def.gen_restore_vars()
restore_dict = {var.op.name: var for var in tf.global_variables() if var.op.name in restore_vars}
restore_path = tf.train.latest_checkpoint(hparams.pretrained_model_dir)
return prob, restore_dict, restore_path
def dcgan_gen(z, pilot, hparams):
assert hparams.batch_size in [1, 64], 'batch size should be either 64 or 1'
z_full = tf.zeros([64, hparams.z_dim]) + z
pilot_full = tf.tile(pilot,[64,1])
z_full = tf.concat([z_full, pilot_full],1) # conditional
model_hparams = celebA_dcgan_model_def.Hparams()
x_hat_full = celebA_dcgan_model_def.generator(model_hparams, z_full, train=False, reuse=False)
x_hat_batch = x_hat_full[:hparams.batch_size]
restore_vars = celebA_dcgan_model_def.gen_restore_vars()
restore_dict = {var.op.name: var for var in tf.global_variables() if var.op.name in restore_vars}
restore_path = tf.train.latest_checkpoint(hparams.pretrained_model_dir)
return x_hat_batch, restore_dict, restore_path
def construct_gen(hparams, model_def):
model_hparams = model_def.Hparams()
z = model_def.get_z_var(model_hparams, hparams.batch_size)
x_hat,_ = model_def.generator(model_hparams, z, 'gen', reuse=False)
restore_vars = model_def.gen_restore_vars()
restore_dict = {var.op.name: var for var in tf.global_variables() if var.op.name in restore_vars}
restore_path = tf.train.latest_checkpoint(hparams.pretrained_model_dir)
x_hat = tf.transpose(tf.reshape(x_hat,[2,-1]))
return z, x_hat, restore_path, restore_dict
def vae_gen(hparams):
return construct_gen(hparams, mnist_vae_model_def)
def conv_cond_concat(x, y):
# Concatenate conditioning vector on feature map axis.
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return tf.concat([x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
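# Illustrative sketch (shapes assumed): for x with shape [64, 64, 16, 2] and a
# conditioning map y with shape [64, 1, 1, 8], y is broadcast over the spatial
# axes and conv_cond_concat(x, y) returns a tensor of shape [64, 64, 16, 10].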
|
py | b40727cd2cbe33fd5e5be4389350ab7936e68c98 | name="MY naME is akshraA"
small=0
captial=0
for i in name:
if i!=" ":
if "a"<=i:
small+=1
else:
captial+=1
print("small letter ",small)
print("capital letter ",captial) |
py | b40727e00bf377db0138a2c5fa4acf75faca9973 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import ast
import functools
import sys
import traceback
import imath
import IECore
import Gaffer
import GafferUI
from Qt import QtWidgets
from Qt import QtCore
## \todo Custom right click menu with script load, save, execute file, undo, redo etc.
## \todo Standard way for users to customise all menus
class PythonEditor( GafferUI.Editor ) :
def __init__( self, scriptNode, **kw ) :
self.__splittable = GafferUI.SplitContainer( borderWidth = 2 )
GafferUI.Editor.__init__( self, self.__splittable, scriptNode, **kw )
self.__outputWidget = GafferUI.MultiLineTextWidget(
editable = False,
wrapMode = GafferUI.MultiLineTextWidget.WrapMode.None_,
role = GafferUI.MultiLineTextWidget.Role.Code,
)
self.__outputWidget.contextMenuSignal().connect(
Gaffer.WeakMethod( self.__contextMenu ), scoped = False
)
self.__inputWidget = GafferUI.CodeWidget()
self.__splittable.append( self.__outputWidget )
self.__splittable.append( self.__inputWidget )
self.__inputWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ), scoped = False )
self.__inputWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ), scoped = False )
self.__inputWidget.contextMenuSignal().connect(
Gaffer.WeakMethod( self.__contextMenu ), scoped = False
)
GafferUI.WidgetAlgo.joinEdges(
[ self.__outputWidget, self.__inputWidget ],
orientation = GafferUI.ListContainer.Orientation.Vertical
)
self.__executionDict = {
"imath" : imath,
"IECore" : IECore,
"Gaffer" : Gaffer,
"GafferUI" : GafferUI,
"root" : scriptNode,
}
self.__inputWidget.setCompleter( GafferUI.CodeWidget.PythonCompleter( self.__executionDict ) )
self.__inputWidget.setHighlighter( GafferUI.CodeWidget.PythonHighlighter() )
self.__inputWidget.setCommentPrefix( "#" )
def inputWidget( self ) :
return self.__inputWidget
def outputWidget( self ) :
return self.__outputWidget
def execute( self ) :
# decide what to execute
haveSelection = True
toExecute = self.__inputWidget.selectedText()
if not toExecute :
haveSelection = False
toExecute = self.__inputWidget.getText()
# parse it first. this lets us give better error formatting
# for syntax errors, and also figure out whether we can eval()
# and display the result or must exec() only.
try :
parsed = ast.parse( toExecute )
except SyntaxError as e :
self.__outputWidget.appendHTML( self.__syntaxErrorToHTML( e ) )
return
# execute it
self.__outputWidget.appendHTML( self.__codeToHTML( toExecute ) )
with Gaffer.OutputRedirection( stdOut = Gaffer.WeakMethod( self.__redirectOutput ), stdErr = Gaffer.WeakMethod( self.__redirectOutput ) ) :
with _MessageHandler( self.__outputWidget ) :
with Gaffer.UndoScope( self.scriptNode() ) :
with self.getContext() :
try :
if len( parsed.body ) == 1 and isinstance( parsed.body[0], ast.Expr ) :
result = eval( toExecute, self.__executionDict, self.__executionDict )
if result is not None :
self.__outputWidget.appendText( str( result ) )
else :
exec( toExecute, self.__executionDict, self.__executionDict )
if not haveSelection :
self.__inputWidget.setText( "" )
except Exception as e :
self.__outputWidget.appendHTML( self.__exceptionToHTML() )
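	# Illustrative behaviour (not stated in the original source) : a single
	# expression such as "1 + 1" parses to one ast.Expr node, so it is eval()'d
	# and "2" is appended to the output, whereas a statement such as "x = 1" is
	# exec()'d and produces no result text.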
## The Python dictionary that provides the globals and locals for `execute()`.
def namespace( self ) :
return self.__executionDict
def __repr__( self ) :
return "GafferUI.PythonEditor( scriptNode )"
def __activated( self, widget ) :
self.execute()
return True
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.GraphComponent ) :
if self.scriptNode().isAncestorOf( dragData ) :
return "root['" + dragData.relativeName( self.scriptNode() ).replace( ".", "']['" ) + "']"
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return "[ " + ", ".join( [ self.__dropText( widget, d ) for d in dragData ] ) + " ]"
elif isinstance( dragData, IECore.CompoundData ) :
return repr( dragData )
elif isinstance( dragData, IECore.Data ) and hasattr( dragData, "value" ) :
return repr( dragData.value )
return None
def __codeToHTML( self, code ) :
code = code.replace( "<", "<" ).replace( ">", ">" )
return "<pre>" + code + "</pre>"
def __syntaxErrorToHTML( self, syntaxError ) :
formatted = traceback.format_exception_only( SyntaxError, syntaxError )
lineNumber = formatted[0].rpartition( "," )[2].strip()
headingText = formatted[-1].replace( ":", " : " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
result += "<br>" + self.__codeToHTML( "".join( formatted[1:-1] ) )
return result
def __exceptionToHTML( self ) :
t = traceback.extract_tb( sys.exc_info()[2] )
lineNumber = str( t[1][1] )
headingText = traceback.format_exception_only( *(sys.exc_info()[:2]) )[0].replace( ":", " : line " + lineNumber + " : ", 1 )
result = "<h1 class='ERROR'>%s</h1>" % headingText
if len( t ) > 2 :
result += "<br>" + self.__codeToHTML( "".join( traceback.format_list( t[2:] ) ) )
return result
def __redirectOutput( self, output ) :
if output != "\n" :
self.__outputWidget.appendText( output )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtWidgets.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
def __contextMenu( self, widget ) :
definition = IECore.MenuDefinition()
if widget is self.inputWidget() :
definition.append(
"/Execute Selection" if widget.selectedText() else "/Execute",
{
"command" : self.execute,
"shortCut" : "Enter",
}
)
definition.append( "/ExecuteDivider", { "divider" : True } )
definition.append(
"/Copy",
{
"command" : functools.partial(
self.scriptNode().ancestor( Gaffer.ApplicationRoot ).setClipboardContents,
IECore.StringData( widget.selectedText() )
),
"active" : bool( widget.selectedText() )
}
)
if widget is self.inputWidget() :
definition.append(
"/Paste",
{
"command" : functools.partial(
widget.insertText,
self.scriptNode().ancestor( Gaffer.ApplicationRoot ).getClipboardContents().value,
),
"active" : isinstance( self.scriptNode().ancestor( Gaffer.ApplicationRoot ).getClipboardContents(), IECore.StringData )
}
)
definition.append( "/CopyPasteDivider", { "divider" : True } )
definition.append(
"/Select All",
{
"command" : widget._qtWidget().selectAll,
"active" : bool( widget.getText() )
}
)
definition.append( "/SelectDivider", { "divider" : True } )
definition.append(
"/Clear",
{
"command" : functools.partial( widget.setText, "" ),
"active" : bool( widget.getText() )
}
)
self.__popupMenu = GafferUI.Menu( definition )
self.__popupMenu.popup( parent = self )
return True
GafferUI.Editor.registerType( "PythonEditor", PythonEditor )
class _MessageHandler( IECore.MessageHandler ) :
def __init__( self, textWidget ) :
IECore.MessageHandler.__init__( self )
self.__textWidget = textWidget
def handle( self, level, context, message ) :
		html = "<h1 class='%s'>%s : %s </h1><pre class='message'>%s</pre><br>" % (
IECore.Msg.levelAsString( level ),
IECore.Msg.levelAsString( level ),
context,
message
)
self.__textWidget.appendHTML( html )
# update the gui so messages are output as they occur, rather than all getting queued
# up till the end.
QtWidgets.QApplication.instance().processEvents( QtCore.QEventLoop.ExcludeUserInputEvents )
|
py | b407287f7ba1fb4f167e8575d528f7c2599db019 | # -*- coding: utf-8 -*-
# Copyright (C) 1999-2015, Raffaele Salmaso <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
import os
import sys
import subprocess
@contextmanager
def cd(path):
old_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_path)
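# Illustrative usage (the path is a placeholder): the working directory is
# restored even if the body raises.
#   with cd('/tmp'):
#       puts('working in ', os.getcwd())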
def puts(*args):
sys.stdout.write(''.join([str(arg) for arg in args]))
sys.stdout.write('\n')
sys.stdout.flush()
def system(*args, **kwargs):
env = kwargs.pop('env', None)
return subprocess.call(list(args), env=env)
def mkdir(config):
"""
create a directory
"""
os.system("""mkdir -p "%(dest)s/%(date)s/" """ % {
'date': config.tm,
'dest': config.dest,
})
|
py | b4072ab9d9c825a5c730de5a7142c2fdf58874ba | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# *****************************************************************************
# $Id$
#
# Project: OpenGIS Simple Features Reference Implementation
# Purpose: Python port of a simple client for translating between formats.
# Author: Even Rouault, <even dot rouault at spatialys.com>
#
# Port from ogr2ogr.cpp whose author is Frank Warmerdam
#
# *****************************************************************************
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at spatialys.com>
# Copyright (c) 1999, Frank Warmerdam
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# **************************************************************************
# Note : this is the most direct port of ogr2ogr.cpp possible
# It could be made much more Python'ish !
import sys
import os
import stat
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
###############################################################################
class ScaledProgressObject(object):
def __init__(self, mini, maxi, cbk, cbk_data=None):
self.min = mini
self.max = maxi
self.cbk = cbk
self.cbk_data = cbk_data
###############################################################################
def ScaledProgressFunc(pct, msg, data):
if data.cbk is None:
return True
return data.cbk(data.min + pct * (data.max - data.min), msg, data.cbk_data)
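# Illustrative mapping (values assumed): a ScaledProgressObject(0.25, 0.5, cbk)
# reports an inner pct of 0.5 as 0.25 + 0.5 * (0.5 - 0.25) = 0.375 to cbk, so
# per-layer progress is folded into one overall progress bar.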
###############################################################################
def EQUAL(a, b):
return a.lower() == b.lower()
###############################################################################
# Redefinition of GDALTermProgress, so that test_ogr2ogr_py.py
# can check that the progress bar is displayed
nLastTick = -1
def TermProgress(dfComplete, pszMessage, pProgressArg):
# pylint: disable=unused-argument
global nLastTick
nThisTick = int(dfComplete * 40.0)
if nThisTick < 0:
nThisTick = 0
if nThisTick > 40:
nThisTick = 40
# Have we started a new progress run?
if nThisTick < nLastTick and nLastTick >= 39:
nLastTick = -1
if nThisTick <= nLastTick:
return True
while nThisTick > nLastTick:
nLastTick = nLastTick + 1
if (nLastTick % 4) == 0:
sys.stdout.write('%d' % ((nLastTick / 4) * 10))
else:
sys.stdout.write('.')
if nThisTick == 40:
print(" - done.")
else:
sys.stdout.flush()
return True
class TargetLayerInfo(object):
def __init__(self):
self.poDstLayer = None
self.poCT = None
# self.papszTransformOptions = None
self.panMap = None
self.iSrcZField = None
class AssociatedLayers(object):
def __init__(self):
self.poSrcLayer = None
self.psInfo = None
# **********************************************************************
# main()
# **********************************************************************
bSkipFailures = False
nGroupTransactions = 200
bPreserveFID = False
nFIDToFetch = ogr.NullFID
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
GeomOperation = Enum(["NONE", "SEGMENTIZE", "SIMPLIFY_PRESERVE_TOPOLOGY"])
def main(args=None, progress_func=TermProgress, progress_data=None):
global bSkipFailures
global nGroupTransactions
global bPreserveFID
global nFIDToFetch
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of ogr.GetFieldTypeName
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
return 1
pszFormat = "ESRI Shapefile"
pszDataSource = None
pszDestDataSource = None
papszLayers = []
papszDSCO = []
papszLCO = []
bTransform = False
bAppend = False
bUpdate = False
bOverwrite = False
pszOutputSRSDef = None
pszSourceSRSDef = None
poOutputSRS = None
bNullifyOutputSRS = False
poSourceSRS = None
pszNewLayerName = None
pszWHERE = None
poSpatialFilter = None
pszSelect = None
papszSelFields = None
pszSQLStatement = None
eGType = -2
bPromoteToMulti = False
eGeomOp = GeomOperation.NONE
dfGeomOpParam = 0
papszFieldTypesToString = []
bDisplayProgress = False
pfnProgress = None
pProgressArg = None
bClipSrc = False
bWrapDateline = False
poClipSrc = None
pszClipSrcDS = None
pszClipSrcSQL = None
pszClipSrcLayer = None
pszClipSrcWhere = None
poClipDst = None
pszClipDstDS = None
pszClipDstSQL = None
pszClipDstLayer = None
pszClipDstWhere = None
# pszSrcEncoding = None
# pszDstEncoding = None
bWrapDateline = False
bExplodeCollections = False
pszZField = None
nCoordDim = -1
if args is None:
args = sys.argv
args = ogr.GeneralCmdLineProcessor(args)
# --------------------------------------------------------------------
# Processing command line arguments.
# --------------------------------------------------------------------
if args is None:
return 1
nArgc = len(args)
iArg = 1
while iArg < nArgc:
if EQUAL(args[iArg], "-f") and iArg < nArgc - 1:
iArg = iArg + 1
pszFormat = args[iArg]
elif EQUAL(args[iArg], "-dsco") and iArg < nArgc - 1:
iArg = iArg + 1
papszDSCO.append(args[iArg])
elif EQUAL(args[iArg], "-lco") and iArg < nArgc - 1:
iArg = iArg + 1
papszLCO.append(args[iArg])
elif EQUAL(args[iArg], "-preserve_fid"):
bPreserveFID = True
elif len(args[iArg]) >= 5 and EQUAL(args[iArg][0:5], "-skip"):
bSkipFailures = True
nGroupTransactions = 1 # 2409
elif EQUAL(args[iArg], "-append"):
bAppend = True
bUpdate = True
elif EQUAL(args[iArg], "-overwrite"):
bOverwrite = True
bUpdate = True
elif EQUAL(args[iArg], "-update"):
bUpdate = True
elif EQUAL(args[iArg], "-fid") and iArg < nArgc - 1:
iArg = iArg + 1
nFIDToFetch = int(args[iArg])
elif EQUAL(args[iArg], "-sql") and iArg < nArgc - 1:
iArg = iArg + 1
pszSQLStatement = args[iArg]
elif EQUAL(args[iArg], "-nln") and iArg < nArgc - 1:
iArg = iArg + 1
pszNewLayerName = args[iArg]
elif EQUAL(args[iArg], "-nlt") and iArg < nArgc - 1:
if EQUAL(args[iArg + 1], "NONE"):
eGType = ogr.wkbNone
elif EQUAL(args[iArg + 1], "GEOMETRY"):
eGType = ogr.wkbUnknown
elif EQUAL(args[iArg + 1], "PROMOTE_TO_MULTI"):
bPromoteToMulti = True
elif EQUAL(args[iArg + 1], "POINT"):
eGType = ogr.wkbPoint
elif EQUAL(args[iArg + 1], "LINESTRING"):
eGType = ogr.wkbLineString
elif EQUAL(args[iArg + 1], "POLYGON"):
eGType = ogr.wkbPolygon
elif EQUAL(args[iArg + 1], "GEOMETRYCOLLECTION"):
eGType = ogr.wkbGeometryCollection
elif EQUAL(args[iArg + 1], "MULTIPOINT"):
eGType = ogr.wkbMultiPoint
elif EQUAL(args[iArg + 1], "MULTILINESTRING"):
eGType = ogr.wkbMultiLineString
elif EQUAL(args[iArg + 1], "MULTIPOLYGON"):
eGType = ogr.wkbMultiPolygon
elif EQUAL(args[iArg + 1], "GEOMETRY25D"):
eGType = ogr.wkbUnknown | ogr.wkb25DBit
elif EQUAL(args[iArg + 1], "POINT25D"):
eGType = ogr.wkbPoint25D
elif EQUAL(args[iArg + 1], "LINESTRING25D"):
eGType = ogr.wkbLineString25D
elif EQUAL(args[iArg + 1], "POLYGON25D"):
eGType = ogr.wkbPolygon25D
elif EQUAL(args[iArg + 1], "GEOMETRYCOLLECTION25D"):
eGType = ogr.wkbGeometryCollection25D
elif EQUAL(args[iArg + 1], "MULTIPOINT25D"):
eGType = ogr.wkbMultiPoint25D
elif EQUAL(args[iArg + 1], "MULTILINESTRING25D"):
eGType = ogr.wkbMultiLineString25D
elif EQUAL(args[iArg + 1], "MULTIPOLYGON25D"):
eGType = ogr.wkbMultiPolygon25D
else:
print("-nlt %s: type not recognised." % args[iArg + 1])
return 1
iArg = iArg + 1
elif EQUAL(args[iArg], "-dim") and iArg < nArgc - 1:
nCoordDim = int(args[iArg + 1])
if nCoordDim != 2 and nCoordDim != 3:
print("-dim %s: value not handled." % args[iArg + 1])
return 1
iArg = iArg + 1
elif (EQUAL(args[iArg], "-tg") or
EQUAL(args[iArg], "-gt")) and iArg < nArgc - 1:
iArg = iArg + 1
nGroupTransactions = int(args[iArg])
elif EQUAL(args[iArg], "-s_srs") and iArg < nArgc - 1:
iArg = iArg + 1
pszSourceSRSDef = args[iArg]
elif EQUAL(args[iArg], "-a_srs") and iArg < nArgc - 1:
iArg = iArg + 1
pszOutputSRSDef = args[iArg]
if EQUAL(pszOutputSRSDef, "NULL") or \
EQUAL(pszOutputSRSDef, "NONE"):
pszOutputSRSDef = None
bNullifyOutputSRS = True
elif EQUAL(args[iArg], "-t_srs") and iArg < nArgc - 1:
iArg = iArg + 1
pszOutputSRSDef = args[iArg]
bTransform = True
elif EQUAL(args[iArg], "-spat") and iArg + 4 < nArgc:
oRing = ogr.Geometry(ogr.wkbLinearRing)
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
poSpatialFilter = ogr.Geometry(ogr.wkbPolygon)
poSpatialFilter.AddGeometry(oRing)
iArg = iArg + 4
elif EQUAL(args[iArg], "-where") and iArg < nArgc - 1:
iArg = iArg + 1
pszWHERE = args[iArg]
elif EQUAL(args[iArg], "-select") and iArg < nArgc - 1:
iArg = iArg + 1
pszSelect = args[iArg]
if pszSelect.find(',') != -1:
papszSelFields = pszSelect.split(',')
else:
papszSelFields = pszSelect.split(' ')
if papszSelFields[0] == '':
papszSelFields = []
elif EQUAL(args[iArg], "-simplify") and iArg < nArgc - 1:
iArg = iArg + 1
eGeomOp = GeomOperation.SIMPLIFY_PRESERVE_TOPOLOGY
dfGeomOpParam = float(args[iArg])
elif EQUAL(args[iArg], "-segmentize") and iArg < nArgc - 1:
iArg = iArg + 1
eGeomOp = GeomOperation.SEGMENTIZE
dfGeomOpParam = float(args[iArg])
elif EQUAL(args[iArg], "-fieldTypeToString") and iArg < nArgc - 1:
iArg = iArg + 1
pszFieldTypeToString = args[iArg]
if pszFieldTypeToString.find(',') != -1:
tokens = pszFieldTypeToString.split(',')
else:
tokens = pszFieldTypeToString.split(' ')
for token in tokens:
if EQUAL(token, "Integer") or \
EQUAL(token, "Real") or \
EQUAL(token, "String") or \
EQUAL(token, "Date") or \
EQUAL(token, "Time") or \
EQUAL(token, "DateTime") or \
EQUAL(token, "Binary") or \
EQUAL(token, "IntegerList") or \
EQUAL(token, "RealList") or \
EQUAL(token, "StringList"):
papszFieldTypesToString.append(token)
elif EQUAL(token, "All"):
papszFieldTypesToString = ['All']
break
else:
print("Unhandled type for fieldtypeasstring option : %s " % token)
return Usage()
elif EQUAL(args[iArg], "-progress"):
bDisplayProgress = True
# elif EQUAL(args[iArg],"-wrapdateline") )
# {
# bWrapDateline = True;
# }
#
elif EQUAL(args[iArg], "-clipsrc") and iArg < nArgc - 1:
bClipSrc = True
if IsNumber(args[iArg + 1]) and iArg < nArgc - 4:
oRing = ogr.Geometry(ogr.wkbLinearRing)
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
poClipSrc = ogr.Geometry(ogr.wkbPolygon)
poClipSrc.AddGeometry(oRing)
iArg = iArg + 4
elif (len(args[iArg + 1]) >= 7 and EQUAL(args[iArg + 1][0:7], "POLYGON")) or \
(len(args[iArg + 1]) >= 12 and EQUAL(args[iArg + 1][0:12], "MULTIPOLYGON")):
poClipSrc = ogr.CreateGeometryFromWkt(args[iArg + 1])
if poClipSrc is None:
print("FAILURE: Invalid geometry. Must be a valid POLYGON or MULTIPOLYGON WKT\n")
return Usage()
iArg = iArg + 1
elif EQUAL(args[iArg + 1], "spat_extent"):
iArg = iArg + 1
else:
pszClipSrcDS = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipsrcsql") and iArg < nArgc - 1:
pszClipSrcSQL = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipsrclayer") and iArg < nArgc - 1:
pszClipSrcLayer = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipsrcwhere") and iArg < nArgc - 1:
pszClipSrcWhere = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipdst") and iArg < nArgc - 1:
if IsNumber(args[iArg + 1]) and iArg < nArgc - 4:
oRing = ogr.Geometry(ogr.wkbLinearRing)
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 4]))
oRing.AddPoint_2D(float(args[iArg + 3]), float(args[iArg + 2]))
oRing.AddPoint_2D(float(args[iArg + 1]), float(args[iArg + 2]))
poClipDst = ogr.Geometry(ogr.wkbPolygon)
poClipDst.AddGeometry(oRing)
iArg = iArg + 4
elif (len(args[iArg + 1]) >= 7 and EQUAL(args[iArg + 1][0:7], "POLYGON")) or \
(len(args[iArg + 1]) >= 12 and EQUAL(args[iArg + 1][0:12], "MULTIPOLYGON")):
poClipDst = ogr.CreateGeometryFromWkt(args[iArg + 1])
if poClipDst is None:
print("FAILURE: Invalid geometry. Must be a valid POLYGON or MULTIPOLYGON WKT\n")
return Usage()
iArg = iArg + 1
elif EQUAL(args[iArg + 1], "spat_extent"):
iArg = iArg + 1
else:
pszClipDstDS = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipdstsql") and iArg < nArgc - 1:
pszClipDstSQL = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipdstlayer") and iArg < nArgc - 1:
pszClipDstLayer = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-clipdstwhere") and iArg < nArgc - 1:
pszClipDstWhere = args[iArg + 1]
iArg = iArg + 1
elif EQUAL(args[iArg], "-explodecollections"):
bExplodeCollections = True
elif EQUAL(args[iArg], "-zfield") and iArg < nArgc - 1:
pszZField = args[iArg + 1]
iArg = iArg + 1
elif args[iArg][0] == '-':
return Usage()
elif pszDestDataSource is None:
pszDestDataSource = args[iArg]
elif pszDataSource is None:
pszDataSource = args[iArg]
else:
papszLayers.append(args[iArg])
iArg = iArg + 1
if pszDataSource is None:
return Usage()
if bPreserveFID and bExplodeCollections:
print("FAILURE: cannot use -preserve_fid and -explodecollections at the same time\n\n")
return Usage()
if bClipSrc and pszClipSrcDS is not None:
poClipSrc = LoadGeometry(pszClipSrcDS, pszClipSrcSQL, pszClipSrcLayer, pszClipSrcWhere)
if poClipSrc is None:
print("FAILURE: cannot load source clip geometry\n")
return Usage()
elif bClipSrc and poClipSrc is None:
if poSpatialFilter is not None:
poClipSrc = poSpatialFilter.Clone()
if poClipSrc is None:
print("FAILURE: -clipsrc must be used with -spat option or a\n" +
"bounding box, WKT string or datasource must be specified\n")
return Usage()
if pszClipDstDS is not None:
poClipDst = LoadGeometry(pszClipDstDS, pszClipDstSQL, pszClipDstLayer, pszClipDstWhere)
if poClipDst is None:
print("FAILURE: cannot load dest clip geometry\n")
return Usage()
# --------------------------------------------------------------------
# Open data source.
# --------------------------------------------------------------------
poDS = ogr.Open(pszDataSource, False)
# --------------------------------------------------------------------
# Report failure
# --------------------------------------------------------------------
if poDS is None:
print("FAILURE:\n" +
"Unable to open datasource `%s' with the following drivers." % pszDataSource)
for iDriver in range(ogr.GetDriverCount()):
print(" -> " + ogr.GetDriver(iDriver).GetName())
return 1
# --------------------------------------------------------------------
# Try opening the output datasource as an existing, writable
# --------------------------------------------------------------------
poODS = None
poDriver = None
if bUpdate:
poODS = ogr.Open(pszDestDataSource, True)
if poODS is None:
if bOverwrite or bAppend:
poODS = ogr.Open(pszDestDataSource, False)
if poODS is None:
# the datasource doesn't exist at all
bUpdate = False
else:
poODS.delete()
poODS = None
if bUpdate:
print("FAILURE:\n" +
"Unable to open existing output datasource `%s'." % pszDestDataSource)
return 1
elif papszDSCO:
print("WARNING: Datasource creation options ignored since an existing datasource\n" +
" being updated.")
if poODS is not None:
poDriver = poODS.GetDriver()
# --------------------------------------------------------------------
# Find the output driver.
# --------------------------------------------------------------------
if not bUpdate:
poDriver = ogr.GetDriverByName(pszFormat)
if poDriver is None:
print("Unable to find driver `%s'." % pszFormat)
print("The following drivers are available:")
for iDriver in range(ogr.GetDriverCount()):
print(" -> %s" % ogr.GetDriver(iDriver).GetName())
return 1
if not poDriver.TestCapability(ogr.ODrCCreateDataSource):
print("%s driver does not support data source creation." % pszFormat)
return 1
# --------------------------------------------------------------------
# Special case to improve user experience when translating
# a datasource with multiple layers into a shapefile. If the
# user gives a target datasource with .shp and it does not exist,
# the shapefile driver will try to create a file, but this is not
# appropriate because here we have several layers, so create
# a directory instead.
# --------------------------------------------------------------------
if EQUAL(poDriver.GetName(), "ESRI Shapefile") and \
pszSQLStatement is None and \
(len(papszLayers) > 1 or
(not papszLayers and poDS.GetLayerCount() > 1)) and \
pszNewLayerName is None and \
EQUAL(os.path.splitext(pszDestDataSource)[1], ".SHP"):
try:
os.stat(pszDestDataSource)
except OSError:
try:
# decimal 493 = octal 0755. Python 3 needs 0o755, but
# this syntax is only supported by Python >= 2.6
os.mkdir(pszDestDataSource, 493)
except OSError:
print("Failed to create directory %s\n"
"for shapefile datastore.\n" % pszDestDataSource)
return 1
# --------------------------------------------------------------------
# Create the output data source.
# --------------------------------------------------------------------
poODS = poDriver.CreateDataSource(pszDestDataSource, options=papszDSCO)
if poODS is None:
print("%s driver failed to create %s" % (pszFormat, pszDestDataSource))
return 1
# --------------------------------------------------------------------
# Parse the output SRS definition if possible.
# --------------------------------------------------------------------
if pszOutputSRSDef is not None:
poOutputSRS = osr.SpatialReference()
poOutputSRS.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if poOutputSRS.SetFromUserInput(pszOutputSRSDef) != 0:
print("Failed to process SRS definition: %s" % pszOutputSRSDef)
return 1
# --------------------------------------------------------------------
# Parse the source SRS definition if possible.
# --------------------------------------------------------------------
if pszSourceSRSDef is not None:
poSourceSRS = osr.SpatialReference()
poSourceSRS.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if poSourceSRS.SetFromUserInput(pszSourceSRSDef) != 0:
print("Failed to process SRS definition: %s" % pszSourceSRSDef)
return 1
# --------------------------------------------------------------------
# For OSM file.
# --------------------------------------------------------------------
bSrcIsOSM = poDS.GetDriver() is not None and \
poDS.GetDriver().GetName() == "OSM"
nSrcFileSize = 0
if bSrcIsOSM and poDS.GetName() != "/vsistdin/":
sStat = gdal.VSIStatL(poDS.GetName())
if sStat is not None:
nSrcFileSize = sStat.size
# --------------------------------------------------------------------
# Special case for -sql clause. No source layers required.
# --------------------------------------------------------------------
if pszSQLStatement is not None:
if pszWHERE is not None:
print("-where clause ignored in combination with -sql.")
if papszLayers:
print("layer names ignored in combination with -sql.")
poResultSet = poDS.ExecuteSQL(pszSQLStatement, poSpatialFilter,
None)
if poResultSet is not None:
nCountLayerFeatures = 0
if bDisplayProgress:
if bSrcIsOSM:
pfnProgress = progress_func
pProgressArg = progress_data
elif not poResultSet.TestCapability(ogr.OLCFastFeatureCount):
print("Progress turned off as fast feature count is not available.")
bDisplayProgress = False
else:
nCountLayerFeatures = poResultSet.GetFeatureCount()
pfnProgress = progress_func
pProgressArg = progress_data
# --------------------------------------------------------------------
# Special case to improve user experience when translating into
# single file shapefile and source has only one layer, and that
# the layer name isn't specified
# --------------------------------------------------------------------
if EQUAL(poDriver.GetName(), "ESRI Shapefile") and \
pszNewLayerName is None:
try:
mode = os.stat(pszDestDataSource).st_mode
if (mode & stat.S_IFDIR) == 0:
pszNewLayerName = os.path.splitext(os.path.basename(pszDestDataSource))[0]
except OSError:
pass
psInfo = SetupTargetLayer(poDS,
poResultSet,
poODS,
papszLCO,
pszNewLayerName,
bTransform,
poOutputSRS,
bNullifyOutputSRS,
poSourceSRS,
papszSelFields,
bAppend, eGType, bPromoteToMulti, nCoordDim, bOverwrite,
papszFieldTypesToString,
bWrapDateline,
bExplodeCollections,
pszZField,
pszWHERE)
poResultSet.ResetReading()
if psInfo is None or not TranslateLayer(psInfo, poDS, poResultSet, poODS,
poOutputSRS, bNullifyOutputSRS,
eGType, bPromoteToMulti, nCoordDim,
eGeomOp, dfGeomOpParam,
nCountLayerFeatures,
poClipSrc, poClipDst,
bExplodeCollections,
nSrcFileSize, None,
pfnProgress, pProgressArg):
print(
"Terminating translation prematurely after failed\n" +
"translation from sql statement.")
return 1
poDS.ReleaseResultSet(poResultSet)
# --------------------------------------------------------------------
# Special case for layer interleaving mode.
# --------------------------------------------------------------------
elif bSrcIsOSM and gdal.GetConfigOption("OGR_INTERLEAVED_READING", None) is None:
gdal.SetConfigOption("OGR_INTERLEAVED_READING", "YES")
# if (bSplitListFields)
# {
# fprintf( stderr, "FAILURE: -splitlistfields not supported in this mode\n" );
# exit( 1 );
# }
nSrcLayerCount = poDS.GetLayerCount()
pasAssocLayers = [AssociatedLayers() for _ in range(nSrcLayerCount)]
# --------------------------------------------------------------------
# Special case to improve user experience when translating into
# single file shapefile and source has only one layer, and that
# the layer name isn't specified
# --------------------------------------------------------------------
if EQUAL(poDriver.GetName(), "ESRI Shapefile") and \
(len(papszLayers) == 1 or nSrcLayerCount == 1) and pszNewLayerName is None:
try:
mode = os.stat(pszDestDataSource).st_mode
if (mode & stat.S_IFDIR) == 0:
pszNewLayerName = os.path.splitext(os.path.basename(pszDestDataSource))[0]
except OSError:
pass
if bDisplayProgress and bSrcIsOSM:
pfnProgress = progress_func
pProgressArg = progress_data
# --------------------------------------------------------------------
# If no target layer specified, use all source layers.
# --------------------------------------------------------------------
if not papszLayers:
papszLayers = [None] * nSrcLayerCount
for iLayer in range(nSrcLayerCount):
poLayer = poDS.GetLayer(iLayer)
if poLayer is None:
print("FAILURE: Couldn't fetch advertised layer %d!" % iLayer)
return 1
papszLayers[iLayer] = poLayer.GetName()
else:
if bSrcIsOSM:
osInterestLayers = "SET interest_layers ="
for iLayer, papszLayer in enumerate(papszLayers):
if iLayer != 0:
osInterestLayers = osInterestLayers + ","
osInterestLayers = osInterestLayers + papszLayer
poDS.ExecuteSQL(osInterestLayers, None, None)
# --------------------------------------------------------------------
# First pass to set filters and create target layers.
# --------------------------------------------------------------------
for iLayer in range(nSrcLayerCount):
poLayer = poDS.GetLayer(iLayer)
if poLayer is None:
print("FAILURE: Couldn't fetch advertised layer %d!" % iLayer)
return 1
pasAssocLayers[iLayer].poSrcLayer = poLayer
if CSLFindString(papszLayers, poLayer.GetName()) >= 0:
if pszWHERE is not None:
if poLayer.SetAttributeFilter(pszWHERE) != 0:
print("FAILURE: SetAttributeFilter(%s) on layer '%s' failed.\n" % (pszWHERE, poLayer.GetName()))
if not bSkipFailures:
return 1
if poSpatialFilter is not None:
poLayer.SetSpatialFilter(poSpatialFilter)
psInfo = SetupTargetLayer(poDS,
poLayer,
poODS,
papszLCO,
pszNewLayerName,
bTransform,
poOutputSRS,
bNullifyOutputSRS,
poSourceSRS,
papszSelFields,
bAppend, eGType, bPromoteToMulti, nCoordDim, bOverwrite,
papszFieldTypesToString,
bWrapDateline,
bExplodeCollections,
pszZField,
pszWHERE)
if psInfo is None and not bSkipFailures:
return 1
pasAssocLayers[iLayer].psInfo = psInfo
else:
pasAssocLayers[iLayer].psInfo = None
# --------------------------------------------------------------------
# Second pass to process features in a interleaved layer mode.
# --------------------------------------------------------------------
bHasLayersNonEmpty = True
while bHasLayersNonEmpty:
bHasLayersNonEmpty = False
for iLayer in range(nSrcLayerCount):
poLayer = pasAssocLayers[iLayer].poSrcLayer
psInfo = pasAssocLayers[iLayer].psInfo
anReadFeatureCount = [0]
if psInfo is not None:
if not TranslateLayer(psInfo, poDS, poLayer, poODS,
poOutputSRS, bNullifyOutputSRS,
eGType, bPromoteToMulti, nCoordDim,
eGeomOp, dfGeomOpParam,
0,
poClipSrc, poClipDst,
bExplodeCollections,
nSrcFileSize,
anReadFeatureCount,
pfnProgress, pProgressArg) \
and not bSkipFailures:
print(
"Terminating translation prematurely after failed\n" +
"translation of layer " + poLayer.GetName() + " (use -skipfailures to skip errors)")
return 1
else:
# No matching target layer : just consumes the features
poFeature = poLayer.GetNextFeature()
while poFeature is not None:
anReadFeatureCount[0] = anReadFeatureCount[0] + 1
poFeature = poLayer.GetNextFeature()
if anReadFeatureCount[0] != 0:
bHasLayersNonEmpty = True
else:
nLayerCount = 0
papoLayers = []
# --------------------------------------------------------------------
# Process each data source layer.
# --------------------------------------------------------------------
if not papszLayers:
nLayerCount = poDS.GetLayerCount()
papoLayers = [None] * nLayerCount
iLayer = 0
for iLayer in range(nLayerCount):
poLayer = poDS.GetLayer(iLayer)
if poLayer is None:
print("FAILURE: Couldn't fetch advertised layer %d!" % iLayer)
return 1
papoLayers[iLayer] = poLayer
iLayer = iLayer + 1
# --------------------------------------------------------------------
# Process specified data source layers.
# --------------------------------------------------------------------
else:
nLayerCount = len(papszLayers)
papoLayers = [None] * nLayerCount
iLayer = 0
for layername in papszLayers:
poLayer = poDS.GetLayerByName(layername)
if poLayer is None:
print("FAILURE: Couldn't fetch advertised layer %s!" % layername)
return 1
papoLayers[iLayer] = poLayer
iLayer = iLayer + 1
panLayerCountFeatures = [0] * nLayerCount
nCountLayersFeatures = 0
nAccCountFeatures = 0
# First pass to apply filters and count all features if necessary
for iLayer in range(nLayerCount):
poLayer = papoLayers[iLayer]
if pszWHERE is not None:
if poLayer.SetAttributeFilter(pszWHERE) != 0:
print("FAILURE: SetAttributeFilter(%s) failed." % pszWHERE)
if not bSkipFailures:
return 1
if poSpatialFilter is not None:
poLayer.SetSpatialFilter(poSpatialFilter)
if bDisplayProgress and not bSrcIsOSM:
if not poLayer.TestCapability(ogr.OLCFastFeatureCount):
print("Progress turned off as fast feature count is not available.")
bDisplayProgress = False
else:
panLayerCountFeatures[iLayer] = poLayer.GetFeatureCount()
nCountLayersFeatures += panLayerCountFeatures[iLayer]
# Second pass to do the real job
for iLayer in range(nLayerCount):
poLayer = papoLayers[iLayer]
if bDisplayProgress:
if bSrcIsOSM:
pfnProgress = progress_func
pProgressArg = progress_data
else:
pfnProgress = ScaledProgressFunc
pProgressArg = ScaledProgressObject(
nAccCountFeatures * 1.0 / nCountLayersFeatures,
(nAccCountFeatures + panLayerCountFeatures[iLayer]) * 1.0 / nCountLayersFeatures,
progress_func, progress_data)
nAccCountFeatures += panLayerCountFeatures[iLayer]
# --------------------------------------------------------------------
# Special case to improve user experience when translating into
# single file shapefile and source has only one layer, and that
# the layer name isn't specified
# --------------------------------------------------------------------
if EQUAL(poDriver.GetName(), "ESRI Shapefile") and \
nLayerCount == 1 and pszNewLayerName is None:
try:
mode = os.stat(pszDestDataSource).st_mode
if (mode & stat.S_IFDIR) == 0:
pszNewLayerName = os.path.splitext(os.path.basename(pszDestDataSource))[0]
except OSError:
pass
psInfo = SetupTargetLayer(poDS,
poLayer,
poODS,
papszLCO,
pszNewLayerName,
bTransform,
poOutputSRS,
bNullifyOutputSRS,
poSourceSRS,
papszSelFields,
bAppend, eGType, bPromoteToMulti, nCoordDim, bOverwrite,
papszFieldTypesToString,
bWrapDateline,
bExplodeCollections,
pszZField,
pszWHERE)
poLayer.ResetReading()
if (psInfo is None or
not TranslateLayer(psInfo, poDS, poLayer, poODS,
poOutputSRS, bNullifyOutputSRS,
eGType, bPromoteToMulti, nCoordDim,
eGeomOp, dfGeomOpParam,
panLayerCountFeatures[iLayer],
poClipSrc, poClipDst,
bExplodeCollections,
nSrcFileSize, None,
pfnProgress, pProgressArg)) \
and not bSkipFailures:
print(
"Terminating translation prematurely after failed\n" +
"translation of layer " + poLayer.GetLayerDefn().GetName() + " (use -skipfailures to skip errors)")
return 1
# --------------------------------------------------------------------
# Close down.
# --------------------------------------------------------------------
# We must explicitly destroy the output dataset in order the file
# to be properly closed !
poODS.Destroy()
poDS.Destroy()
return 0
# **********************************************************************
# Usage()
# **********************************************************************
def Usage():
print("Usage: ogr2ogr [--help-general] [-skipfailures] [-append] [-update] [-gt n]\n" +
" [-select field_list] [-where restricted_where] \n" +
" [-progress] [-sql <sql statement>] \n" +
" [-spat xmin ymin xmax ymax] [-preserve_fid] [-fid FID]\n" +
" [-a_srs srs_def] [-t_srs srs_def] [-s_srs srs_def]\n" +
" [-f format_name] [-overwrite] [[-dsco NAME=VALUE] ...]\n" +
" [-simplify tolerance]\n" + \
# // " [-segmentize max_dist] [-fieldTypeToString All|(type1[,type2]*)]\n" + \
" [-fieldTypeToString All|(type1[,type2]*)] [-explodecollections] \n" + \
" dst_datasource_name src_datasource_name\n" + \
" [-lco NAME=VALUE] [-nln name] [-nlt type] [-dim 2|3] [layer [layer ...]]\n" + \
"\n" + \
" -f format_name: output file format name, possible values are:")
for iDriver in range(ogr.GetDriverCount()):
poDriver = ogr.GetDriver(iDriver)
if poDriver.TestCapability(ogr.ODrCCreateDataSource):
print(" -f \"" + poDriver.GetName() + "\"")
print(" -append: Append to existing layer instead of creating new if it exists\n" +
" -overwrite: delete the output layer and recreate it empty\n" +
" -update: Open existing output datasource in update mode\n" +
" -progress: Display progress on terminal. Only works if input layers have the \"fast feature count\" capability\n" +
" -select field_list: Comma-delimited list of fields from input layer to\n" +
" copy to the new layer (defaults to all)\n" +
" -where restricted_where: Attribute query (like SQL WHERE)\n" +
" -sql statement: Execute given SQL statement and save result.\n" +
" -skipfailures: skip features or layers that fail to convert\n" +
" -gt n: group n features per transaction (default 200)\n" +
" -spat xmin ymin xmax ymax: spatial query extents\n" +
" -simplify tolerance: distance tolerance for simplification.\n" + \
# //" -segmentize max_dist: maximum distance between 2 nodes.\n" + \
# //" Used to create intermediate points\n" + \
" -dsco NAME=VALUE: Dataset creation option (format specific)\n" + \
" -lco NAME=VALUE: Layer creation option (format specific)\n" + \
" -nln name: Assign an alternate name to the new layer\n" + \
" -nlt type: Force a geometry type for new layer. One of NONE, GEOMETRY,\n" + \
" POINT, LINESTRING, POLYGON, GEOMETRYCOLLECTION, MULTIPOINT,\n" + \
" MULTIPOLYGON, or MULTILINESTRING. Add \"25D\" for 3D layers.\n" + \
" Default is type of source layer.\n" + \
" -dim dimension: Force the coordinate dimension to the specified value.\n" + \
" -fieldTypeToString type1,...: Converts fields of specified types to\n" + \
" fields of type string in the new layer. Valid types are : \n" + \
" Integer, Real, String, Date, Time, DateTime, Binary, IntegerList, RealList,\n" + \
" StringList. Special value All can be used to convert all fields to strings.")
print(" -a_srs srs_def: Assign an output SRS\n"
" -t_srs srs_def: Reproject/transform to this SRS on output\n"
" -s_srs srs_def: Override source SRS\n"
"\n"
" Srs_def can be a full WKT definition (hard to escape properly),\n"
" or a well known definition (i.e. EPSG:4326) or a file with a WKT\n"
" definition.")
return 1
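# Illustrative invocation (file names are placeholders), with the destination
# datasource first and the source second as documented in Usage() above:
#   python ogr2ogr.py -f "GeoJSON" output.json input.shp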
def CSLFindString(v, mystr):
i = 0
for strIter in v:
if EQUAL(strIter, mystr):
return i
i = i + 1
return -1
def IsNumber(pszStr):
try:
float(pszStr)
return True
except ValueError:
return False
def LoadGeometry(pszDS, pszSQL, pszLyr, pszWhere):
poGeom = None
poDS = ogr.Open(pszDS, False)
if poDS is None:
return None
if pszSQL is not None:
poLyr = poDS.ExecuteSQL(pszSQL, None, None)
elif pszLyr is not None:
poLyr = poDS.GetLayerByName(pszLyr)
else:
poLyr = poDS.GetLayer(0)
if poLyr is None:
print("Failed to identify source layer from datasource.")
poDS.Destroy()
return None
if pszWhere is not None:
poLyr.SetAttributeFilter(pszWhere)
poFeat = poLyr.GetNextFeature()
while poFeat is not None:
poSrcGeom = poFeat.GetGeometryRef()
if poSrcGeom is not None:
eType = wkbFlatten(poSrcGeom.GetGeometryType())
if poGeom is None:
poGeom = ogr.Geometry(ogr.wkbMultiPolygon)
if eType == ogr.wkbPolygon:
poGeom.AddGeometry(poSrcGeom)
elif eType == ogr.wkbMultiPolygon:
for iGeom in range(poSrcGeom.GetGeometryCount()):
poGeom.AddGeometry(poSrcGeom.GetGeometryRef(iGeom))
else:
print("ERROR: Geometry not of polygon type.")
if pszSQL is not None:
poDS.ReleaseResultSet(poLyr)
poDS.Destroy()
return None
poFeat = poLyr.GetNextFeature()
if pszSQL is not None:
poDS.ReleaseResultSet(poLyr)
poDS.Destroy()
return poGeom
def wkbFlatten(x):
return x & (~ogr.wkb25DBit)
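# Illustrative check: wkbFlatten(ogr.wkbPoint25D) == ogr.wkbPoint, i.e. the
# 2.5D flag bit is masked off and only the base geometry type remains.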
# **********************************************************************
# SetZ()
# **********************************************************************
def SetZ(poGeom, dfZ):
if poGeom is None:
return
eGType = wkbFlatten(poGeom.GetGeometryType())
if eGType == ogr.wkbPoint:
poGeom.SetPoint(0, poGeom.GetX(), poGeom.GetY(), dfZ)
elif eGType == ogr.wkbLineString or \
eGType == ogr.wkbLinearRing:
for i in range(poGeom.GetPointCount()):
poGeom.SetPoint(i, poGeom.GetX(i), poGeom.GetY(i), dfZ)
elif eGType == ogr.wkbPolygon or \
eGType == ogr.wkbMultiPoint or \
eGType == ogr.wkbMultiLineString or \
eGType == ogr.wkbMultiPolygon or \
eGType == ogr.wkbGeometryCollection:
for i in range(poGeom.GetGeometryCount()):
SetZ(poGeom.GetGeometryRef(i), dfZ)
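# Illustrative effect (assumed input): applying SetZ to a 2D POINT (1 2) with
# dfZ=5 calls SetPoint(0, 1, 2, 5), which promotes the geometry to 2.5D,
# i.e. POINT (1 2 5); container geometries are handled by recursing into parts.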
# **********************************************************************
# SetupTargetLayer()
# **********************************************************************
def SetupTargetLayer(poSrcDS, poSrcLayer, poDstDS, papszLCO, pszNewLayerName,
bTransform, poOutputSRS, bNullifyOutputSRS, poSourceSRS, papszSelFields,
bAppend, eGType, bPromoteToMulti, nCoordDim, bOverwrite,
papszFieldTypesToString, bWrapDateline,
bExplodeCollections, pszZField, pszWHERE):
# pylint: disable=unused-argument
if pszNewLayerName is None:
pszNewLayerName = poSrcLayer.GetLayerDefn().GetName()
# --------------------------------------------------------------------
# Setup coordinate transformation if we need it.
# --------------------------------------------------------------------
poCT = None
if bTransform:
if poSourceSRS is None:
poSourceSRS = poSrcLayer.GetSpatialRef()
if poSourceSRS is None:
print("Can't transform coordinates, source layer has no\n" +
"coordinate system. Use -s_srs to set one.")
return None
poCT = osr.CoordinateTransformation(poSourceSRS, poOutputSRS)
if poCT is None:
pszWKT = None
print("Failed to create coordinate transformation between the\n" +
"following coordinate systems. This may be because they\n" +
"are not transformable.")
pszWKT = poSourceSRS.ExportToPrettyWkt(0)
print("Source:\n" + pszWKT)
pszWKT = poOutputSRS.ExportToPrettyWkt(0)
print("Target:\n" + pszWKT)
return None
# --------------------------------------------------------------------
# Get other info.
# --------------------------------------------------------------------
poSrcFDefn = poSrcLayer.GetLayerDefn()
if poOutputSRS is None and not bNullifyOutputSRS:
poOutputSRS = poSrcLayer.GetSpatialRef()
# --------------------------------------------------------------------
# Find the layer.
# --------------------------------------------------------------------
# GetLayerByName() can instantiate layers that would have been
# 'hidden' otherwise, for example, non-spatial tables in a
# PostGIS-enabled database, so this apparently useless command is
# not useless. (#4012)
gdal.PushErrorHandler('CPLQuietErrorHandler')
poDstLayer = poDstDS.GetLayerByName(pszNewLayerName)
gdal.PopErrorHandler()
gdal.ErrorReset()
iLayer = -1
if poDstLayer is not None:
nLayerCount = poDstDS.GetLayerCount()
for iLayer in range(nLayerCount):
poLayer = poDstDS.GetLayer(iLayer)
# The .cpp version compares on pointers directly, but we cannot
# do this with swig object, so just compare the names.
if poLayer is not None \
and poLayer.GetName() == poDstLayer.GetName():
break
if iLayer == nLayerCount:
# Shouldn't happen with an ideal driver
poDstLayer = None
# --------------------------------------------------------------------
# If the user requested overwrite, and we have the layer in
# question we need to delete it now so it will get recreated
# (overwritten).
# --------------------------------------------------------------------
if poDstLayer is not None and bOverwrite:
if poDstDS.DeleteLayer(iLayer) != 0:
print("DeleteLayer() failed when overwrite requested.")
return None
poDstLayer = None
# --------------------------------------------------------------------
# If the layer does not exist, then create it.
# --------------------------------------------------------------------
if poDstLayer is None:
if eGType == -2:
eGType = poSrcFDefn.GetGeomType()
n25DBit = eGType & ogr.wkb25DBit
if bPromoteToMulti:
if wkbFlatten(eGType) == ogr.wkbLineString:
eGType = ogr.wkbMultiLineString | n25DBit
elif wkbFlatten(eGType) == ogr.wkbPolygon:
eGType = ogr.wkbMultiPolygon | n25DBit
if bExplodeCollections:
if wkbFlatten(eGType) == ogr.wkbMultiPoint:
eGType = ogr.wkbPoint | n25DBit
elif wkbFlatten(eGType) == ogr.wkbMultiLineString:
eGType = ogr.wkbLineString | n25DBit
elif wkbFlatten(eGType) == ogr.wkbMultiPolygon:
eGType = ogr.wkbPolygon | n25DBit
elif wkbFlatten(eGType) == ogr.wkbGeometryCollection:
eGType = ogr.wkbUnknown | n25DBit
if pszZField is not None:
eGType = eGType | ogr.wkb25DBit
if nCoordDim == 2:
eGType = eGType & ~ogr.wkb25DBit
elif nCoordDim == 3:
eGType = eGType | ogr.wkb25DBit
if not poDstDS.TestCapability(ogr.ODsCCreateLayer):
print("Layer " + pszNewLayerName + "not found, and CreateLayer not supported by driver.")
return None
gdal.ErrorReset()
poDstLayer = poDstDS.CreateLayer(pszNewLayerName, poOutputSRS,
eGType, papszLCO)
if poDstLayer is None:
return None
bAppend = False
# --------------------------------------------------------------------
# Otherwise we will append to it, if append was requested.
# --------------------------------------------------------------------
elif not bAppend:
print("FAILED: Layer " + pszNewLayerName + "already exists, and -append not specified.\n" +
" Consider using -append, or -overwrite.")
return None
else:
if papszLCO:
print("WARNING: Layer creation options ignored since an existing layer is\n" +
" being appended to.")
# --------------------------------------------------------------------
# Add fields. Default to copy all field.
# If only a subset of all fields requested, then output only
# the selected fields, and in the order that they were
# selected.
# --------------------------------------------------------------------
# Initialize the index-to-index map to -1's
nSrcFieldCount = poSrcFDefn.GetFieldCount()
panMap = [-1] * nSrcFieldCount
poDstFDefn = poDstLayer.GetLayerDefn()
if papszSelFields is not None and not bAppend:
nDstFieldCount = 0
if poDstFDefn is not None:
nDstFieldCount = poDstFDefn.GetFieldCount()
for papszSelField in papszSelFields:
iSrcField = poSrcFDefn.GetFieldIndex(papszSelField)
if iSrcField >= 0:
poSrcFieldDefn = poSrcFDefn.GetFieldDefn(iSrcField)
oFieldDefn = ogr.FieldDefn(poSrcFieldDefn.GetNameRef(),
poSrcFieldDefn.GetType())
oFieldDefn.SetWidth(poSrcFieldDefn.GetWidth())
oFieldDefn.SetPrecision(poSrcFieldDefn.GetPrecision())
if papszFieldTypesToString is not None and \
(CSLFindString(papszFieldTypesToString, "All") != -1 or
CSLFindString(papszFieldTypesToString,
ogr.GetFieldTypeName(poSrcFieldDefn.GetType())) != -1):
oFieldDefn.SetType(ogr.OFTString)
# The field may have been already created at layer creation
iDstField = -1
if poDstFDefn is not None:
iDstField = poDstFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
if iDstField >= 0:
panMap[iSrcField] = iDstField
elif poDstLayer.CreateField(oFieldDefn) == 0:
# now that we've created a field, GetLayerDefn() won't return NULL
if poDstFDefn is None:
poDstFDefn = poDstLayer.GetLayerDefn()
# Sanity check : if it fails, the driver is buggy
if poDstFDefn is not None and \
poDstFDefn.GetFieldCount() != nDstFieldCount + 1:
print("The output driver has claimed to have added the %s field, but it did not!" % oFieldDefn.GetNameRef())
else:
panMap[iSrcField] = nDstFieldCount
nDstFieldCount = nDstFieldCount + 1
else:
print("Field '" + papszSelField + "' not found in source layer.")
if not bSkipFailures:
return None
# --------------------------------------------------------------------
# Use SetIgnoredFields() on source layer if available
# --------------------------------------------------------------------
# Here we differ from the ogr2ogr.cpp implementation since the OGRFeatureQuery
# isn't mapped to swig. So in that case just don't use SetIgnoredFields()
# to avoid issue raised in #4015
if poSrcLayer.TestCapability(ogr.OLCIgnoreFields) and pszWHERE is None:
papszIgnoredFields = []
for iSrcField in range(nSrcFieldCount):
pszFieldName = poSrcFDefn.GetFieldDefn(iSrcField).GetNameRef()
bFieldRequested = False
for papszSelField in papszSelFields:
if EQUAL(pszFieldName, papszSelField):
bFieldRequested = True
break
if pszZField is not None and EQUAL(pszFieldName, pszZField):
bFieldRequested = True
# If the source field is not requested, add it to the ignored fields list
if not bFieldRequested:
papszIgnoredFields.append(pszFieldName)
poSrcLayer.SetIgnoredFields(papszIgnoredFields)
elif not bAppend:
nDstFieldCount = 0
if poDstFDefn is not None:
nDstFieldCount = poDstFDefn.GetFieldCount()
for iField in range(nSrcFieldCount):
poSrcFieldDefn = poSrcFDefn.GetFieldDefn(iField)
oFieldDefn = ogr.FieldDefn(poSrcFieldDefn.GetNameRef(),
poSrcFieldDefn.GetType())
oFieldDefn.SetWidth(poSrcFieldDefn.GetWidth())
oFieldDefn.SetPrecision(poSrcFieldDefn.GetPrecision())
if papszFieldTypesToString is not None and \
(CSLFindString(papszFieldTypesToString, "All") != -1 or
CSLFindString(papszFieldTypesToString,
ogr.GetFieldTypeName(poSrcFieldDefn.GetType())) != -1):
oFieldDefn.SetType(ogr.OFTString)
# The field may have already been created at layer creation
iDstField = -1
if poDstFDefn is not None:
iDstField = poDstFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
if iDstField >= 0:
panMap[iField] = iDstField
elif poDstLayer.CreateField(oFieldDefn) == 0:
# now that we've created a field, GetLayerDefn() won't return NULL
if poDstFDefn is None:
poDstFDefn = poDstLayer.GetLayerDefn()
# Sanity check : if it fails, the driver is buggy
if poDstFDefn is not None and \
poDstFDefn.GetFieldCount() != nDstFieldCount + 1:
print("The output driver has claimed to have added the %s field, but it did not!" % oFieldDefn.GetNameRef())
else:
panMap[iField] = nDstFieldCount
nDstFieldCount = nDstFieldCount + 1
else:
# For an existing layer, build the map by fetching the index in the destination
# layer for each source field
if poDstFDefn is None:
print("poDstFDefn == NULL.\n")
return None
for iField in range(nSrcFieldCount):
poSrcFieldDefn = poSrcFDefn.GetFieldDefn(iField)
iDstField = poDstFDefn.GetFieldIndex(poSrcFieldDefn.GetNameRef())
if iDstField >= 0:
panMap[iField] = iDstField
iSrcZField = -1
if pszZField is not None:
iSrcZField = poSrcFDefn.GetFieldIndex(pszZField)
psInfo = TargetLayerInfo()
psInfo.poDstLayer = poDstLayer
psInfo.poCT = poCT
# psInfo.papszTransformOptions = papszTransformOptions
psInfo.panMap = panMap
psInfo.iSrcZField = iSrcZField
return psInfo
# **********************************************************************
# TranslateLayer()
# **********************************************************************
def TranslateLayer(psInfo, poSrcDS, poSrcLayer, poDstDS,
poOutputSRS, bNullifyOutputSRS,
eGType, bPromoteToMulti, nCoordDim, eGeomOp, dfGeomOpParam,
nCountLayerFeatures,
poClipSrc, poClipDst, bExplodeCollections, nSrcFileSize,
pnReadFeatureCount, pfnProgress, pProgressArg):
# pylint: disable=unused-argument
bForceToPolygon = False
bForceToMultiPolygon = False
bForceToMultiLineString = False
poDstLayer = psInfo.poDstLayer
# papszTransformOptions = psInfo.papszTransformOptions
poCT = psInfo.poCT
panMap = psInfo.panMap
iSrcZField = psInfo.iSrcZField
if poOutputSRS is None and not bNullifyOutputSRS:
poOutputSRS = poSrcLayer.GetSpatialRef()
if wkbFlatten(eGType) == ogr.wkbPolygon:
bForceToPolygon = True
elif wkbFlatten(eGType) == ogr.wkbMultiPolygon:
bForceToMultiPolygon = True
elif wkbFlatten(eGType) == ogr.wkbMultiLineString:
bForceToMultiLineString = True
# --------------------------------------------------------------------
# Transfer features.
# --------------------------------------------------------------------
nFeaturesInTransaction = 0
nCount = 0
if nGroupTransactions > 0:
poDstLayer.StartTransaction()
while True:
poDstFeature = None
if nFIDToFetch != ogr.NullFID:
# Only fetch feature on first pass.
if nFeaturesInTransaction == 0:
poFeature = poSrcLayer.GetFeature(nFIDToFetch)
else:
poFeature = None
else:
poFeature = poSrcLayer.GetNextFeature()
if poFeature is None:
break
nParts = 0
nIters = 1
if bExplodeCollections:
poSrcGeometry = poFeature.GetGeometryRef()
if poSrcGeometry is not None:
eSrcType = wkbFlatten(poSrcGeometry.GetGeometryType())
if eSrcType == ogr.wkbMultiPoint or \
eSrcType == ogr.wkbMultiLineString or \
eSrcType == ogr.wkbMultiPolygon or \
eSrcType == ogr.wkbGeometryCollection:
nParts = poSrcGeometry.GetGeometryCount()
nIters = nParts
if nIters == 0:
nIters = 1
for iPart in range(nIters):
nFeaturesInTransaction = nFeaturesInTransaction + 1
if nFeaturesInTransaction == nGroupTransactions:
poDstLayer.CommitTransaction()
poDstLayer.StartTransaction()
nFeaturesInTransaction = 0
gdal.ErrorReset()
poDstFeature = ogr.Feature(poDstLayer.GetLayerDefn())
if poDstFeature.SetFromWithMap(poFeature, 1, panMap) != 0:
if nGroupTransactions > 0:
poDstLayer.CommitTransaction()
print("Unable to translate feature %d from layer %s" % (poFeature.GetFID(), poSrcLayer.GetName()))
return False
if bPreserveFID:
poDstFeature.SetFID(poFeature.GetFID())
poDstGeometry = poDstFeature.GetGeometryRef()
if poDstGeometry is not None:
if nParts > 0:
# For -explodecollections, extract the iPart(th) of the geometry
poPart = poDstGeometry.GetGeometryRef(iPart).Clone()
poDstFeature.SetGeometryDirectly(poPart)
poDstGeometry = poPart
if iSrcZField != -1:
SetZ(poDstGeometry, poFeature.GetFieldAsDouble(iSrcZField))
# This will correct the coordinate dimension to 3
poDupGeometry = poDstGeometry.Clone()
poDstFeature.SetGeometryDirectly(poDupGeometry)
poDstGeometry = poDupGeometry
if nCoordDim == 2 or nCoordDim == 3:
poDstGeometry.SetCoordinateDimension(nCoordDim)
if eGeomOp == GeomOperation.SEGMENTIZE:
pass
# if (poDstFeature.GetGeometryRef() is not None and dfGeomOpParam > 0)
# poDstFeature.GetGeometryRef().segmentize(dfGeomOpParam);
elif eGeomOp == GeomOperation.SIMPLIFY_PRESERVE_TOPOLOGY and dfGeomOpParam > 0:
poNewGeom = poDstGeometry.SimplifyPreserveTopology(dfGeomOpParam)
if poNewGeom is not None:
poDstFeature.SetGeometryDirectly(poNewGeom)
poDstGeometry = poNewGeom
if poClipSrc is not None:
poClipped = poDstGeometry.Intersection(poClipSrc)
if poClipped is None or poClipped.IsEmpty():
# Report progress
nCount = nCount + 1
if pfnProgress is not None:
pfnProgress(nCount * 1.0 / nCountLayerFeatures, "", pProgressArg)
continue
poDstFeature.SetGeometryDirectly(poClipped)
poDstGeometry = poClipped
if poCT is not None:
eErr = poDstGeometry.Transform(poCT)
if eErr != 0:
if nGroupTransactions > 0:
poDstLayer.CommitTransaction()
print("Failed to reproject feature %d (geometry probably out of source or destination SRS)." % poFeature.GetFID())
if not bSkipFailures:
return False
elif poOutputSRS is not None:
poDstGeometry.AssignSpatialReference(poOutputSRS)
if poClipDst is not None:
poClipped = poDstGeometry.Intersection(poClipDst)
if poClipped is None or poClipped.IsEmpty():
continue
poDstFeature.SetGeometryDirectly(poClipped)
poDstGeometry = poClipped
if bForceToPolygon:
poDstFeature.SetGeometryDirectly(ogr.ForceToPolygon(poDstGeometry))
elif bForceToMultiPolygon or \
(bPromoteToMulti and wkbFlatten(poDstGeometry.GetGeometryType()) == ogr.wkbPolygon):
poDstFeature.SetGeometryDirectly(ogr.ForceToMultiPolygon(poDstGeometry))
elif bForceToMultiLineString or \
(bPromoteToMulti and wkbFlatten(poDstGeometry.GetGeometryType()) == ogr.wkbLineString):
poDstFeature.SetGeometryDirectly(ogr.ForceToMultiLineString(poDstGeometry))
gdal.ErrorReset()
if poDstLayer.CreateFeature(poDstFeature) != 0 and not bSkipFailures:
if nGroupTransactions > 0:
poDstLayer.RollbackTransaction()
return False
# Report progress
nCount = nCount + 1
if pfnProgress is not None:
if nSrcFileSize != 0:
if (nCount % 1000) == 0:
poFCLayer = poSrcDS.ExecuteSQL("GetBytesRead()", None, None)
if poFCLayer is not None:
poFeat = poFCLayer.GetNextFeature()
if poFeat is not None:
pszReadSize = poFeat.GetFieldAsString(0)
nReadSize = int(pszReadSize)
pfnProgress(nReadSize * 1.0 / nSrcFileSize, "", pProgressArg)
poSrcDS.ReleaseResultSet(poFCLayer)
else:
pfnProgress(nCount * 1.0 / nCountLayerFeatures, "", pProgressArg)
if pnReadFeatureCount is not None:
pnReadFeatureCount[0] = nCount
if nGroupTransactions > 0:
poDstLayer.CommitTransaction()
return True
if __name__ == '__main__':
sys.exit(main(sys.argv))
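# Invocation sketch (illustrative only; the script and file names are placeholders).
# The messages above reference the -append and -overwrite switches, e.g.:
#
#   python ogr2ogr.py -overwrite destination_datasource source_datasource
#   python ogr2ogr.py -append   destination_datasource source_datasource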
|
py | b4072bddce8bbde0d4668e3431f1e97f43efa5e6 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='new')
crawl_bibleis(crawler, out, bible='NEWNCL')
|
py | b4072c34a31f108dd575ecf6e1672e785c78efef | # File: D (Python 2.4)
from direct.task import Task
from otp.otpbase import OTPLocalizer
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
class DownloadWatcher(DirectObject):
def __init__(self, phaseNames):
self.phaseNames = phaseNames
self.text = DirectLabel(relief = None, guiId = 'DownloadWatcherText', pos = (-0.95999999999999996, 0, -0.91000000000000003), text = OTPLocalizer.DownloadWatcherInitializing, text_fg = (1, 1, 1, 1), text_scale = 0.050000000000000003, textMayChange = 1, text_align = TextNode.ALeft, sortOrder = 50)
self.bar = DirectWaitBar(guiId = 'DownloadWatcherBar', pos = (-0.81000000000000005, 0, -0.95999999999999996), relief = DGG.SUNKEN, frameSize = (-0.59999999999999998, 0.59999999999999998, -0.10000000000000001, 0.10000000000000001), borderWidth = (0.02, 0.02), scale = 0.25, range = 100, sortOrder = 50, frameColor = (0.5, 0.5, 0.5, 0.5), barColor = (0.20000000000000001, 0.69999999999999996, 0.20000000000000001, 0.5), text = '0%', text_scale = 0.16, text_fg = (1, 1, 1, 1), text_align = TextNode.ACenter, text_pos = (0, -0.050000000000000003))
self.accept('launcherPercentPhaseComplete', self.update)
def update(self, phase, percent, reqByteRate, actualByteRate):
phaseName = self.phaseNames[phase]
self.text['text'] = OTPLocalizer.DownloadWatcherUpdate % phaseName
self.bar['text'] = '%s %%' % percent
self.bar['value'] = percent
def cleanup(self):
self.text.destroy()
self.bar.destroy()
self.ignoreAll()
|
py | b4072d13e011cf740c1cc658652180ccb27dbb73 | """mypet URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
py | b4072d8b34894c30dd9a184ce574c5fc1169ecc1 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: utilFunction.py
Description : tool function
Author : JHao
date: 2016/11/25
-------------------------------------------------
Change Activity:
2016/11/25: added robustCrawl, verifyProxy, getHtmlTree
-------------------------------------------------
"""
import requests
from lxml import etree
from Util.WebRequest import WebRequest
def robustCrawl(func):
def decorate(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
pass
# logger.info(u"sorry, 抓取出错。错误原因:")
# logger.info(e)
return decorate
def verifyProxyFormat(proxy):
"""
Check the proxy format (ip:port)
:param proxy:
:return:
"""
import re
verify_regex = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}"
_proxy = re.findall(verify_regex, proxy)
return True if len(_proxy) == 1 and _proxy[0] == proxy else False
def getHtmlTree(url, **kwargs):
"""
Fetch the page and return the parsed HTML tree
:param url:
:param kwargs:
:return:
"""
header = {'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko)',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
}
# TODO fetch a proxy server and make the request through it
wr = WebRequest()
html = wr.get(url=url, header=header).content
return etree.HTML(html)
def tcpConnect(proxy):
"""
TCP three-way handshake connectivity check
:param proxy:
:return:
"""
from socket import socket, AF_INET, SOCK_STREAM
s = socket(AF_INET, SOCK_STREAM)
ip, port = proxy.split(':')
result = s.connect_ex((ip, int(port)))
return True if result == 0 else False
def validUsefulProxy(proxy):
"""
Check whether the proxy is actually usable
:param proxy:
:return:
"""
if isinstance(proxy, bytes):
proxy = proxy.decode("utf8")
proxies = {"https": "https://{proxy}".format(proxy=proxy)}
try:
r = requests.get('https://590m.com/file/3097233-437633828', proxies=proxies, timeout=10, verify=False)
if r.status_code == 200:
return True
except Exception as e:
pass
return False
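# Usage sketch (illustrative only; the proxy address below is a made-up example):
#
#   candidate = "127.0.0.1:8080"
#   if verifyProxyFormat(candidate) and validUsefulProxy(candidate):
#       print("usable proxy:", candidate)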
|
py | b4072e62159d2a3541f9480dc87f8f8c69625f20 | from django.db import models
from django.utils.timezone import now
import json  # needed by DealerReview.to_json below
# Create your models here.
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
# - Name
# - Description
# - Any other fields you would like to include in car make model
# - __str__ method to print a car make object
class CarMake(models.Model):
name = models.CharField(null=False, max_length=30, default='BMW')
description = models.CharField(null=False, max_length=300, default='BMW cars are feature rich.')
# Create a toString method for object string representation
def __str__(self):
return 'Name:' + self.name + ', ' \
+ 'Description:'+ self.description
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
# - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field)
# - Name
# - Dealer id, used to refer a dealer created in cloudant database
# - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.)
# - Year (DateField)
# - Any other fields you would like to include in car model
# - __str__ method to print a car make object
class CarModel(models.Model):
SEDAN = 'sedan'
SUV = 'suv'
WAGON = 'wagon'
MINIVAN = 'Minivan'
OTHERS = 'others'
CAR_CHOICES = [(SEDAN, 'Sedan'), (SUV, 'SUV'), (WAGON, 'Wagon'), (MINIVAN, 'Minivan'), (OTHERS, 'Others')]
carmake = models.ForeignKey(CarMake, null= True, on_delete=models.CASCADE)
name = models.CharField(null= False, max_length=30, default='BMW X1')
dealerid = models.IntegerField(null=True)
cartype = models.CharField(null= False, max_length=20, choices= CAR_CHOICES, default=SEDAN)
#make = models.ForeignKey(CarMake, on_delete=models.CASCADE)
year = models.DateField(null= True)
def __str__(self):
return 'Name ' + self.name
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
# Dealer address
self.address = address
# Dealer city
self.city = city
# Dealer Full Name
self.full_name = full_name
# Dealer id
self.id = id
# Location lat
self.lat = lat
# Location long
self.long = long
# Dealer short name
self.short_name = short_name
# Dealer state
self.st = st
# Dealer zip
self.zip = zip
def __str__(self):
return "Dealer name: " + self.full_name
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview:
def __init__(self, name, dealership, review, purchase):
self.name = name
self.dealership = dealership
self.review = review
self.purchase = purchase
# Optional attributes
self.purchase_date = ""
self.car_make = ""
self.car_model = ""
self.car_year = ""
self.sentiment = ""
self.id = ""
def __str__(self):
return "Review: " + self.review + ", " + "Sentiment: " + self.sentiment
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
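# Usage sketch (illustrative only; assumes migrations have been applied and the
# field values below are made up):
#
#   make = CarMake.objects.create(name='Audi', description='German luxury cars')
#   CarModel.objects.create(carmake=make, name='A4', dealerid=3,
#                           cartype=CarModel.SEDAN, year=now().date())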
|
py | b4072fb4a4021fccfca0a7569d07733dcc4b600c | #encoding:utf-8
from .importer import *
class PhaseShuffle(nn.Module):
# Defines the layer that applies PhaseShuffle
def __init__(self,n):
super().__init__()
self.n = n  # the paper defines the shift range as [-n, n]
def forward(self, x):
# if n is 0, this is equivalent to not applying PhaseShuffle at all
if self.n == 0:
return x
# randomly draw an integer in [-n, n] for each sample; call it shift
shift = torch.Tensor(x.shape[0]).random_(-self.n,self.n+1).type(torch.int)
# store the result of applying PhaseShuffle to x in x_shuffled and return it
x_shuffled = x.clone()
for i,shift_num in enumerate(shift):
if(shift_num==0): continue
dim = len(x_shuffled[i].size()) - 1
origin_length = x[i].shape[dim]
if shift_num > 0:
left = torch.flip(torch.narrow(x[i],dim,1,shift_num),[dim])
right = torch.narrow(x[i],dim,0,origin_length-shift_num)
else:
shift_num = -shift_num
left = torch.narrow(x[i],dim,shift_num,origin_length-shift_num)
right = torch.flip(torch.narrow(x[i],dim,origin_length-shift_num-1,shift_num),[dim])
x_shuffled[i] = torch.cat([left,right],dim)
return x_shuffled
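# Usage sketch for PhaseShuffle (illustrative shapes only):
#
#   ps = PhaseShuffle(n=2)
#   x = torch.randn(4, 64, 1024)   # (batch, channels, samples)
#   y = ps(x)                      # same shape; each sample shifted by up to n with reflection padding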
# Computes "gradient_penalty", needed for the gradient-constraint term of the discriminator loss.
# In WGAN-GP the discriminator loss is expressed as E[judgment of real audio] - E[judgment of fake audio] + gradient constraint term,
# while the generator loss is E[judgment of fake audio].
def gradient_penalty(netD,real,fake,batch_size,gamma=1):
device = real.device
# a Tensor with requires_grad enabled supports backward(), so its gradients can be computed automatically
alpha = torch.rand(batch_size,1,1,requires_grad=True).to(device)
# blend real and fake samples at a random ratio
x = alpha*real + (1-alpha)*fake
# feed the blend into the discriminator; call the result d_
d_ = netD.forward(x)
# compute the gradient from the output d_ with respect to the input x
# it is known that good results are obtained when the L2 norm of this gradient is close to 1,
# so compute gradient_penalty such that training pushes it toward 1
g = torch.autograd.grad(outputs=d_, inputs=x,
grad_outputs=torch.ones(d_.shape).to(device),
create_graph=True, retain_graph=True,only_inputs=True)[0]
g = g.reshape(batch_size, -1)
return ((g.norm(2,dim=1)/gamma-1.0)**2).mean()
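# Hedged usage sketch (not part of the original module): how gradient_penalty is
# typically folded into the WGAN-GP discriminator update described above.
# netD, netG, noise, optD and lambda_gp are hypothetical names used for illustration.
#
#   fake = netG(noise).detach()
#   loss_D = -netD(real).mean() + netD(fake).mean() \
#            + lambda_gp * gradient_penalty(netD, real, fake, real.shape[0])
#   optD.zero_grad()
#   loss_D.backward()
#   optD.step()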
|
py | b407306003ce421008dbb800c08f83bbf2c4cc56 | # mock_dt.py
import datetime
try:
from unittest import mock
except ImportError:
import mock
real_datetime_class = datetime.datetime
def mock_datetime_now(target, dt):
class DatetimeSubclassMeta(type):
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, real_datetime_class)
class BaseMockedDatetime(real_datetime_class):
@classmethod
def now(cls, tz=None):
return target.replace(tzinfo=tz)
@classmethod
def utcnow(cls):
return target
# Python2 & Python3 compatible metaclass
MockedDatetime = DatetimeSubclassMeta('datetime', (BaseMockedDatetime, ),
{})
return mock.patch.object(dt, 'datetime', MockedDatetime)
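# Usage sketch (illustrative; not part of the original module): freeze
# datetime.datetime.now() inside a with-block by patching the `datetime`
# attribute of a target module (here the datetime module itself).
#
#   target = datetime.datetime(2020, 1, 1, 12, 0, 0)
#   with mock_datetime_now(target, datetime):
#       assert datetime.datetime.now() == target
#       assert isinstance(target, datetime.datetime)  # instance checks still pass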
|
py | b40730d3b875cf0eecd7a85237587d9968c67a64 | # NOTE: this is an endless loop
import spi_rects
|
py | b40730f09bf0650307bf1a34cc45dadb298d61cf | #!/usr/bin/env python
"""
Provides wrappers and utilities for working with MAF files and alignments.
"""
# Dan Blankenberg
import logging
import os
import string
import sys
import tempfile
import pkg_resources
pkg_resources.require( "bx-python" )
import bx.align.maf
import bx.intervals
import bx.interval_index_file
from errno import EMFILE
import resource
from copy import deepcopy
assert sys.version_info[:2] >= ( 2, 4 )
log = logging.getLogger(__name__)
GAP_CHARS = [ '-' ]
SRC_SPLIT_CHAR = '.'
def src_split( src ):
fields = src.split( SRC_SPLIT_CHAR, 1 )
spec = fields.pop( 0 )
if fields:
chrom = fields.pop( 0 )
else:
chrom = spec
return spec, chrom
def src_merge( spec, chrom, contig=None ):
if None in [ spec, chrom ]:
spec = chrom = spec or chrom
return bx.align.maf.src_merge( spec, chrom, contig )
def get_species_in_block( block ):
species = []
for c in block.components:
spec, chrom = src_split( c.src )
if spec not in species:
species.append( spec )
return species
def tool_fail( msg="Unknown Error" ):
print >> sys.stderr, "Fatal Error: %s" % msg
sys.exit()
class TempFileHandler( object ):
'''
Handles creating, opening, closing, and deleting of Temp files, with a
maximum number of files open at one time.
'''
DEFAULT_MAX_OPEN_FILES = max( resource.getrlimit( resource.RLIMIT_NOFILE )[0] / 2, 1 )
def __init__( self, max_open_files=None, **kwds ):
if max_open_files is None:
max_open_files = self.DEFAULT_MAX_OPEN_FILES
self.max_open_files = max_open_files
self.files = []
self.open_file_indexes = []
self.kwds = kwds
def get_open_tempfile( self, index=None, **kwds ):
if index is not None and index in self.open_file_indexes:
self.open_file_indexes.remove( index )
else:
if self.max_open_files:
while len( self.open_file_indexes ) >= self.max_open_files:
self.close( self.open_file_indexes[0] )
if index is None:
index = len( self.files )
temp_kwds = dict( self.kwds )
temp_kwds.update( kwds )
# Being able to use delete=True here, would simplify a bit,
# but we support python2.4 in these tools
while True:
try:
tmp_file = tempfile.NamedTemporaryFile( **temp_kwds )
filename = tmp_file.name
break
except OSError, e:
if self.open_file_indexes and e.errno == EMFILE:
self.max_open_files = len( self.open_file_indexes )
self.close( self.open_file_indexes[0] )
else:
raise e
tmp_file.close()
self.files.append( open( filename, 'w+b' ) )
else:
while True:
try:
self.files[ index ] = open( self.files[ index ].name, 'r+b' )
break
except OSError, e:
if self.open_file_indexes and e.errno == EMFILE:
self.max_open_files = len( self.open_file_indexes )
self.close( self.open_file_indexes[0] )
else:
raise e
self.files[ index ].seek( 0, 2 )
self.open_file_indexes.append( index )
return index, self.files[ index ]
def close( self, index, delete=False ):
if index in self.open_file_indexes:
self.open_file_indexes.remove( index )
rval = self.files[ index ].close()
if delete:
try:
os.unlink( self.files[ index ].name )
except OSError:
pass
return rval
def flush( self, index ):
if index in self.open_file_indexes:
self.files[ index ].flush()
def __del__( self ):
for i in xrange( len( self.files ) ):
self.close( i, delete=True )
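# Usage sketch for TempFileHandler (illustrative; keeps at most 4 files open at once):
#
#   tfh = TempFileHandler( max_open_files=4 )
#   idx, fh = tfh.get_open_tempfile()
#   fh.write( 'ACGT' )
#   idx, fh = tfh.get_open_tempfile( idx )   # fetch the same temp file again later
#   tfh.close( idx, delete=True )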
# an object corresponding to a reference layered alignment
class RegionAlignment( object ):
DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
MAX_SEQUENCE_SIZE = sys.maxint # Maximum length of sequence allowed
def __init__( self, size, species=[], temp_file_handler=None ):
assert size <= self.MAX_SEQUENCE_SIZE, "Maximum length allowed for an individual sequence has been exceeded (%i > %i)." % ( size, self.MAX_SEQUENCE_SIZE )
self.size = size
if not temp_file_handler:
temp_file_handler = TempFileHandler()
self.temp_file_handler = temp_file_handler
self.sequences = {}
if not isinstance( species, list ):
species = [species]
for spec in species:
self.add_species( spec )
# add a species to the alignment
def add_species( self, species ):
# make temporary sequence files
file_index, fh = self.temp_file_handler.get_open_tempfile()
self.sequences[species] = file_index
fh.write( "-" * self.size )
# returns the names for species found in alignment, skipping names as requested
def get_species_names( self, skip=[] ):
if not isinstance( skip, list ):
skip = [skip]
names = self.sequences.keys()
for name in skip:
try:
names.remove( name )
except:
pass
return names
# returns the sequence for a species
def get_sequence( self, species ):
file_index, fh = self.temp_file_handler.get_open_tempfile( self.sequences[species] )
fh.seek( 0 )
return fh.read()
# returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement( self, species ):
complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
complement.reverse()
return "".join( complement )
# sets a position for a species
def set_position( self, index, species, base ):
if len( base ) != 1:
raise Exception( "A genomic position can only have a length of 1." )
return self.set_range( index, species, base )
# sets a range for a species
def set_range( self, index, species, bases ):
if index >= self.size or index < 0:
raise Exception( "Your index (%i) is out of range (0 - %i)." % ( index, self.size - 1 ) )
if len( bases ) == 0:
raise Exception( "A set of genomic positions can only have a positive length." )
if species not in self.sequences.keys():
self.add_species( species )
file_index, fh = self.temp_file_handler.get_open_tempfile( self.sequences[species] )
fh.seek( index )
fh.write( bases )
# Flush temp file of specified species, or all species
def flush( self, species=None ):
if species is None:
species = self.sequences.keys()
elif not isinstance( species, list ):
species = [species]
for spec in species:
self.temp_file_handler.flush( self.sequences[spec] )
class GenomicRegionAlignment( RegionAlignment ):
def __init__( self, start, end, species=[], temp_file_handler=None ):
RegionAlignment.__init__( self, end - start, species, temp_file_handler=temp_file_handler )
self.start = start
self.end = end
class SplicedAlignment( object ):
DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
def __init__( self, exon_starts, exon_ends, species=[], temp_file_handler=None ):
if not isinstance( exon_starts, list ):
exon_starts = [exon_starts]
if not isinstance( exon_ends, list ):
exon_ends = [exon_ends]
assert len( exon_starts ) == len( exon_ends ), "The number of starts does not match the number of sizes."
self.exons = []
if not temp_file_handler:
temp_file_handler = TempFileHandler()
self.temp_file_handler = temp_file_handler
for i in range( len( exon_starts ) ):
self.exons.append( GenomicRegionAlignment( exon_starts[i], exon_ends[i], species, temp_file_handler=temp_file_handler ) )
# returns the names for species found in alignment, skipping names as requested
def get_species_names( self, skip=[] ):
if not isinstance( skip, list ):
skip = [skip]
names = []
for exon in self.exons:
for name in exon.get_species_names( skip=skip ):
if name not in names:
names.append( name )
return names
# returns the sequence for a species
def get_sequence( self, species ):
index, fh = self.temp_file_handler.get_open_tempfile()
for exon in self.exons:
if species in exon.get_species_names():
seq = exon.get_sequence( species )
# we need to refetch fh here, since exon.get_sequence( species ) uses a tempfile
# and if max==1, it would close fh
index, fh = self.temp_file_handler.get_open_tempfile( index )
fh.write( seq )
else:
fh.write( "-" * exon.size )
fh.seek( 0 )
rval = fh.read()
self.temp_file_handler.close( index, delete=True )
return rval
# returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement( self, species ):
complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
complement.reverse()
return "".join( complement )
# Start and end of coding region
@property
def start( self ):
return self.exons[0].start
@property
def end( self ):
return self.exons[-1].end
# Open a MAF index using a UID
def maf_index_by_uid( maf_uid, index_location_file ):
for line in open( index_location_file ):
try:
# read each line, if not enough fields, go to next line
if line[0:1] == "#":
continue
fields = line.split('\t')
if maf_uid == fields[1]:
try:
maf_files = fields[4].replace( "\n", "" ).replace( "\r", "" ).split( "," )
return bx.align.maf.MultiIndexed( maf_files, keep_open=True, parse_e_rows=False )
except Exception, e:
raise Exception( 'MAF UID (%s) found, but configuration appears to be malformed: %s' % ( maf_uid, e ) )
except:
pass
return None
# return ( index, temp_index_filename ) for user maf, if available, or build one and return it, return None when no tempfile is created
def open_or_build_maf_index( maf_file, index_filename, species=None ):
try:
return ( bx.align.maf.Indexed( maf_file, index_filename=index_filename, keep_open=True, parse_e_rows=False ), None )
except:
return build_maf_index( maf_file, species=species )
def build_maf_index_species_chromosomes( filename, index_species=None ):
species = []
species_chromosomes = {}
indexes = bx.interval_index_file.Indexes()
blocks = 0
try:
maf_reader = bx.align.maf.Reader( open( filename ) )
while True:
pos = maf_reader.file.tell()
block = maf_reader.next()
if block is None:
break
blocks += 1
for c in block.components:
spec = c.src
chrom = None
if "." in spec:
spec, chrom = spec.split( ".", 1 )
if spec not in species:
species.append( spec )
species_chromosomes[spec] = []
if chrom and chrom not in species_chromosomes[spec]:
species_chromosomes[spec].append( chrom )
if index_species is None or spec in index_species:
forward_strand_start = c.forward_strand_start
forward_strand_end = c.forward_strand_end
try:
forward_strand_start = int( forward_strand_start )
forward_strand_end = int( forward_strand_end )
except ValueError:
continue # start and end are not integers, can't add component to index, goto next component
# this likely only occurs when parse_e_rows is True?
# could a species exist as only e rows? should the
if forward_strand_end > forward_strand_start:
# require positive length; i.e. certain lines have start = end = 0 and cannot be indexed
indexes.add( c.src, forward_strand_start, forward_strand_end, pos, max=c.src_size )
except Exception, e:
# most likely a bad MAF
log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
return ( None, [], {}, 0 )
return ( indexes, species, species_chromosomes, blocks )
# builds and returns ( index, index_filename ) for specified maf_file
def build_maf_index( maf_file, species=None ):
indexes, found_species, species_chromosomes, blocks = build_maf_index_species_chromosomes( maf_file, species )
if indexes is not None:
fd, index_filename = tempfile.mkstemp()
out = os.fdopen( fd, 'w' )
indexes.write( out )
out.close()
return ( bx.align.maf.Indexed( maf_file, index_filename=index_filename, keep_open=True, parse_e_rows=False ), index_filename )
return ( None, None )
def component_overlaps_region( c, region ):
if c is None:
return False
start, end = c.get_forward_strand_start(), c.get_forward_strand_end()
if region.start >= end or region.end <= start:
return False
return True
def chop_block_by_region( block, src, region, species=None, mincols=0 ):
# This chopping method was designed to maintain consistency with how start/end padding gaps have been working in Galaxy thus far:
# behavior as seen when forcing blocks to be '+' relative to src sequence (ref) and using block.slice_by_component( ref, slice_start, slice_end )
# whether-or-not this is the 'correct' behavior is questionable, but this will at least maintain consistency
# comments welcome
slice_start = block.text_size # max for the min()
slice_end = 0 # min for the max()
old_score = block.score # save old score for later use
# We no longer assume only one occurance of src per block, so we need to check them all
for c in iter_components_by_src( block, src ):
if component_overlaps_region( c, region ):
if c.text is not None:
rev_strand = False
if c.strand == "-":
# We want our coord_to_col coordinates to be returned from positive stranded component
rev_strand = True
c = c.reverse_complement()
start = max( region.start, c.start )
end = min( region.end, c.end )
start = c.coord_to_col( start )
end = c.coord_to_col( end )
if rev_strand:
# need to orient slice coordinates to the original block direction
slice_len = end - start
end = len( c.text ) - start
start = end - slice_len
slice_start = min( start, slice_start )
slice_end = max( end, slice_end )
if slice_start < slice_end:
block = block.slice( slice_start, slice_end )
if block.text_size > mincols:
# restore old score, may not be accurate, but it is better than 0 for everything?
block.score = old_score
if species is not None:
block = block.limit_to_species( species )
block.remove_all_gap_columns()
return block
return None
def orient_block_by_region( block, src, region, force_strand=None ):
# loop through components matching src,
# make sure each of these components overlap region
# cache strand for each of overlaping regions
# if force_strand / region.strand not in strand cache, reverse complement
# we could have 2 sequences with same src, overlapping region, on different strands, this would cause no reverse_complementing
strands = [ c.strand for c in iter_components_by_src( block, src ) if component_overlaps_region( c, region ) ]
if strands and ( force_strand is None and region.strand not in strands ) or ( force_strand is not None and force_strand not in strands ):
block = block.reverse_complement()
return block
def get_oriented_chopped_blocks_for_region( index, src, region, species=None, mincols=0, force_strand=None ):
for block, idx, offset in get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols, force_strand ):
yield block
def get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species=None, mincols=0, force_strand=None ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield orient_block_by_region( block, src, region, force_strand ), idx, offset
# split a block with multiple occurances of src into one block per src
def iter_blocks_split_by_src( block, src ):
for src_c in iter_components_by_src( block, src ):
new_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) )
new_block.text_size = block.text_size
for c in block.components:
if c == src_c or c.src != src:
new_block.add_component( deepcopy( c ) ) # components have reference to alignment, dont want to loose reference to original alignment block in original components
yield new_block
# split a block into multiple blocks with all combinations of a species appearing only once per block
def iter_blocks_split_by_species( block, species=None ):
def __split_components_by_species( components_by_species, new_block ):
if components_by_species:
# more species with components to add to this block
components_by_species = deepcopy( components_by_species )
spec_comps = components_by_species.pop( 0 )
for c in spec_comps:
newer_block = deepcopy( new_block )
newer_block.add_component( deepcopy( c ) )
for value in __split_components_by_species( components_by_species, newer_block ):
yield value
else:
# no more components to add, yield this block
yield new_block
# divide components by species
spec_dict = {}
if not species:
species = []
for c in block.components:
spec, chrom = src_split( c.src )
if spec not in spec_dict:
spec_dict[ spec ] = []
species.append( spec )
spec_dict[ spec ].append( c )
else:
for spec in species:
spec_dict[ spec ] = []
for c in iter_components_by_src_start( block, spec ):
spec_dict[ spec ].append( c )
empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) ) # should we copy attributes?
empty_block.text_size = block.text_size
# call recursive function to split into each combo of spec/blocks
for value in __split_components_by_species( spec_dict.values(), empty_block ):
sort_block_components_by_block( value, block ) # restore original component order
yield value
# generator yielding only chopped and valid blocks for a specified region
def get_chopped_blocks_for_region( index, src, region, species=None, mincols=0 ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield block
def get_chopped_blocks_with_index_offset_for_region( index, src, region, species=None, mincols=0 ):
for block, idx, offset in index.get_as_iterator_with_index_and_offset( src, region.start, region.end ):
block = chop_block_by_region( block, src, region, species, mincols )
if block is not None:
yield block, idx, offset
# returns a filled region alignment for specified regions
def get_region_alignment( index, primary_species, chrom, start, end, strand='+', species=None, mincols=0, overwrite_with_gaps=True, temp_file_handler=None ):
if species is not None:
alignment = RegionAlignment( end - start, species, temp_file_handler=temp_file_handler )
else:
alignment = RegionAlignment( end - start, primary_species, temp_file_handler=temp_file_handler )
return fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand, species, mincols, overwrite_with_gaps )
# reduces a block to only positions exisiting in the src provided
def reduce_block_by_primary_genome( block, species, chromosome, region_start ):
# returns ( startIndex, {species:texts}
# where texts' contents are reduced to only positions existing in the primary genome
src = "%s.%s" % ( species, chromosome )
ref = block.get_component_by_src( src )
start_offset = ref.start - region_start
species_texts = {}
for c in block.components:
species_texts[ c.src.split( '.' )[0] ] = list( c.text )
# remove locations which are gaps in the primary species, starting from the downstream end
for i in range( len( species_texts[ species ] ) - 1, -1, -1 ):
if species_texts[ species ][i] == '-':
for text in species_texts.values():
text.pop( i )
for spec, text in species_texts.items():
species_texts[spec] = ''.join( text )
return ( start_offset, species_texts )
# fills a region alignment
def fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand='+', species=None, mincols=0, overwrite_with_gaps=True ):
region = bx.intervals.Interval( start, end )
region.chrom = chrom
region.strand = strand
primary_src = "%s.%s" % ( primary_species, chrom )
# Order blocks overlaping this position by score, lowest first
blocks = []
for block, idx, offset in index.get_as_iterator_with_index_and_offset( primary_src, start, end ):
score = float( block.score )
for i in range( 0, len( blocks ) ):
if score < blocks[i][0]:
blocks.insert( i, ( score, idx, offset ) )
break
else:
blocks.append( ( score, idx, offset ) )
# gap_chars_tuple = tuple( GAP_CHARS )
gap_chars_str = ''.join( GAP_CHARS )
# Loop through ordered blocks and layer by increasing score
for block_dict in blocks:
for block in iter_blocks_split_by_species( block_dict[1].get_at_offset( block_dict[2] ) ): # need to handle each occurance of sequence in block seperately
if component_overlaps_region( block.get_component_by_src( primary_src ), region ):
block = chop_block_by_region( block, primary_src, region, species, mincols ) # chop block
block = orient_block_by_region( block, primary_src, region ) # orient block
start_offset, species_texts = reduce_block_by_primary_genome( block, primary_species, chrom, start )
for spec, text in species_texts.items():
# we should trim gaps from both sides, since these are not positions in this species genome (sequence)
text = text.rstrip( gap_chars_str )
gap_offset = 0
# while text.startswith( gap_chars_tuple ):
while True in [ text.startswith( gap_char ) for gap_char in GAP_CHARS ]: # python2.4 doesn't accept a tuple for .startswith()
gap_offset += 1
text = text[1:]
if not text:
break
if text:
if overwrite_with_gaps:
alignment.set_range( start_offset + gap_offset, spec, text )
else:
for i, char in enumerate( text ):
if char not in GAP_CHARS:
alignment.set_position( start_offset + gap_offset + i, spec, char )
return alignment
# returns a filled spliced region alignment for specified region with start and end lists
def get_spliced_region_alignment( index, primary_species, chrom, starts, ends, strand='+', species=None, mincols=0, overwrite_with_gaps=True, temp_file_handler=None ):
# create spliced alignment object
if species is not None:
alignment = SplicedAlignment( starts, ends, species, temp_file_handler=temp_file_handler )
else:
alignment = SplicedAlignment( starts, ends, [primary_species], temp_file_handler=temp_file_handler )
for exon in alignment.exons:
fill_region_alignment( exon, index, primary_species, chrom, exon.start, exon.end, strand, species, mincols, overwrite_with_gaps )
return alignment
# loop through string array, only return non-commented lines
def line_enumerator( lines, comment_start='#' ):
i = 0
for line in lines:
if not line.startswith( comment_start ):
i += 1
yield ( i, line )
# read a GeneBed file, return list of starts, ends, raw fields
def get_starts_ends_fields_from_gene_bed( line ):
# Starts and ends for exons
starts = []
ends = []
fields = line.split()
# Requires atleast 12 BED columns
if len(fields) < 12:
raise Exception( "Not a proper 12 column BED line (%s)." % line )
tx_start = int( fields[1] )
strand = fields[5]
if strand != '-':
strand = '+' # Default strand is +
cds_start = int( fields[6] )
cds_end = int( fields[7] )
# Calculate and store starts and ends of coding exons
region_start, region_end = cds_start, cds_end
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map( ( lambda x: x + tx_start ), exon_starts )
exon_ends = map( int, fields[10].rstrip( ',' ).split( ',' ) )
exon_ends = map( ( lambda x, y: x + y ), exon_starts, exon_ends )
for start, end in zip( exon_starts, exon_ends ):
start = max( start, region_start )
end = min( end, region_end )
if start < end:
starts.append( start )
ends.append( end )
return ( starts, ends, fields )
def iter_components_by_src( block, src ):
for c in block.components:
if c.src == src:
yield c
def get_components_by_src( block, src ):
return [ value for value in iter_components_by_src( block, src ) ]
def iter_components_by_src_start( block, src ):
for c in block.components:
if c.src.startswith( src ):
yield c
def get_components_by_src_start( block, src ):
return [ value for value in iter_components_by_src_start( block, src ) ]
def sort_block_components_by_block( block1, block2 ):
# orders the components in block1 by the index of the component in block2
# block1 must be a subset of block2
# occurs in-place
return block1.components.sort( cmp=lambda x, y: block2.components.index( x ) - block2.components.index( y ) )
def get_species_in_maf( maf_filename ):
species = []
for block in bx.align.maf.Reader( open( maf_filename ) ):
for spec in get_species_in_block( block ):
if spec not in species:
species.append( spec )
return species
def parse_species_option( species ):
if species:
species = species.split( ',' )
if 'None' not in species:
return species
return None # provided species was '', None, or had 'None' in it
def remove_temp_index_file( index_filename ):
try:
os.unlink( index_filename )
except:
pass
# Below are methods to deal with FASTA files
def get_fasta_header( component, attributes={}, suffix=None ):
header = ">%s(%s):%i-%i|" % ( component.src, component.strand, component.get_forward_strand_start(), component.get_forward_strand_end() )
for key, value in attributes.iteritems():
header = "%s%s=%s|" % ( header, key, value )
if suffix:
header = "%s%s" % ( header, suffix )
else:
header = "%s%s" % ( header, src_split( component.src )[ 0 ] )
return header
def get_attributes_from_fasta_header( header ):
if not header:
return {}
attributes = {}
header = header.lstrip( '>' )
header = header.strip()
fields = header.split( '|' )
try:
region = fields[0]
region = region.split( '(', 1 )
temp = region[0].split( '.', 1 )
attributes['species'] = temp[0]
if len( temp ) == 2:
attributes['chrom'] = temp[1]
else:
attributes['chrom'] = temp[0]
region = region[1].split( ')', 1 )
attributes['strand'] = region[0]
region = region[1].lstrip( ':' ).split( '-' )
attributes['start'] = int( region[0] )
attributes['end'] = int( region[1] )
except:
# fields 0 is not a region coordinate
pass
if len( fields ) > 2:
for i in xrange( 1, len( fields ) - 1 ):
prop = fields[i].split( '=', 1 )
if len( prop ) == 2:
attributes[ prop[0] ] = prop[1]
if len( fields ) > 1:
attributes['__suffix__'] = fields[-1]
return attributes
def iter_fasta_alignment( filename ):
class fastaComponent:
def __init__( self, species, text="" ):
self.species = species
self.text = text
def extend( self, text ):
self.text = self.text + text.replace( '\n', '' ).replace( '\r', '' ).strip()
# yields a list of fastaComponents for a FASTA file
f = open( filename, 'rb' )
components = []
# cur_component = None
while True:
line = f.readline()
if not line:
if components:
yield components
return
line = line.strip()
if not line:
if components:
yield components
components = []
elif line.startswith( '>' ):
attributes = get_attributes_from_fasta_header( line )
components.append( fastaComponent( attributes['species'] ) )
elif components:
components[-1].extend( line )
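# Usage sketch (illustrative; the file name, species and coordinates are placeholders):
#
#   index, index_filename = build_maf_index( 'alignments.maf', species=[ 'hg19' ] )
#   if index is not None:
#       aln = get_region_alignment( index, 'hg19', 'chr1', 1000, 2000, strand='+',
#                                   species=[ 'hg19', 'mm9' ] )
#       print aln.get_sequence( 'mm9' )
#       remove_temp_index_file( index_filename )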
|
py | b4073261524e30d1e1d227c97c73edf8ad7f9572 | import errno
import json
import os
import platform
import re
import subprocess
from invoke.exceptions import Exit
errno_regex = re.compile(r".*\[Errno (\d+)\] (.*)")
__all__ = ["Gitlab"]
class Gitlab(object):
BASE_URL = "https://gitlab.ddbuild.io/api/v4"
def __init__(self, api_token=None):
self.api_token = api_token if api_token else self._api_token()
def test_project_found(self, project):
"""
Checks if a project can be found. This is useful for testing access permissions to projects.
"""
result = self.project(project)
# name is arbitrary, just need to check if something is in the result
if "name" in result:
return
print("Cannot find GitLab project {}".format(project))
print("If you cannot see it in the GitLab WebUI, you likely need permission.")
raise Exit(code=1)
def project(self, project_name):
"""
Gets the project info.
"""
from urllib.parse import quote
path = "/projects/{}".format(quote(project_name, safe=""))
return self.make_request(path, json=True)
def create_pipeline(self, project_name, ref, variables=None):
"""
Create a pipeline targeting a given reference of a project.
ref must be a branch or a tag.
"""
from urllib.parse import quote
if variables is None:
variables = {}
path = "/projects/{}/pipeline".format(quote(project_name, safe=""))
headers = {"Content-Type": "application/json"}
data = json.dumps({"ref": ref, "variables": [{"key": k, "value": v} for (k, v) in variables.items()],})
return self.make_request(path, headers=headers, data=data, json=True)
def pipelines_for_ref(self, project_name, ref, per_page=100):
"""
Gets all pipelines for a given reference
"""
from urllib.parse import quote
path = "/projects/{}/pipelines?ref={}&per_page={}".format(
quote(project_name, safe=""), quote(ref, safe=""), per_page,
)
return self.make_request(path, json=True)
def last_pipeline_for_ref(self, project_name, ref, per_page=100):
"""
Gets the last pipeline for a given reference.
per_page cannot exceed 100.
"""
pipelines = self.pipelines_for_ref(project_name, ref, per_page)
if len(pipelines) == 0:
return None
return sorted(pipelines, key=lambda pipeline: pipeline['created_at'], reverse=True)[0]
def pipeline(self, project_name, pipeline_id):
"""
Gets info for a given pipeline.
"""
from urllib.parse import quote
path = "/projects/{}/pipelines/{}".format(quote(project_name, safe=""), pipeline_id)
return self.make_request(path, json=True)
def commit(self, project_name, commit_sha):
"""
Gets info for a given commit sha.
"""
from urllib.parse import quote
path = "/projects/{}/repository/commits/{}".format(quote(project_name, safe=""), commit_sha)
return self.make_request(path, json=True)
def jobs(self, project_name, pipeline_id, page=1, per_page=100):
"""
Gets one page of the jobs for a pipeline.
per_page cannot exceed 100.
"""
from urllib.parse import quote
path = "/projects/{}/pipelines/{}/jobs?per_page={}&page={}".format(
quote(project_name, safe=""), pipeline_id, per_page, page
)
return self.make_request(path, json=True)
def find_tag(self, project_name, tag_name):
"""
Look up a tag by its name.
"""
from urllib.parse import quote
path = "/projects/{}/repository/tags/{}".format(quote(project_name, safe=""), tag_name)
return self.make_request(path, json=True)
def make_request(self, path, headers=None, data=None, json=False):
"""
Utility to make a request to the Gitlab API.
"""
import requests
url = self.BASE_URL + path
headers = dict(headers or [])
headers["PRIVATE-TOKEN"] = self.api_token
try:
if data:
r = requests.post(url, headers=headers, data=data)
else:
r = requests.get(url, headers=headers)
if r.status_code == 401:
print(
"HTTP 401: Your GITLAB_TOKEN may have expired. You can "
"check and refresh it at "
"https://gitlab.ddbuild.io/profile/personal_access_tokens"
)
print("Gitlab says: {}".format(r.json()["error_description"]))
raise Exit(code=1)
except requests.exceptions.Timeout:
print("Connection to GitLab ({}) timed out.".format(url))
raise Exit(code=1)
except requests.exceptions.RequestException as e:
m = errno_regex.match(str(e))
if not m:
print("Unknown error raised connecting to {}: {}".format(url, e))
# Parse errno to give a better explanation
# Requests doesn't have granularity at the level we want:
# http://docs.python-requests.org/en/master/_modules/requests/exceptions/
errno_code = int(m.group(1))
message = m.group(2)
if errno_code == errno.ENOEXEC:
print("Error resolving {}: {}".format(url, message))
elif errno_code == errno.ECONNREFUSED:
print("Connection to Gitlab ({}) refused".format(url))
else:
print("Error while connecting to {}: {}".format(url, str(e)))
raise Exit(code=1)
if json:
return r.json()
return r.text
def _api_token(self):
if "GITLAB_TOKEN" not in os.environ:
print("GITLAB_TOKEN not found in env. Trying keychain...")
if platform.system() == "Darwin":
try:
output = subprocess.check_output(
['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITLAB_TOKEN', '-w']
)
if len(output) > 0:
return output.strip()
except subprocess.CalledProcessError:
print("GITLAB_TOKEN not found in keychain...")
pass
print(
"Please create an 'api' access token at "
"https://gitlab.ddbuild.io/profile/personal_access_tokens and "
"add it as GITLAB_TOKEN in your keychain "
"or export it from your .bashrc or equivalent."
)
raise Exit(code=1)
return os.environ["GITLAB_TOKEN"]
|
py | b40733754241410aceaec19fa3fa4382198de8fd | _base_ = [
'../../_base_/models/tsm_r50.py', '../../_base_/schedules/sgd_tsm_50e.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(cls_head=dict(num_classes=174))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv1_val_list_rawframes.txt'
sthv1_flip_label_map = {2: 4, 4: 2, 30: 41, 41: 30, 52: 66, 66: 52}
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5, flip_label_map=sthv1_flip_label_map),
dict(type='Imgaug', transforms='default'),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
twice_sample=True,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
filename_tmpl='{:05}.jpg',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
filename_tmpl='{:05}.jpg',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
filename_tmpl='{:05}.jpg',
pipeline=test_pipeline))
evaluation = dict(
interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(weight_decay=0.0005)
# runtime settings
work_dir = './work_dirs/tsm_r50_flip_randaugment_1x1x8_50e_sthv1_rgb/'
|
py | b40733c8716b75062c5887e62e25e1ac27a1378a | import os
import xml.etree.ElementTree as ET
from pybdv.metadata import get_data_path, indent_xml, get_bdv_format
def copy_xml_with_abspath(xml_in, xml_out):
path = get_data_path(xml_in, return_absolute_path=True)
copy_xml_with_newpath(xml_in, xml_out, path,
path_type='absolute')
def copy_xml_with_relpath(xml_in, xml_out):
path = get_data_path(xml_in, return_absolute_path=True)
xml_root = os.path.split(xml_out)[0]
path = os.path.relpath(path, xml_root)
copy_xml_with_newpath(xml_in, xml_out, path,
path_type='relative')
def copy_xml_with_newpath(xml_in, xml_out, data_path,
path_type='relative', data_format=None):
assert path_type in ('absolute', 'relative')
if data_format is None:
data_format = get_bdv_format(xml_in)
# get the path node in the xml tree
root = ET.parse(xml_in).getroot()
seqdesc = root.find('SequenceDescription')
imgload = seqdesc.find('ImageLoader')
imgload.set('format', data_format)
et = imgload.find('hdf5')
if et is None:
et = imgload.find('n5')
if et is None:
raise RuntimeError("Could not find data node")
et.tag = data_format.split('.')[-1]
et.text = data_path
et.set('type', path_type)
indent_xml(root)
tree = ET.ElementTree(root)
tree.write(xml_out)
# should be generalized and moved to pybdv at some point
def copy_xml_as_n5_s3(in_xml, out_xml,
service_endpoint, bucket_name, path_in_bucket,
authentication='Anonymous', region='us-west-2',
bdv_type='bdv.n5.s3'):
""" Copy a bdv xml file and replace the image data loader with the bdv.n5.s3 format.
Arguments:
in_xml [str] - path to the input xml
out_xml [str] - path to the output xml
service_endpoint [str] - url of the s3 service end-point.
For EMBL: 'https://s3.embl.de'.
bucket_name [str] - name of the bucket
path_in_bucket [str] - file paths inside of the bucket
authentication [str] - the authentication mode, can be 'Anonymous' or 'Protected'.
Default: 'Anonymous'
region [str] - the region. Only relevant if aws.s3 is used.
Default: 'us-west-2'
"""
bdv_types = ('bdv.n5.s3', 'ome.zarr.s3')
if bdv_type not in bdv_types:
raise ValueError(f"Invalid bdv type {bdv_type}, expected one of {bdv_types}")
auth_modes = ('Anonymous', 'Protected')
if authentication not in auth_modes:
raise ValueError(f"Invalid authentication mode {authentication}, expected one of {auth_modes}")
# check if we have an xml already
tree = ET.parse(in_xml)
root = tree.getroot()
# load the sequence description
seqdesc = root.find('SequenceDescription')
# update the image loader
# remove the old image loader
imgload = seqdesc.find('ImageLoader')
seqdesc.remove(imgload)
# write the new image loader
imgload = ET.SubElement(seqdesc, 'ImageLoader')
imgload.set('format', bdv_type)
el = ET.SubElement(imgload, 'Key')
el.text = path_in_bucket
el = ET.SubElement(imgload, 'SigningRegion')
el.text = region
el = ET.SubElement(imgload, 'ServiceEndpoint')
el.text = service_endpoint
el = ET.SubElement(imgload, 'BucketName')
el.text = bucket_name
el = ET.SubElement(imgload, 'Authentication')
el.text = authentication
indent_xml(root)
tree = ET.ElementTree(root)
tree.write(out_xml)
def read_path_in_bucket(xml):
root = ET.parse(xml).getroot()
seqdesc = root.find('SequenceDescription')
imgload = seqdesc.find('ImageLoader')
el = imgload.find('Key')
return el.text
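# Usage sketch (illustrative; the file names, bucket and key are placeholders, while
# the EMBL endpoint is the one mentioned in the docstring above):
#
#   copy_xml_with_relpath('volume.xml', 'export/volume.xml')
#   copy_xml_as_n5_s3('volume.xml', 'volume_s3.xml',
#                     service_endpoint='https://s3.embl.de',
#                     bucket_name='my-bucket',
#                     path_in_bucket='volumes/volume.n5')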
|
py | b40735071f3b0a75fef238a5e5cf4d1008ed7611 | #!/usr/bin/env python
import sys
import rospy
from navigation.srv import *
from duckietown_msgs.msg import FSMState, SourceTargetNodes, BoolStamped, Twist2DStamped
from std_msgs.msg import Int16, String
class ActionsDispatcherNode():
def __init__(self):
self.node_name = rospy.get_name()
#adding logic because FSM publishes our state at a high rate
#not just everytime the mode changes but multiple times in each mode
self.first_update = True
self.actions = []
# Parameters:
self.fsm_mode = self.setupParameter("~initial_mode","JOYSTICK_CONTROL")
self.localization_mode = self.setupParameter("~localization_mode","LOCALIZATION")
self.trigger_mode = self.setupParameter("~trigger_mode","INTERSECTION_CONTROL")
self.reset_mode = self.setupParameter("~reset_mode","JOYSTICK_CONTROL")
self.stop_line_wait_time = self.setupParameter("~stop_line_wait_time",2.0)
# Subscribers:
self.sub_mode = rospy.Subscriber("~fsm_mode", FSMState, self.updateMode, queue_size = 1)
self.sub_plan_request = rospy.Subscriber("~plan_request", SourceTargetNodes, self.graph_search)
# Publishers:
self.pub = rospy.Publisher("~turn_type", Int16, queue_size=1, latch=True)
self.pubList = rospy.Publisher("~turn_plan", String, queue_size=1, latch=True)
self.pub_localized = rospy.Publisher("~localized", BoolStamped, queue_size=1, latch=True)
def setupParameter(self,param_name,default_value):
value = rospy.get_param(param_name,default_value)
        rospy.set_param(param_name,value) #Write to parameter server for transparency
rospy.loginfo("[%s] %s = %s " %(self.node_name,param_name,value))
return value
def updateMode(self, data):
self.fsm_mode = data.state
if self.fsm_mode == self.reset_mode:
self.actions = []
rospy.wait_for_service('graph_search')
graph_search = rospy.ServiceProxy('graph_search', GraphSearch)
graph_search('0', '0')
elif self.localization_mode != "none" and self.fsm_mode == self.localization_mode:
self.pubLocalized()
self.dispatcher()
def dispatcher(self):
if self.first_update == False and self.fsm_mode != self.trigger_mode:
self.first_update = True
if self.first_update == True and self.fsm_mode == self.trigger_mode and self.actions:
# Allow time for open loop controller to update state and allow duckiebot to stop at redline:
rospy.sleep(self.stop_line_wait_time)
# Proceed with action dispatching:
action = self.actions.pop(0)
print 'Dispatched:', action
if action == 's':
self.pub.publish(Int16(1))
elif action == 'r':
self.pub.publish(Int16(2))
elif action == 'l':
self.pub.publish(Int16(0))
elif action == 'w':
self.pub.publish(Int16(-1))
action_str = ''
for letter in self.actions:
action_str += letter
self.pubList.publish(action_str)
            self.first_update = False
def graph_search(self, data):
print 'Requesting map for src: ', data.source_node, ' and target: ', data.target_node
rospy.wait_for_service('graph_search')
try:
graph_search = rospy.ServiceProxy('graph_search', GraphSearch)
resp = graph_search(data.source_node, data.target_node)
self.actions = resp.actions
if self.actions:
# remove 'f' (follow line) from actions and add wait action in the end of queue
self.actions = [x for x in self.actions if x != 'f']
self.actions.append('w')
print 'Actions to be executed:', self.actions
action_str = ''
for letter in self.actions:
action_str += letter
self.pubList.publish(action_str)
self.dispatcher()
else:
print 'Actions to be executed:', self.actions
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def pubLocalized(self):
msg = BoolStamped()
msg.data = True
self.pub_localized.publish(msg)
def onShutdown(self):
rospy.loginfo("[ActionsDispatcherNode] Shutdown.")
if __name__ == "__main__":
rospy.init_node('actions_dispatcher_node')
actions_dispatcher_node = ActionsDispatcherNode()
rospy.on_shutdown(actions_dispatcher_node.onShutdown)
rospy.spin()
|
py | b40735089497571c786c52c6dcf3990295fd0423 | """A class for a normal form game"""
import numpy as np
import numpy.typing as npt
from typing import Generator, Any, Set
from scipy.optimize import linprog
from scipy.spatial import HalfspaceIntersection
def build_halfspaces(M: npt.NDArray) -> npt.NDArray:
"""
Build a matrix representation for a halfspace corresponding to:
Mx <= 1 and x >= 0
This is of the form:
[M: -1]
[-1: 0]
As specified in
https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html
Parameters
----------
M : array
A matrix with linear coefficients defining the polytope.
Returns
-------
array
The half spaces.
"""
number_of_strategies, dimension = M.shape
b = np.append(-np.ones(number_of_strategies), np.zeros(dimension))
M = np.append(M, -np.eye(dimension), axis=0)
halfspaces = np.column_stack((M, b.transpose()))
return halfspaces
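# Illustrative only: for M = [[3, 1], [1, 3]] the stacked halfspace matrix is
#
#   [[ 3,  1, -1],
#    [ 1,  3, -1],    rows 0..1 encode  Mx <= 1
#    [-1,  0,  0],
#    [ 0, -1,  0]]    rows 2..3 encode  x >= 0
#
# where each row [A, b] represents A.x + b <= 0, the format expected by
# scipy.spatial.HalfspaceIntersection.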
def find_feasible_point(halfspaces: npt.NDArray) -> npt.NDArray:
"""
Use linear programming to find a point inside the halfspaces (needed to
define it).
Code taken from scipy documentation:
https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.spatial.HalfspaceIntersection.html
Parameters
----------
halfspaces : array
a matrix representation of halfspaces.
Returns
-------
array
A feasible point inside the halfspace.
"""
norm_vector = np.reshape(
np.linalg.norm(halfspaces[:, :-1], axis=1), (halfspaces.shape[0], 1)
)
c = np.zeros((halfspaces.shape[1],))
c[-1] = -1
A = np.hstack((halfspaces[:, :-1], norm_vector))
b = -halfspaces[:, -1:]
res = linprog(c, A_ub=A, b_ub=b)
return res.x[:-1]
def labels(vertex: npt.NDArray, halfspaces: npt.NDArray) -> Set[npt.NDArray]:
"""
    Return the labels of the facets on which a given vertex lies. This is
calculated by carrying out the matrix multiplication.
Parameters
----------
vertex: array
A given vertex of a polytope.
halfspaces: array
A halfspace definition of a polytope.
Returns
-------
set
The set of labels of the vertex.
"""
b = halfspaces[:, -1]
M = halfspaces[:, :-1]
return set(np.where(np.isclose(np.dot(M, vertex), -b))[0])
def non_trivial_vertices(
halfspaces: npt.NDArray,
) -> Generator[tuple, Any, None]:
"""
Returns all vertex, label pairs (ignoring the origin).
Parameters
----------
halfspaces: array
A halfspace definition of a polytope.
Returns
-------
generator
A generator of non trivial vertices and their labels.
"""
feasible_point = find_feasible_point(halfspaces)
hs = HalfspaceIntersection(halfspaces, feasible_point)
hs.close()
return (
(v, labels(v, halfspaces))
for v in hs.intersections
if not np.all(np.isclose(v, 0)) and max(v) < np.inf
)
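if __name__ == "__main__":
    # Small usage sketch (not part of the original module): enumerate the
    # non-trivial vertices of the polytope {x : Mx <= 1, x >= 0} for a 2x2
    # matrix with positive entries, together with their facet labels.
    M = np.array([[3, 1], [1, 3]])
    halfspaces = build_halfspaces(M)
    for vertex, vertex_labels in non_trivial_vertices(halfspaces):
        print(np.round(vertex, 4), sorted(vertex_labels))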
|
py | b4073673f5cbc81cdbcba3cbbffc9ecfa1310936 | import adv_test
import adv
from adv import *
def module():
return Xiaolei
class Xiaolei(adv.Adv):
a1 = ('s',0.2)
def s2_proc(this, e):
Teambuff('s2cc',0.08,10,'crit','rate').on()
Teambuff('s2cd',0.40,10,'crit','dmg').on()
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1, seq=5 and cancel
`s2, seq=5 and cancel
`s3, seq=5 and cancel
"""
adv_test.test(module(), conf, verbose=-2)
|
py | b40736b6381b31cb6c588b56288887e428d73093 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import GrailumTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(GrailumTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
# same blockheader.
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
# b'0x51' is OP_TRUE
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert(block2_orig.vtx != block2.vtx)
node.p2p.send_blocks_and_test([block2], node, success=False, request_block=False, reject_reason='bad-txns-duplicate')
# Check transactions for duplicate inputs
self.log.info("Test duplicate input block.")
block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
block2_orig.vtx[2].rehash()
block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
block2_orig.rehash()
block2_orig.solve()
node.p2p.send_blocks_and_test([block2_orig], node, success=False, request_block=False, reject_reason='bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
node.p2p.send_blocks_and_test([block3], node, success=False, request_block=False, reject_reason='bad-cb-amount')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
|
py | b407373c92aedad3300d9f98a12d67bfa2551ecb | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import netifaces
import ipaddress
from typing import NamedTuple, Dict, List
from ryu.lib.packet import ether_types
from ryu.ofproto.inet import IPPROTO_TCP
from ryu.controller.controller import Datapath
from ryu.ofproto.ofproto_v1_4 import OFPP_LOCAL
from lte.protos.pipelined_pb2 import SubscriberQuotaUpdate, SetupFlowsResult
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS, EGRESS
from magma.pipelined.app.ue_mac import UEMacAddressController
from magma.pipelined.imsi import encode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import Direction, IMSI_REG, \
DIRECTION_REG
class CheckQuotaController(MagmaController):
"""
Quota Check Controller
This controller recognizes special IP addr that IMSI sends a request to and
routes that request to a flask server to check user quota.
"""
APP_NAME = "check_quota"
APP_TYPE = ControllerType.LOGICAL
CheckQuotaConfig = NamedTuple(
'CheckQuotaConfig',
[('bridge_ip', str), ('quota_check_ip', str),
('has_quota_port', int), ('no_quota_port', int),
('cwf_bridge_mac', str)],
)
def __init__(self, *args, **kwargs):
super(CheckQuotaController, self).__init__(*args, **kwargs)
self.config = self._get_config(kwargs['config'])
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_main_table = self._service_manager.get_next_table_num(
self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.egress_table = self._service_manager.get_table_num(EGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
scratch_tbls = self._service_manager.allocate_scratch_tables(
self.APP_NAME, 2)
self._internal_ip_allocator = kwargs['internal_ip_allocator']
self.ip_rewrite_scratch = scratch_tbls[0]
self.mac_rewrite_scratch = \
self._service_manager.INTERNAL_MAC_IP_REWRITE_TBL_NUM
self._clean_restart = kwargs['config']['clean_restart']
self._datapath = None
def _get_config(self, config_dict: Dict) -> NamedTuple:
def get_virtual_iface_mac(iface):
virt_ifaddresses = netifaces.ifaddresses(iface)
return virt_ifaddresses[netifaces.AF_LINK][0]['addr']
return self.CheckQuotaConfig(
bridge_ip=config_dict['bridge_ip_address'],
quota_check_ip=config_dict['quota_check_ip'],
has_quota_port=config_dict['has_quota_port'],
no_quota_port=config_dict['no_quota_port'],
cwf_bridge_mac=get_virtual_iface_mac(config_dict['bridge_name']),
)
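    # Illustrative only (all values are made up): _get_config expects a dict
    # such as
    #   {'bridge_ip_address': '192.168.128.1', 'quota_check_ip': '192.168.128.2',
    #    'has_quota_port': 52001, 'no_quota_port': 52002, 'bridge_name': 'cwag_br0'}
    # and packs it into a CheckQuotaConfig namedtuple, reading the bridge MAC
    # from the named interface via netifaces.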
def handle_restart(self, quota_updates: List[SubscriberQuotaUpdate]
) -> SetupFlowsResult:
"""
Setup the check quota flows for the controller, this is used when
the controller restarts.
"""
# TODO Potentially we can run a diff logic but I don't think there is
# benefit(we don't need stats here)
self._delete_all_flows(self._datapath)
self._install_default_flows(self._datapath)
self.update_subscriber_quota_state(quota_updates)
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def initialize_on_connect(self, datapath: Datapath):
self._datapath = datapath
self._delete_all_flows(datapath)
self._install_default_flows(datapath)
def cleanup_on_disconnect(self, datapath: Datapath):
self._delete_all_flows(datapath)
def update_subscriber_quota_state(self,
updates: List[SubscriberQuotaUpdate]):
if self._datapath is None:
self.logger.error('Datapath not initialized for adding flows')
return
for update in updates:
imsi = update.sid.id
if update.update_type == SubscriberQuotaUpdate.VALID_QUOTA:
self._add_subscriber_flow(imsi, update.mac_addr, True)
elif update.update_type == SubscriberQuotaUpdate.NO_QUOTA:
self._add_subscriber_flow(imsi, update.mac_addr, False)
elif update.update_type == SubscriberQuotaUpdate.TERMINATE:
self.remove_subscriber_flow(imsi)
def remove_subscriber_flow(self, imsi: str):
match = MagmaMatch(imsi=encode_imsi(imsi))
flows.delete_flow(self._datapath, self.tbl_num, match)
flows.delete_flow(self._datapath, self.ip_rewrite_scratch, match)
def _add_subscriber_flow(self, imsi: str, ue_mac: str, has_quota: bool):
"""
Redirect the UE flow to the dedicated flask server.
On return traffic rewrite the IP/port so the redirection is seamless.
Match incoming user traffic:
1. Rewrite ip src to be in same subnet as check quota server
2. Rewrite ip dst to check quota server
3. Rewrite eth dst to check quota server
4. Rewrite tcp dst port to either quota/non quota
5. LEARN action
This will rewrite the ip src and dst and tcp port for traffic
coming back to the UE
6. ARP controller arp clamp
Sets the ARP clamping(for ARPs from the check quota server)
for the fake IP we used to reach the check quota server
"""
parser = self._datapath.ofproto_parser
internal_ip = self._internal_ip_allocator.next_ip()
if has_quota:
tcp_dst = self.config.has_quota_port
else:
tcp_dst = self.config.no_quota_port
match = MagmaMatch(
imsi=encode_imsi(imsi), eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP, direction=Direction.OUT,
vlan_vid=(0x1000, 0x1000),
ipv4_dst=self.config.quota_check_ip
)
actions = [
parser.NXActionLearn(
table_id=self.ip_rewrite_scratch,
priority=flows.UE_FLOW_PRIORITY,
specs=[
parser.NXFlowSpecMatch(
src=ether_types.ETH_TYPE_IP, dst=('eth_type_nxm', 0),
n_bits=16
),
parser.NXFlowSpecMatch(
src=IPPROTO_TCP, dst=('ip_proto_nxm', 0), n_bits=8
),
parser.NXFlowSpecMatch(
src=Direction.IN,
dst=(DIRECTION_REG, 0),
n_bits=32
),
parser.NXFlowSpecMatch(
src=int(ipaddress.IPv4Address(self.config.bridge_ip)),
dst=('ipv4_src_nxm', 0),
n_bits=32
),
parser.NXFlowSpecMatch(
src=int(internal_ip),
dst=('ipv4_dst_nxm', 0),
n_bits=32
),
parser.NXFlowSpecMatch(
src=('tcp_src_nxm', 0),
dst=('tcp_dst_nxm', 0),
n_bits=16
),
parser.NXFlowSpecMatch(
src=tcp_dst,
dst=('tcp_src_nxm', 0),
n_bits=16
),
parser.NXFlowSpecMatch(
src=encode_imsi(imsi),
dst=(IMSI_REG, 0),
n_bits=64
),
parser.NXFlowSpecLoad(
src=('ipv4_src_nxm', 0),
dst=('ipv4_dst_nxm', 0),
n_bits=32
),
parser.NXFlowSpecLoad(
src=int(
ipaddress.IPv4Address(self.config.quota_check_ip)),
dst=('ipv4_src_nxm', 0),
n_bits=32
),
parser.NXFlowSpecLoad(
src=80,
dst=('tcp_src_nxm', 0),
n_bits=16
),
]
),
parser.NXActionLearn(
table_id=self.mac_rewrite_scratch,
priority=flows.UE_FLOW_PRIORITY,
specs=[
parser.NXFlowSpecMatch(
src=ether_types.ETH_TYPE_IP, dst=('eth_type_nxm', 0),
n_bits=16
),
parser.NXFlowSpecMatch(
src=IPPROTO_TCP, dst=('ip_proto_nxm', 0), n_bits=8
),
parser.NXFlowSpecMatch(
src=int(ipaddress.IPv4Address(self.config.bridge_ip)),
dst=('ipv4_src_nxm', 0),
n_bits=32
),
parser.NXFlowSpecMatch(
src=int(internal_ip),
dst=('ipv4_dst_nxm', 0),
n_bits=32
),
parser.NXFlowSpecMatch(
src=('tcp_src_nxm', 0),
dst=('tcp_dst_nxm', 0),
n_bits=16
),
parser.NXFlowSpecMatch(
src=tcp_dst,
dst=('tcp_src_nxm', 0),
n_bits=16
),
parser.NXFlowSpecLoad(
src=('eth_src_nxm', 0),
dst=('eth_dst_nxm', 0),
n_bits=48
),
parser.NXFlowSpecLoad(
src=encode_imsi(imsi),
dst=(IMSI_REG, 0),
n_bits=64
),
]
),
parser.OFPActionSetField(ipv4_src=str(internal_ip)),
parser.OFPActionSetField(ipv4_dst=self.config.bridge_ip),
parser.OFPActionSetField(eth_dst=self.config.cwf_bridge_mac),
parser.OFPActionSetField(tcp_dst=tcp_dst),
parser.OFPActionPopVlan()
]
flows.add_output_flow(
self._datapath, self.tbl_num, match, actions,
priority=flows.UE_FLOW_PRIORITY,
output_port=OFPP_LOCAL)
ue_tbl = self._service_manager.get_table_num(
UEMacAddressController.APP_NAME)
ue_next_tbl = self._service_manager.get_table_num(INGRESS)
# Allows traffic back from the check quota server
match = MagmaMatch(in_port=OFPP_LOCAL)
actions = [
parser.NXActionResubmitTable(table_id=self.mac_rewrite_scratch)]
flows.add_resubmit_next_service_flow(self._datapath, ue_tbl,
match, actions=actions,
priority=flows.DEFAULT_PRIORITY,
resubmit_table=ue_next_tbl)
# For traffic from the check quota server rewrite src ip and port
match = MagmaMatch(
imsi=encode_imsi(imsi), eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP, direction=Direction.IN,
ipv4_src=self.config.bridge_ip, ipv4_dst=internal_ip)
actions = [
parser.NXActionResubmitTable(table_id=self.ip_rewrite_scratch)]
flows.add_resubmit_next_service_flow(
self._datapath, self.tbl_num, match, actions,
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.egress_table
)
        self.logger.debug("Setting up fake arp for subscriber %s(%s), "
                          "with fake ip %s", imsi, ue_mac, internal_ip)
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.set_incoming_arp_flows(self._datapath,
internal_ip, ue_mac)
def _install_default_flows(self, datapath: Datapath):
"""
Set the default flows to just forward to next app.
Args:
datapath: ryu datapath struct
"""
# Default flows for non matched traffic
inbound_match = MagmaMatch(direction=Direction.IN)
outbound_match = MagmaMatch(direction=Direction.OUT)
flows.add_resubmit_next_service_flow(
datapath, self.tbl_num, inbound_match, [],
priority=flows.MINIMUM_PRIORITY,
resubmit_table=self.next_main_table)
flows.add_resubmit_next_service_flow(
datapath, self.tbl_num, outbound_match, [],
priority=flows.MINIMUM_PRIORITY,
resubmit_table=self.next_main_table)
def _delete_all_flows(self, datapath: Datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self.ip_rewrite_scratch)
#flows.delete_all_flows_from_table(datapath, self.mac_rewrite_scratch)
|
py | b407377d7e722af3d0c4465f8fb19ccbaa9cc279 | def animal(bicho):
animal = {
'aguia' : ['vertebrado', 'ave', 'carnivoro'],
'pomba' : ['vertebrado', 'ave', 'onivoro'],
'homem' : ['vertebrado', 'mamifero', 'onivoro'],
'vaca' : ['vertebrado', 'mamifero', 'herbivoro'],
'pulga' : ['invertebrado', 'inseto', 'hematofago'],
'lagarta' : ['invertebrado', 'inseto', 'herbivoro'],
'sanguessuga' : ['invertebrado', 'anelideo', 'hematofago'],
'minhoca' : ['invertebrado', 'anelideo', 'onivoro'],
}
for nome, ordem in animal.items():
if bicho == ordem: return print(nome)
if __name__ == '__main__':
entrada = [input(), input(), input()]
animal(entrada)
|
py | b40737c1a8ad0b930bdadd6d4e32251dfaa09837 | import argparse
import json
import logging
import os
from ray._private.runtime_env.context import RuntimeEnvContext
from ray.core.generated.common_pb2 import Language
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description=(
"Set up the environment for a Ray worker and launch the worker."))
parser.add_argument(
"--serialized-runtime-env",
type=str,
help="the serialized parsed runtime env dict")
parser.add_argument(
"--serialized-runtime-env-context",
type=str,
help="the serialized runtime env context")
parser.add_argument(
"--allocated-instances-serialized-json",
type=str,
help="the worker allocated resource")
parser.add_argument(
"--language", type=str, help="the language type of the worker")
def get_tmp_dir(remaining_args):
for arg in remaining_args:
if arg.startswith("--temp-dir="):
return arg[11:]
return None
def parse_allocated_resource(allocated_instances_serialized_json):
container_resource_args = []
allocated_resource = json.loads(allocated_instances_serialized_json)
if "CPU" in allocated_resource.keys():
cpu_resource = allocated_resource["CPU"]
if isinstance(cpu_resource, list):
# cpuset: because we may split one cpu core into some pieces,
# we need set cpuset.cpu_exclusive=0 and set cpuset-cpus
cpu_ids = []
cpu_shares = 0
for idx, val in enumerate(cpu_resource):
if val > 0:
cpu_ids.append(idx)
cpu_shares += val
container_resource_args.append("--cpu-shares=" +
str(int(cpu_shares / 10000 * 1024)))
container_resource_args.append("--cpuset-cpus=" + ",".join(
str(e) for e in cpu_ids))
else:
# cpushare
container_resource_args.append(
"--cpu-shares=" + str(int(cpu_resource / 10000 * 1024)))
if "memory" in allocated_resource.keys():
container_resource_args.append(
"--memory=" + str(int(allocated_resource["memory"] / 10000)))
return container_resource_args
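# Illustrative only: the resource quantities are treated as scaled by 10000,
# so an allocation of '{"CPU": 20000, "memory": 1000000}' is translated into
# ["--cpu-shares=2048", "--memory=100"], while a per-core list such as
# '{"CPU": [10000, 5000]}' yields ["--cpu-shares=1536", "--cpuset-cpus=0,1"].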
def start_worker_in_container(container_option, args, remaining_args):
worker_setup_hook = args.worker_setup_hook
last_period_idx = worker_setup_hook.rfind(".")
module_name = worker_setup_hook[:last_period_idx]
# python -m ray.workers.setup_runtime_env --session-dir=
# default_worker.py --node-ip-address= ...
entrypoint_args = ["-m"]
entrypoint_args.append(module_name)
# replace default_worker.py path
if container_option.get("worker_path"):
remaining_args[1] = container_option.get("worker_path")
entrypoint_args.extend(remaining_args)
# now we will start a container, add argument worker-shim-pid
entrypoint_args.append("--worker-shim-pid={}".format(os.getpid()))
tmp_dir = get_tmp_dir(remaining_args)
if not tmp_dir:
logger.error(
"failed to get tmp_dir, the args: {}".format(remaining_args))
container_driver = "podman"
# todo add cgroup config
# todo flag "--rm"
container_command = [
container_driver, "run", "-v", tmp_dir + ":" + tmp_dir,
"--cgroup-manager=cgroupfs", "--network=host", "--pid=host",
"--ipc=host", "--env-host"
]
container_command.append("--env")
container_command.append("RAY_RAYLET_PID=" + str(os.getppid()))
if container_option.get("run_options"):
container_command.extend(container_option.get("run_options"))
container_command.extend(
parse_allocated_resource(args.allocated_instances_serialized_json))
container_command.append("--entrypoint")
container_command.append("python")
container_command.append(container_option.get("image"))
container_command.extend(entrypoint_args)
logger.warning("start worker in container: {}".format(container_command))
os.execvp(container_driver, container_command)
if __name__ == "__main__":
args, remaining_args = parser.parse_known_args()
runtime_env: dict = json.loads(args.serialized_runtime_env or "{}")
container_option = runtime_env.get("container")
if container_option and container_option.get("image"):
start_worker_in_container(container_option, args, remaining_args)
else:
# NOTE(edoakes): args.serialized_runtime_env_context is only None when
# we're starting the main Ray client proxy server. That case should
# probably not even go through this codepath.
runtime_env_context = RuntimeEnvContext.deserialize(
args.serialized_runtime_env_context or "{}")
runtime_env_context.exec_worker(remaining_args,
Language.Value(args.language))
|
py | b40738b8bcd11aad02c65f5a3603e41a93691126 | from ontolearn import KnowledgeBase, LengthBasedRefinement
from conceptgenerator import CustomLearningProblemGenerator
from collections import defaultdict, Counter
import random, os, copy, numpy as np, pandas as pd
class DataTriples:
"""
This class takes an owl file, loads it using ontolearn.base.KnowledgeBase resulting in a knowledge base.
A refinement operator is used to generate new concepts of various lengths.
The knowledge base is then converted into triples of the form: individual_i ---role_j---> concept_k and stored in a txt file (train.txt).
The lengths and the respective positive and negative examples of each concept are also stored in dedicated dictionaries.
"""
def __init__(self, path='', num_generation_paths=10, path_length=12, num_of_concept_per_length=80, min_child_length=2, num_ex=1000, concept_redundancy_rate=0.):
self.path = path
kb = KnowledgeBase(path=path)
self.concept_redundancy_rate = concept_redundancy_rate
self.kb = kb
self.num_ex = num_ex
self.atomic_concepts = list(kb.get_all_concepts())
self.atomic_concept_names = set([a.str for a in list(kb.get_all_concepts())])
rho = LengthBasedRefinement(kb)
self.lp_gen = CustomLearningProblemGenerator(knowledge_base=kb, refinement_operator=rho, num_problems=num_generation_paths, depth=path_length, min_length=min_child_length)
def set_num_of_concepts_per_length(self, l):
self.num_of_concepts_per_length = l
def __base_path(self, path):
for i in range(len(path))[::-1]:
if path[i] == "/":
return i
def kb_to_triples(self, export_folder_name='Triples'):
self.concept_pos_neg = defaultdict(lambda: defaultdict(list))
self.concept_lengths = defaultdict(float)
if not os.path.exists(os.path.join(self.path[:self.__base_path(self.path)], export_folder_name)):
os.mkdir(os.path.join(self.path[:self.__base_path(self.path)], export_folder_name))
train_file = open("%s/train.txt" % os.path.join(self.path[:self.__base_path(self.path)], export_folder_name), mode="w")
non_isolated_file = open("%s/non_isolated.txt" % os.path.join(self.path[:self.__base_path(self.path)], export_folder_name), mode="w")
non_isolated_individuals = set()
for rel in self.kb.property_hierarchy.all_properties:
for tple in rel.get_relations():
train_file.write(str(tple[0])+"\t\t"+str(rel)+"\t\t"+str(tple[1])[:50]+"\n")
non_isolated_individuals.update([str(tple[0]), str(tple[1])])
for indiv in non_isolated_individuals:
non_isolated_file.write(str(indiv)+"\n")
train_file.close(); non_isolated_file.close()
else:
non_isolated_individuals = open(os.path.join("./"+self.path[:self.__base_path(self.path)], export_folder_name)+"/"+"non_isolated.txt", "r")
non_isolated_individuals = non_isolated_individuals.read()
non_isolated_individuals = non_isolated_individuals.split("\n")
print("Example of non isolated individual: ", non_isolated_individuals[0])
self.concept_length_dist = Counter()
All_individuals = set(self.kb.get_all_individuals())
print("Number of individuals in the knowledge base: {} \n".format(len(All_individuals)))
Nodes = set(self.lp_gen)
print("Concepts generation done!\n")
print("Number of atomic concepts: ", len(self.atomic_concepts))
print("Longest concept length: ", np.max([len(n) for n in Nodes]), "\n")
print("Total number of new concepts generated: ", len(Nodes), "\n")
self.train_concepts = []
Concepts = {c.str: c for c in ([node.concept for node in Nodes] + self.atomic_concepts)}.values()
total_concepts = len(Concepts)
No_concept_redundancy_map = dict()
No_redundancy_length_counts = Counter()
for i, concept in enumerate(Concepts):
valid_neg = sorted(set(pd.Series(list(All_individuals-concept.instances)).apply(lambda x: str(x))).intersection(set(non_isolated_individuals)))
valid_pos = sorted(set(pd.Series(list(concept.instances)).apply(lambda x: str(x))).intersection(set(non_isolated_individuals)))
if (i+1)%500 == 0:
print("Progression: {}%".format(round(100.*(i+1)/total_concepts, ndigits=2)))
if min(len(valid_neg),len(valid_pos)) >= self.num_ex//2:
num_pos_ex = self.num_ex//2
num_neg_ex = self.num_ex//2
elif len(valid_pos) >= len(valid_neg) and len(valid_pos) + len(valid_neg) >= self.num_ex:
num_neg_ex = len(valid_neg)
num_pos_ex = self.num_ex-num_neg_ex
elif len(valid_pos) + len(valid_neg)>=self.num_ex:
num_pos_ex = len(valid_pos)
num_neg_ex = self.num_ex-num_pos_ex
else:
continue
positive = list(random.sample(valid_pos, num_pos_ex))#valid_pos[:num_pos_ex]
negative = list(random.sample(valid_neg, num_neg_ex))#valid_neg[:num_neg_ex]
if self.concept_length_dist[concept.length] < self.num_of_concepts_per_length:
instance_statistics = {atomic: 0 for atomic in self.atomic_concept_names}
for ind in concept.instances:
types = set([str(t).split(".")[-1] for t in ind.is_a])
for t in types.intersection(self.atomic_concept_names):
instance_statistics[t] += 1
instance_statistics.update({"num_pos_examples": len(concept.instances)})
self.concept_length_dist.update([concept.length])
if not concept.str in self.concept_pos_neg:
self.concept_pos_neg[concept.str]["positive"] = positive
self.concept_pos_neg[concept.str]["negative"] = negative
self.concept_pos_neg[concept.str]["stats"] = list(instance_statistics.values())
rand = random.random()
if str(valid_pos) in No_concept_redundancy_map and No_concept_redundancy_map[str(valid_pos)].length > concept.length:
No_concept_redundancy_map[str(valid_pos)] = concept
No_redundancy_length_counts.update([concept.length])
elif (str(valid_pos) in No_concept_redundancy_map and\
No_redundancy_length_counts[concept.length] < max(No_redundancy_length_counts.values())*self.concept_redundancy_rate):#and No_concept_redundancy_map[str(valid_pos)].length > concept.length:
No_concept_redundancy_map[str(valid_pos)+str(random.random())] = concept
No_redundancy_length_counts.update([concept.length])
elif not str(valid_pos) in No_concept_redundancy_map:
No_concept_redundancy_map[str(valid_pos)] = concept
No_redundancy_length_counts.update([concept.length])
self.No_concept_redundancy_map = No_concept_redundancy_map
print("Data preprocessing ended successfully")
def save_train_data(self):
data = {"concept name": [], "positive examples": [], "negative examples": [], "pos ex stats": [], "concept length": []}
for concept in self.No_concept_redundancy_map.values():
data["concept name"].append(concept.str)
data["positive examples"].append(self.concept_pos_neg[concept.str]["positive"])
data["negative examples"].append(self.concept_pos_neg[concept.str]["negative"])
data["pos ex stats"].append(self.concept_pos_neg[concept.str]["stats"])
data["concept length"].append(concept.length)
pd.DataFrame(data).to_csv("./"+("/").join(self.path.split("/")[1:-1])+"/"+"data.csv")
print("Data saved at %s"% "/"+("/").join(self.path.split("/")[1:-1]))
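# Usage sketch (illustrative; the ontology path is a placeholder):
#
#   data = DataTriples(path='datasets/family/family.owl', num_ex=1000)
#   data.set_num_of_concepts_per_length(80)
#   data.kb_to_triples(export_folder_name='Triples')
#   data.save_train_data()
#
# kb_to_triples writes train.txt / non_isolated.txt into the export folder and
# fills the per-concept positive/negative example dictionaries, which
# save_train_data then dumps to data.csv.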
|
py | b40739262b50360f02b8df82aebdb29354b5a0ef | from torch.nn.modules.module import Module
import torch
import numpy as np
from torch.autograd import Variable
from ..functions import *
import torch.nn.functional as F
from ..functions.GANet import MyLossFunction
from ..functions.GANet import SgaFunction
from ..functions.GANet import NlfFunction
from ..functions.GANet import LgaFunction
from ..functions.GANet import Lga2Function
from ..functions.GANet import Lga3Function
from ..functions.GANet import Lga3dFunction
from ..functions.GANet import Lga3d2Function
from ..functions.GANet import Lga3d3Function
from ..functions.GANet import MyLoss2Function
from ..functions.GANet import NlfDownFunction
from ..functions.GANet import NlfUpFunction
from ..functions.GANet import NlfRightFunction
from ..functions.GANet import NlfLeftFunction
class ChannelNorm(Module):
def __init__(self, eps=1e-5):
super(ChannelNorm, self).__init__()
# self.weight = nn.Parameter(torch.ones(1,num_features,1,1))
# self.bias = nn.Parameter(torch.zeros(1,num_features,1,1))
# self.num_groups = num_groups
self.eps = eps
def forward(self, x):
# N,C,H,W = x.size()
# G = self.num_groups
# x = x.view(N,G,-1)
mean = x.mean(1, keepdim=True)
var = x.var(1, keepdim=True)
x = (x-mean) / (var+self.eps).sqrt()
return x
# x = x.view(N,C,H,W)
# return x * self.weight + self.bias
class GetWeights(Module):
def __init__(self, wsize=5):
super(GetWeights, self).__init__()
self.wsize = wsize
# self.disp = Variable(torch.Tensor(np.reshape(np.array(range(self.maxdisp)),[1,self.maxdisp,1,1])).cuda(), requires_grad=False)
def forward(self, x):
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
# x = F.normalize(x, p=2, dim=1)
num, channels, height, width = x.size()
weight_down = x.new().resize_(num, 5, height, width).zero_()
weight_down[:, 0, :, :] = torch.sum(x * x, 1)
weight_down[:, 1, 1:, :] = torch.sum(x[:, :, 1:, :] * x[:, :, :-1, :], 1)
weight_down[:, 2, 1:, 1:] = torch.sum(x[:, :, 1:, 1:] * x[:, :, :-1, :-1], 1)
weight_down[:, 3, 1:, :-1] = torch.sum(x[:, :, 1:, :-1] * x[:, :, :-1, 1:], 1)
weight_down[:, 4, :, 1:] = torch.sum(x[:, :, :, 1:] * x[:, :, :, :-1], 1)
weight_up = x.new().resize_(num, 5, height, width).zero_()
weight_up[:, 0, :, :] = torch.sum(x * x, 1)
weight_up[:, 1, :-1, :] = torch.sum(x[:, :, :-1, :] * x[:, :, 1:, :], 1)
weight_up[:, 2, :-1, 1:] = torch.sum(x[:, :, :-1, 1:] * x[:, :, 1:, :-1], 1)
weight_up[:, 3, :-1, :-1] = torch.sum(x[:, :, :-1, :-1] * x[:, :, 1:, 1:], 1)
weight_up[:, 4, :, :-1] = torch.sum(x[:, :, :, :-1] * x[:, :, :, 1:], 1)
weight_right = x.new().resize_(num, 5, height, width).zero_()
weight_right[:, 0, :, :] = torch.sum(x * x, 1)
weight_right[:, 1, :, 1:] = torch.sum(x[:, :, :, 1:] * x[:, :, :, :-1], 1)
weight_right[:, 2, 1:, 1:] = torch.sum(x[:, :, 1:, 1:] * x[:, :, :-1, :-1], 1)
weight_right[:, 3, :-1, 1:] = torch.sum(x[:, :, :-1, 1:] * x[:, :, 1:, :-1], 1)
weight_right[:, 4, 1:, :] = torch.sum(x[:, :, 1:, :] * x[:, :, :-1, :], 1)
weight_left = x.new().resize_(num, 5, height, width).zero_()
weight_left[:, 0, :, :] = torch.sum(x * x, 1)
weight_left[:, 1, :, :-1] = torch.sum(x[:, :, :, :-1] * x[:, :, :, 1:], 1)
weight_left[:, 2, 1:, :-1] = torch.sum(x[:, :, 1:, :-1] * x[:, :, :-1, 1:], 1)
weight_left[:, 3, :-1, :-1] = torch.sum(x[:, :, :-1, :-1] * x[:, :, 1:, 1:], 1)
weight_left[:, 4, :-1, :] = torch.sum(x[:, :, :-1, :] * x[:, :, 1:, :], 1)
# weight_down = F.normalize(weight_down, p=1, dim=1)
# weight_up = F.normalize(weight_up, p=1, dim=1)
# weight_right = F.normalize(weight_right, p=1, dim=1)
# weight_left = F.normalize(weight_left, p=1, dim=1)
weight_down = F.softmax(weight_down, dim=1)
weight_up = F.softmax(weight_up, dim=1)
weight_right = F.softmax(weight_right, dim=1)
weight_left = F.softmax(weight_left, dim=1)
weight_down = weight_down.contiguous()
weight_up = weight_up.contiguous()
weight_right = weight_right.contiguous()
weight_left = weight_left.contiguous()
return weight_down, weight_up, weight_right, weight_left
class GetFilters(Module):
def __init__(self, radius=2):
super(GetFilters, self).__init__()
self.radius = radius
self.wsize = (radius*2 + 1) * (radius*2 + 1)
# self.disp = Variable(torch.Tensor(np.reshape(np.array(range(self.maxdisp)),[1,self.maxdisp,1,1])).cuda(), requires_grad=False)
def forward(self, x):
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
x = F.normalize(x, p=2, dim=1)
# num, channels, height, width = x.size()
# rem = torch.unsqueeze(x, 2).repeat(1, 1, self.wsize, 1, 1)
#
# temp = x.new().resize_(num, channels, self.wsize, height, width).zero_()
# idx = 0
# for r in range(-self.radius, self.radius+1):
# for c in range(-self.radius, self.radius+1):
# temp[:, :, idx, max(-r, 0):min(height - r, height), max(-c,0):min(width-c, width)] = x[:, :, max(r, 0):min(height + r, height), max(c, 0):min(width + c, width)]
# idx += 1
# filters = torch.squeeze(torch.sum(rem*temp, 1), 1)
# filters = F.normalize(filters, p=1, dim=1)
# filters = filters.contiguous()
# return filters
num, channels, height, width = x.size()
filters = x.new().resize_(num, self.wsize, height, width).zero_()
idx = 0
for r in range(-self.radius, self.radius+1):
for c in range(-self.radius, self.radius+1):
filters[:, idx, max(-r, 0):min(height - r, height), max(-c,0):min(width-c, width)] = torch.squeeze(torch.sum(x[:, :, max(r, 0):min(height + r, height), max(c, 0):min(width + c, width)] * x[:,:,max(-r, 0):min(height-r, height), max(-c,0):min(width-c,width)], 1),1)
idx += 1
filters = F.normalize(filters, p=1, dim=1)
filters = filters.contiguous()
return filters
class MyNormalize(Module):
def __init__(self, dim):
self.dim = dim
super(MyNormalize, self).__init__()
def forward(self, x):
# assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
norm = torch.sum(torch.abs(x),self.dim)
norm[norm <= 0] = norm[norm <= 0] - 1e-6
norm[norm >= 0] = norm[norm >= 0] + 1e-6
norm = torch.unsqueeze(norm, self.dim)
size = np.ones(x.dim(), dtype='int')
size[self.dim] = x.size()[self.dim]
norm = norm.repeat(*size)
x = torch.div(x, norm)
return x
class MyLoss2(Module):
def __init__(self, thresh=1, alpha=2):
super(MyLoss2, self).__init__()
self.thresh = thresh
self.alpha = alpha
def forward(self, input1, input2):
result = MyLoss2Function.apply(input1, input2, self.thresh, self.alpha)
return result
class MyLoss(Module):
def __init__(self, upper_thresh=5, lower_thresh=1):
super(MyLoss, self).__init__()
        self.upper_thresh = upper_thresh
        self.lower_thresh = lower_thresh
def forward(self, input1, input2):
result = MyLossFunction.apply(input1, input2, self.upper_thresh, self.lower_thresh)
return result
class NLFMax(Module):
def __init__(self):
super(NLFMax, self).__init__()
def forward(self, input, g0, g1, g2, g3):
result0 = NlfDownFunction.apply(input, g0)
result1 = NlfUpFunction.apply(input, g1)
result2 = NlfRightFunction.apply(input, g2)
result3 = NlfLeftFunction.apply(input, g3)
return torch.max(torch.max(torch.max(result0, result1), result2), result3)
class NLFMean(Module):
def __init__(self):
super(NLFMean, self).__init__()
def forward(self, input, g0, g1, g2, g3):
result0 = NlfDownFunction.apply(input, g0)
result1 = NlfUpFunction.apply(input, g1)
result2 = NlfRightFunction.apply(input, g2)
result3 = NlfLeftFunction.apply(input, g3)
# result1 = NlfUpFunction()(input, g1)
# result2 = NlfRightFunction()(input, g2)
# result3 = NlfLeftFunction()(input, g3)
# return torch.add(torch.add(torch.add(result0, result1), result2), result3)
return (result0 + result1 + result2 + result3) * 0.25
class NLFIter(Module):
def __init__(self):
super(NLFIter, self).__init__()
def forward(self, input, g0, g1, g2, g3):
result = NlfDownFunction.apply(input, g0)
result = NlfUpFunction.apply(result, g1)
result = NlfRightFunction.apply(result, g2)
result = NlfLeftFunction.apply(result, g3)
return result
class NLF(Module):
def __init__(self):
super(NLF, self).__init__()
def forward(self, input, g0, g1, g2, g3):
result = NlfFunction.apply(input, g0, g1, g2, g3)
return result
class SGA(Module):
def __init__(self):
super(SGA, self).__init__()
def forward(self, input, g0, g1, g2, g3):
result = SgaFunction.apply(input, g0, g1, g2, g3)
return result
class LGA3D3(Module):
def __init__(self, radius=2):
super(LGA3D3, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = Lga3d3Function.apply(input1, input2, self.radius)
return result
class LGA3D2(Module):
def __init__(self, radius=2):
super(LGA3D2, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = Lga3d2Function.apply(input1, input2, self.radius)
return result
class LGA3D(Module):
def __init__(self, radius=2):
super(LGA3D, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = Lga3dFunction.apply(input1, input2, self.radius)
return result
class LGA3(Module):
def __init__(self, radius=2):
super(LGA3, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = Lga3Function.apply(input1, input2, self.radius)
return result
class LGA2(Module):
def __init__(self, radius=2):
super(LGA2, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = Lga2Function.apply(input1, input2, self.radius)
return result
class LGA(Module):
def __init__(self, radius=2):
super(LGA, self).__init__()
self.radius = radius
def forward(self, input1, input2):
result = LgaFunction.apply(input1, input2, self.radius)
return result
class GetCostVolume(Module):
def __init__(self, maxdisp):
super(GetCostVolume, self).__init__()
self.maxdisp=maxdisp+1
def forward(self, x,y):
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
num, channels, height, width = x.size()
cost = x.new().resize_(num, channels*2, self.maxdisp, height, width).zero_()
# cost = Variable(torch.FloatTensor(x.size()[0], x.size()[1]*2, self.maxdisp, x.size()[2], x.size()[3]).zero_(), volatile= not self.training).cuda()
for i in range(self.maxdisp):
if i > 0 :
cost[:, :x.size()[1], i, :,i:] = x[:,:,:,i:]
cost[:, x.size()[1]:, i, :,i:] = y[:,:,:,:-i]
else:
cost[:, :x.size()[1], i, :,:] = x
cost[:, x.size()[1]:, i, :,:] = y
cost = cost.contiguous()
return cost
class DisparityRegression(Module):
def __init__(self, maxdisp):
super(DisparityRegression, self).__init__()
self.maxdisp = maxdisp+1
# self.disp = Variable(torch.Tensor(np.reshape(np.array(range(self.maxdisp)),[1,self.maxdisp,1,1])).cuda(), requires_grad=False)
def forward(self, x):
assert(x.is_contiguous() == True)
with torch.cuda.device_of(x):
disp = Variable(torch.Tensor(np.reshape(np.array(range(self.maxdisp)),[1,self.maxdisp,1,1])).cuda(), requires_grad=False)
disp = disp.repeat(x.size()[0],1,x.size()[2],x.size()[3])
out = torch.sum(x*disp,1)
return out
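if __name__ == "__main__":
    # CPU-only sketch, not part of the original module. It relies on
    # torch.cuda.device_of being a no-op for CPU tensors and must be run as
    # part of the package (python -m ...) so the relative imports resolve.
    left = torch.randn(1, 8, 6, 6)
    right = torch.randn(1, 8, 6, 6)
    print(ChannelNorm()(left).shape)                    # torch.Size([1, 8, 6, 6])
    print(GetCostVolume(maxdisp=3)(left, right).shape)  # torch.Size([1, 16, 4, 6, 6])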
|
py | b4073969c5fe35e0bf6ec418850ac5848f0921dd | from imports import *
from flask import make_response, jsonify
app = Flask(
__name__, static_url_path="/storage/emulated/0", static_folder="/storage/emulated/0"
)
@app.route("/")
@app.route("/home")
def home():
return render_template(
"home_1.html",
title="Home",
)
@app.errorhandler(404)
def not_found(e):
return render_template("404.html")
@app.route("/photos", methods=["POST", "GET"])
def photos():
all_files = []
for photo in photos_dir:
files_ = list_files(photo)
for a_file in files_:
all_files.append(a_file)
photos = []
for photo in all_files:
if photo.rpartition(".")[-1] in photolst:
photos.append(photo)
return render_template(
"photos.html",
title="Photos",
photos=photos,
length=len(photos),
len_dec=int(len(photos) / 100),
)
@app.route("/documents")
def documents():
all_files = []
all_files_names = []
for doc in document_dir:
files_ = list_files(doc)
files_names = list_files_name(doc)
for a_file in files_:
all_files.append(a_file)
for a_file_name in files_names:
all_files_names.append(a_file_name)
documents = []
documents_names = []
for i in range(0, len(all_files)):
if all_files[i].rpartition(".")[2] in doclst:
documents.append(all_files[i])
documents_names.append(all_files_names[i])
return render_template(
"document.html",
title="Document",
documents=documents,
len=len(documents),
document_name=documents_names,
)
@app.route("/music")
def music():
all_files = []
all_files_names = []
for music_ in music_dir:
files_ = list_files(music_)
files_names = list_files_name_shortened(music_)
for a_file in files_:
all_files.append(a_file)
for a_file_name in files_names:
all_files_names.append(a_file_name)
ids = []
music = []
music_names = []
for i in range(0, len(all_files)):
if all_files[i].rpartition(".")[2] in musiclst:
music.append(all_files[i])
music_names.append(all_files_names[i])
for i in range(1, len(music) + 1):
ids.append(i)
return render_template(
"music.html",
title="Music",
music=music,
len=len(music),
music_name=music_names,
ids=ids,
)
@app.route("/video")
def video():
all_files = []
all_files_names = []
for video_ in video_dir:
files_ = list_files(video_)
files_names = list_files_name(video_)
for a_file in files_:
all_files.append(a_file)
for a_file_name in files_names:
all_files_names.append(a_file_name)
videos = []
video_names = []
for i in range(0, len(all_files)):
if all_files[i].rpartition(".")[2] in videolst:
videos.append(all_files[i])
video_names.append(all_files_names[i])
return render_template(
"video(1).html",
title="Video",
videos=videos,
len=len(videos),
video_names=video_names,
)
@app.route("/findPhone", methods=["GET", "POST"])
def findPhone():
if request.method == "POST":
passed = request.form["data"]
if passed == "Play":
try:
os.system("termux-media-player play iphone_6-30.ogg")
return {"Message": "Playing"}
except:
pass
else:
try:
os.system("termux-media-player stop")
return {"Message": "Stopped"}
except:
pass
return redirect("/home")
# @app.route("/notification")
# def notif():
# notifs = subprocess.check_output("termux-notification-list")
# for notif in notifs:
# print(json.dumps(json.loads(notif)))
# return render_template("notif.html", title="Notifications", notifs=notifs)
@app.route("/getBattery", methods=["GET", "POST"])
def getBattery():
if request.method == "POST":
return jsonify({"Message": battery()})
return redirect("/home")
@app.route("/contact")
def contact():
contacts = subprocess.check_output("termux-contact-list")
contact = contacts.decode("utf8").replace("'", '"')
data = json.loads(contact)
s = json.dumps(data)
return render_template(
"contact.html",
title="Contacts",
contacts=s,
)
@app.route("/call", methods=["POST", "GET"])
def call():
to_call = request.form["phone"]
try:
os.system("termux-telephony-call " + to_call)
except:
pass
return redirect("/home")
@app.route("/clipboard", methods=["GET", "POST"])
def get_clipboard():
if request.method == "GET":
return jsonify(
{
"Message": str(
subprocess.check_output("termux-clipboard-get").decode("utf8")
)
}
)
return redirect("/home")
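if __name__ == "__main__":
    # The dump does not show how the server is started; on a Termux device a
    # plausible entry point is the following (host, port and debug flag are
    # assumptions, not taken from the original project).
    app.run(host="0.0.0.0", port=5000)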
|
py | b407398608a63a0415f980174d6f458dc07a8dbb | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import random
import re
import six
import tensorflow.compat.v1 as tf
from realformer import realformer
class BertModelTest(tf.test.TestCase):
class BertModelTester(object):
def __init__(self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.scope = scope
def create_model(self):
input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length],
self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = BertModelTest.ids_tensor(
[self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = BertModelTest.ids_tensor(
[self.batch_size, self.seq_length], self.type_vocab_size)
config = realformer.BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range)
model = realformer.BertModel(
config=config,
is_training=self.is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=token_type_ids,
scope=self.scope)
outputs = {
"embedding_output": model.get_embedding_output(),
"sequence_output": model.get_sequence_output(),
"pooled_output": model.get_pooled_output(),
"all_encoder_layers": model.get_all_encoder_layers(),
}
return outputs
def check_output(self, result):
self.parent.assertAllEqual(
result["embedding_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertAllEqual(
result["sequence_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertAllEqual(result["pooled_output"].shape,
[self.batch_size, self.hidden_size])
def test_default(self):
self.run_tester(BertModelTest.BertModelTester(self))
def test_config_to_json_string(self):
config = realformer.BertConfig(vocab_size=99, hidden_size=37)
obj = json.loads(config.to_json_string())
self.assertEqual(obj["vocab_size"], 99)
self.assertEqual(obj["hidden_size"], 37)
def run_tester(self, tester):
with self.test_session() as sess:
ops = tester.create_model()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
output_result = sess.run(ops)
tester.check_output(output_result)
self.assert_all_tensors_reachable(sess, [init_op, ops])
@classmethod
def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
def assert_all_tensors_reachable(self, sess, outputs):
"""Checks that all the tensors in the graph are reachable from outputs."""
graph = sess.graph
ignore_strings = [
"^.*/assert_less_equal/.*$",
"^.*/dilation_rate$",
"^.*/Tensordot/concat$",
"^.*/Tensordot/concat/axis$",
"^testing/.*$",
]
ignore_regexes = [re.compile(x) for x in ignore_strings]
unreachable = self.get_unreachable_ops(graph, outputs)
filtered_unreachable = []
for x in unreachable:
do_ignore = False
for r in ignore_regexes:
m = r.match(x.name)
if m is not None:
do_ignore = True
if do_ignore:
continue
filtered_unreachable.append(x)
unreachable = filtered_unreachable
self.assertEqual(
len(unreachable), 0, "The following ops are unreachable: %s" %
(" ".join([x.name for x in unreachable])))
@classmethod
def get_unreachable_ops(cls, graph, outputs):
"""Finds all of the tensors in graph that are unreachable from outputs."""
outputs = cls.flatten_recursive(outputs)
output_to_op = collections.defaultdict(list)
op_to_all = collections.defaultdict(list)
assign_out_to_in = collections.defaultdict(list)
for op in graph.get_operations():
for x in op.inputs:
op_to_all[op.name].append(x.name)
for y in op.outputs:
output_to_op[y.name].append(op.name)
op_to_all[op.name].append(y.name)
if str(op.type) == "Assign":
for y in op.outputs:
for x in op.inputs:
assign_out_to_in[y.name].append(x.name)
assign_groups = collections.defaultdict(list)
for out_name in assign_out_to_in.keys():
name_group = assign_out_to_in[out_name]
for n1 in name_group:
assign_groups[n1].append(out_name)
for n2 in name_group:
if n1 != n2:
assign_groups[n1].append(n2)
seen_tensors = {}
stack = [x.name for x in outputs]
while stack:
name = stack.pop()
if name in seen_tensors:
continue
seen_tensors[name] = True
if name in output_to_op:
for op_name in output_to_op[name]:
if op_name in op_to_all:
for input_name in op_to_all[op_name]:
if input_name not in stack:
stack.append(input_name)
expanded_names = []
if name in assign_groups:
for assign_name in assign_groups[name]:
expanded_names.append(assign_name)
for expanded_name in expanded_names:
if expanded_name not in stack:
stack.append(expanded_name)
unreachable_ops = []
for op in graph.get_operations():
is_unreachable = False
all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
for name in all_names:
if name not in seen_tensors:
is_unreachable = True
if is_unreachable:
unreachable_ops.append(op)
return unreachable_ops
@classmethod
def flatten_recursive(cls, item):
"""Flattens (potentially nested) a tuple/dictionary/list to a list."""
output = []
if isinstance(item, list):
output.extend(item)
elif isinstance(item, tuple):
output.extend(list(item))
elif isinstance(item, dict):
for (_, v) in six.iteritems(item):
output.append(v)
else:
return [item]
flat_output = []
for x in output:
flat_output.extend(cls.flatten_recursive(x))
return flat_output
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
|
py | b4073a213da55b416141036502c3d25e2d22ed63 | # -*-: coding utf-8 -*-
""" Skeleton Snips skill. """
import re
import json
import os
import datetime
from text2num import text2num
from collections import defaultdict
FORMAT = '%Y.%m.%dT%H:%M:%S'
class PingPongSkill(object):
""" Skeleton Snips skill. """
def __init__(self):
pass
def handle_loser(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
            print "No match registered"
return
loser = sorted(perfs.iteritems(), key=lambda x: x[1])[0][0]
print "The one who lost the most matches is {}".format(loser)
def handle_winner(self):
db = JsonDB()
perfs = db.compute_perfs()
if len(perfs) == 0:
            print "No match registered"
            return
        winner = sorted(perfs.iteritems(), key=lambda x: -x[1])[0][0]
        print "The one who won the most matches is {}".format(winner)
def handle_terminate_game(self, winner, loser, score):
print "*** {} {} {}".format(winner, loser, score)
try:
score = parse_core(score)
        except ValueError, err:
            print err
            return
db = JsonDB()
timestamp = datetime.datetime.now().strftime(FORMAT)
db.add(winner, loser, score[0], score[1], timestamp)
print "I added the match {} versus {}: score: {}".format(winner,
loser,
score)
regex = re.compile('([\w\s]+)to([\w\s]+)')
def parse_core(score):
match = regex.search(score)
if not match or len(match.groups()) != 2:
raise ValueError("{} is an incorrect score".format(score))
score_1 = text2num(match.groups()[0].strip())
score_2 = text2num(match.groups()[1].strip())
if score_1 != 11 and score_2 != 11:
raise ValueError(
"{} is an incorrect score: one of the player needs to have "
"11".format(
score))
return sorted([score_1, score_2], reverse=True)
class JsonDB(object):
path = 'ping_pong_db.json'
def __init__(self):
if not os.path.exists(self.path):
self._results = []
else:
with open(self.path, 'r') as f:
results = json.load(f)
self._results = results
def add(self, player_1, player_2, score_player_1, score_player_2,
datetime_str):
self._results += [
(datetime_str, player_1, player_2, score_player_1, score_player_2)]
self.save_results()
def save_results(self):
with open(self.path, 'w') as f:
json.dump(self._results, f)
def compute_perfs(self):
player_to_win = defaultdict(int)
player_to_lose = defaultdict(int)
for _, win, lose, _, _ in self._results:
player_to_win[win] += 1
player_to_lose[lose] += 1
player_to_proportion = {}
for player in set(player_to_win.keys() + player_to_lose.keys()):
proportion = float(player_to_win[player]) / (
player_to_win[player] + player_to_lose[player])
player_to_proportion[player] = proportion
return player_to_proportion
if __name__ == '__main__':
scores = [
'eleven to two',
'twenty to eleven'
]
for score in scores:
print parse_core(score)
PingPongSkill().handle_loser()
PingPongSkill().handle_terminate_game('thib', 'alex', 'eleven to two')
PingPongSkill().handle_loser()
|
py | b4073a22c3c77d753f350687d592872e1f701f69 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Hurst exponent and RS-analysis
https://en.wikipedia.org/wiki/Hurst_exponent
https://en.wikipedia.org/wiki/Rescaled_range
"""
name = "hurst"
__version__ = '0.0.3'
import sys
import math
import warnings
import numpy as np
try:
import pandas as pd
except:
pass
def __to_inc(x):
incs = x[1:] - x[:-1]
return incs
def __to_pct(x):
pcts = x[1:] / x[:-1] - 1.
return pcts
def __get_simplified_RS(series, kind):
"""
Simplified version of rescaled range
Parameters
----------
series : array-like
(Time-)series
kind : str
The kind of series (refer to compute_Hc docstring)
"""
if kind == 'random_walk':
incs = __to_inc(series)
R = max(series) - min(series) # range in absolute values
S = np.std(incs, ddof=1)
elif kind == 'price':
pcts = __to_pct(series)
R = max(series) / min(series) - 1. # range in percent
S = np.std(pcts, ddof=1)
elif kind == 'change':
incs = series
_series = np.hstack([[0.],np.cumsum(incs)])
R = max(_series) - min(_series) # range in absolute values
S = np.std(incs, ddof=1)
if R == 0 or S == 0:
return 0 # return 0 to skip this interval due to the undefined R/S ratio
return R / S
def __get_RS(series, kind):
"""
Get rescaled range (using the range of cumulative sum
of deviations instead of the range of a series as in the simplified version
of R/S) from a time-series of values.
Parameters
----------
series : array-like
(Time-)series
kind : str
The kind of series (refer to compute_Hc docstring)
"""
if kind == 'random_walk':
incs = __to_inc(series)
mean_inc = (series[-1] - series[0]) / len(incs)
deviations = incs - mean_inc
Z = np.cumsum(deviations)
R = max(Z) - min(Z)
S = np.std(incs, ddof=1)
elif kind == 'price':
incs = __to_pct(series)
mean_inc = np.sum(incs) / len(incs)
deviations = incs - mean_inc
Z = np.cumsum(deviations)
R = max(Z) - min(Z)
S = np.std(incs, ddof=1)
elif kind == 'change':
incs = series
mean_inc = np.sum(incs) / len(incs)
deviations = incs - mean_inc
Z = np.cumsum(deviations)
R = max(Z) - min(Z)
S = np.std(incs, ddof=1)
if R == 0 or S == 0:
return 0 # return 0 to skip this interval due to the undefined R/S ratio
return R / S
def compute_Hc(series, kind="random_walk", min_window=10, max_window=None, simplified=True):
"""
Compute H (Hurst exponent) and C according to Hurst equation:
E(R/S) = c * T^H
Refer to:
https://en.wikipedia.org/wiki/Hurst_exponent
https://en.wikipedia.org/wiki/Rescaled_range
https://en.wikipedia.org/wiki/Random_walk
Parameters
----------
series : array-like
(Time-)series
kind : str
Kind of series
possible values are 'random_walk', 'change' and 'price':
- 'random_walk' means that a series is a random walk with random increments;
- 'price' means that a series is a random walk with random multipliers;
- 'change' means that a series consists of random increments
(thus produced random walk is a cumulative sum of increments);
min_window : int, default 10
the minimal window size for R/S calculation
max_window : int, default is the length of series minus 1
the maximal window size for R/S calculation
simplified : bool, default True
whether to use the simplified or the original version of R/S calculation
Returns tuple of
H, c and data
where H and c are parameters of the Hurst equation
and data is a list of 2 lists: time intervals and R/S-values for the corresponding time intervals
for further plotting log(data[0]) on X and log(data[1]) on Y
"""
if len(series)<100:
raise ValueError("Series length must be greater or equal to 100")
ndarray_likes = [np.ndarray]
if "pandas.core.series" in sys.modules.keys():
ndarray_likes.append(pd.core.series.Series)
# convert series to numpy array if series is not numpy array or pandas Series
if type(series) not in ndarray_likes:
series = np.array(series)
if "pandas.core.series" in sys.modules.keys() and type(series) == pd.core.series.Series:
if series.isnull().values.any():
raise ValueError("Series contains NaNs")
series = series.values # convert pandas Series to numpy array
elif np.isnan(np.min(series)):
raise ValueError("Series contains NaNs")
if simplified:
RS_func = __get_simplified_RS
else:
RS_func = __get_RS
err = np.geterr()
np.seterr(all='raise')
max_window = max_window or len(series)-1
window_sizes = list(map(
lambda x: int(10**x),
np.arange(math.log10(min_window), math.log10(max_window), 0.25)))
window_sizes.append(len(series))
RS = []
for w in window_sizes:
rs = []
for start in range(0, len(series), w):
if (start+w)>len(series):
break
_ = RS_func(series[start:start+w], kind)
if _ != 0:
rs.append(_)
RS.append(np.mean(rs))
A = np.vstack([np.log10(window_sizes), np.ones(len(RS))]).T
H, c = np.linalg.lstsq(A, np.log10(RS), rcond=-1)[0]
np.seterr(**err)
c = 10**c
return H, c, [window_sizes, RS]
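# Note on the fit above (descriptive comment, not in the original file): taking
# log10 of E(R/S) = c * T^H gives log10(R/S) = H * log10(T) + log10(c), so the
# least-squares solve returns H as the slope and log10(c) as the intercept,
# which is why c is recovered as 10**c afterwards.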
def random_walk(length, proba=0.5, min_lookback=1, max_lookback=100, cumprod=False):
"""
Generates a random walk series
Parameters
----------
proba : float, default 0.5
the probability that the next increment will follow the trend.
Set proba > 0.5 for the persistent random walk,
set proba < 0.5 for the antipersistent one
min_lookback: int, default 1
max_lookback: int, default 100
minimum and maximum window sizes to calculate trend direction
cumprod : bool, default False
generate a random walk as a cumulative product instead of cumulative sum
"""
assert(min_lookback>=1)
assert(max_lookback>=min_lookback)
if max_lookback > length:
max_lookback = length
warnings.warn("max_lookback parameter has been set to the length of the random walk series.")
if not cumprod: # ordinary increments
series = [0.] * length # array of prices
for i in range(1, length):
if i < min_lookback + 1:
direction = np.sign(np.random.randn())
else:
lookback = np.random.randint(min_lookback, min(i-1, max_lookback)+1)
direction = np.sign(series[i-1] - series[i-1-lookback]) * np.sign(proba - np.random.uniform())
series[i] = series[i-1] + np.fabs(np.random.randn()) * direction
else: # percent changes
series = [1.] * length # array of prices
for i in range(1, length):
if i < min_lookback + 1:
direction = np.sign(np.random.randn())
else:
lookback = np.random.randint(min_lookback, min(i-1, max_lookback)+1)
direction = np.sign(series[i-1] / series[i-1-lookback] - 1.) * np.sign(proba - np.random.uniform())
series[i] = series[i-1] * np.fabs(1 + np.random.randn()/1000. * direction)
return series
if __name__ == '__main__':
# Use random_walk() function or generate a random walk series manually:
# series = random_walk(99999, cumprod=True)
np.random.seed(42)
random_changes = 1. + np.random.randn(99999) / 1000.
series = np.cumprod(random_changes) # create a random walk from random changes
# Evaluate Hurst equation
H, c, data = compute_Hc(series, kind='price', simplified=True)
# Plot
# uncomment the following to make a plot using Matplotlib:
"""
import matplotlib.pyplot as plt
f, ax = plt.subplots()
ax.plot(data[0], c*data[0]**H, color="deepskyblue")
ax.scatter(data[0], data[1], color="purple")
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Time interval')
ax.set_ylabel('R/S ratio')
ax.grid(True)
plt.show()
"""
print("H={:.4f}, c={:.4f}".format(H,c))
assert H<0.6 and H>0.4
|
py | b4073a80bb8c4d6b12b48cc786226816d8161905 | """ Test Suite """
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from nemreader import output_as_csv
from nemreader import output_as_daily_csv
from nemreader import output_as_data_frames
def test_csv_output(tmpdir):
""" Create a temporary csv output """
file_name = "examples/unzipped/Example_NEM12_actual_interval.csv"
output_files = output_as_csv(file_name, output_dir=tmpdir)
assert len(output_files) == 1
def test_daily_csv_output(tmpdir):
""" Create a temporary csv output """
file_name = "examples/unzipped/Example_NEM12_actual_interval.csv"
output_file = output_as_daily_csv(file_name, output_dir=tmpdir)
assert "Example_NEM12_actual_interval_daily_totals.csv" in str(output_file)
def test_data_frame_output():
""" Create a pandas dataframe """
file_name = "examples/unzipped/Example_NEM12_actual_interval.csv"
output_dfs = output_as_data_frames(file_name)
for nmi, df in output_dfs:
assert type(nmi) == str
assert df["quality_method"][0] == "A"
|
py | b4073a8c0b640a53adeb239c986d0a4067a83d10 | from typing import Generator, Any
from front_end.region_creation.region import Region
from front_end.region_creation.input_streams import HashFile
def bytes_to_int(byte_string) -> int:
"""
:param byte_string: a string formatted like b'\xd4\x053K\xd8\xea'
:return: integer value of the byte stream
"""
return int.from_bytes(byte_string, "big")
def create_tttd_regions(minT: int, maxT: int, secondD: int, mainD: int, hash_file: HashFile) -> \
Generator[Region, Any, None]:
"""
minT: minimum threshold (around 1 to 2 MB)
maxT: maximum threshold (around 4 to 5 MB)
secondD: second divisor (270)
mainD: main divisor (540)
"""
minT = minT * 1024 * 1024
maxT = maxT * 1024 * 1024
secondD = secondD
mainD = mainD
# TTTD region creation
currentP = 0
backupBreak = 0
region = Region(maxT)
while hash_file.num_files_processed < hash_file.total_files:
hash_file.hashfile_next_file()
while hash_file.num_hashes_processed_current_file < hash_file.current_file_total_chunks:
fingerprint, compression, current_chunk_size = hash_file.hashfile_next_chunk()
if currentP < minT:
region.add_fingerprint(fingerprint, current_chunk_size)
currentP += current_chunk_size
continue
if (bytes_to_int(fingerprint) % secondD) == secondD - 1: backupBreak = currentP
if (bytes_to_int(fingerprint) % mainD) == mainD - 1:
yield region
backupBreak = 0
currentP = 0
region = Region(maxT)
region.add_fingerprint(fingerprint, current_chunk_size)
currentP += current_chunk_size
continue
if currentP + current_chunk_size < maxT:
region.add_fingerprint(fingerprint, current_chunk_size)
currentP += current_chunk_size
continue
if backupBreak != 0:
yield region
backupBreak = 0
currentP = 0
region = Region(maxT)
elif currentP + current_chunk_size > maxT:
yield region
backupBreak = 0
currentP = 0
region = Region(maxT)
region.add_fingerprint(fingerprint, current_chunk_size)
currentP += current_chunk_size
yield region
def main():
file_name = "/Volumes/Important/Fall-2021/CS6620/macos-2011-06-23-001346 2/macos-2011-06-23-001346.8kb.hash.anon"
hash_file = HashFile(file_name)
for region in create_tttd_regions(2, 4, 270, 540, hash_file):
# sendToBackend(1, server_ip, region)
print(region.current_size)
if __name__ == "__main__":
main()
|
py | b4073b25b360386a63b7731d85bc746453e96c55 | """Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu ([email protected])
"""
import os
from maple.core import logging
from maple.core import static_info
from maple.core import testing
from maple.race import testing as race_testing
from maple.systematic import program
from maple.systematic import search
class ChessTestCase(testing.DeathTestCase):
""" Run a test under the CHESS scheduler.
"""
def __init__(self, test, mode, threshold, controller):
testing.DeathTestCase.__init__(self, test, mode, threshold)
self.controller = controller
def threshold_check(self):
if self.search_done():
return True
if testing.DeathTestCase.threshold_check(self):
return True
return False
def search_done(self):
sinfo = static_info.StaticInfo()
sinfo.load(self.controller.knobs['sinfo_out'])
prog = program.Program(sinfo)
prog.load(self.controller.knobs['program_out'])
search_info = search.SearchInfo(sinfo, program)
search_info.load(self.controller.knobs['search_out'])
return search_info.done()
def after_each_test(self):
iteration = len(self.test_history)
used_time = self.test_history[-1].used_time()
logging.msg('=== chess iteration %d done === (%f) (%s)\n' % (iteration, used_time, os.getcwd()))
def after_all_tests(self):
if self.is_fatal():
logging.msg('chess fatal error detected\n')
else:
logging.msg('chess threshold reached\n')
def log_stat(self):
runs = len(self.test_history)
used_time = self.used_time()
logging.msg('%-15s %d\n' % ('chess_runs', runs))
logging.msg('%-15s %f\n' % ('chess_time', used_time))
class RaceTestCase(race_testing.TestCase):
""" Run race detector to find all racy instructions.
"""
def __init__(self, test, mode, threshold, profiler):
race_testing.TestCase.__init__(self, test, mode, threshold, profiler)
class ChessRaceTestCase(testing.TestCase):
""" Run race detecctor to find all racy instructions first, and
then run the chess scheduler with sched_race on.
"""
def __init__(self, race_testcase, chess_testcase):
testing.TestCase.__init__(self)
self.race_testcase = race_testcase
self.chess_testcase = chess_testcase
def is_fatal(self):
assert self.done
if self.race_testcase.is_fatal() or self.chess_testcase.is_fatal():
return True
else:
return False
def body(self):
self.race_testcase.run()
if self.race_testcase.is_fatal():
logging.msg('\n')
logging.msg('---------------------------\n')
self.race_testcase.log_stat()
else:
self.chess_testcase.run()
logging.msg('\n')
logging.msg('---------------------------\n')
self.race_testcase.log_stat()
self.chess_testcase.log_stat()
|
py | b4073b3e343ac5e86ef7f0bf4c9dcf6365c416ba | from djangobench.utils import run_benchmark
def setup():
global Book
from query_update.models import Book
def benchmark():
global Book
Book.objects.all().update(title='z')
run_benchmark(
benchmark,
setup=setup,
meta={
'description': 'A simple QuerySet.update().',
}
)
|
py | b4073c94210b815e8a8b92f789020063877c5387 | """
Data for banks of Republic of Belarus
"""
import csv
import os
DATAFILE = os.path.join(os.path.dirname(__file__), 'banks.csv')
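# Minimal loader sketch (illustrative only; not part of the original module, and the
# helper name iter_banks is made up here). It assumes banks.csv is a plain CSV file
# and makes no claim about its column layout:
def iter_banks(path=DATAFILE):
    """Yield one raw CSV row per bank from the bundled data file."""
    with open(path) as f:
        for row in csv.reader(f):
            yield row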
|
py | b4073de5d9c250ff63295fadb89b7d2ad79b607d | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image38.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'example_2.wmf')
workbook.close()
self.assertExcelEqual()
|
py | b4073fefd7d7e865ca0a0e38a232603dd09694cb | # Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
# 1D Lattice
g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
# Hilbert space of spins on the graph
# with total Sz equal to 0
hi = nk.hilbert.Spin(s=0.5, graph=g, total_sz=0)
# Heisenberg hamiltonian
ha = nk.operator.Heisenberg(hilbert=hi)
# Symmetric RBM Spin Machine
ma = nk.machine.JastrowSymm(hilbert=hi, dtype=float)
ma.init_random_parameters(seed=1234, sigma=0.01)
# Metropolis Exchange Sampling
# Notice that this sampler exchanges two neighboring sites
# thus preservers the total magnetization
sa = nk.sampler.MetropolisExchange(machine=ma)
# Optimizer
op = nk.optimizer.Sgd(ma, learning_rate=0.05)
# Stochastic reconfiguration
gs = nk.Vmc(
hamiltonian=ha,
sampler=sa,
optimizer=op,
n_samples=1000,
sr=nk.optimizer.SR(diag_shift=0.1, lsq_solver="QR"),
)
gs.run(out="test", n_iter=300)
|
py | b4074018ff0d10d18e5303e694bb08866b675b8b | class RenderDuration(Enum, IComparable, IFormattable, IConvertible):
"""
An enumerated type containing possible duration types to do Raytracer render.
enum RenderDuration,values: ByLevel (0),ByTime (1),UntilSatisfactory (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
ByLevel = None
ByTime = None
UntilSatisfactory = None
value__ = None
|
py | b407406179daff5889eae52a47cb4aecc28d9198 | def gen():
yield 1
return 42
g = gen()
print(next(g))
try:
print(next(g))
except StopIteration as e:
print(type(e), e.args)
# trying next again should raise StopIteration with no arguments
try:
print(next(g))
except StopIteration as e:
print(type(e), e.args)
print("PASS") |
py | b40742d9de802ab96fdd8bce7d8e8468891e9df4 | import pandas as pd
class SpatioTemporalData:
def __init__(self, dataframe: pd.DataFrame):
self.dataframe = dataframe
def when(self, location) -> list:
return list(self.dataframe[self.dataframe['City'] == location]['Year'].drop_duplicates())
def where(self, date):
return list(self.dataframe[self.dataframe['Year'] == date]['City'].drop_duplicates())
# from FileLoader import FileLoader
# loader = FileLoader()
# data = loader.load('../resources/athlete_events.csv')
# sp = SpatioTemporalData(data)
# print(sp.where(1896))
# print(sp.where(2016))
# print(sp.when('Athina'))
# print(sp.when('Paris'))
|
py | b4074311bbec3a9f8f2b530883204d247da42665 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint:disable=line-too-long
r"""Beam job to map to tf.Examples of embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from TFDS dataseet to tf.Examples of embeddings.
"""
# pylint:enable=line-too-long
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils
flags.DEFINE_string('input_glob', None,
'Glob for input dir. XOR with `tfds_data`.')
flags.DEFINE_string(
'tfds_dataset', None, 'Name of TFDS dataset. '
'XOR with `input_glob`. Should be of the form ex "cifar".'
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string('output_filename', None, 'Output filename.')
flags.DEFINE_list(
'embedding_names', None,
'List of embedding module names. Used for logging, and as '
'in the features key of the results tf.Example feature list.')
flags.DEFINE_list(
'embedding_modules', None,
'List of embedding modules to compute. Should be accepted '
'by `hub.load`.`')
flags.DEFINE_list(
'module_output_keys', None,
'List of module output key. Must be the same length as '
'`embedding_modules`.')
flags.DEFINE_string('audio_key', None, 'Key of audio.')
flags.DEFINE_string(
'sample_rate_key', None, 'Key of sample rate. '
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_integer(
'sample_rate', None, 'Sample rate.'
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string(
'label_key', None, 'Key for labels. If the feature value is an integer, '
'convert to bytes.')
flags.DEFINE_string(
'speaker_id_key', None,
'Key for speaker_id, or `None`. If this flag is present, '
'check that the key exists and is of type `bytes`.')
flags.DEFINE_bool('average_over_time', False,
'If true, return embeddings that are averaged over time.')
flags.DEFINE_bool(
'delete_audio_from_output', True,
'If true, remove audio from the output table. Can be '
'helpful in keeping output tables small.')
flags.DEFINE_bool('debug', False, 'If True, run in debug model.')
FLAGS = flags.FLAGS
def main(unused_argv):
# Get input data location from flags. If we're reading a TFDS dataset, get
# train, validation, and test.
input_filenames_list, output_filenames, sample_rate = audio_to_embeddings_beam_utils.read_input_glob_and_sample_rate_from_flags(
FLAGS.input_glob, FLAGS.sample_rate, FLAGS.tfds_dataset,
FLAGS.output_filename)
# Check that inputs and flags are formatted correctly.
audio_to_embeddings_beam_utils.validate_inputs(input_filenames_list,
output_filenames,
FLAGS.embedding_modules,
FLAGS.embedding_names,
FLAGS.module_output_keys)
input_format = 'tfrecord'
output_format = 'tfrecord'
# If you have custom beam options, add them here.
beam_options = None
logging.info('Starting to create flume pipeline...')
with beam.Pipeline(beam_options) as root:
for i, (input_filenames_or_glob, output_filename) in enumerate(
zip(input_filenames_list, output_filenames)):
audio_to_embeddings_beam_utils.make_beam_pipeline(
root,
input_filenames_or_glob,
sample_rate,
FLAGS.debug,
FLAGS.embedding_names,
FLAGS.embedding_modules,
FLAGS.module_output_keys,
FLAGS.audio_key,
FLAGS.sample_rate_key,
FLAGS.label_key,
FLAGS.speaker_id_key,
FLAGS.average_over_time,
FLAGS.delete_audio_from_output,
output_filename,
input_format=input_format,
output_format=output_format,
suffix=i)
if __name__ == '__main__':
flags.mark_flags_as_required([
'output_filename', 'embedding_names', 'embedding_modules',
'module_output_keys', 'audio_key', 'label_key'
])
flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
required=True)
flags.mark_flags_as_mutual_exclusive(
['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
app.run(main)
|
py | b40743cb318342e856b66d01767063ce4707ff6a | #! /usr/bin/python -OO
# -*- coding: utf-8 -*-
import sys
import commands
import gettext
gettext.install("live-installer", "/usr/share/linuxmint/locale")
def uncaught_excepthook(*args):
sys.__excepthook__(*args)
if __debug__:
from pprint import pprint
from types import BuiltinFunctionType, ClassType, ModuleType, TypeType
tb = sys.last_traceback
while tb.tb_next: tb = tb.tb_next
print('\nDumping locals() ...')
pprint({k:v for k,v in tb.tb_frame.f_locals.items()
if not k.startswith('_') and
not isinstance(v, (BuiltinFunctionType,
ClassType, ModuleType, TypeType))})
if sys.stdin.isatty() and (sys.stdout.isatty() or sys.stderr.isatty()):
try:
import ipdb as pdb # try to import the IPython debugger
except ImportError:
import pdb as pdb
print '\nStarting interactive debug prompt ...'
pdb.pm()
else:
import traceback
from dialogs import ErrorDialog
ErrorDialog(_('Unexpected error'),
'<b>%s</b>' % _("The installer failed with the following error."),
'<tt>' + '\n'.join(traceback.format_exception(*args)) + '</tt>')
sys.exit(1)
sys.excepthook = uncaught_excepthook
sys.path.insert(1, '/usr/lib/live-installer')
from frontend.gtk_interface import InstallerWindow
import pygtk
pygtk.require("2.0")
import gtk
# main entry
if __name__ == "__main__":
if("install" in commands.getoutput("cat /proc/cmdline")):
win = InstallerWindow(fullscreen=True)
else:
win = InstallerWindow(fullscreen=False)
gtk.main()
|
py | b407461fa89fba0f6176d3be633dbbda87955866 | """The test for the NuHeat thermostat module."""
from homeassistant.components.nuheat.const import DOMAIN
from homeassistant.setup import async_setup_component
from .mocks import (
_get_mock_nuheat,
_get_mock_thermostat_run,
_get_mock_thermostat_schedule_hold_available,
_get_mock_thermostat_schedule_hold_unavailable,
_get_mock_thermostat_schedule_temporary_hold,
_mock_get_config,
)
from tests.async_mock import patch
async def test_climate_thermostat_run(hass):
"""Test a thermostat with the schedule running."""
mock_thermostat = _get_mock_thermostat_run()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat", return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.master_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 22.2,
"friendly_name": "Master bathroom",
"hvac_action": "heating",
"hvac_modes": ["auto", "heat"],
"max_temp": 69.4,
"min_temp": 5.0,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 22.2,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_unavailable(hass):
"""Test a thermostat with the schedule hold that is offline."""
mock_thermostat = _get_mock_thermostat_schedule_hold_unavailable()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat", return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.guest_bathroom")
assert state.state == "unavailable"
expected_attributes = {
"friendly_name": "Guest bathroom",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -6.1,
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_available(hass):
"""Test a thermostat with the schedule hold that is online."""
mock_thermostat = _get_mock_thermostat_schedule_hold_available()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat", return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.available_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 38.9,
"friendly_name": "Available bathroom",
"hvac_action": "idle",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -6.1,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 26.1,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_temporary_hold(hass):
"""Test a thermostat with the temporary schedule hold that is online."""
mock_thermostat = _get_mock_thermostat_schedule_temporary_hold()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat", return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 94.4,
"friendly_name": "Temp bathroom",
"hvac_action": "idle",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -0.6,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 37.2,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
|
py | b407475c9b8c812c37cb39871c7e4706f3225711 | import re
import time
from flask import render_template, request
from app.bpbase import Blueprint
from app.utils import json_response
bp = Blueprint('stats', __name__, url_prefix='/stats')
PAT_HOST = re.compile('^[-.a-zA-Z0-9]+$')
REDIS_MAX_FIELDS = [
'used_cpu_sys', 'used_cpu_user', 'connected_clients',
'total_commands_processed', 'evicted_keys', 'expired_keys',
'keyspace_misses', 'keyspace_hits', 'keys',
]
REDIS_AVG_FIELDS = ['used_memory', 'used_memory_rss', 'response_time']
REDIS_FIELDS = {}
for f in REDIS_MAX_FIELDS:
REDIS_FIELDS[f] = 'MAX'
for f in REDIS_AVG_FIELDS:
REDIS_FIELDS[f] = 'AVERAGE'
PROXY_MAX_FIELDS = ['connected_clients', 'mem_buffer_alloc',
'completed_commands', 'used_cpu_sys', 'used_cpu_user']
PROXY_AVG_FIELDS = ['command_elapse', 'remote_cost']
PROXY_FIELDS = {}
for f in PROXY_MAX_FIELDS:
PROXY_FIELDS[f] = 'MAX'
for f in PROXY_AVG_FIELDS:
PROXY_FIELDS[f] = 'AVERAGE'
@bp.route('/redis')
def redis():
return render_template('stats/redis.html', host=request.args['host'],
port=int(request.args['port']))
@bp.route('/proxy')
def proxy():
return render_template('stats/proxy.html', host=request.args['host'],
port=int(request.args['port']))
def _parse_args(args):
host = args['host']
if not PAT_HOST.match(host):
raise ValueError('Invalid hostname')
port = int(args['port'])
limit = min(int(args.get('limit', 100)), 500)
interval = max(int(args.get('interval', 8)), 8)
return host, port, limit, interval, limit * interval * 60
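# Worked example (illustrative): _parse_args({'host': 'redis-1', 'port': '6379'})
# falls back to limit=100 and interval=8, so it returns
# ('redis-1', 6379, 100, 8, 48000), i.e. a span of 100 * 8 * 60 seconds.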
@bp.route('/fetchredis')
def fetch_redis():
host, port, limit, interval, span = _parse_args(request.args)
now = int(time.time())
return json_response(bp.app.stats_query(
'%s:%d' % (host, port), REDIS_FIELDS, span, now, interval))
@bp.route('/fetchproxy')
def fetch_proxy():
host, port, limit, interval, span = _parse_args(request.args)
now = int(time.time())
return json_response(bp.app.stats_query(
'%s:%d' % (host, port), PROXY_FIELDS, span, now, interval))
|
py | b40747b58e8de9909ed0e86dee3a0df804e42d4d | # coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
'''
author: heucoder
email: [email protected]
date: 2019.6.13
'''
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
#Generate a swiss roll dataset.
t = 1.5 * np.pi * (1 + 2 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = 83 * np.random.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * np.random.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def rbf(dist, t = 1.0):
'''
rbf kernel function
'''
return np.exp(-(dist/t))
def cal_pairwise_dist(x):
'''Compute pairwise squared distances; x is a matrix.
Uses the identity (a-b)^2 = a^2 + b^2 - 2*a*b.
'''
sum_x = np.sum(np.square(x), 1)
dist = np.add(np.add(-2 * np.dot(x, x.T), sum_x).T, sum_x)
# returns the squared distance between every pair of points
return dist
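# Illustrative helper (not in the original file): a naive O(n^2) loop computing the
# same squared distances, handy for sanity-checking cal_pairwise_dist.
def _pairwise_dist_naive(x):
    n = x.shape[0]
    dist = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            dist[i, j] = np.sum(np.square(x[i] - x[j]))
    return dist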
def cal_rbf_dist(data, n_neighbors = 10, t = 1):
dist = cal_pairwise_dist(data)
n = dist.shape[0]
rbf_dist = rbf(dist, t)
W = np.zeros((n, n))
for i in range(n):
index_ = np.argsort(dist[i])[1:n_neighbors+1]
W[i, index_] = rbf_dist[i, index_]
W[index_, i] = rbf_dist[index_, i]
return W
def le(data,
n_dims = 2,
n_neighbors = 10, t = 1.0):
N = data.shape[0]
W = cal_rbf_dist(data, n_neighbors, t)
D = np.zeros_like(W)
for i in range(N):
D[i,i] = np.sum(W[i])
D_inv = np.linalg.inv(D)
L = D - W
eig_val, eig_vec = np.linalg.eig(np.dot(D_inv, L))
sort_index_ = np.argsort(eig_val)
eig_val = eig_val[sort_index_]
# skip leading eigenvalues that are numerically zero (the trivial components)
j = 0
for v in eig_val:
j+=1
if v > 1e-5:
break
sort_index_ = sort_index_[j:j+n_dims]
# print(sort_index_)
eig_val_picked = eig_val[sort_index_]
eig_vec_picked = eig_vec[:, sort_index_]
# print(eig_val_picked)
# D not equal I ???
# print(np.dot(np.dot(eig_vec_picked.T, D), eig_vec_picked))
X_ndim = eig_vec_picked
return X_ndim
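# Descriptive note (not in the original file): le() follows the standard Laplacian
# Eigenmaps recipe: build an RBF-weighted kNN graph, form L = D - W, take the
# eigenvectors of D^{-1} L (the generalized problem L v = lambda D v), drop the
# leading near-zero eigenvalues, and keep the following n_dims eigenvectors as
# the embedding.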
if __name__ == '__main__':
X, Y = make_swiss_roll(n_samples = 2000)
X_ndim = le(X, n_neighbors = 20, t = 10)
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(121, projection='3d')
ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c = Y)
ax2 = fig.add_subplot(122)
ax2.scatter(X_ndim[:, 0], X_ndim[:, 1], c = Y)
plt.show() |
py | b40747fe31f5a0c79ce27f7c30e3390dca3c3870 | """Testing of Select statement functionality."""
import pytest
import everstone
from everstone.sql import constraints, types
everstone.db.disable_execution()
@pytest.fixture
def sample_table():
t = everstone.db.Table("sample_table")
t.Column("col_a", types.Text, constraints.PrimaryKey)
t.Column("col_b", types.Integer)
return t
def test_select(sample_table):
s = sample_table.select
assert s.sql == "SELECT NULL"
s = sample_table.select(sample_table.columns.col_a)
assert s.sql == "SELECT public.sample_table.col_a FROM public.sample_table;"
assert str(s) == "SELECT public.sample_table.col_a FROM public.sample_table;"
assert repr(s) == "<Select 'SELECT public.sample_table.col_a FROM public.sample_table;'>"
assert s.columns == (sample_table.columns.col_a,)
@pytest.mark.asyncio
async def test_select_distinct(sample_table):
s = sample_table.select(sample_table.columns.col_a)
assert s.sql == "SELECT public.sample_table.col_a FROM public.sample_table;"
assert await s.distinct == "SELECT DISTINCT public.sample_table.col_a FROM public.sample_table;"
s = sample_table.select.distinct_on(sample_table.columns.col_a, sample_table.columns.col_b)
assert await s == (
"SELECT DISTINCT ON (public.sample_table.col_a, public.sample_table.col_b)"
" public.sample_table.col_a, public.sample_table.col_b"
" FROM public.sample_table;"
)
@pytest.mark.asyncio
async def test_select_grouped(sample_table):
col_a = sample_table.columns.col_a
s = sample_table.select(col_a.count)
s.group_by(col_a)
assert s.groups == [col_a]
assert await s == "SELECT count(public.sample_table.col_a) AS col_a_count GROUP BY public.sample_table.col_a;"
|
py | b4074800508c617a76d2544d1cdff9fd014472cf | """An implementation of ArcII Model."""
import typing
import torch
import torch.nn as nn
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import Matching
from matchzoo.dataloader import callbacks
from matchzoo.utils import parse_activation
class ArcII(BaseModel):
"""
ArcII Model.
Examples:
>>> model = ArcII()
>>> model.params['embedding_output_dim'] = 300
>>> model.params['kernel_1d_count'] = 32
>>> model.params['kernel_1d_size'] = 3
>>> model.params['kernel_2d_count'] = [16, 32]
>>> model.params['kernel_2d_size'] = [[3, 3], [3, 3]]
>>> model.params['pool_2d_size'] = [[2, 2], [2, 2]]
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(name='left_length', value=10,
desc='Length of left input.'))
params.add(Param(name='right_length', value=100,
desc='Length of right input.'))
params.add(Param(name='kernel_1d_count', value=32,
desc="Kernel count of 1D convolution layer."))
params.add(Param(name='kernel_1d_size', value=3,
desc="Kernel size of 1D convolution layer."))
params.add(Param(name='kernel_2d_count', value=[32],
desc="Kernel count of 2D convolution layer in"
"each block"))
params.add(Param(name='kernel_2d_size', value=[(3, 3)],
desc="Kernel size of 2D convolution layer in"
" each block."))
params.add(Param(name='activation', value='relu',
desc="Activation function."))
params.add(Param(name='pool_2d_size', value=[(2, 2)],
desc="Size of pooling layer in each block."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = 10,
fixed_length_right: int = 100,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre'
):
""":return: Default padding callback."""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_value=pad_value,
pad_mode=pad_mode)
def build(self):
"""
Build model structure.
ArcII has the desirable property of letting two sentences meet before
their own high-level representations mature.
"""
self.embedding = self._make_default_embedding_layer()
# Phrase level representations
self.conv1d_left = nn.Sequential(
nn.ConstantPad1d((0, self._params['kernel_1d_size'] - 1), 0),
nn.Conv1d(
in_channels=self._params['embedding_output_dim'],
out_channels=self._params['kernel_1d_count'],
kernel_size=self._params['kernel_1d_size']
)
)
self.conv1d_right = nn.Sequential(
nn.ConstantPad1d((0, self._params['kernel_1d_size'] - 1), 0),
nn.Conv1d(
in_channels=self._params['embedding_output_dim'],
out_channels=self._params['kernel_1d_count'],
kernel_size=self._params['kernel_1d_size']
)
)
# Interaction
self.matching = Matching(matching_type='plus')
# Build conv
activation = parse_activation(self._params['activation'])
in_channel_2d = [
self._params['kernel_1d_count'],
*self._params['kernel_2d_count'][:-1]
]
conv2d = [
self._make_conv_pool_block(ic, oc, ks, activation, ps)
for ic, oc, ks, ps in zip(in_channel_2d,
self._params['kernel_2d_count'],
self._params['kernel_2d_size'],
self._params['pool_2d_size'])
]
self.conv2d = nn.Sequential(*conv2d)
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
left_length = self._params['left_length']
right_length = self._params['right_length']
for ps in self._params['pool_2d_size']:
left_length = left_length // ps[0]
for ps in self._params['pool_2d_size']:
right_length = right_length // ps[1]
# Build output
self.out = self._make_output_layer(
left_length * right_length * self._params['kernel_2d_count'][-1]
)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# F = number of filters
# P = pool size
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
input_left, input_right = inputs['text_left'], inputs['text_right']
# Process left and right input.
# shape = [B, D, L]
# shape = [B, D, R]
embed_left = self.embedding(input_left.long()).transpose(1, 2)
embed_right = self.embedding(input_right.long()).transpose(1, 2)
# shape = [B, L, F1]
# shape = [B, R, F1]
conv1d_left = self.conv1d_left(embed_left).transpose(1, 2)
conv1d_right = self.conv1d_right(embed_right).transpose(1, 2)
# Compute matching signal
# shape = [B, L, R, F1]
embed_cross = self.matching(conv1d_left, conv1d_right)
# Convolution
# shape = [B, F2, L // P, R // P]
conv = self.conv2d(embed_cross.permute(0, 3, 1, 2))
# shape = [B, F2 * (L // P) * (R // P)]
embed_flat = self.dropout(torch.flatten(conv, start_dim=1))
# shape = [B, *]
out = self.out(embed_flat)
return out
@classmethod
def _make_conv_pool_block(
cls,
in_channels: int,
out_channels: int,
kernel_size: tuple,
activation: nn.Module,
pool_size: tuple,
) -> nn.Module:
"""Make conv pool block."""
return nn.Sequential(
# Same padding
nn.ConstantPad2d(
(0, kernel_size[1] - 1, 0, kernel_size[0] - 1), 0
),
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size
),
activation,
nn.MaxPool2d(kernel_size=pool_size)
)
|
py | b407491d79014a4c3d1fcd33852a96ebc3958d15 | import lldb
import re
import testutils as test
def runScenario(assembly, debugger, target):
process = target.GetProcess()
res = lldb.SBCommandReturnObject()
ci = debugger.GetCommandInterpreter()
# Run debugger, wait until libcoreclr is loaded,
# set breakpoint at Test.Main and stop there
test.stop_in_main(debugger, assembly)
ci.HandleCommand("sos", res)
print(res.GetOutput())
print(res.GetError())
# Interpreter must have this command and able to run it
test.assertTrue(res.Succeeded())
output = res.GetOutput()
# Output is not empty
test.assertTrue(len(output) > 0)
# Specific string must be in the output
test.assertNotEqual(output.find("SOS"), -1)
# TODO: test other use cases
# Continue the current process and check its exit code
test.exit_lldb(debugger, assembly)
|
py | b4074a18c8c7b72a2c13a9661411a5e90381627e | import asyncio
import time
import traceback
import pycommons.logger
from maxwell.client import Client
logger = pycommons.logger.get_instance(__name__)
async def repeat_publish():
client = Client(["localhost:8081", "localhost:8082"], loop=loop)
publisher = client.get_publisher()
while True:
value = int(time.time())
logger.debug("************Publish msg: %s", value)
try:
await publisher.publish(
"topic_3",
value.to_bytes(8, byteorder='little')
)
except Exception:
logger.error("Failed to encode: %s", traceback.format_exc())
await asyncio.sleep(1)
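# Note (illustrative, not in the original file): a subscriber of "topic_3" would
# recover the published timestamp with int.from_bytes(payload, byteorder='little'),
# mirroring the to_bytes(8, byteorder='little') encoding above.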
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.create_task(repeat_publish())
loop.run_forever()
|
py | b4074a5c85ff7726cd855e377c35b6c1a3e9c2a7 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.http import Http404
from django.template.response import TemplateResponse
from cms import __version__
from cms.cache.page import set_page_cache
from cms.models import Page
from cms.utils import get_template_from_request
from cms.utils.conf import get_cms_setting
def render_page(request, page, current_language, slug):
"""
Renders a page
"""
template_name = get_template_from_request(request, page, no_current_page=True)
# fill the context
context = {}
context['lang'] = current_language
context['current_page'] = page
context['has_change_permissions'] = page.has_change_permission(request)
context['has_view_permissions'] = page.has_view_permission(request)
if not context['has_view_permissions']:
return _handle_no_page(request, slug)
response = TemplateResponse(request, template_name, context)
response.add_post_render_callback(set_page_cache)
# Add headers for X Frame Options - this really should be changed upon moving to class based views
xframe_options = page.get_xframe_options()
# xframe_options can be None if there's no xframe information on the page
# (eg. a top-level page which has xframe options set to "inherit")
if xframe_options == Page.X_FRAME_OPTIONS_INHERIT or xframe_options is None:
# This is when we defer to django's own clickjacking handling
return response
# We want to prevent django setting this in their middleware
response.xframe_options_exempt = True
if xframe_options == Page.X_FRAME_OPTIONS_ALLOW:
# Do nothing, allowed is no header.
return response
elif xframe_options == Page.X_FRAME_OPTIONS_SAMEORIGIN:
response['X-Frame-Options'] = 'SAMEORIGIN'
elif xframe_options == Page.X_FRAME_OPTIONS_DENY:
response['X-Frame-Options'] = 'DENY'
return response
def _handle_no_page(request, slug):
context = {}
context['cms_version'] = __version__
context['cms_edit_on'] = get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
if not slug and settings.DEBUG:
return TemplateResponse(request, "cms/welcome.html", context)
try:
#add a $ to the end of the url (does not match on the cms anymore)
resolve('%s$' % request.path)
except Resolver404 as e:
# raise a django http 404 page
exc = Http404(dict(path=request.path, tried=e.args[0]['tried']))
raise exc
raise Http404('CMS Page not found: %s' % request.path)
|
py | b4074a5dc49d5d339b19bc5b514f566676008a11 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import taco
import taco2
import numpy
import Chandra.acis_esa
from Quaternion import Quat
orbit_xyz = numpy.array([0., 0., -1000000e3])
p_earth_body = numpy.array([-11913349.37481491, 1600513.79810546, 6787847.04879577])
orbit_xyz = -p_earth_body
illums = []
illums2 = []
pitchs = numpy.linspace(0, 90.0, 45)
esa_directs = []
esa_refls = []
for pitch in pitchs:
print pitch
att = [0, pitch, 0]
vis, illum, rays = taco.calc_earth_vis(orbit_xyz, att, n_radiator_x=3, n_radiator_y=4, ngrid=100, max_reflect=6)
vis2, illum2, rays2 = taco2.calc_earth_vis(orbit_xyz, att, max_reflect=6)
illums.append(illum)
illums2.append(illum2)
direct, refl, total = Chandra.acis_esa.earth_solid_angle(Quat(att), orbit_xyz)
esa_directs.append(direct)
esa_refls.append(refl)
clf()
plot(pitchs, [x[0] for x in illums] , '-b', label='tla direct')
plot(pitchs, [x[0] for x in illums2] , '--b', label='tla direct-2', linewidth=4)
plot(pitchs, [x[1] for x in illums] , '-r', label='tla refl1')
plot(pitchs, [x[1] for x in illums2] , '--r', label='tla refl1-2', linewidth=4)
plot(pitchs, [x[2] for x in illums] , '-g', label='tla refl2')
plot(pitchs, [x[2] for x in illums2] , '--g', label='tla refl2-2', linewidth=4)
plot(pitchs, [x[3] for x in illums] , '-c', label='tla refl2')
plot(pitchs, [x[3] for x in illums2] , '--c', label='tla refl2-2', linewidth=4)
plot(pitchs, [x[4] for x in illums] , '-m', label='tla refl2')
plot(pitchs, [x[4] for x in illums2] , '--m', label='tla refl2-2', linewidth=4)
plot(pitchs, [x[5] for x in illums] , '-k', label='tla refl2')
plot(pitchs, [x[5] for x in illums2] , '--k', label='tla refl2-2', linewidth=4)
legend()
grid()
xlabel('Earth Pitch (deg)')
title('Direct and reflected illum vs. Earth pitch')
|
py | b4074b01dc66423d6446941966f9d897034f037f | """
#Trains a MobileNetV2 on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_unaware/' + job_name + '*'
total_epochs = 36
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_unaware/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
py | b4074b4496311f5843761f7c05eb3fd56f0fc6e1 | from __future__ import print_function, absolute_import
import os
import sys
import random
import argparse
import torch
import torch.nn as nn
import torch.utils.data
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import numpy as np
from modeling import models
from data import datasets, transforms
from engine.base.trainer import Trainer
from engine.base.evaluator import Evaluator
from utils.logging import Logger
from utils.serialization import load_checkpoint, save_checkpoint
def argument_parser():
parser = argparse.ArgumentParser(description='NAS with Pytorch Implementation')
parser.add_argument('--gpu-ids', type=str, default='0')
# data
parser.add_argument('-d', '--dataset', type=str, default='cifar10', choices=datasets.names())
parser.add_argument('-j', '--num-workers', type=int, default=4)
parser.add_argument('-b', '--batch-size', type=int, default=128)
parser.add_argument('--num-epochs', type=int, default=200)
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--label-smooth', type=float, default=0.1, help='label smoothing')
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.names())
# optimizer
parser.add_argument('--lr', type=float, default=0.1, help="initial learning rate")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
# training
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--resume', action='store_true', default=False, help='resume from checkpoint')
parser.add_argument('--evaluate', action='store_true', default=False, help="evaluation only")
# misc
working_dir = os.path.dirname(os.path.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH', default=os.path.join(working_dir, 'temp', 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH', default=os.path.join(working_dir, 'temp', 'logs'))
# distributed
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--net-card', type=str, default='', help="Name of the network card.")
return parser
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
if args.net_card:
os.environ['GLOO_SOCKET_IFNAME'] = args.net_card
os.environ['NCCL_SOCKET_IFNAME'] = args.net_card
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
# cudnn.deterministic = True
# Redirect print to both console and log file
sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))
args.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.batch_size = args.batch_size // args.world_size
args.distributed = args.world_size > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size)
dist.barrier()
# Create dataloaders
train_transforms = transforms.create(args.dataset, train=True, cutout=args.cutout)
test_transforms = transforms.create(args.dataset, train=False)
data_root = os.path.join(args.data_dir, args.dataset)
train_dataset = datasets.create(args.dataset, data_root, train=True, transform=train_transforms, download=True)
test_dataset = datasets.create(args.dataset, data_root, train=False, transform=test_transforms, download=True)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
else:
train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.num_workers,
pin_memory=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
# Create model
norm_layer = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
model = models.create(args.arch, num_classes=len(train_dataset.classes), norm_layer=norm_layer)
if args.distributed:
model = nn.parallel.DistributedDataParallel(
model.cuda(), device_ids=[args.local_rank], output_device=args.local_rank) # find_unused_parameters=True
else:
model = nn.DataParallel(model).cuda()
if not args.distributed or args.local_rank == 0:
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
# Criterion
# criterion = nn.CrossEntropyLoss().cuda()
# Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_epochs)
# Load from checkpoint
start_epoch = best_prec1 = 0
if args.resume:
checkpoint = load_checkpoint(os.path.join(args.logs_dir, 'checkpoint.pth.tar'))
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
print("=> Start epoch {} best_prec1 {:.2f}".format(start_epoch, best_prec1))
if args.distributed:
dist.barrier()
# Create Evaluator
evaluator = Evaluator(model, distributed=args.distributed)
if args.evaluate:
evaluator(test_loader)
return
# Create Trainer
trainer = Trainer(model, optimizer, distributed=args.distributed)
# Start training
for epoch in range(start_epoch, args.num_epochs):
        # Use .set_epoch() to reshuffle the dataset partition at every epoch
if args.distributed:
train_sampler.set_epoch(epoch)
trainer(train_loader, epoch)
scheduler.step()
# evaluate on validation set
# prec1 = evaluator.evaluate(test_loader)
# is_best = prec1 > best_prec1
# best_prec1 = max(prec1, best_prec1)
is_best = True
if not args.distributed or args.local_rank == 0:
lr = scheduler.get_last_lr()
print('epoch: {:d}, lr: {}'.format(epoch, lr))
save_checkpoint({
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'epoch': epoch + 1,
'best_prec1': best_prec1,
}, is_best, fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))
if args.distributed:
dist.barrier()
# Final test
checkpoint = load_checkpoint(os.path.join(args.logs_dir, 'checkpoint.pth.tar'))
model.module.load_state_dict(checkpoint['state_dict'])
evaluator(test_loader)
return
if __name__ == '__main__':
parser = argument_parser()
main(parser.parse_args())
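# Launch sketch (not part of the original script): main() reads WORLD_SIZE from the
# environment, parses --local_rank, and calls dist.init_process_group(backend='nccl',
# init_method='env://'), which matches the contract of the torch.distributed launcher.
# A single-node, 4-GPU run might therefore look like:
#
#     python -m torch.distributed.launch --nproc_per_node=4 train.py \
#         --gpu-ids 0,1,2,3 --dataset cifar10 --arch resnet18
#
# The script name and the --gpu-ids/--dataset/--arch flag spellings are assumptions
# inferred from the attributes used in main(); only --local_rank and WORLD_SIZE are
# consumed directly by the distributed setup above.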
|
py | b4074b4c6e9f478c3f944b260cde76d2795314b4 | """Legacy objectives module.
Only kept for backwards API compatibility.
"""
from __future__ import absolute_import
from .losses import *
|
py | b4074b83ef7e106a3796f6dd9d9d7e6b92b3c01c | """
python generate_sparsetools.py
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
'l': 64-bit integer scalar
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
#
# List of all routines and their argument types.
#
# The first code indicates the return value, the rest the arguments.
#
# bsr.h
BSR_ROUTINES = """
bsr_diagonal v iiiiiIIT*T
bsr_tocsr v iiiiIIT*I*I*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat_pass2 v iiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
# csc.h
CSC_ROUTINES = """
csc_diagonal v iiiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_pass1 v iiIIII*I
csc_matmat_pass2 v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
# csr.h
CSR_ROUTINES = """
csr_matmat_pass1 v iiIIII*I
csr_matmat_pass2 v iiIITIIT*I*I*T
csr_diagonal v iiiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_todense v iiIIT*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
# coo.h, dia.h, csgraph.h
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_todense v iilIIT*Ti
coo_matvec v lIITT*T
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
# List of compilation units
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
#
# List of the supported index typenums and the corresponding C++ types
#
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
#
# List of the supported data typenums and the corresponding C++ types
#
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
#
# Code templates
#
THUNK_TEMPLATE = """
static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
#
# Code generation
#
def get_thunk_type_set():
"""
    Get lists containing the Cartesian product of index and data types, plus a getter routine.
Returns
-------
i_types : list [(j, I_typenum, None, I_type, None), ...]
Pairing of index type numbers and the corresponding C++ types,
        and a unique index `j`. This is for routines that are parameterized
only by I but not by T.
it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
Same as `i_types`, but for routines parameterized both by T and I.
getter_code : str
C++ code for a function that takes I_typenum, T_typenum and returns
the unique index corresponding to the lists, or -1 if no match was
found.
"""
it_types = []
i_types = []
j = 0
getter_code = " if (0) {}"
for I_typenum, I_type in I_TYPES:
piece = """
else if (I_typenum == %(I_typenum)s) {
if (T_typenum == -1) { return %(j)s; }"""
getter_code += piece % dict(I_typenum=I_typenum, j=j)
i_types.append((j, I_typenum, None, I_type, None))
j += 1
for T_typenum, T_type in T_TYPES:
piece = """
else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
getter_code += piece % dict(T_typenum=T_typenum, j=j)
it_types.append((j, I_typenum, T_typenum, I_type, T_type))
j += 1
getter_code += """
}"""
return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
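# Quick sanity sketch of the returned structure (derived from the I_TYPES/T_TYPES
# tables above): there are 2 index-only cases and 2 * 17 = 34 index/data cases,
# i.e. 36 thunk cases in total.
#
#     i_types, it_types, getter_code = get_thunk_type_set()
#     # i_types[0]  == (0, 'NPY_INT32', None, 'npy_int32', None)
#     # it_types[0] == (1, 'NPY_INT32', 'NPY_BOOL', 'npy_int32', 'npy_bool_wrapper')
#     # i_types[1][0] == 18, because the 17 data-type cases of NPY_INT32 come first.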
def parse_routine(name, args, types):
"""
Generate thunk and method code for a given routine.
Parameters
----------
name : str
Name of the C++ routine
args : str
Argument list specification (in format explained above)
types : list
        List of types to instantiate, as returned by `get_thunk_type_set`
"""
ret_spec = args[0]
arg_spec = args[1:]
def get_arglist(I_type, T_type):
"""
Generate argument list for calling the C++ function
"""
args = []
next_is_writeable = False
j = 0
for t in arg_spec:
const = '' if next_is_writeable else 'const '
next_is_writeable = False
if t == '*':
next_is_writeable = True
continue
elif t == 'i':
args.append("*(%s*)a[%d]" % (const + I_type, j))
elif t == 'I':
args.append("(%s*)a[%d]" % (const + I_type, j))
elif t == 'T':
args.append("(%s*)a[%d]" % (const + T_type, j))
elif t == 'B':
args.append("(npy_bool_wrapper*)a[%d]" % (j,))
elif t == 'V':
if const:
raise ValueError("'V' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
elif t == 'W':
if const:
raise ValueError("'W' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
elif t == 'l':
args.append("*(%snpy_int64*)a[%d]" % (const, j))
else:
raise ValueError("Invalid spec character %r" % (t,))
j += 1
return ", ".join(args)
# Generate thunk code: a giant switch statement with different
# type combinations inside.
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
switch (j) {"""
for j, I_typenum, T_typenum, I_type, T_type in types:
arglist = get_arglist(I_type, T_type)
if T_type is None:
dispatch = "%s" % (I_type,)
else:
dispatch = "%s,%s" % (I_type, T_type)
if 'B' in arg_spec:
dispatch += ",npy_bool_wrapper"
piece = """
case %(j)s:"""
if ret_spec == 'v':
piece += """
(void)%(name)s<%(dispatch)s>(%(arglist)s);
return 0;"""
else:
piece += """
return %(name)s<%(dispatch)s>(%(arglist)s);"""
thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
I_typenum=I_typenum, T_typenum=T_typenum,
arglist=arglist, name=name,
dispatch=dispatch)
thunk_content += """
default:
throw std::runtime_error("internal error: invalid argument typenums");
}"""
thunk_code = THUNK_TEMPLATE % dict(name=name,
thunk_content=thunk_content)
# Generate method code
method_code = METHOD_TEMPLATE % dict(name=name,
ret_spec=ret_spec,
arg_spec=arg_spec)
return thunk_code, method_code
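# Minimal usage sketch (mirroring what main() below does for each routine line):
# split the spec into name and argument string, pick `it_types` when a data-array
# code ('T' here) is present, and generate both wrappers.
#
#     i_types, it_types, _ = get_thunk_type_set()
#     thunk_code, method_code = parse_routine('csr_matvec', 'viiIITT*T', it_types)
#     # thunk_code holds the C++ switch over the 34 (index, data) type cases,
#     # method_code the csr_matvec_method wrapper built from METHOD_TEMPLATE.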
def main():
p = optparse.OptionParser(usage=__doc__.strip())
p.add_option("--no-force", action="store_false",
dest="force", default=True)
options, args = p.parse_args()
names = []
i_types, it_types, getter_code = get_thunk_type_set()
# Generate *_impl.h for each compilation unit
for unit_name, routines in COMPILATION_UNITS:
thunks = []
methods = []
# Generate thunks and methods for all routines
for line in routines.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
name, args = line.split(None, 1)
except ValueError:
raise ValueError("Malformed line: %r" % (line,))
args = "".join(args.split())
if 't' in args or 'T' in args:
thunk, method = parse_routine(name, args, it_types)
else:
thunk, method = parse_routine(name, args, i_types)
if name in names:
raise ValueError("Duplicate routine %r" % (name,))
names.append(name)
thunks.append(thunk)
methods.append(method)
# Produce output
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
unit_name + '_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(getter_code)
for thunk in thunks:
f.write(thunk)
for method in methods:
f.write(method)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
# Generate code for method struct
method_defs = ""
for name in names:
method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)
method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
for name in names:
method_struct += """
{"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
method_struct += """
{NULL, NULL, 0, NULL}
};"""
# Produce sparsetools_impl.h
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
'sparsetools_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(method_defs)
f.write(method_struct)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
stream.write("""\
/* This file is autogenerated by generate_sparsetools.py
* Do not edit manually or check into VCS.
*/
""")
if __name__ == "__main__":
main()
|
py | b4074b914121a7664909f276324558b3f41d6d8c | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import build_norm_layer
from mmcv.runner import force_fp32
from torch import nn
from mmdet3d.ops import DynamicScatter
from .. import builder
from ..builder import VOXEL_ENCODERS
from .utils import VFELayer, get_paddings_indicator
@VOXEL_ENCODERS.register_module()
class HardSimpleVFE(nn.Module):
"""Simple voxel feature encoder used in SECOND.
It simply averages the values of points in a voxel.
Args:
num_features (int): Number of features to use. Default: 4.
"""
def __init__(self, num_features=4):
super(HardSimpleVFE, self).__init__()
self.num_features = num_features
self.fp16_enabled = False
@force_fp32(out_fp16=True)
def forward(self, features, num_points, coors):
"""Forward function.
Args:
features (torch.Tensor): Point features in shape
(N, M, 3(4)). N is the number of voxels and M is the maximum
number of points inside a single voxel.
num_points (torch.Tensor): Number of points in each voxel,
shape (N, ).
coors (torch.Tensor): Coordinates of voxels.
Returns:
torch.Tensor: Mean of points inside each voxel in shape (N, 3(4))
"""
points_mean = features[:, :, :self.num_features].sum(
dim=1, keepdim=False) / num_points.type_as(features).view(-1, 1)
return points_mean.contiguous()
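# Usage sketch (shapes chosen for illustration, not taken from a config): average
# two hard voxels that hold at most five 4-feature points each.
#
#     vfe = HardSimpleVFE(num_features=4)
#     feats = torch.rand(2, 5, 4)                     # (N voxels, M points, C)
#     num_points = torch.tensor([5, 5])               # valid points per voxel
#     coors = torch.zeros(2, 4, dtype=torch.int32)    # unused here, kept for API parity
#     mean = vfe(feats, num_points, coors)            # -> (2, 4) per-voxel means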
@VOXEL_ENCODERS.register_module()
class DynamicSimpleVFE(nn.Module):
"""Simple dynamic voxel feature encoder used in DV-SECOND.
It simply averages the values of points in a voxel.
But the number of points in a voxel is dynamic and varies.
Args:
        voxel_size (tuple[float]): Size of a single voxel
point_cloud_range (tuple[float]): Range of the point cloud and voxels
"""
def __init__(self,
voxel_size=(0.2, 0.2, 4),
point_cloud_range=(0, -40, -3, 70.4, 40, 1)):
super(DynamicSimpleVFE, self).__init__()
self.scatter = DynamicScatter(voxel_size, point_cloud_range, True)
self.fp16_enabled = False
@torch.no_grad()
@force_fp32(out_fp16=True)
def forward(self, features, coors):
"""Forward function.
Args:
features (torch.Tensor): Point features in shape
(N, 3(4)). N is the number of points.
coors (torch.Tensor): Coordinates of voxels.
Returns:
torch.Tensor: Mean of points inside each voxel in shape (M, 3(4)).
M is the number of voxels.
"""
# This function is used from the start of the voxelnet
# num_points: [concated_num_points]
features, features_coors = self.scatter(features, coors)
return features, features_coors
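# Usage sketch (shapes assumed): the dynamic variant has no per-voxel point cap,
# so the input is a flat point list plus a per-point voxel coordinate, and
# DynamicScatter performs the averaging (CUDA op, hence .cuda() below).
#
#     vfe = DynamicSimpleVFE(voxel_size=(0.2, 0.2, 4),
#                            point_cloud_range=(0, -40, -3, 70.4, 40, 1)).cuda()
#     feats = torch.rand(1000, 4).cuda()                        # (N points, C)
#     coors = torch.zeros(1000, 4, dtype=torch.int32).cuda()    # (batch, z, y, x)
#     voxel_feats, voxel_coors = vfe(feats, coors)  # one row per non-empty voxel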
@VOXEL_ENCODERS.register_module()
class DynamicVFE(nn.Module):
"""Dynamic Voxel feature encoder used in DV-SECOND.
It encodes features of voxels and their points. It could also fuse
    image features into voxel features in a point-wise manner.
    The number of points inside each voxel varies.
Args:
in_channels (int): Input channels of VFE. Defaults to 4.
feat_channels (list(int)): Channels of features in VFE.
with_distance (bool): Whether to use the L2 distance of points to the
origin point. Default False.
with_cluster_center (bool): Whether to use the distance to cluster
center of points inside a voxel. Default to False.
with_voxel_center (bool): Whether to use the distance to center of
voxel for each points inside a voxel. Default to False.
voxel_size (tuple[float]): Size of a single voxel. Default to
(0.2, 0.2, 4).
point_cloud_range (tuple[float]): The range of points or voxels.
Default to (0, -40, -3, 70.4, 40, 1).
norm_cfg (dict): Config dict of normalization layers.
mode (str): The mode when pooling features of points inside a voxel.
Available options include 'max' and 'avg'. Default to 'max'.
fusion_layer (dict | None): The config dict of fusion layer used in
multi-modal detectors. Default to None.
return_point_feats (bool): Whether to return the features of each
points. Default to False.
"""
def __init__(self,
in_channels=4,
feat_channels=[],
with_distance=False,
with_cluster_center=False,
with_voxel_center=False,
voxel_size=(0.2, 0.2, 4),
point_cloud_range=(0, -40, -3, 70.4, 40, 1),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
mode='max',
fusion_layer=None,
return_point_feats=False):
super(DynamicVFE, self).__init__()
assert mode in ['avg', 'max']
assert len(feat_channels) > 0
if with_cluster_center:
in_channels += 3
if with_voxel_center:
in_channels += 3
if with_distance:
in_channels += 1
self.in_channels = in_channels
self._with_distance = with_distance
self._with_cluster_center = with_cluster_center
self._with_voxel_center = with_voxel_center
self.return_point_feats = return_point_feats
self.fp16_enabled = False
# Need pillar (voxel) size and x/y offset in order to calculate offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.vz = voxel_size[2]
self.x_offset = self.vx / 2 + point_cloud_range[0]
self.y_offset = self.vy / 2 + point_cloud_range[1]
self.z_offset = self.vz / 2 + point_cloud_range[2]
self.point_cloud_range = point_cloud_range
self.scatter = DynamicScatter(voxel_size, point_cloud_range, True)
feat_channels = [self.in_channels] + list(feat_channels)
vfe_layers = []
for i in range(len(feat_channels) - 1):
in_filters = feat_channels[i]
out_filters = feat_channels[i + 1]
if i > 0:
in_filters *= 2
norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters)
vfe_layers.append(
nn.Sequential(
nn.Linear(in_filters, out_filters, bias=False), norm_layer,
nn.ReLU(inplace=True)))
self.vfe_layers = nn.ModuleList(vfe_layers)
self.num_vfe = len(vfe_layers)
self.vfe_scatter = DynamicScatter(voxel_size, point_cloud_range,
(mode != 'max'))
self.cluster_scatter = DynamicScatter(
voxel_size, point_cloud_range, average_points=True)
self.fusion_layer = None
if fusion_layer is not None:
self.fusion_layer = builder.build_fusion_layer(fusion_layer)
def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors):
"""Map voxel features to its corresponding points.
Args:
pts_coors (torch.Tensor): Voxel coordinate of each point.
voxel_mean (torch.Tensor): Voxel features to be mapped.
voxel_coors (torch.Tensor): Coordinates of valid voxels
Returns:
torch.Tensor: Features or centers of each point.
"""
# Step 1: scatter voxel into canvas
# Calculate necessary things for canvas creation
canvas_z = int(
(self.point_cloud_range[5] - self.point_cloud_range[2]) / self.vz)
canvas_y = int(
(self.point_cloud_range[4] - self.point_cloud_range[1]) / self.vy)
canvas_x = int(
(self.point_cloud_range[3] - self.point_cloud_range[0]) / self.vx)
# canvas_channel = voxel_mean.size(1)
batch_size = pts_coors[-1, 0] + 1
canvas_len = canvas_z * canvas_y * canvas_x * batch_size
# Create the canvas for this sample
canvas = voxel_mean.new_zeros(canvas_len, dtype=torch.long)
# Only include non-empty pillars
indices = (
voxel_coors[:, 0] * canvas_z * canvas_y * canvas_x +
voxel_coors[:, 1] * canvas_y * canvas_x +
voxel_coors[:, 2] * canvas_x + voxel_coors[:, 3])
# Scatter the blob back to the canvas
canvas[indices.long()] = torch.arange(
start=0, end=voxel_mean.size(0), device=voxel_mean.device)
# Step 2: get voxel mean for each point
voxel_index = (
pts_coors[:, 0] * canvas_z * canvas_y * canvas_x +
pts_coors[:, 1] * canvas_y * canvas_x +
pts_coors[:, 2] * canvas_x + pts_coors[:, 3])
voxel_inds = canvas[voxel_index.long()]
center_per_point = voxel_mean[voxel_inds, ...]
return center_per_point
@force_fp32(out_fp16=True)
def forward(self,
features,
coors,
points=None,
img_feats=None,
img_metas=None):
"""Forward functions.
Args:
features (torch.Tensor): Features of voxels, shape is NxC.
coors (torch.Tensor): Coordinates of voxels, shape is Nx(1+NDim).
points (list[torch.Tensor], optional): Raw points used to guide the
multi-modality fusion. Defaults to None.
            img_feats (list[torch.Tensor], optional): Image features used for
multi-modality fusion. Defaults to None.
            img_metas (dict, optional): Meta information of the images. Defaults to None.
Returns:
tuple: If `return_point_feats` is False, returns voxel features and
                their coordinates. If `return_point_feats` is True, returns
                the features of each point inside the voxels.
"""
features_ls = [features]
# Find distance of x, y, and z from cluster center
if self._with_cluster_center:
voxel_mean, mean_coors = self.cluster_scatter(features, coors)
points_mean = self.map_voxel_center_to_point(
coors, voxel_mean, mean_coors)
# TODO: maybe also do cluster for reflectivity
f_cluster = features[:, :3] - points_mean[:, :3]
features_ls.append(f_cluster)
# Find distance of x, y, and z from pillar center
if self._with_voxel_center:
f_center = features.new_zeros(size=(features.size(0), 3))
f_center[:, 0] = features[:, 0] - (
coors[:, 3].type_as(features) * self.vx + self.x_offset)
f_center[:, 1] = features[:, 1] - (
coors[:, 2].type_as(features) * self.vy + self.y_offset)
f_center[:, 2] = features[:, 2] - (
coors[:, 1].type_as(features) * self.vz + self.z_offset)
features_ls.append(f_center)
if self._with_distance:
points_dist = torch.norm(features[:, :3], 2, 1, keepdim=True)
features_ls.append(points_dist)
# Combine together feature decorations
features = torch.cat(features_ls, dim=-1)
for i, vfe in enumerate(self.vfe_layers):
point_feats = vfe(features)
if (i == len(self.vfe_layers) - 1 and self.fusion_layer is not None
and img_feats is not None):
point_feats = self.fusion_layer(img_feats, points, point_feats,
img_metas)
voxel_feats, voxel_coors = self.vfe_scatter(point_feats, coors)
if i != len(self.vfe_layers) - 1:
# need to concat voxel feats if it is not the last vfe
feat_per_point = self.map_voxel_center_to_point(
coors, voxel_feats, voxel_coors)
features = torch.cat([point_feats, feat_per_point], dim=1)
if self.return_point_feats:
return point_feats
return voxel_feats, voxel_coors
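# Channel bookkeeping sketch (follows directly from __init__ above): with
# in_channels=4 and all three decorations enabled, the first VFE layer sees
# 4 + 3 (cluster offset) + 3 (voxel-center offset) + 1 (L2 distance) = 11 input
# channels, and every layer after the first sees twice its nominal width because
# the scattered voxel feature is concatenated back onto the per-point feature
# (the `in_filters *= 2` branch).
#
#     vfe = DynamicVFE(in_channels=4, feat_channels=[64, 64],
#                      with_cluster_center=True, with_voxel_center=True,
#                      with_distance=True)
#     # first Linear: 11 -> 64, second Linear: 128 -> 64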
@VOXEL_ENCODERS.register_module()
class HardVFE(nn.Module):
"""Voxel feature encoder used in DV-SECOND.
It encodes features of voxels and their points. It could also fuse
    image features into voxel features in a point-wise manner.
Args:
in_channels (int): Input channels of VFE. Defaults to 4.
feat_channels (list(int)): Channels of features in VFE.
with_distance (bool): Whether to use the L2 distance of points to the
origin point. Default False.
with_cluster_center (bool): Whether to use the distance to cluster
center of points inside a voxel. Default to False.
with_voxel_center (bool): Whether to use the distance to center of
voxel for each points inside a voxel. Default to False.
voxel_size (tuple[float]): Size of a single voxel. Default to
(0.2, 0.2, 4).
point_cloud_range (tuple[float]): The range of points or voxels.
Default to (0, -40, -3, 70.4, 40, 1).
norm_cfg (dict): Config dict of normalization layers.
mode (str): The mode when pooling features of points inside a voxel.
Available options include 'max' and 'avg'. Default to 'max'.
fusion_layer (dict | None): The config dict of fusion layer used in
multi-modal detectors. Default to None.
return_point_feats (bool): Whether to return the features of each
points. Default to False.
"""
def __init__(self,
in_channels=4,
feat_channels=[],
with_distance=False,
with_cluster_center=False,
with_voxel_center=False,
voxel_size=(0.2, 0.2, 4),
point_cloud_range=(0, -40, -3, 70.4, 40, 1),
norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
mode='max',
fusion_layer=None,
return_point_feats=False):
super(HardVFE, self).__init__()
assert len(feat_channels) > 0
if with_cluster_center:
in_channels += 3
if with_voxel_center:
in_channels += 3
if with_distance:
in_channels += 1
self.in_channels = in_channels
self._with_distance = with_distance
self._with_cluster_center = with_cluster_center
self._with_voxel_center = with_voxel_center
self.return_point_feats = return_point_feats
self.fp16_enabled = False
# Need pillar (voxel) size and x/y offset to calculate pillar offset
self.vx = voxel_size[0]
self.vy = voxel_size[1]
self.vz = voxel_size[2]
self.x_offset = self.vx / 2 + point_cloud_range[0]
self.y_offset = self.vy / 2 + point_cloud_range[1]
self.z_offset = self.vz / 2 + point_cloud_range[2]
self.point_cloud_range = point_cloud_range
self.scatter = DynamicScatter(voxel_size, point_cloud_range, True)
feat_channels = [self.in_channels] + list(feat_channels)
vfe_layers = []
for i in range(len(feat_channels) - 1):
in_filters = feat_channels[i]
out_filters = feat_channels[i + 1]
if i > 0:
in_filters *= 2
# TODO: pass norm_cfg to VFE
# norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters)
if i == (len(feat_channels) - 2):
cat_max = False
max_out = True
if fusion_layer:
max_out = False
else:
max_out = True
cat_max = True
vfe_layers.append(
VFELayer(
in_filters,
out_filters,
norm_cfg=norm_cfg,
max_out=max_out,
cat_max=cat_max))
self.vfe_layers = nn.ModuleList(vfe_layers)
self.num_vfe = len(vfe_layers)
self.fusion_layer = None
if fusion_layer is not None:
self.fusion_layer = builder.build_fusion_layer(fusion_layer)
@force_fp32(out_fp16=True)
def forward(self,
features,
num_points,
coors,
img_feats=None,
img_metas=None):
"""Forward functions.
Args:
features (torch.Tensor): Features of voxels, shape is MxNxC.
num_points (torch.Tensor): Number of points in each voxel.
coors (torch.Tensor): Coordinates of voxels, shape is Mx(1+NDim).
            img_feats (list[torch.Tensor], optional): Image features used for
multi-modality fusion. Defaults to None.
            img_metas (dict, optional): Meta information of the images. Defaults to None.
Returns:
tuple: If `return_point_feats` is False, returns voxel features and
                their coordinates. If `return_point_feats` is True, returns
                the features of each point inside the voxels.
"""
features_ls = [features]
# Find distance of x, y, and z from cluster center
if self._with_cluster_center:
points_mean = (
features[:, :, :3].sum(dim=1, keepdim=True) /
num_points.type_as(features).view(-1, 1, 1))
# TODO: maybe also do cluster for reflectivity
f_cluster = features[:, :, :3] - points_mean
features_ls.append(f_cluster)
# Find distance of x, y, and z from pillar center
if self._with_voxel_center:
f_center = features.new_zeros(
size=(features.size(0), features.size(1), 3))
f_center[:, :, 0] = features[:, :, 0] - (
coors[:, 3].type_as(features).unsqueeze(1) * self.vx +
self.x_offset)
f_center[:, :, 1] = features[:, :, 1] - (
coors[:, 2].type_as(features).unsqueeze(1) * self.vy +
self.y_offset)
f_center[:, :, 2] = features[:, :, 2] - (
coors[:, 1].type_as(features).unsqueeze(1) * self.vz +
self.z_offset)
features_ls.append(f_center)
if self._with_distance:
points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
features_ls.append(points_dist)
# Combine together feature decorations
voxel_feats = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether the
        # pillar was empty, so ensure that empty voxels remain set to zeros.
voxel_count = voxel_feats.shape[1]
mask = get_paddings_indicator(num_points, voxel_count, axis=0)
voxel_feats *= mask.unsqueeze(-1).type_as(voxel_feats)
for i, vfe in enumerate(self.vfe_layers):
voxel_feats = vfe(voxel_feats)
if (self.fusion_layer is not None and img_feats is not None):
voxel_feats = self.fusion_with_mask(features, mask, voxel_feats,
coors, img_feats, img_metas)
return voxel_feats
def fusion_with_mask(self, features, mask, voxel_feats, coors, img_feats,
img_metas):
"""Fuse image and point features with mask.
Args:
features (torch.Tensor): Features of voxel, usually it is the
values of points in voxels.
mask (torch.Tensor): Mask indicates valid features in each voxel.
voxel_feats (torch.Tensor): Features of voxels.
coors (torch.Tensor): Coordinates of each single voxel.
img_feats (list[torch.Tensor]): Multi-scale feature maps of image.
img_metas (list(dict)): Meta information of image and points.
Returns:
torch.Tensor: Fused features of each voxel.
"""
        # the features consist of a batch of points
batch_size = coors[-1, 0] + 1
points = []
for i in range(batch_size):
single_mask = (coors[:, 0] == i)
points.append(features[single_mask][mask[single_mask]])
point_feats = voxel_feats[mask]
point_feats = self.fusion_layer(img_feats, points, point_feats,
img_metas)
voxel_canvas = voxel_feats.new_zeros(
size=(voxel_feats.size(0), voxel_feats.size(1),
point_feats.size(-1)))
voxel_canvas[mask] = point_feats
out = torch.max(voxel_canvas, dim=1)[0]
return out
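# Configuration sketch (PointPillars-style values chosen as assumptions, not taken
# from this file): HardVFE consumes padded voxels of shape (M, max_points, C)
# together with num_points, masks out the padded slots, and returns one feature
# vector per voxel.
#
#     vfe = HardVFE(in_channels=4, feat_channels=[64],
#                   with_cluster_center=True, with_voxel_center=True,
#                   voxel_size=(0.2, 0.2, 4),
#                   point_cloud_range=(0, -40, -3, 70.4, 40, 1))
#     feats = torch.rand(100, 32, 4)                     # (M voxels, max_points, C)
#     num_points = torch.randint(1, 33, (100,))
#     coors = torch.zeros(100, 4, dtype=torch.int32)     # (batch, z, y, x)
#     out = vfe(feats, num_points, coors)                # roughly (100, 64) features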
|