<SYSTEM_TASK:>
return a list of group names
<END_TASK>
<USER_TASK:>
Description:
def iddtxt2grouplist(txt):
"""return a list of group names
the list is in the same order as the idf objects in the idd file
""" |
def makenone(astr):
if astr == 'None':
return None
else:
return astr
txt = nocomment(txt, '!')
txt = txt.replace("\\group", "!-group") # retains group in next line
txt = nocomment(txt, '\\') # remove all other idd info
lines = txt.splitlines()
lines = [line.strip() for line in lines] # cleanup
lines = [line for line in lines if line != ''] # cleanup
txt = '\n'.join(lines)
gsplits = txt.split('!') # split into groups, since we have !-group
gsplits = [gsplit.splitlines() for gsplit in gsplits] # split group
gsplits[0].insert(0, u'-group None')
# Put None for the first group that does not have a group name
glist = []
for gsplit in gsplits:
glist.append((gsplit[0], gsplit[1:]))
# makes dict {groupname:[k1, k2], groupname2:[k3, k4]}
glist = [(k, '\n'.join(v)) for k, v in glist]  # joins lines back
glist = [(k, v.split(';')) for k, v in glist] # splits into idfobjects
glist = [(k, [i.strip() for i in v]) for k, v in glist] # cleanup
glist = [(k, [i.splitlines() for i in v]) for k, v in glist]
# splits idfobjects into lines
glist = [(k, [i for i in v if len(i) > 0]) for k, v in glist]
# cleanup - removes blank lines
glist = [(k, [i[0] for i in v]) for k, v in glist] # use first line
fglist = []
for gnamelist in glist:
gname = gnamelist[0]
thelist = gnamelist[-1]
for item in thelist:
fglist.append((gname, item))
glist = [(gname[len("-group "):], obj) for gname, obj in fglist] # remove "-group "
glist = [(makenone(gname), obj) for gname, obj in glist] # make str None into None
glist = [(gname, obj.split(',')[0]) for gname, obj in glist] # remove comma
return glist |
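A minimal usage sketch, assuming eppy's nocomment helper is in scope; the two-object IDD snippet and the expected output are purely illustrative:
idd_snippet = (
    "Version,\n"
    "  A1 ; \\field Version Identifier\n"
    "\n"
    "\\group Simulation Parameters\n"
    "\n"
    "Building,\n"
    "  A1 , \\field Name\n"
    "  N1 ; \\field North Axis\n"
)
print iddtxt2grouplist(idd_snippet)
# [(None, 'Version'), ('Simulation Parameters', 'Building')]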
<SYSTEM_TASK:>
add group info to commlst
<END_TASK>
<USER_TASK:>
Description:
def group2commlst(commlst, glist):
"""add group info to commlst""" |
for (gname, objname), commitem in zip(glist, commlst):
newitem1 = "group %s" % (gname, )
newitem2 = "idfobj %s" % (objname, )
commitem[0].insert(0, newitem1)
commitem[0].insert(1, newitem2)
return commlst |
<SYSTEM_TASK:>
flatten and return a copy of the list
<END_TASK>
<USER_TASK:>
Description:
def flattencopy(lst):
"""flatten and return a copy of the list
inefficient on large lists""" |
# modified from
# http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
thelist = copy.deepcopy(lst)
list_is_nested = True
while list_is_nested: # outer loop
keepchecking = False
atemp = []
for element in thelist: # inner loop
if isinstance(element, list):
atemp.extend(element)
keepchecking = True
else:
atemp.append(element)
list_is_nested = keepchecking # determine if outer loop exits
thelist = atemp[:]
return thelist |
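A quick usage sketch; flattencopy needs only the copy module already used above:
nested = [1, [2, [3, [4, 5]]], 6]
print flattencopy(nested)  # [1, 2, 3, 4, 5, 6]
print nested  # original is untouched: [1, [2, [3, [4, 5]]], 6]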
<SYSTEM_TASK:>
make a pipe component
<END_TASK>
<USER_TASK:>
Description:
def makepipecomponent(idf, pname):
"""make a pipe component
generate inlet outlet names""" |
apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname)
apipe.Inlet_Node_Name = "%s_inlet" % (pname,)
apipe.Outlet_Node_Name = "%s_outlet" % (pname,)
return apipe |
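A minimal usage sketch, assuming an eppy IDF instance `idf` built from a valid Energy+.idd file:
apipe = makepipecomponent(idf, "p_loop_pipe")
print apipe.Inlet_Node_Name   # p_loop_pipe_inlet
print apipe.Outlet_Node_Name  # p_loop_pipe_outlet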
<SYSTEM_TASK:>
make a duct component
<END_TASK>
<USER_TASK:>
Description:
def makeductcomponent(idf, dname):
"""make a duct component
generate inlet outlet names""" |
aduct = idf.newidfobject("duct".upper(), Name=dname)
aduct.Inlet_Node_Name = "%s_inlet" % (dname,)
aduct.Outlet_Node_Name = "%s_outlet" % (dname,)
return aduct |
<SYSTEM_TASK:>
make a branch with a pipe
<END_TASK>
<USER_TASK:>
Description:
def makepipebranch(idf, bname):
"""make a branch with a pipe
use standard inlet outlet names""" |
# make the pipe component first
pname = "%s_pipe" % (bname,)
apipe = makepipecomponent(idf, pname)
# now make the branch with the pipe in it
abranch = idf.newidfobject("BRANCH", Name=bname)
abranch.Component_1_Object_Type = 'Pipe:Adiabatic'
abranch.Component_1_Name = pname
abranch.Component_1_Inlet_Node_Name = apipe.Inlet_Node_Name
abranch.Component_1_Outlet_Node_Name = apipe.Outlet_Node_Name
abranch.Component_1_Branch_Control_Type = "Bypass"
return abranch |
<SYSTEM_TASK:>
make a branch with a duct
<END_TASK>
<USER_TASK:>
Description:
def makeductbranch(idf, bname):
"""make a branch with a duct
use standard inlet outlet names""" |
# make the duct component first
pname = "%s_duct" % (bname,)
aduct = makeductcomponent(idf, pname)
# now make the branch with the duct in it
abranch = idf.newidfobject("BRANCH", Name=bname)
abranch.Component_1_Object_Type = 'duct'
abranch.Component_1_Name = pname
abranch.Component_1_Inlet_Node_Name = aduct.Inlet_Node_Name
abranch.Component_1_Outlet_Node_Name = aduct.Outlet_Node_Name
abranch.Component_1_Branch_Control_Type = "Bypass"
return abranch |
<SYSTEM_TASK:>
get the components of the branch
<END_TASK>
<USER_TASK:>
Description:
def getbranchcomponents(idf, branch, utest=False):
"""get the components of the branch""" |
fobjtype = 'Component_%s_Object_Type'
fobjname = 'Component_%s_Name'
complist = []
for i in range(1, 100000):
try:
objtype = branch[fobjtype % (i,)]
if objtype.strip() == '':
break
objname = branch[fobjname % (i,)]
complist.append((objtype, objname))
except bunch_subclass.BadEPFieldError:
break
if utest:
return complist
else:
return [idf.getobject(ot, on) for ot, on in complist] |
<SYSTEM_TASK:>
rename all the changed nodes
<END_TASK>
<USER_TASK:>
Description:
def renamenodes(idf, fieldtype):
"""rename all the changed nodes""" |
renameds = []
for key in idf.model.dtls:
for idfobject in idf.idfobjects[key]:
for fieldvalue in idfobject.obj:
if type(fieldvalue) is list:
if fieldvalue not in renameds:
cpvalue = copy.copy(fieldvalue)
renameds.append(cpvalue)
# do the renaming
for key in idf.model.dtls:
for idfobject in idf.idfobjects[key]:
for i, fieldvalue in enumerate(idfobject.obj):
itsidd = idfobject.objidd[i]
if 'type' in itsidd:
if itsidd['type'][0] == fieldtype:
tempdct = dict(renameds)
if type(fieldvalue) is list:
fieldvalue = fieldvalue[-1]
idfobject.obj[i] = fieldvalue
else:
if fieldvalue in tempdct:
fieldvalue = tempdct[fieldvalue]
idfobject.obj[i] = fieldvalue |
<SYSTEM_TASK:>
get the fieldnames for the idfobject based on endswith
<END_TASK>
<USER_TASK:>
Description:
def getfieldnamesendswith(idfobject, endswith):
"""get the filednames for the idfobject based on endswith""" |
objls = idfobject.objls
return [name for name in objls if name.endswith(endswith)] |
<SYSTEM_TASK:>
return the field name of the node
<END_TASK>
<USER_TASK:>
Description:
def getnodefieldname(idfobject, endswith, fluid=None, startswith=None):
"""return the field name of the node
fluid is only needed if there are air and water nodes
fluid is Air or Water or ''.
if the fluid is Steam, use Water""" |
if startswith is None:
startswith = ''
if fluid is None:
fluid = ''
nodenames = getfieldnamesendswith(idfobject, endswith)
nodenames = [name for name in nodenames if name.startswith(startswith)]
fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1]
fnodenames = [name for name in fnodenames if name.startswith(startswith)]
if len(fnodenames) == 0:
nodename = nodenames[0]
else:
nodename = fnodenames[0]
return nodename |
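An illustrative sketch of how the fluid filter narrows the match; the coil object and its field names below are hypothetical:
# Suppose a water coil exposes these node fields (hypothetical names):
#   Air_Inlet_Node_Name, Water_Inlet_Node_Name, ...
# getnodefieldname(coil, "Inlet_Node_Name")
#     -> 'Air_Inlet_Node_Name' (first match, no fluid filter)
# getnodefieldname(coil, "Inlet_Node_Name", fluid='Water')
#     -> 'Water_Inlet_Node_Name'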
<SYSTEM_TASK:>
rename nodes so that the components get connected
<END_TASK>
<USER_TASK:>
Description:
def connectcomponents(idf, components, fluid=None):
"""rename nodes so that the components get connected
fluid is only needed if there are air and water nodes
fluid is Air or Water or ''.
if the fluid is Steam, use Water""" |
if fluid is None:
fluid = ''
if len(components) == 1:
thiscomp, thiscompnode = components[0]
initinletoutlet(idf, thiscomp, thiscompnode, force=False)
outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
fluid=fluid, startswith=thiscompnode)
thiscomp[outletnodename] = [thiscomp[outletnodename],
thiscomp[outletnodename]]
# inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
# nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
return components
for i in range(len(components) - 1):
thiscomp, thiscompnode = components[i]
nextcomp, nextcompnode = components[i + 1]
initinletoutlet(idf, thiscomp, thiscompnode, force=False)
initinletoutlet(idf, nextcomp, nextcompnode, force=False)
betweennodename = "%s_%s_node" % (thiscomp.Name, nextcomp.Name)
outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
fluid=fluid, startswith=thiscompnode)
thiscomp[outletnodename] = [thiscomp[outletnodename], betweennodename]
inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
return components |
<SYSTEM_TASK:>
initialize values for all the inlet outlet nodes for the object.
<END_TASK>
<USER_TASK:>
Description:
def initinletoutlet(idf, idfobject, thisnode, force=False):
"""initialze values for all the inlet outlet nodes for the object.
if force == False, it willl init only if field = '' """ |
def blankfield(fieldvalue):
"""test for blank field"""
try:
if fieldvalue.strip() == '':
return True
else:
return False
except AttributeError: # field may be a list
return False
def trimfields(fields, thisnode):
if len(fields) > 1:
if thisnode is not None:
fields = [field for field in fields
if field.startswith(thisnode)]
return fields
else:
print("Where should this loop connect ?")
print("%s - %s" % (idfobject.key, idfobject.Name))
print([field.split("Inlet_Node_Name")[0]
for field in inletfields])
raise WhichLoopError
else:
return fields
inletfields = getfieldnamesendswith(idfobject, "Inlet_Node_Name")
inletfields = trimfields(inletfields, thisnode) # or warn with exception
for inletfield in inletfields:
if blankfield(idfobject[inletfield]) or force:
idfobject[inletfield] = "%s_%s" % (idfobject.Name, inletfield)
outletfields = getfieldnamesendswith(idfobject, "Outlet_Node_Name")
outletfields = trimfields(outletfields, thisnode) # or warn with exception
for outletfield in outletfields:
if blankfield(idfobject[outletfield]) or force:
idfobject[outletfield] = "%s_%s" % (idfobject.Name, outletfield)
return idfobject |
<SYSTEM_TASK:>
insert a list of components into a branch
<END_TASK>
<USER_TASK:>
Description:
def componentsintobranch(idf, branch, listofcomponents, fluid=None):
"""insert a list of components into a branch
fluid is only needed if there are air and water nodes in same object
fluid is Air or Water or ''.
if the fluid is Steam, use Water""" |
if fluid is None:
fluid = ''
componentlist = [item[0] for item in listofcomponents]
# assumes that the nodes of the component connect to each other
# empty branch if it has existing components
thebranchname = branch.Name
thebranch = idf.removeextensibles('BRANCH', thebranchname) # empty the branch
# fill in the new components with the node names into this branch
# find the first extensible field and fill in the data in obj.
e_index = idf.getextensibleindex('BRANCH', thebranchname)
theobj = thebranch.obj
modeleditor.extendlist(theobj, e_index) # just being careful here
for comp, compnode in listofcomponents:
theobj.append(comp.key)
theobj.append(comp.Name)
inletnodename = getnodefieldname(comp, "Inlet_Node_Name", fluid=fluid,
startswith=compnode)
theobj.append(comp[inletnodename])
outletnodename = getnodefieldname(comp, "Outlet_Node_Name",
fluid=fluid, startswith=compnode)
theobj.append(comp[outletnodename])
theobj.append('')
return thebranch |
<SYSTEM_TASK:>
force it to be a list of tuples
<END_TASK>
<USER_TASK:>
Description:
def _clean_listofcomponents(listofcomponents):
"""force it to be a list of tuples""" |
def totuple(item):
"""return a tuple"""
if isinstance(item, (tuple, list)):
return item
else:
return (item, None)
return [totuple(item) for item in listofcomponents] |
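A quick usage sketch showing the normalisation (plain items are paired with None):
print _clean_listofcomponents(['chiller', ('coil', 'Water')])
# [('chiller', None), ('coil', 'Water')]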
<SYSTEM_TASK:>
force 3 items in the tuple
<END_TASK>
<USER_TASK:>
Description:
def _clean_listofcomponents_tuples(listofcomponents_tuples):
"""force 3 items in the tuple""" |
def to3tuple(item):
"""return a 3 item tuple"""
if len(item) == 3:
return item
else:
return (item[0], item[1], None)
return [to3tuple(item) for item in listofcomponents_tuples] |
<SYSTEM_TASK:>
get idfobject or make it if it does not exist
<END_TASK>
<USER_TASK:>
Description:
def getmakeidfobject(idf, key, name):
"""get idfobject or make it if it does not exist""" |
idfobject = idf.getobject(key, name)
if not idfobject:
return idf.newidfobject(key, Name=name)
else:
return idfobject |
<SYSTEM_TASK:>
Area of a polygon poly
<END_TASK>
<USER_TASK:>
Description:
def area(poly):
"""Area of a polygon poly""" |
if len(poly) < 3: # not a plane - no area
return 0
total = [0, 0, 0]
num = len(poly)
for i in range(num):
vi1 = poly[i]
vi2 = poly[(i+1) % num]
prod = np.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
if total == [0, 0, 0]: # points are in a straight line - no area
return 0
result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2) |
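A worked check, assuming numpy is imported as np and unit_normal (defined below) is in scope:
square = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]  # unit square in the XY plane
print area(square)  # 1.0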
<SYSTEM_TASK:>
unit normal vector of plane defined by points pt_a, pt_b, and pt_c
<END_TASK>
<USER_TASK:>
Description:
def unit_normal(pt_a, pt_b, pt_c):
"""unit normal vector of plane defined by points pt_a, pt_b, and pt_c""" |
x_val = np.linalg.det([[1, pt_a[1], pt_a[2]], [1, pt_b[1], pt_b[2]], [1, pt_c[1], pt_c[2]]])
y_val = np.linalg.det([[pt_a[0], 1, pt_a[2]], [pt_b[0], 1, pt_b[2]], [pt_c[0], 1, pt_c[2]]])
z_val = np.linalg.det([[pt_a[0], pt_a[1], 1], [pt_b[0], pt_b[1], 1], [pt_c[0], pt_c[1], 1]])
magnitude = (x_val**2 + y_val**2 + z_val**2)**.5
mag = (x_val/magnitude, y_val/magnitude, z_val/magnitude)
if magnitude < 0.00000001:
mag = (0, 0, 0)
return mag |
<SYSTEM_TASK:>
Width of a polygon poly
<END_TASK>
<USER_TASK:>
Description:
def width(poly):
"""Width of a polygon poly""" |
num = len(poly) - 1
if abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):
return dist(poly[num], poly[0])
elif abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):
return dist(poly[1], poly[0])
else:
return max(dist(poly[num], poly[0]), dist(poly[1], poly[0])) |
<SYSTEM_TASK:>
Height of a polygon poly
<END_TASK>
<USER_TASK:>
Description:
def height(poly):
"""Height of a polygon poly""" |
num = len(poly) - 1
if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):
return dist(poly[num], poly[0])
elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):
return dist(poly[1], poly[0])
else:
return min(dist(poly[num], poly[0]), dist(poly[1], poly[0])) |
<SYSTEM_TASK:>
angle between two vectors
<END_TASK>
<USER_TASK:>
Description:
def angle2vecs(vec1, vec2):
"""angle between two vectors""" |
# vector a * vector b = |a|*|b|* cos(angle between vector a and vector b)
dot = np.dot(vec1, vec2)
vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum())
vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum())
if (vec1_modulus * vec2_modulus) == 0:
cos_angle = 1
else:
cos_angle = dot / (vec1_modulus * vec2_modulus)
return math.degrees(acos(cos_angle)) |
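Two worked checks, assuming numpy as np and acos from math are imported as the code above implies:
print angle2vecs([1, 0, 0], [0, 1, 0])  # 90.0 (perpendicular)
print angle2vecs([1, 0, 0], [1, 0, 0])  # 0.0 (parallel)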
<SYSTEM_TASK:>
Tilt of a polygon poly
<END_TASK>
<USER_TASK:>
Description:
def tilt(poly):
"""Tilt of a polygon poly""" |
num = len(poly) - 1
vec = unit_normal(poly[0], poly[1], poly[num])
vec_alt = np.array([vec[0], vec[1], vec[2]])
vec_z = np.array([0, 0, 1])
# return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh
return angle2vecs(vec_alt, vec_z) |
<SYSTEM_TASK:>
get all the fields that have the key 'field'
<END_TASK>
<USER_TASK:>
Description:
def getfields(comm):
"""get all the fields that have the key 'field' """ |
fields = []
for field in comm:
if 'field' in field:
fields.append(field)
return fields |
<SYSTEM_TASK:>
get the names of the repeating fields
<END_TASK>
<USER_TASK:>
Description:
def repeatingfieldsnames(fields):
"""get the names of the repeating fields""" |
fnames = [field['field'][0] for field in fields]
fnames = [bunchhelpers.onlylegalchar(fname) for fname in fnames]
fnames = [fname for fname in fnames if bunchhelpers.intinlist(fname.split())]
fnames = [(bunchhelpers.replaceint(fname), None) for fname in fnames]
dct = dict(fnames)
repnames = fnames[:len(list(dct.keys()))]
return repnames |
<SYSTEM_TASK:>
put missing keys in commdct for standard objects
<END_TASK>
<USER_TASK:>
Description:
def missingkeys_standard(commdct, dtls, skiplist=None):
"""put missing keys in commdct for standard objects
return a list of keys where it is unable to do so
commdct is not returned, but is updated""" |
if skiplist is None:
skiplist = []
# find objects where all the fields are not named
gkeys = [dtls[i] for i in range(len(dtls)) if commdct[i].count({}) > 2]
nofirstfields = []
# operate on those fields
for key_txt in gkeys:
if key_txt in skiplist:
continue
# print key_txt
# for a function, pass comm as a variable
key_i = dtls.index(key_txt.upper())
comm = commdct[key_i]
# get all fields
fields = getfields(comm)
# get repeating field names
repnames = repeatingfieldsnames(fields)
try:
first = repnames[0][0] % (1, )
except IndexError:
nofirstfields.append(key_txt)
continue
# print first
# get all comments of the first repeating field names
firstnames = [repname[0] % (1, ) for repname in repnames]
fcomments = [field for field in fields
if bunchhelpers.onlylegalchar(field['field'][0])
in firstnames]
fcomments = [dict(fcomment) for fcomment in fcomments]
for cmt in fcomments:
fld = cmt['field'][0]
fld = bunchhelpers.onlylegalchar(fld)
fld = bunchhelpers.replaceint(fld)
cmt['field'] = [fld]
for i, cmt in enumerate(comm[1:]):
thefield = cmt['field'][0]
thefield = bunchhelpers.onlylegalchar(thefield)
if thefield == first:
break
first_i = i + 1
newfields = []
for i in range(1, len(comm[first_i:]) // len(repnames) + 1):
for fcomment in fcomments:
nfcomment = dict(fcomment)
fld = nfcomment['field'][0]
fld = fld % (i, )
nfcomment['field'] = [fld]
newfields.append(nfcomment)
for i, cmt in enumerate(comm):
if i < first_i:
continue
else:
afield = newfields.pop(0)
comm[i] = afield
commdct[key_i] = comm
return nofirstfields |
<SYSTEM_TASK:>
This is an object list where there is no first field name
<END_TASK>
<USER_TASK:>
Description:
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afield %s'):
"""This is an object list where there is no first field name
to give a hint of what the first field name should be""" |
for key_txt in objectlist:
key_i = dtls.index(key_txt.upper())
comm = commdct[key_i]
if block:
blk = block[key_i]
for i, cmt in enumerate(comm):
if cmt == {}:
first_i = i
break
for i, cmt in enumerate(comm):
if i >= first_i:
if block:
comm[i]['field'] = ['%s' % (blk[i])]
else:
comm[i]['field'] = [afield % (i - first_i + 1,),] |
<SYSTEM_TASK:>
Return an EventLoop instance.
<END_TASK>
<USER_TASK:>
Description:
def get_event_loop():
"""Return a EventLoop instance.
A new instance is created for each new HTTP request. We determine
that we're in a new request by inspecting os.environ, which is reset
at the start of each request. Also, each thread gets its own loop.
""" |
ev = _state.event_loop
if not os.getenv(_EVENT_LOOP_KEY) and ev is not None:
ev.clear()
_state.event_loop = None
ev = None
if ev is None:
ev = EventLoop()
_state.event_loop = ev
os.environ[_EVENT_LOOP_KEY] = '1'
return ev |
<SYSTEM_TASK:>
Remove all pending events without running any.
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
"""Remove all pending events without running any.""" |
while self.current or self.idlers or self.queue or self.rpcs:
current = self.current
idlers = self.idlers
queue = self.queue
rpcs = self.rpcs
_logging_debug('Clearing stale EventLoop instance...')
if current:
_logging_debug(' current = %s', current)
if idlers:
_logging_debug(' idlers = %s', idlers)
if queue:
_logging_debug(' queue = %s', queue)
if rpcs:
_logging_debug(' rpcs = %s', rpcs)
self.__init__()
current.clear()
idlers.clear()
queue[:] = []
rpcs.clear()
_logging_debug('Cleared') |
<SYSTEM_TASK:>
Insert event in queue, and keep it sorted assuming queue is sorted.
<END_TASK>
<USER_TASK:>
Description:
def insort_event_right(self, event, lo=0, hi=None):
"""Insert event in queue, and keep it sorted assuming queue is sorted.
If event is already in queue, insert it to the right of the rightmost
event (to keep FIFO order).
Optional args lo (default 0) and hi (default len(self.queue)) bound
the slice of the queue to be searched.
Args:
event: a (time in sec since unix epoch, callback, args, kwds) tuple.
""" |
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(self.queue)
while lo < hi:
mid = (lo + hi) // 2
if event[0] < self.queue[mid][0]:
hi = mid
else:
lo = mid + 1
self.queue.insert(lo, event) |
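The search is a hand-rolled bisect_right keyed on event time; a standalone sketch of the same invariant using the standard library:
import bisect
queue = [(1.0, 'cb_a', (), {}), (2.0, 'cb_b', (), {})]
event = (1.0, 'cb_c', (), {})
times = [e[0] for e in queue]
queue.insert(bisect.bisect_right(times, event[0]), event)
# equal times keep FIFO order:
# [(1.0, 'cb_a', ...), (1.0, 'cb_c', ...), (2.0, 'cb_b', ...)]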
<SYSTEM_TASK:>
Schedule a function call at a specific time in the future.
<END_TASK>
<USER_TASK:>
Description:
def queue_call(self, delay, callback, *args, **kwds):
"""Schedule a function call at a specific time in the future.""" |
if delay is None:
self.current.append((callback, args, kwds))
return
if delay < 1e9:
when = delay + self.clock.now()
else:
# Times over a billion seconds are assumed to be absolute.
when = delay
self.insort_event_right((when, callback, args, kwds)) |
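A hedged usage sketch (assumes an EventLoop instance `loop` and a callback `ping`); note the 1e9 cutoff separating relative delays from absolute epoch times:
loop.queue_call(None, ping)        # run on the next loop pass
loop.queue_call(5.0, ping)         # run ~5 seconds from now (relative)
loop.queue_call(1500000000, ping)  # >= 1e9, treated as an absolute time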
<SYSTEM_TASK:>
Schedule an RPC with an optional callback.
<END_TASK>
<USER_TASK:>
Description:
def queue_rpc(self, rpc, callback=None, *args, **kwds):
"""Schedule an RPC with an optional callback.
The caller must have previously sent the call to the service.
The optional callback is called with the remaining arguments.
NOTE: If the rpc is a MultiRpc, the callback will be called once
for each sub-RPC. TODO: Is this a good idea?
""" |
if rpc is None:
return
if rpc.state not in (_RUNNING, _FINISHING):
raise RuntimeError('rpc must be sent to service before queueing')
if isinstance(rpc, datastore_rpc.MultiRpc):
rpcs = rpc.rpcs
if len(rpcs) > 1:
# Don't call the callback until all sub-rpcs have completed.
rpc.__done = False
def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds):
if r.state == _FINISHING and not r.__done:
r.__done = True
c(*a, **k)
# TODO: And again, what about exceptions?
callback = help_multi_rpc_along
args = ()
kwds = {}
else:
rpcs = [rpc]
for rpc in rpcs:
self.rpcs[rpc] = (callback, args, kwds) |
<SYSTEM_TASK:>
Add an idle callback.
<END_TASK>
<USER_TASK:>
Description:
def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
""" |
self.idlers.append((callback, args, kwds)) |
<SYSTEM_TASK:>
Run one of the idle callbacks.
<END_TASK>
<USER_TASK:>
Description:
def run_idle(self):
"""Run one of the idle callbacks.
Returns:
True if one was called, False if no idle callback was called.
""" |
if not self.idlers or self.inactive >= len(self.idlers):
return False
idler = self.idlers.popleft()
callback, args, kwds = idler
_logging_debug('idler: %s', callback.__name__)
res = callback(*args, **kwds)
# See add_idle() for the meaning of the callback return value.
if res is not None:
if res:
self.inactive = 0
else:
self.inactive += 1
self.idlers.append(idler)
else:
_logging_debug('idler %s removed', callback.__name__)
return True |
<SYSTEM_TASK:>
Helper for GQL parsing to extract values from GQL expressions.
<END_TASK>
<USER_TASK:>
Description:
def _args_to_val(func, args):
"""Helper for GQL parsing to extract values from GQL expressions.
This can extract the value from a GQL literal, return a Parameter
for a GQL bound parameter (:1 or :foo), and interprets casts like
KEY(...) and plain lists of values like (1, 2, 3).
Args:
func: A string indicating what kind of thing this is.
args: One or more GQL values, each integer, string, or GQL literal.
""" |
from .google_imports import gql # Late import, to avoid name conflict.
vals = []
for arg in args:
if isinstance(arg, (int, long, basestring)):
val = Parameter(arg)
elif isinstance(arg, gql.Literal):
val = arg.Get()
else:
raise TypeError('Unexpected arg (%r)' % arg)
vals.append(val)
if func == 'nop':
if len(vals) != 1:
raise TypeError('"nop" requires exactly one value')
return vals[0] # May be a Parameter
pfunc = ParameterizedFunction(func, vals)
if pfunc.is_parameterized():
return pfunc
else:
return pfunc.resolve({}, {}) |
<SYSTEM_TASK:>
Helper for FQL parsing to turn a property name into a property object.
<END_TASK>
<USER_TASK:>
Description:
def _get_prop_from_modelclass(modelclass, name):
"""Helper for FQL parsing to turn a property name into a property object.
Args:
modelclass: The model class specified in the query.
name: The property name. This may contain dots which indicate
sub-properties of structured properties.
Returns:
A Property object.
Raises:
KeyError if the property doesn't exist and the model class doesn't
derive from Expando.
""" |
if name == '__key__':
return modelclass._key
parts = name.split('.')
part, more = parts[0], parts[1:]
prop = modelclass._properties.get(part)
if prop is None:
if issubclass(modelclass, model.Expando):
prop = model.GenericProperty(part)
else:
raise TypeError('Model %s has no property named %r' %
(modelclass._get_kind(), part))
while more:
part = more.pop(0)
if not isinstance(prop, model.StructuredProperty):
raise TypeError('Model %s has no property named %r' %
(modelclass._get_kind(), part))
maybe = getattr(prop, part, None)
if isinstance(maybe, model.Property) and maybe._name == part:
prop = maybe
else:
maybe = prop._modelclass._properties.get(part)
if maybe is not None:
# Must get it this way to get the copy with the long name.
# (See StructuredProperty.__getattr__() for details.)
prop = getattr(prop, maybe._code_name)
else:
if issubclass(prop._modelclass, model.Expando) and not more:
prop = model.GenericProperty()
prop._name = name # Bypass the restriction on dots.
else:
raise KeyError('Model %s has no property named %r' %
(prop._modelclass._get_kind(), part))
return prop |
<SYSTEM_TASK:>
Apply the filter to values extracted from an entity.
<END_TASK>
<USER_TASK:>
Description:
def _apply(self, key_value_map):
"""Apply the filter to values extracted from an entity.
Think of self.match_keys and self.match_values as representing a
table with one row. For example:
match_keys = ('name', 'age', 'rank')
match_values = ('Joe', 24, 5)
(Except that in reality, the values are represented by tuples
produced by datastore_types.PropertyValueToKeyValue().)
represents this table:
| name | age | rank |
+---------+-------+--------+
| 'Joe' | 24 | 5 |
Think of key_value_map as a table with the same structure but
(potentially) many rows. This represents a repeated structured
property of a single entity. For example:
{'name': ['Joe', 'Jane', 'Dick'],
'age': [24, 21, 23],
'rank': [5, 1, 2]}
represents this table:
| name | age | rank |
+---------+-------+--------+
| 'Joe' | 24 | 5 |
| 'Jane' | 21 | 1 |
| 'Dick' | 23 | 2 |
We must determine whether at least one row of the second table
exactly matches the first table. We need this class because the
datastore, when asked to find an entity with name 'Joe', age 24
and rank 5, will include entities that have 'Joe' somewhere in the
name column, 24 somewhere in the age column, and 5 somewhere in
the rank column, but not all aligned on a single row. Such an
entity should not be considered a match.
""" |
columns = []
for key in self.match_keys:
column = key_value_map.get(key)
if not column: # None, or an empty list.
return False # If any column is empty there can be no match.
columns.append(column)
# Use izip to transpose the columns into rows.
return self.match_values in itertools.izip(*columns) |
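A worked sketch of the transpose-and-match idea, using the simplified values from the docstring (the real code compares PropertyValueToKeyValue tuples):
import itertools
match_values = ('Joe', 24, 5)
columns = [['Joe', 'Jane', 'Dick'], [24, 21, 23], [5, 1, 2]]
print match_values in itertools.izip(*columns)    # True: row 0 matches
print ('Joe', 21, 5) in itertools.izip(*columns)  # False: not on one row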
<SYSTEM_TASK:>
Internal helper to fix the namespace.
<END_TASK>
<USER_TASK:>
Description:
def _fix_namespace(self):
"""Internal helper to fix the namespace.
This is called to ensure that for queries without an explicit
namespace, the namespace used by async calls is the one in effect
at the time the async call is made, not the one in effect when the
request is actually generated.
""" |
if self.namespace is not None:
return self
namespace = namespace_manager.get_namespace()
return self.__class__(kind=self.kind, ancestor=self.ancestor,
filters=self.filters, orders=self.orders,
app=self.app, namespace=namespace,
default_options=self.default_options,
projection=self.projection, group_by=self.group_by) |
<SYSTEM_TASK:>
True if results are guaranteed to contain a unique set of property
<END_TASK>
<USER_TASK:>
Description:
def is_distinct(self):
"""True if results are guaranteed to contain a unique set of property
values.
This happens when every property in the group_by is also in the projection.
""" |
return bool(self.__group_by and
set(self._to_property_names(self.__group_by)) <=
set(self._to_property_names(self.__projection))) |
<SYSTEM_TASK:>
Fetch a list of query results, up to a limit.
<END_TASK>
<USER_TASK:>
Description:
def fetch_async(self, limit=None, **q_options):
"""Fetch a list of query results, up to a limit.
This is the asynchronous version of Query.fetch().
""" |
if limit is None:
default_options = self._make_options(q_options)
if default_options is not None and default_options.limit is not None:
limit = default_options.limit
else:
limit = _MAX_LIMIT
q_options['limit'] = limit
q_options.setdefault('batch_size', limit)
if self._needs_multi_query():
return self.map_async(None, **q_options)
# Optimization using direct batches.
options = self._make_options(q_options)
qry = self._fix_namespace()
return qry._run_to_list([], options=options) |
<SYSTEM_TASK:>
Count the number of query results, up to a limit.
<END_TASK>
<USER_TASK:>
Description:
def count_async(self, limit=None, **q_options):
"""Count the number of query results, up to a limit.
This is the asynchronous version of Query.count().
""" |
qry = self._fix_namespace()
return qry._count_async(limit=limit, **q_options) |
<SYSTEM_TASK:>
Fetch a page of results.
<END_TASK>
<USER_TASK:>
Description:
def fetch_page_async(self, page_size, **q_options):
"""Fetch a page of results.
This is the asynchronous version of Query.fetch_page().
""" |
qry = self._fix_namespace()
return qry._fetch_page_async(page_size, **q_options) |
<SYSTEM_TASK:>
Helper to construct a QueryOptions object from keyword arguments.
<END_TASK>
<USER_TASK:>
Description:
def _make_options(self, q_options):
"""Helper to construct a QueryOptions object from keyword arguments.
Args:
q_options: a dict of keyword arguments.
Note that either 'options' or 'config' can be used to pass another
QueryOptions object, but not both. If another QueryOptions object is
given it provides default values.
If self.default_options is set, it is used to provide defaults,
which have a lower precedence than options set in q_options.
Returns:
A QueryOptions object, or None if q_options is empty.
""" |
if not (q_options or self.__projection):
return self.default_options
if 'options' in q_options:
# Move 'options' to 'config' since that is what QueryOptions() uses.
if 'config' in q_options:
raise TypeError('You cannot use config= and options= at the same time')
q_options['config'] = q_options.pop('options')
if q_options.get('projection'):
try:
q_options['projection'] = self._to_property_names(
q_options['projection'])
except TypeError, e:
raise datastore_errors.BadArgumentError(e)
self._check_properties(q_options['projection'])
options = QueryOptions(**q_options)
# Populate projection if it hasn't been overridden.
if (options.keys_only is None and
options.projection is None and
self.__projection):
options = QueryOptions(
projection=self._to_property_names(self.__projection), config=options)
# Populate default options
if self.default_options is not None:
options = self.default_options.merge(options)
return options |
<SYSTEM_TASK:>
Return a list giving the parameters required by a query.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self):
"""Return a list giving the parameters required by a query.""" |
class MockBindings(dict):
def __contains__(self, key):
self[key] = None
return True
bindings = MockBindings()
used = {}
ancestor = self.ancestor
if isinstance(ancestor, ParameterizedThing):
ancestor = ancestor.resolve(bindings, used)
filters = self.filters
if filters is not None:
filters = filters.resolve(bindings, used)
return sorted(used) |
<SYSTEM_TASK:>
Return the cursor before the current item.
<END_TASK>
<USER_TASK:>
Description:
def cursor_before(self):
"""Return the cursor before the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
""" |
if self._exhausted:
return self.cursor_after()
if isinstance(self._cursor_before, BaseException):
raise self._cursor_before
return self._cursor_before |
<SYSTEM_TASK:>
Return the cursor after the current item.
<END_TASK>
<USER_TASK:>
Description:
def cursor_after(self):
"""Return the cursor after the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
""" |
if isinstance(self._cursor_after, BaseException):
raise self._cursor_after
return self._cursor_after |
<SYSTEM_TASK:>
Return a Future whose result will say whether a next item is available.
<END_TASK>
<USER_TASK:>
Description:
def has_next_async(self):
"""Return a Future whose result will say whether a next item is available.
See the module docstring for the usage pattern.
""" |
if self._fut is None:
self._fut = self._iter.getq()
flag = True
try:
yield self._fut
except EOFError:
flag = False
raise tasklets.Return(flag) |
<SYSTEM_TASK:>
A decorator to declare that only the first N arguments may be positional.
<END_TASK>
<USER_TASK:>
Description:
def positional(max_pos_args):
"""A decorator to declare that only the first N arguments may be positional.
Note that for methods, n includes 'self'.
""" |
__ndb_debug__ = 'SKIP'
def positional_decorator(wrapped):
if not DEBUG:
return wrapped
__ndb_debug__ = 'SKIP'
@wrapping(wrapped)
def positional_wrapper(*args, **kwds):
__ndb_debug__ = 'SKIP'
if len(args) > max_pos_args:
plural_s = ''
if max_pos_args != 1:
plural_s = 's'
raise TypeError(
'%s() takes at most %d positional argument%s (%d given)' %
(wrapped.__name__, max_pos_args, plural_s, len(args)))
return wrapped(*args, **kwds)
return positional_wrapper
return positional_decorator |
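A usage sketch (the check is only active when DEBUG is true, per the code above):
@positional(2)
def pair(first, second, label=None):
    return first, second, label

pair(1, 2, label='ok')  # fine
pair(1, 2, 3)  # TypeError: pair() takes at most 2 positional arguments (3 given)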
<SYSTEM_TASK:>
Converts a function into a decorator that optionally accepts keyword
<END_TASK>
<USER_TASK:>
Description:
def decorator(wrapped_decorator):
"""Converts a function into a decorator that optionally accepts keyword
arguments in its declaration.
Example usage:
@utils.decorator
def decorator(func, args, kwds, op1=None):
... apply op1 ...
return func(*args, **kwds)
# Form (1), vanilla
@decorator
def foo(...):
...
# Form (2), with options
@decorator(op1=5)
def foo(...):
...
Args:
wrapped_decorator: A function that accepts positional args (func, args,
kwds) and any additional supported keyword arguments.
Returns:
A decorator with an additional 'wrapped_decorator' property that is set to
the original function.
""" |
def helper(_func=None, **options):
def outer_wrapper(func):
@wrapping(func)
def inner_wrapper(*args, **kwds):
return wrapped_decorator(func, args, kwds, **options)
return inner_wrapper
if _func is None:
# Form (2), with options.
return outer_wrapper
# Form (1), vanilla.
if options:
# Don't allow @decorator(foo, op1=5).
raise TypeError('positional arguments not supported')
return outer_wrapper(_func)
helper.wrapped_decorator = wrapped_decorator
return helper |
<SYSTEM_TASK:>
A recursive Fibonacci to exercise task switching.
<END_TASK>
<USER_TASK:>
Description:
def fibonacci(n):
"""A recursive Fibonacci to exercise task switching.""" |
if n <= 1:
raise ndb.Return(n)
a, b = yield fibonacci(n - 1), fibonacci(n - 2)
raise ndb.Return(a + b) |
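A usage sketch, assuming fibonacci is decorated with @ndb.tasklet (the decorator line is not shown above):
fut = fibonacci(10)     # returns a Future immediately
print fut.get_result()  # 55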
<SYSTEM_TASK:>
Actually run the _todo_tasklet.
<END_TASK>
<USER_TASK:>
Description:
def run_queue(self, options, todo):
"""Actually run the _todo_tasklet.""" |
utils.logging_debug('AutoBatcher(%s): %d items',
self._todo_tasklet.__name__, len(todo))
batch_fut = self._todo_tasklet(todo, options)
self._running.append(batch_fut)
# Add a callback when we're done.
batch_fut.add_callback(self._finished_callback, batch_fut, todo) |
<SYSTEM_TASK:>
Adds an arg and gets back a future.
<END_TASK>
<USER_TASK:>
Description:
def add(self, arg, options=None):
"""Adds an arg and gets back a future.
Args:
arg: one argument for _todo_tasklet.
options: rpc options.
Return:
An instance of future, representing the result of running
_todo_tasklet without batching.
""" |
fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options))
todo = self._queues.get(options)
if todo is None:
utils.logging_debug('AutoBatcher(%s): creating new queue for %r',
self._todo_tasklet.__name__, options)
if not self._queues:
eventloop.add_idle(self._on_idle)
todo = self._queues[options] = []
todo.append((fut, arg))
if len(todo) >= self._limit:
del self._queues[options]
self.run_queue(options, todo)
return fut |
<SYSTEM_TASK:>
Passes exception along.
<END_TASK>
<USER_TASK:>
Description:
def _finished_callback(self, batch_fut, todo):
"""Passes exception along.
Args:
batch_fut: the batch future returned by running todo_tasklet.
todo: a list of (fut, arg) pairs. fut is the future returned by each add() call.
If the batch fut was successful, it has already called fut.set_result()
on other individual futs. This method only handles when the batch fut
encountered an exception.
""" |
self._running.remove(batch_fut)
err = batch_fut.get_exception()
if err is not None:
tb = batch_fut.get_traceback()
for (fut, _) in todo:
if not fut.done():
fut.set_exception(err, tb) |
<SYSTEM_TASK:>
Return all namespaces in the specified range.
<END_TASK>
<USER_TASK:>
Description:
def get_namespaces(start=None, end=None):
"""Return all namespaces in the specified range.
Args:
start: only return namespaces >= start if start is not None.
end: only return namespaces < end if end is not None.
Returns:
A list of namespace names between the (optional) start and end values.
""" |
q = Namespace.query()
if start is not None:
q = q.filter(Namespace.key >= Namespace.key_for_namespace(start))
if end is not None:
q = q.filter(Namespace.key < Namespace.key_for_namespace(end))
return [x.namespace_name for x in q] |
<SYSTEM_TASK:>
Return all kinds in the specified range, for the current namespace.
<END_TASK>
<USER_TASK:>
Description:
def get_kinds(start=None, end=None):
"""Return all kinds in the specified range, for the current namespace.
Args:
start: only return kinds >= start if start is not None.
end: only return kinds < end if end is not None.
Returns:
A list of kind names between the (optional) start and end values.
""" |
q = Kind.query()
if start is not None and start != '':
q = q.filter(Kind.key >= Kind.key_for_kind(start))
if end is not None:
if end == '':
return []
q = q.filter(Kind.key < Kind.key_for_kind(end))
return [x.kind_name for x in q] |
<SYSTEM_TASK:>
Return all properties of kind in the specified range.
<END_TASK>
<USER_TASK:>
Description:
def get_properties_of_kind(kind, start=None, end=None):
"""Return all properties of kind in the specified range.
NOTE: This function does not return unindexed properties.
Args:
kind: name of kind whose properties you want.
start: only return properties >= start if start is not None.
end: only return properties < end if end is not None.
Returns:
A list of property names of kind between the (optional) start and end
values.
""" |
q = Property.query(ancestor=Property.key_for_kind(kind))
if start is not None and start != '':
q = q.filter(Property.key >= Property.key_for_property(kind, start))
if end is not None:
if end == '':
return []
q = q.filter(Property.key < Property.key_for_property(kind, end))
return [Property.key_to_property(k) for k in q.iter(keys_only=True)] |
<SYSTEM_TASK:>
Return all representations of properties of kind in the specified range.
<END_TASK>
<USER_TASK:>
Description:
def get_representations_of_kind(kind, start=None, end=None):
"""Return all representations of properties of kind in the specified range.
NOTE: This function does not return unindexed properties.
Args:
kind: name of kind whose properties you want.
start: only return properties >= start if start is not None.
end: only return properties < end if end is not None.
Returns:
A dictionary mapping property names to its list of representations.
""" |
q = Property.query(ancestor=Property.key_for_kind(kind))
if start is not None and start != '':
q = q.filter(Property.key >= Property.key_for_property(kind, start))
if end is not None:
if end == '':
return {}
q = q.filter(Property.key < Property.key_for_property(kind, end))
result = {}
for property in q:
result[property.property_name] = property.property_representation
return result |
<SYSTEM_TASK:>
Return the version of the entity group containing key.
<END_TASK>
<USER_TASK:>
Description:
def get_entity_group_version(key):
"""Return the version of the entity group containing key.
Args:
key: a key for an entity group whose __entity_group__ key you want.
Returns:
The version of the entity group containing key. This version is
guaranteed to increase on every change to the entity group. The version
may increase even in the absence of user-visible changes to the entity
group. May return None if the entity group was never written to.
On non-HR datastores, this function returns None.
""" |
eg = EntityGroup.key_for_entity_group(key).get()
if eg:
return eg.version
else:
return None |
<SYSTEM_TASK:>
Return the Key for a namespace.
<END_TASK>
<USER_TASK:>
Description:
def key_for_namespace(cls, namespace):
"""Return the Key for a namespace.
Args:
namespace: A string giving the namespace whose key is requested.
Returns:
The Key for the namespace.
""" |
if namespace:
return model.Key(cls.KIND_NAME, namespace)
else:
return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID) |
<SYSTEM_TASK:>
Return the key for the entity group containing key.
<END_TASK>
<USER_TASK:>
Description:
def key_for_entity_group(cls, key):
"""Return the key for the entity group containing key.
Args:
key: a key for an entity group whose __entity_group__ key you want.
Returns:
The __entity_group__ key for the entity group containing key.
""" |
return model.Key(cls.KIND_NAME, cls.ID, parent=key.root()) |
<SYSTEM_TASK:>
Called by Django before deciding which view to execute.
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, unused_request):
"""Called by Django before deciding which view to execute.""" |
# Compare to the first half of toplevel() in context.py.
tasklets._state.clear_all_pending()
# Create and install a new context.
ctx = tasklets.make_default_context()
tasklets.set_context(ctx) |
<SYSTEM_TASK:>
Helper to construct a ContextOptions object from keyword arguments.
<END_TASK>
<USER_TASK:>
Description:
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
"""Helper to construct a ContextOptions object from keyword arguments.
Args:
ctx_options: A dict of keyword arguments.
config_cls: Optional Configuration class to use, default ContextOptions.
Note that either 'options' or 'config' can be used to pass another
Configuration object, but not both. If another Configuration
object is given it provides default values.
Returns:
A Configuration object, or None if ctx_options is empty.
""" |
if not ctx_options:
return None
for key in list(ctx_options):
translation = _OPTION_TRANSLATIONS.get(key)
if translation:
if translation in ctx_options:
raise ValueError('Cannot specify %s and %s at the same time' %
(key, translation))
ctx_options[translation] = ctx_options.pop(key)
return config_cls(**ctx_options) |
<SYSTEM_TASK:>
Set the context cache policy function.
<END_TASK>
<USER_TASK:>
Description:
def set_cache_policy(self, func):
"""Set the context cache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
""" |
if func is None:
func = self.default_cache_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._cache_policy = func |
<SYSTEM_TASK:>
Return whether to use the context cache for this key.
<END_TASK>
<USER_TASK:>
Description:
def _use_cache(self, key, options=None):
"""Return whether to use the context cache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached, False otherwise.
""" |
flag = ContextOptions.use_cache(options)
if flag is None:
flag = self._cache_policy(key)
if flag is None:
flag = ContextOptions.use_cache(self._conn.config)
if flag is None:
flag = True
return flag |
<SYSTEM_TASK:>
Set the memcache policy function.
<END_TASK>
<USER_TASK:>
Description:
def set_memcache_policy(self, func):
"""Set the memcache policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should be cached. May be None.
""" |
if func is None:
func = self.default_memcache_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._memcache_policy = func |
<SYSTEM_TASK:>
Return whether to use memcache for this key.
<END_TASK>
<USER_TASK:>
Description:
def _use_memcache(self, key, options=None):
"""Return whether to use memcache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached in memcache, False otherwise.
""" |
flag = ContextOptions.use_memcache(options)
if flag is None:
flag = self._memcache_policy(key)
if flag is None:
flag = ContextOptions.use_memcache(self._conn.config)
if flag is None:
flag = True
return flag |
<SYSTEM_TASK:>
Default datastore policy.
<END_TASK>
<USER_TASK:>
Description:
def default_datastore_policy(key):
"""Default datastore policy.
This defers to _use_datastore on the Model class.
Args:
key: Key instance.
Returns:
A bool or None.
""" |
flag = None
if key is not None:
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_use_datastore', None)
if policy is not None:
if isinstance(policy, bool):
flag = policy
else:
flag = policy(key)
return flag |
<SYSTEM_TASK:>
Set the context datastore policy function.
<END_TASK>
<USER_TASK:>
Description:
def set_datastore_policy(self, func):
"""Set the context datastore policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should use the datastore. May be None.
""" |
if func is None:
func = self.default_datastore_policy
elif isinstance(func, bool):
func = lambda unused_key, flag=func: flag
self._datastore_policy = func |
<SYSTEM_TASK:>
Return whether to use the datastore for this key.
<END_TASK>
<USER_TASK:>
Description:
def _use_datastore(self, key, options=None):
"""Return whether to use the datastore for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the datastore should be used, False otherwise.
""" |
flag = ContextOptions.use_datastore(options)
if flag is None:
flag = self._datastore_policy(key)
if flag is None:
flag = ContextOptions.use_datastore(self._conn.config)
if flag is None:
flag = True
return flag |
<SYSTEM_TASK:>
Default memcache timeout policy.
<END_TASK>
<USER_TASK:>
Description:
def default_memcache_timeout_policy(key):
"""Default memcache timeout policy.
This defers to _memcache_timeout on the Model class.
Args:
key: Key instance.
Returns:
Memcache timeout to use (integer), or None.
""" |
timeout = None
if key is not None and isinstance(key, model.Key):
modelclass = model.Model._kind_map.get(key.kind())
if modelclass is not None:
policy = getattr(modelclass, '_memcache_timeout', None)
if policy is not None:
if isinstance(policy, (int, long)):
timeout = policy
else:
timeout = policy(key)
return timeout |
<SYSTEM_TASK:>
Returns a cached Model instance given the entity key if available.
<END_TASK>
<USER_TASK:>
Description:
def _load_from_cache_if_available(self, key):
"""Returns a cached Model instance given the entity key if available.
Args:
key: Key instance.
Returns:
A Model instance if the key exists in the cache.
""" |
if key in self._cache:
entity = self._cache[key] # May be None, meaning "doesn't exist".
if entity is None or entity._key == key:
# If entity's key didn't change later, it is ok.
# See issue 13. http://goo.gl/jxjOP
raise tasklets.Return(entity) |
<SYSTEM_TASK:>
Return a Model instance given the entity key.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, **ctx_options):
"""Return a Model instance given the entity key.
It will use the context cache if the cache policy for the given
key is enabled.
Args:
key: Key instance.
**ctx_options: Context options.
Returns:
A Model instance if the key exists in the datastore; None otherwise.
""" |
options = _make_ctx_options(ctx_options)
use_cache = self._use_cache(key, options)
if use_cache:
self._load_from_cache_if_available(key)
use_datastore = self._use_datastore(key, options)
if (use_datastore and
isinstance(self._conn, datastore_rpc.TransactionalConnection)):
use_memcache = False
else:
use_memcache = self._use_memcache(key, options)
ns = key.namespace()
memcache_deadline = None # Avoid worries about uninitialized variable.
if use_memcache:
mkey = self._memcache_prefix + key.urlsafe()
memcache_deadline = self._get_memcache_deadline(options)
mvalue = yield self.memcache_get(mkey, for_cas=use_datastore,
namespace=ns, use_cache=True,
deadline=memcache_deadline)
# A value may have appeared while yielding.
if use_cache:
self._load_from_cache_if_available(key)
if mvalue not in (_LOCKED, None):
cls = model.Model._lookup_model(key.kind(),
self._conn.adapter.default_model)
pb = entity_pb.EntityProto()
try:
pb.MergePartialFromString(mvalue)
except ProtocolBuffer.ProtocolBufferDecodeError:
logging.warning('Corrupt memcache entry found '
'with key %s and namespace %s' % (mkey, ns))
mvalue = None
else:
entity = cls._from_pb(pb)
# Store the key on the entity since it wasn't written to memcache.
entity._key = key
if use_cache:
# Update in-memory cache.
self._cache[key] = entity
raise tasklets.Return(entity)
if mvalue is None and use_datastore:
yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns,
use_cache=True, deadline=memcache_deadline)
yield self.memcache_gets(mkey, namespace=ns, use_cache=True,
deadline=memcache_deadline)
if not use_datastore:
# NOTE: Do not cache this miss. In some scenarios this would
# prevent an app from working properly.
raise tasklets.Return(None)
if use_cache:
entity = yield self._get_batcher.add_once(key, options)
else:
entity = yield self._get_batcher.add(key, options)
if entity is not None:
if use_memcache and mvalue != _LOCKED:
# Don't serialize the key since it's already the memcache key.
pbs = entity._to_pb(set_key=False).SerializePartialToString()
# Don't attempt to write to memcache if too big. Note that we
# use LBYL ("look before you leap") because a multi-value
# memcache operation would fail for all entities rather than
# for just the one that's too big. (Also, the AutoBatcher
# class doesn't pass back exceptions very well.)
if len(pbs) <= memcache.MAX_VALUE_SIZE:
timeout = self._get_memcache_timeout(key, options)
# Don't use fire-and-forget -- for users who forget
# @ndb.toplevel, it's too painful to diagnose why their simple
# code using a single synchronous call doesn't seem to use
# memcache. See issue 105. http://goo.gl/JQZxp
yield self.memcache_cas(mkey, pbs, time=timeout, namespace=ns,
deadline=memcache_deadline)
if use_cache:
# Cache hit or miss. NOTE: In this case it is okay to cache a
# miss; the datastore is the ultimate authority.
self._cache[key] = entity
raise tasklets.Return(entity) |
<SYSTEM_TASK:>
Call a callback upon successful commit of a transaction.
<END_TASK>
<USER_TASK:>
Description:
def call_on_commit(self, callback):
"""Call a callback upon successful commit of a transaction.
If not in a transaction, the callback is called immediately.
In a transaction, multiple callbacks may be registered and will be
called once the transaction commits, in the order in which they
were registered. If the transaction fails, the callbacks will not
be called.
If the callback raises an exception, it bubbles up normally. This
means: If the callback is called immediately, any exception it
raises will bubble up immediately. If the call is postponed until
commit, remaining callbacks will be skipped and the exception will
bubble up through the transaction() call. (However, the
transaction is already committed at that point.)
""" |
if not self.in_transaction():
callback()
else:
self._on_commit_queue.append(callback) |
<SYSTEM_TASK:>
Return a Future for a nickname from an account.
<END_TASK>
<USER_TASK:>
Description:
def get_nickname(userid):
"""Return a Future for a nickname from an account.""" |
account = yield get_account(userid)
if not account:
nickname = 'Unregistered'
else:
nickname = account.nickname or account.email
raise ndb.Return(nickname) |
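A usage sketch, assuming get_nickname and get_account are both @ndb.tasklet functions as in the ndb documentation samples:
fut = get_nickname(42)
# other async work could be started here before blocking:
print fut.get_result()  # e.g. 'Unregistered' when no account exists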
<SYSTEM_TASK:>
Marks a task as done.
<END_TASK>
<USER_TASK:>
Description:
def mark_done(task_id):
"""Marks a task as done.
Args:
task_id: The integer id of the task to update.
Raises:
ValueError: if the requested task doesn't exist.
""" |
task = Task.get_by_id(task_id)
if task is None:
raise ValueError('Task with id %d does not exist' % task_id)
task.done = True
task.put() |
<SYSTEM_TASK:>
Converts a list of tasks to a list of string representations.
<END_TASK>
<USER_TASK:>
Description:
def format_tasks(tasks):
"""Converts a list of tasks to a list of string representations.
Args:
tasks: A list of the tasks to convert.
Returns:
A list of string formatted tasks.
""" |
return ['%d : %s (%s)' % (task.key.id(),
task.description,
('done' if task.done
else 'created %s' % task.created))
for task in tasks] |
<SYSTEM_TASK:>
Accepts a string command and performs an action.
<END_TASK>
<USER_TASK:>
Description:
def handle_command(command):
"""Accepts a string command and performs an action.
Args:
command: the command to run as a string.
""" |
try:
cmds = command.split(None, 1)
cmd = cmds[0]
if cmd == 'new':
add_task(get_arg(cmds))
elif cmd == 'done':
mark_done(int(get_arg(cmds)))
elif cmd == 'list':
for task in format_tasks(list_tasks()):
print task
elif cmd == 'delete':
delete_task(int(get_arg(cmds)))
else:
print_usage()
except Exception, e: # pylint: disable=broad-except
print e
print_usage() |
<SYSTEM_TASK:>
Create upload URL for POST form.
<END_TASK>
<USER_TASK:>
Description:
def create_upload_url(success_path,
max_bytes_per_blob=None,
max_bytes_total=None,
**options):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values.
""" |
fut = create_upload_url_async(success_path,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
**options)
return fut.get_result() |
<SYSTEM_TASK:>
Parse a BlobInfo record from file upload field_storage.
<END_TASK>
<USER_TASK:>
Description:
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
""" |
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dct, name):
value = dct.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key_str = get_value(field_storage.type_options, 'blob-key')
blob_key = BlobKey(blob_key_str)
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
md5_hash_encoded = get_value(upload_content, 'content-md5')
md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(id=blob_key_str,
content_type=content_type,
creation=creation,
filename=filename,
size=size,
md5_hash=md5_hash,
) |
<SYSTEM_TASK:>
Fetch data for blob.
<END_TASK>
<USER_TASK:>
Description:
def fetch_data(blob, start_index, end_index, **options):
"""Fetch data for blob.
Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from start_index until the end of the blob, which will be
a smaller size than requested. Requesting a fragment that is entirely
outside the boundaries of the blob will return an empty string. Attempting
to fetch a negative index will raise an exception.
Args:
blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
blob to fetch data from.
start_index: Start index of blob data to fetch. May not be negative.
end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
**options: Options for create_rpc().
Returns:
    str containing partial data of blob. If the indexes are legal but outside
    the boundaries of the blob, an empty string is returned.
Raises:
TypeError if start_index or end_index are not indexes. Also when blob
is not a string, BlobKey or BlobInfo.
DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
BlobFetchSizeTooLargeError when request blob fragment is larger than
MAX_BLOB_FETCH_SIZE.
BlobNotFoundError when blob does not exist.
""" |
fut = fetch_data_async(blob, start_index, end_index, **options)
return fut.get_result() |
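Because a single call is capped at MAX_BLOB_FETCH_SIZE, reading a whole blob
means looping over fragments. A minimal sketch (the blob key and total size
are assumed known):

def read_all(blob_key, blob_size):
    """Reads an entire blob in MAX_BLOB_FETCH_SIZE chunks."""
    parts = []
    start = 0
    while start < blob_size:
        end = min(start + MAX_BLOB_FETCH_SIZE, blob_size) - 1  # end_index is inclusive
        parts.append(fetch_data(blob_key, start, end))
        start = end + 1
    return ''.join(parts)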
<SYSTEM_TASK:>
Retrieve a BlobInfo by key.
<END_TASK>
<USER_TASK:>
Description:
def get(cls, blob_key, **ctx_options):
"""Retrieve a BlobInfo by key.
Args:
blob_key: A blob key. This may be a str, unicode or BlobKey instance.
**ctx_options: Context options for Model().get_by_id().
Returns:
      A BlobInfo entity associated with the provided key. If there was
      no such entity, returns None.
""" |
fut = cls.get_async(blob_key, **ctx_options)
return fut.get_result() |
<SYSTEM_TASK:>
Permanently delete this blob from Blobstore.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, **options):
"""Permanently delete this blob from Blobstore.
Args:
**options: Options for create_rpc().
""" |
fut = delete_async(self.key(), **options)
fut.get_result() |
<SYSTEM_TASK:>
Fills the internal buffer.
<END_TASK>
<USER_TASK:>
Description:
def __fill_buffer(self, size=0):
"""Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE].
""" |
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position,
self.__position + read_size - 1)
self.__buffer_position = 0
self.__eof = len(self.__buffer) < read_size |
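The clamping arithmetic is easiest to see in isolation. A standalone sketch
(the buffer size is illustrative; the MAX_BLOB_FETCH_SIZE value is an
assumption about the SDK constant):

MAX_BLOB_FETCH_SIZE = (1 << 20) - (1 << 15)  # assumed SDK value
DEFAULT_BUFFER_SIZE = 131072                 # illustrative

def clamp_read_size(requested, buffer_size=DEFAULT_BUFFER_SIZE):
    """Never read less than the buffer size, never more than the fetch cap."""
    return min(max(requested, buffer_size), MAX_BLOB_FETCH_SIZE)

assert clamp_read_size(0) == DEFAULT_BUFFER_SIZE        # small reads round up
assert clamp_read_size(10 ** 9) == MAX_BLOB_FETCH_SIZE  # huge reads are capped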
<SYSTEM_TASK:>
Create a new Connection object with the right adapter.
<END_TASK>
<USER_TASK:>
Description:
def make_connection(config=None, default_model=None,
_api_version=datastore_rpc._DATASTORE_V3,
_id_resolver=None):
"""Create a new Connection object with the right adapter.
Optionally you can pass in a datastore_rpc.Configuration object.
""" |
return datastore_rpc.Connection(
adapter=ModelAdapter(default_model, id_resolver=_id_resolver),
config=config,
_api_version=_api_version) |
<SYSTEM_TASK:>
Internal helper to unpack a User value from a protocol buffer.
<END_TASK>
<USER_TASK:>
Description:
def _unpack_user(v):
"""Internal helper to unpack a User value from a protocol buffer.""" |
uv = v.uservalue()
email = unicode(uv.email().decode('utf-8'))
auth_domain = unicode(uv.auth_domain().decode('utf-8'))
obfuscated_gaiaid = uv.obfuscated_gaiaid().decode('utf-8')
obfuscated_gaiaid = unicode(obfuscated_gaiaid)
federated_identity = None
if uv.has_federated_identity():
federated_identity = unicode(
uv.federated_identity().decode('utf-8'))
value = users.User(email=email,
_auth_domain=auth_domain,
_user_id=obfuscated_gaiaid,
federated_identity=federated_identity)
return value |
<SYSTEM_TASK:>
Decorator to make a function automatically run in a transaction.
<END_TASK>
<USER_TASK:>
Description:
def transactional(func, args, kwds, **options):
"""Decorator to make a function automatically run in a transaction.
Args:
**ctx_options: Transaction options (see transaction(), but propagation
      defaults to TransactionOptions.ALLOWED).
This supports two forms:
(1) Vanilla:
@transactional
def callback(arg):
...
(2) With options:
@transactional(retries=1)
def callback(arg):
...
""" |
return transactional_async.wrapped_decorator(
func, args, kwds, **options).get_result() |
<SYSTEM_TASK:>
A decorator that ensures a function is run outside a transaction.
<END_TASK>
<USER_TASK:>
Description:
def non_transactional(func, args, kwds, allow_existing=True):
"""A decorator that ensures a function is run outside a transaction.
If there is an existing transaction (and allow_existing=True), the
existing transaction is paused while the function is executed.
Args:
allow_existing: If false, throw an exception if called from within
a transaction. If true, temporarily re-establish the
previous non-transactional context. Defaults to True.
This supports two forms, similar to transactional().
Returns:
A wrapper for the decorated function that ensures it runs outside a
transaction.
""" |
from . import tasklets
ctx = tasklets.get_context()
if not ctx.in_transaction():
return func(*args, **kwds)
if not allow_existing:
raise datastore_errors.BadRequestError(
'%s cannot be called within a transaction.' % func.__name__)
save_ctx = ctx
while ctx.in_transaction():
ctx = ctx._parent_context
if ctx is None:
raise datastore_errors.BadRequestError(
'Context without non-transactional ancestor')
save_ds_conn = datastore._GetConnection()
try:
if hasattr(save_ctx, '_old_ds_conn'):
datastore._SetConnection(save_ctx._old_ds_conn)
tasklets.set_context(ctx)
return func(*args, **kwds)
finally:
tasklets.set_context(save_ctx)
datastore._SetConnection(save_ds_conn) |
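Usage mirrors transactional(); a hedged example (AccessLog is a hypothetical
model, not part of the library):

@non_transactional
def log_access(user_id):
    # A hypothetical audit write that must not join the caller's transaction.
    AccessLog(user=user_id).put()

@non_transactional(allow_existing=False)
def must_run_outside(user_id):
    pass  # raises BadRequestError if called inside a transaction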
<SYSTEM_TASK:>
Updates all descendants to a specified value.
<END_TASK>
<USER_TASK:>
Description:
def _set(self, value):
"""Updates all descendants to a specified value.""" |
if self.__is_parent_node():
for child in self.__sub_counters.itervalues():
child._set(value)
else:
self.__counter = value |
<SYSTEM_TASK:>
Internal helper for comparison operators.
<END_TASK>
<USER_TASK:>
Description:
def _comparison(self, op, value):
"""Internal helper for comparison operators.
Args:
op: The operator ('=', '<' etc.).
Returns:
A FilterNode instance representing the requested comparison.
""" |
# NOTE: This is also used by query.gql().
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if value is not None:
value = self._do_validate(value)
value = self._call_to_base_type(value)
value = self._datastore_type(value)
return FilterNode(self._name, op, value) |
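In practice this helper runs whenever a property appears in a query
expression; a sketch with a hypothetical model:

from google.appengine.ext import ndb

class Employee(ndb.Model):      # hypothetical model for illustration
    rank = ndb.IntegerProperty()

node = Employee.rank < 5        # dispatches to _comparison('<', 5)
query = Employee.query(node)    # node is FilterNode('rank', '<', 5)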
<SYSTEM_TASK:>
Comparison operator for the 'in' comparison operator.
<END_TASK>
<USER_TASK:>
Description:
def _IN(self, value):
"""Comparison operator for the 'in' comparison operator.
The Python 'in' operator cannot be overloaded in the way we want
to, so we define a method. For example::
Employee.query(Employee.rank.IN([4, 5, 6]))
Note that the method is called ._IN() but may normally be invoked
as .IN(); ._IN() is provided for the case you have a
StructuredProperty with a model that has a Property named IN.
""" |
if not self._indexed:
raise datastore_errors.BadFilterError(
'Cannot query for unindexed property %s' % self._name)
from .query import FilterNode # Import late to avoid circular imports.
if not isinstance(value, (list, tuple, set, frozenset)):
raise datastore_errors.BadArgumentError(
'Expected list, tuple or set, got %r' % (value,))
values = []
for val in value:
if val is not None:
val = self._do_validate(val)
val = self._call_to_base_type(val)
val = self._datastore_type(val)
values.append(val)
return FilterNode(self._name, 'in', values) |
<SYSTEM_TASK:>
Call all validations on the value.
<END_TASK>
<USER_TASK:>
Description:
def _do_validate(self, value):
"""Call all validations on the value.
This calls the most derived _validate() method(s), then the custom
validator function, and then checks the choices. It returns the
value, possibly modified in an idempotent way, or raises an
exception.
Note that this does not call all composable _validate() methods.
It only calls _validate() methods up to but not including the
first _to_base_type() method, when the MRO is traversed looking
    for _validate() and _to_base_type() methods. (In other words, if a class
defines both _validate() and _to_base_type(), its _validate()
is called and then the search is aborted.)
Note that for a repeated Property this function should be called
for each item in the list, not for the list as a whole.
""" |
if isinstance(value, _BaseValue):
return value
value = self._call_shallow_validation(value)
if self._validator is not None:
newvalue = self._validator(self, value)
if newvalue is not None:
value = newvalue
if self._choices is not None:
if value not in self._choices:
raise datastore_errors.BadValueError(
'Value %r for property %s is not an allowed choice' %
(value, self._name))
return value |
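The validator and choices hooks checked here are the public Property options;
a hedged sketch (Paint is a hypothetical model, ndb imported as in the
earlier sketch):

def _normalize_color(prop, value):
    # Custom validator: return a replacement value, or None to keep it as-is.
    return value.lower()

class Paint(ndb.Model):
    color = ndb.StringProperty(validator=_normalize_color,
                               choices=['red', 'green', 'blue'])

Paint(color='RED')    # validator lowercases first, so 'red' passes choices
Paint(color='mauve')  # raises BadValueError: not an allowed choice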
<SYSTEM_TASK:>
Internal helper called to tell the property its name.
<END_TASK>
<USER_TASK:>
Description:
def _fix_up(self, cls, code_name):
"""Internal helper called to tell the property its name.
This is called by _fix_up_properties() which is called by
MetaModel when finishing the construction of a Model subclass.
The name passed in is the name of the class attribute to which the
Property is assigned (a.k.a. the code name). Note that this means
that each Property instance must be assigned to (at most) one
class attribute. E.g. to declare three strings, you must call
    StringProperty() three times; you cannot write
foo = bar = baz = StringProperty()
""" |
self._code_name = code_name
if self._name is None:
self._name = code_name |
<SYSTEM_TASK:>
Internal helper to set a value in an entity for a Property.
<END_TASK>
<USER_TASK:>
Description:
def _set_value(self, entity, value):
"""Internal helper to set a value in an entity for a Property.
This performs validation first. For a repeated Property the value
should be a list.
""" |
if entity._projection:
raise ReadonlyPropertyError(
'You cannot set property values of a projection entity')
if self._repeated:
if not isinstance(value, (list, tuple, set, frozenset)):
        raise datastore_errors.BadValueError(
            'Expected list, tuple or set, got %r' % (value,))
value = [self._do_validate(v) for v in value]
else:
if value is not None:
value = self._do_validate(value)
self._store_value(entity, value) |
<SYSTEM_TASK:>
Internal helper to retrieve the value for this Property from an entity.
<END_TASK>
<USER_TASK:>
Description:
def _retrieve_value(self, entity, default=None):
"""Internal helper to retrieve the value for this Property from an entity.
This returns None if no value is set, or the default argument if
given. For a repeated Property this returns a list if a value is
set, otherwise None. No additional transformations are applied.
""" |
return entity._values.get(self._name, default) |
<SYSTEM_TASK:>
Compute a list of composable methods.
<END_TASK>
<USER_TASK:>
Description:
def _find_methods(cls, *names, **kwds):
"""Compute a list of composable methods.
Because this is a common operation and the class hierarchy is
static, the outcome is cached (assuming that for a particular list
    of names the reverse flag is either always on or always off).
Args:
*names: One or more method names.
reverse: Optional flag, default False; if True, the list is
reversed.
Returns:
A list of callable class method objects.
""" |
reverse = kwds.pop('reverse', False)
assert not kwds, repr(kwds)
cache = cls.__dict__.get('_find_methods_cache')
if cache:
hit = cache.get(names)
if hit is not None:
return hit
else:
cls._find_methods_cache = cache = {}
methods = []
for c in cls.__mro__:
for name in names:
method = c.__dict__.get(name)
if method is not None:
methods.append(method)
if reverse:
methods.reverse()
cache[names] = methods
return methods |
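The MRO walk is a general pattern; this standalone sketch (plain Python, no
NDB) shows the same collection order:

def find_methods(cls, name, reverse=False):
    """Collects every definition of `name` along cls.__mro__."""
    methods = [c.__dict__[name] for c in cls.__mro__ if name in c.__dict__]
    if reverse:
        methods.reverse()
    return methods

class A(object):
    def _validate(self, v):
        return v

class B(A):
    def _validate(self, v):
        return v

# Most-derived first by default, base-class first when reversed.
assert find_methods(B, '_validate') == [B.__dict__['_validate'],
                                        A.__dict__['_validate']]
assert find_methods(B, '_validate', reverse=True)[0] is A.__dict__['_validate']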
<SYSTEM_TASK:>
Return a single callable that applies a list of methods to a value.
<END_TASK>
<USER_TASK:>
Description:
def _apply_list(self, methods):
"""Return a single callable that applies a list of methods to a value.
If a method returns None, the last value is kept; if it returns
some other value, that replaces the last value. Exceptions are
not caught.
""" |
def call(value):
for method in methods:
newvalue = method(self, value)
if newvalue is not None:
value = newvalue
return value
return call |
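The "None keeps the last value" convention is what lets composed methods opt
out of a conversion; a standalone sketch of the same idea:

def apply_list(methods):
    """Chains methods; a None return leaves the running value unchanged."""
    def call(value):
        for method in methods:
            newvalue = method(value)
            if newvalue is not None:
                value = newvalue
        return value
    return call

strip = lambda v: v.strip()
upper = lambda v: v.upper()
noop = lambda v: None  # opts out; the previous value is kept

pipeline = apply_list([strip, noop, upper])
assert pipeline('  hello ') == 'HELLO'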
<SYSTEM_TASK:>
Internal helper to get the value for this Property from an entity.
<END_TASK>
<USER_TASK:>
Description:
def _get_value(self, entity):
"""Internal helper to get the value for this Property from an entity.
For a repeated Property this initializes the value to an empty
list if it is not set.
""" |
if entity._projection:
if self._name not in entity._projection:
raise UnprojectedPropertyError(
'Property %s is not in the projection' % (self._name,))
return self._get_user_value(entity) |
<SYSTEM_TASK:>
Internal helper to delete the value for this Property from an entity.
<END_TASK>
<USER_TASK:>
Description:
def _delete_value(self, entity):
"""Internal helper to delete the value for this Property from an entity.
Note that if no value exists this is a no-op; deleted values will
not be serialized but requesting their value will return None (or
an empty list in the case of a repeated Property).
""" |
if self._name in entity._values:
del entity._values[self._name] |
<SYSTEM_TASK:>
Internal helper to ask if the entity has a value for this Property.
<END_TASK>
<USER_TASK:>
Description:
def _is_initialized(self, entity):
"""Internal helper to ask if the entity has a value for this Property.
This returns False if a value is stored but it is None.
""" |
return (not self._required or
((self._has_value(entity) or self._default is not None) and
self._get_value(entity) is not None)) |
<SYSTEM_TASK:>
Internal helper to serialize this property to a protocol buffer.
<END_TASK>
<USER_TASK:>
Description:
def _serialize(self, entity, pb, prefix='', parent_repeated=False,
projection=None):
"""Internal helper to serialize this property to a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
pb: The protocol buffer, an EntityProto instance.
prefix: Optional name prefix used for StructuredProperty
(if present, must end in '.').
parent_repeated: True if the parent (or an earlier ancestor)
is a repeated Property.
projection: A list or tuple of strings representing the projection for
the model instance, or None if the instance is not a projection.
""" |
values = self._get_base_value_unwrapped_as_list(entity)
name = prefix + self._name
if projection and name not in projection:
return
if self._indexed:
create_prop = lambda: pb.add_property()
else:
create_prop = lambda: pb.add_raw_property()
if self._repeated and not values and self._write_empty_list:
# We want to write the empty list
p = create_prop()
p.set_name(name)
p.set_multiple(False)
p.set_meaning(entity_pb.Property.EMPTY_LIST)
p.mutable_value()
else:
# We write a list, or a single property
for val in values:
p = create_prop()
p.set_name(name)
p.set_multiple(self._repeated or parent_repeated)
v = p.mutable_value()
if val is not None:
self._db_set_value(v, p, val)
if projection:
# Projected properties have the INDEX_VALUE meaning and only contain
# the original property's name and value.
new_p = entity_pb.Property()
new_p.set_name(p.name())
new_p.set_meaning(entity_pb.Property.INDEX_VALUE)
new_p.set_multiple(False)
new_p.mutable_value().CopyFrom(v)
p.CopyFrom(new_p) |