text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def basicauthfail(self, realm = b'all'):
    """
    Send a 401 response to signal HTTP Basic authentication failure.

    This ends the handler immediately.

    :param realm: authentication realm; ``bytes``, or ``str`` which is
                  encoded as ASCII
    """
    realm_bytes = realm if isinstance(realm, bytes) else realm.encode('ascii')
    self.start_response(401, [(b'WWW-Authenticate', b'Basic realm="' + realm_bytes + b'"')])
    self.exit(b'<h1>' + _createstatus(401) + b'</h1>')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def createcsrf(self, csrfarg = '_csrf'):
    """
    Ensure an anti-CSRF token exists in the current session.

    :param csrfarg: session variable name used to store the token
    """
    await self.sessionstart()
    # Generate a token only when one is not already present, so an existing
    # token is never rotated mid-session
    if csrfarg not in self.session.vars:
        self.session.vars[csrfarg] = uuid.uuid4().hex
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def outputjson(self, obj):
    """
    Serialize *obj* as JSON and send it to the client with the
    ``application/json`` content type.
    """
    self.header('Content-Type', 'application/json')
    body = json.dumps(obj).encode('ascii')
    self.outputdata(body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand(cls, match, expand):
    """
    Expand a regex replacement template safely.

    Using ``Match.expand`` directly would re-decode the already url-decoded
    content, creating a security issue; wrap the match so captured text is
    quoted before expansion.
    """
    # re._expand is the private helper behind Match.expand; the wrapped
    # match re-quotes group values before they are substituted
    return re._expand(match.re, cls._EncodedMatch(match), expand)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrite(self, path, expand, newmethod = None, host = None, vhost = None, method = [b'GET', b'HEAD'], keepquery = True):
    "Automatically rewrite a request to another location"
    async def _rewrite_handler(env):
        # Build the rewrite target from the matched path groups
        target = self.expand(env.path_match, expand)
        if keepquery:
            qs = getattr(env, 'querystring', None)
            if qs:
                # Append with '&' when the target already carries a query string
                separator = b'&' if b'?' in target else b'?'
                target += separator + qs
        await env.rewrite(target, newmethod)
    self.route(path, _rewrite_handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def routeargs(path, host = None, vhost = None, method = None, **kwargs):
    """
    Decorator marking a handler for routing with extra arguments.

    For extra arguments, see ``Dispatcher.routeargs``; they must be
    specified by keyword arguments.

    :param path: route path pattern
    :param host: optional host filter
    :param vhost: optional virtual-host filter
    :param method: allowed HTTP methods; defaults to ``[b'POST']``
    :param kwargs: extra keyword arguments stored for ``Dispatcher.routeargs``
    """
    def decorator(func):
        func.routemode = 'routeargs'
        func.route_path = path
        func.route_host = host
        func.route_vhost = vhost
        # Build a fresh list per decorated function instead of sharing the
        # previous mutable default, so mutating one handler's route_method
        # cannot affect others
        func.route_method = [b'POST'] if method is None else method
        func.route_kwargs = kwargs
        return func
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _close_generator(g):
    """
    Close generator *g* without triggering the PyPy 3 bug where calling
    ``close`` leaks memory: throw GeneratorExit_ into plain generators
    instead.
    """
    if isinstance(g, generatorwrapper):
        # Our own wrapper implements a safe close
        g.close()
        return
    if _get_frame(g) is None:
        # Never started or already finished: nothing to clean up
        return
    try:
        g.throw(GeneratorExit_)
    except (StopIteration, GeneratorExit_):
        pass
    else:
        # The generator yielded again instead of exiting
        raise RuntimeError("coroutine ignored GeneratorExit")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Routine(coroutine, scheduler, asyncStart = True, container = None, manualStart = False, daemon = False):
    """
    Wrap a normal coroutine to become a VLCP routine.

    Usually you do not need to call this yourself; ``container.start`` and
    ``container.subroutine`` call this automatically.

    :param coroutine: the coroutine to wrap
    :param scheduler: scheduler that registers/unregisters event matchers
    :param asyncStart: when True, delay the first step until the scheduler
                       resumes this routine
    :param container: optional routine container; its ``currentroutine`` is
                      kept up to date around every step
    :param manualStart: when True, wait for an extra send() before starting
    :param daemon: when True, mark the routine as a daemon in the scheduler
    :returns: a started ``generatorwrapper`` driving the coroutine
    """
    def run():
        iterator = _await(coroutine)
        # First send() delivers the wrapper itself, so the routine can be
        # registered under its own identity
        iterself = yield
        if manualStart:
            yield
        try:
            if asyncStart:
                scheduler.yield_(iterself)
                yield
            if container is not None:
                container.currentroutine = iterself
            if daemon:
                scheduler.setDaemon(iterself, True)
            try:
                matchers = next(iterator)
            except StopIteration:
                return
            # A None from the coroutine means "just yield control once"
            while matchers is None:
                scheduler.yield_(iterself)
                yield
                try:
                    matchers = next(iterator)
                except StopIteration:
                    return
            try:
                scheduler.register(matchers, iterself)
            except Exception:
                # Give the coroutine a chance to handle bad matchers, then
                # re-raise to the caller
                try:
                    iterator.throw(IllegalMatchersException(matchers))
                except StopIteration:
                    pass
                raise
            while True:
                try:
                    etup = yield
                except GeneratorExit_:
                    raise
                except:
                    #scheduler.unregister(matchers, iterself)
                    lmatchers = matchers
                    t,v,tr = sys.exc_info()  # @UnusedVariable
                    if container is not None:
                        container.currentroutine = iterself
                    # Forward the injected exception into the coroutine
                    try:
                        matchers = iterator.throw(t,v)
                    except StopIteration:
                        return
                else:
                    #scheduler.unregister(matchers, iterself)
                    lmatchers = matchers
                    if container is not None:
                        container.currentroutine = iterself
                    try:
                        matchers = iterator.send(etup)
                    except StopIteration:
                        return
                while matchers is None:
                    scheduler.yield_(iterself)
                    yield
                    try:
                        matchers = next(iterator)
                    except StopIteration:
                        return
                try:
                    # Incrementally update registrations: only register the
                    # new matchers and unregister the dropped ones
                    if hasattr(matchers, 'two_way_difference'):
                        reg, unreg = matchers.two_way_difference(lmatchers)
                    else:
                        reg = set(matchers).difference(lmatchers)
                        unreg = set(lmatchers).difference(matchers)
                    scheduler.register(reg, iterself)
                    scheduler.unregister(unreg, iterself)
                except Exception:
                    try:
                        iterator.throw(IllegalMatchersException(matchers))
                    except StopIteration:
                        pass
                    raise
        finally:
            # iterator.close() can be called in other routines, we should
            # restore the currentroutine variable afterwards
            if container is not None:
                lastcurrentroutine = getattr(container, 'currentroutine', None)
                container.currentroutine = iterself
            else:
                lastcurrentroutine = None
            _close_generator(coroutine)
            if container is not None:
                container.currentroutine = lastcurrentroutine
            scheduler.unregisterall(iterself)
    r = generatorwrapper(run())
    # Prime the generator, then hand it its own identity
    next(r)
    r.send(r)
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def with_exception(self, subprocess, *matchers):
    """
    Run *subprocess* while monitoring *matchers*; if any event matches
    before the subprocess finishes, the subprocess is terminated and a
    RoutineException is raised.
    """
    def _raise_on_match(event, matcher):
        raise RoutineException(matcher, event)
    return await self.with_callback(subprocess, _raise_on_match, *matchers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def wait_for_all_to_process(self, *matchers, eventlist = None, eventdict = None, callback = None):
    """
    Like ``waitForAll`` but marks every matched event with
    ``canignore=True`` so blocking events are processed correctly.

    :param callback: optional callback invoked after the flag is set
    """
    def _mark_processed(event, matcher):
        event.canignore = True
        if callback:
            callback(event, matcher)
    return await self.wait_for_all(*matchers, eventlist=eventlist,
                                   eventdict=eventdict, callback=_mark_processed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def wait_for_all_empty(self, *queues):
    """
    Wait until every queue in *queues* is empty at the same time.

    Requires delegate when called from coroutines running in other
    containers.
    """
    def _pending_matchers():
        return [m for m in (q.waitForEmpty() for q in queues) if m is not None]
    pending = _pending_matchers()
    while pending:
        await self.wait_for_all(*pending)
        # Queues may have been refilled while we waited; re-check all of them
        pending = _pending_matchers()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def syscall(self, func, ignoreException = False):
    """
    Execute a syscall method and return its result.

    :param func: syscall body to execute
    :param ignoreException: when True, a raised exception is swallowed and
                            None is returned instead
    """
    ev = await self.syscall_noreturn(func)
    if not hasattr(ev, 'exception'):
        return ev.retvalue
    if not ignoreException:
        raise ev.exception[1]
    # An exception occurred but was requested to be ignored: return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def end_delegate(self, delegate_matcher, routine = None, forceclose = False):
    """
    Retrieve a ``begin_delegate`` result.

    Must be called immediately after ``begin_delegate`` before any other
    ``await``, or the result might be lost. Do not use this method without
    thinking; prefer ``RoutineFuture`` when possible.
    """
    try:
        ev = await delegate_matcher
        if not hasattr(ev, 'exception'):
            return ev.result
        raise ev.exception
    finally:
        if forceclose and routine:
            routine.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container(cls, scheduler):
    """
    Return a cached temporary instance bound to *scheduler*, creating and
    caching one on first use (for helper functions).
    """
    try:
        return cls._container_cache[scheduler]
    except KeyError:
        instance = cls(scheduler)
        cls._container_cache[scheduler] = instance
        return instance
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxy(name, default = None):
    """
    Create a proxy module class.

    A proxy module has a default implementation but can be redirected to
    other implementations with configurations; other modules can depend on
    proxy modules.

    :param name: class name of the generated proxy module
    :param default: default implementation module
    """
    generated = _ProxyMetaClass(name, (_ProxyModule,), {'_default': default})
    # Attribute the generated class to the caller's module, as if it were
    # defined there
    generated.__module__ = sys._getframe(1).f_globals.get('__name__')
    return generated
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def send_api(container, targetname, name, params = {}):
    """
    Send an API call event and discard the result.
    """
    # A unique handle object identifies this call; the reply is never awaited
    handle = object()
    request = ModuleAPICall(handle, targetname, name, params = params)
    await container.wait_for_send(request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def batch_call_api(container, apis, timeout = 120.0):
    """
    DEPRECATED - use execute_all instead

    Send multiple API calls in parallel and collect their replies until
    *timeout*; calls without a reply are cancelled and reported as None.

    :param container: routine container used for sending and waiting
    :param apis: iterable of ``(targetname, name, params)`` tuples
    :param timeout: overall timeout in seconds
    :returns: list of reply events in input order, None where no reply arrived
    """
    # Pair each request with a unique handle object so replies can be matched
    apiHandles = [(object(), api) for api in apis]
    apiEvents = [ModuleAPICall(handle, targetname, name, params = params)
                 for handle, (targetname, name, params) in apiHandles]
    apiMatchers = tuple(ModuleAPIReply.createMatcher(handle) for handle, _ in apiHandles)
    async def process():
        # Send from a subroutine so reply collection can start immediately
        for e in apiEvents:
            await container.wait_for_send(e)
    container.subroutine(process(), False)
    eventdict = {}
    async def process2():
        ms = len(apiMatchers)
        matchers = Diff_(apiMatchers)
        # NOTE(review): `ms` is never decremented here, so this loop only
        # terminates through the outer execute_with_timeout — it looks like a
        # `ms -= 1` is missing; confirm against the project history.
        while ms:
            ev, m = await matchers
            # Stop listening on the matcher that just fired
            matchers = Diff_(matchers, remove=(m,))
            eventdict[ev.handle] = ev
    await container.execute_with_timeout(timeout, process2())
    for e in apiEvents:
        if e.handle not in eventdict:
            # No reply: mark the pending call ignorable and drop it
            e.canignore = True
            container.scheduler.ignore(ModuleAPICall.createMatcher(e.handle))
    return [eventdict.get(handle, None) for handle, _ in apiHandles]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def registerAPI(self, name, handler, container = None, discoverinfo = None, criteria = None):
    """
    Append a new API to this handler.

    :param name: API name
    :param handler: API implementation; its docstring becomes the default
                    discovery description
    :param container: optional container for the handler routine
    :param discoverinfo: optional discovery info dict; when None, a default
                         one is built from the handler's docstring
    :param criteria: optional extra matching criteria for the API call
    """
    self.handler.registerHandler(*self._createHandler(name, handler, container, criteria))
    if discoverinfo is None:
        # cleandoc(None) raises, so tolerate handlers without a docstring
        self.discoverinfo[name] = {'description': cleandoc(handler.__doc__ or '')}
    else:
        self.discoverinfo[name] = discoverinfo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unregisterAPI(self, name):
    """
    Remove an API from this handler.

    Names starting with ``public/`` target the shared 'public' service;
    anything else targets this module's own service name.
    """
    public_prefix = 'public/'
    if name.startswith(public_prefix):
        target = 'public'
        api_name = name[len(public_prefix):]
    else:
        target = self.servicename
        api_name = name
    # Collect first, then unregister, to avoid mutating while iterating
    matched = [m for m in self.handler.handlers.keys()
               if m.target == target and m.name == api_name]
    for m in matched:
        self.handler.unregisterHandler(m)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discover(self, details = False):
    'Discover API definitions. Set details=true to show details'
    # The string 'false' (any case) counts as False so the API can be
    # driven from query parameters
    want_details = bool(details) and not (isinstance(details, str) and details.lower() == 'false')
    if want_details:
        return copy.deepcopy(self.discoverinfo)
    return {k: v.get('description', '') for k, v in self.discoverinfo.items()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def unloadmodule(self, module, ignoreDependencies = False):
    '''
    Unload a module class.

    Waits for an in-progress load or unload to settle first. Unless
    *ignoreDependencies* is True, modules depending on this one are
    unloaded before it, and dependency links are removed afterwards.
    '''
    self._logger.debug('Try to unload module %r', module)
    if hasattr(module, '_instance'):
        self._logger.debug('Module %r is loaded, module state is %r', module, module._instance.state)
        inst = module._instance
        if inst.state == ModuleLoadStateChanged.LOADING or inst.state == ModuleLoadStateChanged.LOADED:
            # Wait for loading
            # Wait for succeeded or failed
            sm = ModuleLoadStateChanged.createMatcher(module._instance.target, ModuleLoadStateChanged.SUCCEEDED)
            fm = ModuleLoadStateChanged.createMatcher(module._instance.target, ModuleLoadStateChanged.FAILED)
            await M_(sm, fm)
        elif inst.state == ModuleLoadStateChanged.UNLOADING or inst.state == ModuleLoadStateChanged.UNLOADED:
            # Another routine is already unloading it; just wait for the result
            um = ModuleLoadStateChanged.createMatcher(module, ModuleLoadStateChanged.UNLOADED)
            await um
    if hasattr(module, '_instance') and (module._instance.state == ModuleLoadStateChanged.SUCCEEDED or
                                         module._instance.state == ModuleLoadStateChanged.FAILED):
        self._logger.info('Unloading module %r', module)
        inst = module._instance
        # Change state to unloading to prevent more dependencies
        await inst.changestate(ModuleLoadStateChanged.UNLOADING, self)
        if not ignoreDependencies:
            # Unload every module that still depends on this one, and wait
            # until all of them report UNLOADED
            deps = [d for d in inst.dependedBy if hasattr(d, '_instance') and d._instance.state != ModuleLoadStateChanged.UNLOADED]
            ums = [ModuleLoadStateChanged.createMatcher(d, ModuleLoadStateChanged.UNLOADED) for d in deps]
            for d in deps:
                self.subroutine(self.unloadmodule(d), False)
            await self.wait_for_all(*ums)
        await inst.unload(self)
        del self.activeModules[inst.getServiceName()]
        self._logger.info('Module %r is unloaded', module)
        if not ignoreDependencies:
            # Drop this module from its dependencies' dependedBy sets
            for d in module.depends:
                if hasattr(d, '_instance') and module in d._instance.dependedBy:
                    self._removeDepend(module, d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def load_by_path(self, path):
    """
    Load a module by its full path. If there are dependencies, they are
    also loaded.

    :param path: dotted module path, e.g. ``'package.module.Class'``
    :raises ModuleLoadException: when the module cannot be found or loaded
    """
    try:
        p, module = findModule(path, True)
    except KeyError as exc:
        # The original concatenation lacked a space after str(exc), producing
        # messages like "...KeyError('x')is not defined in the package"
        raise ModuleLoadException('Cannot load module ' + repr(path) + ': ' + str(exc) + ' is not defined in the package')
    except Exception as exc:
        raise ModuleLoadException('Cannot load module ' + repr(path) + ': ' + str(exc))
    if module is None:
        raise ModuleLoadException('Cannot find module: ' + repr(path))
    return await self.loadmodule(module)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def unload_by_path(self, path):
    """
    Unload a module by its full path. Dependencies are automatically
    unloaded if they are marked to be services.

    :raises ModuleLoadException: when the module cannot be found
    """
    _, module = findModule(path, False)
    if module is None:
        raise ModuleLoadException('Cannot find module: ' + repr(path))
    return await self.unloadmodule(module)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_module_by_name(self, targetname):
    """
    Return the loaded module instance for a target name.

    :param targetname: service name, or ``'public'`` for the shared target
    :returns: the module instance, or None for the ``'public'`` target
    :raises KeyError: when the module is not loaded
    """
    if targetname == 'public':
        return None
    # BUG FIX: the original condition was `not targetname not in
    # self.activeModules`, i.e. `targetname in self.activeModules`, which
    # raised KeyError for loaded modules and fell through for unknown names.
    if targetname not in self.activeModules:
        raise KeyError('Module %r not exists or is not loaded' % (targetname,))
    return self.activeModules[targetname]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_keys(self, sortkey = False):
    """
    Yield all configuration keys in this node, including keys from child
    nodes (joined with ``'.'``).

    :param sortkey: when True, iterate keys in sorted order
    """
    entries = sorted(self.items()) if sortkey else self.items()
    for key, value in entries:
        if isinstance(value, ConfigTree):
            # Recurse into the subtree, prefixing each key with this node's key
            for child_key in value.config_keys(sortkey):
                yield key + '.' + child_key
        else:
            yield key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_value_keys(self, sortkey = False):
    """
    Return configuration keys stored directly in this node; keys inside
    child nodes are not included.

    :param sortkey: when True, iterate keys in sorted order
    """
    entries = sorted(self.items()) if sortkey else self.items()
    return (key for key, value in entries if not isinstance(value, ConfigTree))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadconfig(self, keysuffix, obj):
    """
    Copy all configurations under *keysuffix* of this node onto *obj*.

    Plain values never overwrite existing attributes of *obj*; subtree
    values overwrite unless the existing attribute is merged recursively.
    """
    subtree = self.get(keysuffix)
    if subtree is not None and isinstance(subtree, ConfigTree):
        for k,v in subtree.items():
            if isinstance(v, ConfigTree):
                if hasattr(obj, k) and not isinstance(getattr(obj, k), ConfigTree):
                    # NOTE(review): loadconfig takes (keysuffix, obj) but only
                    # one argument is passed here — this recursive call looks
                    # like it is missing an argument; confirm the intended
                    # signature against the rest of the project.
                    v.loadconfig(getattr(obj,k))
                else:
                    setattr(obj, k, v)
            elif not hasattr(obj, k):
                setattr(obj, k, v)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def withconfig(self, keysuffix):
    """
    Class decorator that loads configurations under *keysuffix* into the
    decorated class.

    :returns: a decorator which applies the configuration and returns the
              class itself
    """
    def decorator(cls):
        # loadconfig mutates cls in place and returns None; return the class
        # explicitly so the decorated name is not replaced by None.
        self.loadconfig(keysuffix, cls)
        return cls
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def todict(self):
    """
    Convert this configuration node to a plain dictionary tree; child
    ConfigTree nodes are converted recursively.
    """
    return {k: (v.todict() if isinstance(v, ConfigTree) else v)
            for k, v in self.items()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, sortkey = True):
    """
    Serialize configurations into a list of ``key=repr(value)`` strings.

    :param sortkey: when True, output in sorted key order
    """
    return ['%s=%r' % item for item in self.config_items(sortkey)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def savetostr(self, sortkey = True):
    """
    Serialize configurations into a single string, one ``key=value`` pair
    per line.

    :param sortkey: when True, output in sorted key order
    """
    lines = [k + '=' + repr(v) + '\n' for k, v in self.config_items(sortkey)]
    return ''.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def savetofile(self, filelike, sortkey = True):
    """
    Write configurations to *filelike* (any object supporting
    ``writelines``), one ``key=value`` pair per line.

    :param sortkey: when True, output in sorted key order
    """
    lines = (k + '=' + repr(v) + '\n' for k, v in self.config_items(sortkey))
    filelike.writelines(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def saveto(self, path, sortkey = True):
    """
    Save configurations to the file at *path*, overwriting any existing
    content.

    :param sortkey: when True, output in sorted key order
    """
    with open(path, 'w') as output:
        self.savetofile(output, sortkey)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getConfigurableParent(cls):
    """
    Return the nearest base class (other than Configurable itself) from
    which this class inherits configurations, or None when there is none.
    """
    for p in cls.__bases__:
        # `p` is a class, so the subclass relation — not an instance check —
        # is the correct test; this matches the issubclass() usage in
        # config_value_keys elsewhere in this file.
        if issubclass(p, Configurable) and p is not Configurable:
            return p
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getConfigRoot(cls, create = False):
    """
    Return the configuration tree node mapped to this class, or None when
    the class declares no ``configkey``.

    :param create: create the tree node when it does not exist yet
    """
    try:
        return manager.gettree(cls.configkey, create)
    except AttributeError:
        # No configkey on this class: it has no mapped configuration root
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config_value_keys(self, sortkey = False):
    """
    Return every mapped configuration key visible on this object, walking
    the Configurable inheritance chain from the object's class upward.

    :param sortkey: when True, return the keys sorted
    """
    collected = set()
    current = type(self)
    while True:
        root = current.getConfigRoot()
        if root:
            collected.update(root.config_value_keys())
        # Pick the next Configurable base to climb to; when several bases
        # qualify, the last one wins (mirrors the original lookup order)
        parent = None
        for base in current.__bases__:
            if issubclass(base, Configurable):
                parent = base
        if parent is None:
            break
        current = parent
    return sorted(collected) if sortkey else list(collected)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def wait_for_group(self, container, networkid, timeout = 120):
    """
    Wait until the VXLAN group for *networkid* is created and return its
    physical port id.

    :raises ConnectionResetException: when the connection is down or drops
    :raises ValueError: when the group is not created before *timeout*
    """
    try:
        # Fast path: the group is already known
        return self._current_groups[networkid]
    except KeyError:
        pass
    if not self._connection.connected:
        raise ConnectionResetException
    groupchanged = VXLANGroupChanged.createMatcher(self._connection, networkid, VXLANGroupChanged.UPDATED)
    conn_down = self._connection.protocol.statematcher(self._connection)
    timed_out, ev, matcher = await container.wait_with_timeout(timeout, groupchanged, conn_down)
    if timed_out:
        raise ValueError('VXLAN group is still not created after a long time')
    if matcher is conn_down:
        raise ConnectionResetException
    return ev.physicalportid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readonce(self, size = None):
    """
    Read whatever is available in the current buffer without blocking.

    Returns an empty string when the buffer is empty; use ``prepareRead``
    to fetch the next chunk. This is not a coroutine method.

    :param size: optional maximum amount to return
    :raises EOFError: when the stream already hit EOF
    :raises IOError: when the stream broke before EOF
    """
    if self.eof:
        raise EOFError
    if self.errored:
        raise IOError('Stream is broken before EOF')
    remaining = len(self.data) - self.pos
    if size is not None and size < remaining:
        # Partial read: leave the rest in the buffer
        chunk = self.data[self.pos: self.pos + size]
        self.pos += size
        return chunk
    # Drain the buffer completely; mark EOF when the source has ended
    chunk = self.data[self.pos:]
    self.pos = len(self.data)
    if self.dataeof:
        self.eof = True
    return chunk
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def readline(self, container = None, size = None):
    """
    Coroutine method which reads the next line (up to and including
    ``'\\n'``) or until EOF or *size* is exceeded.

    :param container: routine container used for ``prepareRead``
    :param size: optional maximum length of the returned line
    :raises EOFError: when the stream is already at EOF
    :raises IOError: when the stream is broken before EOF
    """
    ret = []
    retsize = 0
    if self.eof:
        raise EOFError
    if self.errored:
        raise IOError('Stream is broken before EOF')
    while size is None or retsize < size:
        if self.pos >= len(self.data):
            # Current buffer exhausted: pull the next chunk
            await self.prepareRead(container)
        if size is None or size - retsize >= len(self.data) - self.pos:
            # The whole remaining buffer may be consumed
            t = self.data[self.pos:]
            if self.isunicode:
                p = t.find(u'\n')
            else:
                p = t.find(b'\n')
            if p >= 0:
                # Newline found: take up to and including it, then stop
                t = t[0: p + 1]
                ret.append(t)
                retsize += len(t)
                self.pos += len(t)
                break
            else:
                ret.append(t)
                retsize += len(t)
                self.pos += len(t)
                if self.dataeof:
                    self.eof = True
                    break
                if self.dataerror:
                    self.errored = True
                    break
        else:
            # Only part of the buffer fits within *size*
            t = self.data[self.pos:self.pos + (size - retsize)]
            if self.isunicode:
                p = t.find(u'\n')
            else:
                p = t.find(b'\n')
            if p >= 0:
                t = t[0: p + 1]
            ret.append(t)
            self.pos += len(t)
            retsize += len(t)
            break
    if self.isunicode:
        return u''.join(ret)
    else:
        return b''.join(ret)
    # NOTE(review): this error check is unreachable — both branches above
    # return first. It probably belongs before the returns so a broken
    # stream raises instead of returning a partial line; confirm intent.
    if self.errored:
        raise IOError('Stream is broken before EOF')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def copy_to(self, dest, container, buffering = True):
    """
    Coroutine method to copy the whole content of this stream into *dest*.

    A broken source marks *dest* as errored; a write failure stops the
    copy. The source stream is always closed afterwards.

    :param dest: destination stream
    :param container: routine container used for stream operations
    :param buffering: passed through to ``dest.write``
    """
    if self.eof:
        # Nothing to copy; just forward EOF to the destination.
        # (The original passed True in the `container` slot of write();
        # pass the container and flag eof explicitly, matching the
        # write(data, container, eof, ...) call below.)
        await dest.write(u'' if self.isunicode else b'', container, True)
    elif self.errored:
        await dest.error(container)
    else:
        try:
            while not self.eof:
                await self.prepareRead(container)
                data = self.readonce()
                try:
                    await dest.write(data, container, self.eof, buffering = buffering)
                except IOError:
                    # Destination refused further data; stop copying
                    break
        except:
            # Propagate the failure to the destination from a subroutine,
            # then re-raise to our caller
            async def _cleanup():
                try:
                    await dest.error(container)
                except IOError:
                    pass
            container.subroutine(_cleanup(), False)
            raise
        finally:
            self.close(container.scheduler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subtree(self, event, create = False):
    '''
    Walk down the index tree following *event*'s indices and return the
    matching subtree.

    :param create: when True, missing nodes are created on the way down
    :returns: the deepest matching subtree, or None when not found and
              *create* is False
    '''
    node = self
    for depth in range(self.depth, len(event.indices)):
        if not hasattr(node, 'index'):
            # Leaf reached before the indices were exhausted
            return node
        ind = event.indices[depth]
        if create:
            node = node.index.setdefault(ind, EventTree(node, self.branch))
            node.parentIndex = ind
        else:
            node = node.index.get(ind)
            if node is None:
                return None
    return node
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lognet_vxlan_walker(prepush = True):
    """
    Return a walker function that retrieves the necessary information for a
    logical network from ObjectDB: the network itself, its VXLAN endpoint
    set (when the physical network type is 'vxlan'), and — when *prepush*
    is True — all logical ports with their VXLAN info.
    """
    def _walk_lognet(key, value, walk, save):
        save(key)
        if value is None:
            return
        try:
            phynet = walk(value.physicalnetwork.getkey())
        except KeyError:
            # Object not retrieved yet; the walker is re-run when the key
            # becomes available
            pass
        else:
            if phynet is not None and getattr(phynet, 'type') == 'vxlan':
                try:
                    vxlan_endpoint_key = VXLANEndpointSet.default_key(value.id)
                    walk(vxlan_endpoint_key)
                except KeyError:
                    pass
                else:
                    save(vxlan_endpoint_key)
                if prepush:
                    # Acquire all logical ports
                    try:
                        netmap = walk(LogicalNetworkMap.default_key(value.id))
                    except KeyError:
                        pass
                    else:
                        save(netmap.getkey())
                        for logport in netmap.ports.dataset():
                            try:
                                _ = walk(logport.getkey())
                            except KeyError:
                                pass
                            else:
                                save(logport.getkey())
                            try:
                                # Derive the port id from the key to fetch the
                                # per-port VXLAN info object
                                _, (portid,) = LogicalPort._getIndices(logport.getkey())
                                portinfokey = LogicalPortVXLANInfo.default_key(portid)
                                _ = walk(portinfokey)
                            except KeyError:
                                pass
                            else:
                                save(portinfokey)
    return _walk_lognet
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_result(self, supress_exception = False, defaultresult = None):
    '''
    Context manager to ensure this future always ends with a result.

    On normal exit the future is set to *defaultresult* (when not already
    done); an ``Exception`` is stored into the future and optionally
    re-raised; any other BaseException (e.g. GeneratorExit,
    KeyboardInterrupt) marks the future as cancelled and always propagates.

    :param supress_exception: when True, do not re-raise stored Exceptions
    :param defaultresult: result stored when the body set none
    '''
    try:
        yield self
    except Exception as exc:
        if not self.done():
            self.set_exception(exc)
        if not supress_exception:
            raise
    except:
        # BaseException path: treat as cancellation and always propagate
        if not self.done():
            self.set_exception(FutureCancelledException('cancelled'))
        raise
    else:
        if not self.done():
            self.set_result(defaultresult)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_cert(cert):
    """
    Convert an X509 certificate into a Python dictionary, following the
    layout established by the standard library's ssl module: 'subject',
    'notAfter', and optionally 'subjectAltName'.
    """
    subject = _create_tuple_for_X509_NAME(X509_get_subject_name(cert.value))
    decoded = {
        "subject": subject,
        "notAfter": ASN1_TIME_print(X509_get_notAfter(cert.value)),
    }
    alt_names = _get_peer_alt_names(cert)
    if alt_names is not None:
        decoded["subjectAltName"] = alt_names
    return decoded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """
    Retrieve the certificate from the server at *addr* and return it as a
    PEM-encoded string.

    When *ca_certs* is given, the server certificate is validated against
    it. Non-DTLS protocol versions are delegated to the original
    implementation.
    """
    if ssl_version not in (PROTOCOL_DTLS, PROTOCOL_DTLSv1, PROTOCOL_DTLSv1_2):
        return _orig_get_server_certificate(addr, ssl_version, ca_certs)
    cert_reqs = ssl.CERT_REQUIRED if ca_certs is not None else ssl.CERT_NONE
    af = getaddrinfo(addr[0], addr[1])[0][0]
    s = ssl.wrap_socket(socket(af, SOCK_DGRAM),
                        ssl_version=ssl_version,
                        cert_reqs=cert_reqs, ca_certs=ca_certs)
    try:
        # Ensure the socket is released even if connect/handshake fails
        s.connect(addr)
        dercert = s.getpeercert(True)
    finally:
        s.close()
    return ssl.DER_cert_to_PEM_cert(dercert)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_curves(self, curves):
    u'''
    Set supported elliptic curves by name, nid or nist.

    :param str | tuple(int) curves: e.g. "secp384r1:secp256k1", (715, 714),
        "P-384", "K-409:B-409:K-571", ...
    :return: 1 for success, 0 for failure; None for unsupported input types
    '''
    if isinstance(curves, str):
        return SSL_CTX_set1_curves_list(self._ctx, curves)
    if isinstance(curves, tuple):
        return SSL_CTX_set1_curves(self._ctx, curves, len(curves))
    # Neither a colon-separated name string nor a tuple of nids
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_cert_chain(self, flags=SSL_BUILD_CHAIN_FLAG_NONE):
    u'''
    Build the certificate chain for this context. Used for server side only!

    :param flags: SSL_BUILD_CHAIN_FLAG_* bitmask
    :return: 1 for success and 0 for failure
    '''
    return SSL_CTX_build_cert_chain(self._ctx, flags)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_ssl_logging(self, enable=False, func=_ssl_logging_cb):
    u'''
    Enable or disable SSL state logging.

    :param True | False enable: install *func* as the info callback when
        True; remove any callback when False
    :param func: callback invoked by OpenSSL on state changes
    '''
    callback = func if enable else 0
    SSL_CTX_set_info_callback(self._ctx, callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_socket(self, inbound):
    """
    Retrieve a socket used by this connection.

    When *inbound* is True, return the read-side socket if a separate one
    exists (routing demux); otherwise, and for the write side, return the
    main socket.
    """
    if inbound:
        # Fall back to the main socket when no dedicated read socket exists
        return getattr(self, "_rsock", self._sock)
    return self._sock
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def listen(self):
"""Server-side cookie exchange This method reads datagrams from the socket and initiates cookie exchange, upon whose successful conclusion one can then proceed to the accept method. Alternatively, accept can be called directly, in which case it will call this method. In order to prevent denial-of- service attacks, only a small, constant set of computing resources are used during the listen phase. On some platforms, listen must be called so that packets will be forwarded to accepted connections. Doing so is therefore recommened in all cases for portable code. Return value: a peer address if a datagram from a new peer was encountered, None if a datagram for a known peer was forwarded """ |
# Server-side cookie exchange: pull one datagram off the demux and feed
# it to DTLSv1_listen; only constant resources are used until a valid
# cookie round-trip completes.
if not hasattr(self, "_listening"):
    raise InvalidSocketError("listen called on non-listening socket")
self._pending_peer_address = None
try:
    peer_address = self._udp_demux.service()
except socket.timeout:
    peer_address = None
except socket.error as sock_err:
    # Non-blocking sockets report EWOULDBLOCK when idle; anything else
    # is a genuine failure and is re-raised.
    if sock_err.errno != errno.EWOULDBLOCK:
        _logger.exception("Unexpected socket error in listen")
        raise
    peer_address = None
if not peer_address:
    _logger.debug("Listen returning without peer")
    return
# The demux advises that a datagram from a new peer may have arrived
if type(peer_address) is tuple:
    # For this type of demux, the write BIO must be pointed at the peer
    BIO_dgram_set_peer(self._wbio.value, peer_address)
    self._udp_demux.forward()
    self._listening_peer_address = peer_address
self._check_nbio()
self._listening = True
try:
    _logger.debug("Invoking DTLSv1_listen for ssl: %d",
                  self._ssl.raw)
    dtls_peer_address = DTLSv1_listen(self._ssl.value)
except openssl_error() as err:
    if err.ssl_error == SSL_ERROR_WANT_READ:
        # This method must be called again to forward the next datagram
        _logger.debug("DTLSv1_listen must be resumed")
        return
    elif err.errqueue and err.errqueue[0][0] == ERR_WRONG_VERSION_NUMBER:
        _logger.debug("Wrong version number; aborting handshake")
        raise
    elif err.errqueue and err.errqueue[0][0] == ERR_COOKIE_MISMATCH:
        _logger.debug("Mismatching cookie received; aborting handshake")
        raise
    elif err.errqueue and err.errqueue[0][0] == ERR_NO_SHARED_CIPHER:
        _logger.debug("No shared cipher; aborting handshake")
        raise
    _logger.exception("Unexpected error in DTLSv1_listen")
    raise
finally:
    # Always clear the transient listening state, even on error.
    self._listening = False
    self._listening_peer_address = None
if type(peer_address) is tuple:
    _logger.debug("New local peer: %s", dtls_peer_address)
    self._pending_peer_address = peer_address
else:
    self._pending_peer_address = dtls_peer_address
_logger.debug("New peer: %s", self._pending_peer_address)
return self._pending_peer_address
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def accept(self):
"""Server-side UDP connection establishment This method returns a server-side SSLConnection object, connected to that peer most recently returned from the listen method and not yet connected. If there is no such peer, then the listen method is invoked. Return value: SSLConnection connected to a new peer, None if packet forwarding only to an existing peer occurred. """ |
# Complete cookie exchange first if no peer is pending yet.
if not self._pending_peer_address:
    if not self.listen():
        _logger.debug("Accept returning without connection")
        return
# Build a child connection bound to the pending peer, inheriting all
# configuration from this listening connection.
new_conn = SSLConnection(self, self._keyfile, self._certfile, True,
                         self._cert_reqs, self._ssl_version,
                         self._ca_certs, self._do_handshake_on_connect,
                         self._suppress_ragged_eofs, self._ciphers,
                         cb_user_config_ssl_ctx=self._user_config_ssl_ctx,
                         cb_user_config_ssl=self._user_config_ssl)
new_peer = self._pending_peer_address
self._pending_peer_address = None
if self._do_handshake_on_connect:
    # Note that since that connection's socket was just created in its
    # constructor, the following operation must be blocking; hence
    # handshake-on-connect can only be used with a routing demux if
    # listen is serviced by a separate application thread, or else we
    # will hang in this call
    new_conn.do_handshake()
_logger.debug("Accept returning new connection for new peer")
return new_conn, new_peer
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, peer_address):
"""Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer """ |
# Client-side connect: bind the UDP socket and point the datagram BIO
# at the resolved peer address.
self._sock.connect(peer_address)
peer_address = self._sock.getpeername()  # substituted host addrinfo
BIO_dgram_set_connected(self._wbio.value, peer_address)
# Client connections use a single BIO for both directions.
assert self._wbio is self._rbio
if self._do_handshake_on_connect:
    self.do_handshake()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_handshake(self):
"""Perform a handshake with the peer This method forces an explicit handshake to be performed with either the client or server peer. """ |
_logger.debug("Initiating handshake...")
try:
    self._wrap_socket_library_call(
        lambda: SSL_do_handshake(self._ssl.value),
        ERR_HANDSHAKE_TIMEOUT)
except openssl_error() as err:
    # SSL_ERROR_SYSCALL with result -1 is surfaced as ERR_PORT_UNREACHABLE.
    if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
        raise_ssl_error(ERR_PORT_UNREACHABLE, err)
    raise
# Mark success so methods like cipher() know the session is established.
self._handshake_done = True
_logger.debug("...completed handshake")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, len=1024, buffer=None):
"""Read data from connection Read up to len bytes and return them. Arguments: len -- maximum number of bytes to read Return value: string containing read bytes """ |
try:
    # Reads at most `len` bytes; `buffer`, when given, receives the data.
    return self._wrap_socket_library_call(
        lambda: SSL_read(self._ssl.value, len, buffer), ERR_READ_TIMEOUT)
except openssl_error() as err:
    # SSL_ERROR_SYSCALL with result -1 is surfaced as ERR_PORT_UNREACHABLE.
    if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
        raise_ssl_error(ERR_PORT_UNREACHABLE, err)
    raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, data):
"""Write data to connection Write data as string of bytes. Arguments: data -- buffer containing data to be written Return value: number of bytes actually transmitted """ |
try:
    ret = self._wrap_socket_library_call(
        lambda: SSL_write(self._ssl.value, data), ERR_WRITE_TIMEOUT)
except openssl_error() as err:
    # SSL_ERROR_SYSCALL with result -1 is surfaced as ERR_PORT_UNREACHABLE.
    if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
        raise_ssl_error(ERR_PORT_UNREACHABLE, err)
    raise
if ret:
    # A successful write implies the handshake has completed.
    self._handshake_done = True
return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
"""Shut down the DTLS connection This method attemps to complete a bidirectional shutdown between peers. For non-blocking sockets, it should be called repeatedly until it no longer raises continuation request exceptions. """ |
if hasattr(self, "_listening"):
    # Listening server-side sockets cannot be shut down
    return
try:
    self._wrap_socket_library_call(
        lambda: SSL_shutdown(self._ssl.value), ERR_READ_TIMEOUT)
except openssl_error() as err:
    # SSL_shutdown returning 0 means our close-notify went out but the
    # peer's has not arrived yet; call again to complete the exchange.
    if err.result == 0:
        # close-notify alert was just sent; wait for same from peer
        # Note: while it might seem wise to suppress further read-aheads
        # with SSL_set_read_ahead here, doing so causes a shutdown
        # failure (ret: -1, SSL_ERROR_SYSCALL) on the DTLS shutdown
        # initiator side. And test_starttls does pass.
        self._wrap_socket_library_call(
            lambda: SSL_shutdown(self._ssl.value), ERR_READ_TIMEOUT)
    else:
        raise
if hasattr(self, "_rsock"):
    # Return wrapped connected server socket (non-listening)
    return _UnwrappedSocket(self._sock, self._rsock, self._udp_demux,
                            self._ctx,
                            BIO_dgram_get_peer(self._wbio.value))
# Return unwrapped client-side socket or unwrapped server-side socket
# for single-socket servers
return self._sock
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getpeercert(self, binary_form=False):
"""Retrieve the peer's certificate When binary form is requested, the peer's DER-encoded certficate is returned if it was transmitted during the handshake. When binary form is not requested, and the peer's certificate has been validated, then a certificate dictionary is returned. If the certificate was not validated, an empty dictionary is returned. In all cases, None is returned if no certificate was received from the peer. """ |
try:
    peer_cert = _X509(SSL_get_peer_certificate(self._ssl.value))
except openssl_error():
    # No certificate was received from the peer.
    return
if binary_form:
    # DER-encoded certificate bytes.
    return i2d_X509(peer_cert.value)
if self._cert_reqs == CERT_NONE:
    # Certificate was not validated: return an empty dict, not its fields.
    return {}
return decode_cert(peer_cert)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cipher(self):
    """Return information about the current cipher.

    :return: tuple ``(name, protocol_version, secret_bits)``, or None if
        the handshake has not completed yet.
    """
    if not self._handshake_done:
        return
    # Query the three attributes from the same cipher handle.
    current = SSL_get_current_cipher(self._ssl.value)
    return (SSL_CIPHER_get_name(current),
            SSL_CIPHER_get_version(current),
            SSL_CIPHER_get_bits(current))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prep_bins():
""" Support for running straight out of a cloned source directory instead of an installed distribution """ |
from os import path
from sys import platform, maxsize
from shutil import copy
bit_suffix = "-x86_64" if maxsize > 2**32 else "-x86"
package_root = path.abspath(path.dirname(__file__))
prebuilt_path = path.join(package_root, "prebuilt", platform + bit_suffix)
config = {"MANIFEST_DIR": prebuilt_path}
try:
execfile(path.join(prebuilt_path, "manifest.pycfg"), config)
except IOError:
return # there are no prebuilts for this platform - nothing to do
files = map(lambda x: path.join(prebuilt_path, x), config["FILES"])
for prebuilt_file in files:
try:
copy(path.join(prebuilt_path, prebuilt_file), package_root)
except IOError:
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SSL_CTX_set_info_callback(ctx, app_info_cb):
""" Set the info callback :param callback: The Python callback to use :return: None """ |
def py_info_callback(ssl, where, ret):
    # Shield OpenSSL from any exception raised by the user callback.
    try:
        app_info_cb(SSL(ssl), where, ret)
    except:
        pass
    return
global _info_callback
# Keep the ctypes thunk in a per-context table — presumably so it is not
# garbage-collected while OpenSSL still holds the pointer; confirm.
_info_callback[ctx] = _rvoid_voidp_int_int(py_info_callback)
_SSL_CTX_set_info_callback(ctx, _info_callback[ctx])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_ssl_error(code, nested=None):
    """Raise an SSLError with the given error code.

    :param code: key into the module's ``_ssl_errors`` message table
    :param nested: optional underlying error appended to the message
    """
    message = "%s: %s" % (code, _ssl_errors[code])
    if nested:
        # Append the nested error's text to the base message.
        raise SSLError(code, message + str(nested))
    raise SSLError(code, message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def service(self):
    """Service the root socket.

    Read one datagram from the root socket. Datagrams from known peers
    are forwarded immediately; an empty payload clears state and returns
    None; a datagram from an unknown peer is held until forward() runs.

    :return: the peer's address if the datagram came from a new peer,
        otherwise None
    """
    self.payload, self.payload_peer_address = \
        self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
    _logger.debug("Received datagram from peer: %s",
                  self.payload_peer_address)
    if not self.payload:
        self.payload_peer_address = None
        return
    # `in` instead of the Python-2-only dict.has_key(); same semantics.
    if self.payload_peer_address in self.connections:
        self.forward()
    else:
        return self.payload_peer_address
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forward(self):
    """Forward the stored datagram to its target connection.

    The target is the connection registered for the stored peer address,
    falling back to the connection registered under None (the listening
    connection) when the peer has not been individually registered yet.
    """
    assert self.payload
    assert self.payload_peer_address
    # `in` instead of the Python-2-only dict.has_key(); same semantics.
    if self.payload_peer_address in self.connections:
        conn = self.connections[self.payload_peer_address]
        default = False
    else:
        conn = self.connections[None]  # propagate exception if not created
        default = True
    _logger.debug("Forwarding datagram from peer: %s, default: %s",
                  self.payload_peer_address, default)
    self._forwarding_socket.sendto(self.payload, conn.getsockname())
    # Clear held state so a stale payload can never be re-sent.
    self.payload = ""
    self.payload_peer_address = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emojificate_filter(content, autoescape=True):
    "Convert any emoji in a string into accessible content."
    def passthrough(value):
        return value
    # Honour Django's autoescaping contract before emojificating.
    escape = conditional_escape if autoescape else passthrough
    return mark_safe(emojificate(escape(content)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateorders(self):
""" Update the orders """ |
log.info("Replacing orders")
# Canceling orders
self.cancelall()
# Target
target = self.bot.get("target", {})
price = self.getprice()
# prices
buy_price = price * (1 - target["offsets"]["buy"] / 100)
sell_price = price * (1 + target["offsets"]["sell"] / 100)
# Store price in storage for later use
self["feed_price"] = float(price)
# Buy Side
if float(self.balance(self.market["base"])) < buy_price * target["amount"]["buy"]:
    # NOTE(review): this exception is constructed but never raised, so the
    # shortfall is only recorded via the flag below — confirm intent.
    InsufficientFundsError(Amount(target["amount"]["buy"] * float(buy_price), self.market["base"]))
    self["insufficient_buy"] = True
else:
    self["insufficient_buy"] = False
    self.market.buy(
        buy_price,
        Amount(target["amount"]["buy"], self.market["quote"]),
        account=self.account
    )
# Sell Side
if float(self.balance(self.market["quote"])) < target["amount"]["sell"]:
    # NOTE(review): constructed but never raised — see buy side above.
    InsufficientFundsError(Amount(target["amount"]["sell"], self.market["quote"]))
    self["insufficient_sell"] = True
else:
    self["insufficient_sell"] = False
    self.market.sell(
        sell_price,
        Amount(target["amount"]["sell"], self.market["quote"]),
        account=self.account
    )
# Broadcast the buffered operations and print the result.
pprint(self.execute())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getprice(self):
""" Here we obtain the price for the quote and make sure it has a feed price """ |
target = self.bot.get("target", {})
if target.get("reference") == "feed":
    # Feed-referenced pricing only makes sense on the core quote market.
    assert self.market == self.market.core_quote_market(), "Wrong market for 'feed' reference!"
    ticker = self.market.ticker()
    price = ticker.get("quoteSettlement_price")
    # An infinite settlement price means the asset has no usable feed.
    assert abs(price["price"]) != float("inf"), "Check price feed of asset! (%s)" % str(price)
    return price
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tick(self, d):
""" ticks come in on every block """ |
# Run self.test() every `test_blocks` blocks, when that setting is active.
if self.test_blocks:
    # `or 0` guards against an unset counter value.
    if not (self.counter["blocks"] or 0) % self.test_blocks:
        self.test()
self.counter["blocks"] += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def orders(self):
    """Return this bot's open orders in the configured market.

    The account's open-order list is refreshed first so the result
    reflects current chain state.
    """
    self.account.refresh()
    # The original also and-ed in `self.account.openorders`, which is
    # always truthy inside a comprehension over that same list; dropped.
    return [o for o in self.account.openorders
            if self.bot["market"] == o.market]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _callbackPlaceFillOrders(self, d):
    """Dispatch a notification to the matching handler.

    Distinguishes matched (filled) orders, newly placed orders, and
    call-order updates; anything else is silently ignored.
    """
    # Order matters: most specific types first, exactly as the original
    # elif chain checked them.
    dispatch = ((FilledOrder, self.onOrderMatched),
                (Order, self.onOrderPlaced),
                (UpdateCallOrder, self.onUpdateCallOrder))
    for notification_type, handler in dispatch:
        if isinstance(d, notification_type):
            handler(d)
            return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self):
""" Execute a bundle of operations """ |
# Broadcast synchronously: block until the transaction is in a head block,
# then restore non-blocking mode.
self.bitshares.blocking = "head"
r = self.bitshares.txbuffer.broadcast()
self.bitshares.blocking = False
return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cancelall(self):
    """Cancel every open order of this bot.

    Returns the cancellation result, or None when there is nothing to
    cancel.
    """
    if not self.orders:
        return
    # self.orders is intentionally re-evaluated here, matching the
    # original control flow.
    order_ids = [order["id"] for order in self.orders]
    return self.bitshares.cancel(order_ids, account=self.account)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Start the background process.""" |
# Kick off the periodic download loop; the LoopingCall is kept on self
# so it is not garbage-collected.
self._lc = LoopingCall(self._download)
# Run immediately, and then every 30 seconds:
self._lc.start(30, now=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _download(self):
"""Download the page.""" |
print("Downloading!")
def parse(result):
    # CSV response; the second field is the quoted exchange rate.
    print("Got %r back from Yahoo." % (result,))
    values = result.strip().split(",")
    self._value = float(values[1])
d = getPage(
    "http://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=c4l1&s=%s=X"
    % (self._name,))
d.addCallback(parse)
# Log failures instead of leaving an unhandled Deferred error.
d.addErrback(log.err)
return d
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, result):
""" Register an EventualResult. May be called in any thread. """ |
if self._stopped:
raise ReactorStopped()
self._results.add(result) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
""" Indicate no more results will get pushed into EventualResults, since the reactor has stopped. This should be called in the reactor thread. """ |
# Flag first so no new registrations are accepted, then fail every
# outstanding EventualResult with ReactorStopped.
self._stopped = True
for result in self._results:
    result._set_result(Failure(ReactorStopped()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _connect_deferred(self, deferred):
""" Hook up the Deferred that that this will be the result of. Should only be run in Twisted thread, and only called once. """ |
self._deferred = deferred
# Because we use __del__, we need to make sure there are no cycles
# involving this object, which is why we use a weakref:
def put(result, eventual=weakref.ref(self)):
    # Dereference; None means the EventualResult was already collected.
    eventual = eventual()
    if eventual:
        eventual._set_result(result)
    else:
        err(result, "Unhandled error in EventualResult")
deferred.addBoth(put)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_result(self, result):
""" Set the result of the EventualResult, if not already set. This can only happen in the reactor thread, either as a result of Deferred firing, or as a result of ResultRegistry.stop(). So, no need for thread-safety. """ |
if self._result_set.isSet():
return
self._value = result
self._result_set.set() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _result(self, timeout=None):
""" Return the result, if available. It may take an unknown amount of time to return the result, so a timeout option is provided. If the given number of seconds pass with no result, a TimeoutError will be thrown. If a previous call timed out, additional calls to this function will still wait for a result and return it if available. If a result was returned on one call, additional calls will return/raise the same result. """ |
if timeout is None:
    warnings.warn(
        "Unlimited timeouts are deprecated.",
        DeprecationWarning,
        stacklevel=3)
    # Queue.get(None) won't get interrupted by Ctrl-C...
    timeout = 2**31
self._result_set.wait(timeout)
# In Python 2.6 we can't rely on the return result of wait(), so we
# have to check manually:
if not self._result_set.is_set():
    raise TimeoutError()
# Mark retrieval so __del__ does not report an unhandled error later.
self._result_retrieved = True
return self._value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wait(self, timeout=None):
""" Return the result, or throw the exception if result is a failure. It may take an unknown amount of time to return the result, so a timeout option is provided. If the given number of seconds pass with no result, a TimeoutError will be thrown. If a previous call timed out, additional calls to this function will still wait for a result and return it if available. If a result was returned or raised on one call, additional calls will return/raise the same result. """ |
# Calling wait() from the reactor thread would deadlock it.
if threadable.isInIOThread():
    raise RuntimeError(
        "EventualResult.wait() must not be run in the reactor thread.")
if imp.lock_held():
    try:
        imp.release_lock()
    except RuntimeError:
        # The lock is held by some other thread. We should be safe
        # to continue.
        pass
    else:
        # If EventualResult.wait() is run during module import, if the
        # Twisted code that is being run also imports something the
        # result will be a deadlock. Even if that is not an issue it
        # would prevent importing in other threads until the call
        # returns.
        raise RuntimeError(
            "EventualResult.wait() must not be run at module "
            "import time.")
result = self._result(timeout)
# Error results are re-raised with their original traceback.
if isinstance(result, Failure):
    result.raiseException()
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def original_failure(self):
""" Return the underlying Failure object, if the result is an error. If no result is yet available, or the result was not an error, None is returned. This method is useful if you want to get the original traceback for an error result. """ |
try:
result = self._result(0.0)
except TimeoutError:
return None
if isinstance(result, Failure):
return result
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _startReapingProcesses(self):
""" Start a LoopingCall that calls reapAllProcesses. """ |
# Poll for exited child processes every 0.1s on the reactor's clock;
# False = don't run immediately on start.
lc = LoopingCall(self._reapAllProcesses)
lc.clock = self._reactor
lc.start(0.1, False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(self):
""" Initialize the crochet library. This starts the reactor in a thread, and connect's Twisted's logs to Python's standard library logging module. This must be called at least once before the library can be used, and can be called multiple times. """ |
# Idempotent: subsequent calls are no-ops once the reactor thread runs.
if self._started:
    return
self._common_setup()
if platform.type == "posix":
    self._reactor.callFromThread(self._startReapingProcesses)
if self._startLoggingWithObserver:
    observer = ThreadLogObserver(PythonLoggingObserver().emit)
    def start():
        # Twisted is going to override warnings.showwarning; let's
        # make sure that has no effect:
        from twisted.python import log
        original = log.showwarning
        log.showwarning = warnings.showwarning
        self._startLoggingWithObserver(observer, False)
        log.showwarning = original
    self._reactor.callFromThread(start)
    # We only want to stop the logging thread once the reactor has
    # shut down:
    self._reactor.addSystemEventTrigger(
        "after", "shutdown", observer.stop)
# Run the reactor in a dedicated thread without signal handlers, since
# it is not the main thread.
t = threading.Thread(
    target=lambda: self._reactor.run(installSignalHandlers=False),
    name="CrochetReactor")
t.start()
# On interpreter exit: stop the reactor, then log any stashed errors.
self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
self._atexit_register(_store.log_errors)
if self._watchdog_thread is not None:
    self._watchdog_thread.start()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_in_reactor(self, function):
""" A decorator that ensures the wrapped function runs in the reactor thread. When the wrapped function is called, an EventualResult is returned. """ |
result = self._run_in_reactor(function)
# Backwards compatibility; use __wrapped__ instead.
try:
result.wrapped_function = function
except AttributeError:
pass
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wait_for(self, timeout):
""" A decorator factory that ensures the wrapped function runs in the reactor thread. When the wrapped function is called, its result is returned or its exception raised. Deferreds are handled transparently. Calls will timeout after the given number of seconds (a float), raising a crochet.TimeoutError, and cancelling the Deferred being waited on. """ |
def decorator(function):
    @wrapt.decorator
    def wrapper(function, _, args, kwargs):
        # Run the call in the reactor thread, then block on its result.
        @self.run_in_reactor
        def run():
            return function(*args, **kwargs)
        eventual_result = run()
        try:
            return eventual_result.wait(timeout)
        except TimeoutError:
            # Cancel the in-flight Deferred before propagating.
            eventual_result.cancel()
            raise
    result = wrapper(function)
    # Expose underlying function for testing purposes; this attribute is
    # deprecated, use __wrapped__ instead:
    try:
        result.wrapped_function = function
    except AttributeError:
        pass
    return result
return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_reactor(self, function):
""" DEPRECATED, use run_in_reactor. A decorator that ensures the wrapped function runs in the reactor thread. The wrapped function will get the reactor passed in as a first argument, in addition to any arguments it is called with. When the wrapped function is called, an EventualResult is returned. """ |
warnings.warn(
    "@in_reactor is deprecated, use @run_in_reactor",
    DeprecationWarning,
    stacklevel=2)
# Same as run_in_reactor, but prepends the reactor as first argument.
@self.run_in_reactor
@wraps(function)
def add_reactor(*args, **kwargs):
    return function(self._reactor, *args, **kwargs)
return add_reactor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self, deferred_result):
""" Store a EventualResult. Return an integer, a unique identifier that can be used to retrieve the object. """ |
self._counter += 1
self._stored[self._counter] = deferred_result
return self._counter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_errors(self):
""" Log errors for all stored EventualResults that have error results. """ |
# Surface error results that were stashed but never retrieved.
for result in self._stored.values():
    failure = result.original_failure()
    if failure is not None:
        log.err(failure, "Unhandled error in stashed EventualResult:")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_ssh_server(port, username, password, namespace):
""" Start an SSH server on the given port, exposing a Python prompt with the given namespace. """ |
# This is a lot of boilerplate, see http://tm.tl/6429 for a ticket to
# provide a utility function that simplifies this.
from twisted.internet import reactor
from twisted.conch.insults import insults
from twisted.conch import manhole, manhole_ssh
from twisted.cred.checkers import (
    InMemoryUsernamePasswordDatabaseDontUse as MemoryDB)
from twisted.cred.portal import Portal
sshRealm = manhole_ssh.TerminalRealm()
def chainedProtocolFactory():
    # Each SSH session gets a Manhole REPL over the given namespace.
    return insults.ServerProtocol(manhole.Manhole, namespace)
sshRealm.chainedProtocolFactory = chainedProtocolFactory
sshPortal = Portal(sshRealm, [MemoryDB(**{username: password})])
# Bound to loopback only, so the manhole is not exposed externally.
reactor.listenTCP(port, manhole_ssh.ConchFactory(sshPortal),
                  interface="127.0.0.1")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _synced(method, self, args, kwargs):
"""Underlying synchronized wrapper.""" |
# Serialize the call on the instance lock so wrapped methods never
# run concurrently on the same object.
with self._lock:
    return method(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, f, *args, **kwargs):
    """Queue *f* with the given arguments to be invoked later."""
    def deferred_call():
        # Invoke with the arguments captured at registration time.
        return f(*args, **kwargs)
    self._functions.append(deferred_call)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_function(self, function, name=None):
    """Register *function* so EPC clients can call it.

    :type function: callable
    :arg function: Function to publish.
    :type name: str
    :arg name: Name to publish under; defaults to ``function.__name__``.

    Returns the function unchanged, so this works as a decorator.
    """
    key = function.__name__ if name is None else name
    self.funcs[key] = function
    return function
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_method(self, name):
""" Get registered method called `name`. """ |
# Lookup order: explicitly registered functions first, then the served
# instance's own _get_method hook, then dotted-attribute resolution.
try:
    return self.funcs[name]
except KeyError:
    try:
        return self.instance._get_method(name)
    except AttributeError:
        return SimpleXMLRPCServer.resolve_dotted_attribute(
            self.instance, name, self.allow_dotted_names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_debugger(self, debugger):
""" Set debugger to run when an error occurs in published method. You can also set debugger by passing `debugger` argument to the class constructor. :type debugger: {'pdb', 'ipdb', None} :arg debugger: type of debugger. """ |
if debugger == 'pdb':
import pdb
self.debugger = pdb
elif debugger == 'ipdb':
import ipdb
self.debugger = ipdb
else:
self.debugger = debugger |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, socket_or_address):
""" Connect to server and start serving registered functions. :type socket_or_address: tuple or socket object :arg socket_or_address: A ``(host, port)`` pair to be passed to `socket.create_connection`, or a socket object. """ |
# Accept either a (host, port) pair or an already-connected socket.
if isinstance(socket_or_address, tuple):
    import socket
    self.socket = socket.create_connection(socket_or_address)
else:
    self.socket = socket_or_address
# This is what BaseServer.finish_request does:
address = None  # it is not used, so leave it empty
self.handler = EPCClientHandler(self.socket, address, self)
# Re-export the handler's call interfaces on the client object.
self.call = self.handler.call
self.call_sync = self.handler.call_sync
self.methods = self.handler.methods
self.methods_sync = self.handler.methods_sync
self.handler_thread = newthread(self, target=self.handler.start)
self.handler_thread.daemon = self.thread_daemon
self.handler_thread.start()
# Block until the handler thread is ready to serve calls.
self.handler.wait_until_ready()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(args=None):
    """\
    Quick CLI to serve Python functions in a module.

    Example usage::

        python -m epc.server --allow-dotted-names os

    Note that only the functions which get and return simple
    built-in types (str, int, float, list, tuple, dict) work.

    """
    import argparse
    from textwrap import dedent
    parser = argparse.ArgumentParser(
        # Combine "show defaults" with "keep the raw description layout".
        formatter_class=type('EPCHelpFormatter',
                             (argparse.ArgumentDefaultsHelpFormatter,
                              argparse.RawDescriptionHelpFormatter),
                             {}),
        # ``main.__doc__`` is None under ``python -OO``; fall back to ''
        # instead of letting dedent() raise TypeError.
        description=dedent(main.__doc__ or ''))
    parser.add_argument(
        'module', help='Serve python functions in this module.')
    parser.add_argument(
        '--address', default='localhost',
        help='server address')
    parser.add_argument(
        '--port', default=0, type=int,
        help='server port. 0 means to pick up random port.')
    parser.add_argument(
        '--allow-dotted-names', default=False, action='store_true')
    parser.add_argument(
        '--pdb', dest='debugger', const='pdb', action='store_const',
        help='start pdb when error occurs.')
    parser.add_argument(
        '--ipdb', dest='debugger', const='ipdb', action='store_const',
        help='start ipdb when error occurs.')
    parser.add_argument(
        '--log-traceback', action='store_true', default=False)
    ns = parser.parse_args(args)
    server = EPCServer((ns.address, ns.port),
                       debugger=ns.debugger,
                       log_traceback=ns.log_traceback)
    server.register_instance(
        __import__(ns.module),
        allow_dotted_names=ns.allow_dotted_names)
    # Emacs reads the port from stdout, so print it before blocking.
    server.print_port()
    server.serve_forever()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_port(self, stream=sys.stdout):
    """Write the port this EPC server listens on to *stream*.

    As the Emacs client reads the port number from STDOUT, call this
    just before calling :meth:`serve_forever`.

    :type stream: text stream
    :arg  stream: Stream the port number is written to.
        Default is :data:`sys.stdout`.
    """
    port = self.server_address[1]
    stream.write("{0}\n".format(port))
    stream.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(self, name, *args, **kwds):
    """Invoke remote method *name* through this handler's call manager.

    :type name: str
    :arg  name: Method name to call.
    :type args: list
    :arg  args: Arguments for the remote method.
    :type callback: callable
    :arg  callback: Called with the value returned by the remote method.
    :type errback: callable
    :arg  errback: Called with the error raised by the remote method,
        either a :class:`ReturnError` or an :class:`EPCError` instance.
    """
    manager = self.callmanager
    manager.call(self, name, *args, **kwds)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def methods(self, *args, **kwds):
    """Query the remote peer for its callable methods.

    Accepts the same arguments as :meth:`call`, except for `name`.
    """
    manager = self.callmanager
    manager.methods(self, *args, **kwds)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def func_call_as_str(name, *args, **kwds):
    """Format a call to *name* as a string like ``'f(1, 2, a=1)'``.

    Positional arguments are rendered with :func:`repr`; keyword
    arguments follow, sorted by key.
    """
    rendered = [repr(arg) for arg in args]
    for key, value in sorted(kwds.items()):
        rendered.append('{0!s}={1!r}'.format(key, value))
    return '{0}({1})'.format(name, ', '.join(rendered))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def callwith(context_manager):
    """Decorator factory: run the wrapped function inside *context_manager*.

    The same manager object is re-entered on every call, so it must be
    reusable (e.g. a lock), unlike one-shot generator-based managers.
    """
    def apply_to(target):
        def guarded(*args, **kwds):
            with context_manager:
                return target(*args, **kwds)
        return functools.wraps(target)(guarded)
    return apply_to
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.