Given a socket exception, return connection error.
def getConnectError(e):
    """Given a socket exception, return connection error."""
    if isinstance(e, Exception):
        args = e.args
    else:
        args = e
    try:
        number, string = args
    except ValueError:
        return ConnectError(string=e)

    if hasattr(socket, "gaierror") and isinstance(e, socket.gaierror):
        # Only works in 2.2 and newer.  Really that means always; #5978 covers
        # this and other weirdnesses in this function.
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
Set the file description of the given file descriptor to non-blocking.
def setNonBlocking(fd):
    """
    Set the file description of the given file descriptor to non-blocking.
    """
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
Set the file description of the given file descriptor to blocking.
def setBlocking(fd):
    """
    Set the file description of the given file descriptor to blocking.
    """
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    flags = flags & ~os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
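A minimal sketch of using these two helpers together on a pipe, assuming they are imported from twisted.internet.fdesc (the assumed module location):

import os

from twisted.internet.fdesc import setBlocking, setNonBlocking

r, w = os.pipe()
setNonBlocking(r)  # reads on an empty pipe now fail with EAGAIN instead of blocking
setBlocking(r)     # reads block again until data arrives
os.close(r)
os.close(w)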
Read from file descriptor, calling callback with resulting data. If successful, call 'callback' with a single argument: the resulting data. Returns same thing FileDescriptor.doRead would: CONNECTION_LOST, CONNECTION_DONE, or None. @type fd: C{int} @param fd: non-blocking file descriptor to be read from. @param callback: a callable which accepts a single argument. If data is read from the file descriptor it will be called with this data. Handling exceptions from calling the callback is up to the caller. Note that if the descriptor is still connected but no data is read, None will be returned but callback will not be called. @return: CONNECTION_LOST on error, CONNECTION_DONE when fd is closed, otherwise None.
def readFromFD(fd, callback):
    """
    Read from file descriptor, calling callback with resulting data.

    If successful, call 'callback' with a single argument: the
    resulting data.

    Returns same thing FileDescriptor.doRead would: CONNECTION_LOST,
    CONNECTION_DONE, or None.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be read from.
    @param callback: a callable which accepts a single argument. If data is
        read from the file descriptor it will be called with this data.
        Handling exceptions from calling the callback is up to the caller.

        Note that if the descriptor is still connected but no data is read,
        None will be returned but callback will not be called.
    @return: CONNECTION_LOST on error, CONNECTION_DONE when fd is closed,
        otherwise None.
    """
    try:
        output = os.read(fd, 8192)
    except OSError as ioe:
        if ioe.args[0] in (errno.EAGAIN, errno.EINTR):
            return
        else:
            return CONNECTION_LOST
    if not output:
        return CONNECTION_DONE
    callback(output)
Write data to file descriptor. Returns same thing FileDescriptor.writeSomeData would. @type fd: C{int} @param fd: non-blocking file descriptor to be written to. @type data: C{str} or C{buffer} @param data: bytes to write to fd. @return: number of bytes written, or CONNECTION_LOST.
def writeToFD(fd, data):
    """
    Write data to file descriptor.

    Returns same thing FileDescriptor.writeSomeData would.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be written to.
    @type data: C{str} or C{buffer}
    @param data: bytes to write to fd.

    @return: number of bytes written, or CONNECTION_LOST.
    """
    try:
        return os.write(fd, data)
    except OSError as io:
        if io.errno in (errno.EAGAIN, errno.EINTR):
            return 0
        return CONNECTION_LOST
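A minimal sketch combining writeToFD and readFromFD over a non-blocking pipe; the twisted.internet.fdesc import location is assumed:

import os

from twisted.internet.fdesc import readFromFD, setNonBlocking, writeToFD

r, w = os.pipe()
setNonBlocking(r)
setNonBlocking(w)

writeToFD(w, b"hello")        # returns the number of bytes written (5)
chunks = []
readFromFD(r, chunks.append)  # the callback receives b"hello"
print(chunks)                 # [b'hello']
os.close(r)
os.close(w)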
Configure the twisted mainloop to be run inside the glib mainloop. @param useGtk: A hint that the Gtk GUI will or will not be used. Currently does not modify any behavior.
def install(useGtk: bool = False) -> Union[GIReactor, PortableGIReactor]: """ Configure the twisted mainloop to be run inside the glib mainloop. @param useGtk: A hint that the Gtk GUI will or will not be used. Currently does not modify any behavior. """ reactor: Union[GIReactor, PortableGIReactor] if runtime.platform.getType() == "posix": reactor = GIReactor(useGtk=useGtk) else: reactor = PortableGIReactor(useGtk=useGtk) from twisted.internet.main import installReactor installReactor(reactor) return reactor
Configure the twisted mainloop to be run inside the glib mainloop.
def install():
    """
    Configure the twisted mainloop to be run inside the glib mainloop.
    """
    reactor = Glib2Reactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
Configure the twisted mainloop to be run inside the gtk mainloop. @param useGtk: should glib rather than GTK+ event loop be used (this will be slightly faster but does not support GUI).
def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: should glib rather than GTK+ event loop be used
        (this will be slightly faster but does not support GUI).
    """
    reactor = Gtk2Reactor(useGtk)
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
Configure the twisted mainloop to be run inside the gtk mainloop.
def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.
    """
    reactor = PortableGtkReactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
Auxiliary function that converts a hexadecimal mask into a series of human readable flags.
def humanReadableMask(mask):
    """
    Auxiliary function that converts a hexadecimal mask into a series of
    human readable flags.
    """
    s = []
    for k, v in _FLAG_TO_HUMAN:
        if k & mask:
            s.append(v)
    return s
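A small usage sketch, assuming this helper and the IN_* flag constants live in twisted.internet.inotify (Linux only); the exact flag names printed are illustrative:

from twisted.internet.inotify import IN_CREATE, IN_DELETE, humanReadableMask

mask = IN_CREATE | IN_DELETE
print(humanReadableMask(mask))  # e.g. ['create', 'delete']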
Install the kqueue() reactor.
def install():
    """
    Install the kqueue() reactor.
    """
    p = KQueueReactor()
    from twisted.internet.main import installReactor

    installReactor(p)
Install reactor C{reactor}. @param reactor: An object that provides one or more IReactor* interfaces.
def installReactor(reactor):
    """
    Install reactor C{reactor}.

    @param reactor: An object that provides one or more IReactor* interfaces.
    """
    # this stuff should be common to all reactors.
    import sys

    import twisted.internet

    if "twisted.internet.reactor" in sys.modules:
        raise error.ReactorAlreadyInstalledError("reactor already installed")
    twisted.internet.reactor = reactor
    sys.modules["twisted.internet.reactor"] = reactor
Install the poll() reactor.
def install():
    """Install the poll() reactor."""
    p = PollReactor()
    from twisted.internet.main import installReactor

    installReactor(p)
Reap all registered processes.
def reapAllProcesses() -> None:
    """
    Reap all registered processes.
    """
    # Coerce this to a list, as reaping the process changes the dictionary and
    # causes a "size changed during iteration" exception
    for process in list(reapProcessHandlers.values()):
        process.reapProcess()
Register a process handler for the given pid, in case L{reapAllProcesses} is called. @param pid: the pid of the process. @param process: a process handler.
def registerReapProcessHandler(pid, process):
    """
    Register a process handler for the given pid, in case L{reapAllProcesses}
    is called.

    @param pid: the pid of the process.
    @param process: a process handler.
    """
    if pid in reapProcessHandlers:
        raise RuntimeError("Try to register an already registered process.")
    try:
        auxPID, status = os.waitpid(pid, os.WNOHANG)
    except BaseException:
        log.msg(f"Failed to reap {pid}:")
        log.err()

        if pid is None:
            return

        auxPID = None
    if auxPID:
        process.processEnded(status)
    else:
        # if auxPID is 0, there are children but none have exited
        reapProcessHandlers[pid] = process
Unregister a process handler previously registered with L{registerReapProcessHandler}.
def unregisterReapProcessHandler(pid, process):
    """
    Unregister a process handler previously registered with
    L{registerReapProcessHandler}.
    """
    if not (pid in reapProcessHandlers and reapProcessHandlers[pid] == process):
        raise RuntimeError("Try to unregister a process not registered.")
    del reapProcessHandlers[pid]
Use the global detector object to figure out which FD implementation to use.
def _listOpenFDs():
    """
    Use the global detector object to figure out which FD implementation to
    use.
    """
    return detector._listOpenFDs()
Get the C{file_actions} parameter for C{posix_spawn} based on the parameters describing the current process state. @param fdState: A list of 2-tuples of (file descriptor, close-on-exec flag). @param doClose: the integer to use for the 'close' instruction @param doDup2: the integer to use for the 'dup2' instruction
def _getFileActions( fdState: List[Tuple[int, bool]], childToParentFD: Dict[int, int], doClose: int, doDup2: int, ) -> List[Tuple[int, ...]]: """ Get the C{file_actions} parameter for C{posix_spawn} based on the parameters describing the current process state. @param fdState: A list of 2-tuples of (file descriptor, close-on-exec flag). @param doClose: the integer to use for the 'close' instruction @param doDup2: the integer to use for the 'dup2' instruction """ fdStateDict = dict(fdState) parentToChildren: Dict[int, List[int]] = defaultdict(list) for inChild, inParent in childToParentFD.items(): parentToChildren[inParent].append(inChild) allocated = set(fdStateDict) allocated |= set(childToParentFD.values()) allocated |= set(childToParentFD.keys()) nextFD = 0 def allocateFD() -> int: nonlocal nextFD while nextFD in allocated: nextFD += 1 allocated.add(nextFD) return nextFD result: List[Tuple[int, ...]] = [] relocations = {} for inChild, inParent in sorted(childToParentFD.items()): # The parent FD will later be reused by a child FD. parentToChildren[inParent].remove(inChild) if parentToChildren[inChild]: new = relocations[inChild] = allocateFD() result.append((doDup2, inChild, new)) if inParent in relocations: result.append((doDup2, relocations[inParent], inChild)) if not parentToChildren[inParent]: result.append((doClose, relocations[inParent])) else: if inParent == inChild: if fdStateDict[inParent]: # If the child is attempting to inherit the parent as-is, # and it is not close-on-exec, the job is already done; we # can bail. Otherwise... tempFD = allocateFD() # The child wants to inherit the parent as-is, so the # handle must be heritable.. dup2 makes the new descriptor # inheritable by default, *but*, per the man page, “if # fildes and fildes2 are equal, then dup2() just returns # fildes2; no other changes are made to the existing # descriptor”, so we need to dup it somewhere else and dup # it back before closing the temporary place we put it. result.extend( [ (doDup2, inParent, tempFD), (doDup2, tempFD, inChild), (doClose, tempFD), ] ) else: result.append((doDup2, inParent, inChild)) for eachFD, uninheritable in fdStateDict.items(): if eachFD not in childToParentFD and not uninheritable: result.append((doClose, eachFD)) return result
Schedule PyUI's display to be updated approximately every C{ms} milliseconds, and initialize PyUI with the specified arguments.
def install(ms=10, reactor=None, args=(), kw={}): """ Schedule PyUI's display to be updated approximately every C{ms} milliseconds, and initialize PyUI with the specified arguments. """ d = pyui.init(*args, **kw) if reactor is None: from twisted.internet import reactor _guiUpdate(reactor, ms / 1000.0) return d
Win32 select wrapper.
def win32select(r, w, e, timeout=None):
    """Win32 select wrapper."""
    if not (r or w):
        # windows select() exits immediately when no sockets
        if timeout is None:
            timeout = 0.01
        else:
            timeout = min(timeout, 0.001)
        sleep(timeout)
        return [], [], []
    # windows doesn't process 'signals' inside select(), so we set a max
    # time or ctrl-c will never be recognized
    if timeout is None or timeout > 0.5:
        timeout = 0.5
    r, w, e = select.select(r, w, w, timeout)
    return r, w + e, []
Configure the twisted mainloop to be run using the select() reactor.
def install():
    """Configure the twisted mainloop to be run using the select() reactor."""
    reactor = SelectReactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
Cooperatively iterate over the given iterator, dividing runtime between it and all other iterators which have been passed to this function and not yet exhausted. @param iterator: the iterator to invoke. @return: a Deferred that will fire when the iterator finishes.
def coiterate(iterator: Iterator[_T]) -> Deferred[Iterator[_T]]:
    """
    Cooperatively iterate over the given iterator, dividing runtime between it
    and all other iterators which have been passed to this function and not
    yet exhausted.

    @param iterator: the iterator to invoke.

    @return: a Deferred that will fire when the iterator finishes.
    """
    return _theCooperator.coiterate(iterator)
Start running the given iterator as a long-running cooperative task, by calling next() on it as a periodic timed event. This is very useful if you have computationally expensive tasks that you want to run without blocking the reactor. Just break each task up so that it yields frequently, pass it in here and the global L{Cooperator} will make sure work is distributed between them without blocking longer than a single iteration of a single task. @param iterator: the iterator to invoke. @return: a L{CooperativeTask} object representing this task.
def cooperate(iterator: Iterator[_T]) -> CooperativeTask:
    """
    Start running the given iterator as a long-running cooperative task, by
    calling next() on it as a periodic timed event.

    This is very useful if you have computationally expensive tasks that you
    want to run without blocking the reactor.  Just break each task up so that
    it yields frequently, pass it in here and the global L{Cooperator} will
    make sure work is distributed between them without blocking longer than a
    single iteration of a single task.

    @param iterator: the iterator to invoke.

    @return: a L{CooperativeTask} object representing this task.
    """
    return _theCooperator.cooperate(iterator)
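A minimal usage sketch for cooperate: wrap a long computation in a generator so the reactor can interleave other work between iterations.

from twisted.internet import reactor, task

def countTo(n):
    for i in range(n):
        # one small slice of work per iteration
        yield i

cooperativeTask = task.cooperate(countTo(100000))
# whenDone() returns a Deferred that fires when the generator is exhausted
cooperativeTask.whenDone().addBoth(lambda _: reactor.stop())
reactor.run()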
Call the given function after a certain period of time has passed. @param clock: The object which will be used to schedule the delayed call. @param delay: The number of seconds to wait before calling the function. @param callable: The callable to call after the delay, or C{None}. @param args: The positional arguments to pass to C{callable}. @param kw: The keyword arguments to pass to C{callable}. @return: A deferred that fires with the result of the callable when the specified time has elapsed.
def deferLater(
    clock: IReactorTime,
    delay: float,
    callable: Optional[Callable[..., _T]] = None,
    *args: object,
    **kw: object,
) -> Deferred[_T]:
    """
    Call the given function after a certain period of time has passed.

    @param clock: The object which will be used to schedule the delayed call.
    @param delay: The number of seconds to wait before calling the function.
    @param callable: The callable to call after the delay, or C{None}.
    @param args: The positional arguments to pass to C{callable}.
    @param kw: The keyword arguments to pass to C{callable}.

    @return: A deferred that fires with the result of the callable when the
        specified time has elapsed.
    """

    def deferLaterCancel(deferred: Deferred[object]) -> None:
        delayedCall.cancel()

    def cb(result: object) -> _T:
        if callable is None:
            return None  # type: ignore[return-value]
        return callable(*args, **kw)

    d: Deferred[_T] = Deferred(deferLaterCancel)
    d.addCallback(cb)
    delayedCall = clock.callLater(delay, d.callback, None)
    return d
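A minimal usage sketch for deferLater: call a function two seconds from now and receive its result through the returned Deferred.

from twisted.internet import reactor, task

def greet(name):
    print(f"hello, {name}")
    return name

d = task.deferLater(reactor, 2.0, greet, "alice")
d.addBoth(lambda _: reactor.stop())
reactor.run()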
Call C{main} and run the reactor until the L{Deferred} it returns fires or the coroutine it returns completes. This is intended as the way to start up an application with a well-defined completion condition. Use it to write clients or one-off asynchronous operations. Prefer this to calling C{reactor.run} directly, as this function will also: - Take care to call C{reactor.stop} once and only once, and at the right time. - Log any failures from the C{Deferred} returned by C{main}. - Exit the application when done, with exit code 0 in case of success and 1 in case of failure. If C{main} fails with a C{SystemExit} error, the code returned is used. The following demonstrates the signature of a C{main} function which can be used with L{react}:: async def main(reactor, username, password): return "ok" task.react(main, ("alice", "secret")) @param main: A callable which returns a L{Deferred} or coroutine. It should take the reactor as its first parameter, followed by the elements of C{argv}. @param argv: A list of arguments to pass to C{main}. If omitted the callable will be invoked with no additional arguments. @param _reactor: An implementation detail to allow easier unit testing. Do not supply this parameter. @since: 12.3
def react( main: Callable[ ..., Union[Deferred[_T], Coroutine["Deferred[_T]", object, _T]], ], argv: Iterable[object] = (), _reactor: Optional[IReactorCore] = None, ) -> NoReturn: """ Call C{main} and run the reactor until the L{Deferred} it returns fires or the coroutine it returns completes. This is intended as the way to start up an application with a well-defined completion condition. Use it to write clients or one-off asynchronous operations. Prefer this to calling C{reactor.run} directly, as this function will also: - Take care to call C{reactor.stop} once and only once, and at the right time. - Log any failures from the C{Deferred} returned by C{main}. - Exit the application when done, with exit code 0 in case of success and 1 in case of failure. If C{main} fails with a C{SystemExit} error, the code returned is used. The following demonstrates the signature of a C{main} function which can be used with L{react}:: async def main(reactor, username, password): return "ok" task.react(main, ("alice", "secret")) @param main: A callable which returns a L{Deferred} or coroutine. It should take the reactor as its first parameter, followed by the elements of C{argv}. @param argv: A list of arguments to pass to C{main}. If omitted the callable will be invoked with no additional arguments. @param _reactor: An implementation detail to allow easier unit testing. Do not supply this parameter. @since: 12.3 """ if _reactor is None: from twisted.internet import reactor _reactor = cast(IReactorCore, reactor) finished = ensureDeferred(main(_reactor, *argv)) code = 0 stopping = False def onShutdown() -> None: nonlocal stopping stopping = True _reactor.addSystemEventTrigger("before", "shutdown", onShutdown) def stop(result: object, stopReactor: bool) -> None: if stopReactor: assert _reactor is not None try: _reactor.stop() except ReactorNotRunning: pass if isinstance(result, Failure): nonlocal code if result.check(SystemExit) is not None: code = result.value.code else: log.err(result, "main function encountered error") code = 1 def cbFinish(result: object) -> None: if stopping: stop(result, False) else: assert _reactor is not None _reactor.callWhenRunning(stop, result, True) finished.addBoth(cbFinish) _reactor.run() sys.exit(code)
Return a 2-tuple of socket IP and port for IPv4 and a 4-tuple of socket IP, port, flowInfo, and scopeID for IPv6. For IPv6, it returns the interface portion (the part after the %) as a part of the IPv6 address, which Python 3.7+ does not include. @param addr: A 2-tuple for IPv4 information or a 4-tuple for IPv6 information.
def _getrealname(addr): """ Return a 2-tuple of socket IP and port for IPv4 and a 4-tuple of socket IP, port, flowInfo, and scopeID for IPv6. For IPv6, it returns the interface portion (the part after the %) as a part of the IPv6 address, which Python 3.7+ does not include. @param addr: A 2-tuple for IPv4 information or a 4-tuple for IPv6 information. """ if len(addr) == 4: # IPv6 host = socket.getnameinfo(addr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)[ 0 ] return tuple([host] + list(addr[1:])) else: return addr[:2]
See L{_getrealname}.
def _getpeername(skt):
    """
    See L{_getrealname}.
    """
    return _getrealname(skt.getpeername())
See L{_getrealname}.
def _getsockname(skt):
    """
    See L{_getrealname}.
    """
    return _getrealname(skt.getsockname())
Resolve an IPv6 literal into an IPv6 address. This is necessary to resolve any embedded scope identifiers to the relevant C{sin6_scope_id} for use with C{socket.connect()}, C{socket.listen()}, or C{socket.bind()}; see U{RFC 3493 <https://tools.ietf.org/html/rfc3493>} for more information. @param ip: An IPv6 address literal. @type ip: C{str} @param port: A port number. @type port: C{int} @return: a 4-tuple of C{(host, port, flow, scope)}, suitable for use as an IPv6 address. @raise socket.gaierror: if either the IP or port is not numeric as it should be.
def _resolveIPv6(ip, port): """ Resolve an IPv6 literal into an IPv6 address. This is necessary to resolve any embedded scope identifiers to the relevant C{sin6_scope_id} for use with C{socket.connect()}, C{socket.listen()}, or C{socket.bind()}; see U{RFC 3493 <https://tools.ietf.org/html/rfc3493>} for more information. @param ip: An IPv6 address literal. @type ip: C{str} @param port: A port number. @type port: C{int} @return: a 4-tuple of C{(host, port, flow, scope)}, suitable for use as an IPv6 address. @raise socket.gaierror: if either the IP or port is not numeric as it should be. """ return socket.getaddrinfo(ip, port, 0, 0, 0, _NUMERIC_ONLY)[0][4]
Return a generator that yields client sockets from the provided listening socket until there are none left or an unrecoverable error occurs. @param logger: A logger to which C{accept}-related events will be logged. This should not log to arbitrary observers that might open a file descriptor to avoid claiming the C{EMFILE} file descriptor on UNIX-like systems. @type logger: L{Logger} @param accepts: An iterable iterated over to limit the number consecutive C{accept}s. @type accepts: An iterable. @param listener: The listening socket. @type listener: L{socket.socket} @param reservedFD: A reserved file descriptor that can be used to recover from C{EMFILE} on UNIX-like systems. @type reservedFD: L{_IFileDescriptorReservation} @return: A generator that yields C{(socket, addr)} tuples from L{socket.socket.accept}
def _accept(logger, accepts, listener, reservedFD): """ Return a generator that yields client sockets from the provided listening socket until there are none left or an unrecoverable error occurs. @param logger: A logger to which C{accept}-related events will be logged. This should not log to arbitrary observers that might open a file descriptor to avoid claiming the C{EMFILE} file descriptor on UNIX-like systems. @type logger: L{Logger} @param accepts: An iterable iterated over to limit the number consecutive C{accept}s. @type accepts: An iterable. @param listener: The listening socket. @type listener: L{socket.socket} @param reservedFD: A reserved file descriptor that can be used to recover from C{EMFILE} on UNIX-like systems. @type reservedFD: L{_IFileDescriptorReservation} @return: A generator that yields C{(socket, addr)} tuples from L{socket.socket.accept} """ for _ in accepts: try: client, address = listener.accept() except OSError as e: if e.args[0] in (EWOULDBLOCK, EAGAIN): # No more clients. return elif e.args[0] == EPERM: # Netfilter on Linux may have rejected the # connection, but we get told to try to accept() # anyway. continue elif e.args[0] == EMFILE and reservedFD.available(): # Linux and other UNIX-like operating systems return # EMFILE when a process has reached its soft limit of # file descriptors. The reserved file descriptor is # available, so it can be released to free up a # descriptor for use by listener.accept()'s clients. # Each client socket will be closed until the listener # returns EAGAIN. logger.info( "EMFILE encountered;" " releasing reserved file descriptor." ) # The following block should not run arbitrary code # that might acquire its own file descriptor. with reservedFD: clientsToClose = _accept(logger, accepts, listener, reservedFD) for clientToClose, closedAddress in clientsToClose: clientToClose.close() logger.info( "EMFILE recovery:" " Closed socket from {address}", address=closedAddress, ) logger.info("Re-reserving EMFILE recovery file descriptor.") return elif e.args[0] in _ACCEPT_ERRORS: logger.info( "Could not accept new connection ({acceptError})", acceptError=errorcode[e.args[0]], ) return else: raise else: yield client, address
Take a list of disconnecting protocols, callback a L{Deferred} when they're all done. This is a hack to make some older tests less flaky, as L{ITransport.loseConnection} is not atomic on all reactors (for example, the CoreFoundation, which sometimes takes a reactor turn for CFSocket to realise). New tests should either not use real sockets in testing, or take the advice in I{https://jml.io/pages/how-to-disconnect-in-twisted-really.html} to heart. @param reactor: The reactor to schedule the checks on. @type reactor: L{IReactorTime} @param protocols: The protocols to wait for disconnecting. @type protocols: A L{list} of L{IProtocol}s.
def waitUntilAllDisconnected(reactor, protocols): """ Take a list of disconnecting protocols, callback a L{Deferred} when they're all done. This is a hack to make some older tests less flaky, as L{ITransport.loseConnection} is not atomic on all reactors (for example, the CoreFoundation, which sometimes takes a reactor turn for CFSocket to realise). New tests should either not use real sockets in testing, or take the advice in I{https://jml.io/pages/how-to-disconnect-in-twisted-really.html} to heart. @param reactor: The reactor to schedule the checks on. @type reactor: L{IReactorTime} @param protocols: The protocols to wait for disconnecting. @type protocols: A L{list} of L{IProtocol}s. """ lc = None def _check(): if True not in [x.transport.connected for x in protocols]: lc.stop() lc = task.LoopingCall(_check) lc.clock = reactor return lc.start(0.01, now=True)
Call the function C{f} using a thread from the given threadpool and return the result as a Deferred. This function is only used by client code which is maintaining its own threadpool. To run a function in the reactor's threadpool, use C{deferToThread}. @param reactor: The reactor in whose main thread the Deferred will be invoked. @param threadpool: An object which supports the C{callInThreadWithCallback} method of C{twisted.python.threadpool.ThreadPool}. @param f: The function to call. @param args: positional arguments to pass to f. @param kwargs: keyword arguments to pass to f. @return: A Deferred which fires a callback with the result of f, or an errback with a L{twisted.python.failure.Failure} if f throws an exception.
def deferToThreadPool( reactor: IReactorFromThreads, threadpool: ThreadPool, f: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs, ) -> defer.Deferred[_R]: """ Call the function C{f} using a thread from the given threadpool and return the result as a Deferred. This function is only used by client code which is maintaining its own threadpool. To run a function in the reactor's threadpool, use C{deferToThread}. @param reactor: The reactor in whose main thread the Deferred will be invoked. @param threadpool: An object which supports the C{callInThreadWithCallback} method of C{twisted.python.threadpool.ThreadPool}. @param f: The function to call. @param args: positional arguments to pass to f. @param kwargs: keyword arguments to pass to f. @return: A Deferred which fires a callback with the result of f, or an errback with a L{twisted.python.failure.Failure} if f throws an exception. """ d: defer.Deferred[_R] = defer.Deferred() def onResult(success: bool, result: _R | BaseException) -> None: if success: reactor.callFromThread(d.callback, result) else: reactor.callFromThread(d.errback, result) threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs) return d
Run a function in a thread and return the result as a Deferred. @param f: The function to call. @param args: positional arguments to pass to f. @param kwargs: keyword arguments to pass to f. @return: A Deferred which fires a callback with the result of f, or an errback with a L{twisted.python.failure.Failure} if f throws an exception.
def deferToThread(f, *args, **kwargs):
    """
    Run a function in a thread and return the result as a Deferred.

    @param f: The function to call.
    @param args: positional arguments to pass to f.
    @param kwargs: keyword arguments to pass to f.

    @return: A Deferred which fires a callback with the result of f,
        or an errback with a L{twisted.python.failure.Failure} if f throws
        an exception.
    """
    from twisted.internet import reactor

    return deferToThreadPool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
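A minimal usage sketch for deferToThread: push a blocking call into the reactor's thread pool and handle its result asynchronously; blockingWork stands in for any blocking library call.

import time

from twisted.internet import reactor
from twisted.internet.threads import deferToThread

def blockingWork(x):
    time.sleep(1)  # stands in for any blocking call
    return x * 2

d = deferToThread(blockingWork, 21)
d.addCallback(print)                 # prints 42 once the thread finishes
d.addBoth(lambda _: reactor.stop())
reactor.run()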
Run a list of functions.
def _runMultiple(tupleList):
    """
    Run a list of functions.
    """
    for f, args, kwargs in tupleList:
        f(*args, **kwargs)
Run a list of functions in the same thread. tupleList should be a list of (function, argsList, kwargsDict) tuples.
def callMultipleInThread(tupleList):
    """
    Run a list of functions in the same thread.

    tupleList should be a list of (function, argsList, kwargsDict) tuples.
    """
    from twisted.internet import reactor

    reactor.callInThread(_runMultiple, tupleList)
Run a function in the reactor from a thread, and wait for the result synchronously. If the function returns a L{Deferred}, wait for its result and return that. @param reactor: The L{IReactorThreads} provider which will be used to schedule the function call. @param f: the callable to run in the reactor thread @type f: any callable. @param a: the arguments to pass to C{f}. @param kw: the keyword arguments to pass to C{f}. @return: the result of the L{Deferred} returned by C{f}, or the result of C{f} if it returns anything other than a L{Deferred}. @raise Exception: If C{f} raises a synchronous exception, C{blockingCallFromThread} will raise that exception. If C{f} returns a L{Deferred} which fires with a L{Failure}, C{blockingCallFromThread} will raise that failure's exception (see L{Failure.raiseException}).
def blockingCallFromThread(reactor, f, *a, **kw):
    """
    Run a function in the reactor from a thread, and wait for the result
    synchronously.  If the function returns a L{Deferred}, wait for its
    result and return that.

    @param reactor: The L{IReactorThreads} provider which will be used to
        schedule the function call.
    @param f: the callable to run in the reactor thread
    @type f: any callable.
    @param a: the arguments to pass to C{f}.
    @param kw: the keyword arguments to pass to C{f}.

    @return: the result of the L{Deferred} returned by C{f}, or the result
        of C{f} if it returns anything other than a L{Deferred}.

    @raise Exception: If C{f} raises a synchronous exception,
        C{blockingCallFromThread} will raise that exception.  If C{f}
        returns a L{Deferred} which fires with a L{Failure},
        C{blockingCallFromThread} will raise that failure's exception (see
        L{Failure.raiseException}).
    """
    queue = Queue.Queue()

    def _callFromThread():
        result = defer.maybeDeferred(f, *a, **kw)
        result.addBoth(queue.put)

    reactor.callFromThread(_callFromThread)
    result = queue.get()
    if isinstance(result, failure.Failure):
        result.raiseException()
    return result
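A sketch of calling back into the reactor from a worker thread; treq.get is used only as an example of a Deferred-returning API and is an assumption, not part of this module.

from twisted.internet import reactor
from twisted.internet.threads import blockingCallFromThread, deferToThread

def workerThread():
    # Runs in a thread-pool thread; the HTTP request itself runs in the
    # reactor thread, and this call blocks until its Deferred fires.
    import treq
    response = blockingCallFromThread(reactor, treq.get, "https://example.com/")
    return response.code

d = deferToThread(workerThread)
d.addCallback(print)
d.addBoth(lambda _: reactor.stop())
reactor.run()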
Install a Tkinter.Tk() object into the reactor.
def install(widget, ms=10, reactor=None):
    """Install a Tkinter.Tk() object into the reactor."""
    installTkFunctions()
    global _task
    _task = task.LoopingCall(widget.update)
    _task.start(ms / 1000.0, False)
Remove the root Tk widget from the reactor. Call this before destroy()ing the root widget.
def uninstall():
    """Remove the root Tk widget from the reactor.

    Call this before destroy()ing the root widget.
    """
    global _task
    _task.stop()
    _task = None
Pack an integer into an ancillary data structure suitable for use with L{sendmsg.sendmsg}.
def _ancillaryDescriptor(fd):
    """
    Pack an integer into an ancillary data structure suitable for use with
    L{sendmsg.sendmsg}.
    """
    packed = struct.pack("i", fd)
    return [(socket.SOL_SOCKET, sendmsg.SCM_RIGHTS, packed)]
Determine whether the given unix socket path is in a filesystem namespace. While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and above support PF_UNIX sockets in an "abstract namespace" that does not correspond to any path. This function returns C{True} if the given socket path is stored in the filesystem and C{False} if the path is in this abstract namespace.
def _inFilesystemNamespace(path):
    """
    Determine whether the given unix socket path is in a filesystem namespace.

    While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and
    above support PF_UNIX sockets in an "abstract namespace" that does not
    correspond to any path.  This function returns C{True} if the given socket
    path is stored in the filesystem and C{False} if the path is in this
    abstract namespace.
    """
    return path[:1] not in (b"\0", "\0")
Spawn a process and return its output as a deferred returning a L{bytes}. @param executable: The file name to run and get the output of - the full path should be used. @param args: the command line arguments to pass to the process; a sequence of strings. The first string should B{NOT} be the executable's name. @param env: the environment variables to pass to the process; a dictionary of strings. @param path: the path to run the subprocess in - defaults to the current directory. @param reactor: the reactor to use - defaults to the default reactor @param errortoo: If true, include stderr in the result. If false, if stderr is received the returned L{Deferred} will errback with an L{IOError} instance with a C{processEnded} attribute. The C{processEnded} attribute refers to a L{Deferred} which fires when the executed process ends.
def getProcessOutput(executable, args=(), env={}, path=None, reactor=None, errortoo=0):
    """
    Spawn a process and return its output as a deferred returning a L{bytes}.

    @param executable: The file name to run and get the output of - the
        full path should be used.

    @param args: the command line arguments to pass to the process; a
        sequence of strings.  The first string should B{NOT} be the
        executable's name.

    @param env: the environment variables to pass to the process; a
        dictionary of strings.

    @param path: the path to run the subprocess in - defaults to the
        current directory.

    @param reactor: the reactor to use - defaults to the default reactor

    @param errortoo: If true, include stderr in the result.  If false, if
        stderr is received the returned L{Deferred} will errback with an
        L{IOError} instance with a C{processEnded} attribute.  The
        C{processEnded} attribute refers to a L{Deferred} which fires when
        the executed process ends.
    """
    return _callProtocolWithDeferred(
        lambda d: _BackRelay(d, errortoo=errortoo), executable, args, env, path, reactor
    )
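A minimal usage sketch for getProcessOutput; the /bin/ls path and its arguments are example assumptions.

from twisted.internet import reactor
from twisted.internet.utils import getProcessOutput

d = getProcessOutput("/bin/ls", args=("-l", "/tmp"), env={"LANG": "C"})
d.addCallback(lambda out: print(out.decode()))
d.addErrback(lambda err: print("failed:", err))
d.addBoth(lambda _: reactor.stop())
reactor.run()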
Spawn a process and return its exit code as a Deferred.
def getProcessValue(executable, args=(), env={}, path=None, reactor=None):
    """Spawn a process and return its exit code as a Deferred."""
    return _callProtocolWithDeferred(_ValueGetter, executable, args, env, path, reactor)
Spawn a process and return a Deferred that will be called back with its output (from stdout and stderr) and its exit code as (out, err, code). If a signal is raised, the Deferred will errback with the stdout and stderr up to that point, along with the signal, as (out, err, signalNum).
def getProcessOutputAndValue(
    executable, args=(), env={}, path=None, reactor=None, stdinBytes=None
):
    """Spawn a process and return a Deferred that will be called back with
    its output (from stdout and stderr) and its exit code as (out, err, code).

    If a signal is raised, the Deferred will errback with the stdout and
    stderr up to that point, along with the signal, as (out, err, signalNum).
    """
    return _callProtocolWithDeferred(
        _EverythingGetter,
        executable,
        args,
        env,
        path,
        reactor,
        protoArgs=(stdinBytes,),
    )
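A minimal usage sketch for getProcessOutputAndValue; the /bin/sh path and the shell command are example assumptions.

from twisted.internet import reactor
from twisted.internet.utils import getProcessOutputAndValue

d = getProcessOutputAndValue("/bin/sh", args=("-c", "echo out; echo err >&2; exit 3"))

def report(result):
    out, err, code = result
    print("stdout:", out, "stderr:", err, "exit code:", code)

d.addCallback(report)
d.addBoth(lambda _: reactor.stop())
reactor.run()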
Run the function I{f}, but with some warnings suppressed. This calls L{warnings.filterwarnings} to add warning filters before invoking I{f}. If I{f} returns a L{Deferred} then the added filters are removed once the deferred fires. Otherwise they are removed immediately. Note that the list of warning filters is a process-wide resource, so calling this function will affect all threads. @param suppressedWarnings: A list of arguments to pass to L{warnings.filterwarnings}, a sequence of (args, kwargs) 2-tuples. @param f: A callable, which may return a L{Deferred}. @param a: Positional arguments passed to I{f} @param kw: Keyword arguments passed to I{f} @return: The result of C{f(*a, **kw)} @seealso: L{twisted.python.util.runWithWarningsSuppressed} functions similarly, but doesn't handled L{Deferred}s.
def runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw): """ Run the function I{f}, but with some warnings suppressed. This calls L{warnings.filterwarnings} to add warning filters before invoking I{f}. If I{f} returns a L{Deferred} then the added filters are removed once the deferred fires. Otherwise they are removed immediately. Note that the list of warning filters is a process-wide resource, so calling this function will affect all threads. @param suppressedWarnings: A list of arguments to pass to L{warnings.filterwarnings}, a sequence of (args, kwargs) 2-tuples. @param f: A callable, which may return a L{Deferred}. @param a: Positional arguments passed to I{f} @param kw: Keyword arguments passed to I{f} @return: The result of C{f(*a, **kw)} @seealso: L{twisted.python.util.runWithWarningsSuppressed} functions similarly, but doesn't handled L{Deferred}s. """ for args, kwargs in suppressedWarnings: warnings.filterwarnings(*args, **kwargs) addedFilters = warnings.filters[: len(suppressedWarnings)] try: result = f(*a, **kw) except BaseException: exc_info = sys.exc_info() _resetWarningFilters(None, addedFilters) raise exc_info[1].with_traceback(exc_info[2]) else: if isinstance(result, defer.Deferred): result.addBoth(_resetWarningFilters, addedFilters) else: _resetWarningFilters(None, addedFilters) return result
Wrap C{f} in a callable which suppresses the indicated warnings before invoking C{f} and unsuppresses them afterwards. If f returns a Deferred, warnings will remain suppressed until the Deferred fires.
def suppressWarnings(f, *suppressedWarnings):
    """
    Wrap C{f} in a callable which suppresses the indicated warnings before
    invoking C{f} and unsuppresses them afterwards.  If f returns a Deferred,
    warnings will remain suppressed until the Deferred fires.
    """

    @wraps(f)
    def warningSuppressingWrapper(*a, **kw):
        return runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw)

    return warningSuppressingWrapper
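A small usage sketch, assuming suppressWarnings is importable from twisted.internet.utils; noisyFunction is a hypothetical callable that emits a DeprecationWarning.

import warnings

from twisted.internet.utils import suppressWarnings

def noisyFunction():
    warnings.warn("old API", DeprecationWarning)
    return "done"

quiet = suppressWarnings(
    noisyFunction,
    (("ignore",), {"category": DeprecationWarning}),
)
print(quiet())  # "done", with the DeprecationWarning filtered out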
Configure the twisted mainloop to be run inside the wxPython mainloop.
def install():
    """
    Configure the twisted mainloop to be run inside the wxPython mainloop.
    """
    reactor = WxReactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
Install the wxPython support, given a wxApp instance
def install(app):
    """Install the wxPython support, given a wxApp instance"""
    runner = wxRunner(app)
    reactor.callLater(0.02, runner.run)
Emit a deprecation warning about a gnome-related reactor. @param name: The name of the reactor. For example, C{"gtk2reactor"}. @param version: The version in which the deprecation was introduced.
def deprecatedGnomeReactor(name: str, version: Version) -> None: """ Emit a deprecation warning about a gnome-related reactor. @param name: The name of the reactor. For example, C{"gtk2reactor"}. @param version: The version in which the deprecation was introduced. """ stem = DEPRECATION_WARNING_FORMAT % { "fqpn": "twisted.internet." + name, "version": getVersionString(version), } msg = stem + ". Please use twisted.internet.gireactor instead." warnings.warn(msg, category=DeprecationWarning)
Look for a #! line, and return the value following the #! if one exists, or None if this file is not a script. I don't know if there are any conventions for quoting in Windows shebang lines, so this doesn't support any; therefore, you may not pass any arguments to scripts invoked as filters. That's probably wrong, so if somebody knows more about the cultural expectations on Windows, please feel free to fix. This shebang line support was added in support of the CGI tests; appropriately enough, I determined that shebang lines are culturally accepted in the Windows world through this page:: http://www.cgi101.com/learn/connect/winxp.html @param filename: str representing a filename @return: a str representing another filename.
def _findShebang(filename): """ Look for a #! line, and return the value following the #! if one exists, or None if this file is not a script. I don't know if there are any conventions for quoting in Windows shebang lines, so this doesn't support any; therefore, you may not pass any arguments to scripts invoked as filters. That's probably wrong, so if somebody knows more about the cultural expectations on Windows, please feel free to fix. This shebang line support was added in support of the CGI tests; appropriately enough, I determined that shebang lines are culturally accepted in the Windows world through this page:: http://www.cgi101.com/learn/connect/winxp.html @param filename: str representing a filename @return: a str representing another filename. """ with open(filename) as f: if f.read(2) == "#!": exe = f.readline(1024).strip("\n") return exe
Determine if a pywintypes.error is telling us that the given process is 'not a valid win32 application', i.e. not a PE format executable. @param pywinerr: a pywintypes.error instance raised by CreateProcess @return: a boolean
def _invalidWin32App(pywinerr): """ Determine if a pywintypes.error is telling us that the given process is 'not a valid win32 application', i.e. not a PE format executable. @param pywinerr: a pywintypes.error instance raised by CreateProcess @return: a boolean """ # Let's do this better in the future, but I have no idea what this error # is; MSDN doesn't mention it, and there is no symbolic constant in # win32process module that represents 193. return pywinerr.args[0] == 193
Check whether the given modules were imported, and if requested, ensure they will not be importable in the future. @param moduleNames: A list of module names we make sure aren't imported. @type moduleNames: C{list} of C{str} @param preventImports: A list of module name whose future imports should be prevented. @type preventImports: C{list} of C{str} @param errorMessage: Message to use when raising an C{ImportError}. @type errorMessage: C{str} @raise ImportError: with given error message if a given module name has already been imported.
def ensureNotImported(moduleNames, errorMessage, preventImports=[]):
    """
    Check whether the given modules were imported, and if requested, ensure
    they will not be importable in the future.

    @param moduleNames: A list of module names we make sure aren't imported.
    @type moduleNames: C{list} of C{str}

    @param preventImports: A list of module name whose future imports should
        be prevented.
    @type preventImports: C{list} of C{str}

    @param errorMessage: Message to use when raising an C{ImportError}.
    @type errorMessage: C{str}

    @raise ImportError: with given error message if a given module name
        has already been imported.
    """
    for name in moduleNames:
        if sys.modules.get(name) is not None:
            raise ImportError(errorMessage)

    # Disable module imports to avoid potential problems.
    for name in preventImports:
        sys.modules[name] = None
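A sketch of how this helper might be called to keep two incompatible toolkit bindings from being mixed; the module names and error message here are illustrative assumptions.

ensureNotImported(
    ["gobject", "gtk"],
    "Introspected and static glib/gtk bindings must not be mixed.",
    preventImports=["gobject", "gtk"],
)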
Integrate glib's wakeup file descriptor usage and our own. Python supports only one wakeup file descriptor at a time and both Twisted and glib want to use it. This is a context manager that can be wrapped around the whole glib reactor main loop which makes our signal handling work with glib's signal handling.
def _signalGlue(): """ Integrate glib's wakeup file descriptor usage and our own. Python supports only one wakeup file descriptor at a time and both Twisted and glib want to use it. This is a context manager that can be wrapped around the whole glib reactor main loop which makes our signal handling work with glib's signal handling. """ from gi import _ossighelper as signalGlue patcher = MonkeyPatcher() patcher.addPatch(signalGlue, "_wakeup_fd_is_active", True) return patcher
Combine the C{glib.idle_add} and C{glib.MainLoop.quit} functions into a function suitable for crashing the reactor.
def _loopQuitter(
    idleAdd: Callable[[Callable[[], None]], None], loopQuit: Callable[[], None]
) -> Callable[[], None]:
    """
    Combine the C{glib.idle_add} and C{glib.MainLoop.quit} functions into a
    function suitable for crashing the reactor.
    """
    return lambda: idleAdd(loopQuit)
Convert some text typed by a human into some ASCII bytes. This is provided to allow us to use the U{partially-broken IDNA implementation in the standard library <http://bugs.python.org/issue17305>} if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is not available; C{service_identity} is somewhat stricter about this. @param text: A domain name, hopefully. @type text: L{unicode} @return: The domain name's IDNA representation, encoded as bytes. @rtype: L{bytes}
def _idnaBytes(text: str) -> bytes: """ Convert some text typed by a human into some ASCII bytes. This is provided to allow us to use the U{partially-broken IDNA implementation in the standard library <http://bugs.python.org/issue17305>} if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is not available; C{service_identity} is somewhat stricter about this. @param text: A domain name, hopefully. @type text: L{unicode} @return: The domain name's IDNA representation, encoded as bytes. @rtype: L{bytes} """ try: import idna except ImportError: return text.encode("idna") else: return idna.encode(text)
Convert some IDNA-encoded octets into some human-readable text. Currently only used by the tests. @param octets: Some bytes representing a hostname. @type octets: L{bytes} @return: A human-readable domain name. @rtype: L{unicode}
def _idnaText(octets: bytes) -> str: """ Convert some IDNA-encoded octets into some human-readable text. Currently only used by the tests. @param octets: Some bytes representing a hostname. @type octets: L{bytes} @return: A human-readable domain name. @rtype: L{unicode} """ try: import idna except ImportError: return octets.decode("idna") else: return idna.decode(octets)
Add a layer of SSL to a transport. @param transport: The transport which will be modified. This can either by a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} or a L{FileHandle<twisted.internet.iocpreactor.abstract.FileHandle>}. The actual requirements of this instance are that it have: - a C{_tlsClientDefault} attribute indicating whether the transport is a client (C{True}) or a server (C{False}) - a settable C{TLS} attribute which can be used to mark the fact that SSL has been started - settable C{getHandle} and C{getPeerCertificate} attributes so these L{ISSLTransport} methods can be added to it - a C{protocol} attribute referring to the L{IProtocol} currently connected to the transport, which can also be set to a new L{IProtocol} for the transport to deliver data to @param contextFactory: An SSL context factory defining SSL parameters for the new SSL layer. @type contextFactory: L{twisted.internet.interfaces.IOpenSSLContextFactory} @param normal: A flag indicating whether SSL will go in the same direction as the underlying transport goes. That is, if the SSL client will be the underlying client and the SSL server will be the underlying server. C{True} means it is the same, C{False} means they are switched. @type normal: L{bool} @param bypass: A transport base class to call methods on to bypass the new SSL layer (so that the SSL layer itself can send its bytes). @type bypass: L{type}
def startTLS(transport, contextFactory, normal, bypass): """ Add a layer of SSL to a transport. @param transport: The transport which will be modified. This can either by a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} or a L{FileHandle<twisted.internet.iocpreactor.abstract.FileHandle>}. The actual requirements of this instance are that it have: - a C{_tlsClientDefault} attribute indicating whether the transport is a client (C{True}) or a server (C{False}) - a settable C{TLS} attribute which can be used to mark the fact that SSL has been started - settable C{getHandle} and C{getPeerCertificate} attributes so these L{ISSLTransport} methods can be added to it - a C{protocol} attribute referring to the L{IProtocol} currently connected to the transport, which can also be set to a new L{IProtocol} for the transport to deliver data to @param contextFactory: An SSL context factory defining SSL parameters for the new SSL layer. @type contextFactory: L{twisted.internet.interfaces.IOpenSSLContextFactory} @param normal: A flag indicating whether SSL will go in the same direction as the underlying transport goes. That is, if the SSL client will be the underlying client and the SSL server will be the underlying server. C{True} means it is the same, C{False} means they are switched. @type normal: L{bool} @param bypass: A transport base class to call methods on to bypass the new SSL layer (so that the SSL layer itself can send its bytes). @type bypass: L{type} """ # Figure out which direction the SSL goes in. If normal is True, # we'll go in the direction indicated by the subclass. Otherwise, # we'll go the other way (client = not normal ^ _tlsClientDefault, # in other words). if normal: client = transport._tlsClientDefault else: client = not transport._tlsClientDefault # If we have a producer, unregister it, and then re-register it below once # we've switched to TLS mode, so it gets hooked up correctly: producer, streaming = None, None if transport.producer is not None: producer, streaming = transport.producer, transport.streamingProducer transport.unregisterProducer() tlsFactory = TLSMemoryBIOFactory(contextFactory, client, None) tlsProtocol = tlsFactory.protocol(tlsFactory, transport.protocol, False) # Hook up the new TLS protocol to the transport: transport.protocol = tlsProtocol transport.getHandle = tlsProtocol.getHandle transport.getPeerCertificate = tlsProtocol.getPeerCertificate # Mark the transport as secure. directlyProvides(transport, ISSLTransport) # Remember we did this so that write and writeSequence can send the # data to the right place. transport.TLS = True # Hook it up transport.protocol.makeConnection(_BypassTLS(bypass, transport)) # Restore producer if necessary: if producer: transport.registerProducer(producer, streaming)
Install a signal handler which will write a byte to C{fd} when I{SIGCHLD} is received. This is implemented by installing a SIGCHLD handler that does nothing, setting the I{SIGCHLD} handler as not allowed to interrupt system calls, and using L{signal.set_wakeup_fd} to do the actual writing. @param fd: The file descriptor to which to write when I{SIGCHLD} is received. @return: The file descriptor previously configured for this use.
def installHandler(fd: int) -> int: """ Install a signal handler which will write a byte to C{fd} when I{SIGCHLD} is received. This is implemented by installing a SIGCHLD handler that does nothing, setting the I{SIGCHLD} handler as not allowed to interrupt system calls, and using L{signal.set_wakeup_fd} to do the actual writing. @param fd: The file descriptor to which to write when I{SIGCHLD} is received. @return: The file descriptor previously configured for this use. """ if fd == -1: signal.signal(signal.SIGCHLD, signal.SIG_DFL) else: def noopSignalHandler(*args): pass signal.signal(signal.SIGCHLD, noopSignalHandler) signal.siginterrupt(signal.SIGCHLD, False) return signal.set_wakeup_fd(fd)
Determine whether the I{SIGCHLD} handler is the default or not.
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.
    """
    return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
Given a pair of L{TLSVersion} constants, figure out what versions we want to disable (as OpenSSL is an exclusion based API). @param oldest: The oldest L{TLSVersion} we want to allow. @type oldest: L{TLSVersion} constant @param newest: The newest L{TLSVersion} we want to allow, or L{None} for no upper limit. @type newest: L{TLSVersion} constant or L{None} @return: The versions we want to disable. @rtype: L{list} of L{TLSVersion} constants.
def _getExcludedTLSProtocols(oldest, newest):
    """
    Given a pair of L{TLSVersion} constants, figure out what versions we want
    to disable (as OpenSSL is an exclusion based API).

    @param oldest: The oldest L{TLSVersion} we want to allow.
    @type oldest: L{TLSVersion} constant

    @param newest: The newest L{TLSVersion} we want to allow, or L{None} for
        no upper limit.
    @type newest: L{TLSVersion} constant or L{None}

    @return: The versions we want to disable.
    @rtype: L{list} of L{TLSVersion} constants.
    """
    versions = list(TLSVersion.iterconstants())
    excludedVersions = [x for x in versions[: versions.index(oldest)]]

    if newest:
        excludedVersions.extend([x for x in versions[versions.index(newest) :]])

    return excludedVersions
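A worked example of the exclusion logic, assuming TLSVersion.iterconstants() yields SSLv3, TLSv1_0, TLSv1_1, TLSv1_2, TLSv1_3 in that order:

from twisted.internet.ssl import TLSVersion

# Allow TLS 1.2 and newer: everything older than TLSv1_2 gets excluded.
excluded = _getExcludedTLSProtocols(TLSVersion.TLSv1_2, None)
# excluded == [TLSVersion.SSLv3, TLSVersion.TLSv1_0, TLSVersion.TLSv1_1]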
Check only the common name in the certificate presented by the peer and only for an exact match. This is to provide I{something} in the way of hostname verification to users who haven't installed C{service_identity}. This check is overly strict, relies on a deprecated TLS feature (you're supposed to ignore the commonName if the subjectAlternativeName extensions are present, I believe), and lots of valid certificates will fail. @param connection: the OpenSSL connection to verify. @type connection: L{OpenSSL.SSL.Connection} @param hostname: The hostname expected by the user. @type hostname: L{unicode} @raise twisted.internet.ssl.VerificationError: if the common name and hostname don't match.
def simpleVerifyHostname(connection, hostname): """ Check only the common name in the certificate presented by the peer and only for an exact match. This is to provide I{something} in the way of hostname verification to users who haven't installed C{service_identity}. This check is overly strict, relies on a deprecated TLS feature (you're supposed to ignore the commonName if the subjectAlternativeName extensions are present, I believe), and lots of valid certificates will fail. @param connection: the OpenSSL connection to verify. @type connection: L{OpenSSL.SSL.Connection} @param hostname: The hostname expected by the user. @type hostname: L{unicode} @raise twisted.internet.ssl.VerificationError: if the common name and hostname don't match. """ commonName = connection.get_peer_certificate().get_subject().commonName if commonName != hostname: raise SimpleVerificationError(repr(commonName) + "!=" + repr(hostname))
Always fails validation of IP addresses @param connection: the OpenSSL connection to verify. @type connection: L{OpenSSL.SSL.Connection} @param hostname: The hostname expected by the user. @type hostname: L{unicode} @raise twisted.internet.ssl.VerificationError: Always raised
def simpleVerifyIPAddress(connection, hostname):
    """
    Always fails validation of IP addresses

    @param connection: the OpenSSL connection to verify.
    @type connection: L{OpenSSL.SSL.Connection}

    @param hostname: The hostname expected by the user.
    @type hostname: L{unicode}

    @raise twisted.internet.ssl.VerificationError: Always raised
    """
    raise SimpleVerificationError("Cannot verify certificate IP addresses")
Check pyOpenSSL version string whether we can use it for host verification. @param version: A pyOpenSSL version string. @type version: L{str} @rtype: L{bool}
def _usablePyOpenSSL(version):
    """
    Check pyOpenSSL version string whether we can use it for host
    verification.

    @param version: A pyOpenSSL version string.
    @type version: L{str}

    @rtype: L{bool}
    """
    major, minor = (int(part) for part in version.split(".")[:2])
    return (major, minor) >= (0, 12)
Determine if C{service_identity} is installed. If so, use it. If not, use simplistic and incorrect checking as implemented in L{simpleVerifyHostname}. @return: 2-tuple of (C{verify_hostname}, C{VerificationError}) @rtype: L{tuple}
def _selectVerifyImplementation(): """ Determine if C{service_identity} is installed. If so, use it. If not, use simplistic and incorrect checking as implemented in L{simpleVerifyHostname}. @return: 2-tuple of (C{verify_hostname}, C{VerificationError}) @rtype: L{tuple} """ whatsWrong = ( "Without the service_identity module, Twisted can perform only " "rudimentary TLS client hostname verification. Many valid " "certificate/hostname mappings may be rejected." ) try: from service_identity import VerificationError from service_identity.pyopenssl import verify_hostname, verify_ip_address return verify_hostname, verify_ip_address, VerificationError except ImportError as e: warnings.warn_explicit( "You do not have a working installation of the " "service_identity module: '" + str(e) + "'. " "Please install it from " "<https://pypi.python.org/pypi/service_identity> and make " "sure all of its dependencies are satisfied. " + whatsWrong, # Unfortunately the lineno is required. category=UserWarning, filename="", lineno=0, ) return simpleVerifyHostname, simpleVerifyIPAddress, SimpleVerificationError
Checks whether your versions of PyOpenSSL and OpenSSL are recent enough to support protocol negotiation, and if they are, what kind of protocol negotiation is supported. @return: A combination of flags from L{ProtocolNegotiationSupport} that indicate which mechanisms for protocol negotiation are supported. @rtype: L{constantly.FlagConstant}
def protocolNegotiationMechanisms(): """ Checks whether your versions of PyOpenSSL and OpenSSL are recent enough to support protocol negotiation, and if they are, what kind of protocol negotiation is supported. @return: A combination of flags from L{ProtocolNegotiationSupport} that indicate which mechanisms for protocol negotiation are supported. @rtype: L{constantly.FlagConstant} """ support = ProtocolNegotiationSupport.NOSUPPORT ctx = SSL.Context(SSL.SSLv23_METHOD) try: ctx.set_npn_advertise_callback(lambda c: None) except (AttributeError, NotImplementedError): pass else: support |= ProtocolNegotiationSupport.NPN try: ctx.set_alpn_select_callback(lambda c: None) except (AttributeError, NotImplementedError): pass else: support |= ProtocolNegotiationSupport.ALPN return support
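A hedged sketch of how a caller might consult this check before requesting ALPN; the application-level fallback shown here is an assumption for illustration, not part of Twisted's API.

# Assumes protocolNegotiationMechanisms and ProtocolNegotiationSupport from
# this module are in scope.
supported = protocolNegotiationMechanisms()
if supported & ProtocolNegotiationSupport.ALPN:
    # Offer HTTP/2 first, falling back to HTTP/1.1 during negotiation.
    acceptableProtocols = [b"h2", b"http/1.1"]
else:
    # No negotiation mechanism available; speak plain HTTP/1.1.
    acceptableProtocols = None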
(private) Helper for L{Certificate.peerFromTransport} and L{Certificate.hostFromTransport} which checks for incompatible handle types and null certificates and raises the appropriate exception or returns the appropriate certificate object.
def _handleattrhelper(Class, transport, methodName): """ (private) Helper for L{Certificate.peerFromTransport} and L{Certificate.hostFromTransport} which checks for incompatible handle types and null certificates and raises the appropriate exception or returns the appropriate certificate object. """ method = getattr(transport.getHandle(), f"get_{methodName}_certificate", None) if method is None: raise CertificateError( "non-TLS transport {!r} did not have {} certificate".format( transport, methodName ) ) cert = method() if cert is None: raise CertificateError( "TLS transport {!r} did not have {} certificate".format( transport, methodName ) ) return Class(cert)
Builds an object that trusts multiple root L{Certificate}s. When passed to L{optionsForClientTLS}, connections using those options will reject any server certificate not signed by at least one of the certificates in the `certificates` list. @since: 16.0 @param certificates: All certificates which will be trusted. @type certificates: C{iterable} of L{CertBase} @rtype: L{IOpenSSLTrustRoot} @return: an object suitable for use as the trustRoot= keyword argument to L{optionsForClientTLS}
def trustRootFromCertificates(certificates): """ Builds an object that trusts multiple root L{Certificate}s. When passed to L{optionsForClientTLS}, connections using those options will reject any server certificate not signed by at least one of the certificates in the `certificates` list. @since: 16.0 @param certificates: All certificates which will be trusted. @type certificates: C{iterable} of L{CertBase} @rtype: L{IOpenSSLTrustRoot} @return: an object suitable for use as the trustRoot= keyword argument to L{optionsForClientTLS} """ certs = [] for cert in certificates: # PrivateCertificate or Certificate are both okay if isinstance(cert, CertBase): cert = cert.original else: raise TypeError( "certificates items must be twisted.internet.ssl.CertBase" " instances" ) certs.append(cert) return OpenSSLCertificateAuthorities(certs)
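For illustration, a minimal sketch of feeding PEM-loaded certificates into this helper; the file names are hypothetical, and C{Certificate.loadPEM} and L{optionsForClientTLS} are assumed to be available from C{twisted.internet.ssl}.

from twisted.internet.ssl import Certificate, optionsForClientTLS, trustRootFromCertificates

# Load two (hypothetical) CA certificates from disk.
caCerts = []
for path in ("ca-one.pem", "ca-two.pem"):
    with open(path, "rb") as f:
        caCerts.append(Certificate.loadPEM(f.read()))

# Only servers whose certificates chain to one of these CAs will be accepted.
trustRoot = trustRootFromCertificates(caCerts)
clientOptions = optionsForClientTLS("example.com", trustRoot=trustRoot)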
Attempt to discover a set of trusted certificate authority certificates (or, in other words: trust roots, or root certificates) whose trust is managed and updated by tools outside of Twisted. If you are writing any client-side TLS code with Twisted, you should use this as the C{trustRoot} argument to L{CertificateOptions <twisted.internet.ssl.CertificateOptions>}. The result of this function should be like the up-to-date list of certificates in a web browser. When developing code that uses C{platformTrust}, you can think of it that way. However, the choice of which certificate authorities to trust is never Twisted's responsibility. Unless you're writing a very unusual application or library, it's not your code's responsibility either. The user may use platform-specific tools for defining which server certificates should be trusted by programs using TLS. The purpose of using this API is to respect that decision as much as possible. This should be a set of trust settings most appropriate for I{client} TLS connections; i.e. those which need to verify a server's authenticity. You should probably use this by default for any client TLS connection that you create. For servers, however, client certificates are typically not verified; or, if they are, their verification will depend on a custom, application-specific certificate authority. @since: 14.0 @note: Currently, L{platformTrust} depends entirely upon your OpenSSL build supporting a set of "L{default verify paths <OpenSSLDefaultPaths>}" which correspond to certificate authority trust roots. Unfortunately, whether this is true of your system is both outside of Twisted's control and difficult (if not impossible) for Twisted to detect automatically. Nevertheless, this ought to work as desired by default on: - Ubuntu Linux machines with the U{ca-certificates <https://launchpad.net/ubuntu/+source/ca-certificates>} package installed, - macOS when using the system-installed version of OpenSSL (i.e. I{not} one installed via MacPorts or Homebrew), - any build of OpenSSL which has had certificate authority certificates installed into its default verify paths (by default, C{/usr/local/ssl/certs} if you've built your own OpenSSL), or - any process where the C{SSL_CERT_FILE} environment variable is set to the path of a file containing your desired CA certificates bundle. Hopefully soon, this API will be updated to use more sophisticated trust-root discovery mechanisms. Until then, you can follow tickets in the Twisted tracker for progress on this implementation on U{Microsoft Windows <https://twistedmatrix.com/trac/ticket/6371>}, U{macOS <https://twistedmatrix.com/trac/ticket/6372>}, and U{a fallback for other platforms which do not have native trust management tools <https://twistedmatrix.com/trac/ticket/6934>}. @return: an appropriate trust settings object for your platform. @rtype: L{IOpenSSLTrustRoot} @raise NotImplementedError: if this platform is not yet supported by Twisted. At present, only OpenSSL is supported.
def platformTrust(): """ Attempt to discover a set of trusted certificate authority certificates (or, in other words: trust roots, or root certificates) whose trust is managed and updated by tools outside of Twisted. If you are writing any client-side TLS code with Twisted, you should use this as the C{trustRoot} argument to L{CertificateOptions <twisted.internet.ssl.CertificateOptions>}. The result of this function should be like the up-to-date list of certificates in a web browser. When developing code that uses C{platformTrust}, you can think of it that way. However, the choice of which certificate authorities to trust is never Twisted's responsibility. Unless you're writing a very unusual application or library, it's not your code's responsibility either. The user may use platform-specific tools for defining which server certificates should be trusted by programs using TLS. The purpose of using this API is to respect that decision as much as possible. This should be a set of trust settings most appropriate for I{client} TLS connections; i.e. those which need to verify a server's authenticity. You should probably use this by default for any client TLS connection that you create. For servers, however, client certificates are typically not verified; or, if they are, their verification will depend on a custom, application-specific certificate authority. @since: 14.0 @note: Currently, L{platformTrust} depends entirely upon your OpenSSL build supporting a set of "L{default verify paths <OpenSSLDefaultPaths>}" which correspond to certificate authority trust roots. Unfortunately, whether this is true of your system is both outside of Twisted's control and difficult (if not impossible) for Twisted to detect automatically. Nevertheless, this ought to work as desired by default on: - Ubuntu Linux machines with the U{ca-certificates <https://launchpad.net/ubuntu/+source/ca-certificates>} package installed, - macOS when using the system-installed version of OpenSSL (i.e. I{not} one installed via MacPorts or Homebrew), - any build of OpenSSL which has had certificate authority certificates installed into its default verify paths (by default, C{/usr/local/ssl/certs} if you've built your own OpenSSL), or - any process where the C{SSL_CERT_FILE} environment variable is set to the path of a file containing your desired CA certificates bundle. Hopefully soon, this API will be updated to use more sophisticated trust-root discovery mechanisms. Until then, you can follow tickets in the Twisted tracker for progress on this implementation on U{Microsoft Windows <https://twistedmatrix.com/trac/ticket/6371>}, U{macOS <https://twistedmatrix.com/trac/ticket/6372>}, and U{a fallback for other platforms which do not have native trust management tools <https://twistedmatrix.com/trac/ticket/6934>}. @return: an appropriate trust settings object for your platform. @rtype: L{IOpenSSLTrustRoot} @raise NotImplementedError: if this platform is not yet supported by Twisted. At present, only OpenSSL is supported. """ return OpenSSLDefaultPaths()
Wrap up an C{info_callback} for pyOpenSSL so that if something goes wrong the error is immediately logged and the connection is dropped if possible. This wrapper exists because some versions of pyOpenSSL don't handle errors from callbacks at I{all}, and those which do write tracebacks directly to stderr rather than to a supplied logging system. This reports unexpected errors to the Twisted logging system. Also, this terminates the connection immediately if possible because if you've got bugs in your verification logic it's much safer to just give up. @param wrapped: A valid C{info_callback} for pyOpenSSL. @type wrapped: L{callable} @return: A valid C{info_callback} for pyOpenSSL that handles any errors in C{wrapped}. @rtype: L{callable}
def _tolerateErrors(wrapped): """ Wrap up an C{info_callback} for pyOpenSSL so that if something goes wrong the error is immediately logged and the connection is dropped if possible. This wrapper exists because some versions of pyOpenSSL don't handle errors from callbacks at I{all}, and those which do write tracebacks directly to stderr rather than to a supplied logging system. This reports unexpected errors to the Twisted logging system. Also, this terminates the connection immediately if possible because if you've got bugs in your verification logic it's much safer to just give up. @param wrapped: A valid C{info_callback} for pyOpenSSL. @type wrapped: L{callable} @return: A valid C{info_callback} for pyOpenSSL that handles any errors in C{wrapped}. @rtype: L{callable} """ def infoCallback(connection, where, ret): try: return wrapped(connection, where, ret) except BaseException: f = Failure() log.err(f, "Error during info_callback") connection.get_app_data().failVerification(f) return infoCallback
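A short sketch of the intended wrapping pattern; C{verifyOnInfo} is a hypothetical application callback, and the context setup mirrors standard pyOpenSSL usage.

def verifyOnInfo(connection, where, ret):
    # Hypothetical, possibly-buggy verification logic would live here.
    pass

# Wrapping means a bug in verifyOnInfo is logged and fails the connection
# (via the connection's app data) instead of being swallowed or printed to
# stderr by pyOpenSSL.
safeInfoCallback = _tolerateErrors(verifyOnInfo)
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_info_callback(safeInfoCallback)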
Create a L{client connection creator <IOpenSSLClientConnectionCreator>} for use with APIs such as L{SSL4ClientEndpoint <twisted.internet.endpoints.SSL4ClientEndpoint>}, L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}, and L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}.

@since: 14.0

@param hostname: The expected name of the remote host. This serves two purposes: first, and most importantly, it verifies that the certificate received from the server correctly identifies the specified hostname. The second purpose is to use the U{Server Name Indication extension <https://en.wikipedia.org/wiki/Server_Name_Indication>} to indicate to the server which certificate should be used.
@type hostname: L{unicode}

@param trustRoot: Specification of trust requirements of peers. This may be a L{Certificate} or the result of L{platformTrust}. By default it is L{platformTrust} and you probably shouldn't adjust it unless you really know what you're doing. Be aware that clients using this interface I{must} verify the server; you cannot explicitly pass L{None} since that just means to use L{platformTrust}.
@type trustRoot: L{IOpenSSLTrustRoot}

@param clientCertificate: The certificate and private key that the client will use to authenticate to the server. If unspecified, the client will not authenticate.
@type clientCertificate: L{PrivateCertificate}

@param acceptableProtocols: The protocols this peer is willing to speak after the TLS negotiation has completed, advertised over both ALPN and NPN. If this argument is specified, and no overlap can be found with the other peer, the connection will fail to be established. If the remote peer does not offer NPN or ALPN, the connection will be established, but no protocol will be negotiated. Protocols earlier in the list are preferred over those later in the list.
@type acceptableProtocols: L{list} of L{bytes}

@param extraCertificateOptions: A dictionary of additional keyword arguments to be presented to L{CertificateOptions}. Please avoid using this unless you absolutely need to; any time you need to pass an option here, that is a bug in this interface.
@type extraCertificateOptions: L{dict}

@return: A client connection creator.
@rtype: L{IOpenSSLClientConnectionCreator}
def optionsForClientTLS(
    hostname,
    trustRoot=None,
    clientCertificate=None,
    acceptableProtocols=None,
    *,
    extraCertificateOptions=None,
):
    """
    Create a L{client connection creator <IOpenSSLClientConnectionCreator>}
    for use with APIs such as L{SSL4ClientEndpoint
    <twisted.internet.endpoints.SSL4ClientEndpoint>}, L{connectSSL
    <twisted.internet.interfaces.IReactorSSL.connectSSL>}, and L{startTLS
    <twisted.internet.interfaces.ITLSTransport.startTLS>}.

    @since: 14.0

    @param hostname: The expected name of the remote host. This serves two
        purposes: first, and most importantly, it verifies that the
        certificate received from the server correctly identifies the
        specified hostname. The second purpose is to use the U{Server Name
        Indication extension
        <https://en.wikipedia.org/wiki/Server_Name_Indication>} to indicate to
        the server which certificate should be used.
    @type hostname: L{unicode}

    @param trustRoot: Specification of trust requirements of peers. This may
        be a L{Certificate} or the result of L{platformTrust}. By default it
        is L{platformTrust} and you probably shouldn't adjust it unless you
        really know what you're doing. Be aware that clients using this
        interface I{must} verify the server; you cannot explicitly pass
        L{None} since that just means to use L{platformTrust}.
    @type trustRoot: L{IOpenSSLTrustRoot}

    @param clientCertificate: The certificate and private key that the client
        will use to authenticate to the server. If unspecified, the client
        will not authenticate.
    @type clientCertificate: L{PrivateCertificate}

    @param acceptableProtocols: The protocols this peer is willing to speak
        after the TLS negotiation has completed, advertised over both ALPN and
        NPN. If this argument is specified, and no overlap can be found with
        the other peer, the connection will fail to be established. If the
        remote peer does not offer NPN or ALPN, the connection will be
        established, but no protocol will be negotiated. Protocols earlier in
        the list are preferred over those later in the list.
    @type acceptableProtocols: L{list} of L{bytes}

    @param extraCertificateOptions: A dictionary of additional keyword
        arguments to be presented to L{CertificateOptions}. Please avoid using
        this unless you absolutely need to; any time you need to pass an
        option here, that is a bug in this interface.
    @type extraCertificateOptions: L{dict}

    @return: A client connection creator.
    @rtype: L{IOpenSSLClientConnectionCreator}
    """
    if extraCertificateOptions is None:
        extraCertificateOptions = {}
    if trustRoot is None:
        trustRoot = platformTrust()
    if not isinstance(hostname, str):
        raise TypeError(
            "optionsForClientTLS requires text for host names, not "
            + hostname.__class__.__name__
        )
    if clientCertificate:
        extraCertificateOptions.update(
            privateKey=clientCertificate.privateKey.original,
            certificate=clientCertificate.original,
        )
    certificateOptions = OpenSSLCertificateOptions(
        trustRoot=trustRoot,
        acceptableProtocols=acceptableProtocols,
        **extraCertificateOptions,
    )
    return ClientTLSOptions(hostname, certificateOptions.getContext())
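As a usage sketch (the host name and port are placeholders), the returned creator is typically handed to an endpoint wrapper such as L{wrapClientTLS <twisted.internet.endpoints.wrapClientTLS>}:

from twisted.internet import endpoints, reactor
from twisted.internet.ssl import optionsForClientTLS

options = optionsForClientTLS("example.com")
tlsEndpoint = endpoints.wrapClientTLS(
    options, endpoints.HostnameEndpoint(reactor, "example.com", 443)
)
# tlsEndpoint.connect(someFactory) would then perform a verified TLS
# handshake; someFactory stands in for your ClientFactory.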
Expand C{cipherString} according to C{method} and C{options} to a tuple of explicit ciphers that are supported by the current platform. @param cipherString: An OpenSSL cipher string to expand. @type cipherString: L{unicode} @param method: An OpenSSL method like C{SSL.TLS_METHOD} used for determining the effective ciphers. @param options: OpenSSL options like C{SSL.OP_NO_SSLv3} ORed together. @type options: L{int} @return: The effective list of explicit ciphers that results from the arguments on the current platform. @rtype: L{tuple} of L{ICipher}
def _expandCipherString(cipherString, method, options): """ Expand C{cipherString} according to C{method} and C{options} to a tuple of explicit ciphers that are supported by the current platform. @param cipherString: An OpenSSL cipher string to expand. @type cipherString: L{unicode} @param method: An OpenSSL method like C{SSL.TLS_METHOD} used for determining the effective ciphers. @param options: OpenSSL options like C{SSL.OP_NO_SSLv3} ORed together. @type options: L{int} @return: The effective list of explicit ciphers that results from the arguments on the current platform. @rtype: L{tuple} of L{ICipher} """ ctx = SSL.Context(method) ctx.set_options(options) try: ctx.set_cipher_list(cipherString.encode("ascii")) except SSL.Error as e: # OpenSSL 1.1.1 turns an invalid cipher list into TLS 1.3 # ciphers, so pyOpenSSL >= 19.0.0 raises an artificial Error # that lacks a corresponding OpenSSL error if the cipher list # consists only of these after a call to set_cipher_list. if not e.args[0]: return tuple() if e.args[0][0][2] == "no cipher match": return tuple() else: raise conn = SSL.Connection(ctx, None) ciphers = conn.get_cipher_list() if isinstance(ciphers[0], str): return tuple(OpenSSLCipher(cipher) for cipher in ciphers) else: return tuple(OpenSSLCipher(cipher.decode("ascii")) for cipher in ciphers)
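A hedged sketch of exercising this private helper directly (real code only reaches it through L{CertificateOptions}); the cipher string is an arbitrary example and C{fullName} is the L{ICipher} attribute carrying the OpenSSL cipher name.

# Expand an OpenSSL cipher-string pattern into the concrete ciphers this
# platform supports, with no extra context options set.
available = _expandCipherString("ECDHE+AESGCM", SSL.SSLv23_METHOD, 0)
names = [cipher.fullName for cipher in available]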
Calculate the acceptable list of ciphers from the ciphers we want and the ciphers we have support for.

@param wantedCiphers: The ciphers we want to use.
@type wantedCiphers: L{tuple} of L{OpenSSLCipher}

@param availableCiphers: The ciphers we have available to use.
@type availableCiphers: L{tuple} of L{OpenSSLCipher}

@rtype: L{tuple} of L{OpenSSLCipher}
def _selectCiphers(wantedCiphers, availableCiphers):
    """
    Calculate the acceptable list of ciphers from the ciphers we want and the
    ciphers we have support for.

    @param wantedCiphers: The ciphers we want to use.
    @type wantedCiphers: L{tuple} of L{OpenSSLCipher}

    @param availableCiphers: The ciphers we have available to use.
    @type availableCiphers: L{tuple} of L{OpenSSLCipher}

    @rtype: L{tuple} of L{OpenSSLCipher}
    """
    return tuple(cipher for cipher in wantedCiphers if cipher in availableCiphers)
Called to set up the L{OpenSSL.SSL.Context} for doing NPN and/or ALPN negotiation.

@param context: The context which is set up.
@type context: L{OpenSSL.SSL.Context}

@param acceptableProtocols: The protocols this peer is willing to speak after the TLS negotiation has completed, advertised over both ALPN and NPN. If this argument is specified, and no overlap can be found with the other peer, the connection will fail to be established. If the remote peer does not offer NPN or ALPN, the connection will be established, but no protocol will be negotiated. Protocols earlier in the list are preferred over those later in the list.
@type acceptableProtocols: L{list} of L{bytes}
def _setAcceptableProtocols(context, acceptableProtocols):
    """
    Called to set up the L{OpenSSL.SSL.Context} for doing NPN and/or ALPN
    negotiation.

    @param context: The context which is set up.
    @type context: L{OpenSSL.SSL.Context}

    @param acceptableProtocols: The protocols this peer is willing to speak
        after the TLS negotiation has completed, advertised over both ALPN and
        NPN. If this argument is specified, and no overlap can be found with
        the other peer, the connection will fail to be established. If the
        remote peer does not offer NPN or ALPN, the connection will be
        established, but no protocol will be negotiated. Protocols earlier in
        the list are preferred over those later in the list.
    @type acceptableProtocols: L{list} of L{bytes}
    """

    def protoSelectCallback(conn, protocols):
        """
        NPN client-side and ALPN server-side callback used to select the next
        protocol. Prefers protocols found earlier in C{acceptableProtocols}.

        @param conn: The connection being negotiated.
        @type conn: L{OpenSSL.SSL.Connection}

        @param protocols: Protocols advertised by the other side.
        @type protocols: L{list} of L{bytes}
        """
        overlap = set(protocols) & set(acceptableProtocols)

        for p in acceptableProtocols:
            if p in overlap:
                return p
        else:
            return b""

    # If we don't actually have protocols to negotiate, don't set anything up.
    # Depending on OpenSSL version, failing some of the selection callbacks can
    # cause the handshake to fail, which is presumably not what was intended
    # here.
    if not acceptableProtocols:
        return

    supported = protocolNegotiationMechanisms()

    if supported & ProtocolNegotiationSupport.NPN:

        def npnAdvertiseCallback(conn):
            return acceptableProtocols

        context.set_npn_advertise_callback(npnAdvertiseCallback)
        context.set_npn_select_callback(protoSelectCallback)

    if supported & ProtocolNegotiationSupport.ALPN:
        context.set_alpn_select_callback(protoSelectCallback)
        context.set_alpn_protos(acceptableProtocols)
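A sketch of a typical server-side call; the protocol list is an example, and the context construction mirrors the probing code in protocolNegotiationMechanisms above.

# Advertise HTTP/2 and HTTP/1.1; if neither NPN nor ALPN is supported by the
# local OpenSSL build, the helper above sets nothing on the context.
serverContext = SSL.Context(SSL.SSLv23_METHOD)
_setAcceptableProtocols(serverContext, [b"h2", b"http/1.1"])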
Configure the twisted mainloop to be run using the threaded select() reactor.
def install():
    """
    Configure the twisted mainloop to be run using the threaded select()
    reactor.
    """
    reactor = ThreadedSelectReactor()
    from twisted.internet.main import installReactor

    installReactor(reactor)
    return reactor
Ask the platform to allocate a free port on the specified interface, then release the socket and return the address which was allocated.

@param interface: The local address to try to bind the port on.
@type interface: C{str}

@param family: The address family of the socket which will use the resulting port (for example, C{socket.AF_INET} or C{socket.AF_INET6}).

@param type: The socket type which will use the resulting port.

@return: A two-tuple of address and port, like that returned by L{socket.getsockname}.
def findFreePort(interface="127.0.0.1", family=socket.AF_INET, type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface, then
    release the socket and return the address which was allocated.

    @param interface: The local address to try to bind the port on.
    @type interface: C{str}

    @param family: The address family of the socket which will use the
        resulting port (for example, C{socket.AF_INET} or C{socket.AF_INET6}).

    @param type: The socket type which will use the resulting port.

    @return: A two-tuple of address and port, like that returned by
        L{socket.getsockname}.
    """
    addr = socket.getaddrinfo(interface, 0)[0][4]
    probe = socket.socket(family, type)
    try:
        probe.bind(addr)
        if family == socket.AF_INET6:
            sockname = probe.getsockname()
            hostname = socket.getnameinfo(
                sockname, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
            )[0]
            return (hostname, sockname[1])
        else:
            return probe.getsockname()
    finally:
        probe.close()
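A small sketch of the usual test-time pattern; note the inherent race: the OS may hand the port to another process between the probe and the real bind, so this is only suitable for tests.

# Reserve-and-release a TCP port on the loopback interface.
host, port = findFreePort("127.0.0.1", socket.AF_INET, socket.SOCK_STREAM)
# A test server would now bind (host, port), accepting the race described above.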
Connect two protocols using endpoints and a new reactor instance. A new reactor will be created and run, with the client and server protocol instances connected to each other using the given endpoint creator. The protocols should run through some set of tests, then disconnect; when both have disconnected the reactor will be stopped and the function will return. @param reactorBuilder: A L{ReactorBuilder} instance. @param serverProtocol: A L{ConnectableProtocol} that will be the server. @param clientProtocol: A L{ConnectableProtocol} that will be the client. @param endpointCreator: An instance of L{EndpointCreator}. @return: The reactor run by this test.
def runProtocolsWithReactor( reactorBuilder, serverProtocol, clientProtocol, endpointCreator ): """ Connect two protocols using endpoints and a new reactor instance. A new reactor will be created and run, with the client and server protocol instances connected to each other using the given endpoint creator. The protocols should run through some set of tests, then disconnect; when both have disconnected the reactor will be stopped and the function will return. @param reactorBuilder: A L{ReactorBuilder} instance. @param serverProtocol: A L{ConnectableProtocol} that will be the server. @param clientProtocol: A L{ConnectableProtocol} that will be the client. @param endpointCreator: An instance of L{EndpointCreator}. @return: The reactor run by this test. """ reactor = reactorBuilder.buildReactor() serverProtocol._setAttributes(reactor, Deferred()) clientProtocol._setAttributes(reactor, Deferred()) serverFactory = _SingleProtocolFactory(serverProtocol) clientFactory = _SingleProtocolFactory(clientProtocol) # Listen on a port: serverEndpoint = endpointCreator.server(reactor) d = serverEndpoint.listen(serverFactory) # Connect to the port: def gotPort(p): clientEndpoint = endpointCreator.client(reactor, p.getHost()) return clientEndpoint.connect(clientFactory) d.addCallback(gotPort) # Stop reactor when both connections are lost: def failed(result): log.err(result, "Connection setup failed.") disconnected = gatherResults([serverProtocol._done, clientProtocol._done]) d.addCallback(lambda _: disconnected) d.addErrback(failed) d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop)) reactorBuilder.runReactor(reactor) return reactor
Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as well.
def _getWriters(reactor): """ Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as well. """ if IReactorFDSet.providedBy(reactor): return reactor.getWriters() elif "IOCP" in reactor.__class__.__name__: return reactor.handles else: # Cannot tell what is going on. raise Exception(f"Cannot find writers on {reactor!r}")
Various functions within these tests need an already-running reactor at some point. They need to stop the reactor when the test has completed, and that means calling reactor.stop(). However, reactor.stop() raises an exception if the reactor isn't already running, so if the L{Deferred} that a particular API under test returns fires synchronously (as especially an endpoint's C{connect()} method may do, if the connect is to a local interface address) then the test won't be able to stop the reactor being tested and finish. So this calls C{thunk} only once C{reactor} is running.

(This is just an alias for L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given reactor parameter, in order to centrally reference the above paragraph rather than repeating it everywhere as a comment.)

@param reactor: the L{twisted.internet.interfaces.IReactorCore} under test

@param thunk: a 0-argument callable, which eventually finishes the test in question, probably in a L{Deferred} callback.
def needsRunningReactor(reactor, thunk):
    """
    Various functions within these tests need an already-running reactor at
    some point. They need to stop the reactor when the test has completed, and
    that means calling reactor.stop(). However, reactor.stop() raises an
    exception if the reactor isn't already running, so if the L{Deferred} that
    a particular API under test returns fires synchronously (as especially an
    endpoint's C{connect()} method may do, if the connect is to a local
    interface address) then the test won't be able to stop the reactor being
    tested and finish. So this calls C{thunk} only once C{reactor} is running.

    (This is just an alias for
    L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
    reactor parameter, in order to centrally reference the above paragraph
    rather than repeating it everywhere as a comment.)

    @param reactor: the L{twisted.internet.interfaces.IReactorCore} under test

    @param thunk: a 0-argument callable, which eventually finishes the test in
        question, probably in a L{Deferred} callback.
    """
    reactor.callWhenRunning(thunk)
Stop the reactor as soon as any error is logged on the given publisher.

This is beneficial for tests which will wait for a L{Deferred} to fire before completing (by passing or failing). Certain implementation bugs may prevent the L{Deferred} from firing with any result at all (consider a protocol's C{dataReceived} method that raises an exception: this exception is logged but it won't ever cause a L{Deferred} to fire). In that case the test would have to complete by timing out, which is a much less desirable outcome than completing as soon as the unexpected error is encountered.

@param case: A L{SynchronousTestCase} to use to clean up the necessary log observer when the test is over.

@param reactor: The reactor to stop.

@param publisher: A L{LogPublisher} to watch for errors. If L{None}, the global log publisher will be watched.
def stopOnError(case, reactor, publisher=None):
    """
    Stop the reactor as soon as any error is logged on the given publisher.

    This is beneficial for tests which will wait for a L{Deferred} to fire
    before completing (by passing or failing). Certain implementation bugs may
    prevent the L{Deferred} from firing with any result at all (consider a
    protocol's C{dataReceived} method that raises an exception: this exception
    is logged but it won't ever cause a L{Deferred} to fire). In that case the
    test would have to complete by timing out, which is a much less desirable
    outcome than completing as soon as the unexpected error is encountered.

    @param case: A L{SynchronousTestCase} to use to clean up the necessary log
        observer when the test is over.

    @param reactor: The reactor to stop.

    @param publisher: A L{LogPublisher} to watch for errors. If L{None}, the
        global log publisher will be watched.
    """
    if publisher is None:
        from twisted.python import log as publisher
    running = [None]

    def stopIfError(event):
        if running and event.get("isError"):
            running.pop()
            reactor.stop()

    publisher.addObserver(stopIfError)
    case.addCleanup(publisher.removeObserver, stopIfError)
Make a new asyncio reactor associated with a new event loop. The test suite prefers this constructor because having a new event loop for each reactor provides better test isolation. The real constructor prefers to re-use (or create) a global loop because of how this interacts with other asyncio-based libraries and applications (though maybe it shouldn't). @param self: The L{ReactorBuilder} subclass this is being called on. We don't use this parameter but we get called with it anyway.
def asyncioSelectorReactor(self: object) -> "asyncioreactor.AsyncioSelectorReactor": """ Make a new asyncio reactor associated with a new event loop. The test suite prefers this constructor because having a new event loop for each reactor provides better test isolation. The real constructor prefers to re-use (or create) a global loop because of how this interacts with other asyncio-based libraries and applications (though maybe it shouldn't). @param self: The L{ReactorBuilder} subclass this is being called on. We don't use this parameter but we get called with it anyway. """ from asyncio import get_event_loop, new_event_loop, set_event_loop from twisted.internet import asyncioreactor asTestCase = cast(SynchronousTestCase, self) originalLoop = get_event_loop() loop = new_event_loop() set_event_loop(loop) @asTestCase.addCleanup def cleanUp(): loop.close() set_event_loop(originalLoop) return asyncioreactor.AsyncioSelectorReactor(loop)
Function used by L{DelayedCallTests.test_str}.
def nothing(): """ Function used by L{DelayedCallTests.test_str}. """
Do-nothing callable. Stub for testing.
def noop() -> None: """ Do-nothing callable. Stub for testing. """
Create a reactor that will deterministically resolve all hostnames it is passed to the list of addresses given.

@param reactor: An object that we wish to add an L{IReactorPluggableNameResolver} to.
@type reactor: Any object with some formally-declared interfaces (i.e. one where C{list(providedBy(reactor))} is not empty); usually C{IReactor*} interfaces.

@param expectedAddresses: (optional); the addresses expected to be returned for every address. If these are strings, they should be IPv4 or IPv6 literals, and they will be wrapped in L{IPv4Address} and L{IPv6Address} objects in the resolution result.
@type expectedAddresses: iterable of C{object} or C{str}

@param hostMap: (optional); the names (unicode) mapped to lists of addresses (str or L{IAddress}); in the same format as C{expectedAddresses}, which map the results for I{specific} hostnames to addresses.

@return: A new reactor which provides all the interfaces previously provided by C{reactor} as well as L{IReactorPluggableNameResolver}. All name resolutions performed with its C{nameResolver} attribute will resolve reentrantly and synchronously with the given C{expectedAddresses}. However, it is not a complete implementation as it does not have an C{installNameResolver} method.
def deterministicResolvingReactor(reactor, expectedAddresses=(), hostMap=None): """ Create a reactor that will deterministically resolve all hostnames it is passed to the list of addresses given. @param reactor: An object that we wish to add an L{IReactorPluggableNameResolver} to. @type reactor: Any object with some formally-declared interfaces (i.e. one where C{list(providedBy(reactor))} is not empty); usually C{IReactor*} interfaces. @param expectedAddresses: (optional); the addresses expected to be returned for every address. If these are strings, they should be IPv4 or IPv6 literals, and they will be wrapped in L{IPv4Address} and L{IPv6Address} objects in the resolution result. @type expectedAddresses: iterable of C{object} or C{str} @param hostMap: (optional); the names (unicode) mapped to lists of addresses (str or L{IAddress}); in the same format as expectedAddress, which map the results for I{specific} hostnames to addresses. @return: A new reactor which provides all the interfaces previously provided by C{reactor} as well as L{IReactorPluggableNameResolver}. All name resolutions performed with its C{nameResolver} attribute will resolve reentrantly and synchronously with the given C{expectedAddresses}. However, it is not a complete implementation as it does not have an C{installNameResolver} method. """ if hostMap is None: hostMap = {} hostMap = hostMap.copy() @implementer(IHostnameResolver) class SimpleNameResolver: @staticmethod def resolveHostName( resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics="TCP", ): resolutionReceiver.resolutionBegan(None) for expectedAddress in hostMap.get(hostName, expectedAddresses): if isinstance(expectedAddress, str): expectedAddress = [IPv4Address, IPv6Address][ isIPv6Address(expectedAddress) ]("TCP", expectedAddress, portNumber) resolutionReceiver.addressResolved(expectedAddress) resolutionReceiver.resolutionComplete() @implementer(IReactorPluggableNameResolver) class WithResolver( proxyForInterface(InterfaceClass("*", tuple(providedBy(reactor)))) ): nameResolver = SimpleNameResolver() return WithResolver(reactor)
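A brief sketch using a memory reactor from twisted.internet.testing; the address used here is only an example.

from twisted.internet.testing import MemoryReactorClock

# Every hostname now resolves, synchronously, to 127.0.0.1.
resolvingReactor = deterministicResolvingReactor(MemoryReactorClock(), ["127.0.0.1"])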
For the duration of C{testCase}, add a fake plugin to twisted.plugins which contains some sample endpoint parsers.
def addFakePlugin(testCase, dropinSource="fakeendpoint.py"): """ For the duration of C{testCase}, add a fake plugin to twisted.plugins which contains some sample endpoint parsers. """ import sys savedModules = sys.modules.copy() savedPluginPath = list(plugins.__path__) def cleanup(): sys.modules.clear() sys.modules.update(savedModules) plugins.__path__[:] = savedPluginPath testCase.addCleanup(cleanup) fp = FilePath(testCase.mktemp()) fp.createDirectory() getModule(__name__).filePath.sibling(dropinSource).copyTo(fp.child(dropinSource)) plugins.__path__.append(fp.path)
Given a L{MemoryReactor} and the result of calling L{wrapClientTLS}, extract the L{IOpenSSLClientConnectionCreator} associated with it. Implementation presently uses private attributes but could (and should) be refactored to just call C{.connect()} on the endpoint, when L{HostnameEndpoint} starts directing its C{getaddrinfo} call through the reactor it is passed somehow rather than via the global threadpool. @param memoryReactor: the reactor attached to the given endpoint. (Presently unused, but included so tests won't need to be modified to honor it.) @param tlsEndpoint: The result of calling L{wrapClientTLS}. @return: the client connection creator associated with the endpoint wrapper. @rtype: L{IOpenSSLClientConnectionCreator}
def connectionCreatorFromEndpoint(memoryReactor, tlsEndpoint): """ Given a L{MemoryReactor} and the result of calling L{wrapClientTLS}, extract the L{IOpenSSLClientConnectionCreator} associated with it. Implementation presently uses private attributes but could (and should) be refactored to just call C{.connect()} on the endpoint, when L{HostnameEndpoint} starts directing its C{getaddrinfo} call through the reactor it is passed somehow rather than via the global threadpool. @param memoryReactor: the reactor attached to the given endpoint. (Presently unused, but included so tests won't need to be modified to honor it.) @param tlsEndpoint: The result of calling L{wrapClientTLS}. @return: the client connection creator associated with the endpoint wrapper. @rtype: L{IOpenSSLClientConnectionCreator} """ return tlsEndpoint._wrapperFactory(None)._connectionCreator
Create a copy of the given function with the given globals substituted. The globals must already exist in the function's existing global scope. @param function: any function object. @type function: L{types.FunctionType} @param newGlobals: each keyword argument should be a global to set in the new function's returned scope. @type newGlobals: L{dict} @return: a new function, like C{function}, but with new global scope.
def replacingGlobals(function, **newGlobals): """ Create a copy of the given function with the given globals substituted. The globals must already exist in the function's existing global scope. @param function: any function object. @type function: L{types.FunctionType} @param newGlobals: each keyword argument should be a global to set in the new function's returned scope. @type newGlobals: L{dict} @return: a new function, like C{function}, but with new global scope. """ try: codeObject = function.func_code funcGlobals = function.func_globals except AttributeError: codeObject = function.__code__ funcGlobals = function.__globals__ for key in newGlobals: if key not in funcGlobals: raise TypeError( "Name bound by replacingGlobals but not present in module: {}".format( key ) ) mergedGlobals = {} mergedGlobals.update(funcGlobals) mergedGlobals.update(newGlobals) newFunction = FunctionType(codeObject, mergedGlobals) mergedGlobals[function.__name__] = newFunction return newFunction
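A compact sketch; the poll function and its sleep global are invented for illustration.

import time

sleep = time.sleep  # module-level global used by the function below

def poll():
    sleep(1)
    return "polled"

# Run poll() with sleep() replaced by a recorder, leaving the original alone.
recorded = []
fastPoll = replacingGlobals(poll, sleep=recorded.append)
assert fastPoll() == "polled" and recorded == [1]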
Do nothing.
def _fakeKEvent(*args: object, **kwargs: object) -> None: """ Do nothing. """
Create a fake that implements L{_IKQueue}. @param testKQueue: Something that acts like L{select.kqueue}. @param testKEvent: Something that acts like L{select.kevent}. @return: An implementation of L{_IKQueue} that includes C{testKQueue} and C{testKEvent}.
def makeFakeKQueue(testKQueue: object, testKEvent: object) -> _IKQueue: """ Create a fake that implements L{_IKQueue}. @param testKQueue: Something that acts like L{select.kqueue}. @param testKEvent: Something that acts like L{select.kevent}. @return: An implementation of L{_IKQueue} that includes C{testKQueue} and C{testKEvent}. """ @implementer(_IKQueue) class FakeKQueue: kqueue = testKQueue kevent = testKEvent return FakeKQueue()
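A sketch of wiring the fake into place; the stand-in kqueue callable is deliberately inert, and _fakeKEvent is the stub defined above.

# A do-nothing stand-in for select.kqueue; tests typically substitute
# something with more behaviour.
def _fakeKQueueFactory():
    return None

fakeImpl = makeFakeKQueue(_fakeKQueueFactory, _fakeKEvent)
# fakeImpl.kqueue and fakeImpl.kevent can now be handed to code expecting
# the _IKQueue interface.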
Only run this test on POSIX platforms. @param testMethod: A test function, being decorated. @return: the C{testMethod} argument.
def onlyOnPOSIX(testMethod): """ Only run this test on POSIX platforms. @param testMethod: A test function, being decorated. @return: the C{testMethod} argument. """ if os.name != "posix": testMethod.skip = "Test only applies to POSIX platforms." return testMethod
A function that can be used as a factory for L{ReactorBuilder} tests but which always raises an exception. This gives the appearance of a reactor type which is unsupported in the current runtime configuration for some reason.
def unsupportedReactor(self: ReactorBuilder) -> NoReturn: """ A function that can be used as a factory for L{ReactorBuilder} tests but which always raises an exception. This gives the appearance of a reactor type which is unsupported in the current runtime configuration for some reason. """ raise Exception(UNSUPPORTED)
Create a deterministic threadpool. @return: 2-tuple of L{ThreadPool}, 0-argument C{work} callable; when C{work} is called, do the work.
def deterministicPool(): """ Create a deterministic threadpool. @return: 2-tuple of L{ThreadPool}, 0-argument C{work} callable; when C{work} is called, do the work. """ worker, doer = createMemoryWorker() return ( DeterministicThreadPool( Team(LockWorker(Lock(), local()), (lambda: worker), lambda: None) ), doer, )
Create a deterministic L{IReactorThreads} @return: a 2-tuple consisting of an L{IReactorThreads}-like object and a 0-argument callable that will perform one unit of work invoked via that object's C{callFromThread} method.
def deterministicReactorThreads(): """ Create a deterministic L{IReactorThreads} @return: a 2-tuple consisting of an L{IReactorThreads}-like object and a 0-argument callable that will perform one unit of work invoked via that object's C{callFromThread} method. """ worker, doer = createMemoryWorker() class CFT: def callFromThread(self, f, *a, **k): worker.do(lambda: f(*a, **k)) return CFT(), doer
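A usage sketch grounded in the code above: callFromThread queues the call, and the returned performer executes one queued unit of work when invoked.

seen = []
fakeReactor, perform = deterministicReactorThreads()
fakeReactor.callFromThread(seen.append, "hello")
perform()  # runs the queued call deterministically; seen is now ["hello"]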
Find and return a configured link local IPv6 address including a scope identifier using the % separation syntax. If the system has no link local IPv6 addresses, raise L{SkipTest} instead. @raise SkipTest: if no link local address can be found or if the C{netifaces} module is not available. @return: a C{str} giving the address
def getLinkLocalIPv6Address(): """ Find and return a configured link local IPv6 address including a scope identifier using the % separation syntax. If the system has no link local IPv6 addresses, raise L{SkipTest} instead. @raise SkipTest: if no link local address can be found or if the C{netifaces} module is not available. @return: a C{str} giving the address """ addresses = getLinkLocalIPv6Addresses() if addresses: return addresses[0] raise SkipTest("Link local IPv6 address unavailable")
Connect a socket to the given destination. @param client: A C{socket.socket}. @param destination: A tuple of (host, port). The host is a C{str}, the port a C{int}. If the C{host} is an IPv6 IP, the address is resolved using C{getaddrinfo} and the first version found is used.
def connect(client, destination): """ Connect a socket to the given destination. @param client: A C{socket.socket}. @param destination: A tuple of (host, port). The host is a C{str}, the port a C{int}. If the C{host} is an IPv6 IP, the address is resolved using C{getaddrinfo} and the first version found is used. """ (host, port) = destination if "%" in host or ":" in host: address = socket.getaddrinfo(host, port)[0][4] else: address = (host, port) client.connect(address)
Create a socket for the duration of the given test. @param test: the test to add cleanup to. @param addressFamily: an C{AF_*} constant @param socketType: a C{SOCK_*} constant. @return: a socket object.
def createTestSocket(test, addressFamily, socketType): """ Create a socket for the duration of the given test. @param test: the test to add cleanup to. @param addressFamily: an C{AF_*} constant @param socketType: a C{SOCK_*} constant. @return: a socket object. """ skt = socket.socket(addressFamily, socketType) test.addCleanup(skt.close) return skt
Assert that an L{IListeningPort} immediately closes an accepted peer socket when the number of open file descriptors exceeds the soft resource limit. @param testCase: The test case under which to run this assertion. @type testCase: L{trial.unittest.SynchronousTestCase} @param exhauster: The file descriptor exhauster. @type exhauster: L{_ExhaustsFileDescriptors} @param reactor: The reactor under test. @param runReactor: A callable that will synchronously run the provided reactor. @param listen: A callback to bind to a port. @type listen: A L{callable} that accepts two arguments: the provided C{reactor}; and a L{ServerFactory}. It must return an L{IListeningPort} provider. @param connect: A callback to connect a client to the listening port. @type connect: A L{callable} that accepts three arguments: the provided C{reactor}; the address returned by L{IListeningPort.getHost}; and a L{ClientFactory}. Its return value is ignored.
def assertPeerClosedOnEMFILE( testCase, exhauster, reactor, runReactor, listen, connect, ): """ Assert that an L{IListeningPort} immediately closes an accepted peer socket when the number of open file descriptors exceeds the soft resource limit. @param testCase: The test case under which to run this assertion. @type testCase: L{trial.unittest.SynchronousTestCase} @param exhauster: The file descriptor exhauster. @type exhauster: L{_ExhaustsFileDescriptors} @param reactor: The reactor under test. @param runReactor: A callable that will synchronously run the provided reactor. @param listen: A callback to bind to a port. @type listen: A L{callable} that accepts two arguments: the provided C{reactor}; and a L{ServerFactory}. It must return an L{IListeningPort} provider. @param connect: A callback to connect a client to the listening port. @type connect: A L{callable} that accepts three arguments: the provided C{reactor}; the address returned by L{IListeningPort.getHost}; and a L{ClientFactory}. Its return value is ignored. """ testCase.addCleanup(exhauster.release) serverFactory = MyServerFactory() serverConnectionMade = Deferred() serverFactory.protocolConnectionMade = serverConnectionMade serverConnectionCompleted = [False] def stopReactorIfServerAccepted(_): reactor.stop() serverConnectionCompleted[0] = True serverConnectionMade.addCallback(stopReactorIfServerAccepted) clientFactory = MyClientFactory() if IReactorTime.providedBy(reactor): # For Glib-based reactors, the exhauster should be run after the signal handler used by glib [1] # and restoration of twisted signal handlers [2], thus such 2-level callLater # [1] https://gitlab.gnome.org/GNOME/pygobject/-/blob/3.42.0/gi/_ossighelper.py#L76 # [2] https://github.com/twisted/twisted/blob/twisted-22.4.0/src/twisted/internet/_glibbase.py#L134 # See also https://twistedmatrix.com/trac/ticket/10342 def inner(): port = listen(reactor, serverFactory) listeningHost = port.getHost() connect(reactor, listeningHost, clientFactory) exhauster.exhaust() reactor.callLater(0, reactor.callLater, 0, inner) else: # For reactors without callLater (ex: MemoryReactor) port = listen(reactor, serverFactory) listeningHost = port.getHost() connect(reactor, listeningHost, clientFactory) reactor.callWhenRunning(exhauster.exhaust) def stopReactorAndCloseFileDescriptors(result): exhauster.release() reactor.stop() return result clientFactory.deferred.addBoth(stopReactorAndCloseFileDescriptors) clientFactory.failDeferred.addBoth(stopReactorAndCloseFileDescriptors) runReactor(reactor) noResult = [] serverConnectionMade.addBoth(noResult.append) testCase.assertFalse(noResult, "Server accepted connection; EMFILE not triggered.") testCase.assertNoResult(clientFactory.failDeferred) testCase.successResultOf(clientFactory.deferred) testCase.assertRaises( ConnectionClosed, clientFactory.lostReason.raiseException, )
Decorate a L{ReactorBuilder} test function which tests one reactor and one connected transport. Run that test method in the context of C{connectionMade}, and immediately drop the connection (and end the test) when that completes. @param testMethod: A unit test method on a L{ReactorBuilder} test suite; taking two additional parameters; a C{reactor} as built by the L{ReactorBuilder}, and an L{ITCPTransport} provider. @type testMethod: 3-argument C{function} @return: a no-argument test method. @rtype: 1-argument C{function}
def oneTransportTest(testMethod): """ Decorate a L{ReactorBuilder} test function which tests one reactor and one connected transport. Run that test method in the context of C{connectionMade}, and immediately drop the connection (and end the test) when that completes. @param testMethod: A unit test method on a L{ReactorBuilder} test suite; taking two additional parameters; a C{reactor} as built by the L{ReactorBuilder}, and an L{ITCPTransport} provider. @type testMethod: 3-argument C{function} @return: a no-argument test method. @rtype: 1-argument C{function} """ @wraps(testMethod) def actualTestMethod(builder): other = ConnectableProtocol() class ServerProtocol(ConnectableProtocol): def connectionMade(self): try: testMethod(builder, self.reactor, self.transport) finally: if self.transport is not None: self.transport.loseConnection() if other.transport is not None: other.transport.loseConnection() serverProtocol = ServerProtocol() runProtocolsWithReactor(builder, serverProtocol, other, TCPCreator()) return actualTestMethod
Use the given test to assert that the given transport is actively reading in the given reactor. @note: Maintainers; for more information on why this is a function rather than a method on a test case, see U{this document on how we structure test tools <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>} @param testCase: a test case to perform the assertion upon. @type testCase: L{TestCase} @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an IOCP reactor. @param transport: An L{ITCPTransport}
def assertReading(testCase, reactor, transport): """ Use the given test to assert that the given transport is actively reading in the given reactor. @note: Maintainers; for more information on why this is a function rather than a method on a test case, see U{this document on how we structure test tools <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>} @param testCase: a test case to perform the assertion upon. @type testCase: L{TestCase} @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an IOCP reactor. @param transport: An L{ITCPTransport} """ if IReactorFDSet.providedBy(reactor): testCase.assertIn(transport, reactor.getReaders()) else: # IOCP. testCase.assertIn(transport, reactor.handles) testCase.assertTrue(transport.reading)
Use the given test to assert that the given transport is I{not} actively reading in the given reactor. @note: Maintainers; for more information on why this is a function rather than a method on a test case, see U{this document on how we structure test tools <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>} @param testCase: a test case to perform the assertion upon. @type testCase: L{TestCase} @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an IOCP reactor. @param transport: An L{ITCPTransport}
def assertNotReading(testCase, reactor, transport): """ Use the given test to assert that the given transport is I{not} actively reading in the given reactor. @note: Maintainers; for more information on why this is a function rather than a method on a test case, see U{this document on how we structure test tools <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>} @param testCase: a test case to perform the assertion upon. @type testCase: L{TestCase} @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an IOCP reactor. @param transport: An L{ITCPTransport} """ if IReactorFDSet.providedBy(reactor): testCase.assertNotIn(transport, reactor.getReaders()) else: # IOCP. testCase.assertFalse(transport.reading)
Returns True if the system can bind an IPv6 address.
def _has_ipv6(): """Returns True if the system can bind an IPv6 address.""" sock = None has_ipv6 = False try: sock = socket.socket(socket.AF_INET6) sock.bind(("::1", 0)) has_ipv6 = True except OSError: pass if sock: sock.close() return has_ipv6
Return a new, unique abstract namespace path to be listened on.
def _abstractPath(case): """ Return a new, unique abstract namespace path to be listened on. """ return md5(urandom(100)).hexdigest()