A custom assertion for L{SerialNumber} values that cannot be meaningfully
compared.
"Note that there are some pairs of values s1 and s2 for which s1 is not
equal to s2, but for which s1 is neither greater than, nor less than, s2.
An attempt to use these ordering operators on such pairs of values produces
an undefined result."
@see: U{https://tools.ietf.org/html/rfc1982#section-3.2}
@param testCase: The L{unittest.TestCase} on which to call assertion
methods.
@type testCase: L{unittest.TestCase}
@param s1: The first value to compare.
@type s1: L{SerialNumber}
@param s2: The second value to compare.
@type s2: L{SerialNumber} | def assertUndefinedComparison(testCase, s1, s2):
"""
A custom assertion for L{SerialNumber} values that cannot be meaningfully
compared.
"Note that there are some pairs of values s1 and s2 for which s1 is not
equal to s2, but for which s1 is neither greater than, nor less than, s2.
An attempt to use these ordering operators on such pairs of values produces
an undefined result."
@see: U{https://tools.ietf.org/html/rfc1982#section-3.2}
@param testCase: The L{unittest.TestCase} on which to call assertion
methods.
@type testCase: L{unittest.TestCase}
@param s1: The first value to compare.
@type s1: L{SerialNumber}
@param s2: The second value to compare.
@type s2: L{SerialNumber}
"""
testCase.assertFalse(s1 == s2)
testCase.assertFalse(s1 <= s2)
testCase.assertFalse(s1 < s2)
testCase.assertFalse(s1 > s2)
testCase.assertFalse(s1 >= s2) |
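A brief, self-contained sketch of the RFC 1982 rule this assertion exercises, using plain integers with an assumed 8-bit serial space rather than L{SerialNumber} itself:

SERIAL_BITS = 8
HALF = 2 ** (SERIAL_BITS - 1)

def lessThan(i1, i2):
    # The "i1 < i2" definition from RFC 1982 section 3.2.
    return (i1 < i2 and i2 - i1 < HALF) or (i1 > i2 and i1 - i2 > HALF)

s1, s2 = 0, HALF
# 0 and 128 differ, yet neither is greater nor less than the other.
assert s1 != s2 and not lessThan(s1, s2) and not lessThan(s2, s1)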
From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
return the payload of the first record in the answer section. | def getOnePayload(results):
"""
From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
return the payload of the first record in the answer section.
"""
ans, auth, add = results
return ans[0].payload |
From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
return the first IPv4 address from the answer section. | def getOneAddress(results):
"""
From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
return the first IPv4 address from the answer section.
"""
return getOnePayload(results).dottedQuad() |
Raise a L{ResolverFactoryArguments} exception containing the
positional and keyword arguments passed to resolverFactory.
@param args: A L{list} of all the positional arguments supplied by
the caller.
@param kwargs: A L{dict} of all the keyword arguments supplied by
the caller. | def raisingResolverFactory(*args, **kwargs):
"""
Raise a L{ResolverFactoryArguments} exception containing the
positional and keyword arguments passed to resolverFactory.
@param args: A L{list} of all the positional arguments supplied by
the caller.
@param kwargs: A L{dict} of all the keyword arguments supplied by
the caller.
"""
raise ResolverFactoryArguments(args, kwargs) |
Raise a L{RaisedArguments} exception containing the supplied arguments.
Used as a fake when testing the call signatures of methods and functions. | def raiser(*args, **kwargs):
"""
Raise a L{RaisedArguments} exception containing the supplied arguments.
Used as a fake when testing the call signatures of methods and functions.
"""
raise RaisedArguments(args, kwargs) |
Assert that the callable logs the expected messages when called.
XXX: Put this somewhere where it can be re-used elsewhere. See #6677.
@param testCase: The test case controlling the test which triggers the
logged messages and on which assertions will be called.
@type testCase: L{unittest.SynchronousTestCase}
@param expectedMessages: A L{list} of the expected log messages
@type expectedMessages: L{list}
@param callable: The function which is expected to produce the
C{expectedMessages} when called.
@type callable: L{callable}
@param args: Positional arguments to be passed to C{callable}.
@type args: L{list}
@param kwargs: Keyword arguments to be passed to C{callable}.
@type kwargs: L{dict} | def assertLogMessage(testCase, expectedMessages, callable, *args, **kwargs):
"""
Assert that the callable logs the expected messages when called.
XXX: Put this somewhere where it can be re-used elsewhere. See #6677.
@param testCase: The test case controlling the test which triggers the
logged messages and on which assertions will be called.
@type testCase: L{unittest.SynchronousTestCase}
@param expectedMessages: A L{list} of the expected log messages
@type expectedMessages: L{list}
@param callable: The function which is expected to produce the
C{expectedMessages} when called.
@type callable: L{callable}
@param args: Positional arguments to be passed to C{callable}.
@type args: L{list}
@param kwargs: Keyword arguments to be passed to C{callable}.
@type kwargs: L{dict}
"""
loggedMessages = []
log.addObserver(loggedMessages.append)
testCase.addCleanup(log.removeObserver, loggedMessages.append)
callable(*args, **kwargs)
testCase.assertEqual([m["message"][0] for m in loggedMessages], expectedMessages) |
Pack an integer into a network-order two-byte string.
@param n: The integer to pack. Only values that fit into 16 bits are
supported.
@return: The packed representation of the integer.
@rtype: L{bytes} | def _H(n):
"""
Pack an integer into a network-order two-byte string.
@param n: The integer to pack. Only values that fit into 16 bits are
supported.
@return: The packed representation of the integer.
@rtype: L{bytes}
"""
return struct.pack(">H", n) |
Construct an ethernet frame.
@param src: The source ethernet address, encoded.
@type src: L{bytes}
@param dst: The destination ethernet address, encoded.
@type dst: L{bytes}
@param protocol: The protocol number of the payload of this datagram.
@type protocol: L{int}
@param payload: The content of the ethernet frame (such as an IP datagram).
@type payload: L{bytes}
@return: The full ethernet frame.
@rtype: L{bytes} | def _ethernet(src, dst, protocol, payload):
"""
Construct an ethernet frame.
@param src: The source ethernet address, encoded.
@type src: L{bytes}
@param dst: The destination ethernet address, encoded.
@type dst: L{bytes}
@param protocol: The protocol number of the payload of this datagram.
@type protocol: L{int}
@param payload: The content of the ethernet frame (such as an IP datagram).
@type payload: L{bytes}
@return: The full ethernet frame.
@rtype: L{bytes}
"""
return dst + src + _H(protocol) + payload |
Construct an IP datagram with the given source, destination, and
application payload.
@param src: The source IPv4 address as a dotted-quad string.
@type src: L{bytes}
@param dst: The destination IPv4 address as a dotted-quad string.
@type dst: L{bytes}
@param payload: The content of the IP datagram (such as a UDP datagram).
@type payload: L{bytes}
@return: An IP datagram header and payload.
@rtype: L{bytes} | def _ip(src, dst, payload):
"""
Construct an IP datagram with the given source, destination, and
application payload.
@param src: The source IPv4 address as a dotted-quad string.
@type src: L{bytes}
@param dst: The destination IPv4 address as a dotted-quad string.
@type dst: L{bytes}
@param payload: The content of the IP datagram (such as a UDP datagram).
@type payload: L{bytes}
@return: An IP datagram header and payload.
@rtype: L{bytes}
"""
ipHeader = (
# Version and header length, 4 bits each
b"\x45"
# Differentiated services field
b"\x00"
# Total length
+ _H(20 + len(payload))
+ b"\x00\x01\x00\x00\x40\x11"
# Checksum
+ _H(0)
# Source address
+ socket.inet_pton(socket.AF_INET, nativeString(src))
# Destination address
+ socket.inet_pton(socket.AF_INET, nativeString(dst))
)
# Total all of the 16-bit integers in the header
checksumStep1 = sum(struct.unpack("!10H", ipHeader))
# Pull off the carry
carry = checksumStep1 >> 16
# And add it to what was left over
checksumStep2 = (checksumStep1 & 0xFFFF) + carry
# Compute the one's complement sum
checksumStep3 = checksumStep2 ^ 0xFFFF
# Reconstruct the IP header including the correct checksum so the platform
# IP stack, if there is one involved in this test, doesn't drop it on the
# floor as garbage.
ipHeader = ipHeader[:10] + struct.pack("!H", checksumStep3) + ipHeader[12:]
return ipHeader + payload |
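To make the checksum arithmetic above concrete, here is a small standalone walk-through of the fold-and-complement steps over made-up header words (the values are illustrative only):

# Ten 16-bit header words with the checksum field (index 5) zeroed.
words = [0x4500, 0x001C, 0x0001, 0x0000, 0x4011,
         0x0000, 0x7F00, 0x0001, 0x7F00, 0x0001]
total = sum(words)
total = (total & 0xFFFF) + (total >> 16)   # fold the carry back in
checksum = total ^ 0xFFFF                  # take the one's complement
# With the checksum in place, re-summing and folding yields 0xFFFF,
# which is how a receiver verifies the header.
words[5] = checksum
verify = sum(words)
verify = (verify & 0xFFFF) + (verify >> 16)
assert verify == 0xFFFF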
Construct a UDP datagram with the given source, destination, and
application payload.
@param src: The source port number.
@type src: L{int}
@param dst: The destination port number.
@type dst: L{int}
@param payload: The content of the UDP datagram.
@type payload: L{bytes}
@return: A UDP datagram header and payload.
@rtype: L{bytes} | def _udp(src, dst, payload):
"""
Construct a UDP datagram with the given source, destination, and
application payload.
@param src: The source port number.
@type src: L{int}
@param dst: The destination port number.
@type dst: L{int}
@param payload: The content of the UDP datagram.
@type payload: L{bytes}
@return: A UDP datagram header and payload.
@rtype: L{bytes}
"""
udpHeader = (
# Source port
_H(src)
# Destination port
+ _H(dst)
# Length
+ _H(len(payload) + 8)
# Checksum
+ _H(0)
)
return udpHeader + payload |
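A hedged usage sketch composing the three helpers above into one frame; it assumes _ethernet, _ip and _udp are in scope, and the addresses, ports and EtherType used here are illustrative:

payload = b"\x00" * 12  # stand-in for an application message
datagram = _udp(src=5353, dst=53, payload=payload)
packet = _ip(src=b"192.168.0.2", dst=b"192.168.0.1", payload=datagram)
frame = _ethernet(
    src=b"\x01\x02\x03\x04\x05\x06",
    dst=b"\x0a\x0b\x0c\x0d\x0e\x0f",
    protocol=0x0800,  # conventional EtherType for IPv4
    payload=packet,
)
# 14 bytes of ethernet header, 20 of IP, 8 of UDP, then the payload.
assert len(frame) == 14 + 20 + 8 + len(payload)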
Wrap a L{MemoryIOSystem} method with permission-checking logic. The
returned function will check C{self.permissions} and raise L{IOError} with
L{errno.EPERM} if the function name is not listed as an available
permission.
@param original: The L{MemoryIOSystem} instance to wrap.
@return: A wrapper around C{original} that applies permission checks. | def _privileged(original):
"""
Wrap a L{MemoryIOSystem} method with permission-checking logic. The
returned function will check C{self.permissions} and raise L{IOError} with
L{errno.EPERM} if the function name is not listed as an available
permission.
@param original: The L{MemoryIOSystem} instance to wrap.
@return: A wrapper around C{original} that applies permission checks.
"""
@wraps(original)
def permissionChecker(self, *args, **kwargs):
if original.__name__ not in self.permissions:
raise OSError(EPERM, "Operation not permitted")
return original(self, *args, **kwargs)
return permissionChecker |
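A minimal sketch of how a permission-gated wrapper like this behaves, assuming the _privileged decorator above (and its EPERM import) is in scope; FakeIO is a made-up stand-in for MemoryIOSystem:

from errno import EPERM

class FakeIO:
    def __init__(self, permissions):
        self.permissions = permissions

    @_privileged
    def unlink(self, path):
        return "unlinked " + path

assert FakeIO({"unlink"}).unlink("/tmp/x") == "unlinked /tmp/x"
try:
    FakeIO(set()).unlink("/tmp/x")
except OSError as error:
    assert error.errno == EPERM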
Pass me an AO, I'll return a nicely-formatted source representation. | def getSource(ao):
"""Pass me an AO, I'll return a nicely-formatted source representation."""
return indentify("app = " + prettify(ao)) |
Pass me an Abstract Object Tree, and I'll unjelly it for you. | def unjellyFromAOT(aot):
"""
Pass me an Abstract Object Tree, and I'll unjelly it for you.
"""
return AOTUnjellier().unjelly(aot) |
Pass me a string of code or a filename that defines an 'app' variable (in
terms of Abstract Objects!), and I'll execute it and unjelly the resulting
AOT for you, returning a newly unpersisted Application object! | def unjellyFromSource(stringOrFile):
"""
Pass me a string of code or a filename that defines an 'app' variable (in
terms of Abstract Objects!), and I'll execute it and unjelly the resulting
AOT for you, returning a newly unpersisted Application object!
"""
ns = {
"Instance": Instance,
"InstanceMethod": InstanceMethod,
"Class": Class,
"Function": Function,
"Module": Module,
"Ref": Ref,
"Deref": Deref,
"Copyreg": Copyreg,
}
if hasattr(stringOrFile, "read"):
source = stringOrFile.read()
else:
source = stringOrFile
code = compile(source, "<source>", "exec")
eval(code, ns, ns)
if "app" in ns:
return unjellyFromAOT(ns["app"])
else:
raise ValueError("%s needs to define an 'app', it didn't!" % stringOrFile) |
Convert an object to an Abstract Object Tree. | def jellyToAOT(obj):
"""Convert an object to an Abstract Object Tree."""
return AOTJellier().jelly(obj) |
Pass me an object and, optionally, a file object.
I'll convert the object to an AOT and either return it (if no file was
specified) or write it to the file. | def jellyToSource(obj, file=None):
"""
Pass me an object and, optionally, a file object.
I'll convert the object to an AOT and either return it (if no file was
specified) or write it to the file.
"""
aot = jellyToAOT(obj)
if file:
file.write(getSource(aot).encode("utf-8"))
else:
return getSource(aot) |
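A hedged round-trip sketch of the jelly/unjelly pair; it assumes these helpers are importable from twisted.persisted.aot, which is where they live in Twisted:

from twisted.persisted.aot import jellyToSource, unjellyFromSource

original = {"name": "example", "values": [1, 2, 3]}
source = jellyToSource(original)   # Python source text defining 'app = ...'
assert unjellyFromSource(source) == original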
Get the associated class of the given method object.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: a class
@rtype: L{type} | def _classOfMethod(methodObject):
"""
Get the associated class of the given method object.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: a class
@rtype: L{type}
"""
return methodObject.__self__.__class__ |
Get the associated function of the given method object.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: the function implementing C{methodObject}
@rtype: L{types.FunctionType} | def _funcOfMethod(methodObject):
"""
Get the associated function of the given method object.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: the function implementing C{methodObject}
@rtype: L{types.FunctionType}
"""
return methodObject.__func__ |
Get the object that a bound method is bound to.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: the C{self} passed to C{methodObject}
@rtype: L{object} | def _selfOfMethod(methodObject):
"""
Get the object that a bound method is bound to.
@param methodObject: a bound method
@type methodObject: L{types.MethodType}
@return: the C{self} passed to C{methodObject}
@rtype: L{object}
"""
return methodObject.__self__ |
This is for 'anydbm' compatibility.
@param file: The parameter to pass to the DirDBM constructor.
@param flag: ignored
@param mode: ignored | def open(file, flag=None, mode=None):
"""
This is for 'anydbm' compatibility.
@param file: The parameter to pass to the DirDBM constructor.
@param flag: ignored
@param mode: ignored
"""
return DirDBM(file) |
Load an object from a file.
Deserialize an object from a file. The file can be encrypted.
@param filename: string
@param style: string (one of 'pickle' or 'source') | def load(filename, style):
"""Load an object from a file.
Deserialize an object from a file. The file can be encrypted.
@param filename: string
@param style: string (one of 'pickle' or 'source')
"""
mode = "r"
if style == "source":
from twisted.persisted.aot import unjellyFromSource as _load
else:
_load, mode = pickle.load, "rb"
fp = open(filename, mode)
ee = _EverythingEphemeral(sys.modules["__main__"])
sys.modules["__main__"] = ee
ee.initRun = 1
with fp:
try:
value = _load(fp)
finally:
# Restore __main__ whether or not an exception was raised.
sys.modules["__main__"] = ee.mainMod
styles.doUpgrade()
ee.initRun = 0
persistable = IPersistable(value, None)
if persistable is not None:
persistable.setStyle(style)
return value |
Load the value of a variable in a Python file.
Run the contents of the file in a namespace and return the result of the
variable named C{variable}.
@param filename: string
@param variable: string | def loadValueFromFile(filename, variable):
"""Load the value of a variable in a Python file.
Run the contents of the file in a namespace and return the result of the
variable named C{variable}.
@param filename: string
@param variable: string
"""
with open(filename) as fileObj:
data = fileObj.read()
d = {"__file__": filename}
codeObj = compile(data, filename, "exec")
eval(codeObj, d, d)
value = d[variable]
return value |
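A small usage sketch; it assumes loadValueFromFile is importable from twisted.persisted.sob, and the temporary file is created only for illustration:

import tempfile

from twisted.persisted.sob import loadValueFromFile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("answer = 6 * 7\n")
    path = f.name

assert loadValueFromFile(path, "answer") == 42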
support function for copy_reg to pickle method refs | def pickleMethod(method):
"support function for copy_reg to pickle method refs"
return (
unpickleMethod,
(method.__name__, method.__self__, method.__self__.__class__),
) |
Retrieve the function object implementing a method name given the class
it's on and a method name.
@param classObject: A class to retrieve the method's function from.
@type classObject: L{type}
@param methodName: The name of the method whose function to retrieve.
@type methodName: native L{str}
@return: the function object corresponding to the given method name.
@rtype: L{types.FunctionType} | def _methodFunction(classObject, methodName):
"""
Retrieve the function object implementing a method name given the class
it's on and a method name.
@param classObject: A class to retrieve the method's function from.
@type classObject: L{type}
@param methodName: The name of the method whose function to retrieve.
@type methodName: native L{str}
@return: the function object corresponding to the given method name.
@rtype: L{types.FunctionType}
"""
methodObject = getattr(classObject, methodName)
return methodObject |
Support function for copy_reg to unpickle method refs.
@param im_name: The name of the method.
@type im_name: native L{str}
@param im_self: The instance that the method was present on.
@type im_self: L{object}
@param im_class: The class where the method was declared.
@type im_class: L{type} or L{None} | def unpickleMethod(im_name, im_self, im_class):
"""
Support function for copy_reg to unpickle method refs.
@param im_name: The name of the method.
@type im_name: native L{str}
@param im_self: The instance that the method was present on.
@type im_self: L{object}
@param im_class: The class where the method was declared.
@type im_class: L{type} or L{None}
"""
if im_self is None:
return getattr(im_class, im_name)
try:
methodFunction = _methodFunction(im_class, im_name)
except AttributeError:
log.msg("Method", im_name, "not on class", im_class)
assert im_self is not None, "No recourse: no instance to guess from."
# Attempt a last-ditch fix before giving up. If classes have changed
# around since we pickled this method, we may still be able to get it
# by looking on the instance's current class.
if im_self.__class__ is im_class:
raise
return unpickleMethod(im_name, im_self, im_self.__class__)
else:
bound = types.MethodType(methodFunction, im_self)
return bound |
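A hedged sketch of how reducers like these are wired into the pickle machinery via copyreg; it assumes pickleMethod and unpickleMethod above are in scope, and the Greeter class is a made-up example:

import copyreg
import pickle
import types

# Tell pickle to route bound methods through the helpers defined above.
copyreg.pickle(types.MethodType, pickleMethod, unpickleMethod)

class Greeter:
    def hello(self):
        return "hello"

restored = pickle.loads(pickle.dumps(Greeter().hello))
assert restored() == "hello"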
Reduce, in the sense of L{pickle}'s C{object.__reduce__} special method, a
function object into its constituent parts.
@param f: The function to reduce.
@type f: L{types.FunctionType}
@return: a 2-tuple of a reference to L{_unpickleFunction} and a tuple of
its arguments, a 1-tuple of the function's fully qualified name.
@rtype: 2-tuple of C{callable, native string} | def _pickleFunction(f):
"""
Reduce, in the sense of L{pickle}'s C{object.__reduce__} special method, a
function object into its constituent parts.
@param f: The function to reduce.
@type f: L{types.FunctionType}
@return: a 2-tuple of a reference to L{_unpickleFunction} and a tuple of
its arguments, a 1-tuple of the function's fully qualified name.
@rtype: 2-tuple of C{callable, native string}
"""
if f.__name__ == "<lambda>":
raise _UniversalPicklingError(f"Cannot pickle lambda function: {f}")
return (_unpickleFunction, tuple([".".join([f.__module__, f.__qualname__])])) |
Convert a function name into a function by importing it.
This is a synonym for L{twisted.python.reflect.namedAny}, but imported
locally to avoid circular imports, and also to provide a persistent name
that can be stored (and deprecated) independently of C{namedAny}.
@param fullyQualifiedName: The fully qualified name of a function.
@type fullyQualifiedName: native C{str}
@return: A function object imported from the given location.
@rtype: L{types.FunctionType} | def _unpickleFunction(fullyQualifiedName):
"""
Convert a function name into a function by importing it.
This is a synonym for L{twisted.python.reflect.namedAny}, but imported
locally to avoid circular imports, and also to provide a persistent name
that can be stored (and deprecated) independently of C{namedAny}.
@param fullyQualifiedName: The fully qualified name of a function.
@type fullyQualifiedName: native C{str}
@return: A function object imported from the given location.
@rtype: L{types.FunctionType}
"""
from twisted.python.reflect import namedAny
return namedAny(fullyQualifiedName) |
support function for copy_reg to pickle module refs | def pickleModule(module):
"support function for copy_reg to pickle module refs"
return unpickleModule, (module.__name__,) |
support function for copy_reg to unpickle module refs | def unpickleModule(name):
"support function for copy_reg to unpickle module refs"
if name in oldModules:
log.msg("Module has moved: %s" % name)
name = oldModules[name]
log.msg(name)
return __import__(name, {}, {}, "x") |
Reduce the given cStringO.
This is only called on Python 2, because the cStringIO module only exists
on Python 2.
@param stringo: The string output to pickle.
@type stringo: C{cStringIO.OutputType} | def pickleStringO(stringo):
"""
Reduce the given cStringO.
This is only called on Python 2, because the cStringIO module only exists
on Python 2.
@param stringo: The string output to pickle.
@type stringo: C{cStringIO.OutputType}
"""
"support function for copy_reg to pickle StringIO.OutputTypes"
return unpickleStringO, (stringo.getvalue(), stringo.tell()) |
Convert the output of L{pickleStringO} into an appropriate type for the
current python version. This may be called on Python 3 and will convert a
cStringIO into an L{io.StringIO}.
@param val: The content of the file.
@type val: L{bytes}
@param sek: The seek position of the file.
@type sek: L{int}
@return: a file-like object which you can write bytes to.
@rtype: C{cStringIO.OutputType} on Python 2, L{io.StringIO} on Python 3. | def unpickleStringO(val, sek):
"""
Convert the output of L{pickleStringO} into an appropriate type for the
current python version. This may be called on Python 3 and will convert a
cStringIO into an L{io.StringIO}.
@param val: The content of the file.
@type val: L{bytes}
@param sek: The seek position of the file.
@type sek: L{int}
@return: a file-like object which you can write bytes to.
@rtype: C{cStringIO.OutputType} on Python 2, L{io.StringIO} on Python 3.
"""
x = _cStringIO()
x.write(val)
x.seek(sek)
return x |
Reduce the given cStringI.
This is only called on Python 2, because the cStringIO module only exists
on Python 2.
@param stringi: The string input to pickle.
@type stringi: C{cStringIO.InputType}
@return: a 2-tuple of (C{unpickleStringI}, (bytes, pointer))
@rtype: 2-tuple of (function, (bytes, int)) | def pickleStringI(stringi):
"""
Reduce the given cStringI.
This is only called on Python 2, because the cStringIO module only exists
on Python 2.
@param stringi: The string input to pickle.
@type stringi: C{cStringIO.InputType}
@return: a 2-tuple of (C{unpickleStringI}, (bytes, pointer))
@rtype: 2-tuple of (function, (bytes, int))
"""
return unpickleStringI, (stringi.getvalue(), stringi.tell()) |
Convert the output of L{pickleStringI} into an appropriate type for the
current Python version.
This may be called on Python 3 and will convert a cStringIO into an
L{io.StringIO}.
@param val: The content of the file.
@type val: L{bytes}
@param sek: The seek position of the file.
@type sek: L{int}
@return: a file-like object which you can read bytes from.
@rtype: C{cStringIO.OutputType} on Python 2, L{io.StringIO} on Python 3. | def unpickleStringI(val, sek):
"""
Convert the output of L{pickleStringI} into an appropriate type for the
current Python version.
This may be called on Python 3 and will convert a cStringIO into an
L{io.StringIO}.
@param val: The content of the file.
@type val: L{bytes}
@param sek: The seek position of the file.
@type sek: L{int}
@return: a file-like object which you can read bytes from.
@rtype: C{cStringIO.OutputType} on Python 2, L{io.StringIO} on Python 3.
"""
x = _cStringIO(val)
x.seek(sek)
return x |
Require that a Versioned instance be upgraded completely first. | def requireUpgrade(obj):
"""Require that a Versioned instance be upgraded completely first."""
objID = id(obj)
if objID in versionedsToUpgrade and objID not in upgraded:
upgraded[objID] = 1
obj.versionUpgrade()
return obj |
Get all of the parent classes of C{c}, not including C{c} itself, which are
strict subclasses of L{Versioned}.
@param c: a class
@returns: list of classes | def _aybabtu(c):
"""
Get all of the parent classes of C{c}, not including C{c} itself, which are
strict subclasses of L{Versioned}.
@param c: a class
@returns: list of classes
"""
# begin with two classes that should *not* be included in the
# final result
l = [c, Versioned]
for b in inspect.getmro(c):
if b not in l and issubclass(b, Versioned):
l.append(b)
# return all except the unwanted classes
return l[2:] |
Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2 | def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2
"""
ut = Untokenizer()
out = ut.untokenize(iterable)
if ut.encoding is not None:
out = out.encode(ut.encoding)
return out |
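The round-trip invariant from the docstring, demonstrated with the standard library's tokenize module (which these functions appear to mirror):

import io
import tokenize as std_tokenize

source = b"x = 1\nprint(x)\n"
t1 = [tok[:2] for tok in std_tokenize.tokenize(io.BytesIO(source).readline)]
newcode = std_tokenize.untokenize(t1)
t2 = [tok[:2] for tok in std_tokenize.tokenize(io.BytesIO(newcode).readline)]
assert t1 == t2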
Imitates get_normal_name in tokenizer.c. | def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith(
("latin-1-", "iso-8859-1-", "iso-latin-1-")
):
return "iso-8859-1"
return orig_enc |
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned. | def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = "utf-8"
def read_or_stop():
try:
return readline()
except StopIteration:
return b""
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode("utf-8")
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = "{} for {!r}".format(msg, filename)
raise SyntaxError(msg)
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename, encoding)
raise SyntaxError(msg)
if bom_found:
if encoding != "utf-8":
# This behaviour mimics the Python interpreter
if filename is None:
msg = "encoding problem: utf-8"
else:
msg = "encoding problem for {!r}: utf-8".format(filename)
raise SyntaxError(msg)
encoding += "-sig"
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = "utf-8-sig"
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second] |
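A quick demonstration of encoding detection against an in-memory file carrying a PEP 263 cookie, again using the standard library's tokenize module for illustration:

import io
from tokenize import detect_encoding

data = b"# -*- coding: iso-8859-1 -*-\nx = 'caf\xe9'\n"
encoding, lines = detect_encoding(io.BytesIO(data).readline)
assert encoding == "iso-8859-1"
assert lines == [b"# -*- coding: iso-8859-1 -*-\n"]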
Open a file in read only mode using the encoding detected by
detect_encoding(). | def open(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = _builtin_open(filename, "rb")
try:
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = "r"
return text
except BaseException:
buffer.close()
raise |
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternatively, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
physical line.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream. | def tokenize(readline):
"""
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternatively, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
physical line.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
encoding, consumed = detect_encoding(readline)
empty = _itertools.repeat(b"")
rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
return _tokenize(rl_gen.__next__, encoding) |
Tokenize a source reading Python code as unicode strings.
This has the same API as tokenize(), except that it expects the *readline*
callable to return str objects instead of bytes. | def generate_tokens(readline):
"""Tokenize a source reading Python code as unicode strings.
This has the same API as tokenize(), except that it expects the *readline*
callable to return str objects instead of bytes.
"""
return _tokenize(readline, None) |
A sample function for pickling. | def sampleFunction() -> None:
"""
A sample function for pickling.
""" |
Use L{crypt.crypt} to verify that an unencrypted
password matches the encrypted password.
@param crypted: The encrypted password, obtained from
the Unix password database or Unix shadow
password database.
@param pw: The unencrypted password.
@return: L{True} if there is a successful match, else L{False}.
@rtype: L{bool} | def verifyCryptedPassword(crypted, pw):
"""
Use L{crypt.crypt} to verify that an unencrypted
password matches the encrypted password.
@param crypted: The encrypted password, obtained from
the Unix password database or Unix shadow
password database.
@param pw: The unencrypted password.
@return: L{True} if there is a successful match, else L{False}.
@rtype: L{bool}
"""
try:
import crypt
except ImportError:
crypt = None
if crypt is None:
raise NotImplementedError("cred_unix not supported on this platform")
if isinstance(pw, bytes):
pw = pw.decode("utf-8")
if isinstance(crypted, bytes):
crypted = crypted.decode("utf-8")
try:
crypted_check = crypt.crypt(pw, crypted)
if isinstance(crypted_check, bytes):
crypted_check = crypted_check.decode("utf-8")
return crypted_check == crypted
except OSError:
return False |
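A hedged usage sketch, assuming verifyCryptedPassword above is in scope; note that the crypt module is POSIX-only and deprecated since Python 3.11 (removed in 3.13), so this only runs where it is still available:

import crypt

hashed = crypt.crypt("correct horse", crypt.mksalt())
assert verifyCryptedPassword(hashed, "correct horse") is True
assert verifyCryptedPassword(hashed, "wrong") is False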
Returns the split version of an NMEA sentence, minus header
and checksum.
>>> _split(b"$GPGGA,spam,eggs*00")
[b'GPGGA', b'spam', b'eggs']
@param sentence: The NMEA sentence to split.
@type sentence: C{bytes} | def _split(sentence):
"""
Returns the split version of an NMEA sentence, minus header
and checksum.
>>> _split(b"$GPGGA,spam,eggs*00")
[b'GPGGA', b'spam', b'eggs']
@param sentence: The NMEA sentence to split.
@type sentence: C{bytes}
"""
if sentence[-3:-2] == b"*": # Sentence with checksum
return sentence[1:-3].split(b",")
elif sentence[-1:] == b"*": # Sentence without checksum
return sentence[1:-1].split(b",")
else:
raise base.InvalidSentence(f"malformed sentence {sentence}") |
Validates the checksum of an NMEA sentence.
@param sentence: The NMEA sentence to check the checksum of.
@type sentence: C{bytes}
@raise ValueError: If the sentence has an invalid checksum.
Simply returns on sentences that either don't have a checksum,
or have a valid checksum. | def _validateChecksum(sentence):
"""
Validates the checksum of an NMEA sentence.
@param sentence: The NMEA sentence to check the checksum of.
@type sentence: C{bytes}
@raise ValueError: If the sentence has an invalid checksum.
Simply returns on sentences that either don't have a checksum,
or have a valid checksum.
"""
if sentence[-3:-2] == b"*": # Sentence has a checksum
reference, source = int(sentence[-2:], 16), sentence[1:-3]
computed = reduce(operator.xor, [ord(x) for x in iterbytes(source)])
if computed != reference:
raise base.InvalidChecksum(f"{computed:02x} != {reference:02x}") |
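A self-contained sketch of the XOR checksum convention these helpers rely on: the checksum covers everything between the leading '$' and the '*', exclusive:

import operator
from functools import reduce

body = b"GPGGA,spam,eggs"
checksum = reduce(operator.xor, body)      # iterating bytes yields ints
sentence = b"$" + body + b"*" + b"%02X" % checksum
# _split(sentence) would yield [b'GPGGA', b'spam', b'eggs'], and
# _validateChecksum(sentence) would return without raising.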
Builds and returns a latitude of given value. | def _makeLatitude(value: float) -> base.Coordinate:
"""
Builds and returns a latitude of given value.
"""
return base.Coordinate(value, Angles.LATITUDE) |
Builds and returns a longitude of given value. | def _makeLongitude(value: float) -> base.Coordinate:
"""
Builds and returns a longitude of given value.
"""
return base.Coordinate(value, Angles.LONGITUDE) |
Builds an NMEA float representation for a given angle in degrees and
decimal minutes.
@param degrees: The integer degrees for this angle.
@type degrees: C{int}
@param minutes: The decimal minutes value for this angle.
@type minutes: C{float}
@return: The NMEA float representation for this angle.
@rtype: C{str} | def _nmeaFloat(degrees: int, minutes: float) -> str:
"""
Builds an NMEA float representation for a given angle in degrees and
decimal minutes.
@param degrees: The integer degrees for this angle.
@type degrees: C{int}
@param minutes: The decimal minutes value for this angle.
@type minutes: C{float}
@return: The NMEA float representation for this angle.
@rtype: C{str}
"""
return "%i%0.3f" % (degrees, minutes) |
Return the sign of a coordinate.
This is C{1} if the coordinate is in the northern or eastern hemispheres,
C{-1} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The sign of the coordinate value.
@rtype: C{int} | def _coordinateSign(hemisphere: str) -> Literal[1, -1]:
"""
Return the sign of a coordinate.
This is C{1} if the coordinate is in the northern or eastern hemispheres,
C{-1} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The sign of the coordinate value.
@rtype: C{int}
"""
return 1 if hemisphere in "NE" else -1 |
Return the type of a coordinate.
This is L{Angles.LATITUDE} if the coordinate is in the northern or
southern hemispheres, L{Angles.LONGITUDE} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The type of the coordinate (L{Angles.LATITUDE} or
L{Angles.LONGITUDE}) | def _coordinateType(hemisphere: str) -> NamedConstant:
"""
Return the type of a coordinate.
This is L{Angles.LATITUDE} if the coordinate is in the northern or
southern hemispheres, L{Angles.LONGITUDE} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The type of the coordinate (L{Angles.LATITUDE} or
L{Angles.LONGITUDE})
"""
return Angles.LATITUDE if hemisphere in "NS" else Angles.LONGITUDE |
(Private) Normalize an argument name from the wire for use with Python
code. If the return value is going to be a python keyword it will be
capitalized. If it contains any dashes they will be replaced with
underscores.
The rationale behind this method is that AMP should be an inherently
multi-language protocol, so message keys may contain all manner of bizarre
bytes. This is not a complete solution; there are still forms of arguments
that this implementation will be unable to parse. However, Python
identifiers share a huge raft of properties with identifiers from many
other languages, so this is a 'good enough' effort for now. We deal
explicitly with dashes because that is the most likely departure: Lisps
commonly use dashes to separate method names, so protocols initially
implemented in a lisp amp dialect may use dashes in argument or command
names.
@param key: a C{bytes}, looking something like 'foo-bar-baz' or 'from'
@type key: C{bytes}
@return: a native string which is a valid python identifier, looking
something like 'foo_bar_baz' or 'From'. | def _wireNameToPythonIdentifier(key):
"""
(Private) Normalize an argument name from the wire for use with Python
code. If the return value is going to be a python keyword it will be
capitalized. If it contains any dashes they will be replaced with
underscores.
The rationale behind this method is that AMP should be an inherently
multi-language protocol, so message keys may contain all manner of bizarre
bytes. This is not a complete solution; there are still forms of arguments
that this implementation will be unable to parse. However, Python
identifiers share a huge raft of properties with identifiers from many
other languages, so this is a 'good enough' effort for now. We deal
explicitly with dashes because that is the most likely departure: Lisps
commonly use dashes to separate method names, so protocols initially
implemented in a lisp amp dialect may use dashes in argument or command
names.
@param key: a C{bytes}, looking something like 'foo-bar-baz' or 'from'
@type key: C{bytes}
@return: a native string which is a valid python identifier, looking
something like 'foo_bar_baz' or 'From'.
"""
lkey = nativeString(key.replace(b"-", b"_"))
if lkey in PYTHON_KEYWORDS:
return lkey.title()
return lkey |
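A tiny illustration of the normalization rule described above, re-sketched with the standard keyword module; the real helper additionally decodes from bytes via nativeString and consults PYTHON_KEYWORDS:

import keyword

def normalize(key: bytes) -> str:
    name = key.replace(b"-", b"_").decode("ascii")
    return name.title() if keyword.iskeyword(name) else name

assert normalize(b"foo-bar-baz") == "foo_bar_baz"
assert normalize(b"from") == "From"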
Convert an AmpBox to a dictionary of python objects, converting through a
given arglist.
@param strings: an AmpBox (or dict of strings)
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param proto: an L{AMP} instance.
@return: the converted dictionary mapping names to argument objects. | def _stringsToObjects(strings, arglist, proto):
"""
Convert an AmpBox to a dictionary of python objects, converting through a
given arglist.
@param strings: an AmpBox (or dict of strings)
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param proto: an L{AMP} instance.
@return: the converted dictionary mapping names to argument objects.
"""
objects = {}
myStrings = strings.copy()
for argname, argparser in arglist:
argparser.fromBox(argname, myStrings, objects, proto)
return objects |
Convert a dictionary of python objects to an AmpBox, converting through a
given arglist.
@param objects: a dict mapping names to python objects
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param strings: [OUT PARAMETER] An object providing the L{dict}
interface which will be populated with serialized data.
@param proto: an L{AMP} instance.
@return: The converted dictionary mapping names to encoded argument
strings (identical to C{strings}). | def _objectsToStrings(objects, arglist, strings, proto):
"""
Convert a dictionary of python objects to an AmpBox, converting through a
given arglist.
@param objects: a dict mapping names to python objects
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param strings: [OUT PARAMETER] An object providing the L{dict}
interface which will be populated with serialized data.
@param proto: an L{AMP} instance.
@return: The converted dictionary mapping names to encoded argument
strings (identical to C{strings}).
"""
myObjects = objects.copy()
for argname, argparser in arglist:
argparser.toBox(argname, strings, myObjects, proto)
return strings |
Normalize a path, as represented by a list of strings each
representing one segment of the path. | def toSegments(cwd, path):
"""
Normalize a path, as represented by a list of strings each
representing one segment of the path.
"""
if path.startswith("/"):
segs = []
else:
segs = cwd[:]
for s in path.split("/"):
if s == "." or s == "":
continue
elif s == "..":
if segs:
segs.pop()
else:
raise InvalidPath(cwd, path)
elif "\0" in s or "/" in s:
raise InvalidPath(cwd, path)
else:
segs.append(s)
return segs |
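A hedged usage sketch of the normalization rules above; it assumes toSegments and InvalidPath are importable from twisted.protocols.ftp:

from twisted.protocols.ftp import InvalidPath, toSegments

assert toSegments(["home", "alice"], "docs/./notes.txt") == [
    "home", "alice", "docs", "notes.txt"]
assert toSegments(["home", "alice"], "/etc/passwd") == ["etc", "passwd"]
assert toSegments(["home"], "..") == []
try:
    toSegments([], "..")       # '..' past the root is rejected
except InvalidPath:
    pass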
Map C{OSError} and C{IOError} to standard FTP errors. | def errnoToFailure(e, path):
"""
Map C{OSError} and C{IOError} to standard FTP errors.
"""
if e == errno.ENOENT:
return defer.fail(FileNotFoundError(path))
elif e == errno.EACCES or e == errno.EPERM:
return defer.fail(PermissionDeniedError(path))
elif e == errno.ENOTDIR:
return defer.fail(IsNotADirectoryError(path))
elif e == errno.EEXIST:
return defer.fail(FileExistsError(path))
elif e == errno.EISDIR:
return defer.fail(IsADirectoryError(path))
else:
return defer.fail() |
Helper for checking if an FTPShell `segments` contains a wildcard Unix
expression.
Only filename globbing is supported.
This means that wildcards can only be present in the last element of
`segments`.
@type segments: C{list}
@param segments: List of path elements as used by the FTP server protocol.
@rtype: Boolean
@return: True if `segments` contains a globbing expression. | def _isGlobbingExpression(segments=None):
"""
Helper for checking if an FTPShell `segments` contains a wildcard Unix
expression.
Only filename globbing is supported.
This means that wildcards can only be present in the last element of
`segments`.
@type segments: C{list}
@param segments: List of path elements as used by the FTP server protocol.
@rtype: Boolean
@return: True if `segments` contains a globbing expression.
"""
if not segments:
return False
# To check that something is a glob expression, we convert it to
# Regular Expression.
# We compare it to the translation of a known non-glob expression.
# If the result is the same as the original expression then it contains no
# globbing expression.
globCandidate = segments[-1]
globTranslations = fnmatch.translate(globCandidate)
nonGlobTranslations = _testTranslation.replace("TEST", globCandidate, 1)
if nonGlobTranslations == globTranslations:
return False
else:
return True |
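The translate-and-compare trick above can be seen directly with fnmatch: a name without glob metacharacters translates just like the placeholder does, while a wildcard pattern does not:

import fnmatch

template = fnmatch.translate("TEST")
assert template.replace("TEST", "notes", 1) == fnmatch.translate("notes")
assert template.replace("TEST", "*", 1) != fnmatch.translate("*")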
Return the primary and supplementary groups for the given UID.
@type uid: C{int} | def _getgroups(uid):
"""
Return the primary and supplementary groups for the given UID.
@type uid: C{int}
"""
result = []
pwent = pwd.getpwuid(uid)
result.append(pwent.pw_gid)
for grent in grp.getgrall():
if pwent.pw_name in grent.gr_mem:
result.append(grent.gr_gid)
return result |
checks to see if uid has proper permissions to access path with mode
@type uid: C{int}
@param uid: numeric user id
@type gid: C{int}
@param gid: numeric group id
@type spath: C{str}
@param spath: the path on the server to test
@type mode: C{str}
@param mode: 'r' or 'w' (read or write)
@rtype: C{bool}
@return: True if the given credentials have the specified form of
access to the given path | def _testPermissions(uid, gid, spath, mode="r"):
"""
checks to see if uid has proper permissions to access path with mode
@type uid: C{int}
@param uid: numeric user id
@type gid: C{int}
@param gid: numeric group id
@type spath: C{str}
@param spath: the path on the server to test
@type mode: C{str}
@param mode: 'r' or 'w' (read or write)
@rtype: C{bool}
@return: True if the given credentials have the specified form of
access to the given path
"""
if mode == "r":
usr = stat.S_IRUSR
grp = stat.S_IRGRP
oth = stat.S_IROTH
amode = os.R_OK
elif mode == "w":
usr = stat.S_IWUSR
grp = stat.S_IWGRP
oth = stat.S_IWOTH
amode = os.W_OK
else:
raise ValueError(f"Invalid mode {mode!r}: must specify 'r' or 'w'")
access = False
if os.path.exists(spath):
if uid == 0:
access = True
else:
s = os.stat(spath)
if usr & s.st_mode and uid == s.st_uid:
access = True
elif grp & s.st_mode and gid in _getgroups(uid):
access = True
elif oth & s.st_mode:
access = True
if access:
if not os.access(spath, amode):
access = False
log.msg(
"Filesystem grants permission to UID %d but it is "
"inaccessible to me running as UID %d" % (uid, os.getuid())
)
return access |
Decode an FTP response specifying a host and port.
@return: a 2-tuple of (host, port). | def decodeHostPort(line):
"""
Decode an FTP response specifying a host and port.
@return: a 2-tuple of (host, port).
"""
abcdef = re.sub("[^0-9, ]", "", line)
parsed = [int(p.strip()) for p in abcdef.split(",")]
for x in parsed:
if x < 0 or x > 255:
raise ValueError("Out of range", line, x)
a, b, c, d, e, f = parsed
host = f"{a}.{b}.{c}.{d}"
port = (int(e) << 8) + int(f)
return host, port |
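A hedged usage sketch; it assumes decodeHostPort is importable from twisted.protocols.ftp and is handed the six comma-separated numbers of a PASV-style reply:

from twisted.protocols.ftp import decodeHostPort

host, port = decodeHostPort("192,168,1,2,19,137")
assert host == "192.168.1.2"
assert port == (19 << 8) + 137   # 5001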
Returns the path from a response to a PWD command.
Responses typically look like::
257 "/home/andrew" is current directory.
For this example, I will return C{'/home/andrew'}.
If I can't find the path, I return L{None}. | def parsePWDResponse(response):
"""
Returns the path from a response to a PWD command.
Responses typically look like::
257 "/home/andrew" is current directory.
For this example, I will return C{'/home/andrew'}.
If I can't find the path, I return L{None}.
"""
match = re.search('"(.*)"', response)
if match:
return match.groups()[0]
else:
return None |
L{identityPumpPolicy} is a policy which delivers each chunk of data written
to the given queue as-is to the target.
This isn't a particularly realistic policy.
@see: L{loopbackAsync} | def identityPumpPolicy(queue, target):
"""
L{identityPumpPolicy} is a policy which delivers each chunk of data written
to the given queue as-is to the target.
This isn't a particularly realistic policy.
@see: L{loopbackAsync}
"""
while queue:
bytes = queue.get()
if bytes is None:
break
target.dataReceived(bytes) |
L{collapsingPumpPolicy} is a policy which collapses all outstanding chunks
into a single string and delivers it to the target.
@see: L{loopbackAsync} | def collapsingPumpPolicy(queue, target):
"""
L{collapsingPumpPolicy} is a policy which collapses all outstanding chunks
into a single string and delivers it to the target.
@see: L{loopbackAsync}
"""
bytes = []
while queue:
chunk = queue.get()
if chunk is None:
break
bytes.append(chunk)
if bytes:
target.dataReceived(b"".join(bytes)) |
Establish a connection between C{server} and C{client} then transfer data
between them until the connection is closed. This is often useful for
testing a protocol.
@param server: The protocol instance representing the server-side of this
connection.
@param client: The protocol instance representing the client-side of this
connection.
@param pumpPolicy: When either C{server} or C{client} writes to its
transport, the string passed in is added to a queue of data for the
other protocol. Eventually, C{pumpPolicy} will be called with one such
queue and the corresponding protocol object. The pump policy callable
is responsible for emptying the queue and passing the strings it
contains to the given protocol's C{dataReceived} method. The signature
of C{pumpPolicy} is C{(queue, protocol)}. C{queue} is an object with a
C{get} method which will return the next string written to the
transport, or L{None} if the transport has been disconnected, and which
evaluates to C{True} if and only if there are more items to be
retrieved via C{get}.
@return: A L{Deferred} which fires when the connection has been closed and
both sides have received notification of this. | def loopbackAsync(server, client, pumpPolicy=identityPumpPolicy):
"""
Establish a connection between C{server} and C{client} then transfer data
between them until the connection is closed. This is often useful for
testing a protocol.
@param server: The protocol instance representing the server-side of this
connection.
@param client: The protocol instance representing the client-side of this
connection.
@param pumpPolicy: When either C{server} or C{client} writes to its
transport, the string passed in is added to a queue of data for the
other protocol. Eventually, C{pumpPolicy} will be called with one such
queue and the corresponding protocol object. The pump policy callable
is responsible for emptying the queue and passing the strings it
contains to the given protocol's C{dataReceived} method. The signature
of C{pumpPolicy} is C{(queue, protocol)}. C{queue} is an object with a
C{get} method which will return the next string written to the
transport, or L{None} if the transport has been disconnected, and which
evaluates to C{True} if and only if there are more items to be
retrieved via C{get}.
@return: A L{Deferred} which fires when the connection has been closed and
both sides have received notification of this.
"""
serverToClient = _LoopbackQueue()
clientToServer = _LoopbackQueue()
server.makeConnection(_LoopbackTransport(serverToClient))
client.makeConnection(_LoopbackTransport(clientToServer))
return _loopbackAsyncBody(
server, serverToClient, client, clientToServer, pumpPolicy
) |
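A hedged usage sketch wiring two illustrative protocols together; it assumes loopbackAsync is importable from twisted.protocols.loopback (Echo and Client here are made-up stand-ins, not Twisted classes):

from twisted.internet import protocol
from twisted.protocols.loopback import loopbackAsync

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)

class Client(protocol.Protocol):
    received = b""

    def connectionMade(self):
        self.transport.write(b"ping")

    def dataReceived(self, data):
        self.received += data
        self.transport.loseConnection()

client = Client()
done = loopbackAsync(Echo(), client)
# When the deferred fires, the client has seen the echoed bytes.
done.addCallback(lambda ignored: print(client.received))   # b'ping'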
Transfer bytes from the output queue of each protocol to the input of the other.
@param server: The protocol instance representing the server-side of this
connection.
@param serverToClient: The L{_LoopbackQueue} holding the server's output.
@param client: The protocol instance representing the client-side of this
connection.
@param clientToServer: The L{_LoopbackQueue} holding the client's output.
@param pumpPolicy: See L{loopbackAsync}.
@return: A L{Deferred} which fires when the connection has been closed and
both sides have received notification of this. | def _loopbackAsyncBody(server, serverToClient, client, clientToServer, pumpPolicy):
"""
Transfer bytes from the output queue of each protocol to the input of the other.
@param server: The protocol instance representing the server-side of this
connection.
@param serverToClient: The L{_LoopbackQueue} holding the server's output.
@param client: The protocol instance representing the client-side of this
connection.
@param clientToServer: The L{_LoopbackQueue} holding the client's output.
@param pumpPolicy: See L{loopbackAsync}.
@return: A L{Deferred} which fires when the connection has been closed and
both sides have received notification of this.
"""
def pump(source, q, target):
sent = False
if q:
pumpPolicy(q, target)
sent = True
if sent and not q:
# A write buffer has now been emptied. Give any producer on that
# side an opportunity to produce more data.
source.transport._pollProducer()
return sent
while 1:
disconnect = clientSent = serverSent = False
# Deliver the data which has been written.
serverSent = pump(server, serverToClient, client)
clientSent = pump(client, clientToServer, server)
if not clientSent and not serverSent:
# Neither side wrote any data. Wait for some new data to be added
# before trying to do anything further.
d = defer.Deferred()
clientToServer._notificationDeferred = d
serverToClient._notificationDeferred = d
d.addCallback(
_loopbackAsyncContinue,
server,
serverToClient,
client,
clientToServer,
pumpPolicy,
)
return d
if serverToClient.disconnect:
# The server wants to drop the connection. Flush any remaining
# data it has.
disconnect = True
pump(server, serverToClient, client)
elif clientToServer.disconnect:
# The client wants to drop the connection. Flush any remaining
# data it has.
disconnect = True
pump(client, clientToServer, server)
if disconnect:
# Someone wanted to disconnect, so okay, the connection is gone.
server.connectionLost(failure.Failure(main.CONNECTION_DONE))
client.connectionLost(failure.Failure(main.CONNECTION_DONE))
return defer.succeed(None) |
Run session between server and client protocol instances over TCP. | def loopbackTCP(server, client, port=0, noisy=True):
"""Run session between server and client protocol instances over TCP."""
from twisted.internet import reactor
f = policies.WrappingFactory(protocol.Factory())
serverWrapper = _FireOnClose(f, server)
f.noisy = noisy
f.buildProtocol = lambda addr: serverWrapper
serverPort = reactor.listenTCP(port, f, interface="127.0.0.1")
clientF = LoopbackClientFactory(client)
clientF.noisy = noisy
reactor.connectTCP("127.0.0.1", serverPort.getHost().port, clientF)
d = clientF.deferred
d.addCallback(lambda x: serverWrapper.deferred)
d.addCallback(lambda x: serverPort.stopListening())
return d |
Run session between server and client protocol instances over UNIX socket. | def loopbackUNIX(server, client, noisy=True):
"""Run session between server and client protocol instances over UNIX socket."""
path = tempfile.mktemp()
from twisted.internet import reactor
f = policies.WrappingFactory(protocol.Factory())
serverWrapper = _FireOnClose(f, server)
f.noisy = noisy
f.buildProtocol = lambda addr: serverWrapper
serverPort = reactor.listenUNIX(path, f)
clientF = LoopbackClientFactory(client)
clientF.noisy = noisy
reactor.connectUNIX(path, clientF)
d = clientF.deferred
d.addCallback(lambda x: serverWrapper.deferred)
d.addCallback(lambda x: serverPort.stopListening())
return d |
Compute a log prefix for a wrapper and the object it wraps.
@rtype: C{str} | def _wrappedLogPrefix(wrapper, wrapped):
"""
Compute a log prefix for a wrapper and the object it wraps.
@rtype: C{str}
"""
if ILoggingContext.providedBy(wrapped):
logPrefix = wrapped.logPrefix()
else:
logPrefix = wrapped.__class__.__name__
return f"{logPrefix} ({wrapper.__class__.__name__})" |
Capitalize a string, making sure to treat '-' as a word separator | def dashCapitalize(s):
"""
Capitalize a string, making sure to treat '-' as a word separator
"""
return "-".join([x.capitalize() for x in s.split("-")]) |
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via} | def parseViaHeader(value):
"""
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via}
"""
parts = value.split(";")
sent, params = parts[0], parts[1:]
protocolinfo, by = sent.split(" ", 1)
by = by.strip()
result = {}
pname, pversion, transport = protocolinfo.split("/")
if pname != "SIP" or pversion != "2.0":
raise ValueError(f"wrong protocol or version: {value!r}")
result["transport"] = transport
if ":" in by:
host, port = by.split(":")
result["port"] = int(port)
result["host"] = host
else:
result["host"] = by
for p in params:
# It's the comment-stripping dance!
p = p.strip().split(" ", 1)
if len(p) == 1:
p, comment = p[0], ""
else:
p, comment = p
if p == "hidden":
result["hidden"] = True
continue
parts = p.split("=", 1)
if len(parts) == 1:
name, value = parts[0], None
else:
name, value = parts
if name in ("rport", "ttl"):
value = int(value)
result[name] = value
return Via(**result) |
Parse a string into a URL object.
URIs are of the form 'sip:user@example.com'. | def parseURL(url, host=None, port=None):
"""
Parse a string into a URL object.
URIs are of the form 'sip:user@example.com'.
"""
d = {}
if not url.startswith("sip:"):
raise ValueError("unsupported scheme: " + url[:4])
parts = url[4:].split(";")
userdomain, params = parts[0], parts[1:]
udparts = userdomain.split("@", 1)
if len(udparts) == 2:
userpass, hostport = udparts
upparts = userpass.split(":", 1)
if len(upparts) == 1:
d["username"] = upparts[0]
else:
d["username"] = upparts[0]
d["password"] = upparts[1]
else:
hostport = udparts[0]
hpparts = hostport.split(":", 1)
if len(hpparts) == 1:
d["host"] = hpparts[0]
else:
d["host"] = hpparts[0]
d["port"] = int(hpparts[1])
if host is not None:
d["host"] = host
if port is not None:
d["port"] = port
for p in params:
if p == params[-1] and "?" in p:
d["headers"] = h = {}
p, headers = p.split("?", 1)
for header in headers.split("&"):
k, v = header.split("=")
h[k] = v
nv = p.split("=", 1)
if len(nv) == 1:
d.setdefault("other", []).append(p)
continue
name, value = nv
if name == "user":
d["usertype"] = value
elif name in ("transport", "ttl", "maddr", "method", "tag"):
if name == "ttl":
value = int(value)
d[name] = value
else:
d.setdefault("other", []).append(p)
return URL(**d) |
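A sketch of the parsing behaviour, assuming URL stores the parsed components as attributes of the same names:
url = parseURL("sip:bob:[email protected]:5070;transport=udp;ttl=3")
assert url.username == "bob"
assert url.password == "secret"
assert url.host == "proxy.example.com"
assert url.port == 5070
assert url.transport == "udp"
assert url.ttl == 3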
Clean a URL from a Request line. | def cleanRequestURL(url):
"""
Clean a URL from a Request line.
"""
url.transport = None
url.maddr = None
url.ttl = None
url.headers = {} |
Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers. | def parseAddress(address, host=None, port=None, clean=0):
"""
Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers.
"""
address = address.strip()
# Simple 'sip:foo' case
if address.startswith("sip:"):
return "", parseURL(address, host=host, port=port), {}
params = {}
name, url = address.split("<", 1)
name = name.strip()
if name.startswith('"'):
name = name[1:]
if name.endswith('"'):
name = name[:-1]
url, paramstring = url.split(">", 1)
url = parseURL(url, host=host, port=port)
paramstring = paramstring.strip()
if paramstring:
for l in paramstring.split(";"):
if not l:
continue
k, v = l.split("=")
params[k] = v
if clean:
# RFC 2543 6.21
url.ttl = None
url.headers = {}
url.transport = None
url.maddr = None
return name, url, params |
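For example (illustrative only; it relies on parseURL above and on URL exposing a host attribute):
name, url, params = parseAddress('"Alice" <sip:[email protected]>;tag=1928301774')
assert name == "Alice"
assert url.host == "example.com"
assert params["tag"] == "1928301774"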
Does the given OpenSSL.SSL.Error represent an end-of-file? | def _representsEOF(exceptionObject: Error) -> bool:
"""
Does the given OpenSSL.SSL.Error represent an end-of-file?
"""
reasonString: str
if isinstance(exceptionObject, SysCallError):
_, reasonString = exceptionObject.args
else:
errorQueue = exceptionObject.args[0]
_, _, reasonString = errorQueue[-1]
return reasonString.casefold().startswith("unexpected eof") |
Return the default reactor.
This is a function so it can be monkey-patched in tests, specifically
L{twisted.web.test.test_agent}. | def _get_default_clock() -> IReactorTime:
"""
Return the default reactor.
This is a function so it can be monkey-patched in tests, specifically
L{twisted.web.test.test_agent}.
"""
from twisted.internet import reactor
return cast(IReactorTime, reactor) |
Convert an error into a different error type.
@param sourceType: The type of exception that should be caught and
converted.
@type sourceType: L{BaseException}
@param targetType: The type of exception to which the original should be
converted.
@type targetType: L{BaseException} | def convertError(
sourceType: Type[BaseException], targetType: Callable[[], BaseException]
) -> Generator[None, None, None]:
"""
Convert an error into a different error type.
@param sourceType: The type of exception that should be caught and
converted.
@type sourceType: L{BaseException}
@param targetType: The type of exception to which the original should be
converted.
@type targetType: L{BaseException}
"""
try:
yield
except sourceType as e:
raise targetType().with_traceback(e.__traceback__) |
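A minimal sketch of the intended use, assuming the generator is wrapped with contextlib.contextmanager so it can be used as a context manager (the decorator is not shown here); ConfigError is a hypothetical application exception:
class ConfigError(Exception):
    pass

try:
    with convertError(KeyError, ConfigError):
        {}["missing"]
except ConfigError:
    pass  # the KeyError was converted; its traceback is preserved via with_traceback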
Un-parse the already-parsed args and kwargs back into endpoint syntax.
@param args: C{:}-separated arguments
@param kwargs: C{:} and then C{=}-separated keyword arguments
@return: a string equivalent to the original format which this was parsed
as. | def unparseEndpoint(args: Tuple[object, ...], kwargs: Mapping[str, object]) -> str:
"""
Un-parse the already-parsed args and kwargs back into endpoint syntax.
@param args: C{:}-separated arguments
@param kwargs: C{:} and then C{=}-separated keyword arguments
@return: a string equivalent to the original format which this was parsed
as.
"""
description = ":".join(
[quoteStringArgument(str(arg)) for arg in args]
+ sorted(
"{}={}".format(
quoteStringArgument(str(key)), quoteStringArgument(str(value))
)
for key, value in kwargs.items()
)
)
return description |
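Assuming quoteStringArgument leaves strings without special characters untouched, the output looks like this (note that keyword arguments are emitted in sorted order):
assert unparseEndpoint(("tcp", "80"), {}) == "tcp:80"
assert unparseEndpoint(("tcp",), {"port": "80", "interface": "127.0.0.1"}) == "tcp:interface=127.0.0.1:port=80"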
Wrap an endpoint with PROXY protocol support, so that the transport's
C{getHost} and C{getPeer} methods reflect the attributes of the proxied
connection rather than the underlying connection.
@param wrappedEndpoint: The underlying listening endpoint.
@type wrappedEndpoint: L{IStreamServerEndpoint}
@return: a new listening endpoint that speaks the PROXY protocol.
@rtype: L{IStreamServerEndpoint} | def proxyEndpoint(
wrappedEndpoint: interfaces.IStreamServerEndpoint,
) -> _WrapperServerEndpoint:
"""
Wrap an endpoint with PROXY protocol support, so that the transport's
C{getHost} and C{getPeer} methods reflect the attributes of the proxied
connection rather than the underlying connection.
@param wrappedEndpoint: The underlying listening endpoint.
@type wrappedEndpoint: L{IStreamServerEndpoint}
@return: a new listening endpoint that speaks the PROXY protocol.
@rtype: L{IStreamServerEndpoint}
"""
return _WrapperServerEndpoint(wrappedEndpoint, HAProxyWrappingFactory) |
Construct a version 2 IPv6 header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_INET6/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address/port payload.
Defaults to the combined length of the default addrs/ports.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to C{::1} for source and
destination.
@type addrs: L{bytes}
@param ports: Source and destination ports. Defaults to 8080 for source
and 8888 for destination.
@type ports: L{bytes}
@return: A packet with header, addresses, and ports.
@rtype: L{bytes} | def _makeHeaderIPv6(
sig: bytes = V2_SIGNATURE,
verCom: bytes = b"\x21",
famProto: bytes = b"\x21",
addrLength: bytes = b"\x00\x24",
addrs: bytes = ((b"\x00" * 15) + b"\x01") * 2,
ports: bytes = b"\x1F\x90\x22\xB8",
) -> bytes:
"""
Construct a version 2 IPv6 header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_INET6/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address/port payload.
Defaults to the combined length of the default addrs/ports.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to C{::1} for source and
destination.
@type addrs: L{bytes}
@param ports: Source and destination ports. Defaults to 8080 for source
and 8888 for destination.
@type ports: L{bytes}
@return: A packet with header, addresses, and ports.
@rtype: L{bytes}
"""
return sig + verCom + famProto + addrLength + addrs + ports |
Construct a version 2 IPv4 header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_INET/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address/port payload.
Defaults to the combined length of the default addrs/ports.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to 127.0.0.1 for source and
destination.
@type addrs: L{bytes}
@param ports: Source and destination ports. Defaults to 8080 for source
and 8888 for destination.
@type ports: L{bytes}
@return: A packet with header, addresses, and ports.
@rtype: L{bytes} | def _makeHeaderIPv4(
sig: bytes = V2_SIGNATURE,
verCom: bytes = b"\x21",
famProto: bytes = b"\x11",
addrLength: bytes = b"\x00\x0C",
addrs: bytes = b"\x7F\x00\x00\x01\x7F\x00\x00\x01",
ports: bytes = b"\x1F\x90\x22\xB8",
) -> bytes:
"""
Construct a version 2 IPv4 header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_INET/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address/port payload.
Defaults to the combined length of the default addrs/ports.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to 127.0.0.1 for source and
destination.
@type addrs: L{bytes}
@param ports: Source and destination ports. Defaults to 8080 for source
and 8888 for destination.
@type ports: L{bytes}
@return: A packet with header, addresses, and ports.
@rtype: L{bytes}
"""
return sig + verCom + famProto + addrLength + addrs + ports |
Construct a version 2 UNIX socket header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_UNIX/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address payload. Defaults
to 216 bytes: two 108-byte null-terminated paths.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to C{/home/tests/mysockets/sock}
for source and destination paths.
@type addrs: L{bytes}
@return: A packet with header and addresses.
@rtype: L{bytes} | def _makeHeaderUnix(
sig: bytes = V2_SIGNATURE,
verCom: bytes = b"\x21",
famProto: bytes = b"\x31",
addrLength: bytes = b"\x00\xD8",
addrs: bytes = (
b"\x2F\x68\x6F\x6D\x65\x2F\x74\x65\x73\x74\x73\x2F"
b"\x6D\x79\x73\x6F\x63\x6B\x65\x74\x73\x2F\x73\x6F"
b"\x63\x6B" + (b"\x00" * 82)
)
* 2,
) -> bytes:
"""
Construct a version 2 UNIX socket header with custom bytes.
@param sig: The protocol signature; defaults to valid L{V2_SIGNATURE}.
@type sig: L{bytes}
@param verCom: Protocol version and command. Defaults to V2 PROXY.
@type verCom: L{bytes}
@param famProto: Address family and protocol. Defaults to AF_UNIX/STREAM.
@type famProto: L{bytes}
@param addrLength: Network-endian byte length of the address payload. Defaults
to 216 bytes: two 108-byte null-terminated paths.
@type addrLength: L{bytes}
@param addrs: Address payload. Defaults to C{/home/tests/mysockets/sock}
for source and destination paths.
@type addrs: L{bytes}
@return: A packet with header and addresses.
@rtype: L{bytes}
"""
return sig + verCom + famProto + addrLength + addrs |
Create a protocol hooked up to a TLS transport hooked up to a
StringTransport.
@param serverMethod: The TLS method accepted by the server side and used by the created protocol. Set to C{None} to use the default method used by your OpenSSL library. | def buildTLSProtocol(
server=False, transport=None, fakeConnection=None, serverMethod=None
):
"""
Create a protocol hooked up to a TLS transport hooked up to a
StringTransport.
@param serverMethod: The TLS method accepted by the server side and used by the created protocol. Set to C{None} to use the default method used by your OpenSSL library.
"""
# We want to accumulate bytes without disconnecting, so set high limit:
clientProtocol = AccumulatingProtocol(999999999999)
clientFactory = ClientFactory()
clientFactory.protocol = lambda: clientProtocol
if fakeConnection:
@implementer(IOpenSSLServerConnectionCreator, IOpenSSLClientConnectionCreator)
class HardCodedConnection:
def clientConnectionForTLS(self, tlsProtocol):
return fakeConnection
serverConnectionForTLS = clientConnectionForTLS
contextFactory = HardCodedConnection()
else:
if server:
contextFactory = ServerTLSContext(method=serverMethod)
else:
contextFactory = ClientTLSContext()
clock = Clock()
wrapperFactory = TLSMemoryBIOFactory(
contextFactory, not server, clientFactory, clock
)
sslProtocol = wrapperFactory.buildProtocol(None)
if transport is None:
transport = StringTransport()
sslProtocol.makeConnection(transport)
return clientProtocol, sslProtocol |
Construct a client and server L{TLSMemoryBIOProtocol} connected by an IO
pump.
@param clientGreetingData: The data which the client should write in L{connectionMade}, or L{None} to write nothing.
@type clientGreetingData: L{bytes} or L{None}
@param clientAbortAfterHandshake: If true, the client aborts the connection immediately after the TLS handshake completes.
@return: 3-tuple of client, server, L{twisted.test.iosim.IOPump} | def handshakingClientAndServer(
clientGreetingData=None, clientAbortAfterHandshake=False
):
"""
Construct a client and server L{TLSMemoryBIOProtocol} connected by an IO
pump.
@param clientGreetingData: The data which the client should write in L{connectionMade}, or L{None} to write nothing.
@type clientGreetingData: L{bytes} or L{None}
@param clientAbortAfterHandshake: If true, the client aborts the connection immediately after the TLS handshake completes.
@return: 3-tuple of client, server, L{twisted.test.iosim.IOPump}
"""
authCert, serverCert = certificatesForAuthorityAndServer()
clock = Clock()
@implementer(IHandshakeListener)
class Client(AccumulatingProtocol):
handshook = False
peerAfterHandshake = None
def connectionMade(self):
super().connectionMade()
if clientGreetingData is not None:
self.transport.write(clientGreetingData)
def handshakeCompleted(self):
self.handshook = True
self.peerAfterHandshake = self.transport.getPeerCertificate()
if clientAbortAfterHandshake:
self.transport.abortConnection()
def connectionLost(self, reason):
pass
@implementer(IHandshakeListener)
class Server(AccumulatingProtocol):
handshaked = False
def handshakeCompleted(self):
self.handshaked = True
def connectionLost(self, reason):
pass
clientF = TLSMemoryBIOFactory(
optionsForClientTLS("example.com", trustRoot=authCert),
isClient=True,
wrappedFactory=ClientFactory.forProtocol(lambda: Client(999999)),
clock=clock,
)
serverF = TLSMemoryBIOFactory(
serverCert.options(),
isClient=False,
wrappedFactory=ServerFactory.forProtocol(lambda: Server(999999)),
clock=clock,
)
client, server, pump = connectedServerAndClient(
lambda: serverF.buildProtocol(None),
lambda: clientF.buildProtocol(None),
greet=False,
clock=clock,
)
return client, server, pump |
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable | def iteritems(d):
"""
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
return d.items() |
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable | def itervalues(d):
"""
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
return d.values() |
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list} | def items(d):
"""
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
return list(d.items()) |
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@return: a frame, n levels up the stack from the caller. | def currentframe(n: int = 0) -> FrameType:
"""
In Python 3, L{inspect.currentframe} does not take a stack-level argument.
Restore that functionality from Python 2 so we don't have to re-implement
the C{f_back}-walking loop in places where it's called.
@param n: The number of stack levels above the caller to walk.
@return: a frame, n levels up the stack from the caller.
"""
f = inspect.currentframe()
for x in range(n + 1):
assert f is not None
f = f.f_back
assert f is not None
return f |
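A small self-contained sketch of the stack-level argument:
def outer():
    return inner()

def inner():
    # currentframe(0) is inner's own frame; currentframe(1) is the caller's.
    return currentframe(1).f_code.co_name

assert outer() == "outer"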
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function. | def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
with open(filename, "rb") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec(code, globals, locals) |
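A minimal sketch of running a script into an explicit namespace (the temporary file is only for illustration):
import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".py")
with os.fdopen(fd, "w") as f:
    f.write("result = 6 * 7\n")
namespace = {}
execfile(path, namespace)
assert namespace["result"] == 42
os.remove(path)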
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}. | def cmp(a: object, b: object) -> int:
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b: # type: ignore[operator]
return -1
elif a == b:
return 0
else:
return 1 |
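For example:
assert cmp(1, 2) == -1
assert cmp("a", "a") == 0
assert cmp((2, 0), (1, 9)) == 1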
Class decorator that ensures support for the special C{__cmp__} method.
C{__eq__}, C{__lt__}, etc. methods are added to the class, relying on
C{__cmp__} to implement their comparisons. | def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
C{__eq__}, C{__lt__}, etc. methods are added to the class, relying on
C{__cmp__} to implement their comparisons.
"""
def __eq__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c == 0
def __ne__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c != 0
def __lt__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c < 0
def __le__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c <= 0
def __gt__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c > 0
def __ge__(self: Any, other: object) -> bool:
c = cast(bool, self.__cmp__(other))
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass |
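A sketch of a hypothetical class using the decorator; it only needs to supply C{__cmp__} (here implemented with the cmp helper above) and gains the rich comparison methods:
@comparable
class Version:
    def __init__(self, number):
        self.number = number

    def __cmp__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return cmp(self.number, other.number)

assert Version(1) < Version(2)
assert Version(3) == Version(3)
assert Version(5) >= Version(4)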
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{str}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. The C{default} parameter, if the given type is not understood.
@rtype: L{type} | def ioType(fileIshObject, default=str):
"""
Determine the type which will be returned from the given file object's
read() and accepted by its write() method as an argument.
In other words, determine whether the given file is 'opened in text mode'.
@param fileIshObject: Any object, but ideally one which resembles a file.
@type fileIshObject: L{object}
@param default: A default value to return when the type of C{fileIshObject}
cannot be determined.
@type default: L{type}
@return: There are 3 possible return values:
1. L{str}, if the file is unambiguously opened in text mode.
2. L{bytes}, if the file is unambiguously opened in binary mode.
3. The C{default} parameter, if the given type is not understood.
@rtype: L{type}
"""
if isinstance(fileIshObject, TextIOBase):
# If it's for text I/O, then it's for text I/O.
return str
if isinstance(fileIshObject, IOBase):
# If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
return bytes
encoding = getattr(fileIshObject, "encoding", None)
import codecs
if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
# On StreamReaderWriter, the 'encoding' attribute has special meaning;
# it is unambiguously text.
if encoding:
return str
else:
return bytes
return default |
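For example:
import io

assert ioType(io.StringIO()) is str        # text mode
assert ioType(io.BytesIO()) is bytes       # binary mode
assert ioType(object()) is str             # unknown: falls back to the default
assert ioType(object(), default=None) is None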
Convert C{bytes} or C{str} to C{str} type, using ASCII encoding if
conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{str}. | def nativeString(s: AnyStr) -> str:
"""
Convert C{bytes} or C{str} to C{str} type, using ASCII encoding if
conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{str}.
"""
if not isinstance(s, (bytes, str)):
raise TypeError("%r is neither bytes nor str" % s)
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
return s |
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{str}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{str} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{str} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString} | def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{str}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{str} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{str} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType |
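For example:
assert _matchingString("/", b"foo/bar") == b"/"   # constant coerced to bytes
assert _matchingString("/", "foo/bar") == "/"     # types already match
assert _matchingString(b".", "name.txt") == "."   # constant coerced to text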
Re-raise an exception, with an optional traceback.
Re-raised exceptions will be mutated, with their C{__traceback__} attribute
being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new
traceback. | def reraise(exception, traceback):
"""
Re-raise an exception, with an optional traceback.
Re-raised exceptions will be mutated, with their C{__traceback__} attribute
being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new
traceback.
"""
raise exception.with_traceback(traceback) |
Return an iterable wrapper for a C{bytes} object that provides the behavior
of iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather
than integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped. | def iterbytes(originalBytes):
"""
Return an iterable wrapper for a C{bytes} object that provides the behavior
of iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather
than integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
for i in range(len(originalBytes)):
yield originalBytes[i : i + 1] |
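For example:
assert list(iterbytes(b"abc")) == [b"a", b"b", b"c"]
assert b"".join(iterbytes(b"abc")) == b"abc"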
Convert the given integer into C{bytes}, as an ASCII-encoded decimal string.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes} | def intToBytes(i: int) -> bytes:
"""
Convert the given integer into C{bytes}, as an ASCII-encoded decimal string.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
return b"%d" % (i,) |
Return a view of the given bytes-like object, without copying its data.
If an offset is given, the view starts at that offset. If a size is
given, the view will only be of that length.
@param object: C{bytes} to be sliced.
@param offset: C{int}, starting index of the view.
@param size: Optional, if an C{int} is given limit the length of the view
to this size. | def lazyByteSlice(object, offset=0, size=None):
"""
Return a view of the given bytes-like object, without copying its data.
If an offset is given, the view starts at that offset. If a size is
given, the view will only be of that length.
@param object: C{bytes} to be sliced.
@param offset: C{int}, starting index of the view.
@param size: Optional, if an C{int} is given limit the length of the view
to this size.
"""
view = memoryview(object)
if size is None:
return view[offset:]
else:
return view[offset : (offset + size)] |
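For example (converting the returned memoryview back to bytes for comparison):
data = b"hello world"
assert bytes(lazyByteSlice(data, 6)) == b"world"
assert bytes(lazyByteSlice(data, 0, 5)) == b"hello"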
Convert a string to L{bytes} using ASCII encoding.
This is useful for sending text-like bytes that are constructed using
string interpolation. For example::
networkString("Hello %d" % (n,))
@param s: A string to convert to bytes.
@type s: L{str}
@raise UnicodeError: The input string is not ASCII encodable.
@raise TypeError: The input is not L{str}.
@rtype: L{bytes} | def networkString(s: str) -> bytes:
"""
Convert a string to L{bytes} using ASCII encoding.
This is useful for sending text-like bytes that are constructed using
string interpolation. For example::
networkString("Hello %d" % (n,))
@param s: A string to convert to bytes.
@type s: L{str}
@raise UnicodeError: The input string is not ASCII encodable.
@raise TypeError: The input is not L{str}.
@rtype: L{bytes}
"""
if not isinstance(s, str):
raise TypeError("Can only convert strings to bytes")
return s.encode("ascii") |
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
This function is POSIX only; environment variables are always text strings
on Windows. | def bytesEnviron():
"""
Return a L{dict} of L{os.environ} where all text-strings are encoded into
L{bytes}.
This function is POSIX only; environment variables are always text strings
on Windows.
"""
encodekey = os.environ.encodekey
encodevalue = os.environ.encodevalue
return {encodekey(x): encodevalue(y) for x, y in os.environ.items()} |
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{type}
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{_MethodType} | def _constructMethod(cls, name, self):
"""
Construct a bound method.
@param cls: The class that the method should be bound to.
@type cls: L{type}
@param name: The name of the method.
@type name: native L{str}
@param self: The object that the method is bound to.
@type self: any object
@return: a bound method
@rtype: L{_MethodType}
"""
func = cls.__dict__[name]
return _MethodType(func, self) |
Work around U{https://foss.heptapod.net/pypy/pypy/-/issues/3051}
by replacing C{socket.fromfd} with a more conservative version. | def _pypy3BlockingHack():
"""
Work around U{https://foss.heptapod.net/pypy/pypy/-/issues/3051}
by replacing C{socket.fromfd} with a more conservative version.
"""
try:
from fcntl import F_GETFL, F_SETFL, fcntl
except ImportError:
return
if not _PYPY:
return
def fromFDWithoutModifyingFlags(fd, family, type, proto=None):
passproto = [proto] * (proto is not None)
flags = fcntl(fd, F_GETFL)
try:
return realFromFD(fd, family, type, *passproto)
finally:
fcntl(fd, F_SETFL, flags)
realFromFD = socket.fromfd
if realFromFD.__name__ == fromFDWithoutModifyingFlags.__name__:
return
socket.fromfd = fromFDWithoutModifyingFlags |