code (stringlengths 26–870k) | docstring (stringlengths 1–65.6k) | func_name (stringlengths 1–194) | language (stringclasses: 1 value) | repo (stringlengths 8–68) | path (stringlengths 5–194) | url (stringlengths 46–254) | license (stringclasses: 4 values) |
---|---|---|---|---|---|---|---|
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
            return self.qsize() >= self._maxsize
Return True if there are maxsize items in the queue. | full | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
async def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
"""
while self.full():
putter = self._loop.create_future()
self._putters.append(putter)
try:
await putter
except:
putter.cancel() # Just in case putter is not done yet.
try:
# Clean self._putters from canceled putters.
self._putters.remove(putter)
except ValueError:
# The putter could be removed from self._putters by a
# previous get_nowait call.
pass
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._putters)
raise
        return self.put_nowait(item)
Put an item into the queue. | put | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
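
A minimal producer/consumer sketch of the bounded-queue behaviour described above (the `producer`/`consumer` helpers, `maxsize=2`, and the item counts are illustrative assumptions, not part of asyncio): with a full queue, `put()` suspends until a consumer frees a slot.

```python
import asyncio

async def producer(queue):
    for i in range(5):
        await queue.put(i)      # suspends while the queue already holds maxsize items
        print('produced', i)

async def consumer(queue):
    for _ in range(5):
        item = await queue.get()
        print('consumed', item)
        await asyncio.sleep(0.1)

async def main():
    queue = asyncio.Queue(maxsize=2)
    await asyncio.gather(producer(queue), consumer(queue))

asyncio.run(main())
```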
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
if self.full():
raise QueueFull
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
        self._wakeup_next(self._getters)
Put an item into the queue without blocking. | put_nowait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
"""
while self.empty():
getter = self._loop.create_future()
self._getters.append(getter)
try:
await getter
except:
getter.cancel() # Just in case getter is not done yet.
try:
# Clean self._getters from canceled getters.
self._getters.remove(getter)
except ValueError:
# The getter could be removed from self._getters by a
# previous put_nowait call.
pass
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
        return self.get_nowait()
Remove and return an item from the queue. | get | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
if self.empty():
raise QueueEmpty
item = self._get()
self._wakeup_next(self._putters)
        return item
Remove and return an item from the queue. | get_nowait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
            self._finished.set()
Indicate that a formerly enqueued task is complete. | task_done | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
async def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
            await self._finished.wait()
Block until all items in the queue have been gotten and processed. | join | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/queues.py | MIT |
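
A common worker pattern tying `put_nowait()`, `get()`, `task_done()` and `join()` together; the `worker` helper and the item count are illustrative assumptions, not part of the module.

```python
import asyncio

async def worker(queue):
    while True:
        item = await queue.get()
        print('processing', item)
        queue.task_done()       # one task_done() per successful get()

async def main():
    queue = asyncio.Queue()
    for i in range(10):
        queue.put_nowait(i)
    w = asyncio.create_task(worker(queue))
    await queue.join()          # unblocks once every item has been marked done
    w.cancel()

asyncio.run(main())
```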
async def open_connection(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.create_connection(
lambda: protocol, host, port, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer
A wrapper for create_connection() returning a (reader, writer) pair. | open_connection | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def start_server(client_connected_cb, host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
    return await loop.create_server(factory, host, port, **kwds)
Start a socket server, call back for each client connected. | start_server | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
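
A self-contained sketch wiring `start_server()` and `open_connection()` together in one process; the handler, host and port are illustrative choices, not anything mandated by the API.

```python
import asyncio

async def handle(reader, writer):
    data = await reader.readline()
    writer.write(data.upper())
    await writer.drain()
    writer.close()

async def main():
    server = await asyncio.start_server(handle, '127.0.0.1', 8888)
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
    writer.write(b'hello\n')
    await writer.drain()
    print(await reader.readline())   # b'HELLO\n'
    writer.close()
    server.close()
    await server.wait_closed()

asyncio.run(main())
```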
async def open_unix_connection(path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.create_unix_connection(
lambda: protocol, path, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer
Similar to `open_connection` but works with UNIX Domain Sockets. | open_unix_connection | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def start_unix_server(client_connected_cb, path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
    return await loop.create_unix_server(factory, path, **kwds)
Similar to `start_server` but works with UNIX Domain Sockets. | start_unix_server | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def drain(self):
"""Flush the write buffer.
The intended use is to write
w.write(data)
await w.drain()
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
if self._transport.is_closing():
# Yield to the event loop so connection_lost() may be
# called. Without this, _drain_helper() would return
# immediately, and code that calls
# write(...); await drain()
# in a loop would never call connection_lost(), so it
# would not see an error when the socket is closed.
await sleep(0, loop=self._loop)
        await self._protocol._drain_helper()
Flush the write buffer. | drain | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
def _wakeup_waiter(self):
"""Wakeup read*() functions waiting for data or EOF."""
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
                waiter.set_result(None)
Wakeup read*() functions waiting for data or EOF. | _wakeup_waiter | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
def at_eof(self):
"""Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer
Return True if the buffer is empty and 'feed_eof' was called. | at_eof | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def _wait_for_data(self, func_name):
"""Wait until feed_data() or feed_eof() is called.
If stream was paused, automatically resume it.
"""
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError(
f'{func_name}() called while another coroutine is '
f'already waiting for incoming data')
assert not self._eof, '_wait_for_data after EOF'
        # Waiting for data while paused would deadlock, so prevent it.
# This is essential for readexactly(n) for case when n > self._limit.
if self._paused:
self._paused = False
self._transport.resume_reading()
self._waiter = self._loop.create_future()
try:
await self._waiter
finally:
            self._waiter = None
Wait until feed_data() or feed_eof() is called. | _wait_for_data | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def readline(self):
"""Read chunk of data from the stream until newline (b'\n') is found.
On success, return chunk that ends with newline. If only partial
line can be read due to EOF, return incomplete line without
terminating newline. When EOF was reached while no bytes read, empty
bytes object is returned.
If limit is reached, ValueError will be raised. In that case, if
newline was found, complete line including newline will be removed
from internal buffer. Else, internal buffer will be cleared. Limit is
compared against part of the line without newline.
If stream was paused, this function will automatically resume it if
needed.
"""
sep = b'\n'
seplen = len(sep)
try:
line = await self.readuntil(sep)
except IncompleteReadError as e:
return e.partial
except LimitOverrunError as e:
if self._buffer.startswith(sep, e.consumed):
del self._buffer[:e.consumed + seplen]
else:
self._buffer.clear()
self._maybe_resume_transport()
raise ValueError(e.args[0])
        return line
Read chunk of data from the stream until newline (b'\n') is found. | readline | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def readuntil(self, separator=b'\n'):
"""Read data from the stream until ``separator`` is found.
On success, the data and separator will be removed from the
internal buffer (consumed). Returned data will include the
separator at the end.
Configured stream limit is used to check result. Limit sets the
maximal length of data that can be returned, not counting the
separator.
If an EOF occurs and the complete separator is still not found,
an IncompleteReadError exception will be raised, and the internal
buffer will be reset. The IncompleteReadError.partial attribute
may contain the separator partially.
If the data cannot be read because of over limit, a
LimitOverrunError exception will be raised, and the data
will be left in the internal buffer, so it can be read again.
"""
seplen = len(separator)
if seplen == 0:
raise ValueError('Separator should be at least one-byte string')
if self._exception is not None:
raise self._exception
        # Consume whole buffer except last bytes, whose length is
# one less than seplen. Let's check corner cases with
# separator='SEPARATOR':
# * we have received almost complete separator (without last
# byte). i.e buffer='some textSEPARATO'. In this case we
# can safely consume len(separator) - 1 bytes.
# * last byte of buffer is first byte of separator, i.e.
# buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this requires analyzing
        #   the bytes of the buffer that match a partial separator.
        #   This is slow and/or requires an FSM. For this case our
        #   implementation is not optimal, since it requires rescanning
        #   data that is known not to belong to the separator. In
# real world, separator will not be so long to notice
# performance problems. Even when reading MIME-encoded
# messages :)
# `offset` is the number of bytes from the beginning of the buffer
# where there is no occurrence of `separator`.
offset = 0
# Loop until we find `separator` in the buffer, exceed the buffer size,
# or an EOF has happened.
while True:
buflen = len(self._buffer)
# Check if we now have enough data in the buffer for `separator` to
# fit.
if buflen - offset >= seplen:
isep = self._buffer.find(separator, offset)
if isep != -1:
# `separator` is in the buffer. `isep` will be used later
# to retrieve the data.
break
# see upper comment for explanation.
offset = buflen + 1 - seplen
if offset > self._limit:
raise LimitOverrunError(
'Separator is not found, and chunk exceed the limit',
offset)
# Complete message (with full separator) may be present in buffer
# even when EOF flag is set. This may happen when the last chunk
# adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
if self._eof:
chunk = bytes(self._buffer)
self._buffer.clear()
raise IncompleteReadError(chunk, None)
# _wait_for_data() will resume reading if stream was paused.
await self._wait_for_data('readuntil')
if isep > self._limit:
raise LimitOverrunError(
'Separator is found, but chunk is longer than limit', isep)
chunk = self._buffer[:isep + seplen]
del self._buffer[:isep + seplen]
self._maybe_resume_transport()
        return bytes(chunk)
Read data from the stream until ``separator`` is found. | readuntil | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
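
A small sketch of `readuntil()` driven by hand-fed data; feeding the reader directly is only a way to keep the example self-contained, and the separator and payload are made up.

```python
import asyncio

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b'first record;second record;')
    reader.feed_eof()
    print(await reader.readuntil(b';'))    # b'first record;'
    print(await reader.readuntil(b';'))    # b'second record;'
    try:
        await reader.readuntil(b';')
    except asyncio.IncompleteReadError as exc:
        print('leftover:', exc.partial)    # b'' -- buffer exhausted at EOF

asyncio.run(main())
```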
async def read(self, n=-1):
"""Read up to `n` bytes from the stream.
If n is not provided, or set to -1, read until EOF and return all read
bytes. If the EOF was received and the internal buffer is empty, return
an empty bytes object.
If n is zero, return empty bytes object immediately.
        If n is positive, this function tries to read `n` bytes and may return
        fewer bytes than requested, but at least one byte. If EOF was
received before any byte is read, this function returns empty byte
object.
Returned value is not limited with limit, configured at stream
creation.
If stream was paused, this function will automatically resume it if
needed.
"""
if self._exception is not None:
raise self._exception
if n == 0:
return b''
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
block = await self.read(self._limit)
if not block:
break
blocks.append(block)
return b''.join(blocks)
if not self._buffer and not self._eof:
await self._wait_for_data('read')
# This will work right even if buffer is less than n bytes
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
        return data
Read up to `n` bytes from the stream. | read | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
async def readexactly(self, n):
"""Read exactly `n` bytes.
Raise an IncompleteReadError if EOF is reached before `n` bytes can be
read. The IncompleteReadError.partial attribute of the exception will
contain the partial read bytes.
if n is zero, return empty bytes object.
Returned value is not limited with limit, configured at stream
creation.
If stream was paused, this function will automatically resume it if
needed.
"""
if n < 0:
raise ValueError('readexactly size can not be less than zero')
if self._exception is not None:
raise self._exception
if n == 0:
return b''
while len(self._buffer) < n:
if self._eof:
incomplete = bytes(self._buffer)
self._buffer.clear()
raise IncompleteReadError(incomplete, n)
await self._wait_for_data('readexactly')
if len(self._buffer) == n:
data = bytes(self._buffer)
self._buffer.clear()
else:
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
        return data
Read exactly `n` bytes. | readexactly | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/streams.py | MIT |
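
A sketch of `readexactly()` used for a hypothetical length-prefixed framing; the 4-byte big-endian header and the `read_frame` helper are assumptions made for the example, not an asyncio convention.

```python
import asyncio
import struct

async def read_frame(reader):
    # Hypothetical framing: 4-byte big-endian length, then the payload.
    header = await reader.readexactly(4)
    (length,) = struct.unpack('!I', header)
    return await reader.readexactly(length)

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(struct.pack('!I', 5) + b'hello')
    reader.feed_eof()
    print(await read_frame(reader))   # b'hello'

asyncio.run(main())
```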
def current_task(loop=None):
"""Return a currently executed task."""
if loop is None:
loop = events.get_running_loop()
    return _current_tasks.get(loop)
Return a currently executed task. | current_task | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def all_tasks(loop=None):
"""Return a set of all tasks for the loop."""
if loop is None:
loop = events.get_running_loop()
# Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
# thread while we do so. Therefore we cast it to list prior to filtering. The list
# cast itself requires iteration, so we repeat it several times ignoring
# RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
# details.
i = 0
while True:
try:
tasks = list(_all_tasks)
except RuntimeError:
i += 1
if i >= 1000:
raise
else:
break
return {t for t in tasks
            if futures._get_loop(t) is loop and not t.done()}
Return a set of all tasks for the loop. | all_tasks | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
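
A quick illustration of the module-level `current_task()`/`all_tasks()` helpers; the `napper` coroutine and the sleeps are illustrative.

```python
import asyncio

async def napper():
    await asyncio.sleep(1)

async def main():
    asyncio.create_task(napper())
    asyncio.create_task(napper())
    print(asyncio.current_task())     # the Task running main()
    print(len(asyncio.all_tasks()))   # 3: main() plus the two nappers

asyncio.run(main())
```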
def current_task(cls, loop=None):
"""Return the currently running task in an event loop or None.
By default the current task for the current event loop is returned.
None is returned when called not in the context of a Task.
"""
warnings.warn("Task.current_task() is deprecated, "
"use asyncio.current_task() instead",
PendingDeprecationWarning,
stacklevel=2)
if loop is None:
loop = events.get_event_loop()
        return current_task(loop)
Return the currently running task in an event loop or None. | current_task | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def all_tasks(cls, loop=None):
"""Return a set of all tasks for an event loop.
By default all tasks for the current event loop are returned.
"""
warnings.warn("Task.all_tasks() is deprecated, "
"use asyncio.all_tasks() instead",
PendingDeprecationWarning,
stacklevel=2)
        return _all_tasks_compat(loop)
Return a set of all tasks for an event loop. | all_tasks | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def get_stack(self, *, limit=None):
"""Return the list of stack frames for this task's coroutine.
If the coroutine is not done, this returns the stack where it is
suspended. If the coroutine has completed successfully or was
cancelled, this returns an empty list. If the coroutine was
terminated by an exception, this returns the list of traceback
frames.
The frames are always ordered from oldest to newest.
The optional limit gives the maximum number of frames to
return; by default all available frames are returned. Its
meaning differs depending on whether a stack or a traceback is
returned: the newest frames of a stack are returned, but the
oldest frames of a traceback are returned. (This matches the
behavior of the traceback module.)
For reasons beyond our control, only one stack frame is
returned for a suspended coroutine.
"""
        return base_tasks._task_get_stack(self, limit)
Return the list of stack frames for this task's coroutine. | get_stack | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def print_stack(self, *, limit=None, file=None):
"""Print the stack or traceback for this task's coroutine.
This produces output similar to that of the traceback module,
for the frames retrieved by get_stack(). The limit argument
is passed to get_stack(). The file argument is an I/O stream
to which the output is written; by default output is written
to sys.stderr.
"""
        return base_tasks._task_print_stack(self, limit, file)
Print the stack or traceback for this task's coroutine. | print_stack | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def cancel(self):
"""Request that this task cancel itself.
This arranges for a CancelledError to be thrown into the
wrapped coroutine on the next cycle through the event loop.
The coroutine then has a chance to clean up or even deny
the request using try/except/finally.
Unlike Future.cancel, this does not guarantee that the
task will be cancelled: the exception might be caught and
acted upon, delaying cancellation of the task or preventing
cancellation completely. The task may also return a value or
raise a different exception.
Immediately after this method is called, Task.cancelled() will
not return True (unless the task was already cancelled). A
task will be marked as cancelled when the wrapped coroutine
terminates with a CancelledError exception (even if cancel()
was not called).
"""
self._log_traceback = False
if self.done():
return False
if self._fut_waiter is not None:
if self._fut_waiter.cancel():
# Leave self._fut_waiter; it may be a Task that
# catches and ignores the cancellation so we may have
# to cancel it again later.
return True
# It must be the case that self.__step is already scheduled.
self._must_cancel = True
        return True
Request that this task cancel itself. | cancel | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
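
A sketch of the cancellation flow described in the docstring: `cancel()` only requests cancellation, and the wrapped coroutine sees a `CancelledError` on the next pass through the event loop. The `job` coroutine and the sleeps are illustrative.

```python
import asyncio

async def job():
    try:
        await asyncio.sleep(10)
    except asyncio.CancelledError:
        print('cleaning up')
        raise                          # re-raise so the task is marked cancelled

async def main():
    task = asyncio.create_task(job())
    await asyncio.sleep(0)             # let job() start
    task.cancel()                      # request; the exception lands on the next cycle
    try:
        await task
    except asyncio.CancelledError:
        print('task cancelled:', task.cancelled())   # True

asyncio.run(main())
```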
def create_task(coro):
"""Schedule the execution of a coroutine object in a spawn task.
Return a Task object.
"""
loop = events.get_running_loop()
    return loop.create_task(coro)
Schedule the execution of a coroutine object in a spawn task. | create_task | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the Futures and coroutines given by fs to complete.
The sequence futures must not be empty.
Coroutines will be wrapped in Tasks.
Returns two sets of Future: (done, pending).
Usage:
done, pending = await asyncio.wait(fs)
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
if not fs:
raise ValueError('Set of coroutines/Futures is empty.')
if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
raise ValueError(f'Invalid return_when value: {return_when}')
if loop is None:
loop = events.get_event_loop()
fs = {ensure_future(f, loop=loop) for f in set(fs)}
    return await _wait(fs, timeout, return_when, loop)
Wait for the Futures and coroutines given by fs to complete. | wait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
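
A sketch of `wait()` with `FIRST_COMPLETED`; the task names and delays are made up.

```python
import asyncio

async def main():
    fast = asyncio.create_task(asyncio.sleep(0.1, result='fast'))
    slow = asyncio.create_task(asyncio.sleep(1.0, result='slow'))
    done, pending = await asyncio.wait({fast, slow},
                                       return_when=asyncio.FIRST_COMPLETED)
    print([t.result() for t in done])   # ['fast']
    for t in pending:
        t.cancel()

asyncio.run(main())
```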
async def wait_for(fut, timeout, *, loop=None):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
Returns result of the Future or coroutine. When a timeout occurs,
it cancels the task and raises TimeoutError. To avoid the task
cancellation, wrap it in shield().
If the wait is cancelled, the task is also cancelled.
This function is a coroutine.
"""
if loop is None:
loop = events.get_event_loop()
if timeout is None:
return await fut
if timeout <= 0:
fut = ensure_future(fut, loop=loop)
if fut.done():
return fut.result()
fut.cancel()
raise futures.TimeoutError()
waiter = loop.create_future()
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
cb = functools.partial(_release_waiter, waiter)
fut = ensure_future(fut, loop=loop)
fut.add_done_callback(cb)
try:
# wait until the future completes or the timeout
try:
await waiter
except futures.CancelledError:
fut.remove_done_callback(cb)
fut.cancel()
raise
if fut.done():
return fut.result()
else:
fut.remove_done_callback(cb)
# We must ensure that the task is not running
# after wait_for() returns.
# See https://bugs.python.org/issue32751
await _cancel_and_wait(fut, loop=loop)
raise futures.TimeoutError()
finally:
        timeout_handle.cancel()
Wait for the single Future or coroutine to complete, with timeout. | wait_for | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
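
A minimal timeout sketch: on timeout the inner task is cancelled and `TimeoutError` is raised, as described above. The delays are illustrative.

```python
import asyncio

async def main():
    try:
        await asyncio.wait_for(asyncio.sleep(10), timeout=0.1)
    except asyncio.TimeoutError:
        print('timed out; the inner task was cancelled')

asyncio.run(main())
```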
async def _wait(fs, timeout, return_when, loop):
"""Internal helper for wait().
The fs argument must be a collection of Futures.
"""
assert fs, 'Set of Futures is empty.'
waiter = loop.create_future()
timeout_handle = None
if timeout is not None:
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
counter = len(fs)
def _on_completion(f):
nonlocal counter
counter -= 1
if (counter <= 0 or
return_when == FIRST_COMPLETED or
return_when == FIRST_EXCEPTION and (not f.cancelled() and
f.exception() is not None)):
if timeout_handle is not None:
timeout_handle.cancel()
if not waiter.done():
waiter.set_result(None)
for f in fs:
f.add_done_callback(_on_completion)
try:
await waiter
finally:
if timeout_handle is not None:
timeout_handle.cancel()
for f in fs:
f.remove_done_callback(_on_completion)
done, pending = set(), set()
for f in fs:
if f.done():
done.add(f)
else:
pending.add(f)
    return done, pending
Internal helper for wait(). | _wait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
async def _cancel_and_wait(fut, loop):
"""Cancel the *fut* future or task and wait until it completes."""
waiter = loop.create_future()
cb = functools.partial(_release_waiter, waiter)
fut.add_done_callback(cb)
try:
fut.cancel()
# We cannot wait on *fut* directly to make
# sure _cancel_and_wait itself is reliably cancellable.
await waiter
finally:
        fut.remove_done_callback(cb)
Cancel the *fut* future or task and wait until it completes. | _cancel_and_wait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def as_completed(fs, *, loop=None, timeout=None):
"""Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
exceptions!) of the original Futures (or coroutines), in the order
in which and as soon as they complete.
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
result = await f # The 'await' may raise.
# Use result.
If a timeout is specified, the 'await' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
if futures.isfuture(fs) or coroutines.iscoroutine(fs):
raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
loop = loop if loop is not None else events.get_event_loop()
todo = {ensure_future(f, loop=loop) for f in set(fs)}
from .queues import Queue # Import here to avoid circular import problem.
done = Queue(loop=loop)
timeout_handle = None
def _on_timeout():
for f in todo:
f.remove_done_callback(_on_completion)
done.put_nowait(None) # Queue a dummy value for _wait_for_one().
todo.clear() # Can't do todo.remove(f) in the loop.
def _on_completion(f):
if not todo:
return # _on_timeout() was here first.
todo.remove(f)
done.put_nowait(f)
if not todo and timeout_handle is not None:
timeout_handle.cancel()
async def _wait_for_one():
f = await done.get()
if f is None:
# Dummy value from _on_timeout().
raise futures.TimeoutError
return f.result() # May raise f.exception().
for f in todo:
f.add_done_callback(_on_completion)
if todo and timeout is not None:
timeout_handle = loop.call_later(timeout, _on_timeout)
for _ in range(len(todo)):
        yield _wait_for_one()
Return an iterator whose values are coroutines. | as_completed | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
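
A sketch of consuming `as_completed()` in completion order; the delays are illustrative.

```python
import asyncio

async def main():
    coros = [asyncio.sleep(d, result=d) for d in (0.3, 0.1, 0.2)]
    for fut in asyncio.as_completed(coros, timeout=1.0):
        print(await fut)    # prints 0.1, 0.2, 0.3 -- completion order

asyncio.run(main())
```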
def __sleep0():
"""Skip one event loop run cycle.
This is a private helper for 'asyncio.sleep()', used
when the 'delay' is set to 0. It uses a bare 'yield'
expression (which Task.__step knows how to handle)
instead of creating a Future object.
"""
    yield
Skip one event loop run cycle. | __sleep0 | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
async def sleep(delay, result=None, *, loop=None):
"""Coroutine that completes after a given time (in seconds)."""
if delay <= 0:
await __sleep0()
return result
if loop is None:
loop = events.get_event_loop()
future = loop.create_future()
h = loop.call_later(delay,
futures._set_result_unless_cancelled,
future, result)
try:
return await future
finally:
        h.cancel()
Coroutine that completes after a given time (in seconds). | sleep | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def ensure_future(coro_or_future, *, loop=None):
"""Wrap a coroutine or an awaitable in a future.
If the argument is a Future, it is returned directly.
"""
if coroutines.iscoroutine(coro_or_future):
if loop is None:
loop = events.get_event_loop()
task = loop.create_task(coro_or_future)
if task._source_traceback:
del task._source_traceback[-1]
return task
elif futures.isfuture(coro_or_future):
if loop is not None and loop is not futures._get_loop(coro_or_future):
raise ValueError('loop argument must agree with Future')
return coro_or_future
elif inspect.isawaitable(coro_or_future):
return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
else:
raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
                        'required')
Wrap a coroutine or an awaitable in a future. | ensure_future | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def _wrap_awaitable(awaitable):
"""Helper for asyncio.ensure_future().
Wraps awaitable (an object with __await__) into a coroutine
that will later be wrapped in a Task by ensure_future().
"""
    return (yield from awaitable.__await__())
Helper for asyncio.ensure_future(). | _wrap_awaitable | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def gather(*coros_or_futures, loop=None, return_exceptions=False):
"""Return a future aggregating results from the given coroutines/futures.
Coroutines will be wrapped in a future and scheduled in the event
loop. They will not necessarily be scheduled in the same order as
passed in.
All futures must share the same event loop. If all the tasks are
done successfully, the returned future's result is the list of
results (in the order of the original sequence, not necessarily
the order of results arrival). If *return_exceptions* is True,
exceptions in the tasks are treated the same as successful
results, and gathered in the result list; otherwise, the first
raised exception will be immediately propagated to the returned
future.
Cancellation: if the outer Future is cancelled, all children (that
have not completed yet) are also cancelled. If any child is
cancelled, this is treated as if it raised CancelledError --
the outer Future is *not* cancelled in this case. (This is to
prevent the cancellation of one child to cause other children to
be cancelled.)
"""
if not coros_or_futures:
if loop is None:
loop = events.get_event_loop()
outer = loop.create_future()
outer.set_result([])
return outer
def _done_callback(fut):
nonlocal nfinished
nfinished += 1
if outer.done():
if not fut.cancelled():
# Mark exception retrieved.
fut.exception()
return
if not return_exceptions:
if fut.cancelled():
# Check if 'fut' is cancelled first, as
# 'fut.exception()' will *raise* a CancelledError
# instead of returning it.
exc = futures.CancelledError()
outer.set_exception(exc)
return
else:
exc = fut.exception()
if exc is not None:
outer.set_exception(exc)
return
if nfinished == nfuts:
# All futures are done; create a list of results
# and set it to the 'outer' future.
results = []
for fut in children:
if fut.cancelled():
# Check if 'fut' is cancelled first, as
# 'fut.exception()' will *raise* a CancelledError
# instead of returning it.
res = futures.CancelledError()
else:
res = fut.exception()
if res is None:
res = fut.result()
results.append(res)
if outer._cancel_requested:
# If gather is being cancelled we must propagate the
# cancellation regardless of *return_exceptions* argument.
# See issue 32684.
outer.set_exception(futures.CancelledError())
else:
outer.set_result(results)
arg_to_fut = {}
children = []
nfuts = 0
nfinished = 0
for arg in coros_or_futures:
if arg not in arg_to_fut:
fut = ensure_future(arg, loop=loop)
if loop is None:
loop = futures._get_loop(fut)
if fut is not arg:
# 'arg' was not a Future, therefore, 'fut' is a new
# Future created specifically for 'arg'. Since the caller
# can't control it, disable the "destroy pending task"
# warning.
fut._log_destroy_pending = False
nfuts += 1
arg_to_fut[arg] = fut
fut.add_done_callback(_done_callback)
else:
# There's a duplicate Future object in coros_or_futures.
fut = arg_to_fut[arg]
children.append(fut)
outer = _GatheringFuture(children, loop=loop)
    return outer
Return a future aggregating results from the given coroutines/futures. | gather | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
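
A short sketch of `return_exceptions=True`: the failing child's exception is returned in the result list instead of propagating. The `ok`/`boom` coroutines are made up for the example.

```python
import asyncio

async def ok():
    return 'ok'

async def boom():
    raise RuntimeError('boom')

async def main():
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    print(results)    # ['ok', RuntimeError('boom')] -- exception kept as a result

asyncio.run(main())
```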
def shield(arg, *, loop=None):
"""Wait for a future, shielding it from cancellation.
The statement
res = await shield(something())
is exactly equivalent to the statement
res = await something()
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
something(), the cancellation did not happen. But its caller is
still cancelled, so the yield-from expression still raises
CancelledError. Note: If something() is cancelled by other means
this will still cancel shield().
If you want to completely ignore cancellation (not recommended)
you can combine shield() with a try/except clause, as follows:
try:
res = await shield(something())
except CancelledError:
res = None
"""
inner = ensure_future(arg, loop=loop)
if inner.done():
# Shortcut.
return inner
loop = futures._get_loop(inner)
outer = loop.create_future()
def _inner_done_callback(inner):
if outer.cancelled():
if not inner.cancelled():
# Mark inner's result as retrieved.
inner.exception()
return
if inner.cancelled():
outer.cancel()
else:
exc = inner.exception()
if exc is not None:
outer.set_exception(exc)
else:
outer.set_result(inner.result())
def _outer_done_callback(outer):
if not inner.done():
inner.remove_done_callback(_inner_done_callback)
inner.add_done_callback(_inner_done_callback)
outer.add_done_callback(_outer_done_callback)
    return outer
Wait for a future, shielding it from cancellation. | shield | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
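
A sketch of the shielding behaviour described above: cancelling the outer await raises `CancelledError` in the caller while the inner task keeps running. The names and delays are illustrative.

```python
import asyncio

async def critical():
    await asyncio.sleep(0.2)
    return 'committed'

async def caller():
    # Cancelling caller() cancels only this await, not the inner task.
    return await asyncio.shield(critical())

async def main():
    task = asyncio.create_task(caller())
    await asyncio.sleep(0.05)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print('caller cancelled; the inner task keeps running')
    await asyncio.sleep(0.3)    # give critical() time to finish

asyncio.run(main())
```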
def run_coroutine_threadsafe(coro, loop):
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future()
def callback():
try:
futures._chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc:
if future.set_running_or_notify_cancel():
future.set_exception(exc)
raise
loop.call_soon_threadsafe(callback)
    return future
Submit a coroutine object to a given event loop. | run_coroutine_threadsafe | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
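
A sketch of submitting a coroutine to a running loop from a plain thread; the `add` coroutine, the delays and the timeout are illustrative assumptions.

```python
import asyncio
import threading

async def add(a, b):
    await asyncio.sleep(0.1)
    return a + b

def submit_from_thread(loop):
    # Runs in a plain thread: hand the coroutine to the loop's thread.
    future = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
    print(future.result(timeout=5))   # 5 -- blocks this thread, not the loop

async def main():
    loop = asyncio.get_running_loop()
    t = threading.Thread(target=submit_from_thread, args=(loop,))
    t.start()
    await asyncio.sleep(0.5)          # keep the loop running while the thread waits
    t.join()

asyncio.run(main())
```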
def _register_task(task):
"""Register a new task in asyncio as executed by loop."""
    _all_tasks.add(task)
Register a new task in asyncio as executed by loop. | _register_task | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def _unregister_task(task):
"""Unregister a task."""
    _all_tasks.discard(task)
Unregister a task. | _unregister_task | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/tasks.py | MIT |
def connection_made(self, transport):
"""Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called.
""" | Called when a connection is made.
The argument is the transport representing the pipe connection.
To receive data, wait for data_received() calls.
When the connection is closed, connection_lost() is called. | connection_made | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def connection_lost(self, exc):
"""Called when the connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
""" | Called when the connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed). | connection_lost | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def pause_writing(self):
"""Called when the transport's buffer goes over the high-water mark.
Pause and resume calls are paired -- pause_writing() is called
once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increase the buffer size even
more), and eventually resume_writing() is called once when the
buffer size reaches the low-water mark.
Note that if the buffer size equals the high-water mark,
pause_writing() is not called -- it must go strictly over.
Conversely, resume_writing() is called when the buffer size is
equal or lower than the low-water mark. These end conditions
are important to ensure that things go as expected when either
mark is zero.
NOTE: This is the only Protocol callback that is not called
through EventLoop.call_soon() -- if it were, it would have no
effect when it's most needed (when the app keeps writing
without yielding until pause_writing() is called).
""" | Called when the transport's buffer goes over the high-water mark.
Pause and resume calls are paired -- pause_writing() is called
once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increase the buffer size even
more), and eventually resume_writing() is called once when the
buffer size reaches the low-water mark.
Note that if the buffer size equals the high-water mark,
pause_writing() is not called -- it must go strictly over.
Conversely, resume_writing() is called when the buffer size is
equal or lower than the low-water mark. These end conditions
are important to ensure that things go as expected when either
mark is zero.
NOTE: This is the only Protocol callback that is not called
through EventLoop.call_soon() -- if it were, it would have no
effect when it's most needed (when the app keeps writing
without yielding until pause_writing() is called). | pause_writing | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def resume_writing(self):
"""Called when the transport's buffer drains below the low-water mark.
See pause_writing() for details.
""" | Called when the transport's buffer drains below the low-water mark.
See pause_writing() for details. | resume_writing | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
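The pause_writing()/resume_writing() contract described above is easiest to see in a concrete protocol. A minimal sketch, with illustrative class and attribute names that are not part of asyncio:

import asyncio

class ThrottledSender(asyncio.Protocol):
    # Illustrative protocol that honours transport flow control.
    def connection_made(self, transport):
        self.transport = transport
        self.paused = False

    def pause_writing(self):
        # Buffer went strictly over the high-water mark: stop producing.
        self.paused = True

    def resume_writing(self):
        # Buffer drained to or below the low-water mark: produce again.
        self.paused = False

    def send(self, data: bytes):
        if not self.paused:
            self.transport.write(data)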
def data_received(self, data):
"""Called when some data is received.
The argument is a bytes object.
""" | Called when some data is received.
The argument is a bytes object. | data_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def eof_received(self):
"""Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
""" | Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol. | eof_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def get_buffer(self, sizehint):
"""Called to allocate a new receive buffer.
*sizehint* is a recommended minimal size for the returned
buffer. When set to -1, the buffer size can be arbitrary.
Must return an object that implements the
:ref:`buffer protocol <bufferobjects>`.
It is an error to return a zero-sized buffer.
""" | Called to allocate a new receive buffer.
*sizehint* is a recommended minimal size for the returned
buffer. When set to -1, the buffer size can be arbitrary.
Must return an object that implements the
:ref:`buffer protocol <bufferobjects>`.
It is an error to return a zero-sized buffer. | get_buffer | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def buffer_updated(self, nbytes):
"""Called when the buffer was updated with the received data.
*nbytes* is the total number of bytes that were written to
the buffer.
""" | Called when the buffer was updated with the received data.
*nbytes* is the total number of bytes that were written to
the buffer. | buffer_updated | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
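get_buffer()/buffer_updated() belong to the buffered-protocol API (asyncio.BufferedProtocol, added in Python 3.7 as a provisional API). A hypothetical receiver that reuses one preallocated buffer might look like this sketch:

import asyncio

class SingleBufferEcho(asyncio.BufferedProtocol):
    # Illustrative: the transport reads straight into self._buf.
    def connection_made(self, transport):
        self.transport = transport
        self._buf = bytearray(64 * 1024)

    def get_buffer(self, sizehint):
        # sizehint of -1 means any size is acceptable; never return a
        # zero-sized buffer.
        return self._buf

    def buffer_updated(self, nbytes):
        # The transport wrote nbytes into self._buf; echo them back.
        self.transport.write(bytes(self._buf[:nbytes]))

    def eof_received(self):
        return False  # let the transport close itself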
def eof_received(self):
"""Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
""" | Called when the other end calls write_eof() or equivalent.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol. | eof_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def datagram_received(self, data, addr):
"""Called when some datagram is received.""" | Called when some datagram is received. | datagram_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def error_received(self, exc):
"""Called when a send or receive operation raises an OSError.
(Other than BlockingIOError or InterruptedError.)
""" | Called when a send or receive operation raises an OSError.
(Other than BlockingIOError or InterruptedError.) | error_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def pipe_data_received(self, fd, data):
"""Called when the subprocess writes data into stdout/stderr pipe.
fd is int file descriptor.
data is bytes object.
""" | Called when the subprocess writes data into stdout/stderr pipe.
fd is int file descriptor.
data is bytes object. | pipe_data_received | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def pipe_connection_lost(self, fd, exc):
"""Called when a file descriptor associated with the child process is
closed.
fd is the int file descriptor that was closed.
""" | Called when a file descriptor associated with the child process is
closed.
fd is the int file descriptor that was closed. | pipe_connection_lost | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def process_exited(self):
"""Called when subprocess has exited.""" | Called when subprocess has exited. | process_exited | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/protocols.py | MIT |
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass | Dummy signal handler. | _sighandler_noop | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError("coroutines cannot be used "
"with add_signal_handler()")
self._check_signal(sig)
self._check_closed()
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self, None)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError(f'sig {sig} cannot be caught')
else:
raise | Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler. | add_signal_handler | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
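A hedged usage sketch for add_signal_handler() as documented above -- note that it accepts plain callables only (coroutines are rejected) and is Unix-only:

import asyncio
import signal

async def main():
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)   # plain callable, not a coroutine
    await stop.wait()                            # run until a signal arrives
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.remove_signal_handler(sig)

# asyncio.run(main())  # uncomment to run; requires a Unix event loop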
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle) | Internal helper that is the actual signal handler. | _handle_signal | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError(f'sig {sig} cannot be caught')
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True | Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not. | remove_signal_handler | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError(f'sig must be an int, not {sig!r}')
if not (1 <= sig < signal.NSIG):
raise ValueError(f'sig {sig} out of range(1, {signal.NSIG})') | Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler. | _check_signal | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def add_child_handler(self, pid, callback, *args):
"""Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
raise NotImplementedError() | Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe. | add_child_handler | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def remove_child_handler(self, pid):
"""Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove."""
raise NotImplementedError() | Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove. | remove_child_handler | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def attach_loop(self, loop):
"""Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
raise NotImplementedError() | Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None. | attach_loop | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def close(self):
"""Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
raise NotImplementedError() | Close the watcher.
This must be called to make sure that any underlying resource is freed. | close | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def __enter__(self):
"""Enter the watcher's context and allow starting new processes
This function must return self"""
raise NotImplementedError() | Enter the watcher's context and allow starting new processes
This function must return self | __enter__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def __exit__(self, a, b, c):
"""Exit the watcher's context"""
raise NotImplementedError() | Exit the watcher's context | __exit__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def set_event_loop(self, loop):
"""Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher.
"""
super().set_event_loop(loop)
if (self._watcher is not None and
isinstance(threading.current_thread(), threading._MainThread)):
self._watcher.attach_loop(loop) | Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher. | set_event_loop | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def get_child_watcher(self):
"""Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
self._init_watcher()
return self._watcher | Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created. | get_child_watcher | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
assert watcher is None or isinstance(watcher, AbstractChildWatcher)
if self._watcher is not None:
self._watcher.close()
self._watcher = watcher | Set the watcher for child processes. | set_child_watcher | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/unix_events.py | MIT |
async def wait(self):
"""Wait until the process exit and return the process return code."""
return await self._transport._wait() | Wait until the process exit and return the process return code. | wait | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/subprocess.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/subprocess.py | MIT |
def coroutine(func):
"""Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
if inspect.iscoroutinefunction(func):
# In Python 3.5 that's all we need to do for coroutines
# defined with "async def".
return func
if inspect.isgeneratorfunction(func):
coro = func
else:
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
if (base_futures.isfuture(res) or inspect.isgenerator(res) or
isinstance(res, CoroWrapper)):
res = yield from res
else:
# If 'res' is an awaitable, run it.
try:
await_meth = res.__await__
except AttributeError:
pass
else:
if isinstance(res, collections.abc.Awaitable):
res = yield from await_meth()
return res
coro = types.coroutine(coro)
if not _DEBUG:
wrapper = coro
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func=func)
if w._source_traceback:
del w._source_traceback[-1]
# Python < 3.5 does not implement __qualname__
# on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial) may lack __qualname__.
w.__name__ = getattr(func, '__name__', None)
w.__qualname__ = getattr(func, '__qualname__', None)
return w
wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction().
return wrapper | Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged. | coroutine | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | MIT |
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function."""
return (inspect.iscoroutinefunction(func) or
getattr(func, '_is_coroutine', None) is _is_coroutine) | Return True if func is a decorated coroutine function. | iscoroutinefunction | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | MIT |
def iscoroutine(obj):
"""Return True if obj is a coroutine object."""
if type(obj) in _iscoroutine_typecache:
return True
if isinstance(obj, _COROUTINE_TYPES):
        # As a precaution, don't cache more than 100 positive types.
        # That shouldn't ever happen, unless someone is stressing
        # the system on purpose.
if len(_iscoroutine_typecache) < 100:
_iscoroutine_typecache.add(type(obj))
return True
else:
return False | Return True if obj is a coroutine object. | iscoroutine | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/coroutines.py | MIT |
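The difference between the two predicates above is the callable versus the object it returns; the public asyncio wrappers expose the same functions, so a quick illustration:

import asyncio

async def job():
    return 42

print(asyncio.iscoroutinefunction(job))   # True: the callable itself
coro = job()
print(asyncio.iscoroutine(coro))          # True: the object it returned
print(asyncio.iscoroutinefunction(coro))  # False
coro.close()                              # silence the "never awaited" warning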
def _format_args_and_kwargs(args, kwargs):
"""Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
items = []
if args:
items.extend(reprlib.repr(arg) for arg in args)
if kwargs:
items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
return '({})'.format(', '.join(items)) | Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello'). | _format_args_and_kwargs | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/format_helpers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/format_helpers.py | MIT |
def extract_stack(f=None, limit=None):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
if f is None:
f = sys._getframe().f_back
if limit is None:
# Limit the amount of work to a reasonable amount, as extract_stack()
# can be called for each coroutine and future in debug mode.
limit = constants.DEBUG_STACK_DEPTH
stack = traceback.StackSummary.extract(traceback.walk_stack(f),
limit=limit,
lookup_lines=False)
stack.reverse()
return stack | Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode. | extract_stack | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/format_helpers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/asyncio/format_helpers.py | MIT |
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None | Use the specified filename for streamed logging | __init__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record) | Emit a record.
Output the record to the file, catering for rollover as described
in doRollover(). | emit | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result | Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file. | rotation_filename | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest) | When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'. | rotate | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
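The namer/rotator hooks described above exist for exactly this kind of customization; a commonly used pattern is compressing rotated files. The gzip variant below is an illustration, not part of the handler:

import gzip
import os
import logging.handlers

def namer(default_name):
    return default_name + ".gz"

def rotator(source, dest):
    # Compress the old log into dest and drop the uncompressed source.
    with open(source, "rb") as f_in, gzip.open(dest, "wb") as f_out:
        f_out.writelines(f_in)
    os.remove(source)

handler = logging.handlers.RotatingFileHandler("app.log", maxBytes=1_000_000,
                                               backupCount=3)
handler.namer = namer
handler.rotator = rotator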
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount | Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs. | __init__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
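A minimal configuration matching the "app.log" description in the docstring above (the sizes here are arbitrary):

import logging
import logging.handlers

logger = logging.getLogger("app")
logger.addHandler(logging.handlers.RotatingFileHandler(
    "app.log", maxBytes=1_000_000, backupCount=5))  # app.log, app.log.1 .. app.log.5
logger.warning("each record may eventually push app.log over maxBytes")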
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open() | Do a rollover, as described in __init__(). | doRollover | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0 | Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have. | shouldRollover | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it is now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result | Work out the rollover time based on the specified time. | computeRollover | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
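In practice the rollover arithmetic above is driven entirely by the constructor arguments; a typical nightly-rotation setup, with illustrative values, looks like this:

import logging.handlers

handler = logging.handlers.TimedRotatingFileHandler(
    "app.log",
    when="midnight",  # roll at 00:00 local time (or at `atTime`, if given)
    backupCount=7,    # keep a week of dated app.log.YYYY-MM-DD files
)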
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0 | Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same | shouldRollover | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result | Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob(). | getFilesToDelete | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt | do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix. | doRollover | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream() | Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream. | reopenIfNeeded | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record) | Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it. | emit | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
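WatchedFileHandler is aimed at setups where an external tool such as logrotate moves the file out from under the process; usage is the same as FileHandler (the path below is illustrative):

import logging
import logging.handlers

handler = logging.handlers.WatchedFileHandler("/var/log/myapp.log")
logging.getLogger("myapp").addHandler(handler)
# If logrotate renames the file, the next emit() notices the changed
# device/inode and reopens /var/log/myapp.log before writing.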
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0 | Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call. | __init__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result | A factory method which allows subclasses to define the precise
type of socket they want. | makeSocket | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod | Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored. | createSocket | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time | Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy. | send | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s | Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket. | makePickle | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record) | Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event. | handleError | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record) | Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket. | emit | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release() | Closes the socket. | close | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False | Initializes the handler with a specific host address and port. | __init__ | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s | The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM). | makeSocket | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address) | Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence. | send | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority | Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers. | encodePriority | python | sajjadium/ctf-archives | ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | https://github.com/sajjadium/ctf-archives/blob/master/ctfs/TyphoonCon/2022/pwn/beautifier_player/python3.7/lib/python3.7/logging/handlers.py | MIT |
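This method belongs to SysLogHandler in logging.handlers; the mapping tables it mentions are class attributes, so the (facility << 3) | priority arithmetic can be checked directly. For example, facility "user" (1) and priority "info" (6) encode to 14:

import logging.handlers

SysLog = logging.handlers.SysLogHandler
facility = SysLog.facility_names["user"]   # 1
priority = SysLog.priority_names["info"]   # 6
assert (facility << 3) | priority == 14    # the value encodePriority() would return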