_id (stringlengths 2-7) | title (stringlengths 1-88) | partition (stringclasses 3 values) | text (stringlengths 31-13.1k) | language (stringclasses 1 value) | meta_information (dict) |
---|---|---|---|---|---|
q600
|
MockRedis._hincrby
|
train
|
def _hincrby(self, hashkey, attribute, command, type_, increment):
"""Shared hincrby and hincrbyfloat routine"""
redis_hash = self._get_hash(hashkey, command, create=True)
attribute = self._encode(attribute)
|
python
|
{
"resource": ""
}
|
q601
|
MockRedis.lrange
|
train
|
def lrange(self, key, start, stop):
"""Emulate lrange."""
redis_list = self._get_list(key, 'LRANGE')
|
python
|
{
"resource": ""
}
|
q602
|
MockRedis.lindex
|
train
|
def lindex(self, key, index):
"""Emulate lindex."""
redis_list = self._get_list(key, 'LINDEX')
if self._encode(key) not in self.redis:
return None
try:
|
python
|
{
"resource": ""
}
|
q603
|
MockRedis._blocking_pop
|
train
|
def _blocking_pop(self, pop_func, keys, timeout):
"""Emulate blocking pop functionality"""
if not isinstance(timeout, (int, long)):
raise RuntimeError('timeout is not an integer or out of range')
if timeout is None or timeout == 0:
timeout = self.blocking_timeout
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
elapsed_time = 0
start = time.time()
while
|
python
|
{
"resource": ""
}
|
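The `_blocking_pop` snippet above polls a set of list keys until one of them has data or the timeout expires. A minimal, self-contained sketch of that polling pattern (a hypothetical helper, not the mockredis code) could look like this:

import time

def blocking_pop(store, keys, pop_left, timeout, poll_interval=0.01):
    # Poll plain Python lists held in `store` (a dict) until one yields a value
    # or the timeout (in seconds) expires; returns (key, value) or None.
    deadline = time.time() + timeout
    while True:
        for key in keys:
            items = store.get(key)
            if items:
                return key, (items.pop(0) if pop_left else items.pop())
        if time.time() >= deadline:
            return None
        time.sleep(poll_interval)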
q604
|
MockRedis.lpush
|
train
|
def lpush(self, key, *args):
"""Emulate lpush."""
redis_list = self._get_list(key, 'LPUSH', create=True)
# Creates the list at this key if it doesn't exist, and appends args to its beginning
args_reversed = [self._encode(arg) for arg in args]
args_reversed.reverse()
updated_list = args_reversed
|
python
|
{
"resource": ""
}
|
q605
|
MockRedis.rpop
|
train
|
def rpop(self, key):
"""Emulate lpop."""
redis_list = self._get_list(key, 'RPOP')
if self._encode(key) not in self.redis:
return None
|
python
|
{
"resource": ""
}
|
q606
|
MockRedis.rpush
|
train
|
def rpush(self, key, *args):
"""Emulate rpush."""
redis_list = self._get_list(key, 'RPUSH', create=True)
# Creates the list at this key if it doesn't exist, and appends args to it
|
python
|
{
"resource": ""
}
|
q607
|
MockRedis.lrem
|
train
|
def lrem(self, key, value, count=0):
"""Emulate lrem."""
value = self._encode(value)
redis_list = self._get_list(key, 'LREM')
removed_count = 0
if self._encode(key) in self.redis:
if count == 0:
# Remove all occurrences
while redis_list.count(value):
redis_list.remove(value)
removed_count += 1
elif count > 0:
counter = 0
# remove first 'count' occurrences
while redis_list.count(value):
redis_list.remove(value)
counter += 1
removed_count += 1
if counter >= count:
break
elif count <
|
python
|
{
"resource": ""
}
|
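For reference, LREM-style removal can be reproduced on a plain Python list. The sketch below mirrors the count semantics in the truncated snippet above (count > 0 removes from the head, count < 0 from the tail, 0 removes all occurrences); it is an illustration, not the mockredis code:

def lrem_like(items, value, count=0):
    removed = 0
    if count >= 0:
        limit = count or len(items)
        while value in items and removed < limit:
            items.remove(value)          # removes the first occurrence
            removed += 1
    else:
        while value in items and removed < -count:
            # index of the last occurrence, removed from the tail end
            idx = len(items) - 1 - items[::-1].index(value)
            del items[idx]
            removed += 1
    return removed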
q608
|
MockRedis.ltrim
|
train
|
def ltrim(self, key, start, stop):
"""Emulate ltrim."""
redis_list = self._get_list(key, 'LTRIM')
if redis_list:
start, stop = self._translate_range(len(redis_list), start,
|
python
|
{
"resource": ""
}
|
q609
|
MockRedis.lset
|
train
|
def lset(self, key, index, value):
"""Emulate lset."""
redis_list = self._get_list(key, 'LSET')
if redis_list is None:
raise ResponseError("no such key")
try:
|
python
|
{
"resource": ""
}
|
q610
|
MockRedis._common_scan
|
train
|
def _common_scan(self, values_function, cursor='0', match=None, count=10, key=None):
"""
Common scanning skeleton.
:param key: optional function used to identify what 'match' is applied to
"""
if count is None:
count = 10
cursor = int(cursor)
count = int(count)
if not count:
raise ValueError('if specified, count must be > 0: %s' % count)
values = values_function()
if cursor + count >= len(values):
# we reached the end, back to zero
result_cursor = 0
|
python
|
{
"resource": ""
}
|
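The cursor/count/match skeleton above can be illustrated with a small stand-alone function; glob-style matching via `fnmatch` is an assumption based on the config_get snippet further down, and this is a sketch rather than the original helper:

import fnmatch

def common_scan(values, cursor='0', match=None, count=10):
    cursor, count = int(cursor), int(count or 10)
    window = values[cursor:cursor + count]
    # a returned cursor of 0 signals to the caller that the scan is complete
    next_cursor = 0 if cursor + count >= len(values) else cursor + count
    if match is not None:
        window = [v for v in window if fnmatch.fnmatch(str(v), match)]
    return next_cursor, window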
q611
|
MockRedis.scan
|
train
|
def scan(self, cursor='0', match=None, count=10):
"""Emulate scan."""
def value_function():
return sorted(self.redis.keys()) # sorted list for consistent order
|
python
|
{
"resource": ""
}
|
q612
|
MockRedis.sscan
|
train
|
def sscan(self, name, cursor='0', match=None, count=10):
"""Emulate sscan."""
def value_function():
members = list(self.smembers(name))
|
python
|
{
"resource": ""
}
|
q613
|
MockRedis.zscan
|
train
|
def zscan(self, name, cursor='0', match=None, count=10):
"""Emulate zscan."""
def value_function():
values = self.zrange(name, 0, -1, withscores=True)
values.sort(key=lambda x: x[1]) # sort for consistent order
|
python
|
{
"resource": ""
}
|
q614
|
MockRedis.hscan
|
train
|
def hscan(self, name, cursor='0', match=None, count=10):
"""Emulate hscan."""
def value_function():
values = self.hgetall(name)
values = list(values.items()) # list of tuples for sorting and matching
values.sort(key=lambda x: x[0]) # sort for consistent order
return
|
python
|
{
"resource": ""
}
|
q615
|
MockRedis.hscan_iter
|
train
|
def hscan_iter(self, name, match=None, count=10):
"""Emulate hscan_iter."""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
|
python
|
{
"resource": ""
}
|
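The hscan_iter snippet is driven by the generic cursor loop sketched below; `scan_func` stands in for any scan-style callable returning a (next_cursor, mapping) pair (an illustration, not the library code):

def scan_iter(scan_func, **kwargs):
    cursor = '0'
    while cursor != 0:
        cursor, data = scan_func(cursor=cursor, **kwargs)
        for item in data.items():
            yield item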
q616
|
MockRedis.sadd
|
train
|
def sadd(self, key, *values):
"""Emulate sadd."""
if len(values) == 0:
raise ResponseError("wrong number of arguments for 'sadd' command")
|
python
|
{
"resource": ""
}
|
q617
|
MockRedis.sdiff
|
train
|
def sdiff(self, keys, *args):
"""Emulate sdiff."""
func = lambda left, right: left.difference(right)
|
python
|
{
"resource": ""
}
|
q618
|
MockRedis.sdiffstore
|
train
|
def sdiffstore(self, dest, keys, *args):
"""Emulate sdiffstore."""
result = self.sdiff(keys, *args)
|
python
|
{
"resource": ""
}
|
q619
|
MockRedis.sinter
|
train
|
def sinter(self, keys, *args):
"""Emulate sinter."""
func = lambda left, right: left.intersection(right)
|
python
|
{
"resource": ""
}
|
q620
|
MockRedis.sinterstore
|
train
|
def sinterstore(self, dest, keys, *args):
"""Emulate sinterstore."""
result = self.sinter(keys, *args)
|
python
|
{
"resource": ""
}
|
q621
|
MockRedis.sismember
|
train
|
def sismember(self, name, value):
"""Emulate sismember."""
redis_set = self._get_set(name, 'SISMEMBER')
|
python
|
{
"resource": ""
}
|
q622
|
MockRedis.smove
|
train
|
def smove(self, src, dst, value):
"""Emulate smove."""
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
|
python
|
{
"resource": ""
}
|
q623
|
MockRedis.spop
|
train
|
def spop(self, name):
"""Emulate spop."""
redis_set = self._get_set(name, 'SPOP')
if not redis_set:
|
python
|
{
"resource": ""
}
|
q624
|
MockRedis.srandmember
|
train
|
def srandmember(self, name, number=None):
"""Emulate srandmember."""
redis_set = self._get_set(name, 'SRANDMEMBER')
if not redis_set:
return None if number is None else []
if number is None:
return choice(list(redis_set))
|
python
|
{
"resource": ""
}
|
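SRANDMEMBER semantics on a plain Python set can be sketched with the standard random module; the handling of a negative `number` (members may repeat) is an assumption based on the Redis command, not taken from the truncated snippet:

from random import choice, sample

def srandmember_like(members, number=None):
    members = list(members)
    if not members:
        return None if number is None else []
    if number is None:
        return choice(members)
    if number >= 0:
        return sample(members, min(number, len(members)))
    return [choice(members) for _ in range(-number)]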
q625
|
MockRedis.srem
|
train
|
def srem(self, key, *values):
"""Emulate srem."""
redis_set = self._get_set(key, 'SREM')
if not redis_set:
return 0
before_count = len(redis_set)
for value in values:
redis_set.discard(self._encode(value))
after_count
|
python
|
{
"resource": ""
}
|
q626
|
MockRedis.sunion
|
train
|
def sunion(self, keys, *args):
"""Emulate sunion."""
func = lambda left, right: left.union(right)
|
python
|
{
"resource": ""
}
|
q627
|
MockRedis.sunionstore
|
train
|
def sunionstore(self, dest, keys, *args):
"""Emulate sunionstore."""
result = self.sunion(keys, *args)
|
python
|
{
"resource": ""
}
|
q628
|
MockRedis.call
|
train
|
def call(self, command, *args):
"""
Sends the call to the function whose name is specified by command.
Used by Script invocations and normalizes calls using standard
Redis arguments to use the
|
python
|
{
"resource": ""
}
|
q629
|
MockRedis._normalize_command_args
|
train
|
def _normalize_command_args(self, command, *args):
"""
Modifies the command arguments to match the
strictness of the redis client.
"""
if command == 'zadd' and not self.strict and len(args) >= 3:
# Reorder score and name
zadd_args = [x for tup in zip(args[2::2], args[1::2]) for x in tup]
return [args[0]] + zadd_args
if command in ('zrangebyscore', 'zrevrangebyscore'):
# expected format is: <command> name min max start num with_scores score_cast_func
if len(args) <= 3:
# just plain min/max
return args
start, num = None, None
withscores = False
for i, arg in enumerate(args[3:], 3):
# keywords are case-insensitive
|
python
|
{
"resource": ""
}
|
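The zadd branch above flips each (name, score) pair into (score, name) order. A small worked example of that comprehension (the argument order of the legacy, non-strict client is an assumption):

args = ('myzset', 'a', 1, 'b', 2)          # key, name1, score1, name2, score2
zadd_args = [x for tup in zip(args[2::2], args[1::2]) for x in tup]
print([args[0]] + zadd_args)               # ['myzset', 1, 'a', 2, 'b']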
q630
|
MockRedis.config_get
|
train
|
def config_get(self, pattern='*'):
"""
Get one or more configuration parameters.
"""
result = {}
for name, value in self.redis_config.items():
if fnmatch.fnmatch(name, pattern):
try:
|
python
|
{
"resource": ""
}
|
q631
|
MockRedis._translate_range
|
train
|
def _translate_range(self, len_, start, end):
"""
Translate range to valid bounds.
"""
if start < 0:
start += len_
start = max(0, min(start, len_))
|
python
|
{
"resource": ""
}
|
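A self-contained sketch of the range translation (Redis-style inclusive, possibly negative start/end clamped to valid indices); the clamping rule for `end` is an assumption, since that part of the snippet is cut off:

def translate_range(length, start, end):
    if start < 0:
        start += length
    start = max(0, min(start, length))
    if end < 0:
        end += length
    end = max(-1, min(end, length - 1))
    return start, end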
q632
|
MockRedis._translate_limit
|
train
|
def _translate_limit(self, len_, start, num):
"""
Translate limit to valid bounds.
"""
|
python
|
{
"resource": ""
}
|
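Similarly, a LIMIT-style (start, num) pair can be clamped to a sequence of the given length; the exact behaviour of the truncated helper is not visible, so this is only an assumed sketch:

def translate_limit(length, start, num):
    start = max(0, min(start, length))
    num = max(0, min(num, length - start))
    return start, num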
q633
|
MockRedis._aggregate_func
|
train
|
def _aggregate_func(self, aggregate):
"""
Return a suitable aggregate score function.
"""
funcs = {"sum": add, "min": min, "max": max}
func_name = aggregate.lower() if aggregate else 'sum'
try:
|
python
|
{
"resource": ""
}
|
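The aggregate lookup above resolves a name to a callable from the operator module; a complete sketch with an explicit error for unknown names (the exception type raised by the original is not visible, ValueError is an assumption):

from operator import add

def aggregate_func(aggregate):
    funcs = {'sum': add, 'min': min, 'max': max}
    name = aggregate.lower() if aggregate else 'sum'
    try:
        return funcs[name]
    except KeyError:
        raise ValueError('invalid aggregate: %r' % aggregate)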
q634
|
MockRedis._apply_to_sets
|
train
|
def _apply_to_sets(self, func, operation, keys, *args):
"""Helper function for sdiff, sinter, and sunion"""
keys = self._list_or_args(keys, args)
if not keys:
raise TypeError("{} takes at least two arguments".format(operation.lower()))
|
python
|
{
"resource": ""
}
|
q635
|
MockRedis._list_or_args
|
train
|
def _list_or_args(self, keys, args):
"""
Shamelessly copied from redis-py.
"""
# returns a single list combining keys and args
try:
iter(keys)
# a string can be iterated, but indicates
# keys wasn't passed
|
python
|
{
"resource": ""
}
|
q636
|
MockRedis._encode
|
train
|
def _encode(self, value):
"Return a bytestring representation of the value. Taken from redis-py connection.py"
if isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = str(value).encode('utf-8')
elif isinstance(value, float):
|
python
|
{
"resource": ""
}
|
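A stand-alone version of the same bytes-normalisation idea; the float and str branches are assumptions about what the truncated code does (redis-py uses repr() for floats):

def encode(value):
    if isinstance(value, bytes):
        return value
    if isinstance(value, int):
        return str(value).encode('utf-8')
    if isinstance(value, float):
        return repr(value).encode('utf-8')
    if isinstance(value, str):
        return value.encode('utf-8')
    raise TypeError('cannot encode %r' % (value,))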
q637
|
MockRedisPipeline.watch
|
train
|
def watch(self, *keys):
"""
Put the pipeline into immediate execution mode.
Does not actually watch any keys.
"""
if self.explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
self.watching = True
|
python
|
{
"resource": ""
}
|
q638
|
MockRedisPipeline.execute
|
train
|
def execute(self):
"""
Execute all of the saved commands and return results.
"""
try:
for key, value in self._watched_keys.items():
if self.mock_redis.redis.get(self.mock_redis._encode(key)) != value:
|
python
|
{
"resource": ""
}
|
q639
|
MockRedisPipeline._reset
|
train
|
def _reset(self):
"""
Reset instance variables.
"""
self.commands = []
self.watching = False
|
python
|
{
"resource": ""
}
|
q640
|
Language.convert_to_duckling_language_id
|
train
|
def convert_to_duckling_language_id(cls, lang):
"""Ensure a language identifier has the correct duckling format and is supported."""
if lang is not None and cls.is_supported(lang):
return lang
elif lang is not None and cls.is_supported(lang + "$core"): # Support ISO 639-1 Language Codes (e.g. "en")
|
python
|
{
"resource": ""
}
|
q641
|
Duckling.load
|
train
|
def load(self, languages=[]):
"""Loads the Duckling corpus.
Languages can be specified, defaults to all.
Args:
languages: Optional parameter to specify languages,
e.g. [Duckling.ENGLISH, Duckling.FRENCH] or supported ISO 639-1 Codes (e.g. ["en", "fr"])
"""
duckling_load = self.clojure.var("duckling.core", "load!")
clojure_hashmap = self.clojure.var("clojure.core", "hash-map")
|
python
|
{
"resource": ""
}
|
q642
|
DucklingWrapper.parse_time
|
train
|
def parse_time(self, input_str, reference_time=''):
"""Parses input with Duckling for occurences of times.
Args:
input_str: An input string, e.g. 'Let's meet at 11:45am'.
reference_time: Optional reference time for Duckling.
Returns:
A preprocessed list of results (dicts) from Duckling output. For
example:
[
{
"dim":"time",
"end":21,
"start":11,
"value":{
"value":"2016-10-11T11:45:00.000-07:00",
"others":[
"2016-10-11T11:45:00.000-07:00",
|
python
|
{
"resource": ""
}
|
q643
|
AdHocClient.get_commands
|
train
|
def get_commands(self, peer_jid):
"""
Return the list of commands offered by the peer.
:param peer_jid: JID of the peer to query
:type peer_jid: :class:`~aioxmpp.JID`
:rtype: :class:`list` of :class:`~.disco.xso.Item`
:return: List of command items
In the returned list, each :class:`~.disco.xso.Item` represents one
command supported by the peer. The :attr:`~.disco.xso.Item.node`
attribute is the identifier of the command which
|
python
|
{
"resource": ""
}
|
q644
|
AdHocClient.get_command_info
|
train
|
def get_command_info(self, peer_jid, command_name):
"""
Obtain information about a command.
:param peer_jid: JID of the peer to query
:type peer_jid: :class:`~aioxmpp.JID`
:param command_name: Node name of the command
:type command_name: :class:`str`
:rtype: :class:`~.disco.xso.InfoQuery`
:return: Service discovery information about the command
Sends a service discovery query to the service discovery node of the
command. The returned object contains information about the command,
such as the namespaces used by its implementation (generally the
:xep:`4` data forms namespace) and possibly localisations of the
command's name.
|
python
|
{
"resource": ""
}
|
q645
|
AdHocClient.execute
|
train
|
def execute(self, peer_jid, command_name):
"""
Start execution of a command with a peer.
:param peer_jid: JID of the peer to start the command at.
:type peer_jid: :class:`~aioxmpp.JID`
:param command_name: Node name of the command to execute.
:type command_name: :class:`str`
:rtype: :class:`~.adhoc.service.ClientSession`
|
python
|
{
"resource": ""
}
|
q646
|
AdHocServer.register_stateless_command
|
train
|
def register_stateless_command(self, node, name, handler, *,
is_allowed=None,
features={namespaces.xep0004_data}):
"""
Register a handler for a stateless command.
:param node: Name of the command (``node`` in the service discovery
list).
:type node: :class:`str`
:param name: Human-readable name of the command
:type name: :class:`str` or :class:`~.LanguageMap`
:param handler: Coroutine function to run to get the response for a
request.
:param is_allowed: A predicate which determines whether the command is
shown and allowed for a given peer.
:type is_allowed: function or :data:`None`
:param features: Set of features to announce for the command
:type features: :class:`set` of :class:`str`
When a request for the command is received, `handler` is invoked. The
semantics of `handler` are the same as for
:meth:`~.StanzaStream.register_iq_request_handler`. It must produce a
valid :class:`~.adhoc.xso.Command` response payload.
If `is_allowed` is not :data:`None`, it is invoked whenever a command
listing is generated and whenever a command request is received. The
:class:`aioxmpp.JID` of the requester is passed as positional argument
to `is_allowed`. If `is_allowed` returns false, the command is not
|
python
|
{
"resource": ""
}
|
q647
|
ClientSession.start
|
train
|
def start(self):
"""
Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values.
"""
if self._response is not None:
raise RuntimeError("command execution already started")
request = aioxmpp.IQ(
|
python
|
{
"resource": ""
}
|
q648
|
ClientSession.proceed
|
train
|
def proceed(self, *,
action=adhoc_xso.ActionType.EXECUTE,
payload=None):
"""
Proceed command execution to the next stage.
:param action: Action type for proceeding
:type action: :class:`~.ActionType`
:param payload: Payload for the request, or :data:`None`
:return: The :attr:`~.xso.Command.first_payload` of the response
`action` must be one of the actions returned by
:attr:`allowed_actions`. It defaults to :attr:`~.ActionType.EXECUTE`,
which is (alongside with :attr:`~.ActionType.CANCEL`) always allowed.
`payload` may be a sequence of XSOs, a single XSO or :data:`None`. If
it is :data:`None`, the XSOs from the request are re-used. This is
useful if you modify the payload in-place (e.g. via
:attr:`first_payload`). Otherwise, the payload on the request is set to
the `payload` argument; if it is a single XSO, it is wrapped in a
sequence.
|
python
|
{
"resource": ""
}
|
q649
|
IBBTransport.write
|
train
|
def write(self, data):
"""
Send `data` over the IBB. If `data` is larger than the block size
it is chunked and sent in chunks.
Chunks from one call of :meth:`write` will always be sent in
|
python
|
{
"resource": ""
}
|
q650
|
IBBTransport.close
|
train
|
def close(self):
"""
Close the session.
"""
if self.is_closing():
return
|
python
|
{
"resource": ""
}
|
q651
|
IBBService.open_session
|
train
|
def open_session(self, protocol_factory, peer_jid, *,
stanza_type=ibb_xso.IBBStanzaType.IQ,
block_size=4096, sid=None):
"""
Establish an in-band bytestream session with `peer_jid` and
return the transport and protocol.
:param protocol_factory: the protocol factory
:type protocol_factory: a nullary callable returning an
:class:`asyncio.Protocol` instance
:param peer_jid: the JID with which to establish the byte-stream.
:type peer_jid: :class:`aioxmpp.JID`
:param stanza_type: the stanza type to use
:type stanza_type: :class:`~aioxmpp.ibb.IBBStanzaType`
:param block_size: the maximal size of blocks to transfer
:type block_size: :class:`int`
:param sid: the session id to use
:type sid: :class:`str` (must be a valid NMTOKEN)
:returns: the transport and protocol
:rtype: a tuple of :class:`aioxmpp.ibb.service.IBBTransport`
and :class:`asyncio.Protocol`
"""
if block_size > MAX_BLOCK_SIZE:
raise ValueError("block_size too large")
if sid is None:
|
python
|
{
"resource": ""
}
|
q652
|
Room.features
|
train
|
def features(self):
"""
The set of features supported by this MUC. This may vary depending on
features exported by the MUC service, so be sure to check this for each
individual MUC.
"""
return {
aioxmpp.im.conversation.ConversationFeature.BAN,
aioxmpp.im.conversation.ConversationFeature.BAN_WITH_KICK,
aioxmpp.im.conversation.ConversationFeature.KICK,
aioxmpp.im.conversation.ConversationFeature.SEND_MESSAGE,
|
python
|
{
"resource": ""
}
|
q653
|
Room.send_message
|
train
|
def send_message(self, msg):
"""
Send a message to the MUC.
:param msg: The message to send.
:type msg: :class:`aioxmpp.Message`
:return: The stanza token of the message.
:rtype: :class:`~aioxmpp.stream.StanzaToken`
There is no need to set the address attributes or the type of the
message correctly; those will be overridden by this method to conform
to the requirements of a message to the MUC. Other attributes are left
untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and
can be used as desired for the message.
.. seealso::
:meth:`.AbstractConversation.send_message` for the full interface
|
python
|
{
"resource": ""
}
|
q654
|
Room.send_message_tracked
|
train
|
def send_message_tracked(self, msg):
"""
Send a message to the MUC with tracking.
:param msg: The message to send.
:type msg: :class:`aioxmpp.Message`
.. warning::
Please read :ref:`api-tracking-memory`. This is especially relevant
for MUCs because tracking is not guaranteed to work due to how
:xep:`45` is written. It will work in many cases, probably in all
cases you test during development, but it may fail to work for some
individual messages and it may fail to work consistently for some
services. See the implementation details below for reasons.
The message is tracked and is considered
:attr:`~.MessageState.DELIVERED_TO_RECIPIENT` when it is reflected back
to us by the MUC service. The reflected message is then available in
the :attr:`~.MessageTracker.response` attribute.
.. note::
Two things:
1. The MUC service may change the contents of the message. An
example of this is the Prosody developer MUC which replaces
messages with more than a few lines with a pastebin link.
2. Reflected messages which are caught by tracking are not emitted
through :meth:`on_message`.
There is no need to set the address attributes or the type of the
message correctly; those will be overridden by this method to conform
to the requirements of a message to the MUC. Other attributes are left
untouched (except that :meth:`~.StanzaBase.autoset_id` is called) and
can be used as desired for the message.
.. warning::
Using :meth:`send_message_tracked` before :meth:`on_join` has
emitted will cause the `member` object in the resulting
:meth:`on_message` event to be :data:`None` (the message will be
delivered just fine).
Using :meth:`send_message_tracked` before history replay is over
will cause the :meth:`on_message` event to be emitted during
history replay, even though everyone else in the MUC will -- of
course -- only see the message after the history.
:meth:`send_message` is not affected by these quirks.
.. seealso::
:meth:`.AbstractConversation.send_message_tracked` for the full
interface specification.
**Implementation details:** Currently, we try to detect reflected
messages using two different criteria. First, if we see a message with
the same message ID (note that message IDs contain 120 bits of entropy)
as the message we sent, we consider it as the reflection. As some MUC
|
python
|
{
"resource": ""
}
|
q655
|
Room.set_nick
|
train
|
def set_nick(self, new_nick):
"""
Change the nick name of the occupant.
:param new_nick: New nickname to use
:type new_nick: :class:`str`
This sends the request to change the nickname and waits for the request
to be sent over the stream.
The nick change may or may not happen,
|
python
|
{
"resource": ""
}
|
q656
|
Room.kick
|
train
|
def kick(self, member, reason=None):
"""
Kick an occupant from the MUC.
:param member: The member to kick.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the kicked member.
:type reason: :class:`str`
:raises aioxmpp.errors.XMPPError: if the server returned an error for
the kick command.
|
python
|
{
"resource": ""
}
|
q657
|
Room.muc_set_role
|
train
|
def muc_set_role(self, nick, role, *, reason=None):
"""
Change the role of an occupant.
:param nick: The nickname of the occupant whose role shall be changed.
:type nick: :class:`str`
:param role: The new role for the occupant.
:type role: :class:`str`
:param reason: An optional reason to show to the occupant (and all
others).
Change the role of an occupant, identified by their `nick`, to the
given new `role`. Optionally, a `reason` for the role change can be
provided.
Setting the different roles requires different privileges of the local
|
python
|
{
"resource": ""
}
|
q658
|
Room.ban
|
train
|
def ban(self, member, reason=None, *, request_kick=True):
"""
Ban an occupant from re-joining the MUC.
:param member: The occupant to ban.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the banned member.
:type reason: :class:`str`
:param request_kick: A flag indicating that the member should be
removed from the conversation immediately, too.
:type request_kick: :class:`bool`
`request_kick` is supported by MUC, but setting it to false has no
effect: banned members are always immediately kicked.
.. seealso::
|
python
|
{
"resource": ""
}
|
q659
|
Room.leave
|
train
|
def leave(self):
"""
Leave the MUC.
"""
fut = self.on_exit.future()
def cb(**kwargs):
fut.set_result(None)
return True # disconnect
self.on_exit.connect(cb)
presence = aioxmpp.stanza.Presence(
|
python
|
{
"resource": ""
}
|
q660
|
MUCClient.set_affiliation
|
train
|
def set_affiliation(self, mucjid, jid, affiliation, *, reason=None):
"""
Change the affiliation of an entity with a MUC.
:param mucjid: The bare JID identifying the MUC.
:type mucjid: :class:`~aioxmpp.JID`
:param jid: The bare JID of the entity whose affiliation shall be
changed.
:type jid: :class:`~aioxmpp.JID`
:param affiliation: The new affiliation for the entity.
:type affiliation: :class:`str`
:param reason: Optional reason for the affiliation change.
:type reason: :class:`str` or :data:`None`
Change the affiliation of the given `jid` with the MUC identified by
the bare `mucjid` to the given new `affiliation`. Optionally, a
`reason` can be given.
If you are joined in the MUC, :meth:`Room.muc_set_affiliation` may be
more convenient, but it is possible to modify the affiliations of a MUC
without being joined, given sufficient privileges.
Setting the different affiliations requires different privileges of the
local user. The details can be checked in :xep:`0045` and are enforced
solely by the server, not local code.
The coroutine returns when the change in affiliation has been
acknowledged by the server. If the server returns an error, an
appropriate :class:`aioxmpp.errors.XMPPError` subclass is raised.
|
python
|
{
"resource": ""
}
|
q661
|
MUCClient.get_room_config
|
train
|
def get_room_config(self, mucjid):
"""
Query and return the room configuration form for the given MUC.
:param mucjid: JID of the room to query
:type mucjid: bare :class:`~.JID`
:return: data form template for the room configuration
:rtype: :class:`aioxmpp.forms.Data`
.. seealso::
:class:`~.ConfigurationForm`
for a form template to work with the returned form
.. versionadded:: 0.7
"""
|
python
|
{
"resource": ""
}
|
q662
|
request_slot
|
train
|
def request_slot(client,
service: JID,
filename: str,
size: int,
content_type: str):
"""
Request an HTTP upload slot.
:param client: The client to request the slot with.
:type client: :class:`aioxmpp.Client`
:param service: Address of the HTTP upload service.
:type service: :class:`~aioxmpp.JID`
:param filename: Name of the file (without path), may be used by the server
to generate the URL.
:type filename: :class:`str`
:param size: Size of the file in bytes
:type size: :class:`int`
:param content_type: The MIME type of the file
:type content_type: :class:`str`
:return: The assigned upload slot.
|
python
|
{
"resource": ""
}
|
q663
|
query_version
|
train
|
def query_version(stream: aioxmpp.stream.StanzaStream,
target: aioxmpp.JID) -> version_xso.Query:
"""
Query the software version of an entity.
:param stream: A stanza stream to send the query on.
:type stream: :class:`aioxmpp.stream.StanzaStream`
:param target: The address of the entity to query.
:type target: :class:`aioxmpp.JID`
:raises OSError: if a connection issue occurred before a reply was received
|
python
|
{
"resource": ""
}
|
q664
|
as_bookmark_class
|
train
|
def as_bookmark_class(xso_class):
"""
Decorator to register `xso_class` as a custom bookmark class.
This is necessary to store and retrieve such bookmarks.
The registered class must be a subclass of the abstract base class
:class:`Bookmark`.
:raises TypeError: if `xso_class` is not a subclass of :class:`Bookmark`.
"""
if not issubclass(xso_class, Bookmark):
|
python
|
{
"resource": ""
}
|
q665
|
basic_filter_languages
|
train
|
def basic_filter_languages(languages, ranges):
"""
Filter languages using the string-based basic filter algorithm described in
RFC4647.
`languages` must be a sequence of :class:`LanguageTag` instances which are
to be filtered.
`ranges` must be an iterable which represent the basic language ranges to
filter with, in priority order. The language ranges must be given as
:class:`LanguageRange` objects.
Return an iterator of languages which matched any of the `ranges`. The
sequence produced by the iterator is in match order and duplicate-free. The
first range to match a language yields the language into the iterator, no
other range can yield that language afterwards.
"""
if LanguageRange.WILDCARD in ranges:
yield from languages
return
found = set()
for language_range in ranges:
range_str = language_range.match_str
for language in languages:
if language in found:
continue
|
python
|
{
"resource": ""
}
|
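The filter above relies on the RFC 4647 basic matching rule; reduced to plain strings it can be sketched as follows (an illustration, not the aioxmpp implementation):

def basic_match(tag, language_range):
    # a basic range matches a tag if it is '*', equals the tag, or is a
    # prefix of the tag followed by '-'
    tag, language_range = tag.lower(), language_range.lower()
    return (language_range == '*'
            or tag == language_range
            or tag.startswith(language_range + '-'))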
q666
|
JID.fromstr
|
train
|
def fromstr(cls, s, *, strict=True):
"""
Construct a JID out of a string containing it.
:param s: The string to parse.
:type s: :class:`str`
:param strict: Whether to enable strict parsing.
:type strict: :class:`bool`
:raises: See :class:`JID`
:return: The parsed JID
:rtype: :class:`JID`
See the :class:`JID` class level documentation for the semantics of
`strict`.
"""
nodedomain, sep, resource = s.partition("/")
if
|
python
|
{
"resource": ""
}
|
q667
|
Service.get_conversation
|
train
|
def get_conversation(self, peer_jid, *, current_jid=None):
"""
Get or create a new one-to-one conversation with a peer.
:param peer_jid: The JID of the peer to converse with.
:type peer_jid: :class:`aioxmpp.JID`
:param current_jid: The current JID to lock the conversation to (see
:rfc:`6121`).
|
python
|
{
"resource": ""
}
|
q668
|
reconfigure_resolver
|
train
|
def reconfigure_resolver():
"""
Reset the resolver configured for this thread to a fresh instance. This
essentially re-reads the system-wide resolver configuration.
If a custom resolver has been set using :func:`set_resolver`, the flag
|
python
|
{
"resource": ""
}
|
q669
|
iq_handler
|
train
|
def iq_handler(type_, payload_cls, *, with_send_reply=False):
"""
Register the decorated function or coroutine function as IQ request
handler.
:param type_: IQ type to listen for
:type type_: :class:`~.IQType`
:param payload_cls: Payload XSO class to listen for
:type payload_cls: :class:`~.XSO` subclass
:param with_send_reply: Whether to pass a function to send a reply
to the decorated callable as second argument.
:type with_send_reply: :class:`bool`
:raises ValueError: if `payload_cls` is not a registered IQ payload
If the decorated function is not a coroutine function, it must return an
awaitable instead.
.. seealso::
:meth:`~.StanzaStream.register_iq_request_handler` for more
details on the `type_`, `payload_cls` and
`with_send_reply` arguments, as well as behaviour expected
from the decorated function.
:meth:`aioxmpp.IQ.as_payload_class`
for a way to register a XSO as IQ payload
.. versionadded:: 0.11
The `with_send_reply` argument.
.. versionchanged:: 0.10
The decorator now checks if `payload_cls` is a valid, registered IQ
payload and raises :class:`ValueError` if not.
"""
if (not hasattr(payload_cls, "TAG") or
|
python
|
{
"resource": ""
}
|
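A hedged usage sketch for the decorator documented above; `EchoPayload` is a hypothetical XSO, and registering it via `aioxmpp.IQ.as_payload_class` follows the requirement mentioned in the docstring:

import aioxmpp
import aioxmpp.service
import aioxmpp.xso

@aioxmpp.IQ.as_payload_class
class EchoPayload(aioxmpp.xso.XSO):
    # hypothetical payload element <echo xmlns='urn:example:echo'/>
    TAG = ('urn:example:echo', 'echo')

class EchoService(aioxmpp.service.Service):
    @aioxmpp.service.iq_handler(aioxmpp.IQType.GET, EchoPayload)
    async def handle_echo(self, iq):
        # the returned payload becomes the payload of the IQ result
        return iq.payload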
q670
|
inbound_message_filter
|
train
|
def inbound_message_filter(f):
"""
Register the decorated function as a service-level inbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
|
python
|
{
"resource": ""
}
|
q671
|
inbound_presence_filter
|
train
|
def inbound_presence_filter(f):
"""
Register the decorated function as a service-level inbound presence filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
|
python
|
{
"resource": ""
}
|
q672
|
outbound_message_filter
|
train
|
def outbound_message_filter(f):
"""
Register the decorated function as a service-level outbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
|
python
|
{
"resource": ""
}
|
q673
|
outbound_presence_filter
|
train
|
def outbound_presence_filter(f):
"""
Register the decorated function as a service-level outbound presence
filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
"""
if asyncio.iscoroutinefunction(f):
|
python
|
{
"resource": ""
}
|
q674
|
depsignal
|
train
|
def depsignal(class_, signal_name, *, defer=False):
"""
Connect the decorated method or coroutine method to the addressed signal on
a class on which the service depends.
:param class_: A service class which is listed in the
:attr:`~.Meta.ORDER_AFTER` relationship.
:type class_: :class:`Service` class or one of the special cases below
:param signal_name: Attribute name of the signal to connect to
:type signal_name: :class:`str`
:param defer: Flag indicating whether deferred execution of the decorated
method is desired; see below for details.
:type defer: :class:`bool`
The signal is discovered by accessing the attribute with the name
`signal_name` on the given `class_`. In addition, the following arguments
are supported for `class_`:
1. :class:`aioxmpp.stream.StanzaStream`: the corresponding signal of the
stream of the client running the service is used.
2. :class:`aioxmpp.Client`: the corresponding signal of the client running
the service is used.
If the signal is a :class:`.callbacks.Signal` and `defer` is false, the
decorated object
|
python
|
{
"resource": ""
}
|
q675
|
attrsignal
|
train
|
def attrsignal(descriptor, signal_name, *, defer=False):
"""
Connect the decorated method or coroutine method to the addressed signal on
a descriptor.
:param descriptor: The descriptor to connect to.
:type descriptor: :class:`Descriptor` subclass.
:param signal_name: Attribute name of the signal to connect to
:type signal_name: :class:`str`
:param defer: Flag indicating whether deferred execution of the decorated
method is desired; see below for details.
:type defer: :class:`bool`
The signal is discovered by accessing the attribute with the name
`signal_name` on the :attr:`~Descriptor.value_type` of the `descriptor`.
During instantiation of the service, the value of the descriptor is used
to obtain the signal and then the decorated method is connected to the
signal.
If the signal is a :class:`.callbacks.Signal` and `defer` is false, the
decorated object is connected using the default
:attr:`~.callbacks.AdHocSignal.STRONG` mode.
If the
|
python
|
{
"resource": ""
}
|
q676
|
Descriptor.add_to_stack
|
train
|
def add_to_stack(self, instance, stack):
"""
Get the context manager for the service `instance` and push it to the
context manager `stack`.
:param instance: The service to get the context manager for.
:type instance: :class:`Service`
:param stack: The context manager stack to push the CM onto.
:type stack: :class:`contextlib.ExitStack`
|
python
|
{
"resource": ""
}
|
q677
|
Meta.orders_after
|
train
|
def orders_after(self, other, *, visited=None):
"""
Return whether `self` depends on `other` and will be instantiated
later.
|
python
|
{
"resource": ""
}
|
q678
|
Meta.orders_after_any
|
train
|
def orders_after_any(self, other, *, visited=None):
"""
Return whether `self` orders after any of the services in the set
`other`.
:param other: Another service.
:type other: A :class:`set` of
:class:`aioxmpp.service.Service` instances
.. versionadded:: 0.11
"""
if not other:
return False
if visited is None:
visited = set()
elif self in visited:
|
python
|
{
"resource": ""
}
|
q679
|
TaskPool.set_limit
|
train
|
def set_limit(self, group, new_limit):
"""
Set a new limit on the number of tasks in the `group`.
:param group: Group key of the group to modify.
:type group: hashable
:param new_limit: New limit for the number of tasks running in `group`.
:type new_limit: non-negative :class:`int` or :data:`None`
:raise ValueError: if `new_limit` is non-positive
The limit of tasks for the `group` is set to `new_limit`. If there are
currently more than `new_limit` tasks running in `group`, those tasks
will continue to run, however, the creation of new tasks is inhibited
until the group is below its limit.
If the limit is set
|
python
|
{
"resource": ""
}
|
q680
|
TaskPool.spawn
|
train
|
def spawn(self, __groups, __coro_fun, *args, **kwargs):
"""
Start a new coroutine and add it to the pool atomically.
:param groups: The groups the coroutine belongs to.
:type groups: :class:`set` of group keys
:param coro_fun: Coroutine function to run
:param args: Positional arguments to pass to `coro_fun`
:param kwargs: Keyword arguments to pass to `coro_fun`
:raise RuntimeError: if the limit on any of the groups or the total
limit is exhausted
:rtype: :class:`asyncio.Task`
|
python
|
{
"resource": ""
}
|
q681
|
CarbonsClient.enable
|
train
|
def enable(self):
"""
Enable message carbons.
:raises RuntimeError: if the server does not support message carbons.
:raises aioxmpp.XMPPError: if the server responded with an error to the
|
python
|
{
"resource": ""
}
|
q682
|
CarbonsClient.disable
|
train
|
def disable(self):
"""
Disable message carbons.
:raises RuntimeError: if the server does not support message carbons.
:raises aioxmpp.XMPPError: if the server responded with an error to the
|
python
|
{
"resource": ""
}
|
q683
|
StanzaStream._process_incoming_iq
|
train
|
def _process_incoming_iq(self, stanza_obj):
"""
Process an incoming IQ stanza `stanza_obj`. Calls the response handler,
spawns a request handler coroutine or drops the stanza while logging a
warning if no handler can be found.
"""
self._logger.debug("incoming iq: %r", stanza_obj)
if stanza_obj.type_.is_response:
# iq response
self._logger.debug("iq is response")
keys = [(stanza_obj.from_, stanza_obj.id_)]
if self._local_jid is not None:
# needed for some servers
if keys[0][0] == self._local_jid:
keys.append((None, keys[0][1]))
elif keys[0][0] is None:
keys.append((self._local_jid, keys[0][1]))
for key in keys:
try:
self._iq_response_map.unicast(key, stanza_obj)
self._logger.debug("iq response delivered to key %r", key)
break
except KeyError:
pass
else:
self._logger.warning(
"unexpected IQ response: from=%r, id=%r",
*key)
else:
# iq request
self._logger.debug("iq is request")
key = (stanza_obj.type_, type(stanza_obj.payload))
try:
coro, with_send_reply = self._iq_request_map[key]
except KeyError:
self._logger.warning(
"unhandleable IQ request: from=%r, type_=%r, payload=%r",
stanza_obj.from_,
stanza_obj.type_,
stanza_obj.payload
)
response = stanza_obj.make_reply(type_=structs.IQType.ERROR)
response.error = stanza.Error(
condition=errors.ErrorCondition.SERVICE_UNAVAILABLE,
)
self._enqueue(response)
return
|
python
|
{
"resource": ""
}
|
q684
|
StanzaStream._process_incoming_message
|
train
|
def _process_incoming_message(self, stanza_obj):
"""
Process an incoming message stanza `stanza_obj`.
"""
self._logger.debug("incoming message: %r", stanza_obj)
stanza_obj = self.service_inbound_message_filter.filter(stanza_obj)
if stanza_obj is None:
self._logger.debug("incoming message dropped by service "
"filter chain")
return
|
python
|
{
"resource": ""
}
|
q685
|
StanzaStream._process_incoming_presence
|
train
|
def _process_incoming_presence(self, stanza_obj):
"""
Process an incoming presence stanza `stanza_obj`.
"""
self._logger.debug("incoming presence: %r", stanza_obj)
stanza_obj = self.service_inbound_presence_filter.filter(stanza_obj)
if stanza_obj is None:
self._logger.debug("incoming presence dropped by service filter"
" chain")
return
|
python
|
{
"resource": ""
}
|
q686
|
StanzaStream._process_incoming
|
train
|
def _process_incoming(self, xmlstream, queue_entry):
"""
Dispatch to the different methods responsible for the different stanza
types or handle a non-stanza stream-level element from `stanza_obj`,
which has arrived over the given `xmlstream`.
"""
stanza_obj, exc = queue_entry
# first, handle SM stream objects
if isinstance(stanza_obj, nonza.SMAcknowledgement):
self._logger.debug("received SM ack: %r", stanza_obj)
if not self._sm_enabled:
self._logger.warning("received SM ack, but SM not enabled")
return
self.sm_ack(stanza_obj.counter)
return
elif isinstance(stanza_obj, nonza.SMRequest):
self._logger.debug("received SM request: %r", stanza_obj)
if not self._sm_enabled:
self._logger.warning("received SM request, but SM not enabled")
return
response = nonza.SMAcknowledgement()
response.counter = self._sm_inbound_ctr
self._logger.debug("sending SM ack: %r", response)
xmlstream.send_xso(response)
return
# raise if it is not a stanza
|
python
|
{
"resource": ""
}
|
q687
|
StanzaStream.flush_incoming
|
train
|
def flush_incoming(self):
"""
Flush all incoming queues to the respective processing methods. The
handlers are called as usual, thus it may require at least one
iteration through the asyncio event loop before effects can be seen.
|
python
|
{
"resource": ""
}
|
q688
|
StanzaStream._send_stanza
|
train
|
def _send_stanza(self, xmlstream, token):
"""
Send a stanza token `token` over the given `xmlstream`.
Only sends if the `token` has not been aborted (see
:meth:`StanzaToken.abort`). Sends the state of the token according to
:attr:`sm_enabled`.
"""
if token.state == StanzaState.ABORTED:
return
stanza_obj = token.stanza
if isinstance(stanza_obj, stanza.Presence):
stanza_obj = self.app_outbound_presence_filter.filter(
stanza_obj
)
if stanza_obj is not None:
stanza_obj = self.service_outbound_presence_filter.filter(
stanza_obj
)
elif isinstance(stanza_obj, stanza.Message):
stanza_obj = self.app_outbound_message_filter.filter(
stanza_obj
)
if stanza_obj is not None:
stanza_obj = self.service_outbound_message_filter.filter(
stanza_obj
)
if stanza_obj is None:
|
python
|
{
"resource": ""
}
|
q689
|
StanzaStream.register_iq_request_handler
|
train
|
def register_iq_request_handler(self, type_, payload_cls, cb, *,
with_send_reply=False):
"""
Register a coroutine function or a function returning an awaitable to
run when an IQ request is received.
:param type_: IQ type to react to (must be a request type).
:type type_: :class:`~aioxmpp.IQType`
:param payload_cls: Payload class to react to (subclass of
:class:`~xso.XSO`)
:type payload_cls: :class:`~.XMLStreamClass`
:param cb: Function or coroutine function to invoke
:param with_send_reply: Whether to pass a function to send a reply
to `cb` as second argument.
:type with_send_reply: :class:`bool`
:raises ValueError: if there is already a coroutine registered for this
target
:raises ValueError: if `type_` is not a request IQ type
:raises ValueError: if `type_` is not a valid
:class:`~.IQType` (and cannot be cast to a
:class:`~.IQType`)
The callback `cb` will be called whenever an IQ stanza with the given
`type_` and payload being an instance of the `payload_cls` is received.
The callback must either be a coroutine function or otherwise return an
awaitable. The awaitable must evaluate to a valid value for the
:attr:`.IQ.payload` attribute. That value will be set as the payload
attribute value of an IQ response (with type :attr:`~.IQType.RESULT`)
which is generated and sent by the stream.
If the awaitable or the function raises an exception, it will be
converted to a :class:`~.stanza.Error` object. That error object is
then used as payload for an IQ response (with type
:attr:`~.IQType.ERROR`) which is generated and sent by the stream.
If the exception is a subclass of :class:`aioxmpp.errors.XMPPError`, it
is converted to an :class:`~.stanza.Error` instance directly.
Otherwise, it is wrapped in a :class:`aioxmpp.XMPPCancelError`
with ``undefined-condition``.
For this to work, `payload_cls` *must* be registered using
:meth:`~.IQ.as_payload_class`. Otherwise, the payload will
not be recognised by the stream parser and the IQ is automatically
responded to with a ``feature-not-implemented`` error.
.. warning::
When using a coroutine function for `cb`, there is no guarantee
that concurrent IQ handlers and other coroutines will execute in
any defined order. This implies that the strong ordering guarantees
normally provided by XMPP XML Streams are lost when using coroutine
functions for `cb`. For this reason, the use of non-coroutine
functions is allowed.
.. note::
Using a non-coroutine function for `cb` will generally lead to
less readable code. For the sake of readability, it is recommended
to prefer coroutine functions when strong ordering guarantees are
|
python
|
{
"resource": ""
}
|
q690
|
StanzaStream.register_message_callback
|
train
|
def register_message_callback(self, type_, from_, cb):
"""
Register a callback to be called when a message is received.
:param type_: Message type to listen for, or :data:`None` for a
wildcard match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback function to call
:raises ValueError: if another function is already registered for the
same ``(type_, from_)`` pair.
:raises ValueError: if `type_` is not a valid
:class:`~.MessageType` (and cannot be cast
to a :class:`~.MessageType`)
`cb` will be called whenever a message stanza matching the `type_` and
`from_` is received, according to the wildcarding rules below. More
specific callbacks win over less specific callbacks, and the match on
the `from_` address takes precedence over the match on the `type_`.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.MessageType` member.
.. deprecated:: 0.7
Passing a :class:`str` as `type_` argument is deprecated and will
|
python
|
{
"resource": ""
}
|
q691
|
StanzaStream.register_presence_callback
|
train
|
def register_presence_callback(self, type_, from_, cb):
"""
Register a callback to be called when a presence stanza is received.
:param type_: Presence type to listen for.
:type type_: :class:`~.PresenceType`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`.
:param cb: Callback function
:raises ValueError: if another listener with the same ``(type_,
from_)`` pair is already registered
:raises ValueError: if `type_` is not a valid
:class:`~.PresenceType` (and cannot be cast
to a :class:`~.PresenceType`)
`cb` will be called whenever a presence stanza matching the `type_` is
received from the specified sender. `from_` may be :data:`None` to
indicate a wildcard. Like with :meth:`register_message_callback`, more
specific callbacks win over less specific callbacks. The fallback order
is identical, except that the ``type_=None`` entries described there do
not apply for presence stanzas and are thus omitted.
See :meth:`.SimpleStanzaDispatcher.register_callback` for the exact
wildcarding rules.
.. versionchanged:: 0.7
The `type_` argument is now supposed to be a
:class:`~.PresenceType` member.
.. deprecated:: 0.7
|
python
|
{
"resource": ""
}
|
q692
|
StanzaStream.wait_stop
|
train
|
def wait_stop(self):
"""
Stop the stream and wait for it to stop.
See :meth:`stop` for the general stopping conditions. You can assume
that :meth:`stop` is the first thing this coroutine calls.
"""
if not self.running:
|
python
|
{
"resource": ""
}
|
q693
|
StanzaStream.resume_sm
|
train
|
def resume_sm(self, xmlstream):
"""
Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
"""
if self.running:
raise RuntimeError("Cannot resume Stream Management while"
" StanzaStream is running")
self._start_prepare(xmlstream, self.recv_stanza)
try:
response = yield from protocol.send_and_wait_for(
xmlstream,
[
nonza.SMResume(previd=self.sm_id,
|
python
|
{
"resource": ""
}
|
q694
|
StanzaStream._send_immediately
|
train
|
def _send_immediately(self, stanza, *, timeout=None, cb=None):
"""
Send a stanza without waiting for the stream to be ready to send
stanzas.
This is only useful from within :class:`aioxmpp.node.Client` before
the stream is fully established.
"""
stanza.autoset_id()
self._logger.debug("sending %r and waiting for it to be sent",
stanza)
if not isinstance(stanza, stanza_.IQ) or stanza.type_.is_response:
if cb is not None:
raise ValueError(
"cb not supported with non-IQ non-request stanzas"
)
yield from self._enqueue(stanza)
return
# we use the long way with a custom listener instead of a future here
# to ensure that the callback is called synchronously from within the
# queue handling loop.
# we need that to ensure that the strong ordering guarantees reach the
# `cb` function.
fut = asyncio.Future()
def nested_cb(task):
"""
This callback is used to handle awaitables returned by the `cb`.
"""
nonlocal fut
if task.exception() is None:
fut.set_result(task.result())
else:
fut.set_exception(task.exception())
def handler_ok(stanza):
"""
This handler is invoked synchronously by
:meth:`_process_incoming_iq` (via
:class:`aioxmpp.callbacks.TagDispatcher`) for response stanzas
(including error stanzas).
"""
nonlocal fut
if fut.cancelled():
return
if cb is not None:
try:
nested_fut = cb(stanza)
except Exception as exc:
fut.set_exception(exc)
else:
if nested_fut is not None:
nested_fut.add_done_callback(nested_cb)
return
|
python
|
{
"resource": ""
}
|
q695
|
_process_features
|
train
|
def _process_features(features):
"""
Generate the `Features String` from an iterable of features.
:param features: The features to generate the features string from.
:type features: :class:`~collections.abc.Iterable` of :class:`str`
|
python
|
{
"resource": ""
}
|
q696
|
_process_identities
|
train
|
def _process_identities(identities):
"""
Generate the `Identities String` from an iterable of identities.
:param identities: The identities to generate the features string from.
:type identities: :class:`~collections.abc.Iterable` of
:class:`~.disco.xso.Identity`
:return: The `Identities String`
:rtype: :class:`bytes`
Generate the `Identities String` from the given
|
python
|
{
"resource": ""
}
|
q697
|
_process_extensions
|
train
|
def _process_extensions(exts):
"""
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given
|
python
|
{
"resource": ""
}
|
q698
|
OrderedStateMachine.wait_for
|
train
|
def wait_for(self, new_state):
"""
Wait for an exact state `new_state` to be reached by the state
machine.
If the state is skipped, that is, if a state which is greater than
`new_state` is written to :attr:`state`, the coroutine raises
|
python
|
{
"resource": ""
}
|
q699
|
OrderedStateMachine.wait_for_at_least
|
train
|
def wait_for_at_least(self, new_state):
"""
Wait for a state to be entered which is greater than or equal to
`new_state` and return.
"""
if not (self._state < new_state):
|
python
|
{
"resource": ""
}
|