def remaining(
        start: datetime, ends_in: timedelta,
        now: datetime | None = None,
        relative: bool = False) -> timedelta:
    """Calculate the remaining time for a start date and a timedelta.

    For example, "how many seconds left for 30 seconds after start?"

    Arguments:
        start (~datetime.datetime): Starting date.
        ends_in (~datetime.timedelta): The end delta.
        relative (bool): If enabled the end time will be calculated
            using :func:`delta_resolution` (i.e., rounded to the
            resolution of `ends_in`).
        now (~datetime.datetime): Current time, defaults to
            ``datetime.now(timezone.utc)``.

    Returns:
        ~datetime.timedelta: Remaining time.
    """
    now = now or datetime.now(datetime_timezone.utc)
    if str(start.tzinfo) == str(now.tzinfo) and now.utcoffset() != start.utcoffset():
        # DST started/ended
        start = start.replace(tzinfo=now.tzinfo)
    end_date = start + ends_in
    if relative:
        end_date = delta_resolution(end_date, ends_in).replace(microsecond=0)
    ret = end_date - now
    if C_REMDEBUG:  # pragma: no cover
        print('rem: NOW:{!r} START:{!r} ENDS_IN:{!r} END_DATE:{} REM:{}'.format(
            now, start, ends_in, end_date, ret))
    return ret
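A minimal usage sketch (assuming the function is importable from celery.utils.time):

from datetime import datetime, timedelta, timezone

# "How many seconds left for 30 seconds after start?" -- started 10 seconds
# ago, so roughly 20 seconds should remain.
start = datetime.now(timezone.utc) - timedelta(seconds=10)
print(remaining(start, timedelta(seconds=30)))  # ~ 0:00:20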
def rate(r: str) -> float:
    """Convert rate string (`"100/m"`, `"2/h"` or `"0.5/s"`) to operations per second."""
    if r:
        if isinstance(r, str):
            ops, _, modifier = r.partition('/')
            return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0
        return r or 0
    return 0
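A sketch of the conversion, assuming RATE_MODIFIER_MAP carries the usual per-second, per-minute and per-hour modifiers (the map itself is not shown above):

rate('0.5/s')  # -> 0.5         (half an operation per second)
rate('100/m')  # -> 1.666...    (100 per minute is ~1.67 per second)
rate('2/h')    # -> 0.000555... (2 per hour)
rate('10')     # -> 10.0        (no modifier defaults to '/s')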
def weekday(name: str) -> int:
    """Return the position of a weekday: 0 - 7, where 0 is Sunday.

    Example:
        >>> weekday('sunday'), weekday('sun'), weekday('mon')
        (0, 0, 1)
    """
    abbreviation = name[0:3].lower()
    try:
        return WEEKDAYS[abbreviation]
    except KeyError:
        # Show original day name in exception, instead of abbr.
        raise KeyError(name)
def humanize_seconds(
        secs: int, prefix: str = '', sep: str = '', now: str = 'now',
        microseconds: bool = False) -> str:
    """Show seconds in human form.

    For example, 60 becomes "1 minute", and 7200 becomes "2 hours".

    Arguments:
        prefix (str): can be used to add a preposition to the output
            (e.g., 'in' will give 'in 1 second', but add nothing to 'now').
        now (str): Literal 'now'.
        microseconds (bool): Include microseconds.
    """
    secs = float(format(float(secs), '.2f'))
    for unit, divider, formatter in TIME_UNITS:
        if secs >= divider:
            w = secs / float(divider)
            return '{}{}{} {}'.format(prefix, sep, formatter(w),
                                      pluralize(w, unit))
    if microseconds and secs > 0.0:
        return '{prefix}{sep}{0:.2f} seconds'.format(
            secs, sep=sep, prefix=prefix)
    return now
def maybe_iso8601(dt: datetime | str | None) -> None | datetime:
    """Either ``datetime | str -> datetime`` or ``None -> None``."""
    if not dt:
        return
    if isinstance(dt, datetime):
        return dt
    return isoparse(dt)
def is_naive(dt: datetime) -> bool:
    """Return True if :class:`~datetime.datetime` is naive, meaning it doesn't have timezone info set."""
    return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
def _can_detect_ambiguous(tz: tzinfo) -> bool:
    """Helper function to determine if a timezone can detect ambiguous times using dateutil."""
    return isinstance(tz, ZoneInfo) or hasattr(tz, "is_ambiguous")
def _is_ambiguous(dt: datetime, tz: tzinfo) -> bool:
    """Helper function to determine if a datetime is ambiguous in the given timezone, using Python's dateutil module.

    Returns False if the timezone cannot detect ambiguity, or if there is
    no ambiguity, otherwise True.

    In order to detect ambiguous datetimes, the timezone must be built using
    ZoneInfo, or have an is_ambiguous method. Previously, pytz timezones
    would throw an AmbiguousTimeError if the localized dt was ambiguous,
    but now we need to specifically check for ambiguity with dateutil, as
    pytz is deprecated.
    """
    return _can_detect_ambiguous(tz) and dateutil_tz.datetime_ambiguous(dt)
def make_aware(dt: datetime, tz: tzinfo) -> datetime:
    """Set timezone for a :class:`~datetime.datetime` object."""
    dt = dt.replace(tzinfo=tz)
    if _is_ambiguous(dt, tz):
        dt = min(dt.replace(fold=0), dt.replace(fold=1))
    return dt
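A sketch of the ambiguous-time handling (dates are illustrative; Berlin left DST on 2023-10-29, so 02:30 occurred twice that night):

from datetime import datetime
from zoneinfo import ZoneInfo

tz = ZoneInfo("Europe/Berlin")
dt = make_aware(datetime(2023, 10, 29, 2, 30), tz)
# The ambiguity resolves to the first (fold=0) occurrence,
# i.e. the pre-rollback +02:00 offset:
print(dt.utcoffset())  # 2:00:00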
def localize(dt: datetime, tz: tzinfo) -> datetime:
    """Convert aware :class:`~datetime.datetime` to another timezone.

    Using a ZoneInfo timezone will give the most flexibility in
    terms of ambiguous DST handling.
    """
    if is_naive(dt):  # Ensure timezone aware datetime
        dt = make_aware(dt, tz)
    if dt.tzinfo == ZoneInfo("UTC"):
        dt = dt.astimezone(tz)  # Always safe to call astimezone on utc zones
    return dt
def to_utc(dt: datetime) -> datetime:
    """Convert naive :class:`~datetime.datetime` to UTC."""
    return make_aware(dt, timezone.utc)
def maybe_make_aware(dt: datetime, tz: tzinfo | None = None,
                     naive_as_utc: bool = True) -> datetime:
    """Convert dt to aware datetime, do nothing if dt is already aware."""
    if is_naive(dt):
        if naive_as_utc:
            dt = to_utc(dt)
        return localize(
            dt, timezone.utc if tz is None else timezone.tz_or_local(tz),
        )
    return dt
def utcoffset(
        time: ModuleType = _time,
        localtime: Callable[..., _time.struct_time] = _time.localtime) -> float:
    """Return the current offset to UTC in hours."""
    if localtime().tm_isdst:
        return time.altzone // 3600
    return time.timezone // 3600
def adjust_timestamp(ts: float, offset: int,
                     here: Callable[..., float] = utcoffset) -> float:
    """Adjust timestamp based on provided utcoffset."""
    return ts - (offset - here()) * 3600
def get_exponential_backoff_interval(
    factor: int,
    retries: int,
    maximum: int,
    full_jitter: bool = False
) -> int:
    """Calculate the exponential backoff wait time."""
    # Will be zero if factor equals 0
    countdown = min(maximum, factor * (2 ** retries))
    # Full jitter according to
    # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
    if full_jitter:
        countdown = random.randrange(countdown + 1)
    # Adjust according to maximum wait time and account for negative values.
    return max(0, countdown)
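To make the arithmetic concrete (countdowns are in seconds):

# factor=1, maximum=300: the delay doubles per retry until capped at 300.
[get_exponential_backoff_interval(1, r, 300) for r in range(10)]
# -> [1, 2, 4, 8, 16, 32, 64, 128, 256, 300]

# With full_jitter=True each value is instead a random pick from
# [0, countdown], which de-synchronizes retry storms across clients.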
def _boundmethod_safe_weakref(obj):
    """Get weakref constructor appropriate for `obj`.  `obj` may be a bound method.

    Bound method objects must be special-cased because they're usually
    garbage collected immediately, even if the instance they're bound to
    persists.

    Returns:
        a (weakref constructor, main object) tuple. `weakref constructor`
        is either :class:`weakref.ref` or :class:`weakref.WeakMethod`.
        `main object` is the instance that `obj` is bound to if it is a
        bound method; otherwise `main object` is simply `obj`.
    """
    try:
        obj.__func__
        obj.__self__
        # Bound method
        return WeakMethod, obj.__self__
    except AttributeError:
        # Not a bound method
        return weakref.ref, obj
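A quick sketch of why the special-casing matters (CPython refcounting assumed; Worker is a made-up class):

import weakref

class Worker:
    def ping(self):
        return 'pong'

w = Worker()

# A plain weakref to a bound method dies immediately: `w.ping` creates a
# temporary method object that is collected right after this statement.
assert weakref.ref(w.ping)() is None

# _boundmethod_safe_weakref detects the bound method and selects
# weakref.WeakMethod, which stays valid as long as `w` is alive.
ctor, main = _boundmethod_safe_weakref(w.ping)
assert ctor is weakref.WeakMethod and main is w
assert ctor(w.ping)()() == 'pong'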
def get_file(*args):
    # type: (*str) -> str
    """Get filename for static file."""
    return os.path.join(os.path.abspath(os.path.dirname(__file__)), *args)
def logo():
    # type: () -> str
    """Celery logo image."""
    return get_file('celery_128.png')
def report(state):
    """Information about Celery installation for bug reports."""
    return ok(state.app.bugreport())
def conf(state, with_defaults=False, **kwargs):
    """List configuration."""
    return jsonify(state.app.conf.table(with_defaults=with_defaults),
                   keyfilter=_wanted_config_key,
                   unknown_type_filter=safe_repr)
def query_task(state, ids, **kwargs):
    """Query for task information by id."""
    return {
        req.id: (_state_of_task(req), req.info())
        for req in _find_requests_by_id(maybe_list(ids))
    }
def revoke(state, task_id, terminate=False, signal=None, **kwargs):
    """Revoke task by task id (or list of ids).

    Keyword Arguments:
        terminate (bool): Also terminate the process if the task is active.
        signal (str): Name of signal to use for terminate (e.g., ``KILL``).
    """
    # pylint: disable=redefined-outer-name
    # XXX Note that this redefines `terminate`:
    #     Outside of this scope that is a function.
    # supports list argument since 3.1
    task_ids, task_id = set(maybe_list(task_id) or []), None
    task_ids = _revoke(state, task_ids, terminate, signal, **kwargs)
    if isinstance(task_ids, dict) and 'ok' in task_ids:
        return task_ids
    return ok(f'tasks {task_ids} flagged as revoked')
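For context, these remote-control commands are normally reached through app.control rather than called directly. A hedged client-side sketch (broker URL and task id are hypothetical):

from celery import Celery

app = Celery('proj', broker='redis://localhost:6379/0')  # hypothetical broker

# Revoke one task, terminating it if it is already executing.
app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',  # hypothetical id
                   terminate=True, signal='SIGKILL')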
def revoke_by_stamped_headers(state, headers, terminate=False, signal=None, **kwargs):
    """Revoke task by header (or list of headers).

    Keyword Arguments:
        headers (dict): Dictionary that contains stamping scheme name as
            keys and stamps as values. If headers is a list, it will be
            converted to a dictionary.
        terminate (bool): Also terminate the process if the task is active.
        signal (str): Name of signal to use for terminate (e.g., ``KILL``).

    Sample headers input:
        {'mtask_id': [id1, id2, id3]}
    """
    # pylint: disable=redefined-outer-name
    # XXX Note that this redefines `terminate`:
    #     Outside of this scope that is a function.
    # supports list argument since 3.1
    signum = _signals.signum(signal or TERM_SIGNAME)

    if isinstance(headers, list):
        headers = {h.split('=')[0]: h.split('=')[1] for h in headers}

    for header, stamps in headers.items():
        updated_stamps = (maybe_list(worker_state.revoked_stamps.get(header) or [])
                          + list(maybe_list(stamps)))
        worker_state.revoked_stamps[header] = updated_stamps

    if not terminate:
        return ok(f'headers {headers} flagged as revoked, but not terminated')

    active_requests = list(worker_state.active_requests)

    terminated_scheme_to_stamps_mapping = defaultdict(set)

    # Terminate all running tasks of matching headers
    # Go through all active requests, and check if one of the
    # requests has a stamped header that matches the given headers to revoke
    for req in active_requests:
        # Check stamps exist
        if hasattr(req, "stamps") and req.stamps:
            # if so, check if any stamps match a revoked stamp
            for expected_header_key, expected_header_value in headers.items():
                if expected_header_key in req.stamps:
                    expected_header_value = maybe_list(expected_header_value)
                    actual_header = maybe_list(req.stamps[expected_header_key])
                    matching_stamps_for_request = set(actual_header) & set(expected_header_value)

                    # Check any possible match regardless if the stamps are a sequence or not
                    if matching_stamps_for_request:
                        terminated_scheme_to_stamps_mapping[expected_header_key].update(
                            matching_stamps_for_request)
                        req.terminate(state.consumer.pool, signal=signum)

    if not terminated_scheme_to_stamps_mapping:
        return ok(f'headers {headers} were not terminated')
    return ok(f'headers {terminated_scheme_to_stamps_mapping} revoked')
def terminate(state, signal, task_id, **kwargs):
    """Terminate task by task id (or list of ids)."""
    return revoke(state, task_id, terminate=True, signal=signal)
def rate_limit(state, task_name, rate_limit, **kwargs):
    """Tell worker(s) to modify the rate limit for a task by type.

    See Also:
        :attr:`celery.app.task.Task.rate_limit`.

    Arguments:
        task_name (str): Type of task to set rate limit for.
        rate_limit (int, str): New rate limit.
    """
    # pylint: disable=redefined-outer-name
    # XXX Note that this redefines `rate_limit`:
    #     Outside of this scope that is a function.
    try:
        rate(rate_limit)
    except ValueError as exc:
        return nok(f'Invalid rate limit string: {exc!r}')

    try:
        state.app.tasks[task_name].rate_limit = rate_limit
    except KeyError:
        logger.error('Rate limit attempt for unknown task %s',
                     task_name, exc_info=True)
        return nok('unknown task')

    state.consumer.reset_rate_limits()

    if not rate_limit:
        logger.info('Rate limits disabled for tasks of type %s', task_name)
        return ok('rate limit disabled successfully')

    logger.info('New rate limit for tasks of type %s: %s.',
                task_name, rate_limit)
    return ok('new rate limit set successfully')
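Continuing the client-side sketch above, the broadcast form would look roughly like this ('tasks.add' is a hypothetical task name):

# Allow at most ten tasks of this type per minute, on all workers:
app.control.rate_limit('tasks.add', '10/m')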
def time_limit(state, task_name=None, hard=None, soft=None, **kwargs):
    """Tell worker(s) to modify the time limit for task by type.

    Arguments:
        task_name (str): Name of task to change.
        hard (float): Hard time limit.
        soft (float): Soft time limit.
    """
    try:
        task = state.app.tasks[task_name]
    except KeyError:
        logger.error('Change time limit attempt for unknown task %s',
                     task_name, exc_info=True)
        return nok('unknown task')

    task.soft_time_limit = soft
    task.time_limit = hard

    logger.info('New time limits for tasks of type %s: soft=%s hard=%s',
                task_name, soft, hard)
    return ok('time limits set successfully')
def clock(state, **kwargs):
    """Get current logical clock value."""
    return {'clock': state.app.clock.value}
def election(state, id, topic, action=None, **kwargs):
    """Hold election.

    Arguments:
        id (str): Unique election id.
        topic (str): Election topic.
        action (str): Action to take for elected actor.
    """
    if state.consumer.gossip:
        state.consumer.gossip.election(id, topic, action)
def enable_events(state):
    """Tell worker(s) to send task-related events."""
    dispatcher = state.consumer.event_dispatcher
    if dispatcher.groups and 'task' not in dispatcher.groups:
        dispatcher.groups.add('task')
        logger.info('Events of group {task} enabled by remote.')
        return ok('task events enabled')
    return ok('task events already enabled')
def disable_events(state):
    """Tell worker(s) to stop sending task-related events."""
    dispatcher = state.consumer.event_dispatcher
    if 'task' in dispatcher.groups:
        dispatcher.groups.discard('task')
        logger.info('Events of group {task} disabled by remote.')
        return ok('task events disabled')
    return ok('task events already disabled')
def heartbeat(state):
    """Tell worker(s) to send event heartbeat immediately."""
    logger.debug('Heartbeat requested by remote.')
    dispatcher = state.consumer.event_dispatcher
    dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO)
def hello(state, from_node, revoked=None, **kwargs):
    """Request mingle sync-data."""
    # pylint: disable=redefined-outer-name
    # XXX Note that this redefines `revoked`:
    #     Outside of this scope that is a function.
    if from_node != state.hostname:
        logger.info('sync with %s', from_node)
        if revoked:
            worker_state.revoked.update(revoked)
        # Do not send expired items to the other worker.
        worker_state.revoked.purge()
        return {
            'revoked': worker_state.revoked._data,
            'clock': state.app.clock.forward(),
        }
def ping(state, **kwargs):
    """Ping worker(s)."""
    return ok('pong')
def stats(state, **kwargs):
    """Request worker statistics/information."""
    return state.consumer.controller.stats()
def scheduled(state, **kwargs):
    """List of currently scheduled ETA/countdown tasks."""
    return list(_iter_schedule_requests(state.consumer.timer))
def reserved(state, **kwargs):
    """List of currently reserved tasks, not including scheduled/active."""
    reserved_tasks = (
        state.tset(worker_state.reserved_requests) -
        state.tset(worker_state.active_requests)
    )
    if not reserved_tasks:
        return []
    return [request.info() for request in reserved_tasks]
def active(state, safe=False, **kwargs):
    """List of tasks currently being executed."""
    return [request.info(safe=safe)
            for request in state.tset(worker_state.active_requests)]
def revoked(state, **kwargs):
    """List of revoked task-ids."""
    return list(worker_state.revoked)
def registered(state, taskinfoitems=None, builtins=False, **kwargs):
    """List of registered tasks.

    Arguments:
        taskinfoitems (Sequence[str]): List of task attributes to include.
            Defaults to ``exchange,routing_key,rate_limit``.
        builtins (bool): Also include built-in tasks.
    """
    reg = state.app.tasks
    taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS

    tasks = reg if builtins else (
        task for task in reg if not task.startswith('celery.'))

    def _extract_info(task):
        fields = {
            field: str(getattr(task, field, None)) for field in taskinfoitems
            if getattr(task, field, None) is not None
        }
        if fields:
            info = ['='.join(f) for f in fields.items()]
            return '{} [{}]'.format(task.name, ' '.join(info))
        return task.name

    return [_extract_info(reg[task]) for task in sorted(tasks)]
def objgraph(state, num=200, max_depth=10, type='Request'):  # pragma: no cover
    """Create graph of uncollected objects (memory-leak debugging).

    Arguments:
        num (int): Max number of objects to graph.
        max_depth (int): Traverse at most n levels deep.
        type (str): Name of object to graph.  Default is ``"Request"``.
    """
    try:
        import objgraph as _objgraph
    except ImportError:
        raise ImportError('Requires the objgraph library')
    logger.info('Dumping graph for type %r', type)
    with tempfile.NamedTemporaryFile(prefix='cobjg',
                                     suffix='.png', delete=False) as fh:
        objects = _objgraph.by_type(type)[:num]
        _objgraph.show_backrefs(
            objects,
            max_depth=max_depth, highlight=lambda v: v in objects,
            filename=fh.name,
        )
        return {'filename': fh.name}
def memsample(state, **kwargs):
    """Sample current RSS memory usage."""
    from celery.utils.debug import sample_mem
    return sample_mem()
def memdump(state, samples=10, **kwargs):  # pragma: no cover
    """Dump statistics of previous memsample requests."""
    from celery.utils import debug
    out = io.StringIO()
    debug.memdump(file=out)
    return out.getvalue()
def pool_grow(state, n=1, **kwargs):
    """Grow pool by n processes/threads."""
    if state.consumer.controller.autoscaler:
        return nok("pool_grow is not supported with autoscale. "
                   "Adjust autoscale range instead.")
    else:
        state.consumer.pool.grow(n)
        state.consumer._update_prefetch_count(n)
    return ok('pool will grow')
def pool_shrink(state, n=1, **kwargs):
    """Shrink pool by n processes/threads."""
    if state.consumer.controller.autoscaler:
        return nok("pool_shrink is not supported with autoscale. "
                   "Adjust autoscale range instead.")
    else:
        state.consumer.pool.shrink(n)
        state.consumer._update_prefetch_count(-n)
    return ok('pool will shrink')
def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs):
    """Restart execution pool."""
    if state.app.conf.worker_pool_restarts:
        state.consumer.controller.reload(modules, reload, reloader=reloader)
        return ok('reload started')
    else:
        raise ValueError('Pool restarts not enabled')
def autoscale(state, max=None, min=None):
    """Modify autoscale settings."""
    autoscaler = state.consumer.controller.autoscaler
    if autoscaler:
        max_, min_ = autoscaler.update(max, min)
        return ok(f'autoscale now max={max_} min={min_}')
    raise ValueError('Autoscale not enabled')
def shutdown(state, msg='Got shutdown from remote', **kwargs):
    """Shutdown worker(s)."""
    logger.warning(msg)
    raise WorkerShutdown(0)
def add_consumer(state, queue, exchange=None, exchange_type=None,
                 routing_key=None, **options):
    """Tell worker(s) to consume from task queue by name."""
    state.consumer.call_soon(
        state.consumer.add_task_queue,
        queue, exchange, exchange_type or 'direct', routing_key, **options)
    return ok(f'add consumer {queue}')
def cancel_consumer(state, queue, **_):
    """Tell worker(s) to stop consuming from task queue by name."""
    state.consumer.call_soon(
        state.consumer.cancel_task_queue, queue,
    )
    return ok(f'no longer consuming from {queue}')
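As with the other control commands, the usual entry point is app.control; a hedged sketch ('priority_high' is a hypothetical queue name):

# Tell all workers to also consume from 'priority_high':
app.control.add_consumer('priority_high', reply=True)

# ...and later stop them again:
app.control.cancel_consumer('priority_high', reply=True)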
def active_queues(state):
    """List the task queues a worker is currently consuming from."""
    if state.consumer.task_consumer:
        return [dict(queue.as_dict(recurse=True))
                for queue in state.consumer.task_consumer.queues]
    return []
def asynloop(obj, connection, consumer, blueprint, hub, qos,
             heartbeat, clock, hbrate=2.0):
    """Non-blocking event loop."""
    RUN = bootsteps.RUN
    update_qos = qos.update
    errors = connection.connection_errors

    on_task_received = obj.create_task_handler()

    heartbeat_error = _enable_amqheartbeats(hub.timer, connection, rate=hbrate)

    consumer.on_message = on_task_received
    obj.controller.register_with_event_loop(hub)
    obj.register_with_event_loop(hub)
    consumer.consume()
    obj.on_ready()

    # did_start_ok will verify that pool processes were able to start,
    # but this will only work the first time we start, as
    # maxtasksperchild will mess up metrics.
    if not obj.restart_count and not obj.pool.did_start_ok():
        raise WorkerLostError('Could not start worker processes')

    # consumer.consume() may have prefetched up to our
    # limit - drain an event so we're in a clean state
    # prior to starting our event loop.
    if connection.transport.driver_type == 'amqp':
        hub.call_soon(_quick_drain, connection)

    # FIXME: Use loop.run_forever
    # Tried and works, but no time to test properly before release.
    hub.propagate_errors = errors
    loop = hub.create_loop()

    try:
        while blueprint.state == RUN and obj.connection:
            state.maybe_shutdown()
            if heartbeat_error[0] is not None:
                raise heartbeat_error[0]

            # We only update QoS when there's no more messages to read.
            # This groups together qos calls, and makes sure that remote
            # control commands will be prioritized over task messages.
            if qos.prev != qos.value:
                update_qos()

            try:
                next(loop)
            except StopIteration:
                loop = hub.create_loop()
    finally:
        try:
            hub.reset()
        except Exception as exc:  # pylint: disable=broad-except
            logger.exception(
                'Error cleaning up after event loop: %r', exc)
def synloop(obj, connection, consumer, blueprint, hub, qos,
            heartbeat, clock, hbrate=2.0, **kwargs):
    """Fallback blocking event loop for transports that don't support AIO."""
    RUN = bootsteps.RUN
    on_task_received = obj.create_task_handler()
    perform_pending_operations = obj.perform_pending_operations
    heartbeat_error = [None]
    if getattr(obj.pool, 'is_green', False):
        heartbeat_error = _enable_amqheartbeats(
            obj.timer, connection, rate=hbrate)
    consumer.on_message = on_task_received
    consumer.consume()

    obj.on_ready()

    while blueprint.state == RUN and obj.connection:
        state.maybe_shutdown()
        if heartbeat_error[0] is not None:
            raise heartbeat_error[0]
        if qos.prev != qos.value:
            qos.update()
        try:
            perform_pending_operations()
            connection.drain_events(timeout=2.0)
        except socket.timeout:
            pass
        except OSError:
            if blueprint.state == RUN:
                raise
def maybe_shutdown():
    """Shutdown if flags have been set."""
    if should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)
    elif should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
def task_reserved(request,
                  add_request=requests.__setitem__,
                  add_reserved_request=reserved_requests.add):
    """Update global state when a task has been reserved."""
    add_request(request.id, request)
    add_reserved_request(request)
def task_accepted(request,
                  _all_total_count=None,
                  add_request=requests.__setitem__,
                  add_active_request=active_requests.add,
                  add_to_total_count=total_count.update):
    """Update global state when a task has been accepted."""
    if not _all_total_count:
        _all_total_count = all_total_count
    add_request(request.id, request)
    add_active_request(request)
    add_to_total_count({request.name: 1})
    all_total_count[0] += 1
def task_ready(request,
               successful=False,
               remove_request=requests.pop,
               discard_active_request=active_requests.discard,
               discard_reserved_request=reserved_requests.discard):
    """Update global state when a task is ready."""
    if successful:
        successful_requests.add(request.id)

    remove_request(request.id, None)
    discard_active_request(request)
    discard_reserved_request(request)
def hybrid_to_proto2(message, body):
    """Create a fresh protocol 2 message from a hybrid protocol 1/2 message."""
    try:
        args, kwargs = body.get('args', ()), body.get('kwargs', {})
        kwargs.items  # pylint: disable=pointless-statement
    except KeyError:
        raise InvalidTaskError('Message does not have args/kwargs')
    except AttributeError:
        raise InvalidTaskError(
            'Task keyword arguments must be a mapping',
        )

    headers = {
        'lang': body.get('lang'),
        'task': body.get('task'),
        'id': body.get('id'),
        'root_id': body.get('root_id'),
        'parent_id': body.get('parent_id'),
        'group': body.get('group'),
        'meth': body.get('meth'),
        'shadow': body.get('shadow'),
        'eta': body.get('eta'),
        'expires': body.get('expires'),
        'retries': body.get('retries', 0),
        'timelimit': body.get('timelimit', (None, None)),
        'argsrepr': body.get('argsrepr'),
        'kwargsrepr': body.get('kwargsrepr'),
        'origin': body.get('origin'),
    }
    headers.update(message.headers or {})

    embed = {
        'callbacks': body.get('callbacks'),
        'errbacks': body.get('errbacks'),
        'chord': body.get('chord'),
        'chain': None,
    }

    return (args, kwargs, embed), headers, True, body.get('utc', True)
def proto1_to_proto2(message, body):
    """Convert Task message protocol 1 arguments to protocol 2.

    Returns:
        Tuple: of ``(body, headers, already_decoded_status, utc)``
    """
    try:
        args, kwargs = body.get('args', ()), body.get('kwargs', {})
        kwargs.items  # pylint: disable=pointless-statement
    except KeyError:
        raise InvalidTaskError('Message does not have args/kwargs')
    except AttributeError:
        raise InvalidTaskError(
            'Task keyword arguments must be a mapping',
        )
    body.update(
        argsrepr=saferepr(args),
        kwargsrepr=saferepr(kwargs),
        headers=message.headers,
    )
    try:
        body['group'] = body['taskset']
    except KeyError:
        pass
    embed = {
        'callbacks': body.get('callbacks'),
        'errbacks': body.get('errbacks'),
        'chord': body.get('chord'),
        'chain': None,
    }
    return (args, kwargs, embed), body, True, body.get('utc', True)
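A sketch of what the conversion does to a protocol 1 body (field values are hypothetical; the message object is stubbed just enough to satisfy the function):

from types import SimpleNamespace

body = {'task': 'tasks.add', 'id': 'hypothetical-id',
        'args': (2, 2), 'kwargs': {}, 'taskset': 'group-id'}
message = SimpleNamespace(headers={})

(args, kwargs, embed), new_body, decoded, utc = proto1_to_proto2(message, body)
assert args == (2, 2) and decoded and utc
assert new_body['group'] == 'group-id'  # protocol 1 'taskset' becomes 'group'
assert 'argsrepr' in new_body           # safe reprs added for protocol 2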
def default(task, app, consumer,
            info=logger.info, error=logger.error, task_reserved=task_reserved,
            to_system_tz=timezone.to_system, bytes=bytes,
            proto1_to_proto2=proto1_to_proto2):
    """Default task execution strategy.

    Note:
        Strategies are here as an optimization, so sadly
        it's not very easy to override.
    """
    hostname = consumer.hostname
    connection_errors = consumer.connection_errors
    _does_info = logger.isEnabledFor(logging.INFO)

    # task event related
    # (optimized to avoid calling request.send_event)
    eventer = consumer.event_dispatcher
    events = eventer and eventer.enabled
    send_event = eventer and eventer.send
    task_sends_events = events and task.send_events

    call_at = consumer.timer.call_at
    apply_eta_task = consumer.apply_eta_task
    rate_limits_enabled = not consumer.disable_rate_limits
    get_bucket = consumer.task_buckets.__getitem__
    handle = consumer.on_task_request
    limit_task = consumer._limit_task
    limit_post_eta = consumer._limit_post_eta
    Request = symbol_by_name(task.Request)
    Req = create_request_cls(Request, task, consumer.pool, hostname,
                             eventer, app=app)

    revoked_tasks = consumer.controller.state.revoked

    def task_message_handler(message, body, ack, reject, callbacks,
                             to_timestamp=to_timestamp):
        if body is None and 'args' not in message.payload:
            body, headers, decoded, utc = (
                message.body, message.headers, False, app.uses_utc_timezone(),
            )
        else:
            if 'args' in message.payload:
                body, headers, decoded, utc = hybrid_to_proto2(
                    message, message.payload)
            else:
                body, headers, decoded, utc = proto1_to_proto2(message, body)

        req = Req(
            message,
            on_ack=ack, on_reject=reject, app=app, hostname=hostname,
            eventer=eventer, task=task, connection_errors=connection_errors,
            body=body, headers=headers, decoded=decoded, utc=utc,
        )
        if _does_info:
            # Similar to `app.trace.info()`, we pass the formatting args as the
            # `extra` kwarg for custom log handlers
            context = {
                'id': req.id,
                'name': req.name,
                'args': req.argsrepr,
                'kwargs': req.kwargsrepr,
                'eta': req.eta,
            }
            info(_app_trace.LOG_RECEIVED, context, extra={'data': context})
        if (req.expires or req.id in revoked_tasks) and req.revoked():
            return

        signals.task_received.send(sender=consumer, request=req)

        if task_sends_events:
            send_event(
                'task-received',
                uuid=req.id, name=req.name,
                args=req.argsrepr, kwargs=req.kwargsrepr,
                root_id=req.root_id, parent_id=req.parent_id,
                retries=req.request_dict.get('retries', 0),
                eta=req.eta and req.eta.isoformat(),
                expires=req.expires and req.expires.isoformat(),
            )

        bucket = None
        eta = None
        if req.eta:
            try:
                if req.utc:
                    eta = to_timestamp(to_system_tz(req.eta))
                else:
                    eta = to_timestamp(req.eta, app.timezone)
            except (OverflowError, ValueError) as exc:
                error("Couldn't convert ETA %r to timestamp: %r. Task: %r",
                      req.eta, exc, req.info(safe=True), exc_info=True)
                req.reject(requeue=False)
        if rate_limits_enabled:
            bucket = get_bucket(task.name)

        if eta and bucket:
            consumer.qos.increment_eventually()
            return call_at(eta, limit_post_eta, (req, bucket, 1),
                           priority=6)
        if eta:
            consumer.qos.increment_eventually()
            call_at(eta, apply_eta_task, (req,), priority=6)
            return task_message_handler
        if bucket:
            return limit_task(req, bucket, 1)

        task_reserved(req)
        if callbacks:
            [callback(req) for callback in callbacks]
        handle(req)
    return task_message_handler
def dump_body(m, body):
    """Format message body for debugging purposes."""
    # v2 protocol does not deserialize body
    body = m.body if body is None else body
    return '{} ({}b)'.format(truncate(safe_repr(body), 1024),
                             len(m.body))
def domain(url):
    """Return the domain part of a URL."""
    return urlsplit(url)[1].split(':')[0]
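The netloc is split on ':' to drop any port component; for example:

assert domain('https://example.com:8080/path?q=1') == 'example.com'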
def create_canvas(n: int) -> Signature:
    """Creates a canvas to calculate: n * sum(1..n) * 10.

    For example, if n = 3, the result is 3 * (1 + 2 + 3) * 10 = 180.
    """
    canvas = chain(
        group(identity_task.s(i) for i in range(1, n + 1)) | xsum.s(),
        chord(group(mul.s(10) for _ in range(1, n + 1)), xsum.s()),
    )
    return canvas
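Hypothetical usage, assuming a running worker and a configured result backend:

result = create_canvas(n=3).delay()
print(result.get(timeout=10))  # -> 180, i.e. 3 * (1 + 2 + 3) * 10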
def revoke_by_headers(result: AsyncResult, terminate: bool) -> None:
    """Revokes the last task in the workflow by its stamped header.

    Arguments:
        result (AsyncResult): Can be either a frozen or a running result.
        terminate (bool): If True, the revoked task will be terminated.
    """
    result.revoke_by_stamped_headers({'mystamp': 'I am a stamp!'},
                                     terminate=terminate)
def prepare_workflow() -> Signature:
    """Creates a canvas that waits "n * sum(1..n) * 10" seconds, with n = 3.

    The canvas itself is stamped with a unique monitoring id stamp per task.
    The waiting task is stamped with a different, consistent stamp, which is
    used to revoke the task by its stamped header.
    """
    canvas = create_canvas(n=3)
    canvas = canvas | wait_for_revoke.s()
    canvas.stamp(MonitoringIdStampingVisitor())
    return canvas
def run_then_revoke():
    """Runs the workflow and lets the waiting task run for a while.

    Then, the waiting task is revoked by its stamped header.
    The expected outcome is that the canvas will be calculated to the end,
    but the waiting task will be revoked and terminated *during its run*.

    See worker logs for more details.
    """
    canvas = prepare_workflow()
    result = canvas.delay()
    print('Wait 5 seconds, then revoke the last task by its stamped header: '
          '"mystamp": "I am a stamp!"')
    sleep(5)
    print('Revoking the last task...')
    revoke_by_headers(result, terminate=True)
def revoke_then_run():
    """Revokes the waiting task by its stamped header before it runs.

    Then, runs the workflow, which will not run the waiting task that was
    revoked. The expected outcome is that the canvas will be calculated to
    the end, but the waiting task will not run at all.

    See worker logs for more details.
    """
    canvas = prepare_workflow()
    result = canvas.freeze()
    revoke_by_headers(result, terminate=False)
    result = canvas.delay()
def identity_task(self, x):
    """Identity function."""
    log_demo(self)
    return x
def mul(x: int, y: int) -> int:
    """Multiply two numbers."""
    return x * y
def xsum(numbers: list) -> int:
    """Sum a list of numbers."""
    return sum(numbers)
def waitfor(seconds: int) -> None:
    """Wait for "seconds" seconds, ticking every second."""
    print(f"Waiting for {seconds} seconds...")
    for i in range(seconds):
        sleep(1)
        print(f"{i + 1} seconds passed")
def wait_for_revoke(self: StampOnReplace, seconds: int) -> None:
    """Replace this task with a new task that waits for "seconds" seconds."""
    self.replace(waitfor.s(seconds))
def identity(x):
    """Return the argument."""
    return x
def add(x, y, z=None):
    """Add two or three numbers."""
    if z:
        return x + y + z
    else:
        return x + y
def mul(x: int, y: int) -> int:
    """Multiply two numbers."""
    return x * y
def add_not_typed(x, y):
    """Add two numbers, but don't check arguments."""
    return x + y
def add_ignore_result(x, y):
    """Add two numbers."""
    return x + y
def raise_error(*args):
    """Deliberately raise an error."""
    raise ValueError("deliberate error")
def delayed_sum(numbers, pause_time=1):
    """Sum the iterable of numbers."""
    # Allow the task to be in STARTED state for
    # a limited period of time.
    sleep(pause_time)
    return sum(numbers)
def delayed_sum_with_soft_guard(numbers, pause_time=1):
    """Sum the iterable of numbers."""
    try:
        sleep(pause_time)
        return sum(numbers)
    except SoftTimeLimitExceeded:
        return 0
def tsum(nums):
    """Sum an iterable of numbers."""
    return sum(nums)
def xsum(nums):
    """Sum of ints and lists."""
    return sum(sum(num) if isinstance(num, Iterable) else num for num in nums)
def add_replaced(self, x, y):
    """Add two numbers (via the add task)."""
    raise self.replace(add.s(x, y))
def add_to_all(self, nums, val):
    """Add the given value to all supplied numbers."""
    subtasks = [add.s(num, val) for num in nums]
    raise self.replace(group(*subtasks))
def print_unicode(log_message='hå它 valmuefrø', print_message='hiöäüß'):
    """Task that both logs and prints strings containing funny characters."""
    logger.warning(log_message)
    print(print_message)
def return_exception(e):
    """Return a tuple containing the exception message and sentinel value."""
    return e, True
def sleeping(i, **_):
    """Task sleeping for ``i`` seconds, and returning nothing."""
    sleep(i)
def ids(self, i):
    """Returns a tuple of ``root_id``, ``parent_id`` and the argument passed as ``i``."""
    return self.request.root_id, self.request.parent_id, i
def collect_ids(self, res, i):
    """Used as a callback in a chain or group where the previous tasks
    are :task:`ids`: returns a tuple of::

        (previous_result, (root_id, parent_id, i))
    """
    return res, (self.request.root_id, self.request.parent_id, i)
def retry(self, return_value=None):
    """Task simulating multiple retries.

    When return_value is provided, the task after retries returns
    the result. Otherwise it fails.
    """
    if return_value:
        attempt = getattr(self, 'attempt', 0)
        print('attempt', attempt)
        if attempt >= 3:
            delattr(self, 'attempt')
            return return_value
        self.attempt = attempt + 1

    raise self.retry(exc=ExpectedException(), countdown=5)
def retry_unpickleable(self, foo, bar, *, retry_kwargs):
    """Task that fails with an unpickleable exception and is retried."""
    raise self.retry(exc=UnpickleableException(foo, bar), **retry_kwargs)
def retry_once(self, *args, expires=None, max_retries=1, countdown=0.1):
    """Task that fails and is retried. Returns the number of retries."""
    if self.request.retries:
        return self.request.retries
    raise self.retry(countdown=countdown,
                     expires=expires,
                     max_retries=max_retries)
def retry_once_priority(self, *args, expires=60.0, max_retries=1, countdown=0.1):
    """Task that fails and is retried. Returns the priority."""
    if self.request.retries:
        return self.request.delivery_info['priority']
    raise self.retry(countdown=countdown, max_retries=max_retries)
def retry_once_headers(self, *args, max_retries=1, countdown=0.1):
    """Task that fails and is retried. Returns headers."""
    if self.request.retries:
        return self.request.headers
    raise self.retry(countdown=countdown, max_retries=max_retries)
def redis_echo(message, redis_key="redis-echo"):
    """Task that appends the message to a redis list."""
    redis_connection = get_redis_connection()
    redis_connection.rpush(redis_key, message)
def redis_count(redis_key="redis-count"):
    """Task that increments a specified or well-known redis key."""
    redis_connection = get_redis_connection()
    redis_connection.incr(redis_key)
def build_chain_inside_task(self):
    """Task to build a chain.

    This task builds a chain and returns the chain's AsyncResult
    to verify that AsyncResults are correctly converted into
    serializable objects.
    """
    test_chain = (
        add.s(1, 1) |
        add.s(2) |
        group(
            add.s(3),
            add.s(4)
        ) |
        add.s(5)
    )
    result = test_chain()
    return result
def fail(*args):
    """Task that simply raises ExpectedException."""
    args = ("Task expected to fail",) + args
    raise ExpectedException(*args)
def fail_unpickleable(foo, bar):
    """Task that raises an unpickleable exception."""
    raise UnpickleableException(foo, bar)
def fail_replaced(self, *args):
    """Replace this task with one which raises ExpectedException."""
    raise self.replace(fail.si(*args))
def await_redis_echo(expected_msgs, redis_key="redis-echo", timeout=TIMEOUT):
    """Helper to wait for a specified or well-known redis key to contain a string."""
    redis_connection = get_redis_connection()

    if isinstance(expected_msgs, (str, bytes, bytearray)):
        expected_msgs = (expected_msgs,)
    expected_msgs = collections.Counter(
        e if not isinstance(e, str) else e.encode("utf-8")
        for e in expected_msgs
    )

    # This can technically wait for `len(expected_msg_or_msgs) * timeout` :/
    while +expected_msgs:
        maybe_key_msg = redis_connection.blpop(redis_key, timeout)
        if maybe_key_msg is None:
            raise TimeoutError(
                "Fetching from {!r} timed out - still awaiting {!r}"
                .format(redis_key, dict(+expected_msgs))
            )
        retrieved_key, msg = maybe_key_msg
        assert retrieved_key.decode("utf-8") == redis_key
        expected_msgs[msg] -= 1  # silently accepts unexpected messages

    # There should be no more elements - block momentarily
    assert redis_connection.blpop(redis_key, min(1, timeout)) is None
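A hedged test-side sketch, assuming the redis_echo task above is registered and a worker plus redis server are running:

# Enqueue two echo tasks, then block until both messages appear on the list.
redis_echo.delay('ping')
redis_echo.delay('pong')
await_redis_echo({'ping', 'pong'}, redis_key='redis-echo', timeout=10)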