Task used by Task.replace when replacing task with group. | def add_accumulate_task(app):
"""Task used by Task.replace when replacing task with group."""
@app.task(bind=True, name='celery.accumulate', shared=False, lazy=False)
def accumulate(self, *args, **kwargs):
index = kwargs.get('index')
return args[index] if index is not None else args
return accumulate |
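For context, a hedged sketch of how ``celery.accumulate`` is typically reached: when a bound task calls ``Task.replace`` with a ``group``, Celery chains the group into a chord whose body is this ``accumulate`` task, so the caller's result resolves to the group's results. App, broker and task names below are illustrative, not taken from the source above.

.. code-block:: python

    from celery import Celery, group

    app = Celery('proj', broker='amqp://')  # illustrative app/broker

    @app.task
    def fetch(url):
        return url.upper()

    @app.task(bind=True)
    def fan_out(self, urls):
        # Replacing this task with a group makes Celery append
        # celery.accumulate to collect the group's results.
        raise self.replace(group(fetch.s(u) for u in urls))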
Task used by result backends without native chord support.
Joins the chord by creating a task chain that polls the header
for completion. | def add_unlock_chord_task(app):
"""Task used by result backends without native chord support.
Joins the chord by creating a task chain that polls the header
for completion.
"""
from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
@app.task(name='celery.chord_unlock', max_retries=None, shared=False,
default_retry_delay=app.conf.result_chord_retry_interval, ignore_result=True, lazy=False, bind=True)
def unlock_chord(self, group_id, callback, interval=None,
max_retries=None, result=None,
Result=app.AsyncResult, GroupResult=app.GroupResult,
result_from_tuple=result_from_tuple, **kwargs):
if interval is None:
interval = self.default_retry_delay
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
app=app,
)
j = deps.join_native if deps.supports_native_join else deps.join
try:
ready = deps.ready()
except Exception as exc:
raise self.retry(
exc=exc, countdown=interval, max_retries=max_retries,
)
else:
if not ready:
raise self.retry(countdown=interval, max_retries=max_retries)
callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(
timeout=app.conf.result_chord_join_timeout,
propagate=True,
)
except Exception as exc: # pylint: disable=broad-except
try:
culprit = next(deps._failed_join_report())
reason = f'Dependency {culprit.id} raised {exc!r}'
except StopIteration:
reason = repr(exc)
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(callback, ChordError(reason))
else:
try:
callback.delay(ret)
except Exception as exc: # pylint: disable=broad-except
logger.exception('Chord %r raised: %r', group_id, exc)
app.backend.chord_error_from_stack(
callback,
exc=ChordError(f'Callback error: {exc!r}'),
)
return unlock_chord |
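A hedged usage sketch: with a result backend that lacks native chord joins (the database backend is one documented example), the chord below is completed by the ``celery.chord_unlock`` task defined above, which polls the header group until it is ready and then applies the body. Names and URLs are illustrative.

.. code-block:: python

    from celery import Celery, chord

    app = Celery('proj',
                 broker='amqp://',
                 backend='db+sqlite:///results.db')  # no native chord join

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(numbers):
        return sum(numbers)

    # Header runs first; chord_unlock polls it, then calls tsum with
    # the joined results once every header task has finished.
    result = chord(add.s(i, i) for i in range(10))(tsum.s())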
No longer used, but here for backwards compatibility. | def add_group_task(app):
"""No longer used, but here for backwards compatibility."""
from celery.canvas import maybe_signature
from celery.result import result_from_tuple
@app.task(name='celery.group', bind=True, shared=False, lazy=False)
def group(self, tasks, result, group_id, partial_args, add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
taskit = (maybe_signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
with app.producer_or_acquire() as producer:
[stask.apply_async(group_id=group_id, producer=producer,
add_to_parent=False) for stask in taskit]
parent = app.current_worker_task
if add_to_parent and parent:
parent.add_trail(result)
return result
return group |
No longer used, but here for backwards compatibility. | def add_chain_task(app):
"""No longer used, but here for backwards compatibility."""
@app.task(name='celery.chain', shared=False, lazy=False)
def chain(*args, **kwargs):
raise NotImplementedError('chain is not a real task')
return chain |
No longer used, but here for backwards compatibility. | def add_chord_task(app):
"""No longer used, but here for backwards compatibility."""
from celery import chord as _chord
from celery import group
from celery.canvas import maybe_signature
@app.task(name='celery.chord', bind=True, ignore_result=False,
shared=False, lazy=False)
def chord(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, eager=False, **kwargs):
app = self.app
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
maybe_signature(s, app=app) for s in tasks
], app=self.app)
body = maybe_signature(body, app=app)
ch = _chord(header, body)
return ch.run(header, body, partial_args, app, interval,
countdown, max_retries, **kwargs)
return chord |
Flatten node replies.
Convert from a list of replies in this format::
[{'a@example.com': reply},
{'b@example.com': reply}]
into this format::
{'a@example.com': reply,
'b@example.com': reply} | def flatten_reply(reply):
"""Flatten node replies.
Convert from a list of replies in this format::
[{'a@example.com': reply},
{'b@example.com': reply}]
into this format::
{'a@example.com': reply,
'b@example.com': reply}
"""
nodes, dupes = {}, set()
for item in reply:
dupes.update(name for name in item if name in nodes)
nodes.update(item)
if dupes:
warnings.warn(DuplicateNodenameWarning(
W_DUPNODE.format(
pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
),
))
return nodes |
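For orientation, broadcast commands such as ``app.control.ping()`` return one ``{nodename: reply}`` mapping per worker, which is the shape this helper collapses. A minimal standalone sketch of the same transformation (nodenames are made up):

.. code-block:: python

    replies = [
        {'celery@worker1': {'ok': 'pong'}},
        {'celery@worker2': {'ok': 'pong'}},
    ]

    nodes, dupes = {}, set()
    for item in replies:
        dupes.update(name for name in item if name in nodes)
        nodes.update(item)

    # nodes == {'celery@worker1': {'ok': 'pong'},
    #           'celery@worker2': {'ok': 'pong'}}; dupes stays empty here.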
Flatten settings. | def flatten(d, root='', keyfilter=_flatten_keys):
"""Flatten settings."""
stack = deque([(root, d)])
while stack:
ns, options = stack.popleft()
for key, opt in options.items():
if isinstance(opt, dict):
stack.append((ns + key + '_', opt))
else:
yield from keyfilter(ns, key, opt) |
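A self-contained sketch of what the traversal produces, with a trivial keyfilter standing in for the internal ``_flatten_keys`` (an assumption; the real filter may rename or drop keys):

.. code-block:: python

    from collections import deque

    def flatten(d, root='', keyfilter=lambda ns, key, opt: [(ns + key, opt)]):
        # Breadth-first walk over nested dicts, joining key parts with '_'.
        stack = deque([(root, d)])
        while stack:
            ns, options = stack.popleft()
            for key, opt in options.items():
                if isinstance(opt, dict):
                    stack.append((ns + key + '_', opt))
                else:
                    yield from keyfilter(ns, key, opt)

    settings = {'task': {'serializer': 'json'}, 'worker': {'concurrency': 4}}
    print(dict(flatten(settings)))
    # {'task_serializer': 'json', 'worker_concurrency': 4}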
Find setting by name. | def find(name, namespace='celery'):
"""Find setting by name."""
# - Try specified name-space first.
namespace = namespace.lower()
try:
return searchresult(
namespace, name.lower(), NAMESPACES[namespace][name.lower()],
)
except KeyError:
# - Try all the other namespaces.
for ns, opts in NAMESPACES.items():
if ns.lower() == name.lower():
return searchresult(None, ns, opts)
elif isinstance(opts, dict):
try:
return searchresult(ns, name.lower(), opts[name.lower()])
except KeyError:
pass
# - See if name is a qualname last.
return searchresult(None, name.lower(), DEFAULTS[name.lower()]) |
Expand the :setting:`task_routes` setting. | def prepare(routes):
"""Expand the :setting:`task_routes` setting."""
def expand_route(route):
if isinstance(route, (Mapping, list, tuple)):
return MapRoute(route)
if isinstance(route, str):
return mlazy(expand_router_string, route)
return route
if routes is None:
return ()
if not isinstance(routes, (list, tuple)):
routes = (routes,)
return [expand_route(route) for route in routes] |
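The shapes ``prepare`` accepts mirror the documented ``task_routes`` formats: a mapping of patterns to routes, a list of ``(pattern, route)`` tuples, or a dotted path to a router callable that is resolved lazily. A hedged illustration (module, task and queue names are made up):

.. code-block:: python

    from celery import Celery

    app = Celery('proj')

    # A mapping (or list of tuples) becomes a MapRoute.
    app.conf.task_routes = {
        'feed.tasks.import_feed': {'queue': 'feeds'},
        'web.tasks.*': {'queue': 'web'},
    }

    # A dotted path is wrapped in mlazy and imported on first use.
    app.conf.task_routes = ('proj.routers.route_task',)

    # proj/routers.py -- a router returns a route dict or None to fall through.
    def route_task(name, args, kwargs, options, task=None, **kw):
        if name.startswith('video.'):
            return {'queue': 'media'}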
Log 'fmt % context' with severity 'INFO'.
'context' is also passed in extra with key 'data' for custom handlers. | def info(fmt, context):
"""Log 'fmt % context' with severity 'INFO'.
'context' is also passed in extra with key 'data' for custom handlers.
"""
logger.info(fmt, context, extra={'data': context}) |
Return true if the task overrides ``attr``. | def task_has_custom(task, attr):
"""Return true if the task overrides ``attr``."""
return mro_lookup(task.__class__, attr, stop={BaseTask, object},
monkey_patched=['celery.app.task']) |
Use 'shadow' in request for the task name if applicable. | def get_task_name(request, default):
"""Use 'shadow' in request for the task name if applicable."""
# request.shadow could be None or an empty string.
# If so, we should use default.
return getattr(request, 'shadow', None) or default |
Return a function that traces task execution.
Catches all exceptions and updates result backend with the
state and result.
If the call was successful, it saves the result to the task result
backend, and sets the task status to `"SUCCESS"`.
If the call raises :exc:`~@Retry`, it extracts
the original exception, uses that as the result and sets the task state
to `"RETRY"`.
If the call results in an exception, it saves the exception as the task
result, and sets the task state to `"FAILURE"`.
Return a function that takes the following arguments:
:param uuid: The id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:keyword request: Request dict. | def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
Info=TraceInfo, eager=False, propagate=False, app=None,
monotonic=time.monotonic, trace_ok_t=trace_ok_t,
IGNORE_STATES=IGNORE_STATES):
"""Return a function that traces task execution.
Catches all exceptions and updates result backend with the
state and result.
If the call was successful, it saves the result to the task result
backend, and sets the task status to `"SUCCESS"`.
If the call raises :exc:`~@Retry`, it extracts
the original exception, uses that as the result and sets the task state
to `"RETRY"`.
If the call results in an exception, it saves the exception as the task
result, and sets the task state to `"FAILURE"`.
Return a function that takes the following arguments:
:param uuid: The id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:keyword request: Request dict.
"""
# pylint: disable=too-many-statements
# If the task doesn't define a custom __call__ method
# we optimize it away by simply calling the run method directly,
# saving the extra method call and a line less in the stack trace.
fun = task if task_has_custom(task, '__call__') else task.run
loader = loader or app.loader
ignore_result = task.ignore_result
track_started = task.track_started
track_started = not eager and (task.track_started and not ignore_result)
# #6476
if eager and not ignore_result and task.store_eager_result:
publish_result = True
else:
publish_result = not eager and not ignore_result
deduplicate_successful_tasks = ((app.conf.task_acks_late or task.acks_late)
and app.conf.worker_deduplicate_successful_tasks
and app.backend.persistent)
hostname = hostname or gethostname()
inherit_parent_priority = app.conf.task_inherit_parent_priority
loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup
task_before_start = None
task_on_success = None
task_after_return = None
if task_has_custom(task, 'before_start'):
task_before_start = task.before_start
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
task_after_return = task.after_return
pid = os.getpid()
request_stack = task.request_stack
push_request = request_stack.push
pop_request = request_stack.pop
push_task = _task_stack.push
pop_task = _task_stack.pop
_does_info = logger.isEnabledFor(logging.INFO)
resultrepr_maxsize = task.resultrepr_maxsize
prerun_receivers = signals.task_prerun.receivers
postrun_receivers = signals.task_postrun.receivers
success_receivers = signals.task_success.receivers
from celery import canvas
signature = canvas.maybe_signature # maybe_ does not clone if already a signature
def on_error(request, exc, state=FAILURE, call_errbacks=True):
if propagate:
raise
I = Info(state, exc)
R = I.handle_error_state(
task, request, eager=eager, call_errbacks=call_errbacks,
)
return I, R, I.state, I.retval
def trace_task(uuid, args, kwargs, request=None):
# R - is the possibly prepared return value.
# I - is the Info object.
# T - runtime
# Rstr - textual representation of return value
# retval - is the always unmodified return value.
# state - is the resulting task state.
# This function is very long because we've unrolled all the calls
# for performance reasons, and because the function is so long
# we want the main variables (I and R) to stand out visually from
# the rest of the variables, so breaking PEP8 is worth it ;)
R = I = T = Rstr = retval = state = None
task_request = None
time_start = monotonic()
try:
try:
kwargs.items
except AttributeError:
raise InvalidTaskError(
'Task keyword arguments is not a mapping')
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
redelivered = (task_request.delivery_info
and task_request.delivery_info.get('redelivered', False))
if deduplicate_successful_tasks and redelivered:
if task_request.id in successful_requests:
return trace_ok_t(R, I, T, Rstr)
r = AsyncResult(task_request.id, app=app)
try:
state = r.state
except BackendGetMetaError:
pass
else:
if state == SUCCESS:
info(LOG_IGNORED, {
'id': task_request.id,
'name': get_task_name(task_request, name),
'description': 'Task already completed successfully.'
})
return trace_ok_t(R, I, T, Rstr)
push_task(task)
root_id = task_request.root_id or uuid
task_priority = task_request.delivery_info.get('priority') if \
inherit_parent_priority else None
push_request(task_request)
try:
# -*- PRE -*-
if prerun_receivers:
send_prerun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs)
loader_task_init(uuid, task)
if track_started:
task.backend.store_result(
uuid, {'pid': pid, 'hostname': hostname}, STARTED,
request=task_request,
)
# -*- TRACE -*-
try:
if task_before_start:
task_before_start(uuid, args, kwargs)
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
I.handle_reject(task, task_request)
traceback_clear(exc)
except Ignore as exc:
I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
I.handle_ignore(task, task_request)
traceback_clear(exc)
except Retry as exc:
I, R, state, retval = on_error(
task_request, exc, RETRY, call_errbacks=False)
traceback_clear(exc)
except Exception as exc:
I, R, state, retval = on_error(task_request, exc)
traceback_clear(exc)
except BaseException:
raise
else:
try:
# callback tasks must be applied before the result is
# stored, so that result.children is populated.
# groups are called inline and will store trail
# separately, so need to call them separately
# so that the trail's not added multiple times :(
# (Issue #1936)
callbacks = task.request.callbacks
if callbacks:
if len(task.request.callbacks) > 1:
sigs, groups = [], []
for sig in callbacks:
sig = signature(sig, app=app)
if isinstance(sig, group):
groups.append(sig)
else:
sigs.append(sig)
for group_ in groups:
group_.apply_async(
(retval,),
parent_id=uuid, root_id=root_id,
priority=task_priority
)
if sigs:
group(sigs, app=app).apply_async(
(retval,),
parent_id=uuid, root_id=root_id,
priority=task_priority
)
else:
signature(callbacks[0], app=app).apply_async(
(retval,), parent_id=uuid, root_id=root_id,
priority=task_priority
)
# execute first task in chain
chain = task_request.chain
if chain:
_chsig = signature(chain.pop(), app=app)
_chsig.apply_async(
(retval,), chain=chain,
parent_id=uuid, root_id=root_id,
priority=task_priority
)
task.backend.mark_as_done(
uuid, retval, task_request, publish_result,
)
except EncodeError as exc:
I, R, state, retval = on_error(task_request, exc)
else:
Rstr = saferepr(R, resultrepr_maxsize)
T = monotonic() - time_start
if task_on_success:
task_on_success(retval, uuid, args, kwargs)
if success_receivers:
send_success(sender=task, result=retval)
if _does_info:
info(LOG_SUCCESS, {
'id': uuid,
'name': get_task_name(task_request, name),
'return_value': Rstr,
'runtime': T,
'args': task_request.get('argsrepr') or safe_repr(args),
'kwargs': task_request.get('kwargsrepr') or safe_repr(kwargs),
})
# -* POST *-
if state not in IGNORE_STATES:
if task_after_return:
task_after_return(
state, retval, uuid, args, kwargs, None,
)
finally:
try:
if postrun_receivers:
send_postrun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs,
retval=retval, state=state)
finally:
pop_task()
pop_request()
if not eager:
try:
task.backend.process_cleanup()
loader_cleanup()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as exc:
logger.error('Process cleanup failed: %r', exc,
exc_info=True)
except MemoryError:
raise
except Exception as exc:
_signal_internal_error(task, uuid, args, kwargs, request, exc)
if eager:
raise
R = report_internal_error(task, exc)
if task_request is not None:
I, _, _, _ = on_error(task_request, exc)
return trace_ok_t(R, I, T, Rstr)
return trace_task |
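The tracer above dispatches the ``task_prerun``, ``task_success`` and ``task_postrun`` signals around execution; a hedged sketch of observing a task from the outside via those signals (print statements only, for illustration):

.. code-block:: python

    from celery import signals

    @signals.task_prerun.connect
    def on_prerun(sender=None, task_id=None, task=None,
                  args=None, kwargs=None, **extra):
        print(f'starting {sender.name}[{task_id}]')

    @signals.task_success.connect
    def on_success(sender=None, result=None, **extra):
        print(f'{sender.name} returned {result!r}')

    @signals.task_postrun.connect
    def on_postrun(sender=None, task_id=None, task=None,
                   retval=None, state=None, **extra):
        print(f'finished {sender.name}[{task_id}] state={state}')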
Trace task execution. | def trace_task(task, uuid, args, kwargs, request=None, **opts):
"""Trace task execution."""
request = {} if not request else request
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
return task.__trace__(uuid, args, kwargs, request)
except Exception as exc:
_signal_internal_error(task, uuid, args, kwargs, request, exc)
return trace_ok_t(report_internal_error(task, exc), TraceInfo(FAILURE, exc), 0.0, None) |
Send a special `internal_error` signal to the app for outside body errors. | def _signal_internal_error(task, uuid, args, kwargs, request, exc):
"""Send a special `internal_error` signal to the app for outside body errors."""
try:
_, _, tb = sys.exc_info()
einfo = ExceptionInfo()
einfo.exception = get_pickleable_exception(einfo.exception)
einfo.type = get_pickleable_etype(einfo.type)
signals.task_internal_error.send(
sender=task,
task_id=uuid,
args=args,
kwargs=kwargs,
request=request,
exception=exc,
traceback=tb,
einfo=einfo,
)
finally:
del tb |
Setup worker related optimizations. | def setup_worker_optimizations(app, hostname=None):
"""Setup worker related optimizations."""
hostname = hostname or gethostname()
# make sure custom Task.__call__ methods that call super
# won't mess up the request/task stack.
_install_stack_protection()
# all new threads start without a current app, so if an app is not
# passed on to the thread it will fall back to the "default app",
# which then could be the wrong app. So for the worker
# we set this to always return our app. This is a hack,
# and means that only a single app can be used for workers
# running in the same process.
app.set_current()
app.set_default()
# evaluate all task classes by finalizing the app.
app.finalize()
# set fast shortcut to task registry
_localized[:] = [
app._tasks,
prepare_accept_content(app.conf.accept_content),
hostname,
]
app.use_fast_trace_task = True |
Reset previously configured optimizations. | def reset_worker_optimizations(app=current_app):
"""Reset previously configured optimizations."""
try:
delattr(BaseTask, '_stackprotected')
except AttributeError:
pass
try:
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
app.use_fast_trace_task = False |
String used in __repr__ etc., to identify app instances. | def appstr(app):
"""String used in __repr__ etc., to identify app instances."""
return f'{app.main or "__main__"} at {id(app):#x}' |
Rebuild app for versions 2.5+. | def _unpickle_app(cls, pickler, *args):
"""Rebuild app for versions 2.5+."""
return pickler()(cls, *args) |
Rebuild app for versions 3.1+. | def _unpickle_app_v2(cls, kwargs):
"""Rebuild app for versions 3.1+."""
kwargs['set_as_current'] = False
return cls(**kwargs) |
Filter sensitive settings. | def filter_hidden_settings(conf):
"""Filter sensitive settings."""
def maybe_censor(key, value, mask='*' * 8):
if isinstance(value, Mapping):
return filter_hidden_settings(value)
if isinstance(key, str):
if HIDDEN_SETTINGS.search(key):
return mask
elif 'broker_url' in key.lower():
from kombu import Connection
return Connection(value).as_uri(mask=mask)
elif 'backend' in key.lower():
return maybe_sanitize_url(value, mask=mask)
return value
return {k: maybe_censor(k, v) for k, v in conf.items()} |
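A hedged illustration of the censoring behavior (assuming the function is importable from ``celery.app.utils``, and that ``HIDDEN_SETTINGS`` matches fragments such as PASS, SECRET, KEY and DATABASE; the values are made up):

.. code-block:: python

    from celery.app.utils import filter_hidden_settings

    conf = {
        'broker_url': 'amqp://user:secret@localhost:5672//',
        'database_password': 'hunter2',
        'task_serializer': 'json',
    }

    safe = filter_hidden_settings(conf)
    # Roughly:
    # {'broker_url': 'amqp://user:********@localhost:5672//',
    #  'database_password': '********',
    #  'task_serializer': 'json'}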
Return a string containing information useful in bug-reports. | def bugreport(app):
"""Return a string containing information useful in bug-reports."""
import billiard
import kombu
import celery
try:
conn = app.connection()
driver_v = '{}:{}'.format(conn.transport.driver_name,
conn.transport.driver_version())
transport = conn.transport_cls
except Exception: # pylint: disable=broad-except
transport = driver_v = ''
return BUGREPORT_INFO.format(
system=_platform.system(),
arch=', '.join(x for x in _platform.architecture() if x),
kernel_version=_platform.release(),
py_i=pyimplementation(),
celery_v=celery.VERSION_BANNER,
kombu_v=kombu.__version__,
billiard_v=billiard.__version__,
py_v=_platform.python_version(),
driver_v=driver_v,
transport=transport,
results=maybe_sanitize_url(app.conf.result_backend or 'disabled'),
human_settings=app.conf.humanize(),
loader=qualname(app.loader.__class__),
) |
Find app by name. | def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
"""Find app by name."""
from .base import Celery
try:
sym = symbol_by_name(app, imp=imp)
except AttributeError:
# last part was not an attribute, but a module
sym = imp(app)
if isinstance(sym, ModuleType) and ':' not in app:
try:
found = sym.app
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
try:
found = sym.celery
if isinstance(found, ModuleType):
raise AttributeError(
"attribute 'celery' is the celery module not the instance of celery")
except AttributeError:
if getattr(sym, '__path__', None):
try:
return find_app(
f'{app}.celery',
symbol_by_name=symbol_by_name, imp=imp,
)
except ImportError:
pass
for suspect in vars(sym).values():
if isinstance(suspect, Celery):
return suspect
raise
else:
return found
else:
return found
return sym |
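A hedged sketch of the lookup order implemented above: the module's ``app`` attribute, then its ``celery`` attribute, then a ``<package>.celery`` submodule, and finally any ``Celery`` instance found in the module. Package layout and names are illustrative.

.. code-block:: python

    from celery.app.utils import find_app

    # Given a package laid out as:
    #   proj/__init__.py
    #   proj/celery.py      <- contains `app = Celery('proj')`
    #
    # all of these resolve to the same instance:
    find_app('proj')              # falls through to proj.celery.app
    find_app('proj.celery')       # module: uses its `app` attribute
    find_app('proj.celery:app')   # explicit attribute reference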
Return information useful in bug reports. | def bugreport(app=None):
"""Return information useful in bug reports."""
return (app or _state.get_current_app()).bugreport() |
Create shared task (decorator).
This can be used by library authors to create tasks that'll work
for any app environment.
Returns:
~celery.local.Proxy: A proxy that always takes the task from the
current app's task registry.
Example:
>>> from celery import Celery, shared_task
>>> @shared_task
... def add(x, y):
... return x + y
...
>>> app1 = Celery(broker='amqp://')
>>> add.app is app1
True
>>> app2 = Celery(broker='redis://')
>>> add.app is app2
True | def shared_task(*args, **kwargs):
"""Create shared task (decorator).
This can be used by library authors to create tasks that'll work
for any app environment.
Returns:
~celery.local.Proxy: A proxy that always takes the task from the
current app's task registry.
Example:
>>> from celery import Celery, shared_task
>>> @shared_task
... def add(x, y):
... return x + y
...
>>> app1 = Celery(broker='amqp://')
>>> add.app is app1
True
>>> app2 = Celery(broker='redis://')
>>> add.app is app2
True
"""
def create_shared_task(**options):
def __inner(fun):
name = options.get('name')
# Set as shared task so that unfinalized apps,
# and future apps will register a copy of this task.
_state.connect_on_app_finalize(
lambda app: app._task_from_fun(fun, **options)
)
# Force all finalized apps to take this task as well.
for app in _state._get_active_apps():
if app.finalized:
with app._finalize_mutex:
app._task_from_fun(fun, **options)
# Return a proxy that always gets the task from the current
# app's task registry.
def task_by_cons():
app = _state.get_current_app()
return app.tasks[
name or app.gen_task_name(fun.__name__, fun.__module__)
]
return Proxy(task_by_cons)
return __inner
if len(args) == 1 and callable(args[0]):
return create_shared_task(**kwargs)(args[0])
return create_shared_task(*args, **kwargs) |
Decorator used to register a new result drainer type. | def register_drainer(name):
"""Decorator used to register a new result drainer type."""
def _inner(cls):
drainers[name] = cls
return cls
return _inner |
Return an unpickled backend. | def unpickle_backend(cls, args, kwargs):
"""Return an unpickled backend."""
return cls(*args, app=current_app._get_current_object(), **kwargs) |
AMQP Administration Shell.
Also works for non-AMQP transports (but not ones that
store declarations in memory). | def amqp(ctx):
"""AMQP Administration Shell.
Also works for non-AMQP transports (but not ones that
store declarations in memory).
"""
if not isinstance(ctx.obj, AMQPContext):
ctx.obj = AMQPContext(ctx.obj) |
Extract preload options and return a wrapped callable. | def handle_preload_options(f):
"""Extract preload options and return a wrapped callable."""
def caller(ctx, *args, **kwargs):
app = ctx.obj.app
preload_options = [o.name for o in app.user_options.get('preload', [])]
if preload_options:
user_options = {
preload_option: kwargs[preload_option]
for preload_option in preload_options
}
user_preload_options.send(sender=f, app=app, options=user_options)
return f(ctx, *args, **kwargs)
return update_wrapper(caller, f) |
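Preload options come from ``app.user_options['preload']``; the wrapper above pulls them out of the parsed CLI kwargs and fires the ``user_preload_options`` signal. A hedged example following the documented pattern (the ``--monitoring`` flag and ``enable_monitoring`` helper are hypothetical):

.. code-block:: python

    from celery import Celery, signals
    from click import Option

    app = Celery('proj')
    # Make a custom --monitoring flag available to every celery sub-command.
    app.user_options['preload'].add(
        Option(('--monitoring',), is_flag=True,
               help='Enable our external monitoring.'))

    @signals.user_preload_options.connect
    def on_preload_parsed(sender=None, app=None, options=None, **kwargs):
        if options.get('monitoring'):
            enable_monitoring()  # hypothetical helper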
Start the beat periodic task scheduler. | def beat(ctx, detach=False, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, workdir=None, **kwargs):
"""Start the beat periodic task scheduler."""
app = ctx.obj.app
if ctx.args:
try:
app.config_from_cmdline(ctx.args)
except (KeyError, ValueError) as e:
# TODO: Improve the error messages
raise click.UsageError("Unable to parse extra configuration"
" from command line.\n"
f"Reason: {e}", ctx=ctx)
if not detach:
maybe_drop_privileges(uid=uid, gid=gid)
beat = partial(app.Beat,
logfile=logfile, pidfile=pidfile,
quiet=ctx.obj.quiet, **kwargs)
if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
return beat().run()
else:
return beat().run() |
Call a task by name. | def call(ctx, name, args, kwargs, eta, countdown, expires, serializer, queue, exchange, routing_key):
"""Call a task by name."""
task_id = ctx.obj.app.send_task(
name,
args=args, kwargs=kwargs,
countdown=countdown,
serializer=serializer,
queue=queue,
exchange=exchange,
routing_key=routing_key,
eta=eta,
expires=expires
).id
ctx.obj.echo(task_id) |
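The ``celery call`` command above is a thin wrapper around ``app.send_task``; a hedged Python equivalent, given an existing ``app`` instance (task name and queue are illustrative):

.. code-block:: python

    result = app.send_task(
        'proj.tasks.add',     # name under which the worker registered the task
        args=[2, 2],
        kwargs={},
        queue='default',
        countdown=10,         # start no earlier than 10 seconds from now
    )
    print(result.id)          # the same id `celery call` echoes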
Celery command entrypoint. | def celery(ctx, app, broker, result_backend, loader, config, workdir,
no_color, quiet, version, skip_checks):
"""Celery command entrypoint."""
if version:
click.echo(VERSION_BANNER)
ctx.exit()
elif ctx.invoked_subcommand is None:
click.echo(ctx.get_help())
ctx.exit()
if loader:
# Default app takes loader from this env (Issue #1066).
os.environ['CELERY_LOADER'] = loader
if broker:
os.environ['CELERY_BROKER_URL'] = broker
if result_backend:
os.environ['CELERY_RESULT_BACKEND'] = result_backend
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
if skip_checks:
os.environ['CELERY_SKIP_CHECKS'] = 'true'
ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
quiet=quiet)
# User options
worker.params.extend(ctx.obj.app.user_options.get('worker', []))
beat.params.extend(ctx.obj.app.user_options.get('beat', []))
events.params.extend(ctx.obj.app.user_options.get('events', []))
for command in celery.commands.values():
command.params.extend(ctx.obj.app.user_options.get('preload', [])) |
Shows information useful to include in bug-reports. | def report(ctx, **kwargs):
"""Shows information useful to include in bug-reports."""
app = ctx.obj.app
app.loader.import_default_modules()
ctx.obj.echo(app.bugreport()) |
Start celery umbrella command.
This function is the main entrypoint for the CLI.
:return: The exit code of the CLI. | def main() -> int:
"""Start celery umbrella command.
This function is the main entrypoint for the CLI.
:return: The exit code of the CLI.
"""
return celery(auto_envvar_prefix="CELERY") |
Show list of workers that are online. | def status(ctx, timeout, destination, json, **kwargs):
"""Show list of workers that are online."""
callback = None if json else partial(_say_remote_command_reply, ctx)
replies = ctx.obj.app.control.inspect(timeout=timeout,
destination=destination,
callback=callback).ping()
if not replies:
raise CeleryCommandException(
message='No nodes replied within time constraint',
exit_code=EX_UNAVAILABLE
)
if json:
ctx.obj.echo(dumps(replies))
nodecount = len(replies)
if not kwargs.get('quiet', False):
ctx.obj.echo('\n{} {} online.'.format(
nodecount, text.pluralize(nodecount, 'node'))) |
Inspect the workers by sending them the COMMAND inspect command.
Availability: RabbitMQ (AMQP) and Redis transports. | def inspect(ctx, command, timeout, destination, json, **kwargs):
"""Inspect the workers by sending them the COMMAND inspect command.
Availability: RabbitMQ (AMQP) and Redis transports.
"""
_verify_command_name('inspect', command)
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
arguments = _compile_arguments(command, ctx.args)
inspect = ctx.obj.app.control.inspect(timeout=timeout,
destination=destination,
callback=callback)
replies = inspect._request(command, **arguments)
if not replies:
raise CeleryCommandException(
message='No nodes replied within time constraint',
exit_code=EX_UNAVAILABLE
)
if json:
ctx.obj.echo(dumps(replies))
return
nodecount = len(replies)
if not ctx.obj.quiet:
ctx.obj.echo('\n{} {} online.'.format(
nodecount, text.pluralize(nodecount, 'node'))) |
Send the COMMAND control command to the workers.
Availability: RabbitMQ (AMQP), Redis, and MongoDB transports. | def control(ctx, command, timeout, destination, json):
"""Send the COMMAND control command to the workers.
Availability: RabbitMQ (AMQP), Redis, and MongoDB transports.
"""
_verify_command_name('control', command)
callback = None if json else partial(_say_remote_command_reply, ctx,
show_reply=True)
args = ctx.args
arguments = _compile_arguments(command, args)
replies = ctx.obj.app.control.broadcast(command, timeout=timeout,
destination=destination,
callback=callback,
reply=True,
arguments=arguments)
if not replies:
raise CeleryCommandException(
message='No nodes replied within time constraint',
exit_code=EX_UNAVAILABLE
)
if json:
ctx.obj.echo(dumps(replies)) |
Event-stream utilities. | def events(ctx, dump, camera, detach, frequency, maxrate, loglevel, **kwargs):
"""Event-stream utilities."""
app = ctx.obj.app
if dump:
return _run_evdump(app)
if camera:
return _run_evcam(camera, app=app, freq=frequency, maxrate=maxrate,
loglevel=loglevel,
detach=detach,
**kwargs)
return _run_evtop(app) |
The ``celery graph`` command. | def graph(ctx):
"""The ``celery graph`` command.""" |
Display bootsteps graph. | def bootsteps(ctx):
"""Display bootsteps graph."""
worker = ctx.obj.app.WorkController()
include = {arg.lower() for arg in ctx.args or ['worker', 'consumer']}
if 'worker' in include:
worker_graph = worker.blueprint.graph
if 'consumer' in include:
worker.blueprint.connect_with(worker.consumer.blueprint)
else:
worker_graph = worker.consumer.blueprint.graph
worker_graph.to_dot(sys.stdout) |
Display workers graph. | def workers(ctx):
"""Display workers graph."""
def simplearg(arg):
return maybe_list(itemgetter(0, 2)(arg.partition(':')))
def maybe_list(l, sep=','):
return l[0], l[1].split(sep) if sep in l[1] else l[1]
args = dict(simplearg(arg) for arg in ctx.args)
generic = 'generic' in args
def generic_label(node):
return '{} ({}://)'.format(type(node).__name__,
node._label.split('://')[0])
class Node:
force_label = None
scheme = {}
def __init__(self, label, pos=None):
self._label = label
self.pos = pos
def label(self):
return self._label
def __str__(self):
return self.label()
class Thread(Node):
scheme = {
'fillcolor': 'lightcyan4',
'fontcolor': 'yellow',
'shape': 'oval',
'fontsize': 10,
'width': 0.3,
'color': 'black',
}
def __init__(self, label, **kwargs):
self.real_label = label
super().__init__(
label=f'thr-{next(tids)}',
pos=0,
)
class Formatter(GraphFormatter):
def label(self, obj):
return obj and obj.label()
def node(self, obj):
scheme = dict(obj.scheme) if obj.pos else obj.scheme
if isinstance(obj, Thread):
scheme['label'] = obj.real_label
return self.draw_node(
obj, dict(self.node_scheme, **scheme),
)
def terminal_node(self, obj):
return self.draw_node(
obj, dict(self.term_scheme, **obj.scheme),
)
def edge(self, a, b, **attrs):
if isinstance(a, Thread):
attrs.update(arrowhead='none', arrowtail='tee')
return self.draw_edge(a, b, self.edge_scheme, attrs)
def subscript(n):
S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄',
'5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'}
return ''.join([S[i] for i in str(n)])
class Worker(Node):
pass
class Backend(Node):
scheme = {
'shape': 'folder',
'width': 2,
'height': 1,
'color': 'black',
'fillcolor': 'peachpuff3',
}
def label(self):
return generic_label(self) if generic else self._label
class Broker(Node):
scheme = {
'shape': 'circle',
'fillcolor': 'cadetblue3',
'color': 'cadetblue4',
'height': 1,
}
def label(self):
return generic_label(self) if generic else self._label
from itertools import count
tids = count(1)
Wmax = int(args.get('wmax', 4) or 0)
Tmax = int(args.get('tmax', 3) or 0)
def maybe_abbr(l, name, max=Wmax):
size = len(l)
abbr = max and size > max
if 'enumerate' in args:
l = [f'{name}{subscript(i + 1)}'
for i, obj in enumerate(l)]
if abbr:
l = l[0:max - 1] + [l[size - 1]]
l[max - 2] = '{}⎨…{}⎬'.format(
name[0], subscript(size - (max - 1)))
return l
app = ctx.obj.app
try:
workers = args['nodes']
threads = args.get('threads') or []
except KeyError:
replies = app.control.inspect().stats() or {}
workers, threads = [], []
for worker, reply in replies.items():
workers.append(worker)
threads.append(reply['pool']['max-concurrency'])
wlen = len(workers)
backend = args.get('backend', app.conf.result_backend)
threads_for = {}
workers = maybe_abbr(workers, 'Worker')
if Wmax and wlen > Wmax:
threads = threads[0:3] + [threads[-1]]
for i, threads in enumerate(threads):
threads_for[workers[i]] = maybe_abbr(
list(range(int(threads))), 'P', Tmax,
)
broker = Broker(args.get(
'broker', app.connection_for_read().as_uri()))
backend = Backend(backend) if backend else None
deps = DependencyGraph(formatter=Formatter())
deps.add_arc(broker)
if backend:
deps.add_arc(backend)
curworker = [0]
for i, worker in enumerate(workers):
worker = Worker(worker, pos=i)
deps.add_arc(worker)
deps.add_edge(worker, broker)
if backend:
deps.add_edge(worker, backend)
threads = threads_for.get(worker._label)
if threads:
for thread in threads:
thread = Thread(thread)
deps.add_arc(thread)
deps.add_edge(thread, worker)
curworker[0] += 1
deps.to_dot(sys.stdout) |
Get info from broker.
Note:
For RabbitMQ the management plugin is required. | def list_(ctx):
"""Get info from broker.
Note:
For RabbitMQ the management plugin is required.
""" |
Inspect queue bindings. | def bindings(ctx):
"""Inspect queue bindings."""
# TODO: Consider using a table formatter for this command.
app = ctx.obj.app
with app.connection() as conn:
app.amqp.TaskConsumer(conn).declare()
try:
bindings = conn.manager.get_bindings()
except NotImplementedError:
raise click.UsageError('Your transport cannot list bindings.')
def fmt(q, e, r):
ctx.obj.echo(f'{q:<28} {e:<28} {r}')
fmt('Queue', 'Exchange', 'Routing Key')
fmt('-' * 16, '-' * 16, '-' * 16)
for b in bindings:
fmt(b['destination'], b['source'], b['routing_key']) |
The ``celery logtool`` command. | def logtool(ctx):
"""The ``celery logtool`` command.""" |
Migrate tasks from one broker to another.
Warning:
This command is experimental, make sure you have a backup of
the tasks before you continue. | def migrate(ctx, source, destination, **kwargs):
"""Migrate tasks from one broker to another.
Warning:
This command is experimental, make sure you have a backup of
the tasks before you continue.
"""
# TODO: Use a progress bar
def on_migrate_task(state, body, message):
ctx.obj.echo(f"Migrating task {state.count}/{state.strtotal}: {body}")
migrate_tasks(Connection(source),
Connection(destination),
callback=on_migrate_task,
**kwargs) |
Start multiple worker instances. | def multi(ctx, **kwargs):
"""Start multiple worker instances."""
cmd = MultiTool(quiet=ctx.obj.quiet, no_color=ctx.obj.no_color)
# In 4.x, celery multi ignores the global --app option.
# Since in 5.0 the --app option is global-only, we
# rearrange the arguments so that the MultiTool will parse them correctly.
args = sys.argv[1:]
args = args[args.index('multi'):] + args[:args.index('multi')]
return cmd.execute_from_commandline(args) |
Erase all messages from all known task queues.
Warning:
There's no undo operation for this command. | def purge(ctx, force, queues, exclude_queues, **kwargs):
"""Erase all messages from all known task queues.
Warning:
There's no undo operation for this command.
"""
app = ctx.obj.app
queues = set(queues or app.amqp.queues.keys())
exclude_queues = set(exclude_queues or [])
names = queues - exclude_queues
qnum = len(names)
if names:
queues_headline = text.pluralize(qnum, 'queue')
if not force:
queue_names = ', '.join(sorted(names))
click.confirm(f"{ctx.obj.style('WARNING', fg='red')}:"
"This will remove all tasks from "
f"{queues_headline}: {queue_names}.\n"
" There is no undo for this operation!\n\n"
"(to skip this prompt use the -f option)\n"
"Are you sure you want to delete all tasks?",
abort=True)
def _purge(conn, queue):
try:
return conn.default_channel.queue_purge(queue) or 0
except conn.channel_errors:
return 0
with app.connection_for_write() as conn:
messages = sum(_purge(conn, queue) for queue in names)
if messages:
messages_headline = text.pluralize(messages, 'message')
ctx.obj.echo(f"Purged {messages} {messages_headline} from "
f"{qnum} known task {queues_headline}.")
else:
ctx.obj.echo(f"No messages purged from {qnum} {queues_headline}.") |
Print the return value for a given task id. | def result(ctx, task_id, task, traceback):
"""Print the return value for a given task id."""
app = ctx.obj.app
result_cls = app.tasks[task].AsyncResult if task else app.AsyncResult
task_result = result_cls(task_id)
value = task_result.traceback if traceback else task_result.get()
# TODO: Prettify result
ctx.obj.echo(value) |
Start shell session with convenient access to celery symbols.
The following symbols will be added to the main globals:
- ``celery``: the current application.
- ``chord``, ``group``, ``chain``, ``chunks``,
``xmap``, ``xstarmap`` ``subtask``, ``Task``
- all registered tasks. | def shell(ctx, ipython=False, bpython=False,
python=False, without_tasks=False, eventlet=False,
gevent=False, **kwargs):
"""Start shell session with convenient access to celery symbols.
The following symbols will be added to the main globals:
- ``celery``: the current application.
- ``chord``, ``group``, ``chain``, ``chunks``,
``xmap``, ``xstarmap`` ``subtask``, ``Task``
- all registered tasks.
"""
sys.path.insert(0, os.getcwd())
if eventlet:
import_module('celery.concurrency.eventlet')
if gevent:
import_module('celery.concurrency.gevent')
import celery
app = ctx.obj.app
app.loader.import_default_modules()
# pylint: disable=attribute-defined-outside-init
locals = {
'app': app,
'celery': app,
'Task': celery.Task,
'chord': celery.chord,
'group': celery.group,
'chain': celery.chain,
'chunks': celery.chunks,
'xmap': celery.xmap,
'xstarmap': celery.xstarmap,
'subtask': celery.subtask,
'signature': celery.signature,
}
if not without_tasks:
locals.update({
task.__name__: task for task in app.tasks.values()
if not task.name.startswith('celery.')
})
if python:
_invoke_fallback_shell(locals)
elif bpython:
try:
_invoke_bpython_shell(locals)
except ImportError:
ctx.obj.echo(f'{ctx.obj.ERROR}: bpython is not installed')
elif ipython:
try:
_invoke_ipython_shell(locals)
except ImportError as e:
ctx.obj.echo(f'{ctx.obj.ERROR}: {e}')
_invoke_default_shell(locals) |
Perform upgrade between versions. | def upgrade(ctx):
"""Perform upgrade between versions.""" |
Migrate settings from Celery 3.x to Celery 4.x. | def settings(filename, django, compat, no_backup):
"""Migrate settings from Celery 3.x to Celery 4.x."""
lines = _slurp(filename)
keyfilter = _compat_key if django or compat else pass1
print(f'processing {filename}...', file=sys.stderr)
# gives list of tuples: ``(did_change, line_contents)``
new_lines = [
_to_new_key(line, keyfilter) for line in lines
]
if any(n[0] for n in new_lines): # did have changes
if not no_backup:
_backup(filename)
with codecs.open(filename, 'w', 'utf-8') as write_fh:
for _, line in new_lines:
write_fh.write(line)
print('Changes to your setting have been made!',
file=sys.stdout)
else:
print('Does not seem to require any changes :-)',
file=sys.stdout) |
Detach program by argv. | def detach(path, argv, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, workdir=None, fake=False, app=None,
executable=None, hostname=None):
"""Detach program by argv."""
fake = 1 if C_FAKEFORK else fake
# `detached()` will attempt to touch the logfile to confirm that error
# messages won't be lost after detaching stdout/err, but this means we need
# to pre-format it rather than relying on `setup_logging_subsystem()` like
# we can elsewhere.
logfile = node_format(logfile, hostname)
with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
after_forkers=False):
try:
if executable is not None:
path = executable
os.execv(path, [path] + argv)
return EX_OK
except Exception: # pylint: disable=broad-except
if app is None:
from celery import current_app
app = current_app
app.log.setup_logging_subsystem(
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
return EX_FAILURE |
Start worker instance.
Examples
--------
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
$ celery -A proj worker --concurrency=1000 -P eventlet
$ celery worker --autoscale=10,0 | def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, statedb=None,
**kwargs):
"""Start worker instance.
\b
Examples
--------
\b
$ celery --app=proj worker -l INFO
$ celery -A proj worker -l INFO -Q hipri,lopri
$ celery -A proj worker --concurrency=4
$ celery -A proj worker --concurrency=1000 -P eventlet
$ celery worker --autoscale=10,0
"""
try:
app = ctx.obj.app
if ctx.args:
try:
app.config_from_cmdline(ctx.args, namespace='worker')
except (KeyError, ValueError) as e:
# TODO: Improve the error messages
raise click.UsageError(
"Unable to parse extra configuration from command line.\n"
f"Reason: {e}", ctx=ctx)
if kwargs.get('detach', False):
argv = ['-m', 'celery'] + sys.argv[1:]
if '--detach' in argv:
argv.remove('--detach')
if '-D' in argv:
argv.remove('-D')
if "--uid" in argv:
argv.remove('--uid')
if "--gid" in argv:
argv.remove('--gid')
return detach(sys.executable,
argv,
logfile=logfile,
pidfile=pidfile,
uid=uid, gid=gid,
umask=kwargs.get('umask', None),
workdir=kwargs.get('workdir', None),
app=app,
executable=kwargs.get('executable', None),
hostname=hostname)
maybe_drop_privileges(uid=uid, gid=gid)
worker = app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=logfile, # node format handled by celery.app.log.setup
pidfile=node_format(pidfile, hostname),
statedb=node_format(statedb, hostname),
no_color=ctx.obj.no_color,
quiet=ctx.obj.quiet,
**kwargs)
worker.start()
ctx.exit(worker.exitcode)
except SecurityError as e:
ctx.obj.error(e.args[0])
ctx.exit(1) |
Return true if generator is not started. | def gen_not_started(gen):
"""Return true if generator is not started."""
return inspect.getgeneratorstate(gen) == "GEN_CREATED" |
Simple wrapper to :class:`~select.select`, using :class:`~select.poll`. | def _select(readers=None, writers=None, err=None, timeout=0,
Arguments:
readers (Set[Fd]): Set of reader fds to test if readable.
writers (Set[Fd]): Set of writer fds to test if writable.
err (Set[Fd]): Set of fds to test for error condition.
All fd sets passed must be mutable as this function
will remove non-working fds from them, this also means
the caller must make sure there are still fds in the sets
before calling us again.
Returns:
Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where
``readable`` is a set of fds that have data available for read,
``writable`` is a set of fds that are ready to be written to
and ``again`` is a flag that if set means the caller must
throw away the result and call us again. | def _select(readers=None, writers=None, err=None, timeout=0,
poll=_select_imp):
"""Simple wrapper to :class:`~select.select`, using :`~select.poll`.
Arguments:
readers (Set[Fd]): Set of reader fds to test if readable.
writers (Set[Fd]): Set of writer fds to test if writable.
err (Set[Fd]): Set of fds to test for error condition.
All fd sets passed must be mutable as this function
will remove non-working fds from them, this also means
the caller must make sure there are still fds in the sets
before calling us again.
Returns:
Tuple[Set, Set, Set]: of ``(readable, writable, again)``, where
``readable`` is a set of fds that have data available for read,
``writable`` is a set of fds that are ready to be written to
and ``again`` is a flag that if set means the caller must
throw away the result and call us again.
"""
readers = set() if readers is None else readers
writers = set() if writers is None else writers
err = set() if err is None else err
try:
return poll(readers, writers, err, timeout)
except OSError as exc:
_errno = exc.errno
if _errno == errno.EINTR:
return set(), set(), 1
elif _errno in SELECT_BAD_FD:
for fd in readers | writers | err:
try:
select.select([fd], [], [], 0)
except OSError as exc:
_errno = exc.errno
if _errno not in SELECT_BAD_FD:
raise
readers.discard(fd)
writers.discard(fd)
err.discard(fd)
return set(), set(), 1
else:
raise |
Apply hub method to fds in iter, remove from list if failure.
Some file descriptors may become stale through OS reasons
or possibly other reasons, so safely manage our lists of FDs.
:param fds_iter: the file descriptors to iterate over and apply hub_method to
:param source_data: data source to remove the FD from if it raises OSError
:param hub_method: the method to call with each fd and kwargs
:param args: passed through to hub_method; the special string '*fd*' is
substituted with the current fd object in the iteration (for some callers)
:param kwargs: passed through to the hub method (no substitutions needed) | def iterate_file_descriptors_safely(fds_iter, source_data,
hub_method, *args, **kwargs):
"""Apply hub method to fds in iter, remove from list if failure.
Some file descriptors may become stale through OS reasons
or possibly other reasons, so safely manage our lists of FDs.
:param fds_iter: the file descriptors to iterate over and apply hub_method to
:param source_data: data source to remove the FD from if it raises OSError
:param hub_method: the method to call with each fd and kwargs
:param args: passed through to hub_method; the special string '*fd*' is
substituted with the current fd object in the iteration (for some callers)
:param kwargs: passed through to the hub method (no substitutions needed)
"""
def _meta_fd_argument_maker():
# uses the current iterations value for fd
call_args = args
if "*fd*" in call_args:
call_args = [fd if arg == "*fd*" else arg for arg in args]
return call_args
# Track stale FDs for cleanup possibility
stale_fds = []
for fd in fds_iter:
# Handle using the correct arguments to the hub method
hub_args, hub_kwargs = _meta_fd_argument_maker(), kwargs
try: # Call the hub method
hub_method(fd, *hub_args, **hub_kwargs)
except (OSError, FileNotFoundError):
logger.warning(
"Encountered OSError when accessing fd %s ",
fd, exc_info=True)
stale_fds.append(fd) # take note of stale fd
# Remove now defunct fds from the managed list
if source_data:
for fd in stale_fds:
try:
if hasattr(source_data, 'remove'):
source_data.remove(fd)
else: # then not a list/set ... try dict
source_data.pop(fd, None)
except ValueError:
logger.warning("ValueError trying to invalidate %s from %s",
fd, source_data) |
Apply function within pool context. | def apply_target(target, args=(), kwargs=None, callback=None,
accept_callback=None, pid=None, getpid=os.getpid,
propagate=(), monotonic=time.monotonic, **_):
"""Apply function within pool context."""
kwargs = {} if not kwargs else kwargs
if accept_callback:
accept_callback(pid or getpid(), monotonic())
try:
ret = target(*args, **kwargs)
except propagate:
raise
except Exception:
raise
except (WorkerShutdown, WorkerTerminate):
raise
except BaseException as exc:
try:
reraise(WorkerLostError, WorkerLostError(repr(exc)),
sys.exc_info()[2])
except WorkerLostError:
callback(ExceptionInfo())
else:
callback(ret) |
Pool child process initializer.
Initialize the child pool process to ensure the correct
app instance is used and things like logging works. | def process_initializer(app, hostname):
"""Pool child process initializer.
Initialize the child pool process to ensure the correct
app instance is used and things like logging works.
"""
# Each running worker gets SIGKILL by OS when main process exits.
platforms.set_pdeathsig('SIGKILL')
_set_task_join_will_block(True)
platforms.signals.reset(*WORKER_SIGRESET)
platforms.signals.ignore(*WORKER_SIGIGNORE)
platforms.set_mp_process_title('celeryd', hostname=hostname)
# This is for Windows and other platforms not supporting
# fork(). Note that init_worker makes sure it's only
# run once per process.
app.loader.init_worker()
app.loader.init_worker_process()
logfile = os.environ.get('CELERY_LOG_FILE') or None
if logfile and '%i' in logfile.lower():
# logfile path will differ so need to set up logging again.
app.log.already_setup = False
app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
logfile,
bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')),
hostname=hostname)
if os.environ.get('FORKED_BY_MULTIPROCESSING'):
# pool did execv after fork
trace.setup_worker_optimizations(app, hostname)
else:
app.set_current()
set_default_app(app)
app.finalize()
trace._tasks = app._tasks # enables fast_trace_task optimization.
# rebuild execution handler for all tasks.
from celery.app.trace import build_tracer
for name, task in app.tasks.items():
task.__trace__ = build_tracer(name, task, app.loader, hostname,
app=app)
from celery.worker import state as worker_state
worker_state.reset_state()
signals.worker_process_init.send(sender=None) |
Pool child process destructor.
Dispatch the :signal:`worker_process_shutdown` signal. | def process_destructor(pid, exitcode):
"""Pool child process destructor.
Dispatch the :signal:`worker_process_shutdown` signal.
"""
signals.worker_process_shutdown.send(
sender=None, pid=pid, exitcode=exitcode,
) |
Return pool implementation by name. | def get_implementation(cls):
"""Return pool implementation by name."""
return symbol_by_name(cls, ALIASES) |
Return all available pool type names. | def get_available_pool_names():
"""Return all available pool type names."""
return tuple(ALIASES.keys()) |
Republish message. | def republish(producer, message, exchange=None, routing_key=None,
remove_props=None):
"""Republish message."""
if not remove_props:
remove_props = ['application_headers', 'content_type',
'content_encoding', 'headers']
body = ensure_bytes(message.body) # use raw message body.
info, headers, props = (message.delivery_info,
message.headers, message.properties)
exchange = info['exchange'] if exchange is None else exchange
routing_key = info['routing_key'] if routing_key is None else routing_key
ctype, enc = message.content_type, message.content_encoding
# remove compression header, as this will be inserted again
# when the message is recompressed.
compression = headers.pop('compression', None)
expiration = props.pop('expiration', None)
# ensure expiration is a float
expiration = float(expiration) if expiration is not None else None
for key in remove_props:
props.pop(key, None)
producer.publish(ensure_bytes(body), exchange=exchange,
routing_key=routing_key, compression=compression,
headers=headers, content_type=ctype,
content_encoding=enc, expiration=expiration,
**props) |
Migrate single task message. | def migrate_task(producer, body_, message, queues=None):
"""Migrate single task message."""
info = message.delivery_info
queues = {} if queues is None else queues
republish(producer, message,
exchange=queues.get(info['exchange']),
routing_key=queues.get(info['routing_key'])) |
Migrate tasks from one broker to another. | def migrate_tasks(source, dest, migrate=migrate_task, app=None,
queues=None, **kwargs):
"""Migrate tasks from one broker to another."""
app = app_or_default(app)
queues = prepare_queues(queues)
producer = app.amqp.Producer(dest, auto_declare=False)
migrate = partial(migrate, producer, queues=queues)
def on_declare_queue(queue):
new_queue = queue(producer.channel)
new_queue.name = queues.get(queue.name, queue.name)
if new_queue.routing_key == queue.name:
new_queue.routing_key = queues.get(queue.name,
new_queue.routing_key)
if new_queue.exchange.name == queue.name:
new_queue.exchange.name = queues.get(queue.name, queue.name)
new_queue.declare()
return start_filter(app, source, migrate, queues=queues,
on_declare_queue=on_declare_queue, **kwargs) |
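A hedged usage sketch matching the documented pattern for draining one broker into another; the URLs are placeholders.

.. code-block:: python

    from kombu import Connection

    from celery.contrib.migrate import migrate_tasks

    # Move every message from the old broker to the new one.
    migrate_tasks(Connection('amqp://guest:guest@old-broker//'),
                  Connection('amqp://guest:guest@new-broker//'))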
Find tasks by filtering them and move the tasks to a new queue.
Arguments:
predicate (Callable): Filter function used to decide the messages
to move. Must accept the standard signature of ``(body, message)``
used by Kombu consumer callbacks. If the predicate wants the
message to be moved it must return either:
1) a tuple of ``(exchange, routing_key)``, or
2) a :class:`~kombu.entity.Queue` instance, or
3) any other true value means the specified
``exchange`` and ``routing_key`` arguments will be used.
connection (kombu.Connection): Custom connection to use.
source: List[Union[str, kombu.Queue]]: Optional list of source
queues to use instead of the default (queues
in :setting:`task_queues`). This list can also contain
:class:`~kombu.entity.Queue` instances.
exchange (str, kombu.Exchange): Default destination exchange.
routing_key (str): Default destination routing key.
limit (int): Limit number of messages to filter.
callback (Callable): Callback called after message moved,
with signature ``(state, body, message)``.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
Also supports the same keyword arguments as :func:`start_filter`.
To demonstrate, the :func:`move_task_by_id` operation can be implemented
like this:
.. code-block:: python
def is_wanted_task(body, message):
if body['id'] == wanted_id:
return Queue('foo', exchange=Exchange('foo'),
routing_key='foo')
move(is_wanted_task)
or with a transform:
.. code-block:: python
def transform(value):
if isinstance(value, str):
return Queue(value, Exchange(value), value)
return value
move(is_wanted_task, transform=transform)
Note:
The predicate may also return a tuple of ``(exchange, routing_key)``
to specify the destination to where the task should be moved,
or a :class:`~kombu.entity.Queue` instance.
Any other true value means that the task will be moved to the
default exchange/routing_key. | def move(predicate, connection=None, exchange=None, routing_key=None,
source=None, app=None, callback=None, limit=None, transform=None,
**kwargs):
"""Find tasks by filtering them and move the tasks to a new queue.
Arguments:
predicate (Callable): Filter function used to decide the messages
to move. Must accept the standard signature of ``(body, message)``
used by Kombu consumer callbacks. If the predicate wants the
message to be moved it must return either:
1) a tuple of ``(exchange, routing_key)``, or
2) a :class:`~kombu.entity.Queue` instance, or
3) any other true value means the specified
``exchange`` and ``routing_key`` arguments will be used.
connection (kombu.Connection): Custom connection to use.
source: List[Union[str, kombu.Queue]]: Optional list of source
queues to use instead of the default (queues
in :setting:`task_queues`). This list can also contain
:class:`~kombu.entity.Queue` instances.
exchange (str, kombu.Exchange): Default destination exchange.
routing_key (str): Default destination routing key.
limit (int): Limit number of messages to filter.
callback (Callable): Callback called after message moved,
with signature ``(state, body, message)``.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
Also supports the same keyword arguments as :func:`start_filter`.
To demonstrate, the :func:`move_task_by_id` operation can be implemented
like this:
.. code-block:: python
def is_wanted_task(body, message):
if body['id'] == wanted_id:
return Queue('foo', exchange=Exchange('foo'),
routing_key='foo')
move(is_wanted_task)
or with a transform:
.. code-block:: python
def transform(value):
if isinstance(value, str):
return Queue(value, Exchange(value), value)
return value
move(is_wanted_task, transform=transform)
Note:
The predicate may also return a tuple of ``(exchange, routing_key)``
to specify the destination to where the task should be moved,
or a :class:`~kombu.entity.Queue` instance.
Any other true value means that the task will be moved to the
default exchange/routing_key.
"""
app = app_or_default(app)
queues = [_maybe_queue(app, queue) for queue in source or []] or None
with app.connection_or_acquire(connection, pool=False) as conn:
producer = app.amqp.Producer(conn)
state = State()
def on_task(body, message):
ret = predicate(body, message)
if ret:
if transform:
ret = transform(ret)
if isinstance(ret, Queue):
maybe_declare(ret, conn.default_channel)
ex, rk = ret.exchange.name, ret.routing_key
else:
ex, rk = expand_dest(ret, exchange, routing_key)
republish(producer, message,
exchange=ex, routing_key=rk)
message.ack()
state.filtered += 1
if callback:
callback(state, body, message)
if limit and state.filtered >= limit:
raise StopFiltering()
return start_filter(app, conn, on_task, consume_from=queues, **kwargs) |
Return true if the task id equals ``task_id``. | def task_id_eq(task_id, body, message):
"""Return true if the task id equals ``task_id``."""
return body['id'] == task_id |
Return true if the task id is a member of the set ``ids``. | def task_id_in(ids, body, message):
"""Return true if the task id is a member of the set ``ids``."""
return body['id'] in ids |
Filter tasks. | def start_filter(app, conn, filter, limit=None, timeout=1.0,
ack_messages=False, tasks=None, queues=None,
callback=None, forever=False, on_declare_queue=None,
consume_from=None, state=None, accept=None, **kwargs):
"""Filter tasks."""
return Filterer(
app, conn, filter,
limit=limit,
timeout=timeout,
ack_messages=ack_messages,
tasks=tasks,
queues=queues,
callback=callback,
forever=forever,
on_declare_queue=on_declare_queue,
consume_from=consume_from,
state=state,
accept=accept,
**kwargs).start() |
Find a task by id and move it to another queue.
Arguments:
task_id (str): Id of task to find and move.
dest (str, kombu.Queue): Destination queue.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
**kwargs (Any): Also supports the same keyword
arguments as :func:`move`. | def move_task_by_id(task_id, dest, **kwargs):
"""Find a task by id and move it to another queue.
Arguments:
task_id (str): Id of task to find and move.
dest (str, kombu.Queue): Destination queue.
transform (Callable): Optional function to transform the return
value (destination) of the filter function.
**kwargs (Any): Also supports the same keyword
arguments as :func:`move`.
"""
return move_by_idmap({task_id: dest}, **kwargs) |
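A usage sketch (the task id and destination below are placeholders, following the ``Queue(name, Exchange(name), name)`` convention used in the examples above):
.. code-block:: python

    from kombu import Exchange, Queue
    from celery.contrib.migrate import move_task_by_id

    dest = Queue('priority.high', Exchange('priority.high'), 'priority.high')
    move_task_by_id('3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f', dest)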
Move tasks by matching from a ``task_id: queue`` mapping.
Where ``queue`` is a queue to move the task to.
Example:
>>> move_by_idmap({
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
... queues=['hipri']) | def move_by_idmap(map, **kwargs):
"""Move tasks by matching from a ``task_id: queue`` mapping.
Where ``queue`` is a queue to move the task to.
Example:
>>> move_by_idmap({
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'),
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'),
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')},
... queues=['hipri'])
"""
def task_id_in_map(body, message):
return map.get(message.properties['correlation_id'])
# adding the limit means that we don't have to consume any more
# when we've found everything.
return move(task_id_in_map, limit=len(map), **kwargs) |
Move tasks by matching from a ``task_name: queue`` mapping.
``queue`` is the queue to move the task to.
Example:
>>> move_by_taskmap({
... 'tasks.add': Queue('name'),
... 'tasks.mul': Queue('name'),
... }) | def move_by_taskmap(map, **kwargs):
"""Move tasks by matching from a ``task_name: queue`` mapping.
``queue`` is the queue to move the task to.
Example:
>>> move_by_taskmap({
... 'tasks.add': Queue('name'),
... 'tasks.mul': Queue('name'),
... })
"""
def task_name_in_map(body, message):
return map.get(body['task']) # <- name of task
return move(task_name_in_map, **kwargs) |
Register additional pytest configuration. | def pytest_configure(config):
"""Register additional pytest configuration."""
# add the pytest.mark.celery() marker registration to the pytest.ini [markers] section
# this prevents pytest 4.5 and newer from issuing a warning about an unknown marker
# and shows helpful marker documentation when running pytest --markers.
config.addinivalue_line(
"markers", "celery(**overrides): override celery configuration for a test case"
) |
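With the marker registered, individual tests can override settings through it; a rough sketch (the ``rpc://`` backend is just an example value):
.. code-block:: python

    import pytest

    @pytest.mark.celery(result_backend='rpc://')
    def test_marker_overrides_config(celery_app):
        # the marker's keyword arguments are merged into the test app's config
        assert celery_app.conf.result_backend == 'rpc://'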
Utility context used to set up a Celery app for pytest fixtures. | def _create_app(enable_logging=False,
use_trap=False,
parameters=None,
**config):
# type: (Any, Any, Any, **Any) -> Celery
"""Utility context used to setup Celery app for pytest fixtures."""
from .testing.app import TestApp, setup_default_app
parameters = {} if not parameters else parameters
test_app = TestApp(
set_as_current=False,
enable_logging=enable_logging,
config=config,
**parameters
)
with setup_default_app(test_app, use_trap=use_trap):
yield test_app |
You can override this fixture to enable the app trap.
The app trap raises an exception whenever something attempts
to use the current or default apps. | def use_celery_app_trap():
# type: () -> bool
"""You can override this fixture to enable the app trap.
The app trap raises an exception whenever something attempts
to use the current or default apps.
"""
return False |
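To enable the trap, override the fixture in your ``conftest.py``, roughly:
.. code-block:: python

    # conftest.py
    import pytest

    @pytest.fixture(scope='session')
    def use_celery_app_trap():
        # code that touches the current/default app without going through
        # the fixtures will now raise an error
        return True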
Session Fixture: Return app for session fixtures. | def celery_session_app(request,
celery_config,
celery_parameters,
celery_enable_logging,
use_celery_app_trap):
# type: (Any, Any, Any, Any, Any) -> Celery
"""Session Fixture: Return app for session fixtures."""
mark = request.node.get_closest_marker('celery')
config = dict(celery_config, **mark.kwargs if mark else {})
with _create_app(enable_logging=celery_enable_logging,
use_trap=use_celery_app_trap,
parameters=celery_parameters,
**config) as app:
if not use_celery_app_trap:
app.set_default()
app.set_current()
yield app |
Session Fixture: Start worker that lives throughout test suite. | def celery_session_worker(
request, # type: Any
celery_session_app, # type: Celery
celery_includes, # type: Sequence[str]
celery_class_tasks, # type: str
celery_worker_pool, # type: Any
celery_worker_parameters, # type: Mapping[str, Any]
):
# type: (...) -> WorkController
"""Session Fixture: Start worker that lives throughout test suite."""
from .testing import worker
if not NO_WORKER:
for module in celery_includes:
celery_session_app.loader.import_task_module(module)
for class_task in celery_class_tasks:
celery_session_app.register_task(class_task)
with worker.start_worker(celery_session_app,
pool=celery_worker_pool,
**celery_worker_parameters) as w:
yield w |
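One way to use the session worker is to point ``celery_includes`` at your task modules in ``conftest.py`` so they are imported before the worker starts; a sketch, where ``myproj.tasks`` is a hypothetical module whose tasks are declared with ``shared_task``:
.. code-block:: python

    # conftest.py
    import pytest

    @pytest.fixture(scope='session')
    def celery_includes():
        return ['myproj.tasks']          # hypothetical task module

    # test module
    def test_add(celery_session_worker):
        from myproj.tasks import add     # hypothetical task
        assert add.delay(2, 2).get(timeout=10) == 4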
You can override this fixture to enable logging. | def celery_enable_logging():
# type: () -> bool
"""You can override this fixture to enable logging."""
return False |
You can override this fixture to include modules when a worker starts.
You can have this return a list of module names to import;
these can be task modules, modules registering signals, and so on. | def celery_includes():
# type: () -> Sequence[str]
"""You can override this include modules when a worker start.
You can have this return a list of module names to import,
these can be task modules, modules registering signals, and so on.
"""
return () |
You can override this fixture to set the worker pool.
The "solo" pool is used by default, but you can set this to
return e.g. "prefork". | def celery_worker_pool():
# type: () -> Union[str, Any]
"""You can override this fixture to set the worker pool.
The "solo" pool is used by default, but you can set this to
return e.g. "prefork".
"""
return 'solo' |
Redefine this fixture to configure the test Celery app.
The config returned by your fixture will then be used
to configure the :func:`celery_app` fixture. | def celery_config():
# type: () -> Mapping[str, Any]
"""Redefine this fixture to configure the test Celery app.
The config returned by your fixture will then be used
to configure the :func:`celery_app` fixture.
"""
return {} |
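A typical ``conftest.py`` override looks roughly like this (all values below are examples, not required settings):
.. code-block:: python

    # conftest.py
    import pytest

    @pytest.fixture(scope='session')
    def celery_config():
        return {
            'broker_url': 'memory://',
            'result_backend': 'cache+memory://',
            'task_default_queue': 'testing',   # example extra setting
        }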
Redefine this fixture to change the init parameters of test Celery app.
The dict returned by your fixture will then be used
as parameters when instantiating :class:`~celery.Celery`. | def celery_parameters():
# type: () -> Mapping[str, Any]
"""Redefine this fixture to change the init parameters of test Celery app.
The dict returned by your fixture will then be used
as parameters when instantiating :class:`~celery.Celery`.
"""
return {} |
Redefine this fixture to change the init parameters of Celery workers.
This can be used, e.g., to define the queues the worker will consume tasks from.
The dict returned by your fixture will then be used
as parameters when instantiating :class:`~celery.worker.WorkController`. | def celery_worker_parameters():
# type: () -> Mapping[str, Any]
"""Redefine this fixture to change the init parameters of Celery workers.
This can be used, e.g., to define the queues the worker will consume tasks from.
The dict returned by your fixture will then be used
as parameters when instantiating :class:`~celery.worker.WorkController`.
"""
return {} |
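For example, to have the embedded worker consume from specific queues, the fixture can be overridden in ``conftest.py`` (the queue names are illustrative):
.. code-block:: python

    # conftest.py
    import pytest

    @pytest.fixture(scope='session')
    def celery_worker_parameters():
        return {
            'queues': ('hipri', 'celery'),   # queues the embedded worker consumes from
        }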
Fixture creating a Celery application instance. | def celery_app(request,
celery_config,
celery_parameters,
celery_enable_logging,
use_celery_app_trap):
"""Fixture creating a Celery application instance."""
mark = request.node.get_closest_marker('celery')
config = dict(celery_config, **mark.kwargs if mark else {})
with _create_app(enable_logging=celery_enable_logging,
use_trap=use_celery_app_trap,
parameters=celery_parameters,
**config) as app:
yield app |
Redefine this fixture to register tasks with the test Celery app. | def celery_class_tasks():
"""Redefine this fixture to register tasks with the test Celery app."""
return [] |
Fixture: Start worker in a thread, stop it when the test returns. | def celery_worker(request,
celery_app,
celery_includes,
celery_worker_pool,
celery_worker_parameters):
# type: (Any, Celery, Sequence[str], str, Any) -> WorkController
"""Fixture: Start worker in a thread, stop it when the test returns."""
from .testing import worker
if not NO_WORKER:
for module in celery_includes:
celery_app.loader.import_task_module(module)
with worker.start_worker(celery_app,
pool=celery_worker_pool,
**celery_worker_parameters) as w:
yield w |
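A sketch of a test that defines a task on the fly and runs it through the embedded worker; ``celery_worker.reload()`` is called so the already-running worker picks up the newly registered task:
.. code-block:: python

    def test_mul(celery_app, celery_worker):
        @celery_app.task
        def mul(x, y):
            return x * y

        celery_worker.reload()   # let the running worker see the new task
        assert mul.delay(4, 4).get(timeout=10) == 16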
Fixture that sets app as current. | def depends_on_current_app(celery_app):
"""Fixture that sets app as current."""
celery_app.set_current() |
Return the current debugger instance, or create if none. | def debugger():
"""Return the current debugger instance, or create if none."""
rdb = _current[0]
if rdb is None or not rdb.active:
rdb = _current[0] = Rdb()
return rdb |
Set break-point at current location, or a specified frame. | def set_trace(frame=None):
"""Set break-point at current location, or a specified frame."""
if frame is None:
frame = _frame().f_back
return debugger().set_trace(frame) |
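These helpers back :mod:`celery.contrib.rdb`; from task code the usual pattern looks like this (attach with ``telnet`` to the port the worker logs, 6899 by default):
.. code-block:: python

    from celery import shared_task
    from celery.contrib import rdb

    @shared_task
    def add(x, y):
        result = x + y
        rdb.set_trace()   # opens a remote pdb session on the worker host
        return result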
Handler for autodoc-skip-member event. | def autodoc_skip_member_handler(app, what, name, obj, skip, options):
"""Handler for autodoc-skip-member event."""
# Celery tasks created with the @task decorator have the property
# that *obj.__doc__* and *obj.__class__.__doc__* are equal, which
# trips up the logic in sphinx.ext.autodoc that is supposed to
# suppress repetition of class documentation in an instance of the
# class. This overrides that behavior.
if isinstance(obj, BaseTask) and getattr(obj, '__wrapped__', None):
if skip:
return False
return None |
Set up the Sphinx extension. | def setup(app):
"""Setup Sphinx extension."""
app.setup_extension('sphinx.ext.autodoc')
app.add_autodocumenter(TaskDocumenter)
app.add_directive_to_domain('py', 'task', TaskDirective)
app.add_config_value('celery_task_prefix', '(task)', True)
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
return {
'parallel_read_safe': True
} |
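To use the extension, add it to your project's Sphinx ``conf.py``; for example:
.. code-block:: python

    # docs/conf.py
    extensions = [
        'sphinx.ext.autodoc',
        'celery.contrib.sphinx',
    ]

    # optional: change the prefix shown before documented tasks
    celery_task_prefix = '(task)'   # this is the default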
App used for testing. | def TestApp(name=None, config=None, enable_logging=False, set_as_current=False,
log=UnitLogging, backend=None, broker=None, **kwargs):
"""App used for testing."""
from . import tasks # noqa
config = dict(deepcopy(DEFAULT_TEST_CONFIG), **config or {})
if broker is not None:
config.pop('broker_url', None)
if backend is not None:
config.pop('result_backend', None)
log = None if enable_logging else log
test_app = Celery(
name or 'celery.tests',
set_as_current=set_as_current,
log=log,
broker=broker,
backend=backend,
**kwargs)
test_app.add_defaults(config)
return test_app |
Contextmanager that installs the trap app.
The trap means that anything trying to use the current or default app
will raise an exception. | def set_trap(app):
"""Contextmanager that installs the trap app.
The trap means that anything trying to use the current or default app
will raise an exception.
"""
trap = Trap()
prev_tls = _state._tls
_state.set_default_app(trap)
class NonTLS:
current_app = trap
_state._tls = NonTLS()
try:
yield
finally:
_state._tls = prev_tls |
Set up the default app for testing.
Ensures state is clean after the test returns. | def setup_default_app(app, use_trap=False):
"""Setup default app for testing.
Ensures state is clean after the test returns.
"""
prev_current_app = _state.get_current_app()
prev_default_app = _state.default_app
prev_finalizers = set(_state._on_app_finalizers)
prev_apps = weakref.WeakSet(_state._apps)
try:
if use_trap:
with set_trap(app):
yield
else:
yield
finally:
_state.set_default_app(prev_default_app)
_state._tls.current_app = prev_current_app
if app is not prev_current_app:
app.close()
_state._on_app_finalizers = prev_finalizers
_state._apps = prev_apps |
Create task message in protocol 2 format. | def TaskMessage(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
chain=None, # type: Sequence[Signature]
shadow=None, # type: str
utc=None, # type: bool
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 2 format."""
kwargs = {} if not kwargs else kwargs
from kombu.serialization import dumps
from celery import uuid
id = id or uuid()
message = Mock(name=f'TaskMessage-{id}')
message.headers = {
'id': id,
'task': name,
'shadow': shadow,
}
embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain}
message.headers.update(options)
message.content_type, message.content_encoding, message.body = dumps(
(args, kwargs, embed), serializer='json',
)
message.payload = (args, kwargs, embed)
return message |
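A small sketch of how such a message mock can be inspected in a test (importing the helper from :mod:`celery.contrib.testing.mocks`):
.. code-block:: python

    from celery.contrib.testing.mocks import TaskMessage

    msg = TaskMessage('tasks.add', args=(2, 2))
    assert msg.headers['task'] == 'tasks.add'
    args, kwargs, embed = msg.payload      # protocol 2 body layout
    assert args == (2, 2) and kwargs == {}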
Create task message in protocol 1 format. | def TaskMessage1(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
chain=None, # type: Sequence[Signature]
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 1 format."""
kwargs = {} if not kwargs else kwargs
from kombu.serialization import dumps
from celery import uuid
id = id or uuid()
message = Mock(name=f'TaskMessage-{id}')
message.headers = {}
message.payload = {
'task': name,
'id': id,
'args': args,
'kwargs': kwargs,
'callbacks': callbacks,
'errbacks': errbacks,
}
message.payload.update(options)
message.content_type, message.content_encoding, message.body = dumps(
message.payload,
)
return message |
Create task message from :class:`celery.Signature`.
Example:
>>> m = task_message_from_sig(app, add.s(2, 2))
>>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey') | def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
# type: (Celery, Signature, bool, Any) -> Any
"""Create task message from :class:`celery.Signature`.
Example:
>>> m = task_message_from_sig(app, add.s(2, 2))
>>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey')
"""
sig.freeze()
callbacks = sig.options.pop('link', None)
errbacks = sig.options.pop('link_error', None)
countdown = sig.options.pop('countdown', None)
if countdown:
eta = app.now() + timedelta(seconds=countdown)
else:
eta = sig.options.pop('eta', None)
if eta and isinstance(eta, datetime):
eta = eta.isoformat()
expires = sig.options.pop('expires', None)
if expires and isinstance(expires, numbers.Real):
expires = app.now() + timedelta(seconds=expires)
if expires and isinstance(expires, datetime):
expires = expires.isoformat()
return TaskMessage(
sig.task, id=sig.id, args=sig.args,
kwargs=sig.kwargs,
callbacks=[dict(s) for s in callbacks] if callbacks else None,
errbacks=[dict(s) for s in errbacks] if errbacks else None,
eta=eta,
expires=expires,
utc=utc,
**sig.options
) |
Mock that mocks :keyword:`with` statement contexts. | def ContextMock(*args, **kwargs):
"""Mock that mocks :keyword:`with` statement contexts."""
obj = _ContextMock(*args, **kwargs)
obj.attach_mock(_ContextMock(), '__enter__')
obj.attach_mock(_ContextMock(), '__exit__')
obj.__enter__.return_value = obj
# if __exit__ return a value the exception is ignored,
# so it must return None here.
obj.__exit__.return_value = None
return obj |
Simple task that just returns 'pong'. | def ping():
# type: () -> str
"""Simple task that just returns 'pong'."""
return 'pong' |
Start embedded worker.
Yields:
celery.app.worker.Worker: worker instance. | def start_worker(
app, # type: Celery
concurrency=1, # type: int
pool='solo', # type: str
loglevel=WORKER_LOGLEVEL, # type: Union[str, int]
logfile=None, # type: str
perform_ping_check=True, # type: bool
ping_task_timeout=10.0, # type: float
shutdown_timeout=10.0, # type: float
**kwargs # type: Any
):
# type: (...) -> Iterable
"""Start embedded worker.
Yields:
celery.app.worker.Worker: worker instance.
"""
test_worker_starting.send(sender=app)
worker = None
try:
with _start_worker_thread(app,
concurrency=concurrency,
pool=pool,
loglevel=loglevel,
logfile=logfile,
perform_ping_check=perform_ping_check,
shutdown_timeout=shutdown_timeout,
**kwargs) as worker:
if perform_ping_check:
from .tasks import ping
with allow_join_result():
assert ping.delay().get(timeout=ping_task_timeout) == 'pong'
yield worker
finally:
test_worker_stopped.send(sender=app, worker=worker) |
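Outside of the pytest fixtures the context manager can be used directly; a minimal sketch built on the in-memory test app (``perform_ping_check`` is disabled to keep the example self-contained):
.. code-block:: python

    from celery.contrib.testing.app import TestApp, setup_default_app
    from celery.contrib.testing.worker import start_worker

    app = TestApp()          # memory:// broker and cache+memory:// backend

    @app.task
    def add(x, y):
        return x + y

    with setup_default_app(app):
        with start_worker(app, perform_ping_check=False):
            assert add.delay(2, 2).get(timeout=10) == 4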
Start Celery worker in a thread.
Yields:
celery.worker.Worker: worker instance. | def _start_worker_thread(app: Celery,
concurrency: int = 1,
pool: str = 'solo',
loglevel: Union[str, int] = WORKER_LOGLEVEL,
logfile: Optional[str] = None,
WorkController: Any = TestWorkController,
perform_ping_check: bool = True,
shutdown_timeout: float = 10.0,
**kwargs) -> Iterable[worker.WorkController]:
"""Start Celery worker in a thread.
Yields:
celery.worker.Worker: worker instance.
"""
setup_app_for_worker(app, loglevel, logfile)
if perform_ping_check:
assert 'celery.ping' in app.tasks
# Make sure we can connect to the broker
with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn:
conn.default_channel.queue_declare  # accessing default_channel establishes the connection; queue_declare is not actually called
worker = WorkController(
app=app,
concurrency=concurrency,
hostname=anon_nodename(),
pool=pool,
loglevel=loglevel,
logfile=logfile,
# not allowed to override TestWorkController.on_consumer_ready
ready_callback=None,
without_heartbeat=kwargs.pop("without_heartbeat", True),
without_mingle=True,
without_gossip=True,
**kwargs)
t = threading.Thread(target=worker.start, daemon=True)
t.start()
worker.ensure_started()
_set_task_join_will_block(False)
try:
yield worker
finally:
from celery.worker import state
state.should_terminate = 0
t.join(shutdown_timeout)
if t.is_alive():
raise RuntimeError(
"Worker thread failed to exit within the allocated timeout. "
"Consider raising `shutdown_timeout` if your tasks take longer "
"to execute."
)
state.should_terminate = None |