INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
---|---|
Executes all macros and returns result string
Executes macros only when not in safe_mode | def execute_macros(self):
"""Executes all macros and returns result string
Executes macros only when not in safe_mode
"""
if self.safe_mode:
return '', "Safe mode activated. Code not executed."
# Windows exec does not like Windows newline
self.macros = self.macros.replace('\r\n', '\n')
# Set up environment for evaluation
globals().update(self._get_updated_environment())
# Create file-like string to capture output
code_out = cStringIO.StringIO()
code_err = cStringIO.StringIO()
err_msg = cStringIO.StringIO()
# Capture output and errors
sys.stdout = code_out
sys.stderr = code_err
try:
import signal
signal.signal(signal.SIGALRM, self.handler)
signal.alarm(config["timeout"])
except:
# No POSIX system
pass
try:
exec(self.macros, globals())
try:
signal.alarm(0)
except:
# No POSIX system
pass
except Exception:
# Print exception
# (Because of how the globals are handled during execution
# we must import modules here)
from traceback import print_exception
from src.lib.exception_handling import get_user_codeframe
exc_info = sys.exc_info()
user_tb = get_user_codeframe(exc_info[2]) or exc_info[2]
print_exception(exc_info[0], exc_info[1], user_tb, None, err_msg)
# Restore stdout and stderr
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
results = code_out.getvalue()
errs = code_err.getvalue() + err_msg.getvalue()
code_out.close()
code_err.close()
# Reset result cache
self.result_cache.clear()
# Reset frozen cache
self.frozen_cache.clear()
return results, errs |
Generator that yields sorted keys starting with startkey
Parameters
----------
keys: Iterable of tuple/list
\tKey sequence that is sorted
startkey: Tuple/list
\tFirst key to be yielded
reverse: Bool
\tSort direction reversed if True | def _sorted_keys(self, keys, startkey, reverse=False):
"""Generator that yields sorted keys starting with startkey
Parameters
----------
keys: Iterable of tuple/list
\tKey sequence that is sorted
startkey: Tuple/list
\tFirst key to be yielded
reverse: Bool
\tSort direction reversed if True
"""
tuple_key = lambda t: t[::-1]
if reverse:
tuple_cmp = lambda t: t[::-1] > startkey[::-1]
else:
tuple_cmp = lambda t: t[::-1] < startkey[::-1]
searchkeys = sorted(keys, key=tuple_key, reverse=reverse)
searchpos = sum(1 for _ in ifilter(tuple_cmp, searchkeys))
searchkeys = searchkeys[searchpos:] + searchkeys[:searchpos]
for key in searchkeys:
yield key |
Returns position of findstring in datastring or None if not found.
Flags is a list of strings. Supported strings are:
* "MATCH_CASE": The case has to match for valid find
* "WHOLE_WORD": The word has to be surrounded by whitespace characters
if in the middle of the string
* "REG_EXP": A regular expression is evaluated. | def string_match(self, datastring, findstring, flags=None):
"""
Returns position of findstring in datastring or None if not found.
Flags is a list of strings. Supported strings are:
* "MATCH_CASE": The case has to match for valid find
* "WHOLE_WORD": The word has to be surrounded by whitespace characters
if in the middle of the string
* "REG_EXP": A regular expression is evaluated.
"""
if type(datastring) is IntType: # Empty cell
return None
if flags is None:
flags = []
if "REG_EXP" in flags:
match = re.search(findstring, datastring)
if match is None:
pos = -1
else:
pos = match.start()
else:
if "MATCH_CASE" not in flags:
datastring = datastring.lower()
findstring = findstring.lower()
if "WHOLE_WORD" in flags:
pos = -1
matchstring = r'\b' + findstring + r'+\b'
for match in re.finditer(matchstring, datastring):
pos = match.start()
break # find 1st occurrance
else:
pos = datastring.find(findstring)
if pos == -1:
return None
else:
return pos |
Returns a tuple with the position of the next match of find_string
Returns None if string not found.
Parameters:
-----------
startkey: Start position of search
find_string:String to be searched for
flags: List of strings, out of
["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower) | def findnextmatch(self, startkey, find_string, flags, search_result=True):
""" Returns a tuple with the position of the next match of find_string
Returns None if string not found.
Parameters:
-----------
startkey: Start position of search
find_string:String to be searched for
flags: List of strings, out of
["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower)
"""
assert "UP" in flags or "DOWN" in flags
assert not ("UP" in flags and "DOWN" in flags)
if search_result:
def is_matching(key, find_string, flags):
code = self(key)
if self.string_match(code, find_string, flags) is not None:
return True
else:
res_str = unicode(self[key])
return self.string_match(res_str, find_string, flags) \
is not None
else:
def is_matching(code, find_string, flags):
code = self(key)
return self.string_match(code, find_string, flags) is not None
# List of keys in sgrid in search order
reverse = "UP" in flags
for key in self._sorted_keys(self.keys(), startkey, reverse=reverse):
try:
if is_matching(key, find_string, flags):
return key
except Exception:
# re errors are cryptical: sre_constants,...
pass |
Returns True if Value in digits, False otherwise | def Validate(self, win):
"""Returns True if Value in digits, False otherwise"""
val = self.GetWindow().GetValue()
for x in val:
if x not in string.digits:
return False
return True |
Eats event if key not in digits | def OnChar(self, event):
"""Eats event if key not in digits"""
key = event.GetKeyCode()
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255 or \
chr(key) in string.digits:
event.Skip() |
Draws the text and the combobox icon | def Draw(self, grid, attr, dc, rect, row, col, is_selected):
"""Draws the text and the combobox icon"""
render = wx.RendererNative.Get()
# clear the background
dc.SetBackgroundMode(wx.SOLID)
if is_selected:
dc.SetBrush(wx.Brush(wx.BLUE, wx.SOLID))
dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
else:
dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
dc.DrawRectangleRect(rect)
cb_lbl = grid.GetCellValue(row, col)
string_x = rect.x + 2
string_y = rect.y + 2
dc.DrawText(cb_lbl, string_x, string_y)
button_x = rect.x + rect.width - self.iconwidth
button_y = rect.y
button_width = self.iconwidth
button_height = rect.height
button_size = button_x, button_y, button_width, button_height
render.DrawComboBoxDropButton(grid, dc, button_size,
wx.CONTROL_CURRENT) |
Creates the parameter entry widgets and binds them to methods | def _setup_param_widgets(self):
"""Creates the parameter entry widgets and binds them to methods"""
for parameter in self.csv_params:
pname, ptype, plabel, phelp = parameter
label = wx.StaticText(self.parent, -1, plabel)
widget = self.type2widget[ptype](self.parent)
# Append choicebox items and bind handler
if pname in self.choices:
widget.AppendItems(self.choices[pname])
widget.SetValue = widget.Select
widget.SetSelection(0)
# Bind event handler to widget
if ptype is types.StringType or ptype is types.UnicodeType:
event_type = wx.EVT_TEXT
elif ptype is types.BooleanType:
event_type = wx.EVT_CHECKBOX
else:
event_type = wx.EVT_CHOICE
handler = getattr(self, self.widget_handlers[pname])
self.parent.Bind(event_type, handler, widget)
# Tool tips
label.SetToolTipString(phelp)
widget.SetToolTipString(phelp)
label.__name__ = wx.StaticText.__name__.lower()
widget.__name__ = self.type2widget[ptype].__name__.lower()
self.param_labels.append(label)
self.param_widgets.append(widget)
self.__setattr__("_".join([label.__name__, pname]), label)
self.__setattr__("_".join([widget.__name__, pname]), widget) |
Sizer hell, returns a sizer that contains all widgets | def _do_layout(self):
"""Sizer hell, returns a sizer that contains all widgets"""
sizer_csvoptions = wx.FlexGridSizer(5, 4, 5, 5)
# Adding parameter widgets to sizer_csvoptions
leftpos = wx.LEFT | wx.ADJUST_MINSIZE
rightpos = wx.RIGHT | wx.EXPAND
current_label_margin = 0 # smaller for left column
other_label_margin = 15
for label, widget in zip(self.param_labels, self.param_widgets):
sizer_csvoptions.Add(label, 0, leftpos, current_label_margin)
sizer_csvoptions.Add(widget, 0, rightpos, current_label_margin)
current_label_margin, other_label_margin = \
other_label_margin, current_label_margin
sizer_csvoptions.AddGrowableCol(1)
sizer_csvoptions.AddGrowableCol(3)
self.sizer_csvoptions = sizer_csvoptions |
Sets the widget settings to those of the chosen dialect | def _update_settings(self, dialect):
"""Sets the widget settings to those of the chosen dialect"""
# the first parameter is the dialect itself --> ignore
for parameter in self.csv_params[2:]:
pname, ptype, plabel, phelp = parameter
widget = self._widget_from_p(pname, ptype)
if ptype is types.TupleType:
ptype = types.ObjectType
digest = Digest(acceptable_types=[ptype])
if pname == 'self.has_header':
if self.has_header is not None:
widget.SetValue(digest(self.has_header))
else:
value = getattr(dialect, pname)
widget.SetValue(digest(value)) |
Returns a widget from its ptype and pname | def _widget_from_p(self, pname, ptype):
"""Returns a widget from its ptype and pname"""
widget_name = self.type2widget[ptype].__name__.lower()
widget_name = "_".join([widget_name, pname])
return getattr(self, widget_name) |
Updates all param widgets confirming to the selcted dialect | def OnDialectChoice(self, event):
"""Updates all param widgets confirming to the selcted dialect"""
dialect_name = event.GetString()
value = list(self.choices['dialects']).index(dialect_name)
if dialect_name == 'sniffer':
if self.csvfilepath is None:
event.Skip()
return None
dialect, self.has_header = sniff(self.csvfilepath)
elif dialect_name == 'user':
event.Skip()
return None
else:
dialect = csv.get_dialect(dialect_name)
self._update_settings(dialect)
self.choice_dialects.SetValue(value) |
Update the dialect widget to 'user | def OnWidget(self, event):
"""Update the dialect widget to 'user'"""
self.choice_dialects.SetValue(len(self.choices['dialects']) - 1)
event.Skip() |
Returns a new dialect that implements the current selection | def get_dialect(self):
"""Returns a new dialect that implements the current selection"""
parameters = {}
for parameter in self.csv_params[2:]:
pname, ptype, plabel, phelp = parameter
widget = self._widget_from_p(pname, ptype)
if ptype is types.StringType or ptype is types.UnicodeType:
parameters[pname] = str(widget.GetValue())
elif ptype is types.BooleanType:
parameters[pname] = widget.GetValue()
elif pname == 'quoting':
choice = self.choices['quoting'][widget.GetSelection()]
parameters[pname] = getattr(csv, choice)
else:
raise TypeError(_("{type} unknown.").format(type=ptype))
has_header = parameters.pop("self.has_header")
try:
csv.register_dialect('user', **parameters)
except TypeError, err:
msg = _("The dialect is invalid. \n "
"\nError message:\n{msg}").format(msg=err)
dlg = wx.MessageDialog(self.parent, msg, style=wx.ID_CANCEL)
dlg.ShowModal()
dlg.Destroy()
raise TypeError(err)
return csv.get_dialect('user'), has_header |
Reduces clicks to enter an edit control | def OnMouse(self, event):
"""Reduces clicks to enter an edit control"""
self.SetGridCursor(event.Row, event.Col)
self.EnableCellEditControl(True)
event.Skip() |
Used to capture Editor close events | def OnGridEditorCreated(self, event):
"""Used to capture Editor close events"""
editor = event.GetControl()
editor.Bind(wx.EVT_KILL_FOCUS, self.OnGridEditorClosed)
event.Skip() |
Event handler for end of output type choice | def OnGridEditorClosed(self, event):
"""Event handler for end of output type choice"""
try:
dialect, self.has_header = \
self.parent.csvwidgets.get_dialect()
except TypeError:
event.Skip()
return 0
self.fill_cells(dialect, self.has_header, choices=False) |
Fills the grid for preview of csv data
Parameters
----------
dialect: csv,dialect
\tDialect used for csv reader
choices: Bool
\tCreate and show choices | def fill_cells(self, dialect, has_header, choices=True):
"""Fills the grid for preview of csv data
Parameters
----------
dialect: csv,dialect
\tDialect used for csv reader
choices: Bool
\tCreate and show choices
"""
# Get columns from csv
first_line = get_first_line(self.csvfilepath, dialect)
self.shape[1] = no_cols = len(first_line)
if no_cols > self.GetNumberCols():
missing_cols = no_cols - self.GetNumberCols()
self.AppendCols(missing_cols)
elif no_cols < self.GetNumberCols():
obsolete_cols = self.GetNumberCols() - no_cols
self.DeleteCols(pos=no_cols - 1, numCols=obsolete_cols)
# Retrieve type choices
digest_keys = self.get_digest_keys()
# Is a header present? --> Import as strings in first line
if has_header:
for i, header in enumerate(first_line):
self.SetCellValue(0, i, header)
if choices:
# Add Choices
for col in xrange(self.shape[1]):
choice_renderer = ChoiceRenderer(self)
choice_editor = wx.grid.GridCellChoiceEditor(
self.digest_types.keys(), False)
self.SetCellRenderer(has_header, col, choice_renderer)
self.SetCellEditor(has_header, col, choice_editor)
self.SetCellValue(has_header, col, digest_keys[col])
# Fill in the rest of the lines
self.dtypes = []
for key in self.get_digest_keys():
try:
self.dtypes.append(self.digest_types[key])
except KeyError:
self.dtypes.append(types.NoneType)
topleft = (has_header + 1, 0)
digest_gen = csv_digest_gen(self.csvfilepath, dialect, has_header,
self.dtypes)
for row, col, val in cell_key_val_gen(digest_gen, self.shape, topleft):
self.SetCellValue(row, col, val)
self.Refresh() |
Returns a list of the type choices | def get_digest_keys(self):
"""Returns a list of the type choices"""
digest_keys = []
for col in xrange(self.GetNumberCols()):
digest_key = self.GetCellValue(self.has_header, col)
if digest_key == "":
digest_key = self.digest_types.keys()[0]
digest_keys.append(digest_key)
return digest_keys |
Fills the grid for preview of csv data
Parameters
----------
data: 2-dim array of strings
\tData that is written to preview TextCtrl
dialect: csv,dialect
\tDialect used for csv reader | def fill(self, data, dialect):
"""Fills the grid for preview of csv data
Parameters
----------
data: 2-dim array of strings
\tData that is written to preview TextCtrl
dialect: csv,dialect
\tDialect used for csv reader
"""
csvfile = cStringIO.StringIO()
csvwriter = csv.writer(csvfile, dialect=dialect)
for i, line in enumerate(data):
csvwriter.writerow(list(encode_gen(line)))
if i >= self.preview_lines:
break
preview = csvfile.getvalue()
csvfile.close()
preview = preview.decode("utf-8").replace("\r\n", "\n")
self.SetValue(preview) |
Sets dialog title and size limitations of the widgets | def _set_properties(self):
"""Sets dialog title and size limitations of the widgets"""
title = _("CSV Import: {filepath}").format(filepath=self.csvfilename)
self.SetTitle(title)
self.SetSize((600, 600))
for button in [self.button_cancel, self.button_ok]:
button.SetMinSize((80, 28)) |
Sets dialog title and size limitations of the widgets | def _set_properties(self):
"""Sets dialog title and size limitations of the widgets"""
self.SetTitle("CSV Export")
self.SetSize((600, 600))
for button in [self.button_cancel, self.button_apply, self.button_ok]:
button.SetMinSize((80, 28)) |
Set sizers | def _do_layout(self):
"""Set sizers"""
sizer_dialog = wx.FlexGridSizer(3, 1, 0, 0)
# Sub sizers
sizer_buttons = wx.FlexGridSizer(1, 3, 5, 5)
# Adding buttons to sizer_buttons
for button in [self.button_cancel, self.button_apply, self.button_ok]:
sizer_buttons.Add(button, 0, wx.ALL | wx.EXPAND, 5)
sizer_buttons.AddGrowableRow(0)
for col in xrange(3):
sizer_buttons.AddGrowableCol(col)
# Adding main components
sizer_dialog.Add(self.csvwidgets.sizer_csvoptions,
0, wx.ALL | wx.EXPAND, 5)
sizer_dialog.Add(self.preview_textctrl, 1, wx.ALL | wx.EXPAND, 0)
sizer_dialog.Add(sizer_buttons, 0, wx.ALL | wx.EXPAND, 5)
self.SetSizer(sizer_dialog)
sizer_dialog.AddGrowableRow(1)
sizer_dialog.AddGrowableCol(0)
self.Layout()
self.Centre() |
Updates the preview_textctrl | def OnButtonApply(self, event):
"""Updates the preview_textctrl"""
try:
dialect, self.has_header = self.csvwidgets.get_dialect()
except TypeError:
event.Skip()
return 0
self.preview_textctrl.fill(data=self.data, dialect=dialect)
event.Skip() |
Layout sizers | def _do_layout(self):
"""Layout sizers"""
dialog_main_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer = wx.BoxSizer(wx.HORIZONTAL)
lower_sizer = wx.FlexGridSizer(2, 1, 5, 0)
lower_sizer.AddGrowableRow(0)
lower_sizer.AddGrowableCol(0)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
upper_sizer.Add(self.codetext_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(self.result_ctrl, 1, wx.EXPAND, 0)
lower_sizer.Add(button_sizer, 1, wx.EXPAND, 0)
button_sizer.Add(self.apply_button, 1, wx.EXPAND, 0)
self.upper_panel.SetSizer(upper_sizer)
self.lower_panel.SetSizer(lower_sizer)
sash_50 = int(round((config["window_size"][1] - 100) * 0.5))
self.splitter.SplitHorizontally(self.upper_panel,
self.lower_panel, sash_50)
dialog_main_sizer.Add(self.splitter, 1, wx.EXPAND, 0)
self.SetSizer(dialog_main_sizer)
self.Layout() |
Setup title, size and tooltips | def _set_properties(self):
"""Setup title, size and tooltips"""
self.codetext_ctrl.SetToolTipString(_("Enter python code here."))
self.apply_button.SetToolTipString(_("Apply changes to current macro"))
self.splitter.SetBackgroundStyle(wx.BG_STYLE_COLOUR)
self.result_ctrl.SetMinSize((10, 10)) |
Event handler for Apply button | def OnApply(self, event):
"""Event handler for Apply button"""
# See if we have valid python
try:
ast.parse(self.macros)
except:
# Grab the traceback and print it for the user
s = StringIO()
e = exc_info()
# usr_tb will more than likely be none because ast throws
# SytnaxErrorsas occurring outside of the current
# execution frame
usr_tb = get_user_codeframe(e[2]) or None
print_exception(e[0], e[1], usr_tb, None, s)
post_command_event(self.parent, self.MacroErrorMsg,
err=s.getvalue())
success = False
else:
self.result_ctrl.SetValue('')
post_command_event(self.parent, self.MacroReplaceMsg,
macros=self.macros)
post_command_event(self.parent, self.MacroExecuteMsg)
success = True
event.Skip()
return success |
Update event result following execution by main window | def update_result_ctrl(self, event):
"""Update event result following execution by main window"""
# Check to see if macro window still exists
if not self:
return
printLen = 0
self.result_ctrl.SetValue('')
if hasattr(event, 'msg'):
# Output of script (from print statements, for example)
self.result_ctrl.AppendText(event.msg)
printLen = len(event.msg)
if hasattr(event, 'err'):
# Error messages
errLen = len(event.err)
errStyle = wx.TextAttr(wx.RED)
self.result_ctrl.AppendText(event.err)
self.result_ctrl.SetStyle(printLen, printLen+errLen, errStyle)
if not hasattr(event, 'err') or event.err == '':
# No error passed. Close dialog if user requested it.
if self._ok_pressed:
self.Destroy()
self._ok_pressed = False |
Layout sizers | def _do_layout(self):
"""Layout sizers"""
label_style = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
button_style = wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL | \
wx.ALIGN_CENTER_VERTICAL | wx.FIXED_MINSIZE
grid_sizer_1 = wx.GridSizer(4, 2, 3, 3)
grid_sizer_1.Add(self.Rows_Label, 0, label_style, 3)
grid_sizer_1.Add(self.X_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Columns_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Y_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Tabs_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Z_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.ok_button, 0, button_style, 3)
grid_sizer_1.Add(self.cancel_button, 0, button_style, 3)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
self.Layout()
self.X_DimensionsEntry.SetFocus() |
Converts valuestring to int and assigns result to self.dim
If there is an error (such as an empty valuestring) or if
the value is < 1, the value 1 is assigned to self.dim
Parameters
----------
dimension: int
\tDimension that is to be updated. Must be in [1:4]
valuestring: string
\t A string that can be converted to an int | def _ondim(self, dimension, valuestring):
"""Converts valuestring to int and assigns result to self.dim
If there is an error (such as an empty valuestring) or if
the value is < 1, the value 1 is assigned to self.dim
Parameters
----------
dimension: int
\tDimension that is to be updated. Must be in [1:4]
valuestring: string
\t A string that can be converted to an int
"""
try:
self.dimensions[dimension] = int(valuestring)
except ValueError:
self.dimensions[dimension] = 1
self.textctrls[dimension].SetValue(str(1))
if self.dimensions[dimension] < 1:
self.dimensions[dimension] = 1
self.textctrls[dimension].SetValue(str(1)) |
Posts a command event that makes the grid show the entered cell | def OnOk(self, event):
"""Posts a command event that makes the grid show the entered cell"""
# Get key values from textctrls
key_strings = [self.row_textctrl.GetValue(),
self.col_textctrl.GetValue(),
self.tab_textctrl.GetValue()]
key = []
for key_string in key_strings:
try:
key.append(int(key_string))
except ValueError:
key.append(0)
# Post event
post_command_event(self.parent, self.GotoCellMsg, key=tuple(key)) |
Setup title and label | def _set_properties(self):
"""Setup title and label"""
self.SetTitle(_("About pyspread"))
label = _("pyspread {version}\nCopyright Martin Manns")
label = label.format(version=VERSION)
self.about_label.SetLabel(label) |
Layout sizers | def _do_layout(self):
"""Layout sizers"""
sizer_v = wx.BoxSizer(wx.VERTICAL)
sizer_h = wx.BoxSizer(wx.HORIZONTAL)
style = wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL
sizer_h.Add(self.logo_pyspread, 0, style, 10)
sizer_h.Add(self.about_label, 0, style, 10)
sizer_v.Add(sizer_h)
self.SetSizer(sizer_v)
sizer_v.Add(self.button_close, 0, style, 10)
sizer_v.Fit(self)
self.Layout()
self.Centre() |
Returns maximum dimensionality over which obj is iterable <= 2 | def get_max_dim(self, obj):
"""Returns maximum dimensionality over which obj is iterable <= 2"""
try:
iter(obj)
except TypeError:
return 0
try:
for o in obj:
iter(o)
break
except TypeError:
return 1
return 2 |
Runs alter operation. | def alter(self, operation, timeout=None, metadata=None, credentials=None):
"""Runs alter operation."""
return self.stub.Alter(operation, timeout=timeout, metadata=metadata,
credentials=credentials) |
Runs query operation. | def query(self, req, timeout=None, metadata=None, credentials=None):
"""Runs query operation."""
return self.stub.Query(req, timeout=timeout, metadata=metadata,
credentials=credentials) |
Runs mutate operation. | def mutate(self, mutation, timeout=None, metadata=None, credentials=None):
"""Runs mutate operation."""
return self.stub.Mutate(mutation, timeout=timeout, metadata=metadata,
credentials=credentials) |
Runs commit or abort operation. | def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
"""Runs commit or abort operation."""
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) |
Returns the version of the Dgraph instance. | def check_version(self, check, timeout=None, metadata=None,
credentials=None):
"""Returns the version of the Dgraph instance."""
return self.stub.CheckVersion(check, timeout=timeout,
metadata=metadata,
credentials=credentials) |
Runs a modification via this client. | def alter(self, operation, timeout=None, metadata=None, credentials=None):
"""Runs a modification via this client."""
new_metadata = self.add_login_metadata(metadata)
try:
return self.any_client().alter(operation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self.retry_login()
new_metadata = self.add_login_metadata(metadata)
return self.any_client().alter(operation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
else:
raise error |
Creates a transaction. | def txn(self, read_only=False, best_effort=False):
"""Creates a transaction."""
return txn.Txn(self, read_only=read_only, best_effort=best_effort) |
Adds a query operation to the transaction. | def query(self, query, variables=None, timeout=None, metadata=None,
credentials=None):
"""Adds a query operation to the transaction."""
new_metadata = self._dg.add_login_metadata(metadata)
req = self._common_query(query, variables=variables)
try:
res = self._dc.query(req, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self._dg.retry_login()
new_metadata = self._dg.add_login_metadata(metadata)
res = self._dc.query(req, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
else:
raise error
self.merge_context(res.txn)
return res |
Adds a mutate operation to the transaction. | def mutate(self, mutation=None, set_obj=None, del_obj=None, set_nquads=None,
del_nquads=None, commit_now=None, ignore_index_conflict=None,
timeout=None, metadata=None, credentials=None):
"""Adds a mutate operation to the transaction."""
mutation = self._common_mutate(
mutation=mutation, set_obj=set_obj, del_obj=del_obj,
set_nquads=set_nquads, del_nquads=del_nquads,
commit_now=commit_now, ignore_index_conflict=ignore_index_conflict)
new_metadata = self._dg.add_login_metadata(metadata)
mutate_error = None
try:
assigned = self._dc.mutate(mutation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self._dg.retry_login()
new_metadata = self._dg.add_login_metadata(metadata)
try:
assigned = self._dc.mutate(mutation, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
mutate_error = error
else:
mutate_error = error
if mutate_error is not None:
try:
self.discard(timeout=timeout, metadata=metadata,
credentials=credentials)
except:
# Ignore error - user should see the original error.
pass
self._common_except_mutate(mutate_error)
if mutation.commit_now:
self._finished = True
self.merge_context(assigned.context)
return assigned |
Commits the transaction. | def commit(self, timeout=None, metadata=None, credentials=None):
"""Commits the transaction."""
if not self._common_commit():
return
new_metadata = self._dg.add_login_metadata(metadata)
try:
self._dc.commit_or_abort(self._ctx, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self._dg.retry_login()
new_metadata = self._dg.add_login_metadata(metadata)
try:
self._dc.commit_or_abort(self._ctx, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
return self._common_except_commit(error)
self._common_except_commit(error) |
Discards the transaction. | def discard(self, timeout=None, metadata=None, credentials=None):
"""Discards the transaction."""
if not self._common_discard():
return
new_metadata = self._dg.add_login_metadata(metadata)
try:
self._dc.commit_or_abort(self._ctx, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
except Exception as error:
if util.is_jwt_expired(error):
self._dg.retry_login()
new_metadata = self._dg.add_login_metadata(metadata)
self._dc.commit_or_abort(self._ctx, timeout=timeout,
metadata=new_metadata,
credentials=credentials)
else:
raise error |
Merges context from this instance with src. | def merge_context(self, src=None):
"""Merges context from this instance with src."""
if src is None:
# This condition will be true only if the server doesn't return a
# txn context after a query or mutation.
return
if self._ctx.start_ts == 0:
self._ctx.start_ts = src.start_ts
elif self._ctx.start_ts != src.start_ts:
# This condition should never be true.
raise Exception('StartTs mismatch')
self._ctx.keys.extend(src.keys)
self._ctx.preds.extend(src.preds) |
Returns the token generator class based on the configuration in DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG.CLASS and
DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG.OPTIONS
:return: | def get_token_generator():
"""
Returns the token generator class based on the configuration in DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG.CLASS and
DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG.OPTIONS
:return:
"""
# by default, we are using the String Token Generator
token_class = RandomStringTokenGenerator
options = {}
# get the settings object
DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG = getattr(settings, 'DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG', None)
# check if something is in the settings object, and work with it
if DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG:
if "CLASS" in DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG:
class_path_name = DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG["CLASS"]
module_name, class_name = class_path_name.rsplit('.', 1)
mod = import_module(module_name)
token_class = getattr(mod, class_name)
if "OPTIONS" in DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG:
options = DJANGO_REST_PASSWORDRESET_TOKEN_CONFIG["OPTIONS"]
# initialize the token class and pass options
return token_class(**options) |
generates a pseudo random code using os.urandom and binascii.hexlify | def generate_token(self, *args, **kwargs):
""" generates a pseudo random code using os.urandom and binascii.hexlify """
# determine the length based on min_length and max_length
length = random.randint(self.min_length, self.max_length)
# generate the token using os.urandom and hexlify
return binascii.hexlify(
os.urandom(self.max_length)
).decode()[0:length] |
Try to emit whatever text is in the node. | def get_text(self, node):
"""Try to emit whatever text is in the node."""
try:
return node.children[0].content or ""
except (AttributeError, IndexError):
return node.content or "" |
Emit all the children of a node. | def emit_children(self, node):
"""Emit all the children of a node."""
return "".join([self.emit_node(child) for child in node.children]) |
Emit a single node. | def emit_node(self, node):
"""Emit a single node."""
emit = getattr(self, "%s_emit" % node.kind, self.default_emit)
return emit(node) |
Currently only supports markdown | def ajax_preview(request, **kwargs):
"""
Currently only supports markdown
"""
data = {
"html": render_to_string("pinax/blog/_preview.html", {
"content": parse(request.POST.get("markup"))
})
}
return JsonResponse(data) |
Set system lock for the semaphore.
Sets a system lock that will expire in timeout seconds. This
overrides all other locks. Existing locks cannot be renewed
and no new locks will be permitted until the system lock
expires.
Arguments:
redis: Redis client
name: Name of lock. Used as ZSET key.
timeout: Timeout in seconds for system lock | def set_system_lock(cls, redis, name, timeout):
"""
Set system lock for the semaphore.
Sets a system lock that will expire in timeout seconds. This
overrides all other locks. Existing locks cannot be renewed
and no new locks will be permitted until the system lock
expires.
Arguments:
redis: Redis client
name: Name of lock. Used as ZSET key.
timeout: Timeout in seconds for system lock
"""
pipeline = redis.pipeline()
pipeline.zadd(name, SYSTEM_LOCK_ID, time.time() + timeout)
pipeline.expire(name, timeout + 10) # timeout plus buffer for troubleshooting
pipeline.execute() |
Obtain a semaphore lock.
Returns: Tuple that contains True/False if the lock was acquired and number of
locks in semaphore. | def acquire(self):
"""
Obtain a semaphore lock.
Returns: Tuple that contains True/False if the lock was acquired and number of
locks in semaphore.
"""
acquired, locks = self._semaphore(keys=[self.name],
args=[self.lock_id, self.max_locks,
self.timeout, time.time()])
# Convert Lua boolean returns to Python booleans
acquired = True if acquired == 1 else False
return acquired, locks |
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False. | def acquire(self, blocking=True):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
"""
sleep = self.sleep
timeout = self.timeout
while 1:
unixtime = time.time()
if timeout:
timeout_at = unixtime + timeout
else:
timeout_at = Lock.LOCK_FOREVER
timeout_at = float(timeout_at)
if self.redis.setnx(self.name, timeout_at):
self.acquired_until = timeout_at
return True
# We want blocking, but didn't acquire the lock
# check to see if the current lock is expired
try:
existing = float(self.redis.get(self.name) or 1)
except ValueError:
existing = Lock.LOCK_FOREVER
if existing < unixtime:
# the previous lock is expired, attempt to overwrite it
try:
existing = float(self.redis.getset(self.name, timeout_at) or 1)
except ValueError:
existing = Lock.LOCK_FOREVER
if existing < unixtime:
# we successfully acquired the lock
self.acquired_until = timeout_at
return True
if not blocking:
return False
time.sleep(sleep) |
Releases the already acquired lock | def release(self):
"Releases the already acquired lock"
if self.acquired_until is None:
raise ValueError("Cannot release an unlocked lock")
existing = float(self.redis.get(self.name) or 1)
# if the lock time is in the future, delete the lock
if existing >= self.acquired_until:
self.redis.delete(self.name)
self.acquired_until = None |
Sets a new timeout for an already acquired lock.
``new_timeout`` can be specified as an integer or a float, both
representing the number of seconds. | def renew(self, new_timeout):
"""
Sets a new timeout for an already acquired lock.
``new_timeout`` can be specified as an integer or a float, both
representing the number of seconds.
"""
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return self.do_renew(new_timeout) |
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options. | def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
lock=None, lock_key=None, retry=None, retry_on=None,
retry_method=None, schedule=None, batch=False,
max_queue_size=None):
"""
Function decorator that defines the behavior of the function when it is
used as a task. To use the default behavior, tasks don't need to be
decorated.
See README.rst for an explanation of the options.
"""
def _delay(func):
def _delay_inner(*args, **kwargs):
return self.delay(func, args=args, kwargs=kwargs)
return _delay_inner
# Periodic tasks are unique.
if schedule is not None:
unique = True
def _wrap(func):
if hard_timeout is not None:
func._task_hard_timeout = hard_timeout
if queue is not None:
func._task_queue = queue
if unique is not None:
func._task_unique = unique
if lock is not None:
func._task_lock = lock
if lock_key is not None:
func._task_lock_key = lock_key
if retry is not None:
func._task_retry = retry
if retry_on is not None:
func._task_retry_on = retry_on
if retry_method is not None:
func._task_retry_method = retry_method
if batch is not None:
func._task_batch = batch
if schedule is not None:
func._task_schedule = schedule
if max_queue_size is not None:
func._task_max_queue_size = max_queue_size
func.delay = _delay(func)
if schedule is not None:
serialized_func = serialize_func_name(func)
assert serialized_func not in self.periodic_task_funcs, \
"attempted duplicate registration of periodic task"
self.periodic_task_funcs[serialized_func] = func
return func
return _wrap if _fn is None else _wrap(_fn) |
Main worker entry point method.
The arguments are explained in the module-level run_worker() method's
click options. | def run_worker(self, queues=None, module=None, exclude_queues=None,
max_workers_per_queue=None, store_tracebacks=None):
"""
Main worker entry point method.
The arguments are explained in the module-level run_worker() method's
click options.
"""
try:
module_names = module or ''
for module_name in module_names.split(','):
module_name = module_name.strip()
if module_name:
importlib.import_module(module_name)
self.log.debug('imported module', module_name=module_name)
worker = Worker(self,
queues.split(',') if queues else None,
exclude_queues.split(',') if exclude_queues else None,
max_workers_per_queue=max_workers_per_queue,
store_tracebacks=store_tracebacks)
worker.run()
except Exception:
self.log.exception('Unhandled exception')
raise |
Queues a task. See README.rst for an explanation of the options. | def delay(self, func, args=None, kwargs=None, queue=None,
hard_timeout=None, unique=None, lock=None, lock_key=None,
when=None, retry=None, retry_on=None, retry_method=None,
max_queue_size=None):
"""
Queues a task. See README.rst for an explanation of the options.
"""
task = Task(self, func, args=args, kwargs=kwargs, queue=queue,
hard_timeout=hard_timeout, unique=unique,
lock=lock, lock_key=lock_key,
retry=retry, retry_on=retry_on, retry_method=retry_method)
task.delay(when=when, max_queue_size=max_queue_size)
return task |
Get the queue's number of tasks in each state.
Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE
states. Does not include size of error queue. | def get_queue_sizes(self, queue):
"""
Get the queue's number of tasks in each state.
Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE
states. Does not include size of error queue.
"""
states = [QUEUED, SCHEDULED, ACTIVE]
pipeline = self.connection.pipeline()
for state in states:
pipeline.zcard(self._key(state, queue))
results = pipeline.execute()
return dict(zip(states, results)) |
Get system lock timeout
Returns time system lock expires or None if lock does not exist | def get_queue_system_lock(self, queue):
"""
Get system lock timeout
Returns time system lock expires or None if lock does not exist
"""
key = self._key(LOCK_REDIS_KEY, queue)
return Semaphore.get_system_lock(self.connection, key) |
Set system lock on a queue.
Max workers for this queue must be used for this to have any effect.
This will keep workers from processing tasks for this queue until
the timeout has expired. Active tasks will continue processing their
current task.
timeout is number of seconds to hold the lock | def set_queue_system_lock(self, queue, timeout):
"""
Set system lock on a queue.
Max workers for this queue must be used for this to have any effect.
This will keep workers from processing tasks for this queue until
the timeout has expired. Active tasks will continue processing their
current task.
timeout is number of seconds to hold the lock
"""
key = self._key(LOCK_REDIS_KEY, queue)
Semaphore.set_system_lock(self.connection, key, timeout) |
Returns a dict with stats about all the queues. The keys are the queue
names, the values are dicts representing how many tasks are in a given
status ("queued", "active", "error" or "scheduled").
Example return value:
{ "default": { "queued": 1, "error": 2 } } | def get_queue_stats(self):
"""
Returns a dict with stats about all the queues. The keys are the queue
names, the values are dicts representing how many tasks are in a given
status ("queued", "active", "error" or "scheduled").
Example return value:
{ "default": { "queued": 1, "error": 2 } }
"""
states = (QUEUED, ACTIVE, SCHEDULED, ERROR)
pipeline = self.connection.pipeline()
for state in states:
pipeline.smembers(self._key(state))
queue_results = pipeline.execute()
pipeline = self.connection.pipeline()
for state, result in zip(states, queue_results):
for queue in result:
pipeline.zcard(self._key(state, queue))
card_results = pipeline.execute()
queue_stats = defaultdict(dict)
for state, result in zip(states, queue_results):
for queue in result:
queue_stats[queue][state] = card_results.pop(0)
return queue_stats |
Sets up signal handlers for safely stopping the worker. | def _install_signal_handlers(self):
"""
Sets up signal handlers for safely stopping the worker.
"""
def request_stop(signum, frame):
self._stop_requested = True
self.log.info('stop requested, waiting for task to finish')
signal.signal(signal.SIGINT, request_stop)
signal.signal(signal.SIGTERM, request_stop) |
Restores default signal handlers. | def _uninstall_signal_handlers(self):
"""
Restores default signal handlers.
"""
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL) |
Applies the queue filter to the given list of queues and returns the
queues that match. Note that a queue name matches any subqueues
starting with the name, followed by a date. For example, "foo" will
match both "foo" and "foo.bar". | def _filter_queues(self, queues):
"""
Applies the queue filter to the given list of queues and returns the
queues that match. Note that a queue name matches any subqueues
starting with the name, followed by a date. For example, "foo" will
match both "foo" and "foo.bar".
"""
def match(queue):
"""
Returns whether the given queue should be included by checking each
part of the queue name.
"""
for part in reversed_dotted_parts(queue):
if part in self.exclude_queues:
return False
if part in self.only_queues:
return True
return not self.only_queues
return [q for q in queues if match(q)] |
Helper method that takes due tasks from the SCHEDULED queue and puts
them in the QUEUED queue for execution. This should be called
periodically. | def _worker_queue_scheduled_tasks(self):
"""
Helper method that takes due tasks from the SCHEDULED queue and puts
them in the QUEUED queue for execution. This should be called
periodically.
"""
queues = set(self._filter_queues(self.connection.smembers(
self._key(SCHEDULED))))
now = time.time()
for queue in queues:
# Move due items from the SCHEDULED queue to the QUEUED queue. If
# items were moved, remove the queue from the scheduled set if it
# is empty, and add it to the queued set so the task gets picked
# up. If any unique tasks are already queued, don't update their
# queue time (because the new queue time would be later).
result = self.scripts.zpoppush(
self._key(SCHEDULED, queue),
self._key(QUEUED, queue),
self.config['SCHEDULED_TASK_BATCH_SIZE'],
now,
now,
if_exists=('noupdate',),
on_success=('update_sets', queue,
self._key(SCHEDULED), self._key(QUEUED)),
)
self.log.debug('scheduled tasks', queue=queue, qty=len(result))
# XXX: ideally this would be in the same pipeline, but we only want
# to announce if there was a result.
if result:
self.connection.publish(self._key('activity'), queue)
self._did_work = True |
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method | def _wait_for_new_tasks(self, timeout=0, batch_timeout=0):
"""
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method
"""
new_queue_found = False
start_time = batch_exit = time.time()
while True:
# Check to see if batch_exit has been updated
if batch_exit > start_time:
pubsub_sleep = batch_exit - time.time()
else:
pubsub_sleep = start_time + timeout - time.time()
message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or
self._did_work
else pubsub_sleep)
# Pull remaining messages off of channel
while message:
if message['type'] == 'message':
new_queue_found, batch_exit = self._process_queue_message(
message['data'], new_queue_found, batch_exit,
start_time, timeout, batch_timeout
)
message = self._pubsub.get_message()
if self._did_work:
break # Exit immediately if we did work during the last
# execution loop because there might be more work to do
elif time.time() >= batch_exit and new_queue_found:
break # After finding a new queue we can wait until the
# batch timeout expires
elif time.time() - start_time > timeout:
break |
Helper method that takes expired tasks (where we didn't get a
heartbeat until we reached a timeout) and puts them back into the
QUEUED queue for re-execution if they're idempotent, i.e. retriable
on JobTimeoutException. Otherwise, tasks are moved to the ERROR queue
and an exception is logged. | def _worker_queue_expired_tasks(self):
"""
Helper method that takes expired tasks (where we didn't get a
heartbeat until we reached a timeout) and puts them back into the
QUEUED queue for re-execution if they're idempotent, i.e. retriable
on JobTimeoutException. Otherwise, tasks are moved to the ERROR queue
and an exception is logged.
"""
# Note that we use the lock both to unnecessarily prevent multiple
# workers from requeueing expired tasks, as well as to space out
# this task (i.e. we don't release the lock unless we've exhausted our
# batch size, which will hopefully never happen)
lock = Lock(self.connection, self._key('lock', 'queue_expired_tasks'),
timeout=self.config['REQUEUE_EXPIRED_TASKS_INTERVAL'])
acquired = lock.acquire(blocking=False)
if not acquired:
return
now = time.time()
# Get a batch of expired tasks.
task_data = self.scripts.get_expired_tasks(
self.config['REDIS_PREFIX'],
now - self.config['ACTIVE_TASK_UPDATE_TIMEOUT'],
self.config['REQUEUE_EXPIRED_TASKS_BATCH_SIZE']
)
for (queue, task_id) in task_data:
self.log.debug('expiring task', queue=queue, task_id=task_id)
self._did_work = True
try:
task = Task.from_id(self.tiger, queue, ACTIVE, task_id)
if task.should_retry_on(JobTimeoutException, logger=self.log):
self.log.info('queueing expired task',
queue=queue, task_id=task_id)
# Task is idempotent and can be requeued. If the task
# already exists in the QUEUED queue, don't change its
# time.
task._move(from_state=ACTIVE,
to_state=QUEUED,
when=now,
mode='nx')
else:
self.log.error('failing expired task',
queue=queue, task_id=task_id)
# Assume the task can't be retried and move it to the error
# queue.
task._move(from_state=ACTIVE, to_state=ERROR, when=now)
except TaskNotFound:
# Either the task was requeued by another worker, or we
# have a task without a task object.
# XXX: Ideally, the following block should be atomic.
if not self.connection.get(self._key('task', task_id)):
self.log.error('not found', queue=queue, task_id=task_id)
task = Task(self.tiger, queue=queue,
_data={'id': task_id}, _state=ACTIVE)
task._move()
# Release the lock immediately if we processed a full batch. This way,
# another process will be able to pick up another batch immediately
# without waiting for the lock to time out.
if len(task_data) == self.config['REQUEUE_EXPIRED_TASKS_BATCH_SIZE']:
lock.release() |
Executes the tasks in the forked process. Multiple tasks can be passed
for batch processing. However, they must all use the same function and
will share the execution entry. | def _execute_forked(self, tasks, log):
"""
Executes the tasks in the forked process. Multiple tasks can be passed
for batch processing. However, they must all use the same function and
will share the execution entry.
"""
success = False
execution = {}
assert len(tasks)
task_func = tasks[0].serialized_func
assert all([task_func == task.serialized_func for task in tasks[1:]])
execution['time_started'] = time.time()
exc = None
exc_info = None
try:
func = tasks[0].func
is_batch_func = getattr(func, '_task_batch', False)
g['current_task_is_batch'] = is_batch_func
if is_batch_func:
# Batch process if the task supports it.
params = [{
'args': task.args,
'kwargs': task.kwargs,
} for task in tasks]
task_timeouts = [task.hard_timeout for task in tasks if task.hard_timeout is not None]
hard_timeout = ((max(task_timeouts) if task_timeouts else None)
or
getattr(func, '_task_hard_timeout', None) or
self.config['DEFAULT_HARD_TIMEOUT'])
g['current_tasks'] = tasks
with UnixSignalDeathPenalty(hard_timeout):
func(params)
else:
# Process sequentially.
for task in tasks:
hard_timeout = (task.hard_timeout or
getattr(func, '_task_hard_timeout', None) or
self.config['DEFAULT_HARD_TIMEOUT'])
g['current_tasks'] = [task]
with UnixSignalDeathPenalty(hard_timeout):
func(*task.args, **task.kwargs)
except RetryException as exc:
execution['retry'] = True
if exc.method:
execution['retry_method'] = serialize_retry_method(exc.method)
execution['log_error'] = exc.log_error
execution['exception_name'] = serialize_func_name(exc.__class__)
exc_info = exc.exc_info or sys.exc_info()
except (JobTimeoutException, Exception) as exc:
execution['exception_name'] = serialize_func_name(exc.__class__)
exc_info = sys.exc_info()
else:
success = True
if not success:
execution['time_failed'] = time.time()
if self.store_tracebacks:
# Currently we only log failed task executions to Redis.
execution['traceback'] = \
''.join(traceback.format_exception(*exc_info))
execution['success'] = success
execution['host'] = socket.gethostname()
serialized_execution = json.dumps(execution)
for task in tasks:
self.connection.rpush(self._key('task', task.id, 'executions'),
serialized_execution)
return success |
Get queue batch size. | def _get_queue_batch_size(self, queue):
"""Get queue batch size."""
# Fetch one item unless this is a batch queue.
# XXX: It would be more efficient to loop in reverse order and break.
batch_queues = self.config['BATCH_QUEUES']
batch_size = 1
for part in dotted_parts(queue):
if part in batch_queues:
batch_size = batch_queues[part]
return batch_size |
Get queue lock for max worker queues.
For max worker queues it returns a Lock if acquired and whether
it failed to acquire the lock. | def _get_queue_lock(self, queue, log):
"""Get queue lock for max worker queues.
For max worker queues it returns a Lock if acquired and whether
it failed to acquire the lock.
"""
max_workers = self.max_workers_per_queue
# Check if this is single worker queue
for part in dotted_parts(queue):
if part in self.single_worker_queues:
log.debug('single worker queue')
max_workers = 1
break
# Max worker queues require us to get a queue lock before
# moving tasks
if max_workers:
queue_lock = Semaphore(self.connection,
self._key(LOCK_REDIS_KEY, queue),
self.id, max_locks=max_workers,
timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
acquired, locks = queue_lock.acquire()
if not acquired:
return None, True
log.debug('acquired queue lock', locks=locks)
else:
queue_lock = None
return queue_lock, False |
Updates the heartbeat for the given task IDs to prevent them from
timing out and being requeued. | def _heartbeat(self, queue, task_ids):
"""
Updates the heartbeat for the given task IDs to prevent them from
timing out and being requeued.
"""
now = time.time()
self.connection.zadd(self._key(ACTIVE, queue),
**{task_id: now for task_id in task_ids}) |
Executes the given tasks. Returns a boolean indicating whether
the tasks were executed successfully. | def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids):
"""
Executes the given tasks. Returns a boolean indicating whether
the tasks were executed successfully.
"""
# The tasks must use the same function.
assert len(tasks)
task_func = tasks[0].serialized_func
assert all([task_func == task.serialized_func for task in tasks[1:]])
# Before executing periodic tasks, queue them for the next period.
if task_func in self.tiger.periodic_task_funcs:
tasks[0]._queue_for_next_period()
with g_fork_lock:
child_pid = os.fork()
if child_pid == 0:
# Child process
log = log.bind(child_pid=os.getpid())
# Disconnect the Redis connection inherited from the main process.
# Note that this doesn't disconnect the socket in the main process.
self.connection.connection_pool.disconnect()
random.seed()
# Ignore Ctrl+C in the child so we don't abort the job -- the main
# process already takes care of a graceful shutdown.
signal.signal(signal.SIGINT, signal.SIG_IGN)
with WorkerContextManagerStack(self.config['CHILD_CONTEXT_MANAGERS']):
success = self._execute_forked(tasks, log)
# Wait for any threads that might be running in the child, just
# like sys.exit() would. Note we don't call sys.exit() directly
# because it would perform additional cleanup (e.g. calling atexit
# handlers twice). See also: https://bugs.python.org/issue18966
threading._shutdown()
os._exit(int(not success))
else:
# Main process
log = log.bind(child_pid=child_pid)
for task in tasks:
log.info('processing', func=task_func, task_id=task.id,
params={'args': task.args, 'kwargs': task.kwargs})
# Attach a signal handler to SIGCHLD (sent when the child process
# exits) so we can capture it.
signal.signal(signal.SIGCHLD, sigchld_handler)
# Since newer Python versions retry interrupted system calls we can't
# rely on the fact that select() is interrupted with EINTR. Instead,
# we'll set up a wake-up file descriptor below.
# Create a new pipe and apply the non-blocking flag (required for
# set_wakeup_fd).
pipe_r, pipe_w = os.pipe()
flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags)
# A byte will be written to pipe_w if a signal occurs (and can be
# read from pipe_r).
old_wakeup_fd = signal.set_wakeup_fd(pipe_w)
def check_child_exit():
"""
Do a non-blocking check to see if the child process exited.
Returns None if the process is still running, or the exit code
value of the child process.
"""
try:
pid, return_code = os.waitpid(child_pid, os.WNOHANG)
if pid != 0: # The child process is done.
return return_code
except OSError as e:
# Of course EINTR can happen if the child process exits
# while we're checking whether it exited. In this case it
# should be safe to retry.
if e.errno == errno.EINTR:
return check_child_exit()
else:
raise
# Wait for the child to exit and perform a periodic heartbeat.
# We check for the child twice in this loop so that we avoid
# unnecessary waiting if the child exited just before entering
# the while loop or while renewing heartbeat/locks.
while True:
return_code = check_child_exit()
if return_code is not None:
break
# Wait until the timeout or a signal / child exit occurs.
try:
select.select([pipe_r], [], [],
self.config['ACTIVE_TASK_UPDATE_TIMER'])
except select.error as e:
if e.args[0] != errno.EINTR:
raise
return_code = check_child_exit()
if return_code is not None:
break
try:
self._heartbeat(queue, all_task_ids)
for lock in locks:
lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
if queue_lock:
acquired, current_locks = queue_lock.renew()
if not acquired:
log.debug('queue lock renew failure')
except OSError as e:
# EINTR happens if the task completed. Since we're just
# renewing locks/heartbeat it's okay if we get interrupted.
if e.errno != errno.EINTR:
raise
# Restore signals / clean up
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.set_wakeup_fd(old_wakeup_fd)
os.close(pipe_r)
os.close(pipe_w)
success = (return_code == 0)
return success |
Process a queue message from activity channel. | def _process_queue_message(self, message_queue, new_queue_found, batch_exit,
start_time, timeout, batch_timeout):
"""Process a queue message from activity channel."""
for queue in self._filter_queues([message_queue]):
if queue not in self._queue_set:
if not new_queue_found:
new_queue_found = True
batch_exit = time.time() + batch_timeout
# Limit batch_exit to max timeout
if batch_exit > start_time + timeout:
batch_exit = start_time + timeout
self._queue_set.add(queue)
self.log.debug('new queue', queue=queue)
return new_queue_found, batch_exit |
Process tasks in queue. | def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log):
"""Process tasks in queue."""
processed_count = 0
# Get all tasks
serialized_tasks = self.connection.mget([
self._key('task', task_id) for task_id in task_ids
])
# Parse tasks
tasks = []
for task_id, serialized_task in zip(task_ids, serialized_tasks):
if serialized_task:
task_data = json.loads(serialized_task)
else:
# In the rare case where we don't find the task which is
# queued (see ReliabilityTestCase.test_task_disappears),
# we log an error and remove the task below. We need to
# at least initialize the Task object with an ID so we can
# remove it.
task_data = {'id': task_id}
task = Task(self.tiger, queue=queue, _data=task_data,
_state=ACTIVE, _ts=now)
if not serialized_task:
# Remove task as per comment above
log.error('not found', task_id=task_id)
task._move()
elif task.id != task_id:
log.error('task ID mismatch', task_id=task_id)
# Remove task
task._move()
else:
tasks.append(task)
# List of task IDs that exist and we will update the heartbeat on.
valid_task_ids = set(task.id for task in tasks)
# Group by task func
tasks_by_func = OrderedDict()
for task in tasks:
func = task.serialized_func
if func in tasks_by_func:
tasks_by_func[func].append(task)
else:
tasks_by_func[func] = [task]
# Execute tasks for each task func
for tasks in tasks_by_func.values():
success, processed_tasks = self._execute_task_group(queue,
tasks, valid_task_ids, queue_lock)
processed_count = processed_count + len(processed_tasks)
log.debug('processed', attempted=len(tasks),
processed=processed_count)
for task in processed_tasks:
self._finish_task_processing(queue, task, success)
return processed_count |
Internal method to process a task batch from the given queue.
Args:
queue: Queue name to be processed
Returns:
Task IDs: List of tasks that were processed (even if there was an
error so that client code can assume the queue is empty
if nothing was returned)
Count: The number of tasks that were attempted to be executed or
-1 if the queue lock couldn't be acquired. | def _process_from_queue(self, queue):
"""
Internal method to process a task batch from the given queue.
Args:
queue: Queue name to be processed
Returns:
Task IDs: List of tasks that were processed (even if there was an
error so that client code can assume the queue is empty
if nothing was returned)
Count: The number of tasks that were attempted to be executed or
-1 if the queue lock couldn't be acquired.
"""
now = time.time()
log = self.log.bind(queue=queue)
batch_size = self._get_queue_batch_size(queue)
queue_lock, failed_to_acquire = self._get_queue_lock(queue, log)
if failed_to_acquire:
return [], -1
# Move an item to the active queue, if available.
# We need to be careful when moving unique tasks: We currently don't
# support concurrent processing of multiple unique tasks. If the task
# is already in the ACTIVE queue, we need to execute the queued task
# later, i.e. move it to the SCHEDULED queue (prefer the earliest
# time if it's already scheduled). We want to make sure that the last
# queued instance of the task always gets executed no earlier than it
# was queued.
later = time.time() + self.config['LOCK_RETRY']
task_ids = self.scripts.zpoppush(
self._key(QUEUED, queue),
self._key(ACTIVE, queue),
batch_size,
None,
now,
if_exists=('add', self._key(SCHEDULED, queue), later, 'min'),
on_success=('update_sets', queue, self._key(QUEUED),
self._key(ACTIVE), self._key(SCHEDULED))
)
log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE,
qty=len(task_ids))
processed_count = 0
if task_ids:
processed_count = self._process_queue_tasks(queue, queue_lock,
task_ids, now, log)
if queue_lock:
queue_lock.release()
log.debug('released swq lock')
return task_ids, processed_count |
Executes the given tasks in the queue. Updates the heartbeat for task
IDs passed in all_task_ids. This internal method is only meant to be
called from within _process_from_queue. | def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock):
"""
Executes the given tasks in the queue. Updates the heartbeat for task
IDs passed in all_task_ids. This internal method is only meant to be
called from within _process_from_queue.
"""
log = self.log.bind(queue=queue)
locks = []
# Keep track of the acquired locks: If two tasks in the list require
# the same lock we only acquire it once.
lock_ids = set()
ready_tasks = []
for task in tasks:
if task.lock:
if task.lock_key:
kwargs = task.kwargs
lock_id = gen_unique_id(
task.serialized_func,
None,
{key: kwargs.get(key) for key in task.lock_key},
)
else:
lock_id = gen_unique_id(
task.serialized_func,
task.args,
task.kwargs,
)
if lock_id not in lock_ids:
lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
acquired = lock.acquire(blocking=False)
if acquired:
lock_ids.add(lock_id)
locks.append(lock)
else:
log.info('could not acquire lock', task_id=task.id)
# Reschedule the task (but if the task is already
# scheduled in case of a unique task, don't prolong
# the schedule date).
when = time.time() + self.config['LOCK_RETRY']
task._move(from_state=ACTIVE, to_state=SCHEDULED,
when=when, mode='min')
# Make sure to remove it from this list so we don't
# re-add to the ACTIVE queue by updating the heartbeat.
all_task_ids.remove(task.id)
continue
ready_tasks.append(task)
if not ready_tasks:
return True, []
if self.stats_thread:
self.stats_thread.report_task_start()
success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids)
if self.stats_thread:
self.stats_thread.report_task_end()
for lock in locks:
lock.release()
return success, ready_tasks |
After a task is executed, this method is called and ensures that
the task gets properly removed from the ACTIVE queue and, in case of an
error, retried or marked as failed. | def _finish_task_processing(self, queue, task, success):
"""
After a task is executed, this method is called and ensures that
the task gets properly removed from the ACTIVE queue and, in case of an
error, retried or marked as failed.
"""
log = self.log.bind(queue=queue, task_id=task.id)
def _mark_done():
# Remove the task from active queue
task._move(from_state=ACTIVE)
log.info('done')
if success:
_mark_done()
else:
should_retry = False
should_log_error = True
# Get execution info (for logging and retry purposes)
execution = self.connection.lindex(
self._key('task', task.id, 'executions'), -1)
if execution:
execution = json.loads(execution)
if execution and execution.get('retry'):
if 'retry_method' in execution:
retry_func, retry_args = execution['retry_method']
else:
# We expect the serialized method here.
retry_func, retry_args = serialize_retry_method( \
self.config['DEFAULT_RETRY_METHOD'])
should_log_error = execution['log_error']
should_retry = True
if task.retry_method and not should_retry:
retry_func, retry_args = task.retry_method
if task.retry_on:
if execution:
exception_name = execution.get('exception_name')
try:
exception_class = import_attribute(exception_name)
except TaskImportError:
log.error('could not import exception',
exception_name=exception_name)
else:
if task.should_retry_on(exception_class, logger=log):
should_retry = True
else:
should_retry = True
state = ERROR
when = time.time()
log_context = {
'func': task.serialized_func
}
if should_retry:
retry_num = task.n_executions()
log_context['retry_func'] = retry_func
log_context['retry_num'] = retry_num
try:
func = import_attribute(retry_func)
except TaskImportError:
log.error('could not import retry function',
func=retry_func)
else:
try:
retry_delay = func(retry_num, *retry_args)
log_context['retry_delay'] = retry_delay
when += retry_delay
except StopRetry:
pass
else:
state = SCHEDULED
if execution:
if state == ERROR and should_log_error:
log_func = log.error
else:
log_func = log.warning
log_context.update({
'time_failed': execution.get('time_failed'),
'traceback': execution.get('traceback'),
'exception_name': execution.get('exception_name'),
})
log_func('task error', **log_context)
else:
log.error('execution not found', **log_context)
# Move task to the scheduled queue for retry, or move to error
# queue if we don't want to retry.
if state == ERROR and not should_log_error:
_mark_done()
else:
task._move(from_state=ACTIVE, to_state=state, when=when) |
Performs one worker run:
* Processes a set of messages from each queue and removes any empty
queues from the working set.
* Move any expired items from the active queue to the queued queue.
* Move any scheduled items from the scheduled queue to the queued
queue. | def _worker_run(self):
"""
Performs one worker run:
* Processes a set of messages from each queue and removes any empty
queues from the working set.
* Move any expired items from the active queue to the queued queue.
* Move any scheduled items from the scheduled queue to the queued
queue.
"""
queues = list(self._queue_set)
random.shuffle(queues)
for queue in queues:
task_ids, processed_count = self._process_from_queue(queue)
# Remove queue if queue was processed and was empty
if not task_ids and processed_count != -1:
self._queue_set.remove(queue)
if self._stop_requested:
break
if processed_count > 0:
self._did_work = True
if time.time() - self._last_task_check > self.config['SELECT_TIMEOUT'] and \
not self._stop_requested:
self._worker_queue_scheduled_tasks()
self._worker_queue_expired_tasks()
self._last_task_check = time.time() |
Main loop of the worker.
Use once=True to execute any queued tasks and then exit.
Use force_once=True with once=True to always exit after one processing
loop even if tasks remain queued. | def run(self, once=False, force_once=False):
"""
Main loop of the worker.
Use once=True to execute any queued tasks and then exit.
Use force_once=True with once=True to always exit after one processing
loop even if tasks remain queued.
"""
self.log.info('ready', id=self.id,
queues=sorted(self.only_queues),
exclude_queues=sorted(self.exclude_queues),
single_worker_queues=sorted(self.single_worker_queues),
max_workers=self.max_workers_per_queue)
if not self.scripts.can_replicate_commands:
# Older Redis versions may create additional overhead when
# executing pipelines.
self.log.warn('using old Redis version')
if self.config['STATS_INTERVAL']:
self.stats_thread = StatsThread(self)
self.stats_thread.start()
# Queue any periodic tasks that are not queued yet.
self._queue_periodic_tasks()
# First scan all the available queues for new items until they're empty.
# Then, listen to the activity channel.
# XXX: This can get inefficient when having lots of queues.
self._pubsub = self.connection.pubsub()
self._pubsub.subscribe(self._key('activity'))
self._queue_set = set(self._filter_queues(
self.connection.smembers(self._key(QUEUED))))
try:
while True:
# Update the queue set on every iteration so we don't get stuck
# on processing a specific queue.
self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'],
batch_timeout=self.config['SELECT_BATCH_TIMEOUT'])
self._install_signal_handlers()
self._did_work = False
self._worker_run()
self._uninstall_signal_handlers()
if once and (not self._queue_set or force_once):
break
if self._stop_requested:
raise KeyboardInterrupt()
except KeyboardInterrupt:
pass
except Exception as e:
self.log.exception(event='exception')
raise
finally:
if self.stats_thread:
self.stats_thread.stop()
self.stats_thread = None
# Free up Redis connection
self._pubsub.reset()
self.log.info('done') |
Whether Redis supports single command replication. | def can_replicate_commands(self):
"""
Whether Redis supports single command replication.
"""
if not hasattr(self, '_can_replicate_commands'):
info = self.redis.info('server')
version_info = info['redis_version'].split('.')
major, minor = int(version_info[0]), int(version_info[1])
result = major > 3 or major == 3 and minor >= 2
self._can_replicate_commands = result
return self._can_replicate_commands |
Like ZADD, but supports different score update modes, in case the
member already exists in the ZSET:
- "nx": Don't update the score
- "xx": Only update elements that already exist. Never add elements.
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score | def zadd(self, key, score, member, mode, client=None):
"""
Like ZADD, but supports different score update modes, in case the
member already exists in the ZSET:
- "nx": Don't update the score
- "xx": Only update elements that already exist. Never add elements.
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
"""
if mode == 'nx':
f = self._zadd_noupdate
elif mode == 'xx':
f = self._zadd_update_existing
elif mode == 'min':
f = self._zadd_update_min
elif mode == 'max':
f = self._zadd_update_max
else:
raise NotImplementedError('mode "%s" unsupported' % mode)
return f(keys=[key], args=[score, member], client=client) |
Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated). | def zpoppush(self, source, destination, count, score, new_score,
client=None, withscores=False, on_success=None,
if_exists=None):
"""
Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated).
"""
if score is None:
score = '+inf' # Include all elements.
if withscores:
if on_success:
raise NotImplementedError()
return self._zpoppush_withscores(
keys=[source, destination],
args=[score, count, new_score],
client=client)
else:
if if_exists and if_exists[0] == 'add':
_, if_exists_key, if_exists_score, if_exists_mode = if_exists
if if_exists_mode != 'min':
raise NotImplementedError()
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set, add_to_set_if_exists \
= on_success[1:]
return self._zpoppush_exists_min_update_sets(
keys=[source, destination, remove_from_set, add_to_set,
add_to_set_if_exists, if_exists_key],
args=[score, count, new_score, set_value, if_exists_score],
)
elif if_exists and if_exists[0] == 'noupdate':
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set \
= on_success[1:]
return self._zpoppush_exists_ignore_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
)
if on_success:
if on_success[0] != 'update_sets':
raise NotImplementedError()
else:
set_value, remove_from_set, add_to_set = on_success[1:]
return self._zpoppush_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
client=client)
else:
return self._zpoppush(
keys=[source, destination],
args=[score, count, new_score],
client=client) |
Removes ``member`` from the set ``key`` if ``other_key`` does not
exist (i.e. is empty). Returns the number of removed elements (0 or 1). | def srem_if_not_exists(self, key, member, other_key, client=None):
"""
Removes ``member`` from the set ``key`` if ``other_key`` does not
exist (i.e. is empty). Returns the number of removed elements (0 or 1).
"""
return self._srem_if_not_exists(
keys=[key, other_key],
args=[member],
client=client) |
Removes ``key`` only if ``member`` is not member of any sets in the
``set_list``. Returns the number of removed elements (0 or 1). | def delete_if_not_in_zsets(self, key, member, set_list, client=None):
"""
Removes ``key`` only if ``member`` is not member of any sets in the
``set_list``. Returns the number of removed elements (0 or 1).
"""
return self._delete_if_not_in_zsets(
keys=[key]+set_list,
args=[member],
client=client) |
Fails with an error containing the string '<FAIL_IF_NOT_IN_ZSET>' if
the given ``member`` is not in the ZSET ``key``. This can be used in
a pipeline to assert that the member is in the ZSET and cancel the
execution otherwise. | def fail_if_not_in_zset(self, key, member, client=None):
"""
Fails with an error containing the string '<FAIL_IF_NOT_IN_ZSET>' if
the given ``member`` is not in the ZSET ``key``. This can be used in
a pipeline to assert that the member is in the ZSET and cancel the
execution otherwise.
"""
self._fail_if_not_in_zset(keys=[key], args=[member], client=client) |
Returns a list of expired tasks (older than ``time``) by looking at all
active queues. The list is capped at ``batch_size``. The list contains
tuples (queue, task_id). | def get_expired_tasks(self, key_prefix, time, batch_size, client=None):
"""
Returns a list of expired tasks (older than ``time``) by looking at all
active queues. The list is capped at ``batch_size``. The list contains
tuples (queue, task_id).
"""
result = self._get_expired_tasks(args=[key_prefix, time, batch_size],
client=client)
# [queue1, task1, queue2, task2] -> [(queue1, task1), (queue2, task2)]
return list(zip(result[::2], result[1::2])) |
Executes the given Redis pipeline as a Lua script. When an error
occurs, the transaction stops executing, and an exception is raised.
This differs from Redis transactions, where execution continues after an
error. On success, a list of results is returned. The pipeline is
cleared after execution and can no longer be reused.
Example:
p = conn.pipeline()
p.lrange('x', 0, -1)
p.set('success', 1)
# If "x" is empty or a list, an array [[...], True] is returned.
# Otherwise, ResponseError is raised and "success" is not set.
results = redis_scripts.execute_pipeline(p) | def execute_pipeline(self, pipeline, client=None):
"""
Executes the given Redis pipeline as a Lua script. When an error
occurs, the transaction stops executing, and an exception is raised.
This differs from Redis transactions, where execution continues after an
error. On success, a list of results is returned. The pipeline is
cleared after execution and can no longer be reused.
Example:
p = conn.pipeline()
p.lrange('x', 0, -1)
p.set('success', 1)
# If "x" is empty or a list, an array [[...], True] is returned.
# Otherwise, ResponseError is raised and "success" is not set.
results = redis_scripts.execute_pipeline(p)
"""
client = client or self.redis
executing_pipeline = None
try:
# Prepare args
stack = pipeline.command_stack
script_args = [int(self.can_replicate_commands), len(stack)]
for args, options in stack:
script_args += [len(args)-1] + list(args)
# Run the pipeline
if self.can_replicate_commands: # Redis 3.2 or higher
# Make sure scripts exist
if pipeline.scripts:
pipeline.load_scripts()
raw_results = self._execute_pipeline(args=script_args,
client=client)
else:
executing_pipeline = client.pipeline()
# Always load scripts to avoid issues when Redis loads data
# from AOF file / when replicating.
for s in pipeline.scripts:
executing_pipeline.script_load(s.script)
# Run actual pipeline lua script
self._execute_pipeline(args=script_args,
client=executing_pipeline)
# Always load all scripts and run actual pipeline lua script
raw_results = executing_pipeline.execute()[-1]
# Run response callbacks on results.
results = []
response_callbacks = pipeline.response_callbacks
for ((args, options), result) in zip(stack, raw_results):
command_name = args[0]
if command_name in response_callbacks:
result = response_callbacks[command_name](result,
**options)
results.append(result)
return results
finally:
if executing_pipeline:
executing_pipeline.reset()
pipeline.reset() |
Periodic task schedule: Use to schedule a task to run periodically,
starting from start_date (or None to be active immediately) until end_date
(or None to repeat forever).
The period starts at the given start_date, or on Jan 1st 2000.
For more details, see README. | def periodic(seconds=0, minutes=0, hours=0, days=0, weeks=0, start_date=None,
end_date=None):
"""
Periodic task schedule: Use to schedule a task to run periodically,
starting from start_date (or None to be active immediately) until end_date
(or None to repeat forever).
The period starts at the given start_date, or on Jan 1st 2000.
For more details, see README.
"""
period = seconds + minutes*60 + hours*3600 + days*86400 + weeks*604800
assert period > 0, "Must specify a positive period."
if not start_date:
# Saturday at midnight
start_date = datetime.datetime(2000, 1, 1)
return (_periodic, (period, start_date, end_date)) |
Return an attribute from a dotted path name (e.g. "path.to.func"). | def import_attribute(name):
"""Return an attribute from a dotted path name (e.g. "path.to.func")."""
try:
sep = ':' if ':' in name else '.' # For backwards compatibility
module_name, attribute = name.rsplit(sep, 1)
module = importlib.import_module(module_name)
return operator.attrgetter(attribute)(module)
except (ValueError, ImportError, AttributeError) as e:
raise TaskImportError(e) |
Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks. | def gen_unique_id(serialized_name, args, kwargs):
"""
Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks.
"""
return hashlib.sha256(json.dumps({
'func': serialized_name,
'args': args,
'kwargs': kwargs,
}, sort_keys=True).encode('utf8')).hexdigest() |
Returns the dotted serialized path to the passed function. | def serialize_func_name(func):
"""
Returns the dotted serialized path to the passed function.
"""
if func.__module__ == '__main__':
raise ValueError('Functions from the __main__ module cannot be '
'processed by workers.')
try:
# This will only work on Python 3.3 or above, but it will allow us to use static/classmethods
func_name = func.__qualname__
except AttributeError:
func_name = func.__name__
return ':'.join([func.__module__, func_name]) |
For a string "a.b.c", yields "a", "a.b", "a.b.c". | def dotted_parts(s):
"""
For a string "a.b.c", yields "a", "a.b", "a.b.c".
"""
idx = -1
while s:
idx = s.find('.', idx+1)
if idx == -1:
yield s
break
yield s[:idx] |
For a string "a.b.c", yields "a.b.c", "a.b", "a". | def reversed_dotted_parts(s):
"""
For a string "a.b.c", yields "a.b.c", "a.b", "a".
"""
idx = -1
if s:
yield s
while s:
idx = s.rfind('.', 0, idx)
if idx == -1:
break
yield s[:idx] |
TaskTiger structlog processor.
Inject the current task id for non-batch tasks. | def tasktiger_processor(logger, method_name, event_dict):
"""
TaskTiger structlog processor.
Inject the current task id for non-batch tasks.
"""
if g['current_tasks'] is not None and not g['current_task_is_batch']:
event_dict['task_id'] = g['current_tasks'][0].id
return event_dict |
Whether this task should be retried when the given exception occurs. | def should_retry_on(self, exception_class, logger=None):
"""
Whether this task should be retried when the given exception occurs.
"""
for n in (self.retry_on or []):
try:
if issubclass(exception_class, import_attribute(n)):
return True
except TaskImportError:
if logger:
logger.error('should_retry_on could not import class',
exception_name=n)
return False |
Internal helper to move a task from one state to another (e.g. from
QUEUED to DELAYED). The "when" argument indicates the timestamp of the
task in the new state. If no to_state is specified, the task will be
simply removed from the original state.
The "mode" param can be specified to define how the timestamp in the
new state should be updated and is passed to the ZADD Redis script (see
its documentation for details).
Raises TaskNotFound if the task is not in the expected state or not in
the expected queue. | def _move(self, from_state=None, to_state=None, when=None, mode=None):
"""
Internal helper to move a task from one state to another (e.g. from
QUEUED to DELAYED). The "when" argument indicates the timestamp of the
task in the new state. If no to_state is specified, the task will be
simply removed from the original state.
The "mode" param can be specified to define how the timestamp in the
new state should be updated and is passed to the ZADD Redis script (see
its documentation for details).
Raises TaskNotFound if the task is not in the expected state or not in
the expected queue.
"""
pipeline = self.tiger.connection.pipeline()
scripts = self.tiger.scripts
_key = self.tiger._key
from_state = from_state or self.state
queue = self.queue
assert from_state
assert queue
scripts.fail_if_not_in_zset(_key(from_state, queue), self.id,
client=pipeline)
if to_state:
if not when:
when = time.time()
if mode:
scripts.zadd(_key(to_state, queue), when, self.id,
mode, client=pipeline)
else:
pipeline.zadd(_key(to_state, queue), self.id, when)
pipeline.sadd(_key(to_state), queue)
pipeline.zrem(_key(from_state, queue), self.id)
if not to_state: # Remove the task if necessary
if self.unique:
# Only delete if it's not in any other queue
check_states = set([ACTIVE, QUEUED, ERROR, SCHEDULED])
check_states.remove(from_state)
# TODO: Do the following two in one call.
scripts.delete_if_not_in_zsets(_key('task', self.id, 'executions'),
self.id, [
_key(state, queue) for state in check_states
], client=pipeline)
scripts.delete_if_not_in_zsets(_key('task', self.id),
self.id, [
_key(state, queue) for state in check_states
], client=pipeline)
else:
# Safe to remove
pipeline.delete(_key('task', self.id, 'executions'))
pipeline.delete(_key('task', self.id))
scripts.srem_if_not_exists(_key(from_state), queue,
_key(from_state, queue), client=pipeline)
if to_state == QUEUED:
pipeline.publish(_key('activity'), queue)
try:
scripts.execute_pipeline(pipeline)
except redis.ResponseError as e:
if '<FAIL_IF_NOT_IN_ZSET>' in e.args[0]:
raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
self.id, queue, from_state
))
raise
else:
self._state = to_state |
Updates a scheduled task's date to the given date. If the task is not
scheduled, a TaskNotFound exception is raised. | def update_scheduled_time(self, when):
"""
Updates a scheduled task's date to the given date. If the task is not
scheduled, a TaskNotFound exception is raised.
"""
tiger = self.tiger
ts = get_timestamp(when)
assert ts
pipeline = tiger.connection.pipeline()
key = tiger._key(SCHEDULED, self.queue)
tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline)
pipeline.zscore(key, self.id)
_, score = pipeline.execute()
if not score:
raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
self.id, self.queue, SCHEDULED
))
self._ts = ts |