<SYSTEM_TASK:>
Indefinitely checks the writer queue for data to write
<END_TASK>
<USER_TASK:>
Description:
def _writer(self):
"""
Indefinitely checks the writer queue for data to write
to socket.
""" |
while not self.closed:
try:
sock, data = self._write_queue.get(timeout=0.1)
self._write_queue.task_done()
sock.send(data)
except Empty:
pass # nothing to write after timeout
except socket.error as err:
if err.errno == errno.EBADF:
self._clean_dead_sessions() |
<SYSTEM_TASK:>
While the client is not marked as closed, performs a socket select
<END_TASK>
<USER_TASK:>
Description:
def _select(self):
"""
While the client is not marked as closed, performs a socket select
on all PushSession sockets. If any data is received, parses and
forwards it on to the callback function. If the callback is
successful, a PublishMessageReceived message is sent.
""" |
try:
while not self.closed:
try:
inputready = select.select(self.sessions.keys(), [], [], 0.1)[0]
for sock in inputready:
session = self.sessions[sock]
sck = session.socket
if sck is None:
# Socket has since been deleted, continue
continue
# If no defined message length, nothing has been
# consumed yet, parse the header.
if session.message_length == 0:
# Read header information before receiving rest of
# message.
response_type = _read_msg_header(session)
if response_type == NO_DATA:
# No data could be read, assume socket closed.
if session.socket is not None:
self.log.error("Socket closed for Monitor %s." % session.monitor_id)
self._restart_session(session)
continue
elif response_type == INCOMPLETE:
# More Data to be read. Continue.
continue
elif response_type != PUBLISH_MESSAGE:
self.log.warning("Response Type (%x) does not match PublishMessage (%x)"
% (response_type, PUBLISH_MESSAGE))
continue
try:
if not _read_msg(session):
# Data not completely read, continue.
continue
except PushException as err:
# If the socket is None it was closed deliberately;
# otherwise it was closed when it shouldn't have been,
# so restart it.
session.data = six.b("")
session.message_length = 0
if session.socket is None:
    del self.sessions[sock]
else:
self.log.exception(err)
self._restart_session(session)
continue
# We received full payload,
# clear session data and parse it.
data = session.data
session.data = six.b("")
session.message_length = 0
block_id = struct.unpack('!H', data[0:2])[0]
compression = struct.unpack('!B', data[4:5])[0]
payload = data[10:]
if compression == 0x01:
# Data is compressed, uncompress it.
payload = zlib.decompress(payload)
# Enqueue payload into a callback queue to be
# invoked
self._callback_pool.queue_callback(session, block_id, payload)
except select.error as err:
# Evaluate sessions if we get a bad file descriptor, if
# socket is gone, delete the session.
if err.args[0] == errno.EBADF:
self._clean_dead_sessions()
except Exception as err:
self.log.exception(err)
finally:
for session in self.sessions.values():
if session is not None:
session.stop() |
<SYSTEM_TASK:>
Initializes the IO and Writer threads
<END_TASK>
<USER_TASK:>
Description:
def _init_threads(self):
"""Initializes the IO and Writer threads""" |
if self._io_thread is None:
self._io_thread = Thread(target=self._select)
self._io_thread.start()
if self._writer_thread is None:
self._writer_thread = Thread(target=self._writer)
self._writer_thread.start() |
<SYSTEM_TASK:>
Creates and Returns a PushSession instance based on the input monitor
<END_TASK>
<USER_TASK:>
Description:
def create_session(self, callback, monitor_id):
"""
Creates and Returns a PushSession instance based on the input monitor
and callback. When data is received, callback will be invoked.
If neither monitor nor monitor_id is specified, an exception is raised.
:param callback: Callback function to call when PublishMessage
messages are received. Expects 1 argument which will contain the
payload of the pushed message. Additionally, expects
function to return True if callback was able to process
the message, False or None otherwise.
:param monitor_id: The id of the Monitor, will be queried
to understand parameters of the monitor.
""" |
self.log.info("Creating Session for Monitor %s." % monitor_id)
session = SecurePushSession(callback, monitor_id, self, self._ca_certs) \
if self._secure else PushSession(callback, monitor_id, self)
session.start()
self.sessions[session.socket.fileno()] = session
self._init_threads()
return session |
<SYSTEM_TASK:>
Stops all session activity.
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
"""Stops all session activity.
Blocks until the io and writer threads die.
""" |
if self._io_thread is not None:
self.log.info("Waiting for I/O thread to stop...")
self.closed = True
self._io_thread.join()
if self._writer_thread is not None:
self.log.info("Waiting for Writer Thread to stop...")
self.closed = True
self._writer_thread.join()
self.log.info("All worker threads stopped.") |
<SYSTEM_TASK:>
Detect the encoding of a file.
<END_TASK>
<USER_TASK:>
Description:
def detect(filename, include_confidence=False):
"""
Detect the encoding of a file.
Returns only the predicted current encoding as a string.
If `include_confidence` is True,
Returns tuple containing: (str encoding, float confidence)
""" |
# chardet needs bytes, so open the file in binary mode
with open(filename, 'rb') as f:
    detection = chardet.detect(f.read())
encoding = detection.get('encoding')
confidence = detection.get('confidence')
if include_confidence:
return (encoding, confidence)
return encoding |
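A minimal usage sketch (assuming chardet is installed; the file name "data.csv" is illustrative):

encoding = detect("data.csv")
print(encoding)  # e.g. "utf-8"
encoding, confidence = detect("data.csv", include_confidence=True)
print(encoding, confidence)  # e.g. "utf-8" 0.99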
<SYSTEM_TASK:>
Utility function for downloading files from the web
<END_TASK>
<USER_TASK:>
Description:
def download(url, localFileName=None, localDirName=None):
"""
Utility function for downloading files from the web
and retaining the same filename.
""" |
localName = url2name(url)
req = Request(url)
r = urlopen(req)
content_disposition = r.info().get('Content-Disposition')
if content_disposition:
    # If the response has Content-Disposition, we take the file name from it
    localName = content_disposition.split('filename=')
    if len(localName) > 1:
        localName = localName[1]
        if localName[0] == '"' or localName[0] == "'":
            localName = localName[1:-1]
    else:
        localName = url2name(r.url)
elif r.url != url:
# if we were redirected, the real file name we take from the final URL
localName = url2name(r.url)
if localFileName:
# we can force to save the file as specified name
localName = localFileName
if localDirName:
# we can also put it in some custom directory
if not os.path.exists(localDirName):
os.makedirs(localDirName)
localName = os.path.join(localDirName, localName)
f = open(localName, 'wb')
f.write(r.read())
f.close() |
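Usage sketch (the URL is illustrative; url2name is assumed to reduce a URL to its basename):

download("https://example.com/report.pdf")                         # saved as report.pdf
download("https://example.com/report.pdf", localFileName="r.pdf")  # forced file name
download("https://example.com/report.pdf", localDirName="dl")      # saved under dl/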
<SYSTEM_TASK:>
Internal (unexposed) helper responsible for performing the translation.
<END_TASK>
<USER_TASK:>
Description:
def _t(unistr, charset_from, charset_to):
"""
Internal (unexposed) helper responsible for performing the translation.
""" |
# if type(unistr) is str:
# try:
# unistr = unistr.decode('utf-8')
# # Python 3 returns AttributeError when .decode() is called on a str
# # This means it is already unicode.
# except AttributeError:
# pass
# try:
# if type(unistr) is not unicode:
# return unistr
# # Python 3 returns NameError because unicode is not a type.
# except NameError:
# pass
chars = []
for c in unistr:
idx = charset_from.find(c)
chars.append(charset_to[idx] if idx!=-1 else c)
return u''.join(chars) |
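A tiny sketch of the index-mapping behaviour, using two short hypothetical parallel charsets (the real charset strings are much longer, with matching positions):

SIMP_SAMPLE = u'简体'
TRAD_SAMPLE = u'簡體'
print(_t(u'简体字', SIMP_SAMPLE, TRAD_SAMPLE))
# -> 簡體字: mapped characters are replaced, unknown ones ('字') pass through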
<SYSTEM_TASK:>
Identify whether a string is simplified or traditional Chinese.
<END_TASK>
<USER_TASK:>
Description:
def identify(text):
"""Identify whether a string is simplified or traditional Chinese.
Returns:
None: if there are no recognized Chinese characters.
EITHER: if the test is inconclusive.
TRAD: if the text is traditional.
SIMP: if the text is simplified.
BOTH: the text has characters recognized as being solely traditional
and other characters recognized as being solely simplified.
""" |
filtered_text = set(text).intersection(ALL_CHARS)
if len(filtered_text) == 0:
return None
if filtered_text.issubset(SHARED_CHARS):
return EITHER
if filtered_text.issubset(TRAD_CHARS):
return TRAD
if filtered_text.issubset(SIMP_CHARS):
return SIMP
if filtered_text.difference(TRAD_CHARS).issubset(SIMP_CHARS):
return BOTH |
<SYSTEM_TASK:>
u"""
<END_TASK>
<USER_TASK:>
Description:
def split_text(text, include_part_of_speech=False, strip_english=False, strip_numbers=False):
u"""
Split Chinese text at word boundaries.
include_part_of_speech: also returns the Part Of Speech for each of the words.
Some of the different parts of speech are:
r: pronoun
v: verb
ns: proper noun
etc...
This all gets returned as a tuple:
index 0: the split word
index 1: the word's part of speech
strip_english: remove all entries that contain English (useful sometimes)
strip_numbers: remove all entries that are numbers
""" |
if not include_part_of_speech:
seg_list = pseg.cut(text)
if strip_english:
seg_list = filter(lambda x: not contains_english(x), seg_list)
if strip_numbers:
seg_list = filter(lambda x: not _is_number(x), seg_list)
return list(map(lambda i: i.word, seg_list))
else:
seg_list = pseg.cut(text)
objs = map(lambda w: (w.word, w.flag), seg_list)
if strip_english:
    objs = filter(lambda x: not contains_english(x[0]), objs)
if strip_numbers:
    objs = filter(lambda x: not _is_number(x[0]), objs)
# if was_traditional:
#     objs = map(tradify, objs)
return list(objs) |
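Usage sketch (the exact segmentation depends on the jieba dictionary in use):

words = split_text(u'我来到北京清华大学')
# e.g. ['我', '来到', '北京', '清华大学']
pairs = split_text(u'我来到北京清华大学', include_part_of_speech=True)
# e.g. [('我', 'r'), ('来到', 'v'), ('北京', 'ns'), ('清华大学', 'nt')]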
<SYSTEM_TASK:>
Returns a boolean indicating whether or not the string can be parsed by
<END_TASK>
<USER_TASK:>
Description:
def is_special_atom(cron_atom, span):
"""
Returns a boolean indicating whether or not the string can be parsed by
parse_atom to produce a static set. In the process of examining the
string, the syntax of any special character uses is also checked.
""" |
for special_char in ('%', '#', 'L', 'W'):
if special_char not in cron_atom:
continue
if special_char == '#':
if span != DAYS_OF_WEEK:
raise ValueError("\"#\" invalid where used.")
elif not VALIDATE_POUND.match(cron_atom):
raise ValueError("\"#\" syntax incorrect.")
elif special_char == "W":
if span != DAYS_OF_MONTH:
raise ValueError("\"W\" syntax incorrect.")
elif not(VALIDATE_W.match(cron_atom) and int(cron_atom[:-1]) > 0):
raise ValueError("Invalid use of \"W\".")
elif special_char == "L":
if span not in L_FIELDS:
raise ValueError("\"L\" invalid where used.")
elif span == DAYS_OF_MONTH:
if cron_atom != "L":
raise ValueError("\"L\" must be alone in days of month.")
elif span == DAYS_OF_WEEK:
if not VALIDATE_L_IN_DOW.match(cron_atom):
raise ValueError("\"L\" syntax incorrect.")
elif special_char == "%":
if not(cron_atom[1:].isdigit() and int(cron_atom[1:]) > 1):
raise ValueError("\"%\" syntax incorrect.")
return True
else:
return False |
<SYSTEM_TASK:>
Returns a set containing valid values for a given cron-style range of
<END_TASK>
<USER_TASK:>
Description:
def parse_atom(parse, minmax):
"""
Returns a set containing valid values for a given cron-style range of
numbers. The 'minmax' argument is a two-element iterable containing the
inclusive lower and upper limits of the expression.
Examples:
>>> parse_atom("1-5",(0,6))
set([1, 2, 3, 4, 5])
>>> parse_atom("*/6",(0,23))
set([0, 6, 12, 18])
>>> parse_atom("18-6/4",(0,23))
set([18, 22, 0, 4])
>>> parse_atom("*/9",(0,23))
set([0, 9, 18])
""" |
parse = parse.strip()
increment = 1
if parse == '*':
return set(range(minmax[0], minmax[1] + 1))
elif parse.isdigit():
# A single number still needs to be returned as a set
value = int(parse)
if value >= minmax[0] and value <= minmax[1]:
return set((value,))
else:
raise ValueError("\"%s\" is not within valid range." % parse)
elif '-' in parse or '/' in parse:
divide = parse.split('/')
subrange = divide[0]
if len(divide) == 2:
# Example: 1-3/5 or */7 increment should be 5 and 7 respectively
increment = int(divide[1])
if '-' in subrange:
# Example: a-b
prefix, suffix = [int(n) for n in subrange.split('-')]
if prefix < minmax[0] or suffix > minmax[1]:
raise ValueError("\"%s\" is not within valid range." % parse)
elif subrange.isdigit():
# Handle offset increments e.g. 5/15 to run at :05, :20, :35, and :50
return set(range(int(subrange), minmax[1] + 1, increment))
elif subrange == '*':
# Include all values with the given range
prefix, suffix = minmax
else:
raise ValueError("Unrecognized symbol \"%s\"" % subrange)
if prefix < suffix:
# Example: 7-10
return set(range(prefix, suffix + 1, increment))
else:
# Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4)
noskips = list(range(prefix, minmax[1] + 1))
noskips += list(range(minmax[0], suffix + 1))
return set(noskips[::increment])
else:
raise ValueError("Atom \"%s\" not in a recognized format." % parse) |
<SYSTEM_TASK:>
Recomputes the sets for the static ranges of the trigger time.
<END_TASK>
<USER_TASK:>
Description:
def compute_numtab(self):
"""
Recomputes the sets for the static ranges of the trigger time.
This method should only be called by the user if the string_tab
member is modified.
""" |
self.numerical_tab = []
for field_str, span in zip(self.string_tab, FIELD_RANGES):
split_field_str = field_str.split(',')
if len(split_field_str) > 1 and "*" in split_field_str:
raise ValueError("\"*\" must be alone in a field.")
unified = set()
for cron_atom in split_field_str:
# parse_atom only handles static cases
if not(is_special_atom(cron_atom, span)):
unified.update(parse_atom(cron_atom, span))
self.numerical_tab.append(unified)
if self.string_tab[2] == "*" and self.string_tab[4] != "*":
self.numerical_tab[2] = set()
elif self.string_tab[4] == "*" and self.string_tab[2] != "*":
self.numerical_tab[4] = set() |
<SYSTEM_TASK:>
Returns boolean indicating if the trigger is active at the given time.
<END_TASK>
<USER_TASK:>
Description:
def check_trigger(self, date_tuple, utc_offset=0):
"""
Returns boolean indicating if the trigger is active at the given time.
The date tuple should be in the local time. Unless periodicities are
used, utc_offset does not need to be specified. If periodicities are
used, specifically in the hour and minutes fields, it is crucial that
the utc_offset is specified.
""" |
year, month, day, hour, mins = date_tuple
given_date = datetime.date(year, month, day)
zeroday = datetime.date(*self.epoch[:3])
last_dom = calendar.monthrange(year, month)[-1]
dom_matched = True
# In calendar and datetime.date.weekday, Monday = 0
given_dow = (datetime.date.weekday(given_date) + 1) % 7
first_dow = (given_dow + 1 - day) % 7
# Figure out how much time has passed from the epoch to the given date
utc_diff = utc_offset - self.epoch[5]
mod_delta_yrs = year - self.epoch[0]
mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
mod_delta_day = (given_date - zeroday).days
mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60
# Makes iterating through like components easier.
quintuple = zip(
(mins, hour, day, month, given_dow),
self.numerical_tab,
self.string_tab,
(mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
mod_delta_day),
FIELD_RANGES)
for value, valid_values, field_str, delta_t, field_type in quintuple:
# All valid, static values for the fields are stored in sets
if value in valid_values:
continue
# The following for loop implements the logic for context
# sensitive and epoch sensitive constraints. break statements,
# which are executed when a match is found, lead to a continue
# in the outer loop. If there are no matches found, the given date
# does not match expression constraints, so the function returns
# False as seen at the end of this for...else... construct.
for cron_atom in field_str.split(','):
if cron_atom[0] == '%':
if not(delta_t % int(cron_atom[1:])):
break
elif '#' in cron_atom:
D, N = int(cron_atom[0]), int(cron_atom[2])
# Computes the Nth occurrence of day-of-week D
if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
break
elif cron_atom[-1] == 'W':
target = min(int(cron_atom[:-1]), last_dom)
lands_on = (first_dow + target - 1) % 7
if lands_on == 0:
# Shift from Sun. to Mon. unless Mon. is next month
if target < last_dom:
target += 1
else:
target -= 2
elif lands_on == 6:
# Shift from Sat. to Fri. unless Fri. in prior month
if target > 1:
target -= 1
else:
target += 2
# Break if the day is correct, and target is a weekday
if target == day and (first_dow + target) % 7 > 1:
break
elif cron_atom[-1] == 'L':
# In dom field, L means the last day of the month
target = last_dom
if field_type == DAYS_OF_WEEK:
# Calculates the last occurrence of the given day of week
desired_dow = int(cron_atom[:-1])
target = (((desired_dow - first_dow) % 7) + 29)
if target > last_dom:
target -= 7
if target == day:
break
else:
# See 2010.11.15 of CHANGELOG
if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
dom_matched = False
continue
elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
# If we got here, then days of months validated so it does
# not matter that days of the week failed.
return dom_matched
# None of the expressions matched which means this field fails
return False
# Arriving at this point means the date landed within the constraints
# of all fields; the associated trigger should be fired.
return True |
<SYSTEM_TASK:>
Show the structure of self.rules_list; for debugging only.
<END_TASK>
<USER_TASK:>
Description:
def show(self):
"""Show the structure of self.rules_list, only for debug.""" |
for rule in self.rules_list:
result = ", ".join([str(check) for check, deny in rule])
print(result) |
<SYSTEM_TASK:>
Run self.rules_list.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Run self.rules_list.
Returns (True, None) as soon as one rule channel passes completely.
Otherwise returns (False, deny) with the deny() callable of the last failed check.
""" |
failed_result = None
for rule in self.rules_list:
for check, deny in rule:
if not check():
failed_result = (False, deny)
break
else:
return (True, None)
return failed_result |
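A sketch of the expected rules_list shape: a list of rule channels, each a list of (check, deny) pairs where check is a zero-argument callable and deny supplies the failure response (all names here are illustrative):

def is_logged_in(): return True
def deny_login(): return "please log in"
def is_admin(): return False
def deny_admin(): return "admins only"

rules_list = [
    [(is_logged_in, deny_login), (is_admin, deny_admin)],  # channel 1
]
# run() returns (True, None) if every check in some channel passes;
# here is_admin fails, so it returns (False, deny_admin).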
<SYSTEM_TASK:>
Set the meter indicator. Value should be between 0 and 1.
<END_TASK>
<USER_TASK:>
Description:
def set_fraction(self, value):
"""Set the meter indicator. Value should be between 0 and 1.""" |
value = min(abs(value), 1)
if self.horizontal:
width = int(self.width * value)
height = self.height
else:
width = self.width
height = int(self.height * value)
self.canvas.coords(self.meter, self.xpos, self.ypos,
self.xpos + width, self.ypos + height) |
<SYSTEM_TASK:>
Returns a human readable string of a byte-value.
<END_TASK>
<USER_TASK:>
Description:
def format_num(num, unit='bytes'):
"""
Returns a human readable string of a byte-value.
If 'num' is bits, set unit='bits'.
""" |
if unit == 'bytes':
extension = 'B'
else:
# if it's not bytes, it's bits
extension = 'Bit'
for dimension in (unit, 'K', 'M', 'G', 'T'):
if num < 1024:
if dimension == unit:
return '%3.1f %s' % (num, dimension)
return '%3.1f %s%s' % (num, dimension, extension)
num /= 1024
return '%3.1f P%s' % (num, extension) |
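Behaviour sketch (the values shown assume Python 3 true division):

print(format_num(999))                # '999.0 bytes'
print(format_num(123456))             # '120.6 KB'
print(format_num(10 ** 13))           # '9.1 TB'
print(format_num(2048, unit='bits'))  # '2.0 KBit'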
<SYSTEM_TASK:>
Generate a Content-Disposition header for a given filename.
<END_TASK>
<USER_TASK:>
Description:
def build_header(
filename, disposition='attachment', filename_compat=None
):
"""Generate a Content-Disposition header for a given filename.
For legacy clients that don't understand the filename* parameter,
a filename_compat value may be given.
It should either be ascii-only (recommended) or iso-8859-1 only.
In the latter case it should be a character string
(unicode in Python 2).
Options for generating filename_compat (only useful for legacy clients):
- ignore (will only send filename*);
- strip accents using unicode's decomposing normalisations,
which can be done from unicode data (stdlib), and keep only ascii;
- use the ascii transliteration tables from Unidecode (PyPI);
- use iso-8859-1
Ignore is the safest, and can be used to trigger a fallback
to the document location (which can be percent-encoded utf-8
if you control the URLs).
See https://tools.ietf.org/html/rfc6266#appendix-D
""" |
# While this method exists, it could also sanitize the filename
# by rejecting slashes or other weirdness that might upset a receiver.
if disposition != 'attachment':
assert is_token(disposition)
rv = disposition
if is_token(filename):
rv += '; filename=%s' % (filename, )
return rv
elif is_ascii(filename) and is_lws_safe(filename):
qd_filename = qd_quote(filename)
rv += '; filename="%s"' % (qd_filename, )
if qd_filename == filename:
# RFC 6266 claims some implementations are iffy on qdtext's
# backslash-escaping, we'll include filename* in that case.
return rv
elif filename_compat:
if is_token(filename_compat):
rv += '; filename=%s' % (filename_compat, )
else:
assert is_lws_safe(filename_compat)
rv += '; filename="%s"' % (qd_quote(filename_compat), )
# alnum are already considered always-safe, but the rest isn't.
# Python encodes ~ when it shouldn't, for example.
rv += "; filename*=utf-8''%s" % (percent_encode(
filename, safe=attr_chars_nonalnum, encoding='utf-8'), )
# This will only encode filename_compat, if it used non-ascii iso-8859-1.
return rv.encode('iso-8859-1') |
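Usage sketch (outputs abbreviated; note that the final branch returns the header encoded as iso-8859-1 bytes):

print(build_header('hello.txt'))
# attachment; filename=hello.txt
print(build_header(u'résumé.pdf', filename_compat='resume.pdf'))
# attachment; filename=resume.pdf; filename*=utf-8''r%C3%A9sum%C3%A9.pdf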
<SYSTEM_TASK:>
The filename from the Content-Disposition header.
<END_TASK>
<USER_TASK:>
Description:
def filename_unsafe(self):
"""The filename from the Content-Disposition header.
If a location was passed at instanciation, the basename
from that may be used as a fallback. Otherwise, this may
be the None value.
On safety:
This property records the intent of the sender.
You shouldn't use this sender-controlled value as a filesystem
path, it can be insecure. Serving files with this filename can be
dangerous as well, due to a certain browser using the part after the
dot for mime-sniffing.
Saving it to a database is fine by itself though.
""" |
if 'filename*' in self.assocs:
return self.assocs['filename*'].string
elif 'filename' in self.assocs:
# XXX Reject non-ascii (parsed via qdtext) here?
return self.assocs['filename']
elif self.location is not None:
return posixpath.basename(self.location_path.rstrip('/')) |
<SYSTEM_TASK:>
Returns a filename that is safer to use on the filesystem.
<END_TASK>
<USER_TASK:>
Description:
def filename_sanitized(self, extension, default_filename='file'):
"""Returns a filename that is safer to use on the filesystem.
The filename will not contain a slash (nor the path separator
for the current platform, if different), it will not
start with a dot, and it will have the expected extension.
No guarantees that makes it "safe enough".
No effort is made to remove special characters;
using this value blindly might overwrite existing files, etc.
""" |
assert extension
assert extension[0] != '.'
assert default_filename
assert '.' not in default_filename
extension = '.' + extension
fname = self.filename_unsafe
if fname is None:
fname = default_filename
fname = posixpath.basename(fname)
fname = os.path.basename(fname)
fname = fname.lstrip('.')
if not fname:
fname = default_filename
if not fname.endswith(extension):
fname = fname + extension
return fname |
<SYSTEM_TASK:>
uptime in human readable format.
<END_TASK>
<USER_TASK:>
Description:
def str_uptime(self):
"""uptime in human readable format.""" |
mins, secs = divmod(self.uptime, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hours, mins, secs) |
<SYSTEM_TASK:>
Returns the upstream, downstream values as a tuple in bytes per
<END_TASK>
<USER_TASK:>
Description:
def transmission_rate(self):
"""
Returns the upstream, downstream values as a tuple in bytes per
second. Use this for periodical calling.
""" |
sent = self.bytes_sent
received = self.bytes_received
traffic_call = time.time()
time_delta = traffic_call - self.last_traffic_call
upstream = int(1.0 * (sent - self.last_bytes_sent)/time_delta)
downstream = int(1.0 * (received - self.last_bytes_received)/time_delta)
self.last_bytes_sent = sent
self.last_bytes_received = received
self.last_traffic_call = traffic_call
return upstream, downstream |
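Polling sketch (assumes transmission_rate is exposed as a property on a FritzStatus-like object named status; each reading measures against the counters stored at the previous call):

import time
for _ in range(3):
    time.sleep(5)  # let some traffic accumulate between readings
    up, down = status.transmission_rate
    print('up: %d B/s  down: %d B/s' % (up, down))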
<SYSTEM_TASK:>
Returns a tuple of human readable transmission rates in bytes.
<END_TASK>
<USER_TASK:>
Description:
def str_transmission_rate(self):
"""Returns a tuple of human readable transmission rates in bytes.""" |
upstream, downstream = self.transmission_rate
return (
fritztools.format_num(upstream),
fritztools.format_num(downstream)
) |
<SYSTEM_TASK:>
Helper method to construct the appropriate SOAP-body to call a
<END_TASK>
<USER_TASK:>
Description:
def _body_builder(self, kwargs):
"""
Helper method to construct the appropriate SOAP-body to call a
FritzBox-Service.
""" |
p = {
'action_name': self.name,
'service_type': self.service_type,
'arguments': '',
}
if kwargs:
arguments = [
self.argument_template % {'name': k, 'value': v}
for k, v in kwargs.items()
]
p['arguments'] = ''.join(arguments)
body = self.body_template.strip() % p
return body |
<SYSTEM_TASK:>
Calls the FritzBox action and returns a dictionary with the arguments.
<END_TASK>
<USER_TASK:>
Description:
def execute(self, **kwargs):
"""
Calls the FritzBox action and returns a dictionary with the arguments.
""" |
headers = self.header.copy()
headers['soapaction'] = '%s#%s' % (self.service_type, self.name)
data = self.envelope.strip() % self._body_builder(kwargs)
url = 'http://%s:%s%s' % (self.address, self.port, self.control_url)
auth = None
if self.password:
auth=HTTPDigestAuth(self.user, self.password)
response = requests.post(url, data=data, headers=headers, auth=auth)
# lxml needs bytes, therefore response.content (not response.text)
result = self.parse_response(response.content)
return result |
<SYSTEM_TASK:>
Returns a list of FritzAction instances.
<END_TASK>
<USER_TASK:>
Description:
def get_actions(self):
"""Returns a list of FritzAction instances.""" |
self._read_state_variables()
actions = []
nodes = self.root.iterfind(
'.//ns:action', namespaces={'ns': self.namespace})
for node in nodes:
action = FritzAction(self.service.service_type,
self.service.control_url)
action.name = node.find(self.nodename('name')).text
action.arguments = self._get_arguments(node)
actions.append(action)
return actions |
<SYSTEM_TASK:>
Returns a dictionary of arguments for the given action_node.
<END_TASK>
<USER_TASK:>
Description:
def _get_arguments(self, action_node):
"""
Returns a dictionary of arguments for the given action_node.
""" |
arguments = {}
argument_nodes = action_node.iterfind(
r'./ns:argumentList/ns:argument', namespaces={'ns': self.namespace})
for argument_node in argument_nodes:
argument = self._get_argument(argument_node)
arguments[argument.name] = argument
return arguments |
<SYSTEM_TASK:>
Returns a FritzActionArgument instance for the given argument_node.
<END_TASK>
<USER_TASK:>
Description:
def _get_argument(self, argument_node):
"""
Returns a FritzActionArgument instance for the given argument_node.
""" |
argument = FritzActionArgument()
argument.name = argument_node.find(self.nodename('name')).text
argument.direction = argument_node.find(self.nodename('direction')).text
rsv = argument_node.find(self.nodename('relatedStateVariable')).text
# TODO: track malformed xml-nodes (i.e. misspelled)
argument.data_type = self.state_variables.get(rsv, None)
return argument |
<SYSTEM_TASK:>
Read and evaluate the igddesc.xml file
<END_TASK>
<USER_TASK:>
Description:
def _read_descriptions(self, password):
"""
Read and evaluate the igddesc.xml file
and the tr64desc.xml file if a password is given.
""" |
descfiles = [FRITZ_IGD_DESC_FILE]
if password:
descfiles.append(FRITZ_TR64_DESC_FILE)
for descfile in descfiles:
parser = FritzDescParser(self.address, self.port, descfile)
if not self.modelname:
self.modelname = parser.get_modelname()
services = parser.get_services()
self._read_services(services) |
<SYSTEM_TASK:>
Get actions from services.
<END_TASK>
<USER_TASK:>
Description:
def _read_services(self, services):
"""Get actions from services.""" |
for service in services:
parser = FritzSCDPParser(self.address, self.port, service)
actions = parser.get_actions()
service.actions = {action.name: action for action in actions}
self.services[service.name] = service |
<SYSTEM_TASK:>
Returns an alphabetically sorted list of tuples with all known
<END_TASK>
<USER_TASK:>
Description:
def actionnames(self):
"""
Returns an alphabetically sorted list of tuples with all known
service- and action-names.
""" |
actions = []
for service_name in sorted(self.services.keys()):
action_names = self.services[service_name].actions.keys()
for action_name in sorted(action_names):
actions.append((service_name, action_name))
return actions |
<SYSTEM_TASK:>
Returns a list of tuples with all known arguments for the given
<END_TASK>
<USER_TASK:>
Description:
def get_action_arguments(self, service_name, action_name):
"""
Returns a list of tuples with all known arguments for the given
service- and action-name combination. The tuples contain the
argument-name, direction and data_type.
""" |
return self.services[service_name].actions[action_name].info |
<SYSTEM_TASK:>
Executes the given action. Raises a KeyError on unknown actions.
<END_TASK>
<USER_TASK:>
Description:
def call_action(self, service_name, action_name, **kwargs):
"""Executes the given action. Raise a KeyError on unkown actions.""" |
action = self.services[service_name].actions[action_name]
return action.execute(**kwargs) |
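Usage sketch (service and action names vary by box model and firmware; 'WANIPConnection'/'GetStatusInfo' are common examples, and the password is a placeholder):

fc = FritzConnection(address='192.168.178.1', password='secret')
status = fc.call_action('WANIPConnection', 'GetStatusInfo')
# -> dict of output arguments, e.g. {'NewConnectionStatus': 'Connected', ...}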
<SYSTEM_TASK:>
Recursively inject aXe into all iframes and the top level document.
<END_TASK>
<USER_TASK:>
Description:
def inject(self):
"""
Recursively inject aXe into all iframes and the top level document.
The axe-core script is read from self.script_url.
""" |
with open(self.script_url, "r", encoding="utf8") as f:
self.selenium.execute_script(f.read()) |
<SYSTEM_TASK:>
Run axe against the current page.
<END_TASK>
<USER_TASK:>
Description:
def run(self, context=None, options=None):
"""
Run axe against the current page.
:param context: which page part(s) to analyze and/or what to exclude.
:param options: dictionary of aXe options.
""" |
template = (
"var callback = arguments[arguments.length - 1];"
+ "axe.run(%s).then(results => callback(results))"
)
args = ""
# If context parameter is passed, add to args
if context is not None:
args += "%r" % context
# Add comma delimiter only if both parameters are passed
if context is not None and options is not None:
args += ","
# If options parameter is passed, add to args
if options is not None:
args += "%s" % options
command = template % args
response = self.selenium.execute_async_script(command)
return response |
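Usage sketch (assumes this method lives on an Axe-style wrapper around a Selenium driver, as in axe-selenium-python):

from selenium import webdriver
from axe_selenium_python import Axe  # assumed wrapper package

driver = webdriver.Firefox()
driver.get("https://example.com")
axe = Axe(driver)
axe.inject()
results = axe.run()  # optionally: axe.run(options={"runOnly": {"type": "tag", "values": ["wcag2a"]}})
driver.quit()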
<SYSTEM_TASK:>
Return readable report of accessibility violations found.
<END_TASK>
<USER_TASK:>
Description:
def report(self, violations):
"""
Return readable report of accessibility violations found.
:param violations: Dictionary of violations.
:type violations: dict
:return report: Readable report of violations.
:rtype: string
""" |
string = ""
string += "Found " + str(len(violations)) + " accessibility violations:"
for violation in violations:
string += (
"\n\n\nRule Violated:\n"
+ violation["id"]
+ " - "
+ violation["description"]
+ "\n\tURL: "
+ violation["helpUrl"]
+ "\n\tImpact Level: "
+ violation["impact"]
+ "\n\tTags:"
)
for tag in violation["tags"]:
string += " " + tag
string += "\n\tElements Affected:"
i = 1
for node in violation["nodes"]:
for target in node["target"]:
string += "\n\t" + str(i) + ") Target: " + target
i += 1
for item in node["all"]:
string += "\n\t\t" + item["message"]
for item in node["any"]:
string += "\n\t\t" + item["message"]
for item in node["none"]:
string += "\n\t\t" + item["message"]
string += "\n\n\n"
return string |
<SYSTEM_TASK:>
Write JSON to file with the specified name.
<END_TASK>
<USER_TASK:>
Description:
def write_results(self, data, name=None):
"""
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param data: JSON-serializable data to write.
""" |
if name:
filepath = os.path.abspath(name)
else:
filepath = os.path.join(os.getcwd(), "results.json")
with open(filepath, "w", encoding="utf8") as f:
try:
f.write(unicode(json.dumps(data, indent=4)))
except NameError:
f.write(json.dumps(data, indent=4)) |
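A typical end-to-end sketch combining the pieces above (the file name is illustrative, and results comes from the run() call shown earlier):

axe.write_results(results, "a11y_results.json")
print(axe.report(results["violations"]))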
<SYSTEM_TASK:>
Constant time comparison of bytes for py3, strings for py2
<END_TASK>
<USER_TASK:>
Description:
def _hashes_match(self, a, b):
"""Constant time comparison of bytes for py3, strings for py2""" |
if len(a) != len(b):
return False
diff = 0
if six.PY2:
a = bytearray(a)
b = bytearray(b)
for x, y in zip(a, b):
diff |= x ^ y
return not diff |
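The XOR-accumulating loop is the classic constant-time comparison: it always inspects every byte, so timing does not reveal the position of the first mismatch. On Python 2.7.7+/3.3+ the stdlib offers the same guarantee:

import hmac
hmac.compare_digest(b"expected-digest", b"received-digest")  # False, in constant time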
<SYSTEM_TASK:>
Convert ACIS 'll' value into separate latitude and longitude.
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
"""
Convert ACIS 'll' value into separate latitude and longitude.
""" |
super(AcisIO, self).parse()
# This is more of a "mapping" step than a "parsing" step, but mappers
# only allow one-to-one mapping from input fields to output fields.
for row in self.data:
if 'meta' in row:
row = row['meta']
if 'll' in row:
row['longitude'], row['latitude'] = row['ll']
del row['ll'] |
<SYSTEM_TASK:>
ACIS web service returns "meta" and "data" for each station;
<END_TASK>
<USER_TASK:>
Description:
def get_field_names(self):
"""
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
""" |
field_names = super(StationDataIO, self).get_field_names()
if set(field_names) == set(['meta', 'data']):
meta_fields = list(self.data[0]['meta'].keys())
if set(meta_fields) < set(self.getvalue('meta')):
meta_fields = self.getvalue('meta')
field_names = list(meta_fields) + ['data']
return field_names |
<SYSTEM_TASK:>
ACIS web service returns "meta" and "data" for each station; use meta
<END_TASK>
<USER_TASK:>
Description:
def usable_item(self, data):
"""
ACIS web service returns "meta" and "data" for each station; use meta
attributes as item values, and add an IO for iterating over "data"
""" |
# Use metadata as item
item = data['meta']
# Add nested IO for data
elems, elems_is_complex = self.getlist('parameter')
if elems_is_complex:
elems = [elem['name'] for elem in elems]
add, add_is_complex = self.getlist('add')
item['data'] = DataIO(
data=data['data'],
parameter=elems,
add=add,
start_date=self.getvalue('start_date'),
end_date=self.getvalue('end_date'),
)
# TupleMapper will convert item to namedtuple
return super(StationDataIO, self).usable_item(item) |
<SYSTEM_TASK:>
MultiStnData data results are arrays without explicit dates;
<END_TASK>
<USER_TASK:>
Description:
def load_data(self, data):
"""
MultiStnData data results are arrays without explicit dates;
Infer time series based on start date.
""" |
dates = fill_date_range(self.start_date, self.end_date)
for row, date in zip(data, dates):
data = {'date': date}
if self.add:
# If self.add is set, results will contain additional
# attributes (e.g. flags). In that case, create one row per
# result, with attributes "date", "elem", "value", and one for
# each item in self.add.
for elem, vals in zip(self.parameter, row):
data['elem'] = elem
for add, val in zip(['value'] + self.add, vals):
data[add] = val
yield data
else:
# Otherwise, return one row per date, with "date" and each
# element's value as attributes.
for elem, val in zip(self.parameter, row):
# namedtuple doesn't like numeric field names
if elem.isdigit():
elem = "e%s" % elem
data[elem] = val
yield data |
<SYSTEM_TASK:>
Enforce rules and return parsed value
<END_TASK>
<USER_TASK:>
Description:
def parse(self, value):
"""
Enforce rules and return parsed value
""" |
if self.required and value is None:
raise ValueError("%s is required!" % self.name)
elif self.ignored and value is not None:
warn("%s is ignored for this class!" % self.name)
elif not self.multi and isinstance(value, (list, tuple)):
if len(value) > 1:
raise ValueError(
"%s does not accept multiple values!" % self.name
)
return value[0]
elif self.multi and value is not None:
    if not isinstance(value, (list, tuple)):
        return [value]
    return value
return value |
<SYSTEM_TASK:>
Set parameter key, noting whether list value is "complex"
<END_TASK>
<USER_TASK:>
Description:
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
""" |
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex |
<SYSTEM_TASK:>
Get parameters for web service, noting whether any are "complex"
<END_TASK>
<USER_TASK:>
Description:
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
""" |
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex |
<SYSTEM_TASK:>
Sync the template with the python code.
<END_TASK>
<USER_TASK:>
Description:
def do_pot(self):
"""
Sync the template with the python code.
""" |
files_to_translate = []
log.debug("Collecting python sources for pot ...")
for source_path in self._source_paths:
for source_path in self._iter_suffix(path=source_path, suffix=".py"):
log.debug("... add to pot: {source}".format(source=str(source_path)))
files_to_translate.append(str(source_path))
for system_file in self.SYSTEM_SOURCE_FILES:
files_to_translate.append(str(self._system_path / system_file))
# FIXME: use separate domain for system source translations? Merge them when generating mo's?
log.debug("Finished collection sources.")
pot_path = (self._po_path / self._basename).with_suffix(".pot")
command = ["xgettext", "--keyword=_", "--keyword=_translate",
"--output={output}".format(output=str(pot_path))]
command.extend(files_to_translate)
check_call(command)
log.debug("pot file \"{pot}\" created!".format(pot=str(pot_path)))
pot_copy_path = self._mo_path / pot_path.name
log.debug("Copying pot file to mo path: {pot_copy_path}".format(pot_copy_path=str(pot_copy_path)))
shutil.copy(str(pot_path), str(pot_copy_path)) |
<SYSTEM_TASK:>
Update all po files with the data in the pot reference file.
<END_TASK>
<USER_TASK:>
Description:
def do_po(self):
"""
Update all po files with the data in the pot reference file.
""" |
log.debug("Start updating po files ...")
pot_path = (self._po_path / self._basename).with_suffix(".pot")
for po_dir_path in self._iter_po_dir():
po_path = (po_dir_path / self._basename).with_suffix(".po")
if po_path.exists():
log.debug("update {po}:".format(po=str(po_path)))
check_call(["msgmerge", "-U", str(po_path), str(pot_path)])
else:
log.debug("create {po}:".format(po=str(po_path)))
check_call(["msginit", "-i", str(pot_path), "-o", str(po_path), "--no-translator"])
po_copy_path = self._mo_path / po_path.parent.name / po_path.name
po_copy_path.parent.mkdir(exist_ok=True)
log.debug("Copying po file to mo path: {po_copy_path}".format(po_copy_path=str(po_copy_path)))
shutil.copy(str(po_path), str(po_copy_path))
log.debug("All po files updated") |
<SYSTEM_TASK:>
Generate mo files for all po files.
<END_TASK>
<USER_TASK:>
Description:
def do_mo(self):
"""
Generate mo files for all po files.
""" |
log.debug("Start updating mo files ...")
for po_dir_path in self._iter_po_dir():
po_path = (po_dir_path / self._basename).with_suffix(".po")
lc_path = self._mo_path / po_dir_path.name / "LC_MESSAGES"
lc_path.mkdir(parents=True, exist_ok=True)
mo_path = (lc_path / self._basename).with_suffix(".mo")
log.debug("Creating from {po}: {mo}".format(po=str(po_path), mo=str(mo_path)))
check_call(["msgfmt", str(po_path), "-o", str(mo_path)])
log.debug("All mo files updated") |
<SYSTEM_TASK:>
Processes the message received from the queue.
<END_TASK>
<USER_TASK:>
Description:
def _process_message(self, message: amqp.Message) -> None:
"""Processes the message received from the queue.""" |
if self.shutdown_pending.is_set():
return
try:
if isinstance(message.body, bytes):
message.body = message.body.decode()
description = json.loads(message.body)
except Exception:
logger.error("Cannot decode message. Dropping. Message: %r", message.body)
traceback.print_exc()
message.channel.basic_reject(message.delivery_tag, requeue=False)
else:
logger.info("Processing task: %r", description)
self._process_description(message, description) |
<SYSTEM_TASK:>
Logs the time spent while running the task.
<END_TASK>
<USER_TASK:>
Description:
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
"""Logs the time spent while running the task.""" |
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta)) |
<SYSTEM_TASK:>
Counts down from MAX_WORKER_RUN_TIME. When it reaches zero, shuts down
<END_TASK>
<USER_TASK:>
Description:
def _shutdown_timer(self) -> None:
"""Counts down from MAX_WORKER_RUN_TIME. When it reaches zero sutdown
gracefully.
""" |
remaining = self._max_run_time - self.uptime
if not self.shutdown_pending.wait(remaining):
logger.warning('Run time reached zero')
self.shutdown() |
<SYSTEM_TASK:>
Shutdown after processing current task.
<END_TASK>
<USER_TASK:>
Description:
def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task.""" |
logger.warning("Catched SIGINT")
self.shutdown() |
<SYSTEM_TASK:>
Used internally to fail the task when connection to RabbitMQ is
<END_TASK>
<USER_TASK:>
Description:
def _handle_sighup(self, signum: int, frame: Any) -> None:
"""Used internally to fail the task when connection to RabbitMQ is
lost during the execution of the task.
""" |
logger.warning("Catched SIGHUP")
exc_info = self._heartbeat_exc_info
self._heartbeat_exc_info = None
# Format exception info to see in tools like Sentry.
formatted_exception = ''.join(traceback.format_exception(*exc_info)) # noqa
raise HeartbeatError(exc_info) |
<SYSTEM_TASK:>
What to do when a Folder in the tree is clicked
<END_TASK>
<USER_TASK:>
Description:
def onFolderTreeClicked(self, proxyIndex):
"""What to do when a Folder in the tree is clicked""" |
if not proxyIndex.isValid():
return
index = self.proxyFileModel.mapToSource(proxyIndex)
settings = QSettings()
folder_path = self.fileModel.filePath(index)
settings.setValue('mainwindow/workingDirectory', folder_path) |
<SYSTEM_TASK:>
Sends a message to the queue.
<END_TASK>
<USER_TASK:>
Description:
def send_to_queue(
self,
args: Tuple=(),
kwargs: Dict[str, Any]={},
host: str=None,
wait_result: Union[int, float]=None,
message_ttl: Union[int, float]=None,
) -> Any:
"""
Sends a message to the queue.
A worker will run the task's function when it receives the message.
:param args: Arguments that will be passed to task on execution.
:param kwargs: Keyword arguments that will be passed to task
on execution.
:param host: Send this task to specific host. ``host`` will be
appended to the queue name. If ``host`` is "localhost", hostname
of the server will be appended to the queue name.
:param wait_result:
Wait for result from worker for ``wait_result`` seconds.
If timeout occurs,
:class:`~kuyruk.exceptions.ResultTimeout` is raised.
If an exception occurs in the worker,
:class:`~kuyruk.exceptions.RemoteException` is raised.
:param message_ttl:
If set, message will be destroyed in queue after ``message_ttl``
seconds.
:return: Result from worker if ``wait_result`` is set,
else :const:`None`.
""" |
if self.kuyruk.config.EAGER:
# Run the task in current process
result = self.apply(*args, **kwargs)
return result if wait_result else None
logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs)
queue = self._queue_for_host(host)
description = self._get_description(args, kwargs)
self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description)
body = json.dumps(description)
msg = amqp.Message(body=body)
if wait_result:
# Use direct reply-to feature from RabbitMQ:
# https://www.rabbitmq.com/direct-reply-to.html
msg.properties['reply_to'] = 'amq.rabbitmq.reply-to'
if message_ttl:
msg.properties['expiration'] = str(int(message_ttl * 1000))
with self.kuyruk.channel() as ch:
if wait_result:
result = Result(ch.connection)
ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message)
ch.queue_declare(queue=queue, durable=True, auto_delete=False)
ch.basic_publish(msg, exchange="", routing_key=queue)
self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description)
if wait_result:
return result.wait(wait_result) |
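Producer-side sketch (mirrors the documented Kuyruk usage; queue and broker settings come from the Kuyruk config):

from kuyruk import Kuyruk

kuyruk = Kuyruk()

@kuyruk.task()
def echo(message):
    print(message)

echo.send_to_queue(args=("hello world",))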
<SYSTEM_TASK:>
Called by workers to run the wrapped function.
<END_TASK>
<USER_TASK:>
Description:
def apply(self, *args: Any, **kwargs: Any) -> Any:
"""Called by workers to run the wrapped function.
You may call it yourself if you want to run the task in current process
without sending to the queue.
If task has a `retry` property it will be retried on failure.
If task has a `max_run_time` property the task will not be allowed to
run more than that.
""" |
def send_signal(sig: Signal, **extra: Any) -> None:
self._send_signal(sig, args=args, kwargs=kwargs, **extra)
logger.debug("Applying %r, args=%r, kwargs=%r", self, args, kwargs)
send_signal(signals.task_preapply)
try:
tries = 1 + self.retry
while 1:
tries -= 1
send_signal(signals.task_prerun)
try:
with time_limit(self.max_run_time or 0):
return self.f(*args, **kwargs)
except Exception:
send_signal(signals.task_error, exc_info=sys.exc_info())
if tries <= 0:
raise
else:
break
finally:
send_signal(signals.task_postrun)
except Exception:
send_signal(signals.task_failure, exc_info=sys.exc_info())
raise
else:
send_signal(signals.task_success)
finally:
send_signal(signals.task_postapply) |
<SYSTEM_TASK:>
Module name of the wrapped function.
<END_TASK>
<USER_TASK:>
Description:
def _module_name(self) -> str:
"""Module name of the wrapped function.""" |
name = self.f.__module__
if name == '__main__':
return importer.main_module_name()
return name |
<SYSTEM_TASK:>
Load values from environment variables.
<END_TASK>
<USER_TASK:>
Description:
def from_env_vars(self) -> None:
"""Load values from environment variables.
Keys must start with `KUYRUK_`.""" |
for key, value in os.environ.items():
if key.startswith('KUYRUK_'):
key = key[7:]
if hasattr(Config, key):
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
pass
self._setattr(key, value) |
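Sketch: the KUYRUK_ prefix is stripped and the remainder must match an existing Config attribute; values are parsed with ast.literal_eval when possible:

# export KUYRUK_RABBIT_HOST=rabbit.example.com   -> kept as the raw string
# export KUYRUK_RABBIT_PORT=5672                 -> parsed to the int 5672
config = Config()
config.from_env_vars()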
<SYSTEM_TASK:>
Context manager for temporarily setting a keyword argument and
<END_TASK>
<USER_TASK:>
Description:
def option(current_kwargs, **kwargs):
"""
Context manager for temporarily setting a keyword argument and
then restoring it to whatever it was before.
""" |
tmp_kwargs = dict((key, current_kwargs.get(key)) for key in kwargs)
current_kwargs.update(kwargs)
try:
    yield
finally:
    current_kwargs.update(tmp_kwargs) |
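Usage sketch (assumes the generator above is decorated with contextlib.contextmanager, which its yield implies):

kwargs = {'autoescape': False}
with option(kwargs, autoescape=True):
    assert kwargs['autoescape'] is True   # temporarily overridden
assert kwargs['autoescape'] is False      # restored on exit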
<SYSTEM_TASK:>
Returns True if `node` is a method call for `method_name`. `method_name`
<END_TASK>
<USER_TASK:>
Description:
def is_method_call(node, method_name):
"""
Returns True if `node` is a method call for `method_name`. `method_name`
can be either a string or an iterable of strings.
""" |
if not isinstance(node, nodes.Call):
return False
if isinstance(node.node, nodes.Getattr):
# e.g. foo.bar()
method = node.node.attr
elif isinstance(node.node, nodes.Name):
# e.g. bar()
method = node.node.name
elif isinstance(node.node, nodes.Getitem):
# e.g. foo["bar"]()
method = node.node.arg.value
else:
return False
if isinstance(method_name, (list, tuple)):
return method in method_name
return method == method_name |
<SYSTEM_TASK:>
Returns the variable name assigned to the given dependency or None if the dependency has
<END_TASK>
<USER_TASK:>
Description:
def _get_depencency_var_name(self, dependency):
"""
Returns the variable name assigned to the given dependency or None if the dependency has
not yet been registered.
Args:
dependency (str): The dependency that needs to be imported.
Returns:
str or None
""" |
for dep_path, var_name in self.dependencies:
if dep_path == dependency:
return var_name |
<SYSTEM_TASK:>
Adds the given dependency and returns the variable name to use to access it. If `var_name`
<END_TASK>
<USER_TASK:>
Description:
def _add_dependency(self, dependency, var_name=None):
"""
Adds the given dependency and returns the variable name to use to access it. If `var_name`
is not given then a random one will be created.
Args:
dependency (str):
var_name (str, optional):
Returns:
str
""" |
if var_name is None:
var_name = next(self.temp_var_names)
# Don't add duplicate dependencies
if (dependency, var_name) not in self.dependencies:
self.dependencies.append((dependency, var_name))
return var_name |
<SYSTEM_TASK:>
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
<END_TASK>
<USER_TASK:>
Description:
def _process_output(self, node, **kwargs):
"""
Processes an output node, which will contain things like `Name` and `TemplateData` nodes.
""" |
for n in node.nodes:
self._process_node(n, **kwargs) |
<SYSTEM_TASK:>
Processes a `TemplateData` node, this is just a bit of as-is text
<END_TASK>
<USER_TASK:>
Description:
def _process_templatedata(self, node, **_):
"""
Processes a `TemplateData` node, this is just a bit of as-is text
to be written to the output.
""" |
# escape double quotes
value = re.sub('"', r'\\"', node.data)
# escape new lines
value = re.sub('\n', r'\\n', value)
# append value to the result
self.output.write('__result += "' + value + '";') |
<SYSTEM_TASK:>
Context manager for executing some JavaScript inside a template.
<END_TASK>
<USER_TASK:>
Description:
def _execution(self):
"""
Context manager for executing some JavaScript inside a template.
""" |
did_start_executing = False
if self.state == STATE_DEFAULT:
did_start_executing = True
self.state = STATE_EXECUTING
def close():
if did_start_executing and self.state == STATE_EXECUTING:
self.state = STATE_DEFAULT
yield close
close() |
<SYSTEM_TASK:>
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
<END_TASK>
<USER_TASK:>
Description:
def _scoped_variables(self, nodes_list, **kwargs):
"""
Context manager for creating scoped variables defined by the nodes in `nodes_list`.
These variables will be added to the context, and when the context manager exits the
context object will be restored to its previous state.
""" |
tmp_vars = []
for node in nodes_list:
is_assign_node = isinstance(node, nodes.Assign)
name = node.target.name if is_assign_node else node.name
# create a temp variable name
tmp_var = next(self.temp_var_names)
# save previous context value
with self._execution():
# save the current value of this name
self.output.write('var %s = %s.%s;' % (tmp_var, self.context_name, name))
# add new value to context
self.output.write('%s.%s = ' % (self.context_name, name))
if is_assign_node:
self._process_node(node.node, **kwargs)
else:
self.output.write(node.name)
self.output.write(';')
tmp_vars.append((tmp_var, name))
yield
# restore context
for tmp_var, name in tmp_vars:
with self._execution():
self.output.write('%s.%s = %s;' % (self.context_name, name, tmp_var)) |
<SYSTEM_TASK:>
Returns a new channel from a new connection as a context manager.
<END_TASK>
<USER_TASK:>
Description:
def channel(self) -> Iterator[amqp.Channel]:
"""Returns a new channel from a new connection as a context manager.""" |
with self.connection() as conn:
ch = conn.channel()
logger.info('Opened new channel')
with _safe_close(ch):
yield ch |
<SYSTEM_TASK:>
Returns a new connection as a context manager.
<END_TASK>
<USER_TASK:>
Description:
def connection(self) -> Iterator[amqp.Connection]:
"""Returns a new connection as a context manager.""" |
TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+.
socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
if sys.platform.startswith('darwin'):
del socket_settings[TCP_USER_TIMEOUT]
conn = amqp.Connection(
host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
userid=self.config.RABBIT_USER,
password=self.config.RABBIT_PASSWORD,
virtual_host=self.config.RABBIT_VIRTUAL_HOST,
connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
read_timeout=self.config.RABBIT_READ_TIMEOUT,
write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
socket_settings=socket_settings,
heartbeat=self.config.RABBIT_HEARTBEAT,
)
conn.connect()
logger.info('Connected to RabbitMQ')
with _safe_close(conn):
yield conn |
<SYSTEM_TASK:>
Helper routine that converts a Sentence protobuf to a string from
<END_TASK>
<USER_TASK:>
Description:
def to_text(sentence):
"""
Helper routine that converts a Sentence protobuf to a string from
its tokens.
""" |
text = ""
for i, tok in enumerate(sentence.token):
if i != 0:
text += tok.before
text += tok.word
return text |
<SYSTEM_TASK:>
Public method to show a message in the bottom part of the splashscreen.
<END_TASK>
<USER_TASK:>
Description:
def showMessage(self, message, *args):
"""
Public method to show a message in the bottom part of the splashscreen.
@param message message to be shown (string or QString)
""" |
QSplashScreen.showMessage(
self, message, Qt.AlignBottom | Qt.AlignRight | Qt.AlignAbsolute, QColor(Qt.white)) |
<SYSTEM_TASK:>
Returns the name of the main module.
<END_TASK>
<USER_TASK:>
Description:
def main_module_name() -> str:
"""Returns main module and module name pair.""" |
if not hasattr(main_module, '__file__'):
# running from interactive shell
return None
main_filename = os.path.basename(main_module.__file__)
module_name, ext = os.path.splitext(main_filename)
return module_name |
<SYSTEM_TASK:>
Returns the connection status of the data store.
<END_TASK>
<USER_TASK:>
Description:
def is_connected(self):
""" Returns the connection status of the data store.
Returns:
bool: ``True`` if the data store is connected to the MongoDB server.
""" |
if self._client is not None:
try:
self._client.server_info()
except ConnectionFailure:
return False
return True
else:
return False |
<SYSTEM_TASK:>
Establishes a connection to the MongoDB server.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
""" Establishes a connection to the MongoDB server.
Use the MongoProxy library in order to automatically handle AutoReconnect
exceptions in a graceful and reliable way.
""" |
mongodb_args = {
'host': self.host,
'port': self.port,
'username': self._username,
'password': self._password,
'authSource': self._auth_source,
'serverSelectionTimeoutMS': self._connect_timeout
}
if self._auth_mechanism is not None:
mongodb_args['authMechanism'] = self._auth_mechanism
self._client = MongoClient(**mongodb_args)
if self._handle_reconnect:
self._client = MongoClientProxy(self._client) |
<SYSTEM_TASK:>
Checks whether a document with the specified workflow id already exists.
<END_TASK>
<USER_TASK:>
Description:
def exists(self, workflow_id):
""" Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists.
""" |
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.find_one({"_id": ObjectId(workflow_id)}) is not None
except ConnectionFailure:
raise DataStoreNotConnected() |
<SYSTEM_TASK:>
Adds a new document to the data store and returns its id.
<END_TASK>
<USER_TASK:>
Description:
def add(self, payload=None):
""" Adds a new document to the data store and returns its id.
Args:
payload (dict): Dictionary of initial data that should be stored
in the new document in the meta section.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
str: The id of the newly created document.
""" |
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return str(col.insert_one({
DataStoreDocumentSection.Meta:
payload if isinstance(payload, dict) else {},
DataStoreDocumentSection.Data: {}
}).inserted_id)
except ConnectionFailure:
raise DataStoreNotConnected() |
<SYSTEM_TASK:>
Removes a document specified by its id from the data store.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, workflow_id):
""" Removes a document specified by its id from the data store.
All associated GridFs documents are deleted as well.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
""" |
try:
db = self._client[self.database]
fs = GridFSProxy(GridFS(db.unproxied_object))
for grid_doc in fs.find({"workflow_id": workflow_id},
no_cursor_timeout=True):
fs.delete(grid_doc._id)
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.delete_one({"_id": ObjectId(workflow_id)})
except ConnectionFailure:
raise DataStoreNotConnected() |
<SYSTEM_TASK:>
Returns the document for the given workflow id.
<END_TASK>
<USER_TASK:>
Description:
def get(self, workflow_id):
""" Returns the document for the given workflow id.
Args:
workflow_id (str): The id of the document that represents a workflow run.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
DataStoreDocument: The document for the given workflow id.
""" |
try:
db = self._client[self.database]
fs = GridFSProxy(GridFS(db.unproxied_object))
return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id)
except ConnectionFailure:
raise DataStoreNotConnected() |
<SYSTEM_TASK:>
Return the field specified by its key from the specified section.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
""" Return the field specified by its key from the specified section.
This method accesses the specified section of the workflow document and returns the
value for the given key.
Args:
key (str): The key pointing to the value that should be retrieved. It supports
MongoDB's dot notation for nested fields.
default: The default value that is returned if the key does not exist.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
object: The value from the field that the specified key is pointing to. If the
key does not exist, the default value is returned. If no default value
is provided and the key does not exist ``None`` is returned.
""" |
key_notation = '.'.join([section, key])
try:
return self._decode_value(self._data_from_dotnotation(key_notation, default))
except KeyError:
return default |
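For example, assuming the data section holds {'results': {'score': 0.93}} (`doc` stands for a connected DataStoreDocument; a sketch, not output from the source):

score = doc.get('results.score')             # -> 0.93
rank = doc.get('results.rank', default=-1)   # -> -1, since the key is absent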
<SYSTEM_TASK:>
Store a value under the specified key in the given section of the document.
<END_TASK>
<USER_TASK:>
Description:
def set(self, key, value, *, section=DataStoreDocumentSection.Data):
""" Store a value under the specified key in the given section of the document.
This method stores a value into the specified section of the workflow data store
document. Any existing value is overridden. Before storing a value, any linked
GridFS document under the specified key is deleted.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be stored/updated.
section (DataStoreDocumentSection): The section in which the value should
be stored.
Returns:
bool: ``True`` if the value could be set/updated, otherwise ``False``.
""" |
key_notation = '.'.join([section, key])
try:
self._delete_gridfs_data(self._data_from_dotnotation(key_notation,
default=None))
except KeyError:
logger.info('Adding new field {} to the data store'.format(key_notation))
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$set": {
key_notation: self._encode_value(value)
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 |
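A usage sketch. Because the return value checks ``modified_count``, setting a key to its current value returns ``False`` even though the document is intact (an assumption based on MongoDB's update semantics):

if not doc.set('results.score', 0.95):   # overwrites and drops linked GridFS data
    logger.warning('results.score was not modified')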
<SYSTEM_TASK:>
Appends a value to a list in the specified section of the document.
<END_TASK>
<USER_TASK:>
Description:
def push(self, key, value, *, section=DataStoreDocumentSection.Data):
""" Appends a value to a list in the specified section of the document.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be appended to a list in the data store.
section (DataStoreDocumentSection): The section to which the value should
be appended.
Returns:
bool: ``True`` if the value could be appended, otherwise ``False``.
""" |
key_notation = '.'.join([section, key])
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$push": {
key_notation: self._encode_value(value)
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 |
<SYSTEM_TASK:>
Extends a list in the data store with the elements of values.
<END_TASK>
<USER_TASK:>
Description:
def extend(self, key, values, *, section=DataStoreDocumentSection.Data):
""" Extends a list in the data store with the elements of values.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
values (list): A list of the values that should be used to extend the list
in the document.
section (DataStoreDocumentSection): The section in which the list should
be extended.
Returns:
bool: ``True`` if the list in the database could be extended,
otherwise ``False``.
""" |
key_notation = '.'.join([section, key])
if not isinstance(values, list):
return False
result = self._collection.update_one(
{"_id": ObjectId(self._workflow_id)},
{
"$push": {
key_notation: {"$each": self._encode_value(values)}
},
"$currentDate": {"lastModified": True}
}
)
return result.modified_count == 1 |
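The difference between push and extend in use (a sketch; `doc` stands for a connected DataStoreDocument):

doc.push('log', 'step 1 done')                      # appends one element
doc.extend('log', ['step 2 done', 'step 3 done'])   # appends each element via $each
doc.push('log', ['a', 'b'])   # appends the whole list as a single element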
<SYSTEM_TASK:>
Returns the MongoDB data from a key using dot notation.
<END_TASK>
<USER_TASK:>
Description:
def _data_from_dotnotation(self, key, default=None):
""" Returns the MongoDB data from a key using dot notation.
Args:
key (str): The key to the field in the workflow document. Supports MongoDB's
dot notation for embedded fields.
default (object): The default value that is returned if the key
does not exist.
Returns:
object: The data for the specified key or the default value.
""" |
if key is None:
raise KeyError('NoneType is not a valid key!')
doc = self._collection.find_one({"_id": ObjectId(self._workflow_id)})
if doc is None:
return default
for k in key.split('.'):
doc = doc[k]
return doc |
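The traversal itself is plain dictionary walking; a self-contained equivalent over an in-memory dict:

def walk_dotnotation(doc, key):
    """Walk a nested dict using MongoDB-style dot notation."""
    for k in key.split('.'):
        doc = doc[k]   # raises KeyError if any path segment is missing
    return doc

assert walk_dotnotation({'data': {'score': 0.93}}, 'data.score') == 0.93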
<SYSTEM_TASK:>
Encodes the value such that it can be stored into MongoDB.
<END_TASK>
<USER_TASK:>
Description:
def _encode_value(self, value):
""" Encodes the value such that it can be stored into MongoDB.
Any primitive types are stored directly into MongoDB, while non-primitive types
are pickled and stored as GridFS objects. The id pointing to a GridFS object
replaces the original value.
Args:
value (object): The object that should be encoded for storing in MongoDB.
Returns:
object: The encoded value ready to be stored in MongoDB.
""" |
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._encode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._encode_value(item)
return result
else:
return self._gridfs.put(Binary(pickle.dumps(value)),
workflow_id=self._workflow_id) |
<SYSTEM_TASK:>
Decodes the value by turning any binary data back into Python objects.
<END_TASK>
<USER_TASK:>
Description:
def _decode_value(self, value):
""" Decodes the value by turning any binary data back into Python objects.
The method searches for ObjectId values, loads the associated binary data from
GridFS and returns the decoded Python object.
Args:
value (object): The value that should be decoded.
Raises:
DataStoreGridfsIdInvalid: An ObjectId was found but the id is not a valid
GridFS id.
DataStoreDecodeUnknownType: The type of the specified value is unknown.
Returns:
object: The decoded value as a valid Python object.
""" |
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._decode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._decode_value(item)
return result
elif isinstance(value, ObjectId):
if self._gridfs.exists({"_id": value}):
return pickle.loads(self._gridfs.get(value).read())
else:
raise DataStoreGridfsIdInvalid()
else:
raise DataStoreDecodeUnknownType() |
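As a self-contained illustration of the encode/decode split, a sketch that only classifies values, mirroring the type checks above (the helper name is an assumption, not part of the source):

from datetime import datetime

PRIMITIVES = (int, float, str, bool, datetime)

def needs_gridfs(value):
    """True if any part of the value would be pickled into GridFS."""
    if isinstance(value, PRIMITIVES):
        return False
    if isinstance(value, list):
        return any(needs_gridfs(item) for item in value)
    if isinstance(value, dict):
        return any(needs_gridfs(item) for item in value.values())
    return True

assert needs_gridfs({'a': [1, 'x']}) is False   # stays inline in MongoDB
assert needs_gridfs({'a': {1, 2}}) is True      # a set would be pickled into GridFS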
<SYSTEM_TASK:>
Delete all GridFS data that is linked by fields in the specified data.
<END_TASK>
<USER_TASK:>
Description:
def _delete_gridfs_data(self, data):
""" Delete all GridFS data that is linked by fields in the specified data.
Args:
data: The data that is searched for MongoDB ObjectIds. The linked GridFS
object for any ObjectId found is deleted.
""" |
if isinstance(data, ObjectId):
if self._gridfs.exists({"_id": data}):
self._gridfs.delete(data)
else:
raise DataStoreGridfsIdInvalid()
elif isinstance(data, list):
for item in data:
self._delete_gridfs_data(item)
elif isinstance(data, dict):
for key, item in data.items():
self._delete_gridfs_data(item) |
<SYSTEM_TASK:>
truncate call until it corresponds to a Prefix in the database
<END_TASK>
<USER_TASK:>
Description:
def _iterate_prefix(self, callsign, timestamp=timestamp_now):
"""truncate call until it corresponds to a Prefix in the database""" |
prefix = callsign
if re.search('(VK|AX|VI)9[A-Z]{3}', callsign): #special rule for VK9 calls
if timestamp > datetime(2006,1,1, tzinfo=UTC):
prefix = callsign[0:3]+callsign[4:5]
while len(prefix) > 0:
try:
return self._lookuplib.lookup_prefix(prefix, timestamp)
except KeyError:
prefix = prefix.replace(' ', '')[:-1]
continue
raise KeyError |
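A simplified, self-contained version of the truncation loop (it omits the VK9 special case and the space stripping, and looks prefixes up in a plain dict instead of the lookup library):

def iterate_prefix(callsign, prefixes):
    """Shorten the callsign from the right until it matches a known prefix."""
    prefix = callsign
    while prefix:
        if prefix in prefixes:
            return prefixes[prefix]
        prefix = prefix[:-1]
    raise KeyError(callsign)

assert iterate_prefix('DH1TW', {'DH': 'Fed. Rep. of Germany'}) == 'Fed. Rep. of Germany'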
<SYSTEM_TASK:>
Lookup a callsign and return all data available from the underlying database
<END_TASK>
<USER_TASK:>
Description:
def get_all(self, callsign, timestamp=timestamp_now):
""" Lookup a callsign and return all data available from the underlying database
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the callsign specific data
Raises:
KeyError: Callsign could not be identified
Example:
The following code returns all available information from the country-files.com database for the
callsign "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_all("DH1TW")
{
'country': 'Fed. Rep. of Germany',
'adif': 230,
'continent': 'EU',
'latitude': 51.0,
'longitude': -10.0,
'cqz': 14,
'ituz': 28
}
Note:
The content of the returned data depends entirely on the injected
:py:class:`LookupLib` (and the used database). While the country-files.com provides
for example the ITU Zone, Clublog doesn't. Consequently, the item "ituz"
would be missing with Clublog (API or XML) :py:class:`LookupLib`.
""" |
callsign_data = self._lookup_callsign(callsign, timestamp)
try:
cqz = self._lookuplib.lookup_zone_exception(callsign, timestamp)
callsign_data[const.CQZ] = cqz
except KeyError:
pass
return callsign_data |
<SYSTEM_TASK:>
Returns Latitude and Longitude for a callsign
<END_TASK>
<USER_TASK:>
Description:
def get_lat_long(self, callsign, timestamp=timestamp_now):
""" Returns Latitude and Longitude for a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Containing Latitude and Longitude
Raises:
KeyError: No data found for callsign
Example:
The following code returns Latitude & Longitude for "DH1TW"
>>> from pyhamtools import LookupLib, Callinfo
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> cic = Callinfo(my_lookuplib)
>>> cic.get_lat_long("DH1TW")
{
'latitude': 51.0,
'longitude': -10.0
}
Note:
Unfortunately, in most cases the returned Latitude and Longitude are not very precise.
Clublog and Country-files.com use the country's capital coordinates in most cases, if no
dedicated entry in the database exists. Best results will be retrieved with QRZ.com Lookup.
""" |
callsign_data = self.get_all(callsign, timestamp=timestamp)
return {
const.LATITUDE: callsign_data[const.LATITUDE],
const.LONGITUDE: callsign_data[const.LONGITUDE]
} |
<SYSTEM_TASK:>
Returns CQ Zone of a callsign
<END_TASK>
<USER_TASK:>
Description:
def get_cqz(self, callsign, timestamp=timestamp_now):
""" Returns CQ Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's CQ Zone
Raises:
KeyError: no CQ Zone found for callsign
""" |
return self.get_all(callsign, timestamp)[const.CQZ] |
<SYSTEM_TASK:>
Returns the country name where the callsign is located
<END_TASK>
<USER_TASK:>
Description:
def get_country_name(self, callsign, timestamp=timestamp_now):
""" Returns the country name where the callsign is located
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: name of the Country
Raises:
KeyError: No Country found for callsign
Note:
Don't rely on the country name when working with several instances of
:py:class:`Callinfo`. Clublog and Country-files.com use slightly different names
for countries. Example:
- Country-files.com: "Fed. Rep. of Germany"
- Clublog: "FEDERAL REPUBLIC OF GERMANY"
""" |
return self.get_all(callsign, timestamp)[const.COUNTRY] |
<SYSTEM_TASK:>
Returns the continent Identifier of a callsign
<END_TASK>
<USER_TASK:>
Description:
def get_continent(self, callsign, timestamp=timestamp_now):
""" Returns the continent Identifier of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
str: continent identifier
Raises:
KeyError: No Continent found for callsign
Note:
The following continent identifiers are used:
- EU: Europe
- NA: North America
- SA: South America
- AS: Asia
- AF: Africa
- OC: Oceania
- AN: Antarctica
""" |
return self.get_all(callsign, timestamp)[const.CONTINENT] |
<SYSTEM_TASK:>
Returns the indices for all occurrences of 'element' in 'lst'.
<END_TASK>
<USER_TASK:>
Description:
def find_indices(lst, element):
""" Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices at which 'element' occurs; empty if not found.
""" |
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset) |
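For example, searching a list with repeated elements:

assert find_indices(['a', 'b', 'a', 'c', 'a'], 'a') == [0, 2, 4]
assert find_indices(['a', 'b'], 'z') == []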
<SYSTEM_TASK:>
Create a workflow object from a workflow script.
<END_TASK>
<USER_TASK:>
Description:
def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, arguments=None):
""" Create a workflow object from a workflow script.
Args:
name (str): The name of the workflow script.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
Returns:
Workflow: A fully initialised workflow object
""" |
new_workflow = cls(queue=queue, clear_data_store=clear_data_store)
new_workflow.load(name, arguments=arguments)
return new_workflow |
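A usage sketch; the class name ``Workflow`` and the script name are assumptions for illustration:

# Hypothetical usage: 'my_workflow' must be an importable workflow script.
workflow = Workflow.from_name('my_workflow',
                              clear_data_store=False,
                              arguments={'input_path': '/tmp/data'})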
<SYSTEM_TASK:>
Import the workflow script and load all known objects.
<END_TASK>
<USER_TASK:>
Description:
def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False):
""" Import the workflow script and load all known objects.
The workflow script is treated like a module and imported
into the Python namespace. After the import, the method looks
for instances of known classes and stores a reference for further
use in the workflow object.
Args:
name (str): The name of the workflow script.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
validate_arguments (bool): Whether to check that all required arguments have
been supplied.
strict_dag (bool): If True, the loaded workflow module must contain an
instance of Dag.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set that
were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails.
""" |
arguments = {} if arguments is None else arguments
try:
workflow_module = importlib.import_module(name)
dag_present = False
# extract objects of specific types from the workflow module
for key, obj in workflow_module.__dict__.items():
if isinstance(obj, Dag):
self._dags_blueprint[obj.name] = obj
dag_present = True
elif isinstance(obj, Parameters):
self._parameters.extend(obj)
self._name = name
self._docstring = inspect.getdoc(workflow_module)
del sys.modules[name]
if strict_dag and not dag_present:
raise WorkflowImportError(
'Workflow {} does not include a Dag'.format(name))
if validate_arguments:
missing_parameters = self._parameters.check_missing(arguments)
if len(missing_parameters) > 0:
raise WorkflowArgumentError(
'The following parameters are required ' +
'by the workflow, but are missing: {}'.format(
', '.join(missing_parameters)))
self._provided_arguments = arguments
except (TypeError, ImportError):
logger.error('Cannot import workflow {}'.format(name))
raise WorkflowImportError('Cannot import workflow {}'.format(name)) |
<SYSTEM_TASK:>
Run all autostart dags in the workflow.
<END_TASK>
<USER_TASK:>
Description:
def run(self, config, data_store, signal_server, workflow_id):
""" Run all autostart dags in the workflow.
Only the dags that are flagged as autostart are started.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
data_store (DataStore): A DataStore object that is fully initialised and
connected to the persistent data storage.
signal_server (Server): A signal Server object that receives requests
from dags and tasks.
workflow_id (str): A unique workflow id that represents this workflow run
""" |
self._workflow_id = workflow_id
self._celery_app = create_app(config)
# pre-fill the data store with supplied arguments
args = self._parameters.consolidate(self._provided_arguments)
for key, value in args.items():
data_store.get(self._workflow_id).set(key, value)
# start all dags with the autostart flag set to True
for name, dag in self._dags_blueprint.items():
if dag.autostart:
self._queue_dag(name)
# as long as there are dags in the list keep running
while self._dags_running:
if config.workflow_polling_time > 0.0:
sleep(config.workflow_polling_time)
# handle new requests from dags, tasks and the library (e.g. cli, web)
for i in range(MAX_SIGNAL_REQUESTS):
request = signal_server.receive()
if request is None:
break
try:
response = self._handle_request(request)
if response is not None:
signal_server.send(response)
else:
signal_server.restore(request)
except (RequestActionUnknown, RequestFailed):
signal_server.send(Response(success=False, uid=request.uid))
# remove any dags and their result data that finished running
for name, dag in list(self._dags_running.items()):
if dag.ready():
if self._celery_app.conf.result_expires == 0:
dag.forget()
del self._dags_running[name]
elif dag.failed():
self._stop_workflow = True
# remove the signal entry
signal_server.clear()
# delete all entries in the data_store under this workflow id, if requested
if self._clear_data_store:
data_store.remove(self._workflow_id) |
<SYSTEM_TASK:>
Add a new dag to the queue.
<END_TASK>
<USER_TASK:>
Description:
def _queue_dag(self, name, *, data=None):
""" Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag, or None if the stop workflow flag is set.
""" |
if self._stop_workflow:
return None
if name not in self._dags_blueprint:
raise DagNameUnknown()
new_dag = copy.deepcopy(self._dags_blueprint[name])
new_dag.workflow_name = self.name
self._dags_running[new_dag.name] = self._celery_app.send_task(
JobExecPath.Dag, args=(new_dag, self._workflow_id, data),
queue=new_dag.queue, routing_key=new_dag.queue)
return new_dag.name |
<SYSTEM_TASK:>
Handle an incoming request by forwarding it to the appropriate method.
<END_TASK>
<USER_TASK:>
Description:
def _handle_request(self, request):
""" Handle an incoming request by forwarding it to the appropriate method.
Args:
request (Request): Reference to a request object containing the
incoming request.
Raises:
RequestActionUnknown: If the action specified in the request is not known.
Returns:
Response: A response object containing the response from the method handling
the request.
""" |
if request is None:
return Response(success=False, uid=None)  # request is None, so it carries no uid
action_map = {
'start_dag': self._handle_start_dag,
'stop_workflow': self._handle_stop_workflow,
'join_dags': self._handle_join_dags,
'stop_dag': self._handle_stop_dag,
'is_dag_stopped': self._handle_is_dag_stopped
}
if request.action in action_map:
return action_map[request.action](request)
else:
raise RequestActionUnknown() |
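The dispatch-table pattern used here generalizes well; a minimal self-contained version (names are illustrative):

def dispatch(action, handlers, request):
    """Route an action string to its handler; raise for unknown actions."""
    try:
        handler = handlers[action]
    except KeyError:
        raise KeyError('unknown action: {}'.format(action))
    return handler(request)

assert dispatch('ping', {'ping': lambda req: 'pong'}, request=None) == 'pong'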
<SYSTEM_TASK:>
The handler for the start_dag request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_start_dag(self, request):
""" The handler for the start_dag request.
The start_dag request creates a new dag and adds it to the queue.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be started
'data': the data that is passed onto the start tasks
Returns:
Response: A response object containing the following fields:
- dag_name: The name of the started dag.
""" |
dag_name = self._queue_dag(name=request.payload['name'],
data=request.payload['data'])
return Response(success=dag_name is not None, uid=request.uid,
payload={'dag_name': dag_name}) |
<SYSTEM_TASK:>
The handler for the stop_workflow request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_stop_workflow(self, request):
""" The handler for the stop_workflow request.
The stop_workflow request adds all running dags to the list of dags
that should be stopped and prevents new dags from being started. The dags will
then stop queueing new tasks, which will terminate the dags and in turn the
workflow.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if the dags were added successfully to the list
of dags that should be stopped.
""" |
self._stop_workflow = True
for name, dag in self._dags_running.items():
if name not in self._stop_dags:
self._stop_dags.append(name)
return Response(success=True, uid=request.uid) |