| text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable) |
|---|---|
<SYSTEM_TASK:>
Import a module with an optional yaml config file, but only if we haven't
<END_TASK>
<USER_TASK:>
Description:
def module_cache_get(cache, module):
"""
Import a module with an optional yaml config file, but only if we haven't
imported it already.
:param cache: object which holds information on which modules and config
files have been loaded and whether config files should be
loaded.
:param module: the path of the module to load.
:returns: the loaded module.
""" |
if getattr(cache, "config", False):
config_file = module[:-2] + "yaml"
if config_file not in cache.config_files and os.path.exists(config_file):
try:
config = yaml_safe_load(config_file, type=dict)
except TypeError as e:
tangelo.log_warning("TANGELO", "Bad configuration in file %s: %s" % (config_file, e))
raise
except IOError:
tangelo.log_warning("TANGELO", "Could not open config file %s" % (config_file))
raise
except ValueError as e:
tangelo.log_warning("TANGELO", "Error reading config file %s: %s" % (config_file, e))
raise
cache.config_files[config_file] = True
else:
config = {}
cherrypy.config["module-config"][module] = config
cherrypy.config["module-store"].setdefault(module, {})
# If two threads are importing the same module nearly concurrently, we
# could load it twice unless we use the import lock.
imp.acquire_lock()
try:
if module not in cache.modules:
name = module[:-3]
# load the module.
service = imp.load_source(name, module)
cache.modules[module] = service
else:
service = cache.modules[module]
finally:
imp.release_lock()
return service |
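A minimal usage sketch, assuming a cache object exposing the three attributes the function reads (the `ModuleCache` class name and the module path are hypothetical):

```python
# Hypothetical cache holder; attribute names mirror the accesses above.
class ModuleCache(object):
    def __init__(self, config=True):
        self.config = config      # whether to load yaml config files
        self.config_files = {}    # config file paths already loaded
        self.modules = {}         # module path -> loaded module object

cache = ModuleCache()
service = module_cache_get(cache, "web/myservice.py")        # imports once
service_again = module_cache_get(cache, "web/myservice.py")  # cache hit
```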
<SYSTEM_TASK:>
Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.
samtools will look for a magic EOF marker, just a 28 byte empty BGZF
block, and if it is missing warns the BAM file may be truncated. In
addition to samtools writing this block, so too does bgzip - so this
implementation does too.
""" |
if self._buffer:
self.flush()
self._handle.write(_bgzf_eof)
self._handle.flush()
self._handle.close() |
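For context, `_bgzf_eof` is a fixed 28-byte constant: an empty BGZF block as given in the SAM/BAM specification. The bytes below match Biopython's definition and are shown as a reference sketch:

```python
# Empty BGZF block: gzip header with the "BC" extra subfield, an empty
# deflate payload, and the CRC32/ISIZE trailer -- 28 bytes in total.
_bgzf_eof = (
    b"\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00\x42\x43"
    b"\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
assert len(_bgzf_eof) == 28
```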
<SYSTEM_TASK:>
Check if secret key to encrypt sessions exists,
<END_TASK>
<USER_TASK:>
Description:
def ensure_secret():
"""Check if secret key to encryot sessions exists,
generate it otherwise.""" |
home_dir = os.environ['HOME']
file_name = home_dir + "/.ipcamweb"
if os.path.exists(file_name):
with open(file_name, "r") as s_file:
secret = s_file.readline()
else:
secret = os.urandom(24)
with open(file_name, "w") as s_file:
s_file.write(secret + "\n")
return secret |
<SYSTEM_TASK:>
Returns a list of screenshots
<END_TASK>
<USER_TASK:>
Description:
def list_snapshots_for_a_minute(path, cam_id, day, hourm):
"""Returns a list of screenshots""" |
screenshots_path = path+"/"+str(cam_id)+"/"+day+"/"+hourm
if os.path.exists(screenshots_path):
screenshots = sorted(os.listdir(screenshots_path))
return screenshots
else:
return [] |
<SYSTEM_TASK:>
Return affected start position in 0-based coordinates
<END_TASK>
<USER_TASK:>
Description:
def affected_start(self):
"""Return affected start position in 0-based coordinates
For SNVs, MNVs, and deletions, the behaviour is the start position.
In the case of insertions, the position behind the insert position is
returned, yielding a 0-length interval together with
:py:meth:`~Record.affected_end`
""" |
types = {alt.type for alt in self.ALT} # set!
BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others
if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:
# Only insertions, return 0-based position right of first base
return self.POS # right of first base
else: # Return 0-based start position of first REF base
return self.POS - 1 |
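A self-contained sketch of the coordinate rule above; `INS` and `SNV` here are stand-in strings for the `record` module's constants, and the helper is a simplified mirror for illustration only:

```python
INS, SNV = "INS", "SNV"  # stand-ins for the record module's constants

def affected_start_sketch(pos, alt_types):
    """Simplified mirror of the rule above (1-based pos in, 0-based out)."""
    if set(alt_types) == {INS}:  # only insertions
        return pos               # right of first base, zero-length interval
    return pos - 1               # 0-based start of the first REF base

assert affected_start_sketch(100, [SNV]) == 99   # SNV/MNV/deletion
assert affected_start_sketch(100, [INS]) == 100  # pure insertion
```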
<SYSTEM_TASK:>
Add label to FILTER if not set yet, removing ``PASS`` entry if
<END_TASK>
<USER_TASK:>
Description:
def add_filter(self, label):
"""Add label to FILTER if not set yet, removing ``PASS`` entry if
present
""" |
if label not in self.FILTER:
if "PASS" in self.FILTER:
self.FILTER = [f for f in self.FILTER if f != "PASS"]
self.FILTER.append(label) |
<SYSTEM_TASK:>
Add an entry to format
<END_TASK>
<USER_TASK:>
Description:
def add_format(self, key, value=None):
"""Add an entry to format
The record's calls ``data[key]`` will be set to ``value`` if not yet
set and value is not ``None``. If key is already in FORMAT then
nothing is done.
""" |
if key in self.FORMAT:
return
self.FORMAT.append(key)
if value is not None:
for call in self:
call.data.setdefault(key, value) |
<SYSTEM_TASK:>
Return ``True`` for filtered calls
<END_TASK>
<USER_TASK:>
Description:
def is_filtered(self, require=None, ignore=None):
"""Return ``True`` for filtered calls
:param iterable ignore: if set, the filters to ignore; make sure to
include 'PASS' when setting it. Default is ``['PASS']``
:param iterable require: if set, the filters to require for returning
``True``
""" |
ignore = ignore or ["PASS"]
if "FT" not in self.data or not self.data["FT"]:
return False
for ft in self.data["FT"]:
if ft in ignore:
continue # skip
if not require:
return True
elif ft in require:
return True
return False |
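An illustrative sketch; `_Call` is a hypothetical stand-in that reuses the function above as a method, exposing `data['FT']` like a real call object:

```python
class _Call(object):
    """Hypothetical stand-in exposing data['FT'] like a real Call."""
    is_filtered = is_filtered  # reuse the function above as a method

    def __init__(self, ft):
        self.data = {"FT": ft}

assert _Call(["PASS"]).is_filtered() is False                 # PASS ignored
assert _Call(["q10"]).is_filtered() is True                   # other filter set
assert _Call(["q10"]).is_filtered(require=["s50"]) is False   # q10 not required
```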
<SYSTEM_TASK:>
Split the ``str`` in ``pair_str`` at ``'='``
<END_TASK>
<USER_TASK:>
Description:
def split_mapping(pair_str):
"""Split the ``str`` in ``pair_str`` at ``'='``
Warn if key needs to be stripped
""" |
orig_key, value = pair_str.split("=", 1)
key = orig_key.strip()
if key != orig_key:
warnings.warn(
"Mapping key {} has leading or trailing space".format(repr(orig_key)),
LeadingTrailingSpaceInKey,
)
return key, value |
<SYSTEM_TASK:>
Parse the given VCF header line mapping
<END_TASK>
<USER_TASK:>
Description:
def parse_mapping(value):
"""Parse the given VCF header line mapping
Such a mapping consists of "key=value" pairs, separated by commas and
wrapped into angular brackets ("<...>"). Strings are usually quoted;
for certain known keys, exceptions are made, depending on the tag key.
This, however, only becomes important when serializing.
:raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if
there was a problem parsing the file
""" |
if not value.startswith("<") or not value.endswith(">"):
raise exceptions.InvalidHeaderException(
"Header mapping value was not wrapped in angular brackets"
)
# split the comma-separated list into pairs, ignoring commas in quotes
pairs = split_quoted_string(value[1:-1], delim=",", quote='"')
# split these pairs into key/value pairs, converting flags to mappings
# to True
key_values = []
for pair in pairs:
if "=" in pair:
key, value = split_mapping(pair)
if value.startswith('"') and value.endswith('"'):
value = ast.literal_eval(value)
elif value.startswith("[") and value.endswith("]"):
value = [v.strip() for v in value[1:-1].split(",")]
else:
key, value = pair, True
key_values.append((key, value))
# return completely parsed mapping as OrderedDict
return OrderedDict(key_values) |
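For example, a typical INFO definition parses as follows; values stay strings, and quoted strings are unquoted via `ast.literal_eval`:

```python
line = '<ID=DP,Number=1,Type=Integer,Description="Total Depth">'
mapping = parse_mapping(line)
# OrderedDict([('ID', 'DP'), ('Number', '1'), ('Type', 'Integer'),
#              ('Description', 'Total Depth')])
```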
<SYSTEM_TASK:>
Return mapping for parsers to use for each VCF header type
<END_TASK>
<USER_TASK:>
Description:
def build_header_parsers():
"""Return mapping for parsers to use for each VCF header type
Inject the WarningHelper into the parsers.
""" |
result = {
"ALT": MappingHeaderLineParser(header.AltAlleleHeaderLine),
"contig": MappingHeaderLineParser(header.ContigHeaderLine),
"FILTER": MappingHeaderLineParser(header.FilterHeaderLine),
"FORMAT": MappingHeaderLineParser(header.FormatHeaderLine),
"INFO": MappingHeaderLineParser(header.InfoHeaderLine),
"META": MappingHeaderLineParser(header.MetaHeaderLine),
"PEDIGREE": MappingHeaderLineParser(header.PedigreeHeaderLine),
"SAMPLE": MappingHeaderLineParser(header.SampleHeaderLine),
"__default__": StupidHeaderLineParser(), # fallback
}
return result |
<SYSTEM_TASK:>
Convert atomic field value according to the type
<END_TASK>
<USER_TASK:>
Description:
def convert_field_value(type_, value):
"""Convert atomic field value according to the type""" |
if value == ".":
return None
elif type_ in ("Character", "String"):
if "%" in value:
for k, v in record.UNESCAPE_MAPPING:
value = value.replace(k, v)
return value
else:
try:
return _CONVERTERS[type_](value)
except ValueError:
warnings.warn(
("{} cannot be converted to {}, keeping as " "string.").format(value, type_),
CannotConvertValue,
)
return value |
<SYSTEM_TASK:>
Parse ``value`` according to ``field_info``
<END_TASK>
<USER_TASK:>
Description:
def parse_field_value(field_info, value):
"""Parse ``value`` according to ``field_info``
""" |
if field_info.id == "FT":
return [x for x in value.split(";") if x != "."]
elif field_info.type == "Flag":
return True
elif field_info.number == 1:
return convert_field_value(field_info.type, value)
else:
if value == ".":
return []
else:
return [convert_field_value(field_info.type, x) for x in value.split(",")] |
<SYSTEM_TASK:>
Parse breakend and return tuple with results, parameters for BreakEnd
<END_TASK>
<USER_TASK:>
Description:
def parse_breakend(alt_str):
"""Parse breakend and return tuple with results, parameters for BreakEnd
constructor
""" |
arr = BREAKEND_PATTERN.split(alt_str)
mate_chrom, mate_pos = arr[1].split(":", 1)
mate_pos = int(mate_pos)
if mate_chrom[0] == "<":
mate_chrom = mate_chrom[1:-1]
within_main_assembly = False
else:
within_main_assembly = True
FWD_REV = {True: record.FORWARD, False: record.REVERSE}
orientation = FWD_REV[alt_str[0] == "[" or alt_str[0] == "]"]
mate_orientation = FWD_REV["[" in alt_str]
if orientation == record.FORWARD:
sequence = arr[2]
else:
sequence = arr[0]
return (mate_chrom, mate_pos, orientation, mate_orientation, sequence, within_main_assembly) |
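A worked example using breakend notation from the VCF spec; the orientation values come from the `record` module's constants, so only the directly derivable fields are asserted here:

```python
# ALT "G]17:198982]": mate is chr17:198982 on the main assembly, and the
# REF-anchored base "G" precedes the bracketed mate description.
result = parse_breakend("G]17:198982]")
mate_chrom, mate_pos, orientation, mate_orientation, seq, in_main = result
assert (mate_chrom, mate_pos, seq, in_main) == ("17", 198982, "G", True)
```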
<SYSTEM_TASK:>
Process substitution where the string grows
<END_TASK>
<USER_TASK:>
Description:
def process_sub_grow(ref, alt_str):
"""Process substution where the string grows""" |
if len(alt_str) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty ALT")
elif len(alt_str) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.DEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str) |
<SYSTEM_TASK:>
Process substitution where the string shrinks
<END_TASK>
<USER_TASK:>
Description:
def process_sub_shrink(ref, alt_str):
"""Process substution where the string shrink""" |
if len(ref) == 0:
raise exceptions.InvalidRecordException("Invalid VCF, empty REF")
elif len(ref) == 1:
if ref[0] == alt_str[0]:
return record.Substitution(record.INS, alt_str)
else:
return record.Substitution(record.INDEL, alt_str)
else:
return record.Substitution(record.INDEL, alt_str) |
<SYSTEM_TASK:>
Process alternative value using Header in ``header``
<END_TASK>
<USER_TASK:>
Description:
def process_alt(header, ref, alt_str): # pylint: disable=W0613
"""Process alternative value using Header in ``header``""" |
# By its nature, this function contains a large number of case distinctions
if "]" in alt_str or "[" in alt_str:
return record.BreakEnd(*parse_breakend(alt_str))
elif alt_str[0] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.FORWARD, alt_str[1:])
elif alt_str[-1] == "." and len(alt_str) > 0:
return record.SingleBreakEnd(record.REVERSE, alt_str[:-1])
elif alt_str[0] == "<" and alt_str[-1] == ">":
inner = alt_str[1:-1]
return record.SymbolicAllele(inner)
else: # substitution
return process_sub(ref, alt_str) |
<SYSTEM_TASK:>
Split string ``s`` at delimiter, correctly interpreting quotes
<END_TASK>
<USER_TASK:>
Description:
def run(self, s):
"""Split string ``s`` at delimiter, correctly interpreting quotes
Further, interprets arrays wrapped in one level of ``[]``. No
recursive brackets are interpreted (as this would make the grammar
non-regular and currently this complexity is not needed). Currently,
quoting inside of braces is not supported either. This is just to
support the example from VCF v4.3.
""" |
begins, ends = [0], []
# transition table
DISPATCH = {
self.NORMAL: self._handle_normal,
self.QUOTED: self._handle_quoted,
self.ARRAY: self._handle_array,
self.DELIM: self._handle_delim,
self.ESCAPED: self._handle_escaped,
}
# run state automaton
state = self.NORMAL
for pos, c in enumerate(s):
state = DISPATCH[state](c, pos, begins, ends)
ends.append(len(s))
assert len(begins) == len(ends)
# Build resulting list
return [s[start:end] for start, end in zip(begins, ends)] |
<SYSTEM_TASK:>
Handle FORMAT and calls columns, factored out of parse_line
<END_TASK>
<USER_TASK:>
Description:
def _handle_calls(self, alts, format_, format_str, arr):
"""Handle FORMAT and calls columns, factored out of parse_line""" |
if format_str not in self._format_cache:
self._format_cache[format_str] = list(map(self.header.get_format_field_info, format_))
# per-sample calls
calls = []
for sample, raw_data in zip(self.samples.names, arr[9:]):
if self.samples.is_parsed(sample):
data = self._parse_calls_data(format_, self._format_cache[format_str], raw_data)
call = record.Call(sample, data)
self._format_checker.run(call, len(alts))
self._check_filters(call.data.get("FT"), "FORMAT/FT", call.sample)
calls.append(call)
else:
calls.append(record.UnparsedCall(sample, raw_data))
return calls |
<SYSTEM_TASK:>
Split line and check number of columns
<END_TASK>
<USER_TASK:>
Description:
def _split_line(self, line_str):
"""Split line and check number of columns""" |
arr = line_str.rstrip().split("\t")
if len(arr) != self.expected_fields:
raise exceptions.InvalidRecordException(
(
"The line contains an invalid number of fields. Was "
"{} but expected {}\n{}".format(len(arr), 9 + len(self.samples.names), line_str)
)
)
return arr |
<SYSTEM_TASK:>
Parse genotype call information from arrays using format array
<END_TASK>
<USER_TASK:>
Description:
def _parse_calls_data(klass, format_, infos, gt_str):
"""Parse genotype call information from arrays using format array
:param list format_: list of strings with format names
:param str gt_str: string with genotype information values
""" |
data = OrderedDict()
# The standard is very nice to parsers, we can simply split at
# colon characters, although I (Manuel) don't know how strict
# programs follow this
for key, info, value in zip(format_, infos, gt_str.split(":")):
data[key] = parse_field_value(info, value)
return data |
<SYSTEM_TASK:>
Check ``FORMAT`` of a record.Call
<END_TASK>
<USER_TASK:>
Description:
def run(self, call, num_alts):
"""Check ``FORMAT`` of a record.Call
Currently, only checks for consistent counts are implemented
""" |
for key, value in call.data.items():
self._check_count(call, key, value, num_alts) |
<SYSTEM_TASK:>
Read next line store in self._line and return old one
<END_TASK>
<USER_TASK:>
Description:
def _read_next_line(self):
"""Read next line store in self._line and return old one""" |
prev_line = self._line
self._line = self.stream.readline()
return prev_line |
<SYSTEM_TASK:>
Perform additional check on samples line
<END_TASK>
<USER_TASK:>
Description:
def _check_samples_line(klass, arr):
"""Peform additional check on samples line""" |
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
"Sample header line indicates no sample but does not "
"equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
)
elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
'Sample header line (starting with "#CHROM") does not '
"start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
) |
<SYSTEM_TASK:>
Get a snapshot and save it to disk.
<END_TASK>
<USER_TASK:>
Description:
def snap(self, path=None):
"""Get a snapshot and save it to disk.""" |
if path is None:
path = "/tmp"
else:
path = path.rstrip("/")
day_dir = datetime.datetime.now().strftime("%d%m%Y")
hour_dir = datetime.datetime.now().strftime("%H%M")
ensure_snapshot_dir(path+"/"+self.cam_id+"/"+day_dir+"/"+hour_dir)
f_path = "{0}/{1}/{2}/{3}/{4}.jpg".format(
path,
self.cam_id,
day_dir,
hour_dir,
datetime.datetime.now().strftime("%S"),
)
urllib.urlretrieve(
'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(
self.address,
self.user,
self.pswd,
),
f_path,
) |
<SYSTEM_TASK:>
Jump to the start position of the given chromosomal position
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, chrom_or_region, begin=None, end=None):
"""Jump to the start position of the given chromosomal position
and limit iteration to the end position
:param str chrom_or_region: name of the chromosome to jump to if
begin and end are given and a samtools region string otherwise
(e.g. "chr1:123,456-123,900").
:param int begin: 0-based begin position (inclusive)
:param int end: 0-based end position (exclusive)
""" |
if (begin is None) != (end is None):
raise ValueError("begin and end must both be None or neither")
# close the tabix file if one is currently open
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
# open tabix file if not yet open
if not self.tabix_file or self.tabix_file.closed:
self.tabix_file = pysam.TabixFile(filename=self.path, index=self.tabix_path)
# jump to the next position
if begin is None:
self.tabix_iter = self.tabix_file.fetch(region=chrom_or_region)
else:
self.tabix_iter = self.tabix_file.fetch(reference=chrom_or_region, start=begin, end=end)
return self |
<SYSTEM_TASK:>
Close underlying stream
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close underlying stream""" |
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
if self.stream:
self.stream.close() |
<SYSTEM_TASK:>
Serialize value for the given mapping key for a VCF header line
<END_TASK>
<USER_TASK:>
Description:
def serialize_for_header(key, value):
"""Serialize value for the given mapping key for a VCF header line""" |
if key in QUOTE_FIELDS:
return json.dumps(value)
elif isinstance(value, str):
if " " in value or "\t" in value:
return json.dumps(value)
else:
return value
elif isinstance(value, list):
return "[{}]".format(", ".join(value))
else:
return str(value) |
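Some illustrative inputs and outputs, assuming `QUOTE_FIELDS` contains keys such as `Description` (as is typical for VCF header serialization):

```python
serialize_for_header("Description", "Total Depth")  # '"Total Depth"' (quoted)
serialize_for_header("ID", "DP")                    # 'DP' (no whitespace)
serialize_for_header("Number", 1)                   # '1' (str() fallback)
serialize_for_header("Values", ["a", "b"])          # '[a, b]' (list join)
```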
<SYSTEM_TASK:>
Build indices for the different field types
<END_TASK>
<USER_TASK:>
Description:
def _build_indices(self):
"""Build indices for the different field types""" |
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result |
<SYSTEM_TASK:>
Return header lines having the given ``key`` as their type
<END_TASK>
<USER_TASK:>
Description:
def get_lines(self, key):
"""Return header lines having the given ``key`` as their type""" |
if key in self._indices:
return self._indices[key].values()
else:
return [] |
<SYSTEM_TASK:>
Return whether there is a header line with the given ID of the
<END_TASK>
<USER_TASK:>
Description:
def has_header_line(self, key, id_):
"""Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare against
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``.
""" |
if key not in self._indices:
return False
else:
return id_ in self._indices[key] |
<SYSTEM_TASK:>
Add header line, updating any necessary support indices
<END_TASK>
<USER_TASK:>
Description:
def add_line(self, header_line):
"""Add header line, updating any necessary support indices
:return: ``False`` on conflicting line and ``True`` otherwise
""" |
self.lines.append(header_line)
self._indices.setdefault(header_line.key, OrderedDict())
if not hasattr(header_line, "mapping"):
return False # no registration required
if self.has_header_line(header_line.key, header_line.mapping["ID"]):
warnings.warn(
(
"Detected duplicate header line with type {} and ID {}. "
"Ignoring this and subsequent one"
).format(header_line.key, header_line.mapping["ID"]),
DuplicateHeaderLineWarning,
)
return False
else:
self._indices[header_line.key][header_line.mapping["ID"]] = header_line
return True |
<SYSTEM_TASK:>
Create a switch.
<END_TASK>
<USER_TASK:>
Description:
def create_switch(type, settings, pin):
"""Create a switch.
Args:
type: (str): type of the switch [A,B,C,D]
settings (str): a comma separted list
pin (int): wiringPi pin
Returns:
switch
""" |
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print "Type %s is not supported!" % type
sys.exit()
switch.enableTransmit(pin)
return switch |
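A hypothetical usage sketch; pin numbers follow wiringPi numbering, and `switchOn`/`switchOff` are assumed from the pi_switch/rc-switch API:

```python
# Type B switch: address group 1, channel 3, transmitting on wiringPi pin 0.
switch = create_switch("B", "1,3", 0)
switch.switchOn()
switch.switchOff()
```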
<SYSTEM_TASK:>
Format atomic value
<END_TASK>
<USER_TASK:>
Description:
def format_atomic(value):
"""Format atomic value
This function also takes care of escaping the value in case one of the
reserved characters occurs in the value.
""" |
# Perform escaping
if isinstance(value, str):
if any(r in value for r in record.RESERVED_CHARS):
for k, v in record.ESCAPE_MAPPING:
value = value.replace(k, v)
# String-format the given value
if value is None:
return "."
else:
return str(value) |
<SYSTEM_TASK:>
Format possibly compound value given the FieldInfo
<END_TASK>
<USER_TASK:>
Description:
def format_value(field_info, value, section):
"""Format possibly compound value given the FieldInfo""" |
if section == "FORMAT" and field_info.id == "FT":
if not value:
return "."
elif isinstance(value, list):
return ";".join(map(format_atomic, value))
elif field_info.number == 1:
if value is None:
return "."
else:
return format_atomic(value)
else:
if not value:
return "."
else:
return ",".join(map(format_atomic, value)) |
<SYSTEM_TASK:>
Serialize whole Record
<END_TASK>
<USER_TASK:>
Description:
def _serialize_record(self, record):
"""Serialize whole Record""" |
f = self._empty_to_dot
row = [record.CHROM, record.POS]
row.append(f(";".join(record.ID)))
row.append(f(record.REF))
if not record.ALT:
row.append(".")
else:
row.append(",".join([f(a.serialize()) for a in record.ALT]))
row.append(f(record.QUAL))
row.append(f(";".join(record.FILTER)))
row.append(f(self._serialize_info(record)))
if record.FORMAT:
row.append(":".join(record.FORMAT))
row += [
self._serialize_call(record.FORMAT, record.call_for_sample[s])
for s in self.header.samples.names
]
print(*row, sep="\t", file=self.stream) |
<SYSTEM_TASK:>
Return serialized version of record.INFO
<END_TASK>
<USER_TASK:>
Description:
def _serialize_info(self, record):
"""Return serialized version of record.INFO""" |
result = []
for key, value in record.INFO.items():
info = self.header.get_info_field_info(key)
if info.type == "Flag":
result.append(key)
else:
result.append("{}={}".format(key, format_value(info, value, "INFO")))
return ";".join(result) |
<SYSTEM_TASK:>
Return serialized version of the Call using the record's FORMAT
<END_TASK>
<USER_TASK:>
Description:
def _serialize_call(self, format_, call):
"""Return serialized version of the Call using the record's FORMAT'""" |
if isinstance(call, record.UnparsedCall):
return call.unparsed_data
else:
result = [
format_value(self.header.get_format_field_info(key), call.data.get(key), "FORMAT")
for key in format_
]
return ":".join(result) |
<SYSTEM_TASK:>
Loops through the nodes and looks for special jinja tags that
<END_TASK>
<USER_TASK:>
Description:
def _create_extended_jinja_tags(self, nodes):
"""Loops through the nodes and looks for special jinja tags that
contains more than one tag but only one ending tag.""" |
jinja_a = None
jinja_b = None
ext_node = None
ext_nodes = []
for node in nodes:
if isinstance(node, EmptyLine):
continue
if node.has_children():
node.children = self._create_extended_jinja_tags(node.children)
if not isinstance(node, JinjaTag):
jinja_a = None
continue
if jinja_a is None or (
node.tag_name in self._extended_tags and jinja_a.tag_name not in self._extended_tags[node.tag_name]):
jinja_a = node
continue
if node.tag_name in self._extended_tags and \
jinja_a.tag_name in self._extended_tags[node.tag_name]:
if ext_node is None:
ext_node = ExtendedJinjaTag()
ext_node.add(jinja_a)
ext_nodes.append(ext_node)
ext_node.add(node)
else:
ext_node = None
jinja_a = node
# replace the nodes with the new extended node
for node in ext_nodes:
nodes.insert(nodes.index(node.children[0]), node)
index = nodes.index(node.children[0])
del nodes[index:index+len(node.children)]
return nodes |
<SYSTEM_TASK:>
Get screenshots from all cams at a defined interval.
<END_TASK>
<USER_TASK:>
Description:
def watch(cams, path=None, delay=10):
"""Get screenshots from all cams at defined intervall.""" |
while True:
for c in cams:
c.snap(path)
time.sleep(delay) |
<SYSTEM_TASK:>
Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine
<END_TASK>
<USER_TASK:>
Description:
def score(self, phone_number, account_lifecycle_event, **params):
"""
Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine
learning, and a global data consortium.
See https://developer.telesign.com/docs/score-api for detailed API documentation.
""" |
return self.post(SCORE_RESOURCE.format(phone_number=phone_number),
account_lifecycle_event=account_lifecycle_event,
**params) |
<SYSTEM_TASK:>
Generates the TeleSign REST API headers used to authenticate requests.
<END_TASK>
<USER_TASK:>
Description:
def generate_telesign_headers(customer_id,
api_key,
method_name,
resource,
url_encoded_fields,
date_rfc2616=None,
nonce=None,
user_agent=None,
content_type=None):
"""
Generates the TeleSign REST API headers used to authenticate requests.
Creates the canonicalized string_to_sign and generates the HMAC signature. This is used to authenticate requests
against the TeleSign REST API.
See https://developer.telesign.com/docs/authentication for detailed API documentation.
:param customer_id: Your account customer_id.
:param api_key: Your account api_key.
:param method_name: The HTTP method name of the request as an upper case string, should be one of 'POST', 'GET',
'PUT' or 'DELETE'.
:param resource: The partial resource URI to perform the request against, as a string.
:param url_encoded_fields: HTTP body parameters to perform the HTTP request with, must be a urlencoded string.
:param date_rfc2616: The date and time of the request formatted in rfc 2616, as a string.
:param nonce: A unique cryptographic nonce for the request, as a string.
:param user_agent: (optional) User Agent associated with the request, as a string.
:param content_type: (optional) ContentType of the request, as a string.
:return: The TeleSign authentication headers.
""" |
if date_rfc2616 is None:
date_rfc2616 = formatdate(usegmt=True)
if nonce is None:
nonce = str(uuid.uuid4())
if not content_type:
content_type = "application/x-www-form-urlencoded" if method_name in ("POST", "PUT") else ""
auth_method = "HMAC-SHA256"
string_to_sign_builder = ["{method}".format(method=method_name)]
string_to_sign_builder.append("\n{content_type}".format(content_type=content_type))
string_to_sign_builder.append("\n{date}".format(date=date_rfc2616))
string_to_sign_builder.append("\nx-ts-auth-method:{auth_method}".format(auth_method=auth_method))
string_to_sign_builder.append("\nx-ts-nonce:{nonce}".format(nonce=nonce))
if content_type and url_encoded_fields:
string_to_sign_builder.append("\n{fields}".format(fields=url_encoded_fields))
string_to_sign_builder.append("\n{resource}".format(resource=resource))
string_to_sign = "".join(string_to_sign_builder)
signer = hmac.new(b64decode(api_key), string_to_sign.encode("utf-8"), sha256)
signature = b64encode(signer.digest()).decode("utf-8")
authorization = "TSA {customer_id}:{signature}".format(
customer_id=customer_id,
signature=signature)
headers = {
"Authorization": authorization,
"Date": date_rfc2616,
"Content-Type": content_type,
"x-ts-auth-method": auth_method,
"x-ts-nonce": nonce
}
if user_agent:
headers['User-Agent'] = user_agent
return headers |
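A self-contained sketch with dummy credentials; note the api_key must be valid base64, since it is fed through `b64decode`:

```python
from base64 import b64encode

dummy_api_key = b64encode(b"dummy-secret").decode("ascii")
headers = generate_telesign_headers(
    customer_id="12345678-9ABC-DEF0-1234-56789ABCDEF0",  # dummy value
    api_key=dummy_api_key,
    method_name="POST",
    resource="/v1/score/15555551234",
    url_encoded_fields="account_lifecycle_event=create",
)
# headers now contains Authorization, Date, Content-Type,
# x-ts-auth-method and x-ts-nonce
```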
<SYSTEM_TASK:>
Generic TeleSign REST API POST handler.
<END_TASK>
<USER_TASK:>
Description:
def post(self, resource, **params):
"""
Generic TeleSign REST API POST handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the POST request with, as a dictionary.
:return: The RestClient Response object.
""" |
return self._execute(self.session.post, 'POST', resource, **params) |
<SYSTEM_TASK:>
Generic TeleSign REST API GET handler.
<END_TASK>
<USER_TASK:>
Description:
def get(self, resource, **params):
"""
Generic TeleSign REST API GET handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the GET request with, as a dictionary.
:return: The RestClient Response object.
""" |
return self._execute(self.session.get, 'GET', resource, **params) |
<SYSTEM_TASK:>
Generic TeleSign REST API PUT handler.
<END_TASK>
<USER_TASK:>
Description:
def put(self, resource, **params):
"""
Generic TeleSign REST API PUT handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the PUT request with, as a dictionary.
:return: The RestClient Response object.
""" |
return self._execute(self.session.put, 'PUT', resource, **params) |
<SYSTEM_TASK:>
Generic TeleSign REST API DELETE handler.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, resource, **params):
"""
Generic TeleSign REST API DELETE handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the DELETE request with, as a dictionary.
:return: The RestClient Response object.
""" |
return self._execute(self.session.delete, 'DELETE', resource, **params) |
<SYSTEM_TASK:>
Generic TeleSign REST API request handler.
<END_TASK>
<USER_TASK:>
Description:
def _execute(self, method_function, method_name, resource, **params):
"""
Generic TeleSign REST API request handler.
:param method_function: The Requests HTTP request function to perform the request.
:param method_name: The HTTP method name, as an upper case string.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the HTTP request with, as a dictionary.
:return: The RestClient Response object.
""" |
resource_uri = "{api_host}{resource}".format(api_host=self.api_host, resource=resource)
url_encoded_fields = self._encode_params(params)
headers = RestClient.generate_telesign_headers(self.customer_id,
self.api_key,
method_name,
resource,
url_encoded_fields,
user_agent=self.user_agent)
if method_name in ['POST', 'PUT']:
payload = {'data': url_encoded_fields}
else:
payload = {'params': url_encoded_fields}
response = self.Response(method_function(resource_uri,
headers=headers,
timeout=self.timeout,
**payload))
return response |
<SYSTEM_TASK:>
Send a message to the target phone_number.
<END_TASK>
<USER_TASK:>
Description:
def message(self, phone_number, message, message_type, **params):
"""
Send a message to the target phone_number.
See https://developer.telesign.com/docs/messaging-api for detailed API documentation.
""" |
return self.post(MESSAGING_RESOURCE,
phone_number=phone_number,
message=message,
message_type=message_type,
**params) |
<SYSTEM_TASK:>
The PhoneID API provides a cleansed phone number, phone type, and telecom carrier information to determine the
<END_TASK>
<USER_TASK:>
Description:
def phoneid(self, phone_number, **params):
"""
The PhoneID API provides a cleansed phone number, phone type, and telecom carrier information to determine the
best communication method - SMS or voice.
See https://developer.telesign.com/docs/phoneid-api for detailed API documentation.
""" |
return self.post(PHONEID_RESOURCE.format(phone_number=phone_number),
**params) |
<SYSTEM_TASK:>
Returns a new instance which identical to this instance.
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
"""Returns a new instance which identical to this instance.""" |
if self._pre_computed_hash is None:
temp = ssdeep(buf="")
else:
temp = ssdeep(hash=self._pre_computed_hash)
libssdeep_wrapper.fuzzy_free(temp._state)
temp._state = libssdeep_wrapper.fuzzy_clone(self._state)
temp._updatable = self._updatable
temp._pre_computed_hash = self._pre_computed_hash
return temp |
<SYSTEM_TASK:>
Send a voice call to the target phone_number.
<END_TASK>
<USER_TASK:>
Description:
def call(self, phone_number, message, message_type, **params):
"""
Send a voice call to the target phone_number.
See https://developer.telesign.com/docs/voice-api for detailed API documentation.
""" |
return self.post(VOICE_RESOURCE,
phone_number=phone_number,
message=message,
message_type=message_type,
**params) |
<SYSTEM_TASK:>
Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification
<END_TASK>
<USER_TASK:>
Description:
def status(self, external_id, **params):
"""
Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification
flow you must check the status using TeleSign's servers on your backend. Do not rely on the SDK alone to
indicate a successful verification.
See https://developer.telesign.com/docs/app-verify-android-sdk-self#section-get-status-service or
https://developer.telesign.com/docs/app-verify-ios-sdk-self#section-get-status-service for detailed
API documentation.
""" |
return self.get(APPVERIFY_STATUS_RESOURCE.format(external_id=external_id),
**params) |
<SYSTEM_TASK:>
Get Asset Location.
<END_TASK>
<USER_TASK:>
Description:
def get_asset_location(element, attr):
"""
Get Asset Location.
Remove leading slash e.g '/static/images.jpg' ==> static/images.jpg
Also, if the url is also prefixed with static, it would be removed.
e.g static/image.jpg ==> image.jpg
""" |
asset_location = re.match(r'^/?(static)?/?(.*)', element[attr],
re.IGNORECASE)
# replace relative links i.e (../../static)
asset_location = asset_location.group(2).replace('../', '')
return asset_location |
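A few illustrative inputs; a plain dict stands in for a BeautifulSoup element here:

```python
get_asset_location({'src': '/static/images.jpg'}, 'src')   # 'images.jpg'
get_asset_location({'src': 'static/image.jpg'}, 'src')     # 'image.jpg'
get_asset_location({'src': '../../css/app.css'}, 'src')    # 'css/app.css'
```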
<SYSTEM_TASK:>
The actual transformation occurs here.
<END_TASK>
<USER_TASK:>
Description:
def transform(matches, framework, namespace, static_endpoint):
"""
The actual transformation occurs here.
flask example: images/staticfy.jpg', ==>
"{{ url_for('static', filename='images/staticfy.jpg') }}"
""" |
transformed = []
namespace = namespace + '/' if namespace else ''
for attribute, elements in matches:
for element in elements:
asset_location = get_asset_location(element, attribute)
# string substitution
sub_dict = {
'static_endpoint': static_endpoint, 'namespace': namespace,
'asset_location': asset_location
}
transformed_string = frameworks[framework] % sub_dict
res = (attribute, element[attribute], transformed_string)
transformed.append(res)
return transformed |
<SYSTEM_TASK:>
Extract all the elements we're interested in.
<END_TASK>
<USER_TASK:>
Description:
def get_elements(html_file, tags):
"""
Extract all the elements we're interested in.
Returns a list of tuples with the attribute as first item
and the list of elements as the second item.
""" |
with open(html_file) as f:
document = BeautifulSoup(f, 'html.parser')
def condition(tag, attr):
# Don't include external links
return lambda x: x.name == tag \
and not x.get(attr, 'http').startswith(('http', '//'))
all_tags = [(attr, document.find_all(condition(tag, attr)))
for tag, attr in tags]
return all_tags |
<SYSTEM_TASK:>
Replace lines in the old file with the transformed lines.
<END_TASK>
<USER_TASK:>
Description:
def replace_lines(html_file, transformed):
"""Replace lines in the old file with the transformed lines.""" |
result = []
with codecs.open(html_file, 'r', 'utf-8') as input_file:
for line in input_file:
# replace all single quotes with double quotes
line = re.sub(r'\'', '"', line)
for attr, value, new_link in transformed:
if attr in line and value in line:
# replace old link with new staticfied link
new_line = line.replace(value, new_link)
result.append(new_line)
break
else:
result.append(line)
return ''.join(result) |
<SYSTEM_TASK:>
Staticfy method.
<END_TASK>
<USER_TASK:>
Description:
def staticfy(html_file, args=argparse.ArgumentParser()):
"""
Staticfy method.
Loop through each line of the file and replaces the old links
""" |
# unpack arguments
static_endpoint = args.static_endpoint or 'static'
framework = args.framework or os.getenv('STATICFY_FRAMEWORK', 'flask')
add_tags = args.add_tags or {}
exc_tags = args.exc_tags or {}
namespace = args.namespace or {}
# default tags
tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}
# generate additional_tags
add_tags = {(tag, attr) for tag, attr in add_tags.items()}
tags.update(add_tags)
# remove tags if any was specified
exc_tags = {(tag, attr) for tag, attr in exc_tags.items()}
tags = tags - exc_tags
# get elements we're interested in
matches = get_elements(html_file, tags)
# transform old links to new links
transformed = transform(matches, framework, namespace, static_endpoint)
return replace_lines(html_file, transformed) |
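A hypothetical programmatic invocation; an `argparse.Namespace` supplies the attributes the function unpacks:

```python
import argparse

args = argparse.Namespace(
    static_endpoint='static', framework='flask',
    add_tags={}, exc_tags={}, namespace='',
)
staticfied = staticfy('index.html', args=args)  # returns the rewritten HTML
```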
<SYSTEM_TASK:>
Write to stdout or a file
<END_TASK>
<USER_TASK:>
Description:
def file_ops(staticfied, args):
"""Write to stdout or a file""" |
destination = args.o or args.output
if destination:
with open(destination, 'w') as file:
file.write(staticfied)
else:
print(staticfied) |
<SYSTEM_TASK:>
Get keys specified in arguments
<END_TASK>
<USER_TASK:>
Description:
def find_keys(args):
"""Get keys specified in arguments
returns list of keys or None
""" |
key = args['--key']
if key:
return [key]
keyfile = args['--apikeys']
if keyfile:
return read_keyfile(keyfile)
envkey = os.environ.get('TINYPNG_API_KEY', None)
if envkey:
return [envkey]
local_keys = join(abspath("."), "tinypng.keys")
if isfile(local_keys):
return read_keyfile(local_keys)
home_keys = expanduser("~/.tinypng.keys")
if isfile(home_keys):
return read_keyfile(home_keys)
return [] |
<SYSTEM_TASK:>
Shrink png file and write it back to a new file
<END_TASK>
<USER_TASK:>
Description:
def shrink_file(in_filepath, api_key=None, out_filepath=None):
"""Shrink png file and write it back to a new file
The default file path replaces ".png" with ".tiny.png".
returns api_info (including info['output']['filepath'])
""" |
info = get_shrink_file_info(in_filepath, api_key, out_filepath)
write_shrunk_file(info)
return info |
<SYSTEM_TASK:>
Verify that a callback was made by TeleSign and was not sent by a malicious client by verifying the signature.
<END_TASK>
<USER_TASK:>
Description:
def verify_telesign_callback_signature(api_key, signature, json_str):
"""
Verify that a callback was made by TeleSign and was not sent by a malicious client by verifying the signature.
:param api_key: the TeleSign API api_key associated with your account.
:param signature: the TeleSign Authorization header value supplied in the callback, as a string.
:param json_str: the POST body text, that is, the JSON string sent by TeleSign describing the transaction status.
""" |
your_signature = b64encode(HMAC(b64decode(api_key), json_str.encode("utf-8"), sha256).digest()).decode("utf-8")
if len(signature) != len(your_signature):
return False
# avoid timing attack with constant time equality check
signatures_equal = True
for x, y in zip(signature, your_signature):
if not x == y:
signatures_equal = False
return signatures_equal |
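A self-checking sketch: sign a payload with the same scheme, then verify it. The standard library's `hmac.compare_digest` would be an equivalent constant-time comparison to the manual loop above:

```python
from base64 import b64decode, b64encode
from hashlib import sha256
from hmac import HMAC

api_key = b64encode(b"dummy-secret").decode("ascii")  # dummy key
json_str = '{"status": {"code": 290}}'
signature = b64encode(
    HMAC(b64decode(api_key), json_str.encode("utf-8"), sha256).digest()
).decode("utf-8")
assert verify_telesign_callback_signature(api_key, signature, json_str)
```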
<SYSTEM_TASK:>
The move format is in long algebraic notation.
<END_TASK>
<USER_TASK:>
Description:
def setposition(self, position):
"""
The move format is in long algebraic notation.
Takes a list of strings = ['e2e4', 'd7d5']
OR
FEN = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
""" |
try:
if isinstance(position, list):
self.send('position startpos moves {}'.format(
self.__listtostring(position)))
self.isready()
elif re.match(r'\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position):
regexList = re.match(r'\s*^(((?:[rnbqkpRNBQKP1-8]+\/){7})[rnbqkpRNBQKP1-8]+)\s([b|w])\s([K|Q|k|q|-]{1,4})\s(-|[a-h][1-8])\s(\d+\s\d+)$', position).groups()
fen = regexList[0].split("/")
if len(fen) != 8:
raise ValueError("expected 8 rows in position part of fen: {0}".format(repr(fen)))
for fenPart in fen:
field_sum = 0
previous_was_digit, previous_was_piece = False, False
for c in fenPart:
if c in ["1", "2", "3", "4", "5", "6", "7", "8"]:
if previous_was_digit:
raise ValueError("two subsequent digits in position part of fen: {0}".format(repr(fen)))
field_sum += int(c)
previous_was_digit = True
previous_was_piece = False
elif c == "~":
if not previous_was_piece:
raise ValueError("~ not after piece in position part of fen: {0}".format(repr(fen)))
previous_was_digit, previous_was_piece = False, False
elif c.lower() in ["p", "n", "b", "r", "q", "k"]:
field_sum += 1
previous_was_digit = False
previous_was_piece = True
else:
raise ValueError("invalid character in position part of fen: {0}".format(repr(fen)))
if field_sum != 8:
raise ValueError("expected 8 columns per row in position part of fen: {0}".format(repr(fen)))
self.send('position fen {}'.format(position))
self.isready()
else: raise ValueError("fen doesn`t match follow this example: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1 ")
except ValueError as e:
print('\nCheck position correctness\n')
sys.exit(e.message) |
<SYSTEM_TASK:>
Add a single item into DataContainer
<END_TASK>
<USER_TASK:>
Description:
def add_item(self, host, key, value, clock=None, state=0):
"""
Add a single item into DataContainer
:host: hostname to which item will be linked to
:key: item key as defined in Zabbix
:value: item value
:clock: timestamp as integer. If not provided, self.clock will be used
""" |
if clock is None:
clock = self.clock
if self._config.data_type == "items":
item = {"host": host, "key": key,
"value": value, "clock": clock, "state": state}
elif self._config.data_type == "lld":
item = {"host": host, "key": key, "clock": clock, "state": state,
"value": json.dumps({"data": value})}
else:
if self.logger: # pragma: no cover
self.logger.error("Setup data_type before adding data")
raise ValueError('Setup data_type before adding data')
self._items_list.append(item) |
<SYSTEM_TASK:>
Add a list of item into the container
<END_TASK>
<USER_TASK:>
Description:
def add(self, data):
"""
Add a list of item into the container
:data: dict of items & value per hostname
""" |
for host in data:
for key in data[host]:
if not data[host][key] == []:
self.add_item(host, key, data[host][key]) |
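The expected input shape is a dict of items per hostname, mirroring the loops above; hostnames and item keys here are hypothetical, and `container` is assumed to be a configured DataContainer instance:

```python
data = {
    "web-server-01": {
        "system.cpu.load[all,avg1]": 0.42,
        "vfs.fs.size[/,pused]": 87.5,
    },
}
container.add(data)  # each (host, key, value) triple goes through add_item
```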
<SYSTEM_TASK:>
Common part of sending operations
<END_TASK>
<USER_TASK:>
Description:
def _send_common(self, item):
"""
Common part of sending operations
Calls SenderProtocol._send_to_zabbix
Returns result as provided by _handle_response
:item: either a list or a single item depending on debug_level
""" |
total = len(item)
processed = failed = time = 0
if self._config.dryrun is True:
response = 'dryrun'
else:
self._send_to_zabbix(item)
response, processed, failed, total, time = self._read_from_zabbix()
output_key = '(bulk)'
output_item = '(bulk)'
if self.debug_level >= 4:
output_key = item[0]['key']
output_item = item[0]['value']
if self.logger: # pragma: no cover
self.logger.info(
"" +
ZBX_DBG_SEND_RESULT % (
processed,
failed,
total,
output_key,
output_item,
response
)
)
return response, processed, failed, total, time |
<SYSTEM_TASK:>
Set logger instance for the class
<END_TASK>
<USER_TASK:>
Description:
def logger(self, value):
"""
Set logger instance for the class
""" |
if isinstance(value, logging.Logger):
self._logger = value
else:
if self._logger: # pragma: no cover
self._logger.error("logger requires a logging instance")
raise ValueError('logger requires a logging instance') |
<SYSTEM_TASK:>
Returns a full representation of the issue for the given issue key.
<END_TASK>
<USER_TASK:>
Description:
def get_issue(self, issue_id, params=None):
"""Returns a full representation of the issue for the given issue key.
The issue JSON consists of the issue key and a collection of fields. Additional information like links to
workflow transition sub-resources, or HTML rendered values of the fields supporting HTML rendering can be
retrieved with expand request parameter specified.
The fields request parameter accepts a comma-separated list of fields to include in the response. It can be used
to retrieve a subset of fields. By default all fields are returned in the response. A particular field can be
excluded from the response if prefixed with a "-" (minus) sign. Parameter can be provided multiple times on a
single request.
By default, all fields are returned in the response. Note: this is different from a JQL search - only navigable
fields are returned by default (*navigable).
Args:
issue_id:
params:
Returns:
""" |
return self._get(self.API_URL + 'issue/{}'.format(issue_id), params=params) |
<SYSTEM_TASK:>
Creates an issue or a sub-task from a JSON representation.
<END_TASK>
<USER_TASK:>
Description:
def create_issue(self, data, params=None):
"""Creates an issue or a sub-task from a JSON representation.
You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue
create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is
not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response.
A field validation error will occur if such field is submitted in request.
Creating a sub-task is similar to creating an issue with the following differences:
issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and
You must provide a parent field with the ID or key of the parent issue.
Args:
data:
params:
Returns:
""" |
return self._post(self.API_URL + 'issue', data=data, params=params) |
<SYSTEM_TASK:>
Deletes an individual issue.
<END_TASK>
<USER_TASK:>
Description:
def delete_issue(self, issue_id, params=None):
"""Deletes an individual issue.
If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete
an issue without deleting its sub-tasks.
Args:
issue_id:
params:
Returns:
""" |
return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params) |
<SYSTEM_TASK:>
Retrieves a list of objects.
<END_TASK>
<USER_TASK:>
Description:
def list(self, pagination=True, page_size=None, page=None, **queryparams):
"""
Retrieves a list of objects.
By default uses local cache and remote pagination
If pagination is used and no page is requested (the default), all the
remote objects are retrieved and appended in a single list.
If pagination is disabled, all the objects are fetched from the
endpoint and returned. This may trigger some parsing error if the
result set is very large.
:param pagination: Use pagination (default: `True`)
:param page_size: Size of the pagination page (default: `100`).
Any non-numeric value will be cast to the
default value
:param page: Page number to retrieve (default: `None`). Ignored if
`pagination` is `False`
:param queryparams: Additional filter parameters as accepted by the
remote API
:return: <SearchableList>
""" |
if page_size and pagination:
try:
page_size = int(page_size)
except (ValueError, TypeError):
page_size = 100
queryparams['page_size'] = page_size
result = self.requester.get(
self.instance.endpoint, query=queryparams, paginate=pagination
)
objects = SearchableList()
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False) and not page:
next_page = 2
else:
next_page = None
while next_page:
pageparams = queryparams.copy()
pageparams['page'] = next_page
result = self.requester.get(
self.instance.endpoint, query=pageparams,
)
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False):
next_page += 1
else:
next_page = None
return objects |
<SYSTEM_TASK:>
Turns a JSON object into a model instance.
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, requester, entry):
"""
Turns a JSON object into a model instance.
""" |
if not isinstance(entry, dict):
return entry
for key_to_parse, cls_to_parse in six.iteritems(cls.parser):
if key_to_parse in entry:
entry[key_to_parse] = cls_to_parse.parse(
requester, entry[key_to_parse]
)
return cls(requester, **entry) |
<SYSTEM_TASK:>
Set attribute to a specific value
<END_TASK>
<USER_TASK:>
Description:
def set_attribute(self, id, value, version=1):
"""
Set attribute to a specific value
:param id: id of the attribute
:param value: value of the attribute
:param version: version of the attribute (default = 1)
""" |
attributes = self._get_attributes(cache=True)
formatted_id = '{0}'.format(id)
attributes['attributes_values'][formatted_id] = value
response = self.requester.patch(
'/{endpoint}/custom-attributes-values/{id}',
endpoint=self.endpoint, id=self.id,
payload={
'attributes_values': attributes['attributes_values'],
'version': version
}
)
cache_key = self.requester.get_full_url(
'/{endpoint}/custom-attributes-values/{id}',
endpoint=self.endpoint, id=self.id
)
self.requester.cache.put(cache_key, response)
return response.json() |
<SYSTEM_TASK:>
Get stats for issues of the project
<END_TASK>
<USER_TASK:>
Description:
def issues_stats(self):
"""
Get stats for issues of the project
""" |
response = self.requester.get(
'/{endpoint}/{id}/issues_stats',
endpoint=self.endpoint, id=self.id
)
return response.json() |
<SYSTEM_TASK:>
Like the project
<END_TASK>
<USER_TASK:>
Description:
def like(self):
"""
Like the project
""" |
self.requester.post(
'/{endpoint}/{id}/like',
endpoint=self.endpoint, id=self.id
)
return self |
<SYSTEM_TASK:>
Unlike the project
<END_TASK>
<USER_TASK:>
Description:
def unlike(self):
"""
Unlike the project
""" |
self.requester.post(
'/{endpoint}/{id}/unlike',
endpoint=self.endpoint, id=self.id
)
return self |
<SYSTEM_TASK:>
Stars the project
<END_TASK>
<USER_TASK:>
Description:
def star(self):
"""
Stars the project
.. deprecated:: 0.8.5
Update Taiga and use like instead
""" |
warnings.warn(
"Deprecated! Update Taiga and use .like() instead",
DeprecationWarning
)
self.requester.post(
'/{endpoint}/{id}/star',
endpoint=self.endpoint, id=self.id
)
return self |
<SYSTEM_TASK:>
Takes a class and a dict and try to build an instance of the class
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, value):
"""Takes a class and a dict and try to build an instance of the class
:param cls: The class to parse
:param value: either a dict, a list or a scalar value
""" |
if is_list_annotation(cls):
if not isinstance(value, list):
raise TypeError('Could not parse {} because value is not a list'.format(cls))
return [parse(cls.__args__[0], o) for o in value]
else:
return GenericParser(cls, ModelProviderImpl()).parse(value) |
<SYSTEM_TASK:>
Log a request so that it matches our local log format.
<END_TASK>
<USER_TASK:>
Description:
def log_local_message(message_format, *args):
"""
Log a request so that it matches our local log format.
""" |
prefix = '{} {}'.format(color('INFO', fg=248), color('request', fg=5))
message = message_format % args
sys.stderr.write('{} {}\n'.format(prefix, message)) |
<SYSTEM_TASK:>
Takes a object and produces a dict-like representation
<END_TASK>
<USER_TASK:>
Description:
def serialize(obj):
"""Takes a object and produces a dict-like representation
:param obj: the object to serialize
""" |
if isinstance(obj, list):
return [serialize(o) for o in obj]
return GenericSerializer(ModelProviderImpl()).serialize(obj) |
<SYSTEM_TASK:>
By default, marks the block as expecting an indented "body" blocks of which are then supplied
<END_TASK>
<USER_TASK:>
Description:
def of(self, *indented_blocks) -> "CodeBlock":
"""
By default, marks the block as expecting an indented "body" blocks of which are then supplied
as arguments to this method.
Unless the block specifies a "closed_by", if no body blocks are supplied or they are all Nones,
this will generate a "pass" statement as the body. If there is a "closed_by" specified, then
that will be used on the same indentation level as the opening of the block.
After all the arguments have been handled, this block is marked as finalised and no more blocks
can be appended to it.
None blocks are skipped.
Returns the block itself.
""" |
if self.closed_by is None:
self.expects_body_or_pass = True
for block in indented_blocks:
if block is not None:
self._blocks.append((1, block))
# Finalise it so that we cannot add more sub-blocks to this block.
self.finalise()
return self |
<SYSTEM_TASK:>
Adds sub-blocks at the specified indentation level, which defaults to 0.
<END_TASK>
<USER_TASK:>
Description:
def add(self, *blocks, indentation=0) -> "CodeBlock":
"""
Adds sub-blocks at the specified indentation level, which defaults to 0.
Nones are skipped.
Returns the parent block itself, useful for chaining.
""" |
for block in blocks:
if block is not None:
self._blocks.append((indentation, block))
return self |
<SYSTEM_TASK:>
Generate the code and return it as a string.
<END_TASK>
<USER_TASK:>
Description:
def to_code(self, context: Context =None):
"""
Generate the code and return it as a string.
""" |
# Do not override this method!
context = context or Context()
for imp in self.imports:
if imp not in context.imports:
context.imports.append(imp)
counter = Counter()
lines = list(self.to_lines(context=context, counter=counter))
if counter.num_indented_non_doc_blocks == 0:
if self.expects_body_or_pass:
lines.append(" pass")
elif self.closed_by:
lines[-1] += self.closed_by
else:
if self.closed_by:
lines.append(self.closed_by)
return join_lines(*lines) + self._suffix |
<SYSTEM_TASK:>
Execute simple code blocks.
<END_TASK>
<USER_TASK:>
Description:
def exec(self, globals=None, locals=None):
"""
Execute simple code blocks.
Do not attempt this on modules or other blocks where you have
imports as they won't work.
Instead write the code to a file and use runpy.run_path()
""" |
if locals is None:
locals = {}
builtins.exec(self.to_code(), globals, locals)
return locals |
<SYSTEM_TASK:>
Build a basic code block.
<END_TASK>
<USER_TASK:>
Description:
def block(self, *blocks, **kwargs) -> "CodeBlock":
"""
Build a basic code block.
Positional arguments should be instances of CodeBlock or strings.
All code blocks passed as positional arguments are added at indentation level 0.
None blocks are skipped.
""" |
assert "name" not in kwargs
kwargs.setdefault("code", self)
code = CodeBlock(**kwargs)
for block in blocks:
if block is not None:
code._blocks.append((0, block))
return code |
<SYSTEM_TASK:>
Generate code for a dictionary of locals whose value is not the specified literal.
<END_TASK>
<USER_TASK:>
Description:
def dict_from_locals(self, name, params: List[Parameter], not_specified_literal=Constants.VALUE_NOT_SET):
"""
Generate code for a dictionary of locals whose value is not the specified literal.
""" |
code = self.block(f"{name} = {{}}")
for p in params:
code.add(
self.block(f"if {p.name} is not {not_specified_literal}:").of(
f"{name}[{p.name!r}] = {p.name}"
),
)
return code |
<SYSTEM_TASK:>
Search in your Taiga.io instance
<END_TASK>
<USER_TASK:>
Description:
def search(self, project, text=''):
"""
Search in your Taiga.io instance
:param project: the project id
:param text: the query of your search
""" |
result = self.raw_request.get(
'search', query={'project': project, 'text': text}
)
result = result.json()
search_result = SearchResult()
search_result.tasks = self.tasks.parse_list(result['tasks'])
search_result.issues = self.issues.parse_list(result['issues'])
search_result.user_stories = self.user_stories.parse_list(
result['userstories']
)
search_result.wikipages = self.wikipages.parse_list(
result['wikipages']
)
return search_result |
<SYSTEM_TASK:>
Authenticate an app
<END_TASK>
<USER_TASK:>
Description:
def auth_app(self, app_id, app_secret, auth_code, state=''):
"""
Authenticate an app
:param app_id: the app id
:param app_secret: the app secret
:param auth_code: the app auth code
""" |
headers = {
'Content-type': 'application/json'
}
payload = {
'application': app_id,
'auth_code': auth_code,
'state': state
}
try:
full_url = utils.urljoin(
self.host,
'/api/v1/application-tokens/validate'
)
response = requests.post(
full_url,
data=json.dumps(payload),
headers=headers,
verify=self.tls_verify
)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'NETWORK ERROR', 'POST'
)
if response.status_code != 200:
raise exceptions.TaigaRestException(
full_url,
response.status_code,
response.text,
'POST'
)
cyphered_token = response.json().get('cyphered_token', '')
if cyphered_token:
from jwkest.jwk import SYMKey
from jwkest.jwe import JWE
sym_key = SYMKey(key=app_secret, alg='A128KW')
data, success = JWE().decrypt(cyphered_token, keys=[sym_key]), True
if isinstance(data, tuple):
data, success = data
try:
self.token = json.loads(data.decode('utf-8')).get('token', None)
except ValueError: # pragma: no cover
self.token = None
if not success:
self.token = None
else:
self.token = None
if self.token is None:
raise exceptions.TaigaRestException(
full_url, 400,
'INVALID TOKEN', 'POST'
)
self.raw_request = RequestMaker('/api/v1', self.host, self.token,
'Application', self.tls_verify)
self._init_resources() |
<SYSTEM_TASK:>
Iterate over the members of the shape in declaration order,
<END_TASK>
<USER_TASK:>
Description:
def sorted_members(self):
"""
Iterate over the members of the shape in declaration order,
except that required members are yielded before any optional
members.
""" |
members = collections.OrderedDict()
required_names = self.metadata.get("required", ())
for name, shape in self.members.items():
members[name] = AbShapeMember(name=name, shape=shape, is_required=name in required_names)
if self.is_output_shape:
# ResponseMetadata is the first member for all output shapes.
yield AbShapeMember(
name="ResponseMetadata",
shape=self._shape_resolver.get_shape_by_name("ResponseMetadata"),
is_required=True,
)
yield from sorted(members.values(), key=lambda m: not m.is_required) |
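The required-before-optional ordering relies on Python's stable sort: the key maps required members to False (0) and optional ones to True (1), so required members float ahead while each group keeps declaration order. A standalone demonstration:

    from collections import namedtuple

    M = namedtuple("M", "name is_required")
    ms = [M("a", False), M("b", True), M("c", False), M("d", True)]
    print([m.name for m in sorted(ms, key=lambda m: not m.is_required)])
    # ['b', 'd', 'a', 'c']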
<SYSTEM_TASK:>
decode byte strings and convert to int where needed
<END_TASK>
<USER_TASK:>
Description:
def _decode(value):
"""
decode byte strings and convert to int where needed
""" |
if isinstance(value, bytes):
    value = value.decode('utf-8')
if isinstance(value, str) and value.isdigit():
    return int(value)
return value |
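A few sanity checks on the helper's behaviour:

    assert _decode(b'42') == 42         # digit bytes become an int
    assert _decode(b'abc') == 'abc'     # other bytes are decoded
    assert _decode('7') == 7            # digit strings become an int
    assert _decode('plain') == 'plain'  # everything else passes through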
<SYSTEM_TASK:>
Annotation function to set an Enum to be case insensitive on parsing
<END_TASK>
<USER_TASK:>
Description:
def caseinsensitive(cls):
"""Annotation function to set an Enum to be case insensitive on parsing""" |
if not issubclass(cls, Enum):
raise TypeError('caseinsensitive decorator can only be applied to subclasses of enum.Enum')
enum_options = getattr(cls, PYCKSON_ENUM_OPTIONS, {})
enum_options[ENUM_CASE_INSENSITIVE] = True
setattr(cls, PYCKSON_ENUM_OPTIONS, enum_options)
return cls |
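Usage sketch; the option constants and the parsing behaviour come from the pyckson library and are assumed rather than shown here:

    from enum import Enum

    @caseinsensitive
    class Color(Enum):
        RED = 1
        BLUE = 2

    # pyckson's parser should now accept 'red', 'Red' or 'RED' for Color.RED.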
<SYSTEM_TASK:>
Uses shell32.GetCommandLineArgvW to get sys.argv as a list of UTF-8
<END_TASK>
<USER_TASK:>
Description:
def win32_utf8_argv():
"""Uses shell32.GetCommandLineArgvW to get sys.argv as a list of UTF-8
strings.
Versions 2.5 and older of Python don't support Unicode in sys.argv on
Windows, with the underlying Windows API instead replacing multi-byte
characters with '?'.
Returns None on failure.
Example usage:
>>> def main(argv=None):
... if argv is None:
... argv = win32_utf8_argv() or sys.argv
...
""" |
try:
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
cmd = GetCommandLineW()
argc = c_int(0)
argv = CommandLineToArgvW(cmd, byref(argc))
if argc.value > 0:
# Remove Python executable if present
if argc.value - len(sys.argv) == 1:
start = 1
else:
start = 0
return [argv[i].encode('utf-8') for i in
range(start, argc.value)]
except Exception:
pass |
<SYSTEM_TASK:>
Encrypt data and return as an ASCII string. Hexadecimal digest by default.
<END_TASK>
<USER_TASK:>
Description:
def encrypt_ascii(self, data, key=None, v=None, extra_bytes=0,
digest="hex"):
"""
Encrypt data and return as an ASCII string. Hexadecimal digest by default.
Available digests:
hex: Hexadecimal
base64: Base 64
hqx: hexbin4
""" |
digests = {"hex": binascii.b2a_hex,
"base64": binascii.b2a_base64,
"hqx": binascii.b2a_hqx}
digestor = digests.get(digest)
if not digestor:
    raise TripleSecError(u"Digestor not supported.")
binary_result = self.encrypt(data, key, v, extra_bytes)
result = digestor(binary_result)
return result |
<SYSTEM_TASK:>
Receive an ASCII string and return decrypted data.
<END_TASK>
<USER_TASK:>
Description:
def decrypt_ascii(self, ascii_string, key=None, digest="hex"):
"""
Receive an ASCII string and return decrypted data.
Available digests:
hex: Hexadecimal
base64: Base 64
hqx: hexbin4
""" |
digests = {"hex": binascii.a2b_hex,
"base64": binascii.a2b_base64,
"hqx": binascii.a2b_hqx}
digestor = digests.get(digest)
if not digestor:
    raise TripleSecError(u"Digestor not supported.")
binary_string = digestor(ascii_string)
result = self.decrypt(binary_string, key)
return result |
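A hypothetical round trip, assuming a TripleSec instance constructed as in the triplesec library (key handling is not shown in this document):

    ts = TripleSec(key=b'* password *')
    token = ts.encrypt_ascii(b'secret data', digest='base64')
    assert ts.decrypt_ascii(token, digest='base64') == b'secret data'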
<SYSTEM_TASK:>
Build a tree of commands.
<END_TASK>
<USER_TASK:>
Description:
def _build_cmd_tree(self, cmd_cls, cmd_name=None):
"""
Build a tree of commands.
:param cmd_cls:
The Command class or object to start with.
:param cmd_name:
Hard-coded name of the command (can be None for auto-detection)
:returns:
A tree structure represented as the tuple
``(cmd_name, cmd_obj, children)``
where ``cmd_name`` is the command's name, if any (it might be
None), ``cmd_obj`` is a Command instance and ``children`` is a
tuple of identical tuples.
Note that command name auto-detection relies on
:meth:`guacamole.recipes.cmd.Command.get_cmd_name()`.
Let's look at a simple git-like example::
>>> from guacamole import Command
>>> class git_log(Command):
...     pass
>>> class git_stash_list(Command):
...     pass
>>> class git_stash(Command):
...     sub_commands = (('list', git_stash_list),)
>>> class git(Command):
...     sub_commands = (('log', git_log),
...                     ('stash', git_stash))
>>> build_cmd_tree(git)
(None, <git>, (
    ('log', <git_log>, ()),
    ('stash', <git_stash>, (
        ('list', <git_stash_list>, ()),
    )),
))
""" |
if isinstance(cmd_cls, type):
cmd_obj = cmd_cls()
else:
cmd_obj = cmd_cls
if cmd_name is None:
cmd_name = cmd_obj.get_cmd_name()
return cmd_tree_node(cmd_name, cmd_obj, tuple([
self._build_cmd_tree(subcmd_cls, subcmd_name)
for subcmd_name, subcmd_cls in cmd_obj.get_sub_commands()])) |
<SYSTEM_TASK:>
Utility while developing to dump message data to play with in the
<END_TASK>
<USER_TASK:>
Description:
def debug_dump(message, file_prefix="dump"):
"""
Utility while developing to dump message data to play with in the
interpreter
""" |
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'wb') as f:
    f.write(message.SerializeToString()) |
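Reading a dump back in the interpreter is the mirror image; the concrete protobuf message class must match whatever was dumped (the name below is a placeholder):

    msg = SomeMessage()  # placeholder; use the real generated message class
    with open('dump_1.dump', 'rb') as f:
        msg.ParseFromString(f.read())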
<SYSTEM_TASK:>
Get a player attribute that depends on which side the player is on.
<END_TASK>
<USER_TASK:>
Description:
def get_side_attr(attr, invert, player):
"""
Get a player attribute that depends on which side the player is on.
A creep kill for a radiant hero is a badguy_kill, while a creep kill
for a dire hero is a goodguy_kill.
""" |
t = player.team
if invert:
t = not player.team
return getattr(player, "%s_%s" % ("goodguy" if t else "badguy", attr)) |
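A minimal sanity check with a stand-in player object (attribute names follow the goodguy_/badguy_ pattern used above):

    from types import SimpleNamespace

    p = SimpleNamespace(team=0, badguy_kill=12, goodguy_kill=3)
    assert get_side_attr("kill", False, p) == 12  # radiant side
    assert get_side_attr("kill", True, p) == 3    # same stat, inverted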
<SYSTEM_TASK:>
The chat messages that arrive when certain events occur.
<END_TASK>
<USER_TASK:>
Description:
def parse_dota_um(self, event):
"""
The chat messages that arrive when certain events occur.
The most useful ones are CHAT_MESSAGE_RUNE_PICKUP,
CHAT_MESSAGE_RUNE_BOTTLE, CHAT_MESSAGE_GLYPH_USED,
CHAT_MESSAGE_TOWER_KILL
""" |
if event.type == dota_usermessages_pb2.CHAT_MESSAGE_AEGIS:
self.aegis.append((self.tick, event.playerid_1)) |
<SYSTEM_TASK:>
Parse a PlayerInfo struct. This arrives before a FileInfo message
<END_TASK>
<USER_TASK:>
Description:
def parse_player_info(self, player):
"""
Parse a PlayerInfo struct. This arrives before a FileInfo message
""" |
if not player.ishltv:
self.player_info[player.name] = {
"user_id": player.userID,
"guid": player.guid,
"bot": player.fakeplayer,
} |
<SYSTEM_TASK:>
The CDemoFileInfo contains our winners as well as the length of the
<END_TASK>
<USER_TASK:>
Description:
def parse_file_info(self, file_info):
"""
The CDemoFileInfo contains our winners as well as the length of the
demo
""" |
self.info["playback_time"] = file_info.playback_time
self.info["match_id"] = file_info.game_info.dota.match_id
self.info["game_mode"] = file_info.game_info.dota.game_mode
self.info["game_winner"] = file_info.game_info.dota.game_winner
for index, player in enumerate(file_info.game_info.dota.player_info):
p = self.heroes[player.hero_name]
p.name = player.player_name
p.index = index
p.team = 0 if index < 5 else 1
self.indexed_players[index] = p
self.info["players"][player.player_name] = p |
<SYSTEM_TASK:>
Game events contain the combat log as well as 'chase_hero' events which
<END_TASK>
<USER_TASK:>
Description:
def parse_game_event(self, ge):
"""
Game events contain the combat log as well as 'chase_hero' events which
could be interesting
""" |
if ge.name == "dota_combatlog":
if ge.keys["type"] == 4:
# Something died
try:
source = self.dp.combat_log_names.get(ge.keys["sourcename"],
"unknown")
target = self.dp.combat_log_names.get(ge.keys["targetname"],
"unknown")
target_illusion = ge.keys["targetillusion"]
timestamp = ge.keys["timestamp"]
if (target.startswith("npc_dota_hero") and not
target_illusion):
self.kills.append({
"target": target,
"source": source,
"timestamp": timestamp,
"tick": self.tick,
})
elif source.startswith("npc_dota_hero"):
self.heroes[source].creep_kill(target, timestamp)
except KeyError:
"""
Sometimes we get combat log entries for names that are missing
from combat_log_names. One theory is that the server sends
incremental updates to the string table via
CSVCMsg_UpdateStringTable, which this parser does not yet
handle.
"""
pass |