text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Parses the TLS directory.
<END_TASK>
<USER_TASK:>
Description:
def _parseTlsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the TLS directory at the given RVA.

    @type rva: int
    @param rva: RVA where the TLS directory begins.
    @type size: int
    @param size: Size in bytes of the TLS directory.
    @type magic: int
    @param magic: (Optional) PE type, either L{consts.PE32} or L{consts.PE64}.
    @rtype: L{TLSDirectory}
    @return: The parsed directory; a L{TLSDirectory64} for PE64 files.
    @raise InvalidParameterException: If C{magic} is not a known PE type.
    """
    reader = utils.ReadData(self.getDataAtRva(rva, size))
    if magic == consts.PE32:
        return directories.TLSDirectory.parse(reader)
    if magic == consts.PE64:
        return directories.TLSDirectory64.parse(reader)
    raise excep.InvalidParameterException("Wrong magic")
<SYSTEM_TASK:>
Parses the relocation directory.
<END_TASK>
<USER_TASK:>
Description:
def _parseRelocsDirectory(self, rva, size, magic = consts.PE32):
    """
    Parses the base relocation directory.

    @type rva: int
    @param rva: RVA where the relocation directory begins.
    @type size: int
    @param size: Size in bytes of the relocation directory.
    @type magic: int
    @param magic: (Optional) PE type, L{consts.PE32} or L{consts.PE64}.
    @rtype: L{ImageBaseRelocation}
    @return: A new L{ImageBaseRelocation} holding every parsed entry.
    """
    reader = utils.ReadData(self.getDataAtRva(rva, size))
    relocations = directories.ImageBaseRelocation()
    # Entries are packed back to back; consume until the directory ends.
    while reader.offset < size:
        relocations.append(directories.ImageBaseRelocationEntry.parse(reader))
    return relocations
<SYSTEM_TASK:>
Netmiko is being used to push set commands.
<END_TASK>
<USER_TASK:>
Description:
def _send_merge_commands(self, config, file_config):
    """
    Netmiko is being used to push set commands.

    Saves a backup of the running config before the first load and opens
    the SSH channel on demand.

    :param config: Set commands, as a single string or a list of lines.
    :param file_config: True when ``config`` was read from a file (split
        on newlines); False when passed directly (split on whitespace).
    :raises MergeConfigException: If the backup config could not be stored.
    """
    if self.loaded is False:
        if self._save_backup() is False:
            raise MergeConfigException('Error while storing backup '
                                       'config.')
    if self.ssh_connection is False:
        self._open_ssh()
    if file_config:
        if isinstance(config, str):
            config = config.splitlines()
    else:
        if isinstance(config, str):
            # NOTE(review): splitting on whitespace would break a set
            # command containing spaces -- presumably direct callers pass
            # a list of commands here; confirm against callers.
            config = str(config).split()
    self.ssh_device.send_config_set(config)
    self.loaded = True
    self.merge_config = True
<SYSTEM_TASK:>
Netmiko is being used to obtain config diffs because pan-python
<END_TASK>
<USER_TASK:>
Description:
def compare_config(self):
    """
    Return the device configuration diff.

    Netmiko is used here because pan-python doesn't support the
    required command.
    """
    if self.ssh_connection is False:
        self._open_ssh()
    self.ssh_device.exit_config_mode()
    return self.ssh_device.send_command("show config diff").strip()
<SYSTEM_TASK:>
Netmiko is being used to commit the configuration because it takes
<END_TASK>
<USER_TASK:>
Description:
def commit_config(self):
    """
    Commit the loaded configuration via Netmiko.

    Netmiko takes better care of commit results compared to pan-python.

    :raises MergeConfigException: If committing a merged config fails.
    :raises ReplaceConfigException: If committing a replaced config fails
        or no config was loaded.
    """
    # Guard clause flattens the original if/else nesting.
    if not self.loaded:
        raise ReplaceConfigException('No config loaded.')
    if self.ssh_connection is False:
        self._open_ssh()
    try:
        self.ssh_device.commit()
        time.sleep(3)
        self.loaded = False
        self.changed = True
    except Exception as exc:
        # Bug fix: the bare ``except:`` also swallowed KeyboardInterrupt /
        # SystemExit; narrow it and chain the original cause for debugging.
        if self.merge_config:
            raise MergeConfigException('Error while commiting config') from exc
        raise ReplaceConfigException('Error while commiting config') from exc
<SYSTEM_TASK:>
Netmiko is being used to commit the rollback configuration because
<END_TASK>
<USER_TASK:>
Description:
def rollback(self):
    """
    Load and commit the backup configuration.

    Netmiko is used for the commit because it takes better care of
    results compared to pan-python.

    :raises ReplaceConfigException: If committing the backup config fails.
    """
    if self.changed:
        rollback_cmd = '<load><config><from>{0}</from></config></load>'.format(self.backup_file)
        self.device.op(cmd=rollback_cmd)
        time.sleep(5)
        if self.ssh_connection is False:
            self._open_ssh()
        try:
            self.ssh_device.commit()
            self.loaded = False
            self.changed = False
            self.merge_config = False
        except Exception as exc:
            # Bug fix: the exception was instantiated but never raised,
            # silently swallowing commit failures. Also narrow the bare
            # ``except:`` and chain the cause.
            raise ReplaceConfigException("Error while loading backup config") from exc
<SYSTEM_TASK:>
Return a refined CSG. To each polygon, a middle point is added to each edge and to the center
<END_TASK>
<USER_TASK:>
Description:
def refine(self):
    """
    Return a refined CSG. To each polygon, a middle point is added to each edge and to the center
    of the polygon
    """
    newCSG = CSG()
    for poly in self.polygons:
        verts = poly.vertices
        numVerts = len(verts)
        if numVerts == 0:
            continue
        # Centroid: average of all vertex positions.
        midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
        midNormal = None
        if verts[0].normal is not None:
            midNormal = poly.plane.normal
        midVert = Vertex(midPos, midNormal)
        # newVerts layout: [0:numVerts] original vertices,
        # [numVerts:2*numVerts] edge midpoints, [2*numVerts] the centroid.
        newVerts = verts + \
            [verts[i].interpolate(verts[(i + 1)%numVerts], 0.5) for i in range(numVerts)] + \
            [midVert]
        i = 0
        # First quad: vertex 0, its leading edge midpoint, the centroid,
        # and the trailing edge midpoint (index 2*numVerts - 1 wraps around).
        vs = [newVerts[i], newVerts[i+numVerts], newVerts[2*numVerts], newVerts[2*numVerts-1]]
        newPoly = Polygon(vs, poly.shared)
        newPoly.shared = poly.shared
        newPoly.plane = poly.plane
        newCSG.polygons.append(newPoly)
        # One quad around the centroid for each remaining original vertex.
        for i in range(1, numVerts):
            vs = [newVerts[i], newVerts[numVerts+i], newVerts[2*numVerts], newVerts[numVerts+i-1]]
            newPoly = Polygon(vs, poly.shared)
            newCSG.polygons.append(newPoly)
    return newCSG
<SYSTEM_TASK:>
Return a new CSG solid with solid and empty space switched. This solid is
<END_TASK>
<USER_TASK:>
Description:
def inverse(self):
    """
    Return a new CSG solid with solid and empty space switched. This solid
    is not modified.
    """
    csg = self.clone()
    # Bug fix: ``map`` is lazy on Python 3, so the original call never
    # actually flipped any polygon; iterate explicitly instead.
    for polygon in csg.polygons:
        polygon.flip()
    return csg
<SYSTEM_TASK:>
Load the nbt file at the specified location.
<END_TASK>
<USER_TASK:>
Description:
def load(filename, *, gzipped=None, byteorder='big'):
    """Load the nbt file at the specified location.

    When `gzipped` is None the compression is detected automatically from
    the file's magic number; pass an explicit boolean to skip detection.
    The `byteorder` keyword only argument selects little- or big-endian
    decoding.
    """
    if gzipped is not None:
        return File.load(filename, gzipped, byteorder)
    # Unknown compression: sniff the two-byte gzip magic number.
    with open(filename, 'rb') as fileobj:
        is_gzipped = fileobj.read(2) == b'\x1f\x8b'
        fileobj.seek(0)
        if is_gzipped:
            fileobj = gzip.GzipFile(fileobj=fileobj)
        return File.from_buffer(fileobj, byteorder)
<SYSTEM_TASK:>
Load nbt file from a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def from_buffer(cls, buff, byteorder='big'):
    """Load an nbt file from a file-like object.

    `buff` may be a plain binary buffer for uncompressed nbt or a
    `gzip.GzipFile` for gzipped data.
    """
    instance = cls.parse(buff, byteorder)
    instance.gzipped = isinstance(buff, gzip.GzipFile)
    instance.byteorder = byteorder
    # Keep the previous filename when the buffer has no ``name``.
    instance.filename = getattr(buff, 'name', instance.filename)
    return instance
<SYSTEM_TASK:>
Read, parse and return the file at the specified location.
<END_TASK>
<USER_TASK:>
Description:
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    `gzipped` indicates whether the file is gzip-compressed; `byteorder`
    selects big- or little-endian decoding.
    """
    opener = gzip.open if gzipped else open
    with opener(filename, 'rb') as fileobj:
        return cls.from_buffer(fileobj, byteorder)
<SYSTEM_TASK:>
Write the file at the specified location.
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename=None, *, gzipped=None, byteorder=None):
    """Write the file at the specified location.

    `gzipped` and `byteorder` select compression and endianness; when
    omitted (along with `filename`) the instance attributes are used.
    Raises `ValueError` when no filename can be determined.
    """
    gzipped = self.gzipped if gzipped is None else gzipped
    filename = self.filename if filename is None else filename
    if filename is None:
        raise ValueError('No filename specified')
    opener = gzip.open if gzipped else open
    with opener(filename, 'wb') as fileobj:
        self.write(fileobj, byteorder or self.byteorder)
<SYSTEM_TASK:>
Handle actions that need to be done with every response
<END_TASK>
<USER_TASK:>
Description:
def _do_post_request_tasks(self, response_data):
"""Handle actions that need to be done with every response
I'm not sure what these session_ops are actually used for yet, seems to
be a way to tell the client to do *something* if needed.
""" |
try:
sess_ops = response_data.get('ops', [])
except AttributeError:
pass
else:
self._session_ops.extend(sess_ops) |
<SYSTEM_TASK:>
Build a function to do an API request
<END_TASK>
<USER_TASK:>
Description:
def _build_request(self, method, url, params=None):
    """Build a function to do an API request

    "We have to go deeper" or "It's functions all the way down!"

    @param str method  name of a method on the connector ('get', 'post', ...)
    @param str url     full url to request
    @param dict params extra params merged over the base params
    @return callable   zero-argument function that performs the request and
        returns the parsed response data
    """
    full_params = self._get_base_params()
    if params is not None:
        full_params.update(params)
    try:
        request_func = lambda u, d: \
            getattr(self._connector, method.lower())(u, params=d,
                headers=self._request_headers)
    except AttributeError:
        raise ApiException('Invalid request method')
    # TODO: need to catch a network here and raise as ApiNetworkException
    def do_request():
        logger.debug('Sending %s request "%s" with params: %r',
            method, url, full_params)
        try:
            resp = request_func(url, full_params)
            logger.debug('Received response code: %d', resp.status_code)
        except requests.RequestException as err:
            raise ApiNetworkException(err)
        # requests exposes `json` as a method in newer versions and as a
        # property in older ones -- support both.
        try:
            resp_json = resp.json()
        except TypeError:
            resp_json = resp.json
        method_returns_list = False
        # Probe the response shape: a list raises TypeError on string
        # indexing, a map without an 'error' key raises KeyError.
        try:
            resp_json['error']
        except TypeError:
            logger.warn('Api method did not return map: %s', method)
            method_returns_list = True
        except KeyError:
            logger.warn('Api method did not return map with error key: %s', method)
        # NOTE(review): this branch looks unreachable -- method_returns_list
        # is initialized to False above, never None; presumably it was meant
        # to start as None so malformed responses raise. Confirm before
        # relying on the ApiBadResponseException path.
        if method_returns_list is None:
            raise ApiBadResponseException(resp.content)
        elif method_returns_list:
            data = resp_json
        else:
            try:
                if resp_json['error']:
                    raise ApiError('%s: %s' % (resp_json['code'], resp_json['message']))
            except KeyError:
                # no 'error' key at all: the whole map is the payload
                data = resp_json
            else:
                data = resp_json['data']
        self._do_post_request_tasks(data)
        self._last_response = resp
        return data
    return do_request
<SYSTEM_TASK:>
Returns the positions and colors of all intervals inside the colorbar.
<END_TASK>
<USER_TASK:>
Description:
def calculate_colorbar(self):
    """
    Return the mesh positions and colors of every interval in the colorbar.
    """
    base = self._base
    base._process_values()
    base._find_range()
    X, Y = base._mesh()
    # One color value per interval, as a column vector.
    C = base._values[:, np.newaxis]
    return X, Y, C
<SYSTEM_TASK:>
CR doesn't seem to provide the video_format and video_quality params
<END_TASK>
<USER_TASK:>
Description:
def get_media_formats(self, media_id):
    """Scrape the video page for available format/quality pairs.

    CR doesn't expose video_format and video_quality through any of the
    APIs, so the page itself has to be scraped.
    """
    url = (SCRAPER.API_URL + 'media-' + media_id).format(
        protocol=SCRAPER.PROTOCOL_INSECURE)
    pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
    formats = {}
    for fmt, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
        resp = self._connector.get(url, params={param: '1'})
        if not resp.ok:
            continue
        try:
            match = pattern.search(resp.content)
        except TypeError:
            # resp.content is bytes on py3; fall back to the decoded text.
            match = pattern.search(resp.text)
        if match:
            formats[fmt] = (int(match.group(1)), int(match.group(2)))
    return formats
<SYSTEM_TASK:>
Parse a literal nbt string and return the resulting tag.
<END_TASK>
<USER_TASK:>
Description:
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    tag = parser.parse()
    cursor = parser.token_span[1]
    trailing = literal[cursor:]
    # Anything non-blank after the parsed tag is a syntax error.
    if trailing.strip():
        parser.token_span = cursor, cursor + len(trailing)
        raise parser.error(f'Expected end of string but got {trailing!r}')
    return tag
<SYSTEM_TASK:>
Match and yield all the tokens of the input string.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(string):
    """Match and yield all the tokens of the input string."""
    for match in TOKENS_REGEX.finditer(string):
        token_type, span = match.lastgroup, match.span()
        yield Token(token_type, match.group().strip(), span)
<SYSTEM_TASK:>
Move to the next token in the token stream.
<END_TASK>
<USER_TASK:>
Description:
def next(self):
    """Advance to the next token in the token stream.

    Raises the parser error if the stream is exhausted; returns ``self``
    so calls can be chained.
    """
    self.current_token = next(self.token_stream, None)
    if self.current_token is None:
        # Collapse the span to the end of the previous token.
        end = self.token_span[1]
        self.token_span = end, end
        raise self.error('Unexpected end of input')
    self.token_span = self.current_token.span
    return self
<SYSTEM_TASK:>
Parse and return an nbt literal from the token stream.
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
    """Parse and return an nbt literal from the token stream."""
    kind = self.current_token.type.lower()
    # Dispatch to parse_number / parse_string / ... based on token type.
    handler = getattr(self, 'parse_' + kind, None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
<SYSTEM_TASK:>
Parse a number from the token stream.
<END_TASK>
<USER_TASK:>
Description:
def parse_number(self):
    """Parse a number from the token stream."""
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        if '.' in value:
            return Double(value)
        return Int(value)
    except (OutOfRange, ValueError):
        # Not a valid number after all; fall back to a plain string tag.
        return String(value)
<SYSTEM_TASK:>
Parse a regular unquoted string from the token stream.
<END_TASK>
<USER_TASK:>
Description:
def parse_string(self):
    """Parse a regular unquoted string from the token stream."""
    value = self.current_token.value
    # true/false and friends map to their aliased tags.
    alias = LITERAL_ALIASES.get(value.lower())
    return String(value) if alias is None else alias
<SYSTEM_TASK:>
Parse and yield array items from the token stream.
<END_TASK>
<USER_TASK:>
Description:
def array_items(self, number_type, *, number_suffix=''):
    """Parse and yield array items from the token stream."""
    for token in self.collect_tokens_until('CLOSE_BRACKET'):
        literal = token.value.lower()
        # Every element must be a number carrying the expected suffix.
        if token.type != 'NUMBER' or not literal.endswith(number_suffix):
            raise self.error(f'Invalid {number_type} array element '
                             f'{token.value!r}')
        yield int(literal.replace(number_suffix, ''))
<SYSTEM_TASK:>
Parse a list from the token stream.
<END_TASK>
<USER_TASK:>
Description:
def parse_list(self):
    """Parse a list from the token stream."""
    items = []
    try:
        for _ in self.collect_tokens_until('CLOSE_BRACKET'):
            items.append(self.parse())
        return List(items)
    except IncompatibleItemType as exc:
        # Rephrase the tag-level error as a parser error at this position.
        raise self.error(f'Item {str(exc.item)!r} is not a '
                         f'{exc.subtype.__name__} tag') from None
<SYSTEM_TASK:>
Return the unquoted value of a quoted string.
<END_TASK>
<USER_TASK:>
Description:
def unquote_string(self, string):
    """Return the unquoted value of a quoted string."""
    value = string[1:-1]
    # The escape of the *other* quote style is forbidden inside this one.
    forbidden = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
    allowed = set(ESCAPE_SEQUENCES) - forbidden
    for sequence in ESCAPE_REGEX.findall(value):
        if sequence not in allowed:
            raise self.error(f'Invalid escape sequence "{sequence}"')
    for sequence, substitution in ESCAPE_SEQUENCES.items():
        value = value.replace(sequence, substitution)
    return value
<SYSTEM_TASK:>
Returns a function that will open a file in a zipfile by name.
<END_TASK>
<USER_TASK:>
Description:
def opener_from_zipfile(zipfile):
    """
    Return a function that opens a named member of ``zipfile``.

    On Python 3 the raw binary member is wrapped so it reads as text.
    """
    def opener(filename):
        member = zipfile.open(filename)
        if not PY3:
            return member
        from io import TextIOWrapper
        return TextIOWrapper(member)
    return opener
<SYSTEM_TASK:>
Serialize an nbt tag to its literal representation.
<END_TASK>
<USER_TASK:>
Description:
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
    """Serialize an nbt tag to its literal representation."""
    return Serializer(indent=indent, compact=compact, quote=quote).serialize(tag)
<SYSTEM_TASK:>
Increase the level of indentation by one.
<END_TASK>
<USER_TASK:>
Description:
def depth(self):
    """Context manager body that raises the indentation level by one.

    When indentation is disabled this is a no-op.
    """
    if self.indentation is None:
        yield
    else:
        saved = self.previous_indent
        self.previous_indent = self.indent
        self.indent += self.indentation
        yield
        # Restore both levels on the way out.
        self.indent = self.previous_indent
        self.previous_indent = saved
<SYSTEM_TASK:>
Return whether the specified tag should be expanded.
<END_TASK>
<USER_TASK:>
Description:
def should_expand(self, tag):
    """Return whether the given tag should be rendered over several lines."""
    if self.indentation is None or not tag:
        return False
    if not self.previous_indent:
        # Top-level tags always expand.
        return True
    if tag.serializer == 'compound':
        return True
    # Lists expand only when their elements are containers themselves.
    return (tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound'))
<SYSTEM_TASK:>
Return the escaped literal representation of an nbt string.
<END_TASK>
<USER_TASK:>
Description:
def escape_string(self, string):
    """Return the escaped literal representation of an nbt string."""
    quote = self.quote
    if not quote:
        # Pick the quote style not already present in the string, falling
        # back to the default (first) style.
        found = QUOTE_REGEX.search(string)
        quote = STRING_QUOTES[found.group()] if found else next(iter(STRING_QUOTES))
    for character, escape in ESCAPE_SUBS.items():
        if character == quote or character not in STRING_QUOTES:
            string = string.replace(character, escape)
    return f'{quote}{string}{quote}'
<SYSTEM_TASK:>
Escape the compound key if it can't be represented unquoted.
<END_TASK>
<USER_TASK:>
Description:
def stringify_compound_key(self, key):
    """Quote and escape the compound key unless it is valid unquoted."""
    return key if UNQUOTED_COMPOUND_KEY.match(key) else self.escape_string(key)
<SYSTEM_TASK:>
Return the literal representation of a tag.
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, tag):
    """Return the literal representation of a tag."""
    # Dispatch on the tag's serializer name (numeric, array, list, ...).
    handler = getattr(self, f'serialize_{tag.serializer}', None)
    if handler is None:
        raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
    return handler(tag)
<SYSTEM_TASK:>
Return the literal representation of a numeric tag.
<END_TASK>
<USER_TASK:>
Description:
def serialize_numeric(self, tag):
    """Return the literal representation of a numeric tag."""
    # Call the base-type __str__ so numeric subclasses can't override it.
    if isinstance(tag, int):
        literal = int.__str__(tag)
    else:
        literal = float.__str__(tag)
    return literal + tag.suffix
<SYSTEM_TASK:>
Return the literal representation of an array tag.
<END_TASK>
<USER_TASK:>
Description:
def serialize_array(self, tag):
    """Return the literal representation of an array tag."""
    items = (f'{element}{tag.item_suffix}' for element in tag)
    body = self.comma.join(items)
    return f'[{tag.array_prefix}{self.semicolon}{body}]'
<SYSTEM_TASK:>
Return the literal representation of a list tag.
<END_TASK>
<USER_TASK:>
Description:
def serialize_list(self, tag):
    """Return the literal representation of a list tag."""
    separator, fmt = self.comma, '[{}]'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        content = separator.join(self.serialize(item) for item in tag)
        return fmt.format(content)
<SYSTEM_TASK:>
Return the literal representation of a compound tag.
<END_TASK>
<USER_TASK:>
Description:
def serialize_compound(self, tag):
    """Return the literal representation of a compound tag."""
    separator, fmt = self.comma, '{{{}}}'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        pairs = (
            f'{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}'
            for key, value in tag.items()
        )
        return fmt.format(separator.join(pairs))
<SYSTEM_TASK:>
Turn an AndroidApi's method into a function that builds the request,
<END_TASK>
<USER_TASK:>
Description:
def make_android_api_method(req_method, secure=True, version=0):
    """Turn an AndroidApi's method into a function that builds the request,
    sends it, then passes the response to the actual method. Should be used
    as a decorator.

    @param str req_method  HTTP verb used for the request (e.g. 'GET')
    @param bool secure     whether to build a secure (https) request url
    @param int version     API version used when building the request url
    @return callable       decorator for AndroidApi methods
    """
    import functools  # local import: keeps this block self-contained
    def outer_func(func):
        # Improvement: preserve the wrapped method's name/docstring so
        # introspection and logging see the real API method.
        @functools.wraps(func)
        def inner_func(self, **kwargs):
            req_url = self._build_request_url(secure, func.__name__, version)
            req_func = self._build_request(req_method, req_url, params=kwargs)
            response = req_func()
            # Let the wrapped method post-process, but return the raw data.
            func(self, response)
            return response
        return inner_func
    return outer_func
<SYSTEM_TASK:>
Get the params that will be included with every request
<END_TASK>
<USER_TASK:>
Description:
def _get_base_params(self):
    """Build the params dict that is included with every request."""
    params = {
        'locale': self._get_locale(),
        'device_id': ANDROID.DEVICE_ID,
        'device_type': ANDROID.APP_PACKAGE,
        'access_token': ANDROID.ACCESS_TOKEN,
        'version': ANDROID.APP_CODE,
    }
    # Overlay any populated state params (session id, auth token, ...).
    for key, value in iteritems(self._state_params):
        if value is not None:
            params[key] = value
    return params
<SYSTEM_TASK:>
Get if the session is premium for a given media type
<END_TASK>
<USER_TASK:>
Description:
def is_premium(self, media_type):
    """Check whether the session has premium access for a media type.

    @param str media_type  Should be one of ANDROID.MEDIA_TYPE_*
    @return bool
    """
    if not self.logged_in:
        # Bug fix: the original fell off the end and returned None here;
        # always return an actual bool as documented.
        return False
    return media_type in self._user_data['premium']
<SYSTEM_TASK:>
Read a numeric value from a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def read_numeric(fmt, buff, byteorder='big'):
    """Read a numeric value from a file-like object."""
    try:
        struct_fmt = fmt[byteorder]
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
    try:
        return struct_fmt.unpack(buff.read(struct_fmt.size))[0]
    except StructError:
        # Truncated buffer: treat missing data as zero.
        return 0
<SYSTEM_TASK:>
Write a numeric value to a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def write_numeric(fmt, value, buff, byteorder='big'):
    """Write a numeric value to a file-like object."""
    try:
        packed = fmt[byteorder].pack(value)
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
    buff.write(packed)
<SYSTEM_TASK:>
Read a string from a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def read_string(buff, byteorder='big'):
    """Read a length-prefixed utf-8 string from a file-like object."""
    length = read_numeric(USHORT, buff, byteorder)
    data = buff.read(length)
    return data.decode('utf-8')
<SYSTEM_TASK:>
Write a string to a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def write_string(value, buff, byteorder='big'):
    """Write a length-prefixed utf-8 string to a file-like object."""
    encoded = value.encode('utf-8')
    write_numeric(USHORT, len(encoded), buff, byteorder)
    buff.write(encoded)
<SYSTEM_TASK:>
Infer a list subtype from a collection of items.
<END_TASK>
<USER_TASK:>
Description:
def infer_list_subtype(items):
    """Infer a list subtype from a collection of items."""
    subtype = End
    for item in items:
        item_type = type(item)
        if not issubclass(item_type, Base):
            # Non-tag values can't drive inference.
            continue
        if subtype is End:
            subtype = item_type
            if not issubclass(subtype, List):
                # A concrete, non-list subtype settles the answer.
                return subtype
        elif subtype is not item_type:
            # Two different list types: walk down the nested List levels
            # they have in common to find where they diverge.
            stype, itype = subtype, item_type
            generic = List
            while issubclass(stype, List) and issubclass(itype, List):
                stype, itype = stype.subtype, itype.subtype
                generic = List[generic]
            if stype is End:
                # The current subtype was the shorter (untyped) nesting;
                # adopt the more specific one.
                subtype = item_type
            elif itype is not End:
                # Both diverge to concrete types: fall back to the generic
                # nested list type at the divergence depth.
                return generic.subtype
    return subtype
<SYSTEM_TASK:>
Cast list item to the appropriate tag type.
<END_TASK>
<USER_TASK:>
Description:
def cast_item(cls, item):
    """Cast list item to the appropriate tag type."""
    if not isinstance(item, cls.subtype):
        # Only a value that is already a tag of an unrelated type is truly
        # incompatible; plain python values get converted below.
        incompatible = isinstance(item, Base) and not any(
            issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
            for tag_type in cls.all_tags.values()
        )
        if incompatible:
            raise IncompatibleItemType(item, cls.subtype)
        try:
            return cls.subtype(item)
        except EndInstantiation:
            raise ValueError('List tags without an explicit subtype must '
                             'either be empty or instantiated with '
                             'elements from which a subtype can be '
                             'inferred') from None
        except (IncompatibleItemType, CastError):
            # Already meaningful errors; let them propagate untouched.
            raise
        except Exception as exc:
            # Anything else means the conversion itself failed.
            raise CastError(item, cls.subtype) from exc
    return item
<SYSTEM_TASK:>
Recursively merge tags from another compound.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other):
    """Recursively merge tags from another compound."""
    for key, value in other.items():
        mergeable = (
            key in self
            and isinstance(self[key], Compound)
            and isinstance(value, dict)
        )
        if mergeable:
            # Both sides are mappings: recurse instead of overwriting.
            self[key].merge(value)
        else:
            self[key] = value
<SYSTEM_TASK:>
Decrypt encrypted subtitle data in high level model object
<END_TASK>
<USER_TASK:>
Description:
def decrypt_subtitle(self, subtitle):
    """Decrypt the encrypted data of a subtitle model object.

    @param crunchyroll.models.Subtitle subtitle
    @return str
    """
    key = self._build_encryption_key(int(subtitle.id))
    iv = subtitle['iv'][0].text.decode('base64')
    data = subtitle['data'][0].text.decode('base64')
    return self.decrypt(key, iv, data)
<SYSTEM_TASK:>
Decrypt encrypted subtitle data
<END_TASK>
<USER_TASK:>
Description:
def decrypt(self, encryption_key, iv, encrypted_data):
    """Decrypt and decompress encrypted subtitle data.

    @param str encryption_key
    @param str iv
    @param str encrypted_data
    @return str
    """
    logger.info('Decrypting subtitles with length (%d bytes), key=%r',
                len(encrypted_data), encryption_key)
    decrypted = aes_decrypt(encryption_key, iv, encrypted_data)
    # The plaintext is zlib-compressed xml.
    return zlib.decompress(decrypted)
<SYSTEM_TASK:>
Generate the encryption key for a given media item
<END_TASK>
<USER_TASK:>
Description:
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE):
    """Generate the encryption key for a given media item
    Encryption key is basically just
    sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then
    padded with 0s to 32 chars
    @param int subtitle_id
    @param int key_size
    @return str
    """
    # generate a 160-bit SHA1 hash
    sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) +
        self._build_hash_magic(subtitle_id)).digest()
    # pad to 256-bit hash for 32 byte key
    # NOTE(review): the str concatenation of digest() output with '\x00'
    # padding only works on Python 2 (bytes vs str on py3) -- confirm the
    # module is py2-only before porting.
    sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0)
    return sha1_hash[:key_size]
<SYSTEM_TASK:>
Build the other half of the encryption key hash
<END_TASK>
<USER_TASK:>
Description:
def _build_hash_magic(self, subtitle_id):
"""Build the other half of the encryption key hash
I have no idea what is going on here
@param int subtitle_id
@return str
""" |
media_magic = self.HASH_MAGIC_CONST ^ subtitle_id
hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32
return str(hash_magic) |
<SYSTEM_TASK:>
Build a seed for the hash based on the Fibonacci sequence
<END_TASK>
<USER_TASK:>
Description:
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH,
                       mod_value=HASH_SECRET_MOD_CONST):
    """Build a hash seed from a Fibonacci-like sequence.

    Extends `seq_seed` with `seq_len` Fibonacci terms, then maps each term
    (skipping the first two) through
    ``chr(term % mod_value + HASH_SECRET_CHAR_OFFSET)``.

    @param tuple|list seq_seed
    @param int seq_len
    @param int mod_value
    @return str
    """
    # Work on a list copy; the seed may be an immutable tuple.
    sequence = list(seq_seed)
    for _ in range(seq_len):
        sequence.append(sequence[-1] + sequence[-2])
    offset = self.HASH_SECRET_CHAR_OFFSET
    return ''.join(chr(term % mod_value + offset) for term in sequence[2:])
<SYSTEM_TASK:>
Turn a string containing the subs xml document into the formatted
<END_TASK>
<USER_TASK:>
Description:
def format(self, subtitles):
    """Render a subtitle xml document into the formatted subtitle string.

    @param str|crunchyroll.models.StyledSubtitle subtitles
    @return str
    """
    logger.debug('Formatting subtitles (id=%s) with %s',
                 subtitles.id, self.__class__.__name__)
    formatted = self._format(subtitles)
    return formatted.encode('utf-8')
<SYSTEM_TASK:>
Check if API sessions are started and start them if not
<END_TASK>
<USER_TASK:>
Description:
def require_session_started(func):
    """Decorator that starts the API session before calling ``func``."""
    @functools.wraps(func)
    def wrapper(self, *pargs, **kwargs):
        if not self.session_started:
            logger.info('Starting session for required meta method')
            self.start_session()
        return func(self, *pargs, **kwargs)
    return wrapper
<SYSTEM_TASK:>
Check if andoid manga API is logged in and login if credentials were provided,
<END_TASK>
<USER_TASK:>
Description:
def optional_manga_logged_in(func):
    """Decorator that logs into the android manga API when credentials exist.

    Implies `require_session_started`.
    """
    @functools.wraps(func)
    @require_session_started
    def wrapper(self, *pargs, **kwargs):
        if not self._manga_api.logged_in and self.has_credentials:
            logger.info('Logging into android manga API for optional meta method')
            self._manga_api.cr_login(account=self._state['username'],
                                     password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return wrapper
<SYSTEM_TASK:>
Check if ajax API is logged in and login if not
<END_TASK>
<USER_TASK:>
Description:
def require_ajax_logged_in(func):
    """Decorator ensuring the ajax API is logged in before calling ``func``.

    Raises ApiLoginFailure when a login is needed but no credentials
    were provided.
    """
    @functools.wraps(func)
    def wrapper(self, *pargs, **kwargs):
        if not self._ajax_api.logged_in:
            logger.info('Logging into AJAX API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure(
                    'Login is required but no credentials were provided')
            self._ajax_api.User_Login(name=self._state['username'],
                                      password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return wrapper
<SYSTEM_TASK:>
Start the underlying APIs sessions
<END_TASK>
<USER_TASK:>
Description:
def start_session(self):
    """Start the underlying API sessions.

    Calling this directly is never required -- any method that needs a
    session starts one automatically.

    @return bool
    """
    self._android_api.start_session()
    self._manga_api.cr_start_session()
    return self.session_started
<SYSTEM_TASK:>
Get a list of anime series
<END_TASK>
<USER_TASK:>
Description:
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
    """List anime series.

    @param str sort    ordering of the results, one of META.SORT_*
    @param int limit   maximum number of series to return; there doesn't
                       seem to be an upper bound
    @param int offset  index to start from, for pagination
    @return list<crunchyroll.models.Series>
    """
    return self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=sort,
        limit=limit,
        offset=offset)
<SYSTEM_TASK:>
Get a list of drama series
<END_TASK>
<USER_TASK:>
Description:
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
    """List drama series.

    @param str sort    ordering of the results, one of META.SORT_*
    @param int limit   maximum number of series to return; there doesn't
                       seem to be an upper bound
    @param int offset  index to start from, for pagination
    @return list<crunchyroll.models.Series>
    """
    return self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=sort,
        limit=limit,
        offset=offset)
<SYSTEM_TASK:>
Get a list of manga series
<END_TASK>
<USER_TASK:>
Description:
def list_manga_series(self, filter=None, content_type='jp_manga'):
    """List manga series.

    @return list<crunchyroll.models.Series>
    """
    return self._manga_api.list_series(filter, content_type)
<SYSTEM_TASK:>
Search anime series list by series name, case-sensitive
<END_TASK>
<USER_TASK:>
Description:
def search_anime_series(self, query_string):
    """Search anime series by name prefix, case-sensitive.

    Only the start of the series name is matched: "space" matches
    "Space Brothers" but not "Brothers Space".

    @param str query_string
    @return list<crunchyroll.models.Series>
    """
    return self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=ANDROID.FILTER_PREFIX + query_string)
<SYSTEM_TASK:>
Search drama series list by series name, case-sensitive
<END_TASK>
<USER_TASK:>
Description:
def search_drama_series(self, query_string):
    """Search drama series by name prefix, case-sensitive.

    Only the start of the series name is matched: "space" matches
    "Space Brothers" but not "Brothers Space".

    @param str query_string
    @return list<crunchyroll.models.Series>
    """
    return self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=ANDROID.FILTER_PREFIX + query_string)
<SYSTEM_TASK:>
Search the manga series list by name, case-insensitive
<END_TASK>
<USER_TASK:>
Description:
def search_manga_series(self, query_string):
    """Search the manga series list by name, case-insensitive prefix match.

    Filtering happens client-side over the full series list.

    @param str query_string
    @return list<crunchyroll.models.Series>
    """
    needle = query_string.lower()
    matches = []
    for series in self._manga_api.list_series():
        if series['locale']['enUS']['name'].lower().startswith(needle):
            matches.append(series)
    return matches
<SYSTEM_TASK:>
List media for a given series or collection
<END_TASK>
<USER_TASK:>
Description:
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA, offset=0):
    """List media for a given series or collection.

    @param crunchyroll.models.Series series  the series to list media for
    @param str sort    result ordering; only META.SORT_DESC is known to work
    @param int limit   maximum number of results
    @param int offset  start results from this index, for pagination
    @return list<crunchyroll.models.Media>
    """
    # Series/collection identifiers are merged last so they take
    # precedence over the paging parameters on any key collision.
    params = {
        **{'sort': sort, 'offset': offset, 'limit': limit},
        **self._get_series_query_dict(series),
    }
    return self._android_api.list_media(**params)
<SYSTEM_TASK:>
Search for media from a series starting with query_string, case-sensitive
<END_TASK>
<USER_TASK:>
Description:
def search_media(self, series, query_string):
    """Search for media in a series whose name starts with query_string.

    Case-sensitive, same prefix-match restrictions as
    `search_anime_series`.

    @param crunchyroll.models.Series series  the series to search in
    @param str query_string                  the search query
    @return list<crunchyroll.models.Media>
    """
    params = {
        **{'sort': ANDROID.FILTER_PREFIX + query_string},
        **self._get_series_query_dict(series),
    }
    return self._android_api.list_media(**params)
<SYSTEM_TASK:>
Get the stream data for a given media item
<END_TASK>
<USER_TASK:>
Description:
def get_media_stream(self, media_item, format, quality):
    """Fetch the stream data for a given media item.

    @param crunchyroll.models.Media media_item
    @param int format   video format constant
    @param int quality  video quality constant
    @return crunchyroll.models.MediaStream
    """
    config = self._ajax_api.VideoPlayer_GetStandardConfig(
        media_id=media_item.media_id,
        video_format=format,
        video_quality=quality)
    return MediaStream(config)
<SYSTEM_TASK:>
Turn a SubtitleStub into a full Subtitle object
<END_TASK>
<USER_TASK:>
Description:
def unfold_subtitle_stub(self, subtitle_stub):
    """Expand a SubtitleStub into a full Subtitle object.

    @param crunchyroll.models.SubtitleStub subtitle_stub
    @return crunchyroll.models.Subtitle
    """
    script_id = int(subtitle_stub.id)
    xml = self._ajax_api.Subtitle_GetXml(subtitle_script_id=script_id)
    return Subtitle(xml)
<SYSTEM_TASK:>
Get the available media formats for a given media item
<END_TASK>
<USER_TASK:>
Description:
def get_stream_formats(self, media_item):
    """Return the available media formats for a given media item.

    Scraped from the site rather than fetched through the official API.

    @param crunchyroll.models.Media media_item
    @return dict
    """
    scraper = ScraperApi(self._ajax_api._connector)
    return scraper.get_media_formats(media_item.media_id)
<SYSTEM_TASK:>
List the series in the queue, optionally filtering by type of media
<END_TASK>
<USER_TASK:>
Description:
def list_queue(self, media_types=(META.TYPE_ANIME, META.TYPE_DRAMA)):
    """List the series in the queue, optionally filtering by type of media.

    @param media_types  iterable of media type strings (META.TYPE_*) used
                        to filter the queue; defaults to anime + drama
    @return list<crunchyroll.models.Series>
    """
    # The default was a mutable list literal, which is shared across all
    # calls and could be mutated by a caller; a tuple is safe and works
    # identically with str.join below.
    result = self._android_api.queue(media_types='|'.join(media_types))
    return [queue_item['series'] for queue_item in result]
<SYSTEM_TASK:>
Add a series to the queue
<END_TASK>
<USER_TASK:>
Description:
def add_to_queue(self, series):
    """Add a series to the user's queue.

    @param crunchyroll.models.Series series
    @return bool
    """
    return self._android_api.add_to_queue(series_id=series.series_id)
<SYSTEM_TASK:>
Remove a series from the queue
<END_TASK>
<USER_TASK:>
Description:
def remove_from_queue(self, series):
    """Remove a series from the user's queue.

    @param crunchyroll.models.Series series
    @return bool
    """
    return self._android_api.remove_from_queue(series_id=series.series_id)
<SYSTEM_TASK:>
Create a compound tag schema.
<END_TASK>
<USER_TASK:>
Description:
def schema(name, dct, *, strict=False):
    """Create a compound tag schema class.

    Convenience wrapper that subclasses the base `CompoundSchema`.

    @param name    name of the generated class
    @param dct     mapping of keys to tag types or nested compound schemas
    @param strict  when True, interacting with keys not declared in the
                   schema raises a `TypeError`
    """
    namespace = {'__slots__': (), 'schema': dct, 'strict': strict}
    return type(name, (CompoundSchema,), namespace)
<SYSTEM_TASK:>
Cast schema item to the appropriate tag type.
<END_TASK>
<USER_TASK:>
Description:
def cast_item(cls, key, value):
    """Cast a schema item to the tag type declared for ``key``.

    Unknown keys raise ``TypeError`` in strict mode and pass through
    unchanged otherwise; values already of the declared type are
    returned as-is.  Failed conversions are reported as ``CastError``.
    """
    expected = cls.schema.get(key)
    if expected is None:
        if cls.strict:
            raise TypeError(f'Invalid key {key!r}')
        return value
    if isinstance(value, expected):
        return value
    try:
        return expected(value)
    except CastError:
        raise
    except Exception as exc:
        raise CastError(value, expected) from exc
<SYSTEM_TASK:>
Tars and bzips a directory, preserving as much metadata as possible.
<END_TASK>
<USER_TASK:>
Description:
def tarbz(source_directory_path, output_file_full_path, silent=False):
    """Tar and bzip2 a directory, preserving as much metadata as possible.

    '.tbz' is appended to the provided output file name; the finished
    archive path is returned.  Raises if the archive already exists.
    """
    archive_path = output_file_full_path + ".tbz"
    create_folders(output_file_full_path.rsplit("/", 1)[0])
    # bzip2's default compression level is already -9 (highest).
    if path.exists(archive_path):
        raise Exception("%s already exists, aborting." % (archive_path))
    # j: bzip2, p: preserve permissions, c: create, f: archive file,
    # v: verbose, C: cd into the source dir before archiving "./".
    # tar is the only program in the universe to take (destination, source).
    call("tar jpcfvC %s %s %s" % (archive_path, source_directory_path, "./"),
         silent=silent)
    return archive_path
<SYSTEM_TASK:>
Restores your mongo database backup from a .tbz created using this library.
<END_TASK>
<USER_TASK:>
Description:
def untarbz(source_file_path, output_directory_path, silent=False):
    """Restore a mongo database backup from a .tbz created by this library.

    Ensures a directory exists at the output path (creating it if needed)
    and extracts the archive directly into it.  Fails if the output
    directory is not empty, as existing files with identical names are
    not overwritten by tar.

    @param source_file_path: path to the .tbz archive
    @param output_directory_path: directory to extract into; must start
        with '/' or './'
    @param silent: suppress subprocess output when True
    """
    if not path.exists(source_file_path):
        raise Exception("the provided tar file %s does not exist." % (source_file_path))
    # BUG FIX: the original compared the single-character slice [0:1]
    # against the two-character string "./", which is always False, so
    # relative paths were never converted to absolute ones.
    if output_directory_path.startswith("./"):
        output_directory_path = path.abspath(output_directory_path)
    # startswith also handles the empty string, which previously raised
    # IndexError instead of the intended error message.
    if not output_directory_path.startswith("/"):
        raise Exception("your output directory path must start with '/' or './'; you used: %s"
                        % (output_directory_path))
    create_folders(output_directory_path)
    if listdir(output_directory_path):
        # Typo fix in the message: "exiting" -> "existing".
        raise Exception("Your output directory isn't empty. Aborting as "
                        + "existing files are not overwritten by tar.")
    untar_command = ("tar jxfvkCp %s %s --atime-preserve " %
                     (source_file_path, output_directory_path))
    call(untar_command, silent=silent)
<SYSTEM_TASK:>
Determine if any of the items in the value list for the given
<END_TASK>
<USER_TASK:>
Description:
def value_contains(self, value, attribute):
    """Return True when any item in this entry's ``attribute`` value
    list contains ``value`` as a substring."""
    return any(value in item for item in self[attribute])
<SYSTEM_TASK:>
Clear all search defaults specified by the list of parameter names
<END_TASK>
<USER_TASK:>
Description:
def clear_search_defaults(self, args=None):
    """Clear the search defaults named in ``args``.

    When ``args`` is None, every existing search default is removed.

    Examples::
        conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
        conn.clear_search_defaults(['scope'])
        conn.clear_search_defaults()
    """
    if args is None:
        self._search_defaults.clear()
        return
    for name in args:
        # pop with a default silently ignores names that are not set
        self._search_defaults.pop(name, None)
<SYSTEM_TASK:>
Get a single object.
<END_TASK>
<USER_TASK:>
Description:
def get(self, *args, **kwargs):
    """Fetch exactly one object.

    Convenience wrapper around `search` that returns the single match
    instead of a list.  Takes the exact same arguments as search.

    Raises MultipleObjectsFound when more than one object matches and
    ObjectNotFound when nothing matches.
    """
    results = self.search(*args, **kwargs)
    if len(results) > 1:
        raise MultipleObjectsFound()
    if not results:
        raise ObjectNotFound()
    return results[0]
<SYSTEM_TASK:>
Attempt to authenticate given dn and password using a bind operation.
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, dn='', password=''):
    """Try to authenticate ``dn``/``password`` with a bind operation.

    Returns True on a successful bind and False when an exception listed
    in ``self.failed_authentication_exceptions`` is raised.
    """
    try:
        self.connection.simple_bind_s(dn, password)
    except tuple(self.failed_authentication_exceptions):
        return False
    return True
<SYSTEM_TASK:>
Compare the ``attr`` of the entry ``dn`` with given ``value``.
<END_TASK>
<USER_TASK:>
Description:
def compare(self, dn, attr, value):
    """Compare the ``attr`` of entry ``dn`` against ``value``.

    Boolean wrapper around the ldap library's ``compare_s``, which
    returns 1 or 0.
    """
    result = self.connection.compare_s(dn, attr, value)
    return result == 1
<SYSTEM_TASK:>
Get the accessor function for an instance to look for `key`.
<END_TASK>
<USER_TASK:>
Description:
def get_property_func(key):
    """Build an accessor function that looks up ``key`` on an instance.

    The returned callable tries the attribute first and falls back to
    the instance's ``tags`` mapping when the attribute is missing.
    """
    def accessor(obj):
        try:
            return getattr(obj, key)
        except AttributeError:
            return obj.tags.get(key)
    return accessor
<SYSTEM_TASK:>
List all the kinesis applications along with the shards for each stream
<END_TASK>
<USER_TASK:>
Description:
def list_kinesis_applications(region, filter_by_kwargs):
    """Map every kinesis stream in ``region`` to its list of shard ids.

    NOTE(review): ``filter_by_kwargs`` is accepted for signature parity
    with sibling listing helpers but is currently unused -- confirm
    whether filtering was intended here.
    """
    conn = boto.kinesis.connect_to_region(region)
    kinesis_streams = {}
    for stream_name in conn.list_streams()['StreamNames']:
        description = conn.describe_stream(stream_name)['StreamDescription']
        kinesis_streams[stream_name] = [
            shard['ShardId'] for shard in description['Shards']]
    return kinesis_streams
<SYSTEM_TASK:>
Commit to the use of specified Qt api.
<END_TASK>
<USER_TASK:>
Description:
def comittoapi(api):
    """
    Commit to the use of specified Qt api.
    Raise an error if another Qt api is already loaded in sys.modules
    """ |
    global USED_API
    # Write-once: committing twice per process is a programming error.
    assert USED_API is None, "committoapi called again!"
    check = ["PyQt4", "PyQt5", "PySide", "PySide2"]
    assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
    # Refuse to commit when a *different* Qt binding is already imported.
    for name in check:
        if name.lower() != api and name in sys.modules:
            raise RuntimeError(
                "{} was already imported. Cannot commit to {}!"
                .format(name, api)
            )
    else:
        # NOTE: for/else -- the loop contains no ``break``, so this block
        # always runs unless the RuntimeError above aborted the function.
        api = _intern(api)
        USED_API = api
        AnyQt.__SELECTED_API = api
        AnyQt.USED_API = api |
<SYSTEM_TASK:>
Return dictionary of metadata for given dist
<END_TASK>
<USER_TASK:>
Description:
def get_metadata(dist):
    """
    Return dictionary of PKG-INFO metadata for given dist

    @param dist: distribution
    @type dist: pkg_resources Distribution object
    @returns: dict mapping header names to values, or None when the
        distribution has no PKG-INFO metadata
    """
    if not dist.has_metadata('PKG-INFO'):
        return None
    msg = email.message_from_string(dist.get_metadata('PKG-INFO'))
    # Use the public Message.items() API instead of reaching into the
    # private ``_headers`` attribute; for duplicate headers the last
    # value wins, matching the original dict-assignment loop.
    return dict(msg.items())
<SYSTEM_TASK:>
Add command-line options for this plugin.
<END_TASK>
<USER_TASK:>
Description:
def add_options(self, parser):
    """Register this plugin's command-line options.

    The base plugin class contributes --with-$name, which is used to
    enable the plugin.
    """
    help_text = "Enable plugin %s: %s" % (self.__class__.__name__, self.help())
    parser.add_option("--with-%s" % self.name,
                      action="store_true",
                      dest=self.enable_opt,
                      help=help_text)
<SYSTEM_TASK:>
Configure the plugin and system, based on selected options.
<END_TASK>
<USER_TASK:>
Description:
def configure(self, options, conf):
    """Configure the plugin and system from the parsed options.

    The base class enables the plugin when its enable option
    (self.enable_opt) is present and true; when the option is absent,
    ``self.enabled`` is left untouched.
    """
    self.conf = conf
    try:
        self.enabled = getattr(options, self.enable_opt)
    except AttributeError:
        pass
<SYSTEM_TASK:>
Decode the response body
<END_TASK>
<USER_TASK:>
Description:
def decode_body(headers: MutableMapping, body: bytes) -> dict:
    """
    Decode a raw response body into a dict.

    JSON bodies are parsed as-is.  One API returns the plain text ``ok``
    instead of json; to keep the interface consistent, plain-text bodies
    are normalised to ``{"ok": True}`` / ``{"ok": False, "data": ...}``.

    Args:
        headers: Response headers
        body: Response body
    Returns:
        decoded body
    """
    type_, encoding = parse_content_type(headers)
    decoded_body = body.decode(encoding)
    if type_ == "application/json":
        return json.loads(decoded_body)
    if decoded_body == "ok":
        return {"ok": True}
    return {"ok": False, "data": decoded_body}
<SYSTEM_TASK:>
Find content-type and encoding of the response
<END_TASK>
<USER_TASK:>
Description:
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
    """
    Find content-type and encoding of the response

    Args:
        headers: Response headers
    Returns:
        :py:class:`tuple` (content-type, encoding)
    """
    content_type = headers.get("content-type")
    if not content_type:
        return None, "utf-8"
    # cgi.parse_header was deprecated in Python 3.11 and removed in 3.13
    # (PEP 594); email.message provides equivalent RFC 2045 header
    # parsing.  get_content_type() normalises the type to lowercase,
    # which matches how callers compare it ("application/json").
    from email.message import Message
    msg = Message()
    msg["content-type"] = content_type
    encoding = msg.get_param("charset", "utf-8")
    return msg.get_content_type(), encoding
<SYSTEM_TASK:>
Prepare outgoing request
<END_TASK>
<USER_TASK:>
Description:
def prepare_request(
    url: Union[str, methods],
    data: Optional[MutableMapping],
    headers: Optional[MutableMapping],
    global_headers: MutableMapping,
    token: str,
    as_json: Optional[bool] = None,
) -> Tuple[str, Union[str, MutableMapping], MutableMapping]:
    """
    Prepare outgoing request

    Create url, headers, add token to the body and if needed json encode it

    Args:
        url: :class:`slack.methods` item or string of url
        data: Outgoing data
        headers: Custom headers
        global_headers: Global headers
        token: Slack API token
        as_json: Post JSON to the slack API

    Returns:
        :py:class:`tuple` (url, body, headers)
    """ |
    if isinstance(url, methods):
        # methods enum values carry the real url at index 0 and a
        # "post as json" flag at index 3; the flag is only a fallback
        # when the caller did not choose explicitly.
        as_json = as_json or url.value[3]
        real_url = url.value[0]
    else:
        # Plain string urls are always form-encoded.
        # NOTE(review): this overwrites an explicit ``as_json=True``
        # passed by the caller -- confirm that is intentional.
        real_url = url
        as_json = False
    if not headers:
        headers = {**global_headers}
    else:
        # Request-specific headers win over the global defaults.
        headers = {**global_headers, **headers}
    payload: Optional[Union[str, MutableMapping]] = None
    # Webhook urls and explicit-json API calls get a JSON body (token is
    # moved into the headers); everything else is form-encoded with the
    # token embedded in the body.
    if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json):
        payload, headers = _prepare_json_request(data, token, headers)
    elif real_url.startswith(ROOT_URL) and not as_json:
        payload = _prepare_form_encoded_request(data, token)
    else:
        # Bare method path: prefix the API root, then form-encode.
        real_url = ROOT_URL + real_url
        payload = _prepare_form_encoded_request(data, token)
    return real_url, payload, headers |
<SYSTEM_TASK:>
Decode incoming response
<END_TASK>
<USER_TASK:>
Description:
def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
    """
    Decode an incoming response, raising on transport or API errors.

    Args:
        status: Response status
        headers: Response headers
        body: Response body
    Returns:
        Response data
    """
    payload = decode_body(headers, body)
    raise_for_status(status, headers, payload)
    raise_for_api_error(headers, payload)
    return payload
<SYSTEM_TASK:>
Prepare outgoing iteration request
<END_TASK>
<USER_TASK:>
Description:
def prepare_iter_request(
    url: Union[methods, str],
    data: MutableMapping,
    *,
    iterkey: Optional[str] = None,
    itermode: Optional[str] = None,
    limit: int = 200,
    itervalue: Optional[Union[str, int]] = None,
) -> Tuple[MutableMapping, str, str]:
    """
    Prepare an outgoing iteration (pagination) request.

    Args:
        url: :class:`slack.methods` item or string of url
        data: Outgoing data
        limit: Maximum number of results to return per call.
        iterkey: Key in response data to iterate over (required for url string).
        itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
        itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)

    Returns:
        :py:class:`tuple` (data, iterkey, itermode)
    """
    itermode, iterkey = find_iteration(url, itermode, iterkey)
    # Each mode uses its own pair of (limit parameter, position parameter).
    mode_params = {
        "cursor": ("limit", "cursor"),
        "page": ("count", "page"),
        "timeline": ("count", "latest"),
    }
    if itermode in mode_params:
        limit_key, value_key = mode_params[itermode]
        data[limit_key] = limit
        if itervalue:
            data[value_key] = itervalue
    return data, iterkey, itermode
<SYSTEM_TASK:>
Decode incoming response from an iteration request
<END_TASK>
<USER_TASK:>
Description:
def decode_iter_request(data: dict) -> Optional[Union[str, int]]:
    """
    Extract the next iteration value from a paginated response.

    Supports cursor (`response_metadata`), page (`paging`) and timeline
    (`has_more`/`latest`) pagination styles.

    Args:
        data: Response data
    Returns:
        Next itervalue, or None when iteration is exhausted
    """
    if "response_metadata" in data:
        return data["response_metadata"].get("next_cursor")
    if "paging" in data:
        paging = data["paging"]
        current = int(paging.get("page", 1))
        total = int(paging.get("pages", 1))
        return current + 1 if current < total else None
    if data.get("has_more") and "latest" in data:
        return data["messages"][-1]["ts"]
    return None
<SYSTEM_TASK:>
Check if the incoming event needs to be discarded
<END_TASK>
<USER_TASK:>
Description:
def discard_event(event: events.Event, bot_id: str = None) -> bool:
    """
    Decide whether an incoming event should be discarded.

    Events whose type is listed in SKIP_EVENTS are dropped, as are
    messages authored by the connected bot itself (directly or nested in
    a ``message`` sub-event) to avoid reacting to our own output.

    Args:
        event: Incoming :class:`slack.events.Event`
        bot_id: Id of connected bot
    Returns:
        boolean
    """
    if event["type"] in SKIP_EVENTS:
        return True
    if bot_id and isinstance(event, events.Message):
        if event.get("bot_id") == bot_id:
            LOG.debug("Ignoring event: %s", event)
            return True
        if "message" in event and event["message"].get("bot_id") == bot_id:
            LOG.debug("Ignoring event: %s", event)
            return True
    return False
<SYSTEM_TASK:>
Validate incoming request signature using the application signing secret.
<END_TASK>
<USER_TASK:>
Description:
def validate_request_signature(
    body: str, headers: MutableMapping, signing_secret: str
) -> None:
    """
    Validate an incoming request signature with the application signing secret.

    Unlike the ``team_id``/``verification_token`` checks, this is not run
    automatically when building objects from incoming HTTP requests,
    because it needs the raw (not yet json-decoded) request body.

    Args:
        body: Raw request body
        headers: Request headers
        signing_secret: Application signing_secret
    Raise:
        :class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match
        :class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
    """
    raw_timestamp = headers["X-Slack-Request-Timestamp"]
    request_timestamp = int(raw_timestamp)
    # Reject replayed requests older than five minutes.
    if (int(time.time()) - request_timestamp) > (60 * 5):
        raise exceptions.InvalidTimestamp(timestamp=request_timestamp)
    slack_signature = headers["X-Slack-Signature"]
    base_string = f"v0:{raw_timestamp}:{body}".encode("utf-8")
    digest = hmac.new(
        signing_secret.encode("utf-8"), base_string, digestmod=hashlib.sha256
    ).hexdigest()
    calculated_signature = "v0=" + digest
    # Constant-time comparison to avoid timing side channels.
    if not hmac.compare_digest(slack_signature, calculated_signature):
        raise exceptions.InvalidSlackSignature(slack_signature, calculated_signature)
<SYSTEM_TASK:>
Returns a datetime object computed from a file name string, with
<END_TASK>
<USER_TASK:>
Description:
def get_backup_file_time_tag(file_name, custom_prefix="backup"):
    """Parse the datetime encoded in a backup file name.

    Strips ``custom_prefix``, keeps everything before the first '.' and
    parses it with DATETIME_FORMAT.
    """
    time_tag, _, _ = file_name[len(custom_prefix):].partition(".")
    return datetime.strptime(time_tag, DATETIME_FORMAT)
<SYSTEM_TASK:>
Use setuptools to search for a package's URI
<END_TASK>
<USER_TASK:>
Description:
def get_download_uri(package_name, version, source, index_url=None):
    """
    Use setuptools to search for a package's URI

    @returns: URI string
    """ |
    # Positional arguments for PackageIndex.fetch_distribution below.
    tmpdir = None
    force_scan = True
    develop_ok = False
    if not index_url:
        index_url = 'http://cheeseshop.python.org/pypi'
    if version:
        pkg_spec = "%s==%s" % (package_name, version)
    else:
        # No version pin: let the index resolve the latest release.
        pkg_spec = package_name
    req = pkg_resources.Requirement.parse(pkg_spec)
    pkg_index = MyPackageIndex(index_url)
    try:
        # NOTE(review): MyPackageIndex presumably raises DownloadURI to
        # hand back the resolved link instead of downloading -- if
        # fetch_distribution returns normally, this function implicitly
        # returns None (no URI found).  Confirm against MyPackageIndex.
        pkg_index.fetch_distribution(req, tmpdir, force_scan, source,
                develop_ok)
    except DownloadURI as url:
        #Remove #egg=pkg-dev
        clean_url = url.value.split("#")[0]
        #If setuptools is asked for an egg and there isn't one, it will
        #return source if available, which we don't want.
        if not source and not clean_url.endswith(".egg") and \
                not clean_url.endswith(".EGG"):
            return
        else:
            return clean_url |
<SYSTEM_TASK:>
Return list of all installed packages
<END_TASK>
<USER_TASK:>
Description:
def get_pkglist():
    """
    Return a list of project names for every installed package.

    Each project name appears at most once, no matter how many versions
    of that package are installed.

    @returns: list of project name strings
    """
    seen = []
    for dist, _active in Distributions().get_distributions("all"):
        if dist.project_name not in seen:
            seen.append(dist.project_name)
    return seen
<SYSTEM_TASK:>
Register a new handler for a specific slash command
<END_TASK>
<USER_TASK:>
Description:
def register(self, command: str, handler: Any):
    """
    Register a new handler for a specific slash command.

    A leading "/" is prepended to ``command`` when missing, so callers
    may pass either "/deploy" or "deploy".

    Args:
        command: Slash command
        handler: Callback
    """
    normalized = command if command.startswith("/") else f"/{command}"
    LOG.info("Registering %s to %s", normalized, handler)
    self._routes[normalized].append(handler)
<SYSTEM_TASK:>
Set the preferred Qt API.
<END_TASK>
<USER_TASK:>
Description:
def setpreferredapi(api):
    """
    Set the preferred Qt API.

    Raises a RuntimeError if a Qt API was already selected.
    The QT_API environment variable (if set) still takes precedence.
    """
    global __PREFERRED_API
    if __SELECTED_API is not None:
        raise RuntimeError("A Qt api {} was already selected"
                           .format(__SELECTED_API))
    normalized = api.lower()
    if normalized not in {"pyqt4", "pyqt5", "pyside", "pyside2"}:
        raise ValueError(api)
    __PREFERRED_API = normalized
<SYSTEM_TASK:>
Select an Qt API to use.
<END_TASK>
<USER_TASK:>
Description:
def selectapi(api):
    """
    Select a Qt API to use.

    Can only be set once, and only before any of the Qt modules are
    explicitly imported.  Selecting the already-selected API again is a
    no-op.
    """
    global __SELECTED_API, USED_API
    normalized = api.lower()
    if normalized not in {"pyqt4", "pyqt5", "pyside", "pyside2"}:
        raise ValueError(api)
    if __SELECTED_API is not None and __SELECTED_API.lower() != normalized:
        raise RuntimeError("A Qt API {} was already selected"
                           .format(__SELECTED_API))
    if __SELECTED_API is None:
        __SELECTED_API = normalized
        from . import _api
        USED_API = _api.USED_API
<SYSTEM_TASK:>
Returns highest available version for a package in a list of versions
<END_TASK>
<USER_TASK:>
Description:
def get_highest_version(versions):
    """
    Return the highest version from a list of PyPI version strings.

    Versions are compared via pkg_resources.parse_version, with the raw
    string as a tie-breaker.

    @param versions: List of PyPI package versions
    @type versions: List of strings
    @returns: string of a PyPI package version
    """
    decorated = sorted((pkg_resources.parse_version(v), v) for v in versions)
    return decorated[-1][1]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.