Dataset schema (per record):

    repo              string         (length 7-55)
    path              string         (length 4-223)
    url               string         (length 87-315)
    code              string         (length 75-104k)
    code_tokens       sequence
    docstring         string         (length 1-46.9k)
    docstring_tokens  sequence
    language          stringclasses  (1 value)
    partition         stringclasses  (3 values)
    avg_line_len      float64        (7.91-980)

log2timeline/dfwinreg
dfwinreg/key_paths.py
https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/key_paths.py#L9-L36
def JoinKeyPath(path_segments):
  """Joins the path segments into key path.

  Args:
    path_segments (list[str]): Windows Registry key path segments.

  Returns:
    str: key path.
  """
  # This is an optimized way to combine the path segments into a single path
  # and combine multiple successive path separators to one.

  # Split all the path segments based on the path (segment) separator.
  path_segments = [
      segment.split(definitions.KEY_PATH_SEPARATOR)
      for segment in path_segments]

  # Flatten the sublists into one list.
  path_segments = [
      element for sublist in path_segments for element in sublist]

  # Remove empty path segments.
  path_segments = filter(None, path_segments)

  key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
  if not key_path.startswith('HKEY_'):
    key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
  return key_path
[ "def", "JoinKeyPath", "(", "path_segments", ")", ":", "# This is an optimized way to combine the path segments into a single path", "# and combine multiple successive path separators to one.", "# Split all the path segments based on the path (segment) separator.", "path_segments", "=", "[", "segment", ".", "split", "(", "definitions", ".", "KEY_PATH_SEPARATOR", ")", "for", "segment", "in", "path_segments", "]", "# Flatten the sublists into one list.", "path_segments", "=", "[", "element", "for", "sublist", "in", "path_segments", "for", "element", "in", "sublist", "]", "# Remove empty path segments.", "path_segments", "=", "filter", "(", "None", ",", "path_segments", ")", "key_path", "=", "definitions", ".", "KEY_PATH_SEPARATOR", ".", "join", "(", "path_segments", ")", "if", "not", "key_path", ".", "startswith", "(", "'HKEY_'", ")", ":", "key_path", "=", "'{0:s}{1:s}'", ".", "format", "(", "definitions", ".", "KEY_PATH_SEPARATOR", ",", "key_path", ")", "return", "key_path" ]
Joins the path segments into key path. Args: path_segments (list[str]): Windows Registry key path segments. Returns: str: key path.
[ "Joins", "the", "path", "segments", "into", "key", "path", "." ]
language: python | partition: train | avg_line_len: 31.571429
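A minimal standalone sketch of JoinKeyPath's joining behavior. It assumes definitions.KEY_PATH_SEPARATOR is the Windows Registry backslash ('\\'), which is an assumption about the dfwinreg definitions module; the sample segments are made up.

KEY_PATH_SEPARATOR = '\\'  # assumed value of definitions.KEY_PATH_SEPARATOR

def join_key_path(path_segments):
    # split on the separator, flatten, and drop empty segments
    segments = [s for seg in path_segments for s in seg.split(KEY_PATH_SEPARATOR)]
    segments = [s for s in segments if s]
    key_path = KEY_PATH_SEPARATOR.join(segments)
    # paths that do not start at a root key get a leading separator
    if not key_path.startswith('HKEY_'):
        key_path = KEY_PATH_SEPARATOR + key_path
    return key_path

print(join_key_path(['HKEY_LOCAL_MACHINE\\Software\\', 'Microsoft']))
# HKEY_LOCAL_MACHINE\Software\Microsoft
print(join_key_path(['Software', 'Classes']))
# \Software\Classes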
wummel/linkchecker
linkcheck/htmlutil/linkparse.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/htmlutil/linkparse.py#L234-L249
def get_link_name (self, tag, attrs, attr):
    """Parse attrs for link name. Return name of link."""
    if tag == 'a' and attr == 'href':
        # Look for name only up to MAX_NAMELEN characters
        data = self.parser.peek(MAX_NAMELEN)
        data = data.decode(self.parser.encoding, "ignore")
        name = linkname.href_name(data)
        if not name:
            name = attrs.get_true('title', u'')
    elif tag == 'img':
        name = attrs.get_true('alt', u'')
        if not name:
            name = attrs.get_true('title', u'')
    else:
        name = u""
    return name
[ "def", "get_link_name", "(", "self", ",", "tag", ",", "attrs", ",", "attr", ")", ":", "if", "tag", "==", "'a'", "and", "attr", "==", "'href'", ":", "# Look for name only up to MAX_NAMELEN characters", "data", "=", "self", ".", "parser", ".", "peek", "(", "MAX_NAMELEN", ")", "data", "=", "data", ".", "decode", "(", "self", ".", "parser", ".", "encoding", ",", "\"ignore\"", ")", "name", "=", "linkname", ".", "href_name", "(", "data", ")", "if", "not", "name", ":", "name", "=", "attrs", ".", "get_true", "(", "'title'", ",", "u''", ")", "elif", "tag", "==", "'img'", ":", "name", "=", "attrs", ".", "get_true", "(", "'alt'", ",", "u''", ")", "if", "not", "name", ":", "name", "=", "attrs", ".", "get_true", "(", "'title'", ",", "u''", ")", "else", ":", "name", "=", "u\"\"", "return", "name" ]
Parse attrs for link name. Return name of link.
[ "Parse", "attrs", "for", "link", "name", ".", "Return", "name", "of", "link", "." ]
language: python | partition: train | avg_line_len: 39.625
cltk/cltk
cltk/corpus/arabic/utils/pyarabic/araby.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/arabic/utils/pyarabic/araby.py#L549-L561
def is_arabicstring(text):
    """ Checks for Arabic standard Unicode block characters.
    An Arabic string can contain spaces, digits and punctuation,
    but only Arabic standard characters, not extended Arabic.
    @param text: input text
    @type text: unicode
    @return: True if all characters are in the Arabic block
    @rtype: Boolean
    """
    if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
                 % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
        return False
    return True
[ "def", "is_arabicstring", "(", "text", ")", ":", "if", "re", ".", "search", "(", "u\"([^\\u0600-\\u0652%s%s%s\\s\\d])\"", "%", "(", "LAM_ALEF", ",", "LAM_ALEF_HAMZA_ABOVE", ",", "LAM_ALEF_MADDA_ABOVE", ")", ",", "text", ")", ":", "return", "False", "return", "True" ]
Checks for an Arabic standard Unicode block characters An arabic string can contain spaces, digits and pounctuation. but only arabic standard characters, not extended arabic @param text: input text @type text: unicode @return: True if all charaters are in Arabic block @rtype: Boolean
[ "Checks", "for", "an", "Arabic", "standard", "Unicode", "block", "characters", "An", "arabic", "string", "can", "contain", "spaces", "digits", "and", "pounctuation", ".", "but", "only", "arabic", "standard", "characters", "not", "extended", "arabic" ]
language: python | partition: train | avg_line_len: 40
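A simplified, self-contained sketch of the same check. It keeps only the U+0600-U+0652 range plus whitespace and digits and omits the lam-alef ligature constants (LAM_ALEF and friends), so it is slightly stricter than the original.

import re

def is_arabic_string_simple(text):
    # reject any character outside the Arabic block, whitespace, or digits
    return not re.search(u"[^\u0600-\u0652\s\d]", text)

print(is_arabic_string_simple(u"\u0645\u0631\u062d\u0628\u0627 123"))  # True
print(is_arabic_string_simple(u"abc"))                                 # False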
twaldear/flask-secure-headers
flask_secure_headers/headers.py
https://github.com/twaldear/flask-secure-headers/blob/3eca972b369608a7669b67cbe66679570a6505ce/flask_secure_headers/headers.py#L26-L34
def update_policy(self, defaultHeaders):
    """ If a policy is present in the defaults but missing from the
    inputs, add it so it is still returned. """
    if self.inputs is not None:
        for k, v in defaultHeaders.items():
            if k not in self.inputs:
                self.inputs[k] = v
        return self.inputs
    else:
        return self.inputs
[ "def", "update_policy", "(", "self", ",", "defaultHeaders", ")", ":", "if", "self", ".", "inputs", "is", "not", "None", ":", "for", "k", ",", "v", "in", "defaultHeaders", ".", "items", "(", ")", ":", "if", "k", "not", "in", "self", ".", "inputs", ":", "self", ".", "inputs", "[", "k", "]", "=", "v", "return", "self", ".", "inputs", "else", ":", "return", "self", ".", "inputs" ]
if policy in default but not input still return
[ "if", "policy", "in", "default", "but", "not", "input", "still", "return" ]
language: python | partition: train | avg_line_len: 29.111111
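The merge rule in plain terms, with made-up header names: defaults only fill gaps, user inputs always win.

defaults = {'X-Frame-Options': 'SAMEORIGIN', 'X-XSS-Protection': '1; mode=block'}
inputs = {'X-Frame-Options': 'DENY'}

for k, v in defaults.items():
    if k not in inputs:
        inputs[k] = v

print(inputs)
# {'X-Frame-Options': 'DENY', 'X-XSS-Protection': '1; mode=block'}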
ets-labs/python-dependency-injector
examples/miniapps/use_cases/example/use_cases.py
https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/miniapps/use_cases/example/use_cases.py#L19-L22
def execute(self, email):
    """Execute use case handling."""
    print('Sign up user {0}'.format(email))
    self.email_sender.send(email, 'Welcome, "{}"'.format(email))
[ "def", "execute", "(", "self", ",", "email", ")", ":", "print", "(", "'Sign up user {0}'", ".", "format", "(", "email", ")", ")", "self", ".", "email_sender", ".", "send", "(", "email", ",", "'Welcome, \"{}\"'", ".", "format", "(", "email", ")", ")" ]
Execute use case handling.
[ "Execute", "use", "case", "handling", "." ]
language: python | partition: train | avg_line_len: 45
Unidata/MetPy
metpy/calc/thermo.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/thermo.py#L1912-L1941
def moist_static_energy(heights, temperature, specific_humidity):
    r"""Calculate the moist static energy of parcels.

    This function will calculate the moist static energy following
    equation 3.72 in [Hobbs2006]_.

    Notes
    -----
    .. math:: \text{moist static energy} = c_{pd} T + g z + L_v q

    * :math:`T` is temperature
    * :math:`z` is height
    * :math:`q` is specific humidity

    Parameters
    ----------
    heights : array-like
        Atmospheric height
    temperature : array-like
        Atmospheric temperature
    specific_humidity : array-like
        Atmospheric specific humidity

    Returns
    -------
    `pint.Quantity`
        The moist static energy

    """
    return (dry_static_energy(heights, temperature) +
            mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
[ "def", "moist_static_energy", "(", "heights", ",", "temperature", ",", "specific_humidity", ")", ":", "return", "(", "dry_static_energy", "(", "heights", ",", "temperature", ")", "+", "mpconsts", ".", "Lv", "*", "specific_humidity", ".", "to", "(", "'dimensionless'", ")", ")", ".", "to", "(", "'kJ/kg'", ")" ]
r"""Calculate the moist static energy of parcels. This function will calculate the moist static energy following equation 3.72 in [Hobbs2006]_. Notes ----- .. math::\text{moist static energy} = c_{pd} * T + gz + L_v q * :math:`T` is temperature * :math:`z` is height * :math:`q` is specific humidity Parameters ---------- heights : array-like Atmospheric height temperature : array-like Atmospheric temperature specific_humidity : array-like Atmospheric specific humidity Returns ------- `pint.Quantity` The moist static energy
[ "r", "Calculate", "the", "moist", "static", "energy", "of", "parcels", "." ]
language: python | partition: train | avg_line_len: 26.933333
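A usage sketch, assuming MetPy's public API with pint units; the numbers are illustrative, not from the source.

from metpy.calc import moist_static_energy
from metpy.units import units

mse = moist_static_energy(1500 * units.meter,
                          280 * units.kelvin,
                          5 * units('g/kg'))
print(mse)  # a pint.Quantity in kJ/kg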
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/interfaces/gatt.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/interfaces/gatt.py#L94-L101
def find_descriptor(self, uuid):
    """Return the first child descriptor found that has the specified
    UUID.  Will return None if no descriptor that matches is found.
    """
    for desc in self.list_descriptors():
        if desc.uuid == uuid:
            return desc
    return None
[ "def", "find_descriptor", "(", "self", ",", "uuid", ")", ":", "for", "desc", "in", "self", ".", "list_descriptors", "(", ")", ":", "if", "desc", ".", "uuid", "==", "uuid", ":", "return", "desc", "return", "None" ]
Return the first child descriptor found that has the specified UUID. Will return None if no descriptor that matches is found.
[ "Return", "the", "first", "child", "descriptor", "found", "that", "has", "the", "specified", "UUID", ".", "Will", "return", "None", "if", "no", "descriptor", "that", "matches", "is", "found", "." ]
language: python | partition: valid | avg_line_len: 38.75
glormph/msstitch
src/app/actions/prottable/info.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/info.py#L41-L45
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
    """Parses protein data for a certain protein into tsv output
    dictionary"""
    report = get_protein_data_base(proteindata, p_acc, headerfields)
    return get_cov_protnumbers(proteindata, p_acc, report)
[ "def", "get_protein_data_pgrouped", "(", "proteindata", ",", "p_acc", ",", "headerfields", ")", ":", "report", "=", "get_protein_data_base", "(", "proteindata", ",", "p_acc", ",", "headerfields", ")", "return", "get_cov_protnumbers", "(", "proteindata", ",", "p_acc", ",", "report", ")" ]
Parses protein data for a certain protein into tsv output dictionary
[ "Parses", "protein", "data", "for", "a", "certain", "protein", "into", "tsv", "output", "dictionary" ]
language: python | partition: train | avg_line_len: 54.2
ArchiveTeam/wpull
wpull/network/pool.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/pool.py#L231-L237
def no_wait_release(self, connection: Connection):
    '''Synchronous version of :meth:`release`.'''
    _logger.debug('No wait check in.')

    release_task = asyncio.get_event_loop().create_task(
        self.release(connection)
    )
    self._release_tasks.add(release_task)
[ "def", "no_wait_release", "(", "self", ",", "connection", ":", "Connection", ")", ":", "_logger", ".", "debug", "(", "'No wait check in.'", ")", "release_task", "=", "asyncio", ".", "get_event_loop", "(", ")", ".", "create_task", "(", "self", ".", "release", "(", "connection", ")", ")", "self", ".", "_release_tasks", ".", "add", "(", "release_task", ")" ]
Synchronous version of :meth:`release`.
[ "Synchronous", "version", "of", ":", "meth", ":", "release", "." ]
language: python | partition: train | avg_line_len: 42.142857
devricks/soft_drf
soft_drf/api/serializers/base.py
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/serializers/base.py#L11-L19
def build_absolute_uri(self, uri):
    """
    Return a fully qualified absolute url for the given uri.
    """
    request = self.context.get('request', None)
    return (
        request.build_absolute_uri(uri) if request is not None else uri
    )
[ "def", "build_absolute_uri", "(", "self", ",", "uri", ")", ":", "request", "=", "self", ".", "context", ".", "get", "(", "'request'", ",", "None", ")", "return", "(", "request", ".", "build_absolute_uri", "(", "uri", ")", "if", "request", "is", "not", "None", "else", "uri", ")" ]
Return a fully qualified absolute url for the given uri.
[ "Return", "a", "fully", "qualified", "absolute", "url", "for", "the", "given", "uri", "." ]
language: python | partition: train | avg_line_len: 30.111111
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L6528-L6555
def _format_coredump_stdout(cmd_ret):
    '''
    Helper function to format the stdout from the get_coredump_network_config
    function.

    cmd_ret
        The return dictionary that comes from a cmd.run_all call.
    '''
    ret_dict = {}
    for line in cmd_ret['stdout'].splitlines():
        line = line.strip().lower()
        if line.startswith('enabled:'):
            enabled = line.split(':')
            if 'true' in enabled[1]:
                ret_dict['enabled'] = True
            else:
                ret_dict['enabled'] = False
            break
        if line.startswith('host vnic:'):
            host_vnic = line.split(':')
            ret_dict['host_vnic'] = host_vnic[1].strip()
        if line.startswith('network server ip:'):
            ip = line.split(':')
            ret_dict['ip'] = ip[1].strip()
        if line.startswith('network server port:'):
            ip_port = line.split(':')
            ret_dict['port'] = ip_port[1].strip()

    return ret_dict
[ "def", "_format_coredump_stdout", "(", "cmd_ret", ")", ":", "ret_dict", "=", "{", "}", "for", "line", "in", "cmd_ret", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "line", ".", "startswith", "(", "'enabled:'", ")", ":", "enabled", "=", "line", ".", "split", "(", "':'", ")", "if", "'true'", "in", "enabled", "[", "1", "]", ":", "ret_dict", "[", "'enabled'", "]", "=", "True", "else", ":", "ret_dict", "[", "'enabled'", "]", "=", "False", "break", "if", "line", ".", "startswith", "(", "'host vnic:'", ")", ":", "host_vnic", "=", "line", ".", "split", "(", "':'", ")", "ret_dict", "[", "'host_vnic'", "]", "=", "host_vnic", "[", "1", "]", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'network server ip:'", ")", ":", "ip", "=", "line", ".", "split", "(", "':'", ")", "ret_dict", "[", "'ip'", "]", "=", "ip", "[", "1", "]", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'network server port:'", ")", ":", "ip_port", "=", "line", ".", "split", "(", "':'", ")", "ret_dict", "[", "'port'", "]", "=", "ip_port", "[", "1", "]", ".", "strip", "(", ")", "return", "ret_dict" ]
Helper function to format the stdout from the get_coredump_network_config function. cmd_ret The return dictionary that comes from a cmd.run_all call.
[ "Helper", "function", "to", "format", "the", "stdout", "from", "the", "get_coredump_network_config", "function", "." ]
language: python | partition: train | avg_line_len: 34.392857
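A hedged sample input for _format_coredump_stdout; the stdout mimics what `esxcli system coredump network get` might print (the values are made up). Note the parser breaks out of its loop once it has handled the 'Enabled:' line, so that line has to come last for the other fields to be captured.

cmd_ret = {
    'stdout': 'Host VNic: vmk0\n'
              'Network Server IP: 192.0.2.10\n'
              'Network Server Port: 6500\n'
              'Enabled: true',
}
# _format_coredump_stdout(cmd_ret) would return:
# {'host_vnic': 'vmk0', 'ip': '192.0.2.10', 'port': '6500', 'enabled': True}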
facelessuser/backrefs
tools/unipropgen.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/tools/unipropgen.py#L159-L217
def gen_blocks(output, ascii_props=False, append=False, prefix=""):
    """Generate Unicode blocks."""

    with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
        if not append:
            f.write(HEADER)
        f.write('%s_blocks = {' % prefix)
        no_block = []
        last = -1
        max_range = MAXASCII if ascii_props else MAXUNICODE
        formatter = bytesformat if ascii_props else uniformat

        with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'Blocks.txt'), 'r', 'utf-8') as uf:
            for line in uf:
                if not line.startswith('#'):
                    data = line.split(';')
                    if len(data) < 2:
                        continue
                    block = [int(i, 16) for i in data[0].strip().split('..')]
                    if block[0] > last + 1:
                        if (last + 1) <= max_range:
                            endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
                            no_block.append((last + 1, endval))
                    last = block[1]

                    name = format_name(data[1])
                    inverse_range = []
                    if block[0] > max_range:
                        if ascii_props:
                            f.write('\n "%s": "",' % name)
                            f.write('\n "^%s": "%s-%s",' % (name, formatter(0), formatter(max_range)))
                        continue
                    if block[0] > 0:
                        inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
                    if block[1] < max_range:
                        inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
                    f.write('\n "%s": "%s-%s",' % (name, formatter(block[0]), formatter(block[1])))
                    f.write('\n "^%s": "%s",' % (name, ''.join(inverse_range)))
            if last < max_range:
                if (last + 1) <= max_range:
                    no_block.append((last + 1, max_range))

        last = -1
        no_block_inverse = []
        if not no_block:
            no_block_inverse.append((0, max_range))
        else:
            for piece in no_block:
                if piece[0] > last + 1:
                    no_block_inverse.append((last + 1, piece[0] - 1))
                last = piece[1]

        for block, name in ((no_block, 'noblock'), (no_block_inverse, '^noblock')):
            f.write('\n "%s": "' % name)
            for piece in block:
                if piece[0] == piece[1]:
                    f.write(formatter(piece[0]))
                else:
                    f.write("%s-%s" % (formatter(piece[0]), formatter(piece[1])))
            f.write('",')

        f.write('\n}\n')
[ "def", "gen_blocks", "(", "output", ",", "ascii_props", "=", "False", ",", "append", "=", "False", ",", "prefix", "=", "\"\"", ")", ":", "with", "codecs", ".", "open", "(", "output", ",", "'a'", "if", "append", "else", "'w'", ",", "'utf-8'", ")", "as", "f", ":", "if", "not", "append", ":", "f", ".", "write", "(", "HEADER", ")", "f", ".", "write", "(", "'%s_blocks = {'", "%", "prefix", ")", "no_block", "=", "[", "]", "last", "=", "-", "1", "max_range", "=", "MAXASCII", "if", "ascii_props", "else", "MAXUNICODE", "formatter", "=", "bytesformat", "if", "ascii_props", "else", "uniformat", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "HOME", ",", "'unicodedata'", ",", "UNIVERSION", ",", "'Blocks.txt'", ")", ",", "'r'", ",", "'utf-8'", ")", "as", "uf", ":", "for", "line", "in", "uf", ":", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "data", "=", "line", ".", "split", "(", "';'", ")", "if", "len", "(", "data", ")", "<", "2", ":", "continue", "block", "=", "[", "int", "(", "i", ",", "16", ")", "for", "i", "in", "data", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "'..'", ")", "]", "if", "block", "[", "0", "]", ">", "last", "+", "1", ":", "if", "(", "last", "+", "1", ")", "<=", "max_range", ":", "endval", "=", "block", "[", "0", "]", "-", "1", "if", "(", "block", "[", "0", "]", "-", "1", ")", "<", "max_range", "else", "max_range", "no_block", ".", "append", "(", "(", "last", "+", "1", ",", "endval", ")", ")", "last", "=", "block", "[", "1", "]", "name", "=", "format_name", "(", "data", "[", "1", "]", ")", "inverse_range", "=", "[", "]", "if", "block", "[", "0", "]", ">", "max_range", ":", "if", "ascii_props", ":", "f", ".", "write", "(", "'\\n \"%s\": \"\",'", "%", "name", ")", "f", ".", "write", "(", "'\\n \"^%s\": \"%s-%s\",'", "%", "(", "name", ",", "formatter", "(", "0", ")", ",", "formatter", "(", "max_range", ")", ")", ")", "continue", "if", "block", "[", "0", "]", ">", "0", ":", "inverse_range", ".", "append", "(", "\"%s-%s\"", "%", "(", "formatter", "(", "0", ")", ",", "formatter", "(", "block", "[", "0", "]", "-", "1", ")", ")", ")", "if", "block", "[", "1", "]", "<", "max_range", ":", "inverse_range", ".", "append", "(", "\"%s-%s\"", "%", "(", "formatter", "(", "block", "[", "1", "]", "+", "1", ")", ",", "formatter", "(", "max_range", ")", ")", ")", "f", ".", "write", "(", "'\\n \"%s\": \"%s-%s\",'", "%", "(", "name", ",", "formatter", "(", "block", "[", "0", "]", ")", ",", "formatter", "(", "block", "[", "1", "]", ")", ")", ")", "f", ".", "write", "(", "'\\n \"^%s\": \"%s\",'", "%", "(", "name", ",", "''", ".", "join", "(", "inverse_range", ")", ")", ")", "if", "last", "<", "max_range", ":", "if", "(", "last", "+", "1", ")", "<=", "max_range", ":", "no_block", ".", "append", "(", "(", "last", "+", "1", ",", "max_range", ")", ")", "last", "=", "-", "1", "no_block_inverse", "=", "[", "]", "if", "not", "no_block", ":", "no_block_inverse", ".", "append", "(", "(", "0", ",", "max_range", ")", ")", "else", ":", "for", "piece", "in", "no_block", ":", "if", "piece", "[", "0", "]", ">", "last", "+", "1", ":", "no_block_inverse", ".", "append", "(", "(", "last", "+", "1", ",", "piece", "[", "0", "]", "-", "1", ")", ")", "last", "=", "piece", "[", "1", "]", "for", "block", ",", "name", "in", "(", "(", "no_block", ",", "'noblock'", ")", ",", "(", "no_block_inverse", ",", "'^noblock'", ")", ")", ":", "f", ".", "write", "(", "'\\n \"%s\": \"'", "%", "name", ")", "for", "piece", "in", "block", ":", "if", "piece", "[", "0", "]", "==", "piece", 
"[", "1", "]", ":", "f", ".", "write", "(", "formatter", "(", "piece", "[", "0", "]", ")", ")", "else", ":", "f", ".", "write", "(", "\"%s-%s\"", "%", "(", "formatter", "(", "piece", "[", "0", "]", ")", ",", "formatter", "(", "piece", "[", "1", "]", ")", ")", ")", "f", ".", "write", "(", "'\",'", ")", "f", ".", "write", "(", "'\\n}\\n'", ")" ]
Generate Unicode blocks.
[ "Generate", "Unicode", "blocks", "." ]
language: python | partition: train | avg_line_len: 47.508475
OpenTreeOfLife/peyotl
peyotl/phylo/tree.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylo/tree.py#L302-L345
def add_bits4subtree_ids(self, relevant_ids):
    """Adds a long integer bits4subtree_ids to each node (Fails cryptically
    if that field is already present!)

    relevant_ids can be a dict of _id to bit representation.
    If it is not supplied, a dict will be created by registering the
    leaf._id into a dict (and returning the dict)

    the bits4subtree_ids will have a 1 bit if the _id is at or descended
    from this node and 0 if it is not in this subtree.

    Returns the dict of ids -> longs
    Also creates a dict of long -> node mappings for all internal nodes.
    Stores this in self as bits2internal_node
    """
    if relevant_ids:
        checking = True
    else:
        checking = False
        relevant_ids = {}
    bit = 1
    self.bits2internal_node = {}
    for node in self.postorder_node_iter():
        p = node._parent
        if p is None:
            if not node.is_leaf:
                self.bits2internal_node[node.bits4subtree_ids] = node
            continue
        if not hasattr(p, 'bits4subtree_ids'):
            p.bits4subtree_ids = 0
        i = node._id
        # _LOG.debug('node._id ={}'.format(i))
        # _LOG.debug('Before par mrca... = {}'.format(p.bits4subtree_ids))
        if checking:
            b = relevant_ids.get(i)
            if b:
                if node.is_leaf:
                    node.bits4subtree_ids = b
                else:
                    node.bits4subtree_ids |= b
        else:
            if node.is_leaf:
                relevant_ids[i] = bit
                node.bits4subtree_ids = bit
                bit <<= 1
        if not node.is_leaf:
            self.bits2internal_node[node.bits4subtree_ids] = node
        # _LOG.debug('while add bitrep... self.bits2internal_node = {}'.format(self.bits2internal_node))
        p.bits4subtree_ids |= node.bits4subtree_ids
    return relevant_ids
[ "def", "add_bits4subtree_ids", "(", "self", ",", "relevant_ids", ")", ":", "if", "relevant_ids", ":", "checking", "=", "True", "else", ":", "checking", "=", "False", "relevant_ids", "=", "{", "}", "bit", "=", "1", "self", ".", "bits2internal_node", "=", "{", "}", "for", "node", "in", "self", ".", "postorder_node_iter", "(", ")", ":", "p", "=", "node", ".", "_parent", "if", "p", "is", "None", ":", "if", "not", "node", ".", "is_leaf", ":", "self", ".", "bits2internal_node", "[", "node", ".", "bits4subtree_ids", "]", "=", "node", "continue", "if", "not", "hasattr", "(", "p", ",", "'bits4subtree_ids'", ")", ":", "p", ".", "bits4subtree_ids", "=", "0", "i", "=", "node", ".", "_id", "# _LOG.debug('node._id ={}'.format(i))", "# _LOG.debug('Before par mrca... = {}'.format(p.bits4subtree_ids))", "if", "checking", ":", "b", "=", "relevant_ids", ".", "get", "(", "i", ")", "if", "b", ":", "if", "node", ".", "is_leaf", ":", "node", ".", "bits4subtree_ids", "=", "b", "else", ":", "node", ".", "bits4subtree_ids", "|=", "b", "else", ":", "if", "node", ".", "is_leaf", ":", "relevant_ids", "[", "i", "]", "=", "bit", "node", ".", "bits4subtree_ids", "=", "bit", "bit", "<<=", "1", "if", "not", "node", ".", "is_leaf", ":", "self", ".", "bits2internal_node", "[", "node", ".", "bits4subtree_ids", "]", "=", "node", "# _LOG.debug('while add bitrep... self.bits2internal_node = {}'.format(self.bits2internal_node))", "p", ".", "bits4subtree_ids", "|=", "node", ".", "bits4subtree_ids", "return", "relevant_ids" ]
Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!) relevant_ids can be a dict of _id to bit representation. If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict) the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not in this subtree. Returns the dict of ids -> longs Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node
[ "Adds", "a", "long", "integer", "bits4subtree_ids", "to", "each", "node", "(", "Fails", "cryptically", "if", "that", "field", "is", "already", "present!", ")", "relevant_ids", "can", "be", "a", "dict", "of", "_id", "to", "bit", "representation", ".", "If", "it", "is", "not", "supplied", "a", "dict", "will", "be", "created", "by", "registering", "the", "leaf", ".", "_id", "into", "a", "dict", "(", "and", "returning", "the", "dict", ")", "the", "bits4subtree_ids", "will", "have", "a", "1", "bit", "if", "the", "_id", "is", "at", "or", "descended", "from", "this", "node", "and", "0", "if", "it", "is", "not", "in", "this", "subtree", ".", "Returns", "the", "dict", "of", "ids", "-", ">", "longs", "Also", "creates", "a", "dict", "of", "long", "-", ">", "node", "mappings", "for", "all", "internal", "nodes", ".", "Stores", "this", "in", "self", "as", "bits2internal_node" ]
language: python | partition: train | avg_line_len: 45.409091
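A toy illustration of the bit-set bookkeeping: each leaf gets its own bit, and every internal node ends up with the OR of the bits of the leaves below it.

leaf_bits = {'A': 0b001, 'B': 0b010, 'C': 0b100}  # one distinct bit per leaf

internal_ab = leaf_bits['A'] | leaf_bits['B']   # subtree containing A and B
root = internal_ab | leaf_bits['C']             # subtree containing everything

print(bin(internal_ab))  # 0b11
print(bin(root))         # 0b111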
gijzelaerr/python-snap7
snap7/client.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/client.py#L165-L175
def delete(self, block_type, block_num):
    """
    Deletes a block

    :param block_type: Type of block
    :param block_num: Block number
    """
    logger.info("deleting block")
    blocktype = snap7.snap7types.block_types[block_type]
    result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
    return result
[ "def", "delete", "(", "self", ",", "block_type", ",", "block_num", ")", ":", "logger", ".", "info", "(", "\"deleting block\"", ")", "blocktype", "=", "snap7", ".", "snap7types", ".", "block_types", "[", "block_type", "]", "result", "=", "self", ".", "library", ".", "Cli_Delete", "(", "self", ".", "pointer", ",", "blocktype", ",", "block_num", ")", "return", "result" ]
Deletes a block :param block_type: Type of block :param block_num: Bloc number
[ "Deletes", "a", "block", ":", "param", "block_type", ":", "Type", "of", "block", ":", "param", "block_num", ":", "Bloc", "number" ]
language: python | partition: train | avg_line_len: 33.090909
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py#L164-L176
def maps_get_rules_output_rules_policyname(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    maps_get_rules = ET.Element("maps_get_rules")
    config = maps_get_rules
    output = ET.SubElement(maps_get_rules, "output")
    rules = ET.SubElement(output, "rules")
    policyname = ET.SubElement(rules, "policyname")
    policyname.text = kwargs.pop('policyname')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "maps_get_rules_output_rules_policyname", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_rules", "=", "ET", ".", "Element", "(", "\"maps_get_rules\"", ")", "config", "=", "maps_get_rules", "output", "=", "ET", ".", "SubElement", "(", "maps_get_rules", ",", "\"output\"", ")", "rules", "=", "ET", ".", "SubElement", "(", "output", ",", "\"rules\"", ")", "policyname", "=", "ET", ".", "SubElement", "(", "rules", ",", "\"policyname\"", ")", "policyname", ".", "text", "=", "kwargs", ".", "pop", "(", "'policyname'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
language: python | partition: train | avg_line_len: 39.692308
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L461-L491
def _assign_udf_desc_extents(descs, start_extent):
    # type: (PyCdlib._UDFDescriptors, int) -> None
    '''
    An internal function to assign a consecutive sequence of extents for
    the given set of UDF Descriptors, starting at the given extent.

    Parameters:
     descs - The PyCdlib._UDFDescriptors object to assign extents for.
     start_extent - The starting extent to assign from.
    Returns:
     Nothing.
    '''
    current_extent = start_extent

    descs.pvd.set_extent_location(current_extent)
    current_extent += 1

    descs.impl_use.set_extent_location(current_extent)
    current_extent += 1

    descs.partition.set_extent_location(current_extent)
    current_extent += 1

    descs.logical_volume.set_extent_location(current_extent)
    current_extent += 1

    descs.unallocated_space.set_extent_location(current_extent)
    current_extent += 1

    descs.terminator.set_extent_location(current_extent)
    current_extent += 1
[ "def", "_assign_udf_desc_extents", "(", "descs", ",", "start_extent", ")", ":", "# type: (PyCdlib._UDFDescriptors, int) -> None", "current_extent", "=", "start_extent", "descs", ".", "pvd", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1", "descs", ".", "impl_use", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1", "descs", ".", "partition", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1", "descs", ".", "logical_volume", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1", "descs", ".", "unallocated_space", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1", "descs", ".", "terminator", ".", "set_extent_location", "(", "current_extent", ")", "current_extent", "+=", "1" ]
An internal function to assign a consecutive sequence of extents for the given set of UDF Descriptors, starting at the given extent. Parameters: descs - The PyCdlib._UDFDescriptors object to assign extents for. start_extent - The starting extent to assign from. Returns: Nothing.
[ "An", "internal", "function", "to", "assign", "a", "consecutive", "sequence", "of", "extents", "for", "the", "given", "set", "of", "UDF", "Descriptors", "starting", "at", "the", "given", "extent", "." ]
language: python | partition: train | avg_line_len: 29.870968
hydraplatform/hydra-base
hydra_base/lib/attributes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/attributes.py#L240-L247
def get_attributes(**kwargs):
    """
    Get all attributes
    """
    attrs = db.DBSession.query(Attr).order_by(Attr.name).all()

    return attrs
[ "def", "get_attributes", "(", "*", "*", "kwargs", ")", ":", "attrs", "=", "db", ".", "DBSession", ".", "query", "(", "Attr", ")", ".", "order_by", "(", "Attr", ".", "name", ")", ".", "all", "(", ")", "return", "attrs" ]
Get all attributes
[ "Get", "all", "attributes" ]
language: python | partition: train | avg_line_len: 18.375
mgedmin/check-manifest
check_manifest.py
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L774-L778
def file_matches(filename, patterns):
    """Does this filename match any of the patterns?"""
    return any(fnmatch.fnmatch(filename, pat)
               or fnmatch.fnmatch(os.path.basename(filename), pat)
               for pat in patterns)
[ "def", "file_matches", "(", "filename", ",", "patterns", ")", ":", "return", "any", "(", "fnmatch", ".", "fnmatch", "(", "filename", ",", "pat", ")", "or", "fnmatch", ".", "fnmatch", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "pat", ")", "for", "pat", "in", "patterns", ")" ]
Does this filename match any of the patterns?
[ "Does", "this", "filename", "match", "any", "of", "the", "patterns?" ]
language: python | partition: train | avg_line_len: 47.6
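The double fnmatch means a pattern can match either the full path or just the basename. A self-contained sketch of the same logic, with made-up paths:

import fnmatch
import os.path

def file_matches(filename, patterns):
    return any(fnmatch.fnmatch(filename, pat)
               or fnmatch.fnmatch(os.path.basename(filename), pat)
               for pat in patterns)

print(file_matches('src/pkg/module.py', ['*.py']))   # True
print(file_matches('docs/Makefile', ['Makefile']))   # True
print(file_matches('src/pkg/module.py', ['*.rst']))  # False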
CZ-NIC/yangson
yangson/statement.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/statement.py#L381-L393
def substatements(self) -> List[Statement]:
    """Parse substatements.

    Raises:
        EndOfInput: If past the end of input.
    """
    res = []
    self.opt_separator()
    while self.peek() != "}":
        res.append(self.statement())
        self.opt_separator()
    self.offset += 1
    return res
[ "def", "substatements", "(", "self", ")", "->", "List", "[", "Statement", "]", ":", "res", "=", "[", "]", "self", ".", "opt_separator", "(", ")", "while", "self", ".", "peek", "(", ")", "!=", "\"}\"", ":", "res", ".", "append", "(", "self", ".", "statement", "(", ")", ")", "self", ".", "opt_separator", "(", ")", "self", ".", "offset", "+=", "1", "return", "res" ]
Parse substatements. Raises: EndOfInput: If past the end of input.
[ "Parse", "substatements", "." ]
language: python | partition: train | avg_line_len: 26.153846
PythonCharmers/python-future
src/future/types/newstr.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newstr.py#L265-L273
def index(self, sub, *args):
    """
    Like newstr.find() but raise ValueError when the substring is
    not found.
    """
    pos = self.find(sub, *args)
    if pos == -1:
        raise ValueError('substring not found')
    return pos
[ "def", "index", "(", "self", ",", "sub", ",", "*", "args", ")", ":", "pos", "=", "self", ".", "find", "(", "sub", ",", "*", "args", ")", "if", "pos", "==", "-", "1", ":", "raise", "ValueError", "(", "'substring not found'", ")", "return", "pos" ]
Like newstr.find() but raise ValueError when the substring is not found.
[ "Like", "newstr", ".", "find", "()", "but", "raise", "ValueError", "when", "the", "substring", "is", "not", "found", "." ]
language: python | partition: train | avg_line_len: 29.111111
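The contract mirrors str.index in Python 3, shown here with the built-in type:

print('hello'.index('ll'))  # 2
try:
    'hello'.index('zz')
except ValueError as exc:
    print(exc)  # substring not found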
ponty/confduino
confduino/boardremove.py
https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/boardremove.py#L9-L20
def remove_board(board_id):
    """remove board.

    :param board_id: board id (e.g. 'diecimila')
    :rtype: None
    """
    log.debug('remove %s', board_id)

    lines = boards_txt().lines()
    lines = filter(lambda x: not x.strip().startswith(board_id + '.'),
                   lines)
    boards_txt().write_lines(lines)
[ "def", "remove_board", "(", "board_id", ")", ":", "log", ".", "debug", "(", "'remove %s'", ",", "board_id", ")", "lines", "=", "boards_txt", "(", ")", ".", "lines", "(", ")", "lines", "=", "filter", "(", "lambda", "x", ":", "not", "x", ".", "strip", "(", ")", ".", "startswith", "(", "board_id", "+", "'.'", ")", ",", "lines", ")", "boards_txt", "(", ")", ".", "write_lines", "(", "lines", ")" ]
remove board. :param board_id: board id (e.g. 'diecimila') :rtype: None
[ "remove", "board", "." ]
language: python | partition: train | avg_line_len: 24.833333
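A filtering sketch with made-up boards.txt lines: every line whose stripped form starts with '<board_id>.' is dropped.

board_id = 'uno'
lines = [
    'uno.name=Arduino Uno',
    'uno.build.mcu=atmega328p',
    'mega.name=Arduino Mega',
]
kept = [line for line in lines
        if not line.strip().startswith(board_id + '.')]
print(kept)  # ['mega.name=Arduino Mega']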
pgmpy/pgmpy
pgmpy/factors/discrete/DiscreteFactor.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/DiscreteFactor.py#L439-L525
def sum(self, phi1, inplace=True):
    """
    DiscreteFactor sum with `phi1`.

    Parameters
    ----------
    phi1: `DiscreteFactor` instance.
        DiscreteFactor to be added.

    inplace: boolean
        If inplace=True it will modify the factor itself, else would return
        a new factor.

    Returns
    -------
    DiscreteFactor or None: if inplace=True (default) returns None
                            if inplace=False returns a new `DiscreteFactor` instance.

    Example
    -------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> phi1.sum(phi2, inplace=True)
    >>> phi1.variables
    ['x1', 'x2', 'x3', 'x4']
    >>> phi1.cardinality
    array([2, 3, 2, 2])
    >>> phi1.values
    array([[[[ 0,  0],
             [ 4,  6]],
            [[ 0,  4],
             [12, 18]],
            [[ 0,  8],
             [20, 30]]],
           [[[ 6, 18],
             [35, 49]],
            [[ 8, 24],
             [45, 63]],
            [[10, 30],
             [55, 77]]]])
    """
    phi = self if inplace else self.copy()

    if isinstance(phi1, (int, float)):
        phi.values += phi1
    else:
        phi1 = phi1.copy()

        # modifying phi to add new variables
        extra_vars = set(phi1.variables) - set(phi.variables)
        if extra_vars:
            slice_ = [slice(None)] * len(phi.variables)
            slice_.extend([np.newaxis] * len(extra_vars))
            phi.values = phi.values[tuple(slice_)]
            phi.variables.extend(extra_vars)

            new_var_card = phi1.get_cardinality(extra_vars)
            phi.cardinality = np.append(phi.cardinality,
                                        [new_var_card[var] for var in extra_vars])

        # modifying phi1 to add new variables
        extra_vars = set(phi.variables) - set(phi1.variables)
        if extra_vars:
            slice_ = [slice(None)] * len(phi1.variables)
            slice_.extend([np.newaxis] * len(extra_vars))
            phi1.values = phi1.values[tuple(slice_)]
            phi1.variables.extend(extra_vars)
            # No need to modify cardinality as we don't need it.

        # rearranging the axes of phi1 to match phi
        for axis in range(phi.values.ndim):
            exchange_index = phi1.variables.index(phi.variables[axis])
            phi1.variables[axis], phi1.variables[exchange_index] = \
                phi1.variables[exchange_index], phi1.variables[axis]
            phi1.values = phi1.values.swapaxes(axis, exchange_index)

        phi.values = phi.values + phi1.values

    if not inplace:
        return phi
[ "def", "sum", "(", "self", ",", "phi1", ",", "inplace", "=", "True", ")", ":", "phi", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "if", "isinstance", "(", "phi1", ",", "(", "int", ",", "float", ")", ")", ":", "phi", ".", "values", "+=", "phi1", "else", ":", "phi1", "=", "phi1", ".", "copy", "(", ")", "# modifying phi to add new variables", "extra_vars", "=", "set", "(", "phi1", ".", "variables", ")", "-", "set", "(", "phi", ".", "variables", ")", "if", "extra_vars", ":", "slice_", "=", "[", "slice", "(", "None", ")", "]", "*", "len", "(", "phi", ".", "variables", ")", "slice_", ".", "extend", "(", "[", "np", ".", "newaxis", "]", "*", "len", "(", "extra_vars", ")", ")", "phi", ".", "values", "=", "phi", ".", "values", "[", "tuple", "(", "slice_", ")", "]", "phi", ".", "variables", ".", "extend", "(", "extra_vars", ")", "new_var_card", "=", "phi1", ".", "get_cardinality", "(", "extra_vars", ")", "phi", ".", "cardinality", "=", "np", ".", "append", "(", "phi", ".", "cardinality", ",", "[", "new_var_card", "[", "var", "]", "for", "var", "in", "extra_vars", "]", ")", "# modifying phi1 to add new variables", "extra_vars", "=", "set", "(", "phi", ".", "variables", ")", "-", "set", "(", "phi1", ".", "variables", ")", "if", "extra_vars", ":", "slice_", "=", "[", "slice", "(", "None", ")", "]", "*", "len", "(", "phi1", ".", "variables", ")", "slice_", ".", "extend", "(", "[", "np", ".", "newaxis", "]", "*", "len", "(", "extra_vars", ")", ")", "phi1", ".", "values", "=", "phi1", ".", "values", "[", "tuple", "(", "slice_", ")", "]", "phi1", ".", "variables", ".", "extend", "(", "extra_vars", ")", "# No need to modify cardinality as we don't need it.", "# rearranging the axes of phi1 to match phi", "for", "axis", "in", "range", "(", "phi", ".", "values", ".", "ndim", ")", ":", "exchange_index", "=", "phi1", ".", "variables", ".", "index", "(", "phi", ".", "variables", "[", "axis", "]", ")", "phi1", ".", "variables", "[", "axis", "]", ",", "phi1", ".", "variables", "[", "exchange_index", "]", "=", "phi1", ".", "variables", "[", "exchange_index", "]", ",", "phi1", ".", "variables", "[", "axis", "]", "phi1", ".", "values", "=", "phi1", ".", "values", ".", "swapaxes", "(", "axis", ",", "exchange_index", ")", "phi", ".", "values", "=", "phi", ".", "values", "+", "phi1", ".", "values", "if", "not", "inplace", ":", "return", "phi" ]
DiscreteFactor sum with `phi1`. Parameters ---------- phi1: `DiscreteFactor` instance. DiscreteFactor to be added. inplace: boolean If inplace=True it will modify the factor itself, else would return a new factor. Returns ------- DiscreteFactor or None: if inplace=True (default) returns None if inplace=False returns a new `DiscreteFactor` instance. Example ------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8)) >>> phi1.sum(phi2, inplace=True) >>> phi1.variables ['x1', 'x2', 'x3', 'x4'] >>> phi1.cardinality array([2, 3, 2, 2]) >>> phi1.values array([[[[ 0, 0], [ 4, 6]], [[ 0, 4], [12, 18]], [[ 0, 8], [20, 30]]], [[[ 6, 18], [35, 49]], [[ 8, 24], [45, 63]], [[10, 30], [55, 77]]]])
[ "DiscreteFactor", "sum", "with", "phi1", "." ]
language: python | partition: train | avg_line_len: 32.91954
RedFantom/ttkwidgets
ttkwidgets/color/colorpicker.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/color/colorpicker.py#L527-L534
def ok(self):
    """Validate color selection and destroy dialog."""
    rgb, hsv, hexa = self.square.get()
    if self.alpha_channel:
        hexa = self.hexa.get()
        rgb += (self.alpha.get(),)
    self.color = rgb, hsv, hexa
    self.destroy()
[ "def", "ok", "(", "self", ")", ":", "rgb", ",", "hsv", ",", "hexa", "=", "self", ".", "square", ".", "get", "(", ")", "if", "self", ".", "alpha_channel", ":", "hexa", "=", "self", ".", "hexa", ".", "get", "(", ")", "rgb", "+=", "(", "self", ".", "alpha", ".", "get", "(", ")", ",", ")", "self", ".", "color", "=", "rgb", ",", "hsv", ",", "hexa", "self", ".", "destroy", "(", ")" ]
Validate color selection and destroy dialog.
[ "Validate", "color", "selection", "and", "destroy", "dialog", "." ]
language: python | partition: train | avg_line_len: 34
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L249-L253
def click_at_coordinates(self, x, y):
    """
    Click at (x,y) coordinates.
    """
    self.device.click(int(x), int(y))
[ "def", "click_at_coordinates", "(", "self", ",", "x", ",", "y", ")", ":", "self", ".", "device", ".", "click", "(", "int", "(", "x", ")", ",", "int", "(", "y", ")", ")" ]
Click at (x,y) coordinates.
[ "Click", "at", "(", "x", "y", ")", "coordinates", "." ]
language: python | partition: train | avg_line_len: 27
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L184-L198
def dataset_path_iterator(file_path: str) -> Iterator[str]:
    """
    An iterator returning file_paths in a directory
    containing CONLL-formatted files.
    """
    logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
    for root, _, files in list(os.walk(file_path)):
        for data_file in files:
            # These are a relic of the dataset pre-processing. Every
            # file will be duplicated - one file called filename.gold_skel
            # and one generated from the preprocessing called filename.gold_conll.
            if not data_file.endswith("gold_conll"):
                continue
            yield os.path.join(root, data_file)
[ "def", "dataset_path_iterator", "(", "file_path", ":", "str", ")", "->", "Iterator", "[", "str", "]", ":", "logger", ".", "info", "(", "\"Reading CONLL sentences from dataset files at: %s\"", ",", "file_path", ")", "for", "root", ",", "_", ",", "files", "in", "list", "(", "os", ".", "walk", "(", "file_path", ")", ")", ":", "for", "data_file", "in", "files", ":", "# These are a relic of the dataset pre-processing. Every", "# file will be duplicated - one file called filename.gold_skel", "# and one generated from the preprocessing called filename.gold_conll.", "if", "not", "data_file", ".", "endswith", "(", "\"gold_conll\"", ")", ":", "continue", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "data_file", ")" ]
An iterator returning file_paths in a directory containing CONLL-formatted files.
[ "An", "iterator", "returning", "file_paths", "in", "a", "directory", "containing", "CONLL", "-", "formatted", "files", "." ]
language: python | partition: train | avg_line_len: 48.066667
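A sketch of the filter applied while walking a directory; the layout in the comment is hypothetical.

import os

def gold_conll_paths(file_path):
    for root, _, files in os.walk(file_path):
        for name in files:
            if name.endswith('gold_conll'):  # the *.gold_skel twins are skipped
                yield os.path.join(root, name)

# e.g. for conll-2012/train/ containing a.gold_conll and a.gold_skel,
# only conll-2012/train/a.gold_conll would be yielded.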
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L1027-L1043
def get_or_create_author(self, name: str) -> Author:
    """Get an author by name, or create one if it does not exist."""
    author = self.object_cache_author.get(name)

    if author is not None:
        self.session.add(author)
        return author

    author = self.get_author_by_name(name)

    if author is not None:
        self.object_cache_author[name] = author
        return author

    author = self.object_cache_author[name] = Author.from_name(name=name)
    self.session.add(author)
    return author
[ "def", "get_or_create_author", "(", "self", ",", "name", ":", "str", ")", "->", "Author", ":", "author", "=", "self", ".", "object_cache_author", ".", "get", "(", "name", ")", "if", "author", "is", "not", "None", ":", "self", ".", "session", ".", "add", "(", "author", ")", "return", "author", "author", "=", "self", ".", "get_author_by_name", "(", "name", ")", "if", "author", "is", "not", "None", ":", "self", ".", "object_cache_author", "[", "name", "]", "=", "author", "return", "author", "author", "=", "self", ".", "object_cache_author", "[", "name", "]", "=", "Author", ".", "from_name", "(", "name", "=", "name", ")", "self", ".", "session", ".", "add", "(", "author", ")", "return", "author" ]
Get an author by name, or creates one if it does not exist.
[ "Get", "an", "author", "by", "name", "or", "creates", "one", "if", "it", "does", "not", "exist", "." ]
language: python | partition: train | avg_line_len: 32.294118
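The lookup order generalizes to a common get-or-create pattern: in-memory cache first, then the backing store, then create and cache. A generic sketch with illustrative names:

def get_or_create(cache, lookup, create, key):
    obj = cache.get(key)       # 1. in-memory cache
    if obj is None:
        obj = lookup(key)      # 2. backing store (e.g. a DB query)
    if obj is None:
        obj = create(key)      # 3. create it
    cache[key] = obj
    return obj

store = {}
print(get_or_create(store, lambda k: None, lambda k: k.upper(), 'ada'))  # 'ADA'
print(store)  # {'ada': 'ADA'}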
xflows/rdm
rdm/db/converters.py
https://github.com/xflows/rdm/blob/d984e2a0297e5fa8d799953bbd0dba79b05d403d/rdm/db/converters.py#L316-L326
def other_Orange_tables(self):
    '''
    Returns the related tables as Orange example tables.

    :rtype: list
    '''
    target_table = self.db.target_table

    if not self.db.orng_tables:
        return [self.convert_table(table, None) for table in self.db.tables
                if table != target_table]
    else:
        return [table for name, table in list(self.db.orng_tables.items())
                if name != target_table]
[ "def", "other_Orange_tables", "(", "self", ")", ":", "target_table", "=", "self", ".", "db", ".", "target_table", "if", "not", "self", ".", "db", ".", "orng_tables", ":", "return", "[", "self", ".", "convert_table", "(", "table", ",", "None", ")", "for", "table", "in", "self", ".", "db", ".", "tables", "if", "table", "!=", "target_table", "]", "else", ":", "return", "[", "table", "for", "name", ",", "table", "in", "list", "(", "self", ".", "db", ".", "orng_tables", ".", "items", "(", ")", ")", "if", "name", "!=", "target_table", "]" ]
Returns the related tables as Orange example tables. :rtype: list
[ "Returns", "the", "related", "tables", "as", "Orange", "example", "tables", "." ]
language: python | partition: train | avg_line_len: 39.909091
python-diamond/Diamond
src/collectors/dseopscenter/dseopscenter.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/dseopscenter/dseopscenter.py#L37-L111
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(DseOpsCenterCollector, self).get_default_config()
    metrics = [
        'cf-bf-false-positives', 'cf-bf-false-ratio', 'cf-bf-space-used',
        'cf-keycache-hit-rate', 'cf-keycache-hits', 'cf-keycache-requests',
        'cf-live-disk-used', 'cf-live-sstables', 'cf-pending-tasks',
        'cf-read-latency-op', 'cf-read-ops', 'cf-rowcache-hit-rate',
        'cf-rowcache-hits', 'cf-rowcache-requests', 'cf-total-disk-used',
        'cf-write-latency-op', 'cf-write-ops', 'cms-collection-count',
        'cms-collection-time', 'data-load', 'heap-committed', 'heap-max',
        'heap-used', 'key-cache-hit-rate', 'key-cache-hits',
        'key-cache-requests', 'nonheap-committed', 'nonheap-max',
        'nonheap-used', 'pending-compaction-tasks',
        'pending-flush-sorter-tasks', 'pending-flushes',
        'pending-gossip-tasks', 'pending-hinted-handoff',
        'pending-internal-responses', 'pending-memtable-post-flushers',
        'pending-migrations', 'pending-misc-tasks', 'pending-read-ops',
        'pending-read-repair-tasks', 'pending-repair-tasks',
        'pending-repl-on-write-tasks', 'pending-request-responses',
        'pending-streams', 'pending-write-ops', 'read-latency-op',
        'read-ops', 'row-cache-hit-rate', 'row-cache-hits',
        'row-cache-requests', 'solr-avg-time-per-req', 'solr-errors',
        'solr-requests', 'solr-timeouts', 'total-bytes-compacted',
        'total-compactions-completed', 'write-latency-op', 'write-ops',
    ]
    config.update({
        'host': '127.0.0.1',
        'port': 8888,
        'path': 'cassandra',
        'node_group': '*',
        'metrics': ','.join(metrics),
        'default_tail_opts': '&forecast=0&node_aggregation=1',
    })
    return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "DseOpsCenterCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "metrics", "=", "[", "'cf-bf-false-positives'", ",", "'cf-bf-false-ratio'", ",", "'cf-bf-space-used'", ",", "'cf-keycache-hit-rate'", ",", "'cf-keycache-hits'", ",", "'cf-keycache-requests'", ",", "'cf-live-disk-used'", ",", "'cf-live-sstables'", ",", "'cf-pending-tasks'", ",", "'cf-read-latency-op'", ",", "'cf-read-ops'", ",", "'cf-rowcache-hit-rate'", ",", "'cf-rowcache-hits'", ",", "'cf-rowcache-requests'", ",", "'cf-total-disk-used'", ",", "'cf-write-latency-op'", ",", "'cf-write-ops'", ",", "'cms-collection-count'", ",", "'cms-collection-time'", ",", "'data-load'", ",", "'heap-committed'", ",", "'heap-max'", ",", "'heap-used'", ",", "'key-cache-hit-rate'", ",", "'key-cache-hits'", ",", "'key-cache-requests'", ",", "'nonheap-committed'", ",", "'nonheap-max'", ",", "'nonheap-used'", ",", "'pending-compaction-tasks'", ",", "'pending-flush-sorter-tasks'", ",", "'pending-flushes'", ",", "'pending-gossip-tasks'", ",", "'pending-hinted-handoff'", ",", "'pending-internal-responses'", ",", "'pending-memtable-post-flushers'", ",", "'pending-migrations'", ",", "'pending-misc-tasks'", ",", "'pending-read-ops'", ",", "'pending-read-repair-tasks'", ",", "'pending-repair-tasks'", ",", "'pending-repl-on-write-tasks'", ",", "'pending-request-responses'", ",", "'pending-streams'", ",", "'pending-write-ops'", ",", "'read-latency-op'", ",", "'read-ops'", ",", "'row-cache-hit-rate'", ",", "'row-cache-hits'", ",", "'row-cache-requests'", ",", "'solr-avg-time-per-req'", ",", "'solr-errors'", ",", "'solr-requests'", ",", "'solr-timeouts'", ",", "'total-bytes-compacted'", ",", "'total-compactions-completed'", ",", "'write-latency-op'", ",", "'write-ops'", ",", "]", "config", ".", "update", "(", "{", "'host'", ":", "'127.0.0.1'", ",", "'port'", ":", "8888", ",", "'path'", ":", "'cassandra'", ",", "'node_group'", ":", "'*'", ",", "'metrics'", ":", "','", ".", "join", "(", "metrics", ")", ",", "'default_tail_opts'", ":", "'&forecast=0&node_aggregation=1'", ",", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
language: python | partition: train | avg_line_len: 32.173333
google/flatbuffers
python/flatbuffers/builder.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L511-L518
def Slot(self, slotnum):
    """
    Slot sets the vtable key `voffset` to the current location in the
    buffer.
    """
    self.assertNested()
    self.current_vtable[slotnum] = self.Offset()
[ "def", "Slot", "(", "self", ",", "slotnum", ")", ":", "self", ".", "assertNested", "(", ")", "self", ".", "current_vtable", "[", "slotnum", "]", "=", "self", ".", "Offset", "(", ")" ]
Slot sets the vtable key `voffset` to the current location in the buffer.
[ "Slot", "sets", "the", "vtable", "key", "voffset", "to", "the", "current", "location", "in", "the", "buffer", "." ]
language: python | partition: train | avg_line_len: 26.625
abourget/gevent-socketio
socketio/virtsocket.py
https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/virtsocket.py#L28-L53
def default_error_handler(socket, error_name, error_message, endpoint,
                          msg_id, quiet):
    """This is the default error handler, you can override this when
    calling :func:`socketio.socketio_manage`.

    It basically sends an event through the socket with the 'error' name.

    See documentation for :meth:`Socket.error`.

    :param quiet: if quiet, this handler will not send a packet to the
                  user, but only log for the server developer.
    """
    pkt = dict(type='event',
               name='error',
               args=[error_name, error_message],
               endpoint=endpoint)
    if msg_id:
        pkt['id'] = msg_id

    # Send an error event through the Socket
    if not quiet:
        socket.send_packet(pkt)

    # Log that error somewhere for debugging...
    log.error(u"default_error_handler: {}, {} (endpoint={}, msg_id={})".format(
        error_name, error_message, endpoint, msg_id
    ))
[ "def", "default_error_handler", "(", "socket", ",", "error_name", ",", "error_message", ",", "endpoint", ",", "msg_id", ",", "quiet", ")", ":", "pkt", "=", "dict", "(", "type", "=", "'event'", ",", "name", "=", "'error'", ",", "args", "=", "[", "error_name", ",", "error_message", "]", ",", "endpoint", "=", "endpoint", ")", "if", "msg_id", ":", "pkt", "[", "'id'", "]", "=", "msg_id", "# Send an error event through the Socket", "if", "not", "quiet", ":", "socket", ".", "send_packet", "(", "pkt", ")", "# Log that error somewhere for debugging...", "log", ".", "error", "(", "u\"default_error_handler: {}, {} (endpoint={}, msg_id={})\"", ".", "format", "(", "error_name", ",", "error_message", ",", "endpoint", ",", "msg_id", ")", ")" ]
This is the default error handler, you can override this when calling :func:`socketio.socketio_manage`. It basically sends an event through the socket with the 'error' name. See documentation for :meth:`Socket.error`. :param quiet: if quiet, this handler will not send a packet to the user, but only log for the server developer.
[ "This", "is", "the", "default", "error", "handler", "you", "can", "override", "this", "when", "calling", ":", "func", ":", "socketio", ".", "socketio_manage", "." ]
language: python | partition: valid | avg_line_len: 35.730769
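For a hypothetical call with msg_id set, the packet pushed through the socket would look like this (all values are illustrative):

pkt = dict(type='event',
           name='error',
           args=['method_access_denied', 'You do not have access'],
           endpoint='/chat')
pkt['id'] = 'msg_42'
# pkt == {'type': 'event', 'name': 'error',
#         'args': ['method_access_denied', 'You do not have access'],
#         'endpoint': '/chat', 'id': 'msg_42'}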
dereneaton/ipyrad
ipyrad/analysis/bucky.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bucky.py#L158-L264
def write_nexus_files(self, force=False, quiet=False):
    """
    Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
    exists an exception will be raised unless you use the force flag which
    will remove all files in the directory.

    Parameters:
    -----------
    force (bool):
        If True then all files in {workdir}/{name}/*.nex* will be removed.
    """
    ## clear existing files
    existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
    if any(existing):
        if force:
            for rfile in existing:
                os.remove(rfile)
        else:
            path = os.path.join(self.workdir, self.name)
            raise IPyradWarningExit(EXISTING_NEX_FILES.format(path))

    ## parse the loci or alleles file
    with open(self.files.data) as infile:
        loci = iter(infile.read().strip().split("|\n"))

    ## use entered samples or parse them from the file
    if not self.samples:
        with open(self.files.data) as infile:
            samples = set((i.split()[0] for i in infile.readlines()
                           if "//" not in i))
    else:
        samples = set(self.samples)

    ## keep track of how many loci pass filtering
    totn = len(samples)
    nloci = 0

    ## this set is just used for matching, then we randomly
    ## subsample for real within the locus so it varies
    if self._alleles:
        msamples = {i+rbin() for i in samples}
    else:
        msamples = samples

    ## write subsampled set of loci
    for loc in loci:
        ## get names and seqs from locus
        dat = loc.split("\n")[:-1]
        try:
            names = [i.split()[0] for i in dat]
            snames = set(names)
            seqs = np.array([list(i.split()[1]) for i in dat])
        except IndexError:
            print(ALLELESBUGFIXED)
            continue

        ## check name matches
        if len(snames.intersection(msamples)) == totn:

            ## prune sample names if alleles. Done here so it is randomly
            ## different in every locus which allele is selected from
            ## each sample (e.g., 0 or 1)
            if self._alleles:
                _samples = [i+rbin() for i in samples]
            else:
                _samples = samples

            ## re-order seqs to be in set order
            seqsamp = seqs[[names.index(tax) for tax in _samples]]

            ## resolve ambiguities randomly if .loci file otherwise
            ## sample one of the alleles if .alleles file.
            if not self._alleles:
                seqsamp = _resolveambig(seqsamp)

            ## find parsimony informative sites
            if _count_PIS(seqsamp, self.params.minsnps):
                ## keep the locus
                nloci += 1

                ## remove empty columns given this sampling
                ## (mask gap characters as N before scanning for empty columns)
                copied = seqsamp.copy()
                copied[copied == "-"] = "N"
                rmcol = np.all(copied == "N", axis=0)
                seqsamp = seqsamp[:, ~rmcol]

                ## write nexus file
                if self._alleles:
                    ## trim off the allele number
                    samps = [i.rsplit("_", 1)[0] for i in _samples]
                    mdict = dict(zip(samps, [i.tostring() for i in seqsamp]))
                else:
                    mdict = dict(zip(_samples, [i.tostring() for i in seqsamp]))
                self._write_nex(mdict, nloci)

                ## quit early if using maxloci
                if nloci == self.params.maxloci:
                    break

    ## print data size
    if not quiet:
        path = os.path.join(self.workdir, self.name)
        path = path.replace(os.path.expanduser("~"), "~")
        print("wrote {} nexus files to {}".format(nloci, path))
[ "def", "write_nexus_files", "(", "self", ",", "force", "=", "False", ",", "quiet", "=", "False", ")", ":", "## clear existing files ", "existing", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "self", ".", "name", ",", "\"*.nex\"", ")", ")", "if", "any", "(", "existing", ")", ":", "if", "force", ":", "for", "rfile", "in", "existing", ":", "os", ".", "remove", "(", "rfile", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "self", ".", "name", ")", "raise", "IPyradWarningExit", "(", "EXISTING_NEX_FILES", ".", "format", "(", "path", ")", ")", "## parse the loci or alleles file", "with", "open", "(", "self", ".", "files", ".", "data", ")", "as", "infile", ":", "loci", "=", "iter", "(", "infile", ".", "read", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "\"|\\n\"", ")", ")", "## use entered samples or parse them from the file", "if", "not", "self", ".", "samples", ":", "with", "open", "(", "self", ".", "files", ".", "data", ")", "as", "infile", ":", "samples", "=", "set", "(", "(", "i", ".", "split", "(", ")", "[", "0", "]", "for", "i", "in", "infile", ".", "readlines", "(", ")", "if", "\"//\"", "not", "in", "i", ")", ")", "else", ":", "samples", "=", "set", "(", "self", ".", "samples", ")", "## keep track of how many loci pass filtering", "totn", "=", "len", "(", "samples", ")", "nloci", "=", "0", "## this set is just used for matching, then we randomly", "## subsample for real within the locus so it varies ", "if", "self", ".", "_alleles", ":", "msamples", "=", "{", "i", "+", "rbin", "(", ")", "for", "i", "in", "samples", "}", "else", ":", "msamples", "=", "samples", "## write subsampled set of loci", "for", "loc", "in", "loci", ":", "## get names and seqs from locus", "dat", "=", "loc", ".", "split", "(", "\"\\n\"", ")", "[", ":", "-", "1", "]", "try", ":", "names", "=", "[", "i", ".", "split", "(", ")", "[", "0", "]", "for", "i", "in", "dat", "]", "snames", "=", "set", "(", "names", ")", "seqs", "=", "np", ".", "array", "(", "[", "list", "(", "i", ".", "split", "(", ")", "[", "1", "]", ")", "for", "i", "in", "dat", "]", ")", "except", "IndexError", ":", "print", "(", "ALLELESBUGFIXED", ")", "continue", "## check name matches", "if", "len", "(", "snames", ".", "intersection", "(", "msamples", ")", ")", "==", "totn", ":", "## prune sample names if alleles. 
Done here so it is randomly", "## different in every locus which allele is selected from ", "## each sample (e.g., 0 or 1)", "if", "self", ".", "_alleles", ":", "_samples", "=", "[", "i", "+", "rbin", "(", ")", "for", "i", "in", "samples", "]", "else", ":", "_samples", "=", "samples", "## re-order seqs to be in set order", "seqsamp", "=", "seqs", "[", "[", "names", ".", "index", "(", "tax", ")", "for", "tax", "in", "_samples", "]", "]", "## resolve ambiguities randomly if .loci file otherwise", "## sample one of the alleles if .alleles file.", "if", "not", "self", ".", "_alleles", ":", "seqsamp", "=", "_resolveambig", "(", "seqsamp", ")", "## find parsimony informative sites", "if", "_count_PIS", "(", "seqsamp", ",", "self", ".", "params", ".", "minsnps", ")", ":", "## keep the locus", "nloci", "+=", "1", "## remove empty columns given this sampling", "copied", "=", "seqsamp", ".", "copy", "(", ")", "copied", "[", "copied", "==", "\"-\"", "]", "==", "\"N\"", "rmcol", "=", "np", ".", "all", "(", "copied", "==", "\"N\"", ",", "axis", "=", "0", ")", "seqsamp", "=", "seqsamp", "[", ":", ",", "~", "rmcol", "]", "## write nexus file", "if", "self", ".", "_alleles", ":", "## trim off the allele number", "samps", "=", "[", "i", ".", "rsplit", "(", "\"_\"", ",", "1", ")", "[", "0", "]", "for", "i", "in", "_samples", "]", "mdict", "=", "dict", "(", "zip", "(", "samps", ",", "[", "i", ".", "tostring", "(", ")", "for", "i", "in", "seqsamp", "]", ")", ")", "else", ":", "mdict", "=", "dict", "(", "zip", "(", "_samples", ",", "[", "i", ".", "tostring", "(", ")", "for", "i", "in", "seqsamp", "]", ")", ")", "self", ".", "_write_nex", "(", "mdict", ",", "nloci", ")", "## quit early if using maxloci", "if", "nloci", "==", "self", ".", "params", ".", "maxloci", ":", "break", "## print data size", "if", "not", "quiet", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "self", ".", "name", ")", "path", "=", "path", ".", "replace", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ",", "\"~\"", ")", "print", "(", "\"wrote {} nexus files to {}\"", ".", "format", "(", "nloci", ",", "path", ")", ")" ]
Write nexus files to {workdir}/{name}/[0-N].nex. If the directory already exists an exception will be raised unless you use the force flag which will remove all files in the directory. Parameters: ----------- force (bool): If True then all files in {workdir}/{name}/*.nex* will be removed.
[ "Write", "nexus", "files", "to", "{", "workdir", "}", "/", "{", "name", "}", "/", "[", "0", "-", "N", "]", ".", "nex", "If", "the", "directory", "already", "exists", "an", "exception", "will", "be", "raised", "unless", "you", "use", "the", "force", "flag", "which", "will", "remove", "all", "files", "in", "the", "directory", "." ]
python
valid
37.523364
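The empty-column pruning step in write_nexus_files can be checked in isolation; a minimal numpy sketch with made-up sequence data:

import numpy as np

seqsamp = np.array([list("AC-G"), list("AT-G"), list("NC-G")])
copied = seqsamp.copy()
copied[copied == "-"] = "N"               # mask gaps as missing data
rmcol = np.all(copied == "N", axis=0)     # columns with no data at all
seqsamp = seqsamp[:, ~rmcol]              # drop them
print(["".join(row) for row in seqsamp])  # ['ACG', 'ATG', 'NCG']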
ferhatelmas/sexmachine
sexmachine/detector.py
https://github.com/ferhatelmas/sexmachine/blob/85d33bb47ccc017676e69788750f116e391f52db/sexmachine/detector.py#L33-L40
def _parse(self, filename): """Opens data file and for each line, calls _eat_name_line""" self.names = {} with codecs.open(filename, encoding="iso8859-1") as f: for line in f: if any(map(lambda c: 128 < ord(c) < 160, line)): line = line.encode("iso8859-1").decode("windows-1252") self._eat_name_line(line.strip())
[ "def", "_parse", "(", "self", ",", "filename", ")", ":", "self", ".", "names", "=", "{", "}", "with", "codecs", ".", "open", "(", "filename", ",", "encoding", "=", "\"iso8859-1\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "any", "(", "map", "(", "lambda", "c", ":", "128", "<", "ord", "(", "c", ")", "<", "160", ",", "line", ")", ")", ":", "line", "=", "line", ".", "encode", "(", "\"iso8859-1\"", ")", ".", "decode", "(", "\"windows-1252\"", ")", "self", ".", "_eat_name_line", "(", "line", ".", "strip", "(", ")", ")" ]
Opens data file and for each line, calls _eat_name_line
[ "Opens", "data", "file", "and", "for", "each", "line", "calls", "_eat_name_line" ]
python
valid
49.25
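The re-decode trick in _parse works because bytes 0x80-0x9F map to control characters in ISO-8859-1 but to printable characters in Windows-1252; a self-contained check with a hypothetical name:

raw = b"Ren\x92e"                  # \x92 is a curly apostrophe in cp1252
line = raw.decode("iso8859-1")     # mis-decoded as a C1 control character
if any(128 < ord(c) < 160 for c in line):
    line = line.encode("iso8859-1").decode("windows-1252")
print(line)                        # Ren'e with a proper cp1252 apostrophe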
MagicStack/asyncpg
asyncpg/connection.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L1910-L1928
def _extract_stack(limit=10): """Replacement for traceback.extract_stack() that only does the necessary work for asyncio debug mode. """ frame = sys._getframe().f_back try: stack = traceback.StackSummary.extract( traceback.walk_stack(frame), lookup_lines=False) finally: del frame apg_path = asyncpg.__path__[0] i = 0 while i < len(stack) and stack[i][0].startswith(apg_path): i += 1 stack = stack[i:i + limit] stack.reverse() return ''.join(traceback.format_list(stack))
[ "def", "_extract_stack", "(", "limit", "=", "10", ")", ":", "frame", "=", "sys", ".", "_getframe", "(", ")", ".", "f_back", "try", ":", "stack", "=", "traceback", ".", "StackSummary", ".", "extract", "(", "traceback", ".", "walk_stack", "(", "frame", ")", ",", "lookup_lines", "=", "False", ")", "finally", ":", "del", "frame", "apg_path", "=", "asyncpg", ".", "__path__", "[", "0", "]", "i", "=", "0", "while", "i", "<", "len", "(", "stack", ")", "and", "stack", "[", "i", "]", "[", "0", "]", ".", "startswith", "(", "apg_path", ")", ":", "i", "+=", "1", "stack", "=", "stack", "[", "i", ":", "i", "+", "limit", "]", "stack", ".", "reverse", "(", ")", "return", "''", ".", "join", "(", "traceback", ".", "format_list", "(", "stack", ")", ")" ]
Replacement for traceback.extract_stack() that only does the necessary work for asyncio debug mode.
[ "Replacement", "for", "traceback", ".", "extract_stack", "()", "that", "only", "does", "the", "necessary", "work", "for", "asyncio", "debug", "mode", "." ]
python
train
28.368421
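A standalone sketch of the same stack-summary idea, keeping only the limit/reverse behaviour (the asyncpg-path filtering is dropped, so this is an illustration rather than the library's code):

import sys
import traceback

def brief_stack(limit=5):
    frame = sys._getframe().f_back
    try:
        # lookup_lines=False skips reading source files, keeping this cheap
        stack = traceback.StackSummary.extract(
            traceback.walk_stack(frame), lookup_lines=False)
    finally:
        del frame
    stack = stack[:limit]
    stack.reverse()   # oldest frame first, like a normal traceback
    return ''.join(traceback.format_list(stack))

def caller():
    print(brief_stack())

caller()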
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L3114-L3206
def _all_recall_native_type(self, data, ptitem, prefix): """Checks if loaded data has the type it was stored in. If not converts it. :param data: Data item to be checked and converted :param ptitem: HDf5 Node or Leaf from where data was loaded :param prefix: Prefix for recalling the data type from the hdf5 node attributes :return: Tuple, first item is the (converted) `data` item, second boolean whether item was converted or not. """ typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE) colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE) type_changed = False # Check what the original data type was from the hdf5 node attributes if colltype == HDF5StorageService.COLL_SCALAR: # Here data item was a scalar if isinstance(data, np.ndarray): # If we recall a numpy scalar, pytables loads a 1d array :-/ # So we have to change it to a real scalar value data = np.array([data])[0] type_changed = True if not typestr is None: # Check if current type and stored type match # if not convert the data if typestr != type(data).__name__: if typestr == str.__name__: data = data.decode(self._encoding) else: try: data = pypetconstants.PARAMETERTYPEDICT[typestr](data) except KeyError: # For compatibility with files from older pypet versions data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data) type_changed = True elif (colltype == HDF5StorageService.COLL_TUPLE or colltype == HDF5StorageService.COLL_LIST): # Here data item was originally a tuple or a list if type(data) is not list and type(data) is not tuple: # If the original type cannot be recalled, first convert it to a list type_changed = True data = list(data) if len(data) > 0: first_item = data[0] # Check if the type of the first item was conserved if not typestr == type(first_item).__name__: if not isinstance(data, list): data = list(data) # If type was not conserved we need to convert all items # in the list or tuple for idx, item in enumerate(data): if typestr == str.__name__: data[idx] = data[idx].decode(self._encoding) else: try: data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item) except KeyError: # For compatibility with files from older pypet versions: data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item) type_changed = True if colltype == HDF5StorageService.COLL_TUPLE: # If it was originally a tuple we need to convert it back to tuple if type(data) is not tuple: data = tuple(data) type_changed = True elif colltype == HDF5StorageService.COLL_EMPTY_DICT: data = {} type_changed = True elif isinstance(data, np.ndarray): if typestr == str.__name__: data = np.core.defchararray.decode(data, self._encoding) type_changed = True if colltype == HDF5StorageService.COLL_MATRIX: # Here data item was originally a matrix data = np.matrix(data) type_changed = True return data, type_changed
[ "def", "_all_recall_native_type", "(", "self", ",", "data", ",", "ptitem", ",", "prefix", ")", ":", "typestr", "=", "self", ".", "_all_get_from_attrs", "(", "ptitem", ",", "prefix", "+", "HDF5StorageService", ".", "SCALAR_TYPE", ")", "colltype", "=", "self", ".", "_all_get_from_attrs", "(", "ptitem", ",", "prefix", "+", "HDF5StorageService", ".", "COLL_TYPE", ")", "type_changed", "=", "False", "# Check what the original data type was from the hdf5 node attributes", "if", "colltype", "==", "HDF5StorageService", ".", "COLL_SCALAR", ":", "# Here data item was a scalar", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "# If we recall a numpy scalar, pytables loads a 1d array :-/", "# So we have to change it to a real scalar value", "data", "=", "np", ".", "array", "(", "[", "data", "]", ")", "[", "0", "]", "type_changed", "=", "True", "if", "not", "typestr", "is", "None", ":", "# Check if current type and stored type match", "# if not convert the data", "if", "typestr", "!=", "type", "(", "data", ")", ".", "__name__", ":", "if", "typestr", "==", "str", ".", "__name__", ":", "data", "=", "data", ".", "decode", "(", "self", ".", "_encoding", ")", "else", ":", "try", ":", "data", "=", "pypetconstants", ".", "PARAMETERTYPEDICT", "[", "typestr", "]", "(", "data", ")", "except", "KeyError", ":", "# For compatibility with files from older pypet versions", "data", "=", "pypetconstants", ".", "COMPATPARAMETERTYPEDICT", "[", "typestr", "]", "(", "data", ")", "type_changed", "=", "True", "elif", "(", "colltype", "==", "HDF5StorageService", ".", "COLL_TUPLE", "or", "colltype", "==", "HDF5StorageService", ".", "COLL_LIST", ")", ":", "# Here data item was originally a tuple or a list", "if", "type", "(", "data", ")", "is", "not", "list", "and", "type", "is", "not", "tuple", ":", "# If the original type cannot be recalled, first convert it to a list", "type_changed", "=", "True", "data", "=", "list", "(", "data", ")", "if", "len", "(", "data", ")", ">", "0", ":", "first_item", "=", "data", "[", "0", "]", "# Check if the type of the first item was conserved", "if", "not", "typestr", "==", "type", "(", "first_item", ")", ".", "__name__", ":", "if", "not", "isinstance", "(", "data", ",", "list", ")", ":", "data", "=", "list", "(", "data", ")", "# If type was not conserved we need to convert all items", "# in the list or tuple", "for", "idx", ",", "item", "in", "enumerate", "(", "data", ")", ":", "if", "typestr", "==", "str", ".", "__name__", ":", "data", "[", "idx", "]", "=", "data", "[", "idx", "]", ".", "decode", "(", "self", ".", "_encoding", ")", "else", ":", "try", ":", "data", "[", "idx", "]", "=", "pypetconstants", ".", "PARAMETERTYPEDICT", "[", "typestr", "]", "(", "item", ")", "except", "KeyError", ":", "# For compatibility with files from older pypet versions:", "data", "[", "idx", "]", "=", "pypetconstants", ".", "COMPATPARAMETERTYPEDICT", "[", "typestr", "]", "(", "item", ")", "type_changed", "=", "True", "if", "colltype", "==", "HDF5StorageService", ".", "COLL_TUPLE", ":", "# If it was originally a tuple we need to convert it back to tuple", "if", "type", "(", "data", ")", "is", "not", "tuple", ":", "data", "=", "tuple", "(", "data", ")", "type_changed", "=", "True", "elif", "colltype", "==", "HDF5StorageService", ".", "COLL_EMPTY_DICT", ":", "data", "=", "{", "}", "type_changed", "=", "True", "elif", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "if", "typestr", "==", "str", ".", "__name__", ":", "data", "=", "np", ".", "core", ".", "defchararray", ".", 
"decode", "(", "data", ",", "self", ".", "_encoding", ")", "type_changed", "=", "True", "if", "colltype", "==", "HDF5StorageService", ".", "COLL_MATRIX", ":", "# Here data item was originally a matrix", "data", "=", "np", ".", "matrix", "(", "data", ")", "type_changed", "=", "True", "return", "data", ",", "type_changed" ]
Checks if loaded data has the type it was stored in. If not converts it. :param data: Data item to be checked and converted :param ptitem: HDf5 Node or Leaf from where data was loaded :param prefix: Prefix for recalling the data type from the hdf5 node attributes :return: Tuple, first item is the (converted) `data` item, second boolean whether item was converted or not.
[ "Checks", "if", "loaded", "data", "has", "the", "type", "it", "was", "stored", "in", ".", "If", "not", "converts", "it", "." ]
python
test
43
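The COLL_SCALAR branch relies on np.array([data])[0] to turn the 0-d array that pytables hands back for a stored scalar into a plain scalar before the type lookup; a tiny check:

import numpy as np

data = np.array(42)           # 0-d ndarray, as loaded from HDF5
value = np.array([data])[0]   # back to a scalar element
print(type(data).__name__, type(value).__name__, int(value) == 42)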
UCL-INGI/INGInious
inginious/frontend/task_problems.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/task_problems.py#L220-L224
def show_input(self, template_helper, language, seed): """ Show MatchProblem """ header = ParsableText(self.gettext(language, self._header), "rst", translation=self._translations.get(language, gettext.NullTranslations())) return str(DisplayableMatchProblem.get_renderer(template_helper).tasks.match(self.get_id(), header))
[ "def", "show_input", "(", "self", ",", "template_helper", ",", "language", ",", "seed", ")", ":", "header", "=", "ParsableText", "(", "self", ".", "gettext", "(", "language", ",", "self", ".", "_header", ")", ",", "\"rst\"", ",", "translation", "=", "self", ".", "_translations", ".", "get", "(", "language", ",", "gettext", ".", "NullTranslations", "(", ")", ")", ")", "return", "str", "(", "DisplayableMatchProblem", ".", "get_renderer", "(", "template_helper", ")", ".", "tasks", ".", "match", "(", "self", ".", "get_id", "(", ")", ",", "header", ")", ")" ]
Show MatchProblem
[ "Show", "MatchProblem" ]
python
train
74.4
CityOfZion/neo-python
neo/Wallets/Wallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L835-L862
def ToScriptHash(self, address): """ Retrieve the script_hash from an address. Args: address (str): a base58 encoded address. Raises: ValueError: if an invalid address is supplied or the coin version is incorrect Exception: if the address string does not start with 'A' or the checksum fails Returns: UInt160: script hash. """ if len(address) == 34: if address[0] == 'A': data = b58decode(address) if data[0] != self.AddressVersion: raise ValueError('Not correct Coin Version') checksum = Crypto.Default().Hash256(data[:21])[:4] if checksum != data[21:]: raise Exception('Address format error') return UInt160(data=data[1:21]) else: raise Exception('Address format error') else: raise ValueError('Not correct Address, wrong length.')
[ "def", "ToScriptHash", "(", "self", ",", "address", ")", ":", "if", "len", "(", "address", ")", "==", "34", ":", "if", "address", "[", "0", "]", "==", "'A'", ":", "data", "=", "b58decode", "(", "address", ")", "if", "data", "[", "0", "]", "!=", "self", ".", "AddressVersion", ":", "raise", "ValueError", "(", "'Not correct Coin Version'", ")", "checksum", "=", "Crypto", ".", "Default", "(", ")", ".", "Hash256", "(", "data", "[", ":", "21", "]", ")", "[", ":", "4", "]", "if", "checksum", "!=", "data", "[", "21", ":", "]", ":", "raise", "Exception", "(", "'Address format error'", ")", "return", "UInt160", "(", "data", "=", "data", "[", "1", ":", "21", "]", ")", "else", ":", "raise", "Exception", "(", "'Address format error'", ")", "else", ":", "raise", "ValueError", "(", "'Not correct Address, wrong length.'", ")" ]
Retrieve the script_hash from an address. Args: address (str): a base58 encoded address. Raises: ValueError: if an invalid address is supplied or the coin version is incorrect Exception: if the address string does not start with 'A' or the checksum fails Returns: UInt160: script hash.
[ "Retrieve", "the", "script_hash", "based", "from", "an", "address", "." ]
python
train
35.75
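A hedged re-sketch of the same checksum walk using hashlib directly; it assumes Hash256 is a double SHA-256 (the usual Base58Check construction) and uses a hypothetical version byte of 0x17:

import hashlib
from base58 import b58decode

def address_to_script_hash(address, version=0x17):
    if len(address) != 34 or address[0] != 'A':
        raise ValueError('Address format error')
    data = b58decode(address)
    if data[0] != version:
        raise ValueError('Not correct Coin Version')
    # double SHA-256 over version byte + 20-byte hash; first 4 bytes are the checksum
    checksum = hashlib.sha256(hashlib.sha256(data[:21]).digest()).digest()[:4]
    if checksum != data[21:]:
        raise ValueError('Address format error')
    return data[1:21]   # the 20-byte script hash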
ponty/eagexp
eagexp/partlist.py
https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L104-L112
def print_partlist(input, timeout=20, showgui=False): '''print partlist text delivered by eagle :param input: .sch or .brd file name :param timeout: int :param showgui: Bool, True -> do not hide eagle GUI :rtype: None ''' print(raw_partlist(input=input, timeout=timeout, showgui=showgui))
[ "def", "print_partlist", "(", "input", ",", "timeout", "=", "20", ",", "showgui", "=", "False", ")", ":", "print", "raw_partlist", "(", "input", "=", "input", ",", "timeout", "=", "timeout", ",", "showgui", "=", "showgui", ")" ]
print partlist text delivered by eagle :param input: .sch or .brd file name :param timeout: int :param showgui: Bool, True -> do not hide eagle GUI :rtype: None
[ "print", "partlist", "text", "delivered", "by", "eagle" ]
python
train
34.222222
nicolargo/glances
glances/plugins/glances_irq.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_irq.py#L142-L149
def __header(self, line): """Build the header (contains the number of CPUs). CPU0 CPU1 CPU2 CPU3 0: 21 0 0 0 IO-APIC 2-edge timer """ self.cpu_number = len(line.split()) return self.cpu_number
[ "def", "__header", "(", "self", ",", "line", ")", ":", "self", ".", "cpu_number", "=", "len", "(", "line", ".", "split", "(", ")", ")", "return", "self", ".", "cpu_number" ]
Build the header (contains the number of CPUs). CPU0 CPU1 CPU2 CPU3 0: 21 0 0 0 IO-APIC 2-edge timer
[ "Build", "the", "header", "(", "contain", "the", "number", "of", "CPU", ")", "." ]
python
train
36.75
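The header parse is just a whitespace split; checked on a made-up /proc/interrupts header line:

header = "           CPU0       CPU1       CPU2       CPU3"
cpu_number = len(header.split())
print(cpu_number)   # 4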
twisted/axiom
axiom/_fincache.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/_fincache.py#L89-L114
def cache(self, key, value): """ Add an entry to the cache. A weakref to the value is stored, rather than a direct reference. The value must have a C{__finalizer__} method that returns a callable which will be invoked when the weakref is broken. @param key: The key identifying the cache entry. @param value: The value for the cache entry. """ fin = value.__finalizer__() try: # It's okay if there's already a cache entry for this key as long # as the weakref has already been broken. See the comment in # get() for an explanation of why this might happen. if self.data[key]() is not None: raise CacheInconsistency( "Duplicate cache key: %r %r %r" % ( key, value, self.data[key])) except KeyError: pass callback = createCacheRemoveCallback(self._ref(self), key, fin) self.data[key] = self._ref(value, callback) return value
[ "def", "cache", "(", "self", ",", "key", ",", "value", ")", ":", "fin", "=", "value", ".", "__finalizer__", "(", ")", "try", ":", "# It's okay if there's already a cache entry for this key as long", "# as the weakref has already been broken. See the comment in", "# get() for an explanation of why this might happen.", "if", "self", ".", "data", "[", "key", "]", "(", ")", "is", "not", "None", ":", "raise", "CacheInconsistency", "(", "\"Duplicate cache key: %r %r %r\"", "%", "(", "key", ",", "value", ",", "self", ".", "data", "[", "key", "]", ")", ")", "except", "KeyError", ":", "pass", "callback", "=", "createCacheRemoveCallback", "(", "self", ".", "_ref", "(", "self", ")", ",", "key", ",", "fin", ")", "self", ".", "data", "[", "key", "]", "=", "self", ".", "_ref", "(", "value", ",", "callback", ")", "return", "value" ]
Add an entry to the cache. A weakref to the value is stored, rather than a direct reference. The value must have a C{__finalizer__} method that returns a callable which will be invoked when the weakref is broken. @param key: The key identifying the cache entry. @param value: The value for the cache entry.
[ "Add", "an", "entry", "to", "the", "cache", "." ]
python
train
39.730769
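A minimal sketch of the weakref-with-callback pattern the cache relies on, with hypothetical names and none of the __finalizer__ plumbing:

import weakref

class Thing(object):
    pass

cache = {}

def remember(key, value):
    def _on_collect(ref, key=key):
        cache.pop(key, None)          # drop the entry once the ref breaks
    cache[key] = weakref.ref(value, _on_collect)
    return value

t = Thing()
remember("a", t)
print(cache["a"]() is t)   # True while t is alive
del t
print("a" in cache)        # False on CPython once t is collected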
GibbsConsulting/django-plotly-dash
demo/demo/plotly_apps.py
https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/demo/demo/plotly_apps.py#L149-L188
def callback_liveIn_button_press(red_clicks, blue_clicks, green_clicks, rc_timestamp, bc_timestamp, gc_timestamp, **kwargs): # pylint: disable=unused-argument 'Input app button pressed, so do something interesting' if not rc_timestamp: rc_timestamp = 0 if not bc_timestamp: bc_timestamp = 0 if not gc_timestamp: gc_timestamp = 0 if (rc_timestamp + bc_timestamp + gc_timestamp) < 1: change_col = None timestamp = 0 else: if rc_timestamp > bc_timestamp: change_col = "red" timestamp = rc_timestamp else: change_col = "blue" timestamp = bc_timestamp if gc_timestamp > timestamp: timestamp = gc_timestamp change_col = "green" value = {'red_clicks':red_clicks, 'blue_clicks':blue_clicks, 'green_clicks':green_clicks, 'click_colour':change_col, 'click_timestamp':timestamp, 'user':str(kwargs.get('user', 'UNKNOWN'))} send_to_pipe_channel(channel_name="live_button_counter", label="named_counts", value=value) return "Number of local clicks so far is %s red and %s blue; last change is %s at %s" % (red_clicks, blue_clicks, change_col, datetime.fromtimestamp(0.001*timestamp))
[ "def", "callback_liveIn_button_press", "(", "red_clicks", ",", "blue_clicks", ",", "green_clicks", ",", "rc_timestamp", ",", "bc_timestamp", ",", "gc_timestamp", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "if", "not", "rc_timestamp", ":", "rc_timestamp", "=", "0", "if", "not", "bc_timestamp", ":", "bc_timestamp", "=", "0", "if", "not", "gc_timestamp", ":", "gc_timestamp", "=", "0", "if", "(", "rc_timestamp", "+", "bc_timestamp", "+", "gc_timestamp", ")", "<", "1", ":", "change_col", "=", "None", "timestamp", "=", "0", "else", ":", "if", "rc_timestamp", ">", "bc_timestamp", ":", "change_col", "=", "\"red\"", "timestamp", "=", "rc_timestamp", "else", ":", "change_col", "=", "\"blue\"", "timestamp", "=", "bc_timestamp", "if", "gc_timestamp", ">", "timestamp", ":", "timestamp", "=", "gc_timestamp", "change_col", "=", "\"green\"", "value", "=", "{", "'red_clicks'", ":", "red_clicks", ",", "'blue_clicks'", ":", "blue_clicks", ",", "'green_clicks'", ":", "green_clicks", ",", "'click_colour'", ":", "change_col", ",", "'click_timestamp'", ":", "timestamp", ",", "'user'", ":", "str", "(", "kwargs", ".", "get", "(", "'user'", ",", "'UNKNOWN'", ")", ")", "}", "send_to_pipe_channel", "(", "channel_name", "=", "\"live_button_counter\"", ",", "label", "=", "\"named_counts\"", ",", "value", "=", "value", ")", "return", "\"Number of local clicks so far is %s red and %s blue; last change is %s at %s\"", "%", "(", "red_clicks", ",", "blue_clicks", ",", "change_col", ",", "datetime", ".", "fromtimestamp", "(", "0.001", "*", "timestamp", ")", ")" ]
Input app button pressed, so do something interesting
[ "Input", "app", "button", "pressed", "so", "do", "something", "interesting" ]
python
train
41.75
apache/incubator-mxnet
python/mxnet/io/io.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/io.py#L100-L112
def get_list(shapes, types): """Get DataDesc list from attribute lists. Parameters ---------- shapes : a tuple of (name_, shape_) types : a tuple of (name_, np.dtype) """ if types is not None: type_dict = dict(types) return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes] else: return [DataDesc(x[0], x[1]) for x in shapes]
[ "def", "get_list", "(", "shapes", ",", "types", ")", ":", "if", "types", "is", "not", "None", ":", "type_dict", "=", "dict", "(", "types", ")", "return", "[", "DataDesc", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ",", "type_dict", "[", "x", "[", "0", "]", "]", ")", "for", "x", "in", "shapes", "]", "else", ":", "return", "[", "DataDesc", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "for", "x", "in", "shapes", "]" ]
Get DataDesc list from attribute lists. Parameters ---------- shapes : a tuple of (name_, shape_) types : a tuple of (name_, np.dtype)
[ "Get", "DataDesc", "list", "from", "attribute", "lists", "." ]
python
train
32.384615
lorehov/mongolock
src/mongolock.py
https://github.com/lorehov/mongolock/blob/218c9dfacdc8de04616c0d141a5701c9217a9069/src/mongolock.py#L86-L95
def release(self, key, owner): """Release lock with given name. `key` - lock name `owner` - name of application/component/whatever which held a lock Raises `MongoLockException` if no such a lock. """ status = self.collection.find_and_modify( {'_id': key, 'owner': owner}, {'locked': False, 'owner': None, 'created': None, 'expire': None} )
[ "def", "release", "(", "self", ",", "key", ",", "owner", ")", ":", "status", "=", "self", ".", "collection", ".", "find_and_modify", "(", "{", "'_id'", ":", "key", ",", "'owner'", ":", "owner", "}", ",", "{", "'locked'", ":", "False", ",", "'owner'", ":", "None", ",", "'created'", ":", "None", ",", "'expire'", ":", "None", "}", ")" ]
Release lock with given name. `key` - lock name `owner` - name of application/component/whatever which held a lock Raises `MongoLockException` if no such a lock.
[ "Release", "lock", "with", "given", "name", ".", "key", "-", "lock", "name", "owner", "-", "name", "of", "application", "/", "component", "/", "whatever", "which", "held", "a", "lock", "Raises", "MongoLockException", "if", "no", "such", "a", "lock", "." ]
python
train
41.4
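find_and_modify was removed in PyMongo 3; a hedged sketch of the same release step with the modern API, keeping the field names from the snippet (the connection details are assumptions):

from pymongo import MongoClient, ReturnDocument

locks = MongoClient().test.locks   # hypothetical database/collection

def release(key, owner):
    return locks.find_one_and_update(
        {'_id': key, 'owner': owner},
        {'$set': {'locked': False, 'owner': None,
                  'created': None, 'expire': None}},
        return_document=ReturnDocument.AFTER)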
YosaiProject/yosai
yosai/core/realm/realm.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/realm/realm.py#L377-L404
def is_permitted(self, identifiers, permission_s): """ If the authorization info cannot be obtained from the accountstore, permission check tuple yields False. :type identifiers: subject_abcs.IdentifierCollection :param permission_s: a collection of one or more permissions, represented as string-based permissions or Permission objects and NEVER comingled types :type permission_s: list of string(s) :yields: tuple(Permission, Boolean) """ identifier = identifiers.primary_identifier for required in permission_s: domain = Permission.get_domain(required) # assigned is a list of json blobs: assigned = self.get_authzd_permissions(identifier, domain) is_permitted = False for perms_blob in assigned: is_permitted = self.permission_verifier.\ is_permitted_from_json(required, perms_blob) yield (required, is_permitted)
[ "def", "is_permitted", "(", "self", ",", "identifiers", ",", "permission_s", ")", ":", "identifier", "=", "identifiers", ".", "primary_identifier", "for", "required", "in", "permission_s", ":", "domain", "=", "Permission", ".", "get_domain", "(", "required", ")", "# assigned is a list of json blobs:", "assigned", "=", "self", ".", "get_authzd_permissions", "(", "identifier", ",", "domain", ")", "is_permitted", "=", "False", "for", "perms_blob", "in", "assigned", ":", "is_permitted", "=", "self", ".", "permission_verifier", ".", "is_permitted_from_json", "(", "required", ",", "perms_blob", ")", "yield", "(", "required", ",", "is_permitted", ")" ]
If the authorization info cannot be obtained from the accountstore, permission check tuple yields False. :type identifiers: subject_abcs.IdentifierCollection :param permission_s: a collection of one or more permissions, represented as string-based permissions or Permission objects and NEVER comingled types :type permission_s: list of string(s) :yields: tuple(Permission, Boolean)
[ "If", "the", "authorization", "info", "cannot", "be", "obtained", "from", "the", "accountstore", "permission", "check", "tuple", "yields", "False", "." ]
python
train
37.285714
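The generator contract (one (permission, granted) tuple per requested permission) sketched standalone with a hypothetical in-memory verifier:

def is_permitted(assigned, permission_s):
    for required in permission_s:
        yield (required, required in assigned)

assigned = {'domain:read', 'domain:write'}
for perm, ok in is_permitted(assigned, ['domain:read', 'domain:delete']):
    print(perm, ok)   # domain:read True, then domain:delete False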
seomoz/shovel
shovel/tasks.py
https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L275-L278
def dry(self, *args, **kwargs): '''Perform a dry-run of the task''' return 'Would have executed:\n%s%s' % ( self.name, Args(self.spec).explain(*args, **kwargs))
[ "def", "dry", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "'Would have executed:\\n%s%s'", "%", "(", "self", ".", "name", ",", "Args", "(", "self", ".", "spec", ")", ".", "explain", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Perform a dry-run of the task
[ "Perform", "a", "dry", "-", "run", "of", "the", "task" ]
python
train
46.25
spacetelescope/stsci.tools
lib/stsci/tools/configobj.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/configobj.py#L2337-L2345
def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None
[ "def", "reset", "(", "self", ")", ":", "self", ".", "clear", "(", ")", "self", ".", "_initialise", "(", ")", "# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)", "# requires an empty dictionary", "self", ".", "configspec", "=", "None", "# Just to be sure ;-)", "self", ".", "_original_configspec", "=", "None" ]
Clear ConfigObj instance and restore to 'freshly created' state.
[ "Clear", "ConfigObj", "instance", "and", "restore", "to", "freshly", "created", "state", "." ]
python
train
41.333333
log2timeline/dfvfs
dfvfs/vfs/os_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/os_file_system.py#L106-L125
def GetRootFileEntry(self): """Retrieves the root file entry. Returns: OSFileEntry: a file entry or None if not available. """ if platform.system() == 'Windows': # Return the root with the drive letter of the volume the current # working directory is on. location = os.getcwd() location, _, _ = location.partition('\\') location = '{0:s}\\'.format(location) else: location = '/' if not os.path.exists(location): return None path_spec = os_path_spec.OSPathSpec(location=location) return self.GetFileEntryByPathSpec(path_spec)
[ "def", "GetRootFileEntry", "(", "self", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "# Return the root with the drive letter of the volume the current", "# working directory is on.", "location", "=", "os", ".", "getcwd", "(", ")", "location", ",", "_", ",", "_", "=", "location", ".", "partition", "(", "'\\\\'", ")", "location", "=", "'{0:s}\\\\'", ".", "format", "(", "location", ")", "else", ":", "location", "=", "'/'", "if", "not", "os", ".", "path", ".", "exists", "(", "location", ")", ":", "return", "None", "path_spec", "=", "os_path_spec", ".", "OSPathSpec", "(", "location", "=", "location", ")", "return", "self", ".", "GetFileEntryByPathSpec", "(", "path_spec", ")" ]
Retrieves the root file entry. Returns: OSFileEntry: a file entry or None if not available.
[ "Retrieves", "the", "root", "file", "entry", "." ]
python
train
29.4
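The Windows branch's drive-letter extraction, checked on a made-up path:

location = "C:\\Users\\example"
location, _, _ = location.partition("\\")
location = "{0:s}\\".format(location)
print(location)   # C:\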
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L316-L318
def get_delete_security_group_rule_commands(self, sg_id, sg_rule): """Commands for removing a rule from ACLs""" return self._get_rule_cmds(sg_id, sg_rule, delete=True)
[ "def", "get_delete_security_group_rule_commands", "(", "self", ",", "sg_id", ",", "sg_rule", ")", ":", "return", "self", ".", "_get_rule_cmds", "(", "sg_id", ",", "sg_rule", ",", "delete", "=", "True", ")" ]
Commands for removing a rule from ACLs
[ "Commands", "for", "removing", "rule", "from", "ACLS" ]
python
train
59.666667
mental32/spotify.py
spotify/http.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/http.py#L297-L307
def artists(self, spotify_ids): """Get Spotify artists by their IDs. Parameters ---------- spotify_ids : List[str] The spotify_ids to search with. """ route = Route('GET', '/artists') payload = {'ids': spotify_ids} return self.request(route, params=payload)
[ "def", "artists", "(", "self", ",", "spotify_ids", ")", ":", "route", "=", "Route", "(", "'GET'", ",", "'/artists'", ")", "payload", "=", "{", "'ids'", ":", "spotify_ids", "}", "return", "self", ".", "request", "(", "route", ",", "params", "=", "payload", ")" ]
Get Spotify artists by their IDs. Parameters ---------- spotify_ids : List[str] The spotify_ids to search with.
[ "Get", "a", "spotify", "artists", "by", "their", "IDs", "." ]
python
test
29.545455
helixyte/everest
everest/representers/dataelements.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/dataelements.py#L256-L268
def get_terminal_converted(self, attr): """ Returns the value of the specified attribute converted to a representation value. :param attr: Attribute to retrieve. :type attr: :class:`everest.representers.attributes.MappedAttribute` :returns: Representation string. """ value = self.data.get(attr.repr_name) return self.converter_registry.convert_to_representation( value, attr.value_type)
[ "def", "get_terminal_converted", "(", "self", ",", "attr", ")", ":", "value", "=", "self", ".", "data", ".", "get", "(", "attr", ".", "repr_name", ")", "return", "self", ".", "converter_registry", ".", "convert_to_representation", "(", "value", ",", "attr", ".", "value_type", ")" ]
Returns the value of the specified attribute converted to a representation value. :param attr: Attribute to retrieve. :type attr: :class:`everest.representers.attributes.MappedAttribute` :returns: Representation string.
[ "Returns", "the", "value", "of", "the", "specified", "attribute", "converted", "to", "a", "representation", "value", "." ]
python
train
43.692308
getgauge/gauge-python
getgauge/parser_redbaron.py
https://github.com/getgauge/gauge-python/blob/90f3547dcfd2d16d51f116cdd4e53527eeab1a57/getgauge/parser_redbaron.py#L82-L87
def iter_steps(self): """Iterate over steps in the parsed file.""" for func, decorator in self._iter_step_func_decorators(): step = self._step_decorator_args(decorator) if step: yield step, func.name, self._span_for_node(func, True)
[ "def", "iter_steps", "(", "self", ")", ":", "for", "func", ",", "decorator", "in", "self", ".", "_iter_step_func_decorators", "(", ")", ":", "step", "=", "self", ".", "_step_decorator_args", "(", "decorator", ")", "if", "step", ":", "yield", "step", ",", "func", ".", "name", ",", "self", ".", "_span_for_node", "(", "func", ",", "True", ")" ]
Iterate over steps in the parsed file.
[ "Iterate", "over", "steps", "in", "the", "parsed", "file", "." ]
python
test
47.166667
MKLab-ITI/reveal-user-annotation
reveal_user_annotation/twitter/manage_resources.py
https://github.com/MKLab-ITI/reveal-user-annotation/blob/ed019c031857b091e5601f53ba3f01a499a0e3ef/reveal_user_annotation/twitter/manage_resources.py#L107-L122
def get_topic_keyword_dictionary(): """ Opens the topic-keyword map resource file and returns the corresponding python dictionary. - Input: none; the resource file path is derived from the package path. - Output: - topic_keyword_dictionary: A topic to keyword python dictionary. """ topic_keyword_dictionary = dict() file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt", ",", "utf-8") for file_row in file_row_gen: topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]]) return topic_keyword_dictionary
[ "def", "get_topic_keyword_dictionary", "(", ")", ":", "topic_keyword_dictionary", "=", "dict", "(", ")", "file_row_gen", "=", "get_file_row_generator", "(", "get_package_path", "(", ")", "+", "\"/twitter/res/topics/topic_keyword_mapping\"", "+", "\".txt\"", ",", "\",\"", ",", "\"utf-8\"", ")", "for", "file_row", "in", "file_row_gen", ":", "topic_keyword_dictionary", "[", "file_row", "[", "0", "]", "]", "=", "set", "(", "[", "keyword", "for", "keyword", "in", "file_row", "[", "1", ":", "]", "]", ")", "return", "topic_keyword_dictionary" ]
Opens the topic-keyword map resource file and returns the corresponding python dictionary. - Input: none; the resource file path is derived from the package path. - Output: - topic_keyword_dictionary: A topic to keyword python dictionary.
[ "Opens", "the", "topic", "-", "keyword", "map", "resource", "file", "and", "returns", "the", "corresponding", "python", "dictionary", "." ]
python
train
43.625
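The same parse sketched standalone, with made-up comma-separated rows standing in for the resource file:

rows = ["politics,election,senate", "sports,football,tennis"]
topic_keyword_dictionary = {}
for file_row in (row.split(",") for row in rows):
    topic_keyword_dictionary[file_row[0]] = set(file_row[1:])
print(topic_keyword_dictionary)   # topic -> set of keywords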
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_nat.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L51-L69
def vq_nearest_neighbor(x, hparams): """Find the nearest element in means to elements in x.""" bottleneck_size = 2**hparams.bottleneck_bits means = hparams.means x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod if hparams.bottleneck_kind == "em": x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples) x_means_hot = tf.one_hot( x_means_idx, depth=bottleneck_size) x_means_hot = tf.reduce_mean(x_means_hot, axis=1) else: x_means_idx = tf.argmax(-dist, axis=-1) x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size) x_means = tf.matmul(x_means_hot, means) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, e_loss
[ "def", "vq_nearest_neighbor", "(", "x", ",", "hparams", ")", ":", "bottleneck_size", "=", "2", "**", "hparams", ".", "bottleneck_bits", "means", "=", "hparams", ".", "means", "x_norm_sq", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ")", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "means_norm_sq", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "means", ")", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "scalar_prod", "=", "tf", ".", "matmul", "(", "x", ",", "means", ",", "transpose_b", "=", "True", ")", "dist", "=", "x_norm_sq", "+", "tf", ".", "transpose", "(", "means_norm_sq", ")", "-", "2", "*", "scalar_prod", "if", "hparams", ".", "bottleneck_kind", "==", "\"em\"", ":", "x_means_idx", "=", "tf", ".", "multinomial", "(", "-", "dist", ",", "num_samples", "=", "hparams", ".", "num_samples", ")", "x_means_hot", "=", "tf", ".", "one_hot", "(", "x_means_idx", ",", "depth", "=", "bottleneck_size", ")", "x_means_hot", "=", "tf", ".", "reduce_mean", "(", "x_means_hot", ",", "axis", "=", "1", ")", "else", ":", "x_means_idx", "=", "tf", ".", "argmax", "(", "-", "dist", ",", "axis", "=", "-", "1", ")", "x_means_hot", "=", "tf", ".", "one_hot", "(", "x_means_idx", ",", "depth", "=", "bottleneck_size", ")", "x_means", "=", "tf", ".", "matmul", "(", "x_means_hot", ",", "means", ")", "e_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "x", ",", "tf", ".", "stop_gradient", "(", "x_means", ")", ")", ")", "return", "x_means_hot", ",", "e_loss" ]
Find the nearest element in means to elements in x.
[ "Find", "the", "nearest", "element", "in", "means", "to", "elements", "in", "x", "." ]
python
train
48.210526
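The distance computation uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x.m; a numpy check of the identity and of the nearest-neighbor lookup:

import numpy as np

x = np.random.randn(5, 8)        # 5 query vectors
means = np.random.randn(16, 8)   # 16 codebook entries
x_sq = (x ** 2).sum(-1, keepdims=True)
m_sq = (means ** 2).sum(-1, keepdims=True)
dist = x_sq + m_sq.T - 2 * x @ means.T
brute = ((x[:, None, :] - means[None, :, :]) ** 2).sum(-1)
print(np.allclose(dist, brute))    # True
print(np.argmax(-dist, axis=-1))   # nearest codebook index per vector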
ranaroussi/qtpylib
qtpylib/instrument.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/instrument.py#L278-L288
def buy_market(self, quantity, **kwargs): """ Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its `optional parameters <#qtpylib.instrument.Instrument.order>`_ :Parameters: quantity : int Order quantity """ kwargs['limit_price'] = 0 kwargs['order_type'] = "MARKET" self.parent.order("BUY", self, quantity=quantity, **kwargs)
[ "def", "buy_market", "(", "self", ",", "quantity", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'limit_price'", "]", "=", "0", "kwargs", "[", "'order_type'", "]", "=", "\"MARKET\"", "self", ".", "parent", ".", "order", "(", "\"BUY\"", ",", "self", ",", "quantity", "=", "quantity", ",", "*", "*", "kwargs", ")" ]
Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its `optional parameters <#qtpylib.instrument.Instrument.order>`_ :Parameters: quantity : int Order quantity
[ "Shortcut", "for", "instrument", ".", "order", "(", "BUY", "...", ")", "and", "accepts", "all", "of", "its", "optional", "parameters", "<#qtpylib", ".", "instrument", ".", "Instrument", ".", "order", ">", "_" ]
python
train
37.818182
devassistant/devassistant
devassistant/dapi/dapicli.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L322-L334
def _get_assistants_snippets(path, name): '''Get Assistants and Snippets for a given DAP name on a given path''' result = [] subdirs = {'assistants': 2, 'snippets': 1} # Values used for stripping leading path tokens for loc in subdirs: for root, dirs, files in os.walk(os.path.join(path, loc)): for filename in [utils.strip_prefix(os.path.join(root, f), path) for f in files]: stripped = os.path.sep.join(filename.split(os.path.sep)[subdirs[loc]:]) if stripped.startswith(os.path.join(name, '')) or stripped == name + '.yaml': result.append(os.path.join('fakeroot', filename)) return result
[ "def", "_get_assistants_snippets", "(", "path", ",", "name", ")", ":", "result", "=", "[", "]", "subdirs", "=", "{", "'assistants'", ":", "2", ",", "'snippets'", ":", "1", "}", "# Values used for stripping leading path tokens", "for", "loc", "in", "subdirs", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "path", ",", "loc", ")", ")", ":", "for", "filename", "in", "[", "utils", ".", "strip_prefix", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ",", "path", ")", "for", "f", "in", "files", "]", ":", "stripped", "=", "os", ".", "path", ".", "sep", ".", "join", "(", "filename", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "subdirs", "[", "loc", "]", ":", "]", ")", "if", "stripped", ".", "startswith", "(", "os", ".", "path", ".", "join", "(", "name", ",", "''", ")", ")", "or", "stripped", "==", "name", "+", "'.yaml'", ":", "result", ".", "append", "(", "os", ".", "path", ".", "join", "(", "'fakeroot'", ",", "filename", ")", ")", "return", "result" ]
Get Assistants and Snippets for a given DAP name on a given path
[ "Get", "Assistants", "and", "Snippets", "for", "a", "given", "DAP", "name", "on", "a", "given", "path" ]
python
train
51.692308
pypa/pipenv
pipenv/vendor/yaspin/core.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/yaspin/core.py#L278-L292
def write(self, text): """Write text in the terminal without breaking the spinner.""" # similar to tqdm.write() # https://pypi.python.org/pypi/tqdm#writing-messages sys.stdout.write("\r") self._clear_line() _text = to_unicode(text) if PY2: _text = _text.encode(ENCODING) # Ensure output is bytes for Py2 and Unicode for Py3 assert isinstance(_text, builtin_str) sys.stdout.write("{0}\n".format(_text))
[ "def", "write", "(", "self", ",", "text", ")", ":", "# similar to tqdm.write()", "# https://pypi.python.org/pypi/tqdm#writing-messages", "sys", ".", "stdout", ".", "write", "(", "\"\\r\"", ")", "self", ".", "_clear_line", "(", ")", "_text", "=", "to_unicode", "(", "text", ")", "if", "PY2", ":", "_text", "=", "_text", ".", "encode", "(", "ENCODING", ")", "# Ensure output is bytes for Py2 and Unicode for Py3", "assert", "isinstance", "(", "_text", ",", "builtin_str", ")", "sys", ".", "stdout", ".", "write", "(", "\"{0}\\n\"", ".", "format", "(", "_text", ")", ")" ]
Write text in the terminal without breaking the spinner.
[ "Write", "text", "in", "the", "terminal", "without", "breaking", "the", "spinner", "." ]
python
train
32.133333
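A sketch of the same write-above-the-spinner trick with raw ANSI escapes (assumes a VT100-style terminal; yaspin's _clear_line is stood in for by \033[K):

import sys
import time

sys.stdout.write("working...")        # pretend this is the spinner line
sys.stdout.flush()
time.sleep(0.5)
sys.stdout.write("\r\033[K")          # return to column 0 and erase the line
sys.stdout.write("a log message\n")   # the message stays; the spinner redraws below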
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1267-L1307
def format_sql(self): """ Builds the sql in a format that is easy for humans to read and debug :return: The formatted sql for this query :rtype: str """ # TODO: finish adding the other parts of the sql generation sql = '' # build SELECT select_segment = self.build_select_fields() select_segment = select_segment.replace('SELECT ', '', 1) fields = [field.strip() for field in select_segment.split(',')] sql += 'SELECT\n\t{0}\n'.format(',\n\t'.join(fields)) # build FROM from_segment = self.build_from_table() from_segment = from_segment.replace('FROM ', '', 1) tables = [table.strip() for table in from_segment.split(',')] sql += 'FROM\n\t{0}\n'.format(',\n\t'.join(tables)) # build ORDER BY order_by_segment = self.build_order_by() if len(order_by_segment): order_by_segment = order_by_segment.replace('ORDER BY ', '', 1) sorters = [sorter.strip() for sorter in order_by_segment.split(',')] sql += 'ORDER BY\n\t{0}\n'.format(',\n\t'.join(sorters)) # build LIMIT limit_segment = self.build_limit() if len(limit_segment): if 'LIMIT' in limit_segment: limit_segment = limit_segment.replace('LIMIT ', 'LIMIT\n\t', 1) if 'OFFSET' in limit_segment: limit_segment = limit_segment.replace('OFFSET ', '\nOFFSET\n\t', 1) elif 'OFFSET' in limit_segment: limit_segment = limit_segment.replace('OFFSET ', 'OFFSET\n\t', 1) sql += limit_segment return sql
[ "def", "format_sql", "(", "self", ")", ":", "# TODO: finish adding the other parts of the sql generation", "sql", "=", "''", "# build SELECT", "select_segment", "=", "self", ".", "build_select_fields", "(", ")", "select_segment", "=", "select_segment", ".", "replace", "(", "'SELECT '", ",", "''", ",", "1", ")", "fields", "=", "[", "field", ".", "strip", "(", ")", "for", "field", "in", "select_segment", ".", "split", "(", "','", ")", "]", "sql", "+=", "'SELECT\\n\\t{0}\\n'", ".", "format", "(", "',\\n\\t'", ".", "join", "(", "fields", ")", ")", "# build FROM", "from_segment", "=", "self", ".", "build_from_table", "(", ")", "from_segment", "=", "from_segment", ".", "replace", "(", "'FROM '", ",", "''", ",", "1", ")", "tables", "=", "[", "table", ".", "strip", "(", ")", "for", "table", "in", "from_segment", ".", "split", "(", "','", ")", "]", "sql", "+=", "'FROM\\n\\t{0}\\n'", ".", "format", "(", "',\\n\\t'", ".", "join", "(", "tables", ")", ")", "# build ORDER BY", "order_by_segment", "=", "self", ".", "build_order_by", "(", ")", "if", "len", "(", "order_by_segment", ")", ":", "order_by_segment", "=", "order_by_segment", ".", "replace", "(", "'ORDER BY '", ",", "''", ",", "1", ")", "sorters", "=", "[", "sorter", ".", "strip", "(", ")", "for", "sorter", "in", "order_by_segment", ".", "split", "(", "','", ")", "]", "sql", "+=", "'ORDER BY\\n\\t{0}\\n'", ".", "format", "(", "',\\n\\t'", ".", "join", "(", "sorters", ")", ")", "# build LIMIT", "limit_segment", "=", "self", ".", "build_limit", "(", ")", "if", "len", "(", "limit_segment", ")", ":", "if", "'LIMIT'", "in", "limit_segment", ":", "limit_segment", "=", "limit_segment", ".", "replace", "(", "'LIMIT '", ",", "'LIMIT\\n\\t'", ",", "1", ")", "if", "'OFFSET'", "in", "limit_segment", ":", "limit_segment", "=", "limit_segment", ".", "replace", "(", "'OFFSET '", ",", "'\\nOFFSET\\n\\t'", ",", "1", ")", "elif", "'OFFSET'", "in", "limit_segment", ":", "limit_segment", "=", "limit_segment", ".", "replace", "(", "'OFFSET '", ",", "'OFFSET\\n\\t'", ",", "1", ")", "sql", "+=", "limit_segment", "return", "sql" ]
Builds the sql in a format that is easy for humans to read and debug :return: The formatted sql for this query :rtype: str
[ "Builds", "the", "sql", "in", "a", "format", "that", "is", "easy", "for", "humans", "to", "read", "and", "debug" ]
python
train
39.97561
joowani/quadriga
quadriga/book.py
https://github.com/joowani/quadriga/blob/412f88f414ef0cb53efa6d5841b9674eb9718359/quadriga/book.py#L120-L133
def buy_market_order(self, amount): """Place a buy order at market price. :param amount: Amount of major currency to buy at market price. :type amount: int | float | str | unicode | decimal.Decimal :return: Order details. :rtype: dict """ amount = str(amount) self._log("buy {} {} at market price".format(amount, self.major)) return self._rest_client.post( endpoint='/buy', payload={'book': self.name, 'amount': amount} )
[ "def", "buy_market_order", "(", "self", ",", "amount", ")", ":", "amount", "=", "str", "(", "amount", ")", "self", ".", "_log", "(", "\"buy {} {} at market price\"", ".", "format", "(", "amount", ",", "self", ".", "major", ")", ")", "return", "self", ".", "_rest_client", ".", "post", "(", "endpoint", "=", "'/buy'", ",", "payload", "=", "{", "'book'", ":", "self", ".", "name", ",", "'amount'", ":", "amount", "}", ")" ]
Place a buy order at market price. :param amount: Amount of major currency to buy at market price. :type amount: int | float | str | unicode | decimal.Decimal :return: Order details. :rtype: dict
[ "Place", "a", "buy", "order", "at", "market", "price", "." ]
python
train
36.642857
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L350-L416
def _initActions(self): """Init shortcuts for text editing """ def createAction(text, shortcut, slot, iconFileName=None): """Create QAction with given parameters and add to the widget """ action = QAction(text, self) if iconFileName is not None: action.setIcon(getIcon(iconFileName)) keySeq = shortcut if isinstance(shortcut, QKeySequence) else QKeySequence(shortcut) action.setShortcut(keySeq) action.setShortcutContext(Qt.WidgetShortcut) action.triggered.connect(slot) self.addAction(action) return action # scrolling self.scrollUpAction = createAction('Scroll up', 'Ctrl+Up', lambda: self._onShortcutScroll(down = False), 'go-up') self.scrollDownAction = createAction('Scroll down', 'Ctrl+Down', lambda: self._onShortcutScroll(down = True), 'go-down') self.selectAndScrollUpAction = createAction('Select and scroll Up', 'Ctrl+Shift+Up', lambda: self._onShortcutSelectAndScroll(down = False)) self.selectAndScrollDownAction = createAction('Select and scroll Down', 'Ctrl+Shift+Down', lambda: self._onShortcutSelectAndScroll(down = True)) # indentation self.increaseIndentAction = createAction('Increase indentation', 'Tab', self._onShortcutIndent, 'format-indent-more') self.decreaseIndentAction = createAction('Decrease indentation', 'Shift+Tab', lambda: self._indenter.onChangeSelectedBlocksIndent(increase = False), 'format-indent-less') self.autoIndentLineAction = createAction('Autoindent line', 'Ctrl+I', self._indenter.onAutoIndentTriggered) self.indentWithSpaceAction = createAction('Indent with 1 space', 'Ctrl+Shift+Space', lambda: self._indenter.onChangeSelectedBlocksIndent(increase=True, withSpace=True)) self.unIndentWithSpaceAction = createAction('Unindent with 1 space', 'Ctrl+Shift+Backspace', lambda: self._indenter.onChangeSelectedBlocksIndent(increase=False, withSpace=True)) # editing self.undoAction = createAction('Undo', QKeySequence.Undo, self.undo, 'edit-undo') self.redoAction = createAction('Redo', QKeySequence.Redo, self.redo, 'edit-redo') self.moveLineUpAction = createAction('Move line up', 'Alt+Up', lambda: self._onShortcutMoveLine(down = False), 'go-up') self.moveLineDownAction = createAction('Move line down', 'Alt+Down', lambda: self._onShortcutMoveLine(down = True), 'go-down') self.deleteLineAction = createAction('Delete line', 'Alt+Del', self._onShortcutDeleteLine, 'edit-delete') self.cutLineAction = createAction('Cut line', 'Alt+X', self._onShortcutCutLine, 'edit-cut') self.copyLineAction = createAction('Copy line', 'Alt+C', self._onShortcutCopyLine, 'edit-copy') self.pasteLineAction = createAction('Paste line', 'Alt+V', self._onShortcutPasteLine, 'edit-paste') self.duplicateLineAction = createAction('Duplicate line', 'Alt+D', self._onShortcutDuplicateLine) self.invokeCompletionAction = createAction('Invoke completion', 'Ctrl+Space', self._completer.invokeCompletion) # other self.printAction = createAction('Print', 'Ctrl+P', self._onShortcutPrint, 'document-print')
[ "def", "_initActions", "(", "self", ")", ":", "def", "createAction", "(", "text", ",", "shortcut", ",", "slot", ",", "iconFileName", "=", "None", ")", ":", "\"\"\"Create QAction with given parameters and add to the widget\n \"\"\"", "action", "=", "QAction", "(", "text", ",", "self", ")", "if", "iconFileName", "is", "not", "None", ":", "action", ".", "setIcon", "(", "getIcon", "(", "iconFileName", ")", ")", "keySeq", "=", "shortcut", "if", "isinstance", "(", "shortcut", ",", "QKeySequence", ")", "else", "QKeySequence", "(", "shortcut", ")", "action", ".", "setShortcut", "(", "keySeq", ")", "action", ".", "setShortcutContext", "(", "Qt", ".", "WidgetShortcut", ")", "action", ".", "triggered", ".", "connect", "(", "slot", ")", "self", ".", "addAction", "(", "action", ")", "return", "action", "# scrolling", "self", ".", "scrollUpAction", "=", "createAction", "(", "'Scroll up'", ",", "'Ctrl+Up'", ",", "lambda", ":", "self", ".", "_onShortcutScroll", "(", "down", "=", "False", ")", ",", "'go-up'", ")", "self", ".", "scrollDownAction", "=", "createAction", "(", "'Scroll down'", ",", "'Ctrl+Down'", ",", "lambda", ":", "self", ".", "_onShortcutScroll", "(", "down", "=", "True", ")", ",", "'go-down'", ")", "self", ".", "selectAndScrollUpAction", "=", "createAction", "(", "'Select and scroll Up'", ",", "'Ctrl+Shift+Up'", ",", "lambda", ":", "self", ".", "_onShortcutSelectAndScroll", "(", "down", "=", "False", ")", ")", "self", ".", "selectAndScrollDownAction", "=", "createAction", "(", "'Select and scroll Down'", ",", "'Ctrl+Shift+Down'", ",", "lambda", ":", "self", ".", "_onShortcutSelectAndScroll", "(", "down", "=", "True", ")", ")", "# indentation", "self", ".", "increaseIndentAction", "=", "createAction", "(", "'Increase indentation'", ",", "'Tab'", ",", "self", ".", "_onShortcutIndent", ",", "'format-indent-more'", ")", "self", ".", "decreaseIndentAction", "=", "createAction", "(", "'Decrease indentation'", ",", "'Shift+Tab'", ",", "lambda", ":", "self", ".", "_indenter", ".", "onChangeSelectedBlocksIndent", "(", "increase", "=", "False", ")", ",", "'format-indent-less'", ")", "self", ".", "autoIndentLineAction", "=", "createAction", "(", "'Autoindent line'", ",", "'Ctrl+I'", ",", "self", ".", "_indenter", ".", "onAutoIndentTriggered", ")", "self", ".", "indentWithSpaceAction", "=", "createAction", "(", "'Indent with 1 space'", ",", "'Ctrl+Shift+Space'", ",", "lambda", ":", "self", ".", "_indenter", ".", "onChangeSelectedBlocksIndent", "(", "increase", "=", "True", ",", "withSpace", "=", "True", ")", ")", "self", ".", "unIndentWithSpaceAction", "=", "createAction", "(", "'Unindent with 1 space'", ",", "'Ctrl+Shift+Backspace'", ",", "lambda", ":", "self", ".", "_indenter", ".", "onChangeSelectedBlocksIndent", "(", "increase", "=", "False", ",", "withSpace", "=", "True", ")", ")", "# editing", "self", ".", "undoAction", "=", "createAction", "(", "'Undo'", ",", "QKeySequence", ".", "Undo", ",", "self", ".", "undo", ",", "'edit-undo'", ")", "self", ".", "redoAction", "=", "createAction", "(", "'Redo'", ",", "QKeySequence", ".", "Redo", ",", "self", ".", "redo", ",", "'edit-redo'", ")", "self", ".", "moveLineUpAction", "=", "createAction", "(", "'Move line up'", ",", "'Alt+Up'", ",", "lambda", ":", "self", ".", "_onShortcutMoveLine", "(", "down", "=", "False", ")", ",", "'go-up'", ")", "self", ".", "moveLineDownAction", "=", "createAction", "(", "'Move line down'", ",", "'Alt+Down'", ",", "lambda", ":", "self", ".", "_onShortcutMoveLine", "(", "down", "=", "True", ")", ",", "'go-down'", ")", 
"self", ".", "deleteLineAction", "=", "createAction", "(", "'Delete line'", ",", "'Alt+Del'", ",", "self", ".", "_onShortcutDeleteLine", ",", "'edit-delete'", ")", "self", ".", "cutLineAction", "=", "createAction", "(", "'Cut line'", ",", "'Alt+X'", ",", "self", ".", "_onShortcutCutLine", ",", "'edit-cut'", ")", "self", ".", "copyLineAction", "=", "createAction", "(", "'Copy line'", ",", "'Alt+C'", ",", "self", ".", "_onShortcutCopyLine", ",", "'edit-copy'", ")", "self", ".", "pasteLineAction", "=", "createAction", "(", "'Paste line'", ",", "'Alt+V'", ",", "self", ".", "_onShortcutPasteLine", ",", "'edit-paste'", ")", "self", ".", "duplicateLineAction", "=", "createAction", "(", "'Duplicate line'", ",", "'Alt+D'", ",", "self", ".", "_onShortcutDuplicateLine", ")", "self", ".", "invokeCompletionAction", "=", "createAction", "(", "'Invoke completion'", ",", "'Ctrl+Space'", ",", "self", ".", "_completer", ".", "invokeCompletion", ")", "# other", "self", ".", "printAction", "=", "createAction", "(", "'Print'", ",", "'Ctrl+P'", ",", "self", ".", "_onShortcutPrint", ",", "'document-print'", ")" ]
Init shortcuts for text editing
[ "Init", "shortcuts", "for", "text", "editing" ]
python
train
60.970149
bunq/sdk_python
bunq/sdk/model/generated/object_.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/object_.py#L301-L333
def is_all_field_none(self):
    """
    :rtype: bool
    """

    if self._billing_date is not None:
        return False

    if self._type_description is not None:
        return False

    if self._type_description_translated is not None:
        return False

    if self._unit_vat_exclusive is not None:
        return False

    if self._unit_vat_inclusive is not None:
        return False

    if self._vat is not None:
        return False

    if self._quantity is not None:
        return False

    if self._total_vat_exclusive is not None:
        return False

    if self._total_vat_inclusive is not None:
        return False

    return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_billing_date", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_type_description", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_type_description_translated", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_unit_vat_exclusive", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_unit_vat_inclusive", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_vat", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_quantity", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_total_vat_exclusive", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_total_vat_inclusive", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
21.666667
CitrineInformatics/python-citrination-client
citrination_client/models/columns/base.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/models/columns/base.py#L33-L49
def to_dict(self):
    """
    Converts the column to a dictionary representation accepted by the Citrination server.

    :return: Dictionary with basic options, plus any column type specific
             options held under the "options" key
    :rtype: dict
    """
    return {
        "type": self.type,
        "name": self.name,
        "group_by_key": self.group_by_key,
        "role": self.role,
        "units": self.units,
        "options": self.build_options()
    }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "\"type\"", ":", "self", ".", "type", ",", "\"name\"", ":", "self", ".", "name", ",", "\"group_by_key\"", ":", "self", ".", "group_by_key", ",", "\"role\"", ":", "self", ".", "role", ",", "\"units\"", ":", "self", ".", "units", ",", "\"options\"", ":", "self", ".", "build_options", "(", ")", "}" ]
Converts the column to a dictionary representation accepted by the Citrination server.

:return: Dictionary with basic options, plus any column type specific
         options held under the "options" key
:rtype: dict
[ "Converts", "the", "column", "to", "a", "dictionary", "representation", "accepted", "by", "the", "Citrination", "server", "." ]
python
valid
30.705882
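The to_dict() record above delegates type-specific options to build_options(). A sketch of how a subclass might plug into that hook; RealColumn, its base class name, and the bound fields are invented here for illustration, not taken from the library:

class RealColumn(BaseColumn):  # hypothetical subclass and base name
    def __init__(self, name, lower_bound, upper_bound, **kwargs):
        super(RealColumn, self).__init__(name, **kwargs)
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def build_options(self):
        # Whatever this returns lands under the "options" key of to_dict()
        return {"lower_bound": self.lower_bound,
                "upper_bound": self.upper_bound}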
aparo/pyes
pyes/orm/queryset.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/orm/queryset.py#L567-L585
def _cooked_fields(self, dj_fields):
    """
    Returns a tuple of cooked fields

    :param dj_fields: a list of django name fields
    :return:
    """
    from django.db import models
    valids = []
    for field in dj_fields:
        try:
            dj_field, _, _, _ = self.model._meta.get_field_by_name(field)
            if isinstance(dj_field, models.ForeignKey):
                valids.append((field + "_id", field, dj_field))
            else:
                valids.append((field, field, dj_field))
        except models.FieldDoesNotExist:
            valids.append((field, field, None))
    return valids
[ "def", "_cooked_fields", "(", "self", ",", "dj_fields", ")", ":", "from", "django", ".", "db", "import", "models", "valids", "=", "[", "]", "for", "field", "in", "dj_fields", ":", "try", ":", "dj_field", ",", "_", ",", "_", ",", "_", "=", "self", ".", "model", ".", "_meta", ".", "get_field_by_name", "(", "field", ")", "if", "isinstance", "(", "dj_field", ",", "models", ".", "ForeignKey", ")", ":", "valids", ".", "append", "(", "(", "field", "+", "\"_id\"", ",", "field", ",", "dj_field", ")", ")", "else", ":", "valids", ".", "append", "(", "(", "field", ",", "field", ",", "dj_field", ")", ")", "except", "models", ".", "FieldDoesNotExist", ":", "valids", ".", "append", "(", "(", "field", ",", "field", ",", "None", ")", ")", "return", "valids" ]
Returns a tuple of cooked fields

:param dj_fields: a list of django name fields
:return:
[ "Returns", "a", "tuple", "of", "cooked", "fields", ":", "param", "dj_fields", ":", "a", "list", "of", "django", "name", "fields", ":", "return", ":" ]
python
train
35.210526
openearth/mmi-python
mmi/runner.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/runner.py#L155-L160
def unregister(self):
    """unregister model at tracking server"""
    uuid = self.metadata["tracker"]["uuid"]
    # connect to server
    result = requests.delete(urljoin(self.tracker, 'models' + "/" + uuid))
    logger.debug("unregistered at server %s with %s: %s", self.tracker, uuid, result)
[ "def", "unregister", "(", "self", ")", ":", "uuid", "=", "self", ".", "metadata", "[", "\"tracker\"", "]", "[", "\"uuid\"", "]", "# connect to server", "result", "=", "requests", ".", "delete", "(", "urljoin", "(", "self", ".", "tracker", ",", "'models'", "+", "\"/\"", "+", "uuid", ")", ")", "logger", ".", "debug", "(", "\"unregistered at server %s with %s: %s\"", ",", "self", ".", "tracker", ",", "uuid", ",", "result", ")" ]
unregister model at tracking server
[ "unregister", "model", "at", "tracking", "server" ]
python
train
51.833333
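The unregister() record above builds its request URL with urljoin, whose result depends on whether the base URL ends with a slash, so the tracker address is worth sanity-checking. A small demonstration with made-up URLs:

from urllib.parse import urljoin

# With a trailing slash the relative path is appended; without it,
# the last segment of the base path is replaced:
print(urljoin('http://tracker/api/', 'models/abc'))  # http://tracker/api/models/abc
print(urljoin('http://tracker/api', 'models/abc'))   # http://tracker/models/abc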
pandas-dev/pandas
pandas/core/sparse/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L268-L284
def _ixs(self, i, axis=0):
    """
    Return the i-th value or values in the SparseSeries by location

    Parameters
    ----------
    i : int, slice, or sequence of integers

    Returns
    -------
    value : scalar (int) or Series (slice, sequence)
    """
    label = self.index[i]
    if isinstance(label, Index):
        return self.take(i, axis=axis)
    else:
        return self._get_val_at(i)
[ "def", "_ixs", "(", "self", ",", "i", ",", "axis", "=", "0", ")", ":", "label", "=", "self", ".", "index", "[", "i", "]", "if", "isinstance", "(", "label", ",", "Index", ")", ":", "return", "self", ".", "take", "(", "i", ",", "axis", "=", "axis", ")", "else", ":", "return", "self", ".", "_get_val_at", "(", "i", ")" ]
Return the i-th value or values in the SparseSeries by location

Parameters
----------
i : int, slice, or sequence of integers

Returns
-------
value : scalar (int) or Series (slice, sequence)
[ "Return", "the", "i", "-", "th", "value", "or", "values", "in", "the", "SparseSeries", "by", "location" ]
python
train
26.235294
biolink/ontobio
ontobio/assoc_factory.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L104-L127
def create_from_assocs(self, assocs, **args):
    """
    Creates from a list of association objects
    """
    amap = defaultdict(list)
    subject_label_map = {}
    for a in assocs:
        subj = a['subject']
        subj_id = subj['id']
        subj_label = subj['label']
        subject_label_map[subj_id] = subj_label
        if not a['negated']:
            amap[subj_id].append(a['object']['id'])

    aset = AssociationSet(subject_label_map=subject_label_map,
                          association_map=amap,
                          **args)
    aset.associations_by_subj = defaultdict(list)
    aset.associations_by_subj_obj = defaultdict(list)
    for a in assocs:
        sub_id = a['subject']['id']
        obj_id = a['object']['id']
        aset.associations_by_subj[sub_id].append(a)
        aset.associations_by_subj_obj[(sub_id,obj_id)].append(a)

    return aset
[ "def", "create_from_assocs", "(", "self", ",", "assocs", ",", "*", "*", "args", ")", ":", "amap", "=", "defaultdict", "(", "list", ")", "subject_label_map", "=", "{", "}", "for", "a", "in", "assocs", ":", "subj", "=", "a", "[", "'subject'", "]", "subj_id", "=", "subj", "[", "'id'", "]", "subj_label", "=", "subj", "[", "'label'", "]", "subject_label_map", "[", "subj_id", "]", "=", "subj_label", "if", "not", "a", "[", "'negated'", "]", ":", "amap", "[", "subj_id", "]", ".", "append", "(", "a", "[", "'object'", "]", "[", "'id'", "]", ")", "aset", "=", "AssociationSet", "(", "subject_label_map", "=", "subject_label_map", ",", "association_map", "=", "amap", ",", "*", "*", "args", ")", "aset", ".", "associations_by_subj", "=", "defaultdict", "(", "list", ")", "aset", ".", "associations_by_subj_obj", "=", "defaultdict", "(", "list", ")", "for", "a", "in", "assocs", ":", "sub_id", "=", "a", "[", "'subject'", "]", "[", "'id'", "]", "obj_id", "=", "a", "[", "'object'", "]", "[", "'id'", "]", "aset", ".", "associations_by_subj", "[", "sub_id", "]", ".", "append", "(", "a", ")", "aset", ".", "associations_by_subj_obj", "[", "(", "sub_id", ",", "obj_id", ")", "]", ".", "append", "(", "a", ")", "return", "aset" ]
Creates from a list of association objects
[ "Creates", "from", "a", "list", "of", "association", "objects" ]
python
train
37.125
cltrudeau/wrench
wrench/logtools/utils.py
https://github.com/cltrudeau/wrench/blob/bc231dd085050a63a87ff3eb8f0a863928f65a41/wrench/logtools/utils.py#L19-L29
def configure_file_logger(name, log_dir, log_level=logging.DEBUG):
    """Configures logging to use the :class:`SizeRotatingFileHandler`"""
    from .srothandler import SizeRotatingFileHandler

    root = logging.getLogger()
    root.setLevel(log_level)

    handler = SizeRotatingFileHandler(os.path.join(log_dir, '%s.log' % name))
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(LOG_FORMAT_STANDARD))
    root.addHandler(handler)
[ "def", "configure_file_logger", "(", "name", ",", "log_dir", ",", "log_level", "=", "logging", ".", "DEBUG", ")", ":", "from", ".", "srothandler", "import", "SizeRotatingFileHandler", "root", "=", "logging", ".", "getLogger", "(", ")", "root", ".", "setLevel", "(", "log_level", ")", "handler", "=", "SizeRotatingFileHandler", "(", "os", ".", "path", ".", "join", "(", "log_dir", ",", "'%s.log'", "%", "name", ")", ")", "handler", ".", "setLevel", "(", "log_level", ")", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "LOG_FORMAT_STANDARD", ")", ")", "root", ".", "addHandler", "(", "handler", ")" ]
Configures logging to use the :class:`SizeRotatingFileHandler`
[ "Configures", "logging", "to", "use", "the", ":", "class", ":", "SizeRotatingFileHandler" ]
python
train
40.727273
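Usage of configure_file_logger() is a one-liner. A sketch, assuming the log directory already exists and the module has been imported:

import logging

configure_file_logger('myapp', 'logs', log_level=logging.INFO)
# Root-logger output now goes to logs/myapp.log via the size-rotating handler
logging.getLogger(__name__).info('file logging configured')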
schapman1974/tinymongo
tinymongo/tinymongo.py
https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L177-L207
def insert_one(self, doc, *args, **kwargs):
    """
    Inserts one document into the collection
    If contains '_id' key it is used, else it is generated.

    :param doc: the document
    :return: InsertOneResult
    """
    if self.table is None:
        self.build_table()

    if not isinstance(doc, dict):
        raise ValueError(u'"doc" must be a dict')

    _id = doc[u'_id'] = doc.get('_id') or generate_id()

    bypass_document_validation = kwargs.get('bypass_document_validation')
    if bypass_document_validation is True:
        # insert doc without validation of duplicated `_id`
        eid = self.table.insert(doc)
    else:
        existing = self.find_one({'_id': _id})
        if existing is None:
            eid = self.table.insert(doc)
        else:
            raise DuplicateKeyError(
                u'_id:{0} already exists in collection:{1}'.format(
                    _id, self.tablename
                )
            )

    return InsertOneResult(eid=eid, inserted_id=_id)
[ "def", "insert_one", "(", "self", ",", "doc", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "table", "is", "None", ":", "self", ".", "build_table", "(", ")", "if", "not", "isinstance", "(", "doc", ",", "dict", ")", ":", "raise", "ValueError", "(", "u'\"doc\" must be a dict'", ")", "_id", "=", "doc", "[", "u'_id'", "]", "=", "doc", ".", "get", "(", "'_id'", ")", "or", "generate_id", "(", ")", "bypass_document_validation", "=", "kwargs", ".", "get", "(", "'bypass_document_validation'", ")", "if", "bypass_document_validation", "is", "True", ":", "# insert doc without validation of duplicated `_id`", "eid", "=", "self", ".", "table", ".", "insert", "(", "doc", ")", "else", ":", "existing", "=", "self", ".", "find_one", "(", "{", "'_id'", ":", "_id", "}", ")", "if", "existing", "is", "None", ":", "eid", "=", "self", ".", "table", ".", "insert", "(", "doc", ")", "else", ":", "raise", "DuplicateKeyError", "(", "u'_id:{0} already exists in collection:{1}'", ".", "format", "(", "_id", ",", "self", ".", "tablename", ")", ")", "return", "InsertOneResult", "(", "eid", "=", "eid", ",", "inserted_id", "=", "_id", ")" ]
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.

:param doc: the document
:return: InsertOneResult
[ "Inserts", "one", "document", "into", "the", "collection", "If", "contains", "_id", "key", "it", "is", "used", "else", "it", "is", "generated", ".", ":", "param", "doc", ":", "the", "document", ":", "return", ":", "InsertOneResult" ]
python
train
34.806452
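A sketch of the duplicate-_id behaviour documented above; the collection handle is assumed to come from an already-opened TinyMongo database, and the names are taken from the record rather than verified against the library:

result = collection.insert_one({'_id': 'abc', 'n': 1})
print(result.inserted_id)  # 'abc'

try:
    collection.insert_one({'_id': 'abc', 'n': 2})
except DuplicateKeyError:
    # A second insert with the same _id is rejected unless
    # bypass_document_validation=True is passed.
    pass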
mitsei/dlkit
dlkit/json_/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/managers.py#L941-L956
def get_composition_admin_session(self):
    """Gets a composition administration session for creating, updating and deleting compositions.

    return: (osid.repository.CompositionAdminSession) - a
            ``CompositionAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_composition_admin()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_composition_admin()`` is ``true``.*

    """
    if not self.supports_composition_admin():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.CompositionAdminSession(runtime=self._runtime)
[ "def", "get_composition_admin_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_composition_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "CompositionAdminSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets a composition administration session for creating, updating and deleting compositions.

return: (osid.repository.CompositionAdminSession) - a
        ``CompositionAdminSession``
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - ``supports_composition_admin()`` is
        ``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.*
[ "Gets", "a", "composition", "administration", "session", "for", "creating", "updating", "and", "deleting", "compositions", "." ]
python
train
45.25
goose3/goose3
goose3/text.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/text.py#L35-L60
def get_encodings_from_content(content):
    """
    Code from:
    https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py

    Return encodings from given content string.

    :param content: string to extract encodings from.
    """
    if isinstance(content, bytes):
        find_charset = re.compile(
            br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
        ).findall

        find_xml = re.compile(
            br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
        ).findall
        return [encoding.decode('utf-8') for encoding in
                find_charset(content) + find_xml(content)]
    else:
        find_charset = re.compile(
            r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
        ).findall

        find_xml = re.compile(
            r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
        ).findall
        return find_charset(content) + find_xml(content)
[ "def", "get_encodings_from_content", "(", "content", ")", ":", "if", "isinstance", "(", "content", ",", "bytes", ")", ":", "find_charset", "=", "re", ".", "compile", "(", "br'<meta.*?charset=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ",", "flags", "=", "re", ".", "I", ")", ".", "findall", "find_xml", "=", "re", ".", "compile", "(", "br'^<\\?xml.*?encoding=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ")", ".", "findall", "return", "[", "encoding", ".", "decode", "(", "'utf-8'", ")", "for", "encoding", "in", "find_charset", "(", "content", ")", "+", "find_xml", "(", "content", ")", "]", "else", ":", "find_charset", "=", "re", ".", "compile", "(", "r'<meta.*?charset=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ",", "flags", "=", "re", ".", "I", ")", ".", "findall", "find_xml", "=", "re", ".", "compile", "(", "r'^<\\?xml.*?encoding=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ")", ".", "findall", "return", "find_charset", "(", "content", ")", "+", "find_xml", "(", "content", ")" ]
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py

Return encodings from given content string.

:param content: string to extract encodings from.
[ "Code", "from", ":", "https", ":", "//", "github", ".", "com", "/", "sigmavirus24", "/", "requests", "-", "toolbelt", "/", "blob", "/", "master", "/", "requests_toolbelt", "/", "utils", "/", "deprecated", ".", "py", "Return", "encodings", "from", "given", "content", "string", ".", ":", "param", "content", ":", "string", "to", "extract", "encodings", "from", "." ]
python
valid
36.769231
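A quick check of get_encodings_from_content() on both of the input types it accepts; the sample markup below is invented:

html = b'<html><head><meta charset="utf-8"></head></html>'
print(get_encodings_from_content(html))  # ['utf-8'] (bytes branch, decoded)

xml = '<?xml version="1.0" encoding="iso-8859-1"?><root/>'
print(get_encodings_from_content(xml))   # ['iso-8859-1'] (str branch)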
hollenstein/maspy
maspy/xml.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/xml.py#L129-L144
def recCopyElement(oldelement):
    """Generates a copy of an xml element and recursively of all
    child elements.

    :param oldelement: an instance of lxml.etree._Element

    :returns: a copy of the "oldelement"

    .. warning::
        doesn't copy ``.text`` or ``.tail`` of xml elements
    """
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    if len(oldelement.getchildren()) > 0:
        for childelement in oldelement.getchildren():
            newelement.append(recCopyElement(childelement))
    return newelement
[ "def", "recCopyElement", "(", "oldelement", ")", ":", "newelement", "=", "ETREE", ".", "Element", "(", "oldelement", ".", "tag", ",", "oldelement", ".", "attrib", ")", "if", "len", "(", "oldelement", ".", "getchildren", "(", ")", ")", ">", "0", ":", "for", "childelement", "in", "oldelement", ".", "getchildren", "(", ")", ":", "newelement", ".", "append", "(", "recCopyElement", "(", "childelement", ")", ")", "return", "newelement" ]
Generates a copy of an xml element and recursively of all
child elements.

:param oldelement: an instance of lxml.etree._Element

:returns: a copy of the "oldelement"

.. warning::
    doesn't copy ``.text`` or ``.tail`` of xml elements
[ "Generates", "a", "copy", "of", "an", "xml", "element", "and", "recursively", "of", "all", "child", "elements", "." ]
python
train
33.25
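A short demonstration of the documented caveat that recCopyElement() keeps tags and attributes but drops .text and .tail; the sample XML is invented:

from lxml import etree as ETREE

root = ETREE.fromstring('<a id="1"><b/>tail-text<c/></a>')
copy = recCopyElement(root)
# Tag and attrib survive, the tail text of <b/> does not:
print(ETREE.tostring(copy))  # b'<a id="1"><b/><c/></a>'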
majuss/lupupy
lupupy/devices/alarm.py
https://github.com/majuss/lupupy/blob/71af6c397837ffc393c7b8122be175602638d3c6/lupupy/devices/alarm.py#L24-L37
def set_mode(self, mode):
    """Set Lupusec alarm mode."""
    _LOGGER.debug('State change called from alarm device')
    if not mode:
        _LOGGER.info('No mode supplied')
    elif mode not in CONST.ALL_MODES:
        _LOGGER.warning('Invalid mode')

    response_object = self._lupusec.set_mode(CONST.MODE_TRANSLATION[mode])
    if response_object['result'] != 1:
        _LOGGER.warning('Mode setting unsuccessful')

    self._json_state['mode'] = mode
    _LOGGER.info('Mode set to: %s', mode)
    return True
[ "def", "set_mode", "(", "self", ",", "mode", ")", ":", "_LOGGER", ".", "debug", "(", "'State change called from alarm device'", ")", "if", "not", "mode", ":", "_LOGGER", ".", "info", "(", "'No mode supplied'", ")", "elif", "mode", "not", "in", "CONST", ".", "ALL_MODES", ":", "_LOGGER", ".", "warning", "(", "'Invalid mode'", ")", "response_object", "=", "self", ".", "_lupusec", ".", "set_mode", "(", "CONST", ".", "MODE_TRANSLATION", "[", "mode", "]", ")", "if", "response_object", "[", "'result'", "]", "!=", "1", ":", "_LOGGER", ".", "warning", "(", "'Mode setting unsuccessful'", ")", "self", ".", "_json_state", "[", "'mode'", "]", "=", "mode", "_LOGGER", ".", "info", "(", "'Mode set to: %s'", ",", "mode", ")", "return", "True" ]
Set Lupusec alarm mode.
[ "Set", "Lupusec", "alarm", "mode", "." ]
python
train
39.357143
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_mode.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_mode.py#L41-L48
def unknown_command(self, args):
    '''handle mode switch by mode name as command'''
    mode_mapping = self.master.mode_mapping()
    mode = args[0].upper()
    if mode in mode_mapping:
        self.master.set_mode(mode_mapping[mode])
        return True
    return False
[ "def", "unknown_command", "(", "self", ",", "args", ")", ":", "mode_mapping", "=", "self", ".", "master", ".", "mode_mapping", "(", ")", "mode", "=", "args", "[", "0", "]", ".", "upper", "(", ")", "if", "mode", "in", "mode_mapping", ":", "self", ".", "master", ".", "set_mode", "(", "mode_mapping", "[", "mode", "]", ")", "return", "True", "return", "False" ]
handle mode switch by mode name as command
[ "handle", "mode", "switch", "by", "mode", "name", "as", "command" ]
python
train
36.75
hydpy-dev/hydpy
hydpy/core/importtools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/importtools.py#L25-L92
def parameterstep(timestep=None):
    """Define a parameter time step size within a parameter control file.

    Argument:
        * timestep(|Period|): Time step size.

    Function parameterstep should usually be applied in a line
    immediately behind the model import.  Defining the step size of
    time dependent parameters is a prerequisite to access any model
    specific parameter.

    Note that parameterstep implements some namespace magic by means
    of the module |inspect|.  This makes things a little complicated
    for framework developers, but it eases the definition of parameter
    control files for framework users.
    """
    if timestep is not None:
        parametertools.Parameter.parameterstep(timestep)
    namespace = inspect.currentframe().f_back.f_locals
    model = namespace.get('model')
    if model is None:
        model = namespace['Model']()
        namespace['model'] = model
        if hydpy.pub.options.usecython and 'cythonizer' in namespace:
            cythonizer = namespace['cythonizer']
            namespace['cythonmodule'] = cythonizer.cymodule
            model.cymodel = cythonizer.cymodule.Model()
            namespace['cymodel'] = model.cymodel
            model.cymodel.parameters = cythonizer.cymodule.Parameters()
            model.cymodel.sequences = cythonizer.cymodule.Sequences()
            for numpars_name in ('NumConsts', 'NumVars'):
                if hasattr(cythonizer.cymodule, numpars_name):
                    numpars_new = getattr(cythonizer.cymodule, numpars_name)()
                    numpars_old = getattr(model, numpars_name.lower())
                    for (name_numpar, numpar) in vars(numpars_old).items():
                        setattr(numpars_new, name_numpar, numpar)
                    setattr(model.cymodel, numpars_name.lower(), numpars_new)
            for name in dir(model.cymodel):
                if (not name.startswith('_')) and hasattr(model, name):
                    setattr(model, name, getattr(model.cymodel, name))
        if 'Parameters' not in namespace:
            namespace['Parameters'] = parametertools.Parameters
        model.parameters = namespace['Parameters'](namespace)
        if 'Sequences' not in namespace:
            namespace['Sequences'] = sequencetools.Sequences
        model.sequences = namespace['Sequences'](**namespace)
        namespace['parameters'] = model.parameters
        for pars in model.parameters:
            namespace[pars.name] = pars
        namespace['sequences'] = model.sequences
        for seqs in model.sequences:
            namespace[seqs.name] = seqs
        if 'Masks' in namespace:
            model.masks = namespace['Masks'](model)
            namespace['masks'] = model.masks
    try:
        namespace.update(namespace['CONSTANTS'])
    except KeyError:
        pass
    focus = namespace.get('focus')
    for par in model.parameters.control:
        try:
            if (focus is None) or (par is focus):
                namespace[par.name] = par
            else:
                namespace[par.name] = lambda *args, **kwargs: None
        except AttributeError:
            pass
[ "def", "parameterstep", "(", "timestep", "=", "None", ")", ":", "if", "timestep", "is", "not", "None", ":", "parametertools", ".", "Parameter", ".", "parameterstep", "(", "timestep", ")", "namespace", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", ".", "f_locals", "model", "=", "namespace", ".", "get", "(", "'model'", ")", "if", "model", "is", "None", ":", "model", "=", "namespace", "[", "'Model'", "]", "(", ")", "namespace", "[", "'model'", "]", "=", "model", "if", "hydpy", ".", "pub", ".", "options", ".", "usecython", "and", "'cythonizer'", "in", "namespace", ":", "cythonizer", "=", "namespace", "[", "'cythonizer'", "]", "namespace", "[", "'cythonmodule'", "]", "=", "cythonizer", ".", "cymodule", "model", ".", "cymodel", "=", "cythonizer", ".", "cymodule", ".", "Model", "(", ")", "namespace", "[", "'cymodel'", "]", "=", "model", ".", "cymodel", "model", ".", "cymodel", ".", "parameters", "=", "cythonizer", ".", "cymodule", ".", "Parameters", "(", ")", "model", ".", "cymodel", ".", "sequences", "=", "cythonizer", ".", "cymodule", ".", "Sequences", "(", ")", "for", "numpars_name", "in", "(", "'NumConsts'", ",", "'NumVars'", ")", ":", "if", "hasattr", "(", "cythonizer", ".", "cymodule", ",", "numpars_name", ")", ":", "numpars_new", "=", "getattr", "(", "cythonizer", ".", "cymodule", ",", "numpars_name", ")", "(", ")", "numpars_old", "=", "getattr", "(", "model", ",", "numpars_name", ".", "lower", "(", ")", ")", "for", "(", "name_numpar", ",", "numpar", ")", "in", "vars", "(", "numpars_old", ")", ".", "items", "(", ")", ":", "setattr", "(", "numpars_new", ",", "name_numpar", ",", "numpar", ")", "setattr", "(", "model", ".", "cymodel", ",", "numpars_name", ".", "lower", "(", ")", ",", "numpars_new", ")", "for", "name", "in", "dir", "(", "model", ".", "cymodel", ")", ":", "if", "(", "not", "name", ".", "startswith", "(", "'_'", ")", ")", "and", "hasattr", "(", "model", ",", "name", ")", ":", "setattr", "(", "model", ",", "name", ",", "getattr", "(", "model", ".", "cymodel", ",", "name", ")", ")", "if", "'Parameters'", "not", "in", "namespace", ":", "namespace", "[", "'Parameters'", "]", "=", "parametertools", ".", "Parameters", "model", ".", "parameters", "=", "namespace", "[", "'Parameters'", "]", "(", "namespace", ")", "if", "'Sequences'", "not", "in", "namespace", ":", "namespace", "[", "'Sequences'", "]", "=", "sequencetools", ".", "Sequences", "model", ".", "sequences", "=", "namespace", "[", "'Sequences'", "]", "(", "*", "*", "namespace", ")", "namespace", "[", "'parameters'", "]", "=", "model", ".", "parameters", "for", "pars", "in", "model", ".", "parameters", ":", "namespace", "[", "pars", ".", "name", "]", "=", "pars", "namespace", "[", "'sequences'", "]", "=", "model", ".", "sequences", "for", "seqs", "in", "model", ".", "sequences", ":", "namespace", "[", "seqs", ".", "name", "]", "=", "seqs", "if", "'Masks'", "in", "namespace", ":", "model", ".", "masks", "=", "namespace", "[", "'Masks'", "]", "(", "model", ")", "namespace", "[", "'masks'", "]", "=", "model", ".", "masks", "try", ":", "namespace", ".", "update", "(", "namespace", "[", "'CONSTANTS'", "]", ")", "except", "KeyError", ":", "pass", "focus", "=", "namespace", ".", "get", "(", "'focus'", ")", "for", "par", "in", "model", ".", "parameters", ".", "control", ":", "try", ":", "if", "(", "focus", "is", "None", ")", "or", "(", "par", "is", "focus", ")", ":", "namespace", "[", "par", ".", "name", "]", "=", "par", "else", ":", "namespace", "[", "par", ".", "name", "]", "=", "lambda", "*", "args", ",", "*", "*", 
"kwargs", ":", "None", "except", "AttributeError", ":", "pass" ]
Define a parameter time step size within a parameter control file.

Argument:
    * timestep(|Period|): Time step size.

Function parameterstep should usually be applied in a line immediately
behind the model import.  Defining the step size of time dependent
parameters is a prerequisite to access any model specific parameter.

Note that parameterstep implements some namespace magic by means of the
module |inspect|.  This makes things a little complicated for framework
developers, but it eases the definition of parameter control files for
framework users.
[ "Define", "a", "parameter", "time", "step", "size", "within", "a", "parameter", "control", "file", "." ]
python
train
45.294118
horazont/aioxmpp
aioxmpp/rsm/xso.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/rsm/xso.py#L274-L286
def last_page(self_or_cls, max_=None):
    """
    Return a query set which requests the last page.

    :param max_: Maximum number of items to return.
    :type max_: :class:`int` or :data:`None`
    :rtype: :class:`ResultSetMetadata`
    :return: A new request set up to request the last page.
    """
    result = self_or_cls()
    result.before = Before()
    result.max_ = max_
    return result
[ "def", "last_page", "(", "self_or_cls", ",", "max_", "=", "None", ")", ":", "result", "=", "self_or_cls", "(", ")", "result", ".", "before", "=", "Before", "(", ")", "result", ".", "max_", "=", "max_", "return", "result" ]
Return a query set which requests the last page.

:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page.
[ "Return", "a", "query", "set", "which", "requests", "the", "last", "page", "." ]
python
train
33.307692
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L2019-L2039
def read_raw_table(self, table):
    """
    Yield rows in the [incr tsdb()] *table*. A row is a dictionary
    mapping column names to values. Data from a profile is decoded by
    decode_row(). No filters or applicators are used.
    """
    fields = self.table_relations(table) if self.cast else None
    field_names = [f.name for f in self.table_relations(table)]
    field_len = len(field_names)
    table_path = os.path.join(self.root, table)
    with _open_table(table_path, self.encoding) as tbl:
        for line in tbl:
            cols = decode_row(line, fields=fields)
            if len(cols) != field_len:
                # should this throw an exception instead?
                logging.error('Number of stored fields ({}) '
                              'differ from the expected number({}); '
                              'fields may be misaligned!'
                              .format(len(cols), field_len))
            row = OrderedDict(zip(field_names, cols))
            yield row
[ "def", "read_raw_table", "(", "self", ",", "table", ")", ":", "fields", "=", "self", ".", "table_relations", "(", "table", ")", "if", "self", ".", "cast", "else", "None", "field_names", "=", "[", "f", ".", "name", "for", "f", "in", "self", ".", "table_relations", "(", "table", ")", "]", "field_len", "=", "len", "(", "field_names", ")", "table_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root", ",", "table", ")", "with", "_open_table", "(", "table_path", ",", "self", ".", "encoding", ")", "as", "tbl", ":", "for", "line", "in", "tbl", ":", "cols", "=", "decode_row", "(", "line", ",", "fields", "=", "fields", ")", "if", "len", "(", "cols", ")", "!=", "field_len", ":", "# should this throw an exception instead?", "logging", ".", "error", "(", "'Number of stored fields ({}) '", "'differ from the expected number({}); '", "'fields may be misaligned!'", ".", "format", "(", "len", "(", "cols", ")", ",", "field_len", ")", ")", "row", "=", "OrderedDict", "(", "zip", "(", "field_names", ",", "cols", ")", ")", "yield", "row" ]
Yield rows in the [incr tsdb()] *table*. A row is a dictionary mapping column names to values. Data from a profile is decoded by decode_row(). No filters or applicators are used.
[ "Yield", "rows", "in", "the", "[", "incr", "tsdb", "()", "]", "*", "table", "*", ".", "A", "row", "is", "a", "dictionary", "mapping", "column", "names", "to", "values", ".", "Data", "from", "a", "profile", "is", "decoded", "by", "decode_row", "()", ".", "No", "filters", "or", "applicators", "are", "used", "." ]
python
train
50.666667
asweigart/pyautogui
pyautogui/_pyautogui_win.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/_pyautogui_win.py#L451-L479
def _click(x, y, button):
    """Send the mouse click event to Windows by calling the mouse_event() win32
    function.

    Args:
      button (str): The mouse button, either 'left', 'middle', or 'right'
      x (int): The x position of the mouse event.
      y (int): The y position of the mouse event.

    Returns:
      None
    """
    if button == 'left':
        try:
            _sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)
        except (PermissionError, OSError):
            # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
            pass
    elif button == 'middle':
        try:
            _sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)
        except (PermissionError, OSError):
            # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
            pass
    elif button == 'right':
        try:
            _sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)
        except (PermissionError, OSError):
            # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
            pass
    else:
        assert False, "button argument not in ('left', 'middle', 'right')"
[ "def", "_click", "(", "x", ",", "y", ",", "button", ")", ":", "if", "button", "==", "'left'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_LEFTCLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'middle'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_MIDDLECLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'right'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_RIGHTCLICK", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "else", ":", "assert", "False", ",", "\"button argument not in ('left', 'middle', 'right')\"" ]
Send the mouse click event to Windows by calling the mouse_event() win32
function.

Args:
  button (str): The mouse button, either 'left', 'middle', or 'right'
  x (int): The x position of the mouse event.
  y (int): The y position of the mouse event.

Returns:
  None
[ "Send", "the", "mouse", "click", "event", "to", "Windows", "by", "calling", "the", "mouse_event", "()", "win32", "function", "." ]
python
train
41.551724
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_cmdlong.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_cmdlong.py#L151-L171
def cmd_velocity(self, args):
    '''velocity x-ms y-ms z-ms'''
    if (len(args) != 3):
        print("Usage: velocity x y z (m/s)")
        return

    if (len(args) == 3):
        x_mps = float(args[0])
        y_mps = float(args[1])
        z_mps = float(args[2])
        #print("x:%f, y:%f, z:%f" % (x_mps, y_mps, z_mps))
        self.master.mav.set_position_target_local_ned_send(
            0,  # time_boot_ms (not used)
            0, 0,  # target system, target component
            mavutil.mavlink.MAV_FRAME_LOCAL_NED,  # frame
            0b0000111111000111,  # type_mask (only speeds enabled)
            0, 0, 0,  # x, y, z positions (not used)
            x_mps, y_mps, -z_mps,  # x, y, z velocity in m/s
            0, 0, 0,  # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
            0, 0)
[ "def", "cmd_velocity", "(", "self", ",", "args", ")", ":", "if", "(", "len", "(", "args", ")", "!=", "3", ")", ":", "print", "(", "\"Usage: velocity x y z (m/s)\"", ")", "return", "if", "(", "len", "(", "args", ")", "==", "3", ")", ":", "x_mps", "=", "float", "(", "args", "[", "0", "]", ")", "y_mps", "=", "float", "(", "args", "[", "1", "]", ")", "z_mps", "=", "float", "(", "args", "[", "2", "]", ")", "#print(\"x:%f, y:%f, z:%f\" % (x_mps, y_mps, z_mps))", "self", ".", "master", ".", "mav", ".", "set_position_target_local_ned_send", "(", "0", ",", "# time_boot_ms (not used)", "0", ",", "0", ",", "# target system, target component", "mavutil", ".", "mavlink", ".", "MAV_FRAME_LOCAL_NED", ",", "# frame", "0b0000111111000111", ",", "# type_mask (only speeds enabled)", "0", ",", "0", ",", "0", ",", "# x, y, z positions (not used)", "x_mps", ",", "y_mps", ",", "-", "z_mps", ",", "# x, y, z velocity in m/s", "0", ",", "0", ",", "0", ",", "# x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)", "0", ",", "0", ")" ]
velocity x-ms y-ms z-ms
[ "velocity", "x", "-", "ms", "y", "-", "ms", "z", "-", "ms" ]
python
train
42
romanz/trezor-agent
libagent/util.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L115-L129
def split_bits(value, *bits):
    """
    Split integer value into list of ints, according to `bits` list.

    For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
    """
    result = []
    for b in reversed(bits):
        mask = (1 << b) - 1
        result.append(value & mask)
        value = value >> b
    assert value == 0
    result.reverse()
    return result
[ "def", "split_bits", "(", "value", ",", "*", "bits", ")", ":", "result", "=", "[", "]", "for", "b", "in", "reversed", "(", "bits", ")", ":", "mask", "=", "(", "1", "<<", "b", ")", "-", "1", "result", ".", "append", "(", "value", "&", "mask", ")", "value", "=", "value", ">>", "b", "assert", "value", "==", "0", "result", ".", "reverse", "(", ")", "return", "result" ]
Split integer value into list of ints, according to `bits` list.

For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
[ "Split", "integer", "value", "into", "list", "of", "ints", "according", "to", "bits", "list", "." ]
python
train
24.266667
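The docstring example for split_bits() can be checked directly, and the inverse operation makes the bit layout explicit; join_bits below is not part of the library, only an illustration:

assert split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]

def join_bits(values, *bits):
    # Inverse of split_bits: pack the fields back, most significant first
    result = 0
    for v, b in zip(values, bits):
        result = (result << b) | v
    return result

assert join_bits([0x1, 0x23, 0x4], 4, 8, 4) == 0x1234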
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L740-L761
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
                   service_node_ip=UNKNOWN_SRVN_NODE_IP,
                   vrf_prof=None, desc=None):
    """Update project on the DCNM.

    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param desc: description of project.
    """
    desc = desc or org_name
    res = self._create_or_update_partition(org_name, part_name,
                                           desc, dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof,
                                           operation='PUT')
    if res and res.status_code in self._resp_ok:
        LOG.debug("Update %s partition in DCNM.", part_name)
    else:
        LOG.error("Failed to update %(part)s partition in DCNM."
                  "Response: %(res)s", {'part': part_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
[ "def", "update_project", "(", "self", ",", "org_name", ",", "part_name", ",", "dci_id", "=", "UNKNOWN_DCI_ID", ",", "service_node_ip", "=", "UNKNOWN_SRVN_NODE_IP", ",", "vrf_prof", "=", "None", ",", "desc", "=", "None", ")", ":", "desc", "=", "desc", "or", "org_name", "res", "=", "self", ".", "_create_or_update_partition", "(", "org_name", ",", "part_name", ",", "desc", ",", "dci_id", "=", "dci_id", ",", "service_node_ip", "=", "service_node_ip", ",", "vrf_prof", "=", "vrf_prof", ",", "operation", "=", "'PUT'", ")", "if", "res", "and", "res", ".", "status_code", "in", "self", ".", "_resp_ok", ":", "LOG", ".", "debug", "(", "\"Update %s partition in DCNM.\"", ",", "part_name", ")", "else", ":", "LOG", ".", "error", "(", "\"Failed to update %(part)s partition in DCNM.\"", "\"Response: %(res)s\"", ",", "{", "'part'", ":", "part_name", ",", "'res'", ":", "res", "}", ")", "raise", "dexc", ".", "DfaClientRequestFailed", "(", "reason", "=", "res", ")" ]
Update project on the DCNM.

:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
[ "Update", "project", "on", "the", "DCNM", "." ]
python
train
50.545455
NiklasRosenstein-Python/nr-deprecated
nr/stream.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/stream.py#L54-L59
def map(cls, iterable, func, *a, **kw):
    """
    Iterable-first replacement of Python's built-in `map()` function.
    """
    return cls(func(x, *a, **kw) for x in iterable)
[ "def", "map", "(", "cls", ",", "iterable", ",", "func", ",", "*", "a", ",", "*", "*", "kw", ")", ":", "return", "cls", "(", "func", "(", "x", ",", "*", "a", ",", "*", "*", "kw", ")", "for", "x", "in", "iterable", ")" ]
Iterable-first replacement of Python's built-in `map()` function.
[ "Iterable", "-", "first", "replacement", "of", "Python", "s", "built", "-", "in", "map", "()", "function", "." ]
python
train
28.833333
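A usage sketch for the iterable-first map() above, assuming the enclosing Stream class accepts a generator and is itself iterable; note that extra positional and keyword arguments are forwarded to func:

squares = Stream.map([1, 2, 3], lambda x, offset: x * x + offset, offset=10)
print(list(squares))  # [11, 14, 19]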
graphql-python/graphql-core-next
graphql/language/lexer.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/language/lexer.py#L493-L509
def char2hex(a: str):
    """Convert a hex character to its integer value.

    '0' becomes 0, '9' becomes 9
    'A' becomes 10, 'F' becomes 15
    'a' becomes 10, 'f' becomes 15

    Returns -1 on error.
    """
    if "0" <= a <= "9":
        return ord(a) - 48
    elif "A" <= a <= "F":
        return ord(a) - 55
    elif "a" <= a <= "f":  # a-f
        return ord(a) - 87
    return -1
[ "def", "char2hex", "(", "a", ":", "str", ")", ":", "if", "\"0\"", "<=", "a", "<=", "\"9\"", ":", "return", "ord", "(", "a", ")", "-", "48", "elif", "\"A\"", "<=", "a", "<=", "\"F\"", ":", "return", "ord", "(", "a", ")", "-", "55", "elif", "\"a\"", "<=", "a", "<=", "\"f\"", ":", "# a-f", "return", "ord", "(", "a", ")", "-", "87", "return", "-", "1" ]
Convert a hex character to its integer value.

'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15

Returns -1 on error.
[ "Convert", "a", "hex", "character", "to", "its", "integer", "value", "." ]
python
train
22.058824
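char2hex() can be spot-checked against Python's built-in base-16 parser; the test harness below is invented for illustration:

for c in '09AFafg':
    # int(c, 16) raises on invalid digits, so guard with a membership test
    expected = int(c, 16) if c in '0123456789abcdefABCDEF' else -1
    assert char2hex(c) == expected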
petl-developers/petl
petl/transform/validation.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/validation.py#L12-L71
def validate(table, constraints=None, header=None):
    """
    Validate a `table` against a set of `constraints` and/or an expected
    `header`, e.g.::

        >>> import petl as etl
        >>> # define some validation constraints
        ... header = ('foo', 'bar', 'baz')
        >>> constraints = [
        ...     dict(name='foo_int', field='foo', test=int),
        ...     dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
        ...     dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
        ...     dict(name='not_none', assertion=lambda row: None not in row),
        ...     dict(name='qux_int', field='qux', test=int, optional=True),
        ... ]
        >>> # now validate a table
        ... table = (('foo', 'bar', 'bazzz'),
        ...          (1, '2000-01-01', 'Y'),
        ...          ('x', '2010-10-10', 'N'),
        ...          (2, '2000/01/01', 'Y'),
        ...          (3, '2015-12-12', 'x'),
        ...          (4, None, 'N'),
        ...          ('y', '1999-99-99', 'z'),
        ...          (6, '2000-01-01'),
        ...          (7, '2001-02-02', 'N', True))
        >>> problems = etl.validate(table, constraints=constraints, header=header)
        >>> problems.lookall()
        +--------------+-----+-------+--------------+------------------+
        | name         | row | field | value        | error            |
        +==============+=====+=======+==============+==================+
        | '__header__' |   0 | None  | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'foo_int'    |   2 | 'foo' | 'x'          | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   3 | 'bar' | '2000/01/01' | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   4 | 'baz' | 'x'          | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   5 | 'bar' | None         | 'AttributeError' |
        +--------------+-----+-------+--------------+------------------+
        | 'not_none'   |   5 | None  | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'foo_int'    |   6 | 'foo' | 'y'          | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'bar_date'   |   6 | 'bar' | '1999-99-99' | 'ValueError'     |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   6 | 'baz' | 'z'          | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | '__len__'    |   7 | None  | 2            | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | 'baz_enum'   |   7 | 'baz' | None         | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+
        | '__len__'    |   8 | None  | 4            | 'AssertionError' |
        +--------------+-----+-------+--------------+------------------+

    Returns a table of validation problems.

    """  # noqa

    return ProblemsView(table, constraints=constraints, header=header)
[ "def", "validate", "(", "table", ",", "constraints", "=", "None", ",", "header", "=", "None", ")", ":", "# noqa", "return", "ProblemsView", "(", "table", ",", "constraints", "=", "constraints", ",", "header", "=", "header", ")" ]
Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::

    >>> import petl as etl
    >>> # define some validation constraints
    ... header = ('foo', 'bar', 'baz')
    >>> constraints = [
    ...     dict(name='foo_int', field='foo', test=int),
    ...     dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
    ...     dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
    ...     dict(name='not_none', assertion=lambda row: None not in row),
    ...     dict(name='qux_int', field='qux', test=int, optional=True),
    ... ]
    >>> # now validate a table
    ... table = (('foo', 'bar', 'bazzz'),
    ...          (1, '2000-01-01', 'Y'),
    ...          ('x', '2010-10-10', 'N'),
    ...          (2, '2000/01/01', 'Y'),
    ...          (3, '2015-12-12', 'x'),
    ...          (4, None, 'N'),
    ...          ('y', '1999-99-99', 'z'),
    ...          (6, '2000-01-01'),
    ...          (7, '2001-02-02', 'N', True))
    >>> problems = etl.validate(table, constraints=constraints, header=header)
    >>> problems.lookall()
    +--------------+-----+-------+--------------+------------------+
    | name         | row | field | value        | error            |
    +==============+=====+=======+==============+==================+
    | '__header__' |   0 | None  | None         | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | 'foo_int'    |   2 | 'foo' | 'x'          | 'ValueError'     |
    +--------------+-----+-------+--------------+------------------+
    | 'bar_date'   |   3 | 'bar' | '2000/01/01' | 'ValueError'     |
    +--------------+-----+-------+--------------+------------------+
    | 'baz_enum'   |   4 | 'baz' | 'x'          | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | 'bar_date'   |   5 | 'bar' | None         | 'AttributeError' |
    +--------------+-----+-------+--------------+------------------+
    | 'not_none'   |   5 | None  | None         | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | 'foo_int'    |   6 | 'foo' | 'y'          | 'ValueError'     |
    +--------------+-----+-------+--------------+------------------+
    | 'bar_date'   |   6 | 'bar' | '1999-99-99' | 'ValueError'     |
    +--------------+-----+-------+--------------+------------------+
    | 'baz_enum'   |   6 | 'baz' | 'z'          | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | '__len__'    |   7 | None  | 2            | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | 'baz_enum'   |   7 | 'baz' | None         | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+
    | '__len__'    |   8 | None  | 4            | 'AssertionError' |
    +--------------+-----+-------+--------------+------------------+

Returns a table of validation problems.
[ "Validate", "a", "table", "against", "a", "set", "of", "constraints", "and", "/", "or", "an", "expected", "header", "e", ".", "g", ".", "::" ]
python
train
55.1
nitely/kua
kua/routes.py
https://github.com/nitely/kua/blob/6ffc9d0426e87a34cf8c3f8e7aedac6d35e59cb6/kua/routes.py#L90-L109
def make_params(
        key_parts: Sequence[str],
        variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
    """
    Map keys to variables. This map\
    URL-pattern variables to\
    a URL related parts

    :param key_parts: A list of URL parts
    :param variable_parts: A linked-list\
    (ala nested tuples) of URL parts
    :return: The param dict with the values\
    assigned to the keys

    :private:
    """
    # The unwrapped variable parts are in reverse order.
    # Instead of reversing those we reverse the key parts
    # and avoid the O(n) space required for reversing the vars
    return dict(zip(reversed(key_parts), _unwrap(variable_parts)))
[ "def", "make_params", "(", "key_parts", ":", "Sequence", "[", "str", "]", ",", "variable_parts", ":", "VariablePartsType", ")", "->", "Dict", "[", "str", ",", "Union", "[", "str", ",", "Tuple", "[", "str", "]", "]", "]", ":", "# The unwrapped variable parts are in reverse order.", "# Instead of reversing those we reverse the key parts", "# and avoid the O(n) space required for reversing the vars", "return", "dict", "(", "zip", "(", "reversed", "(", "key_parts", ")", ",", "_unwrap", "(", "variable_parts", ")", ")", ")" ]
Map keys to variables. This map\
URL-pattern variables to\
a URL related parts

:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys

:private:
[ "Map", "keys", "to", "variables", ".", "This", "map", "\\", "URL", "-", "pattern", "variables", "to", "\\", "a", "URL", "related", "parts" ]
python
train
33.55
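The reversal trick in make_params() is easiest to see with concrete values. Since _unwrap is internal to kua, plain tuples stand in for its output here:

key_parts = ('user', 'repo', 'branch')   # pattern /:user/:repo/:branch
unwrapped = ('master', 'kua', 'nitely')  # variable parts arrive deepest-first
print(dict(zip(reversed(key_parts), unwrapped)))
# {'branch': 'master', 'repo': 'kua', 'user': 'nitely'}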
saltstack/salt
salt/utils/data.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/data.py#L320-L329
def decode_tuple(data, encoding=None, errors='strict', keep=False,
                 normalize=False, preserve_dict_class=False, to_str=False):
    '''
    Decode all string values to Unicode. Optionally use to_str=True to ensure
    strings are str types and not unicode on Python 2.
    '''
    return tuple(
        decode_list(data, encoding, errors, keep, normalize,
                    preserve_dict_class, True, to_str)
    )
[ "def", "decode_tuple", "(", "data", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ",", "keep", "=", "False", ",", "normalize", "=", "False", ",", "preserve_dict_class", "=", "False", ",", "to_str", "=", "False", ")", ":", "return", "tuple", "(", "decode_list", "(", "data", ",", "encoding", ",", "errors", ",", "keep", ",", "normalize", ",", "preserve_dict_class", ",", "True", ",", "to_str", ")", ")" ]
Decode all string values to Unicode. Optionally use to_str=True to ensure strings are str types and not unicode on Python 2.
[ "Decode", "all", "string", "values", "to", "Unicode", ".", "Optionally", "use", "to_str", "=", "True", "to", "ensure", "strings", "are", "str", "types", "and", "not", "unicode", "on", "Python", "2", "." ]
python
train
42.2
janpipek/physt
physt/binnings.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L704-L726
def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None,
                        includes_right_edge=False, **kwargs) -> FixedWidthBinning:
    """Construct fixed-width binning schema.

    Parameters
    ----------
    bin_width: float
    range: Optional[tuple]
        (min, max)
    align: Optional[float]
        Must be multiple of bin_width
    """
    result = FixedWidthBinning(bin_width=bin_width,
                               includes_right_edge=includes_right_edge, **kwargs)
    if range:
        result._force_bin_existence(range[0])
        result._force_bin_existence(range[1], includes_right_edge=True)
        if not kwargs.get("adaptive"):
            return result   # Otherwise we want to adapt to data
    if data is not None and data.shape[0]:
        # print("Jo, tady")
        result._force_bin_existence([np.min(data), np.max(data)],
                                    includes_right_edge=includes_right_edge)
    return result
[ "def", "fixed_width_binning", "(", "data", "=", "None", ",", "bin_width", ":", "Union", "[", "float", ",", "int", "]", "=", "1", ",", "*", ",", "range", "=", "None", ",", "includes_right_edge", "=", "False", ",", "*", "*", "kwargs", ")", "->", "FixedWidthBinning", ":", "result", "=", "FixedWidthBinning", "(", "bin_width", "=", "bin_width", ",", "includes_right_edge", "=", "includes_right_edge", ",", "*", "*", "kwargs", ")", "if", "range", ":", "result", ".", "_force_bin_existence", "(", "range", "[", "0", "]", ")", "result", ".", "_force_bin_existence", "(", "range", "[", "1", "]", ",", "includes_right_edge", "=", "True", ")", "if", "not", "kwargs", ".", "get", "(", "\"adaptive\"", ")", ":", "return", "result", "# Otherwise we want to adapt to data", "if", "data", "is", "not", "None", "and", "data", ".", "shape", "[", "0", "]", ":", "# print(\"Jo, tady\")", "result", ".", "_force_bin_existence", "(", "[", "np", ".", "min", "(", "data", ")", ",", "np", ".", "max", "(", "data", ")", "]", ",", "includes_right_edge", "=", "includes_right_edge", ")", "return", "result" ]
Construct fixed-width binning schema.

Parameters
----------
bin_width: float
range: Optional[tuple]
    (min, max)
align: Optional[float]
    Must be multiple of bin_width
[ "Construct", "fixed", "-", "width", "binning", "schema", "." ]
python
train
40.73913
pyusb/pyusb
tools/vcp_terminal.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/tools/vcp_terminal.py#L540-L549
def fmt_text(text):
    """ convert characters that aren't printable to hex format """
    PRINTABLE_CHAR = set(
        list(range(ord(' '), ord('~') + 1)) +
        [ord('\r'), ord('\n')])
    newtext = ("\\x{:02X}".format(c)
               if c not in PRINTABLE_CHAR else chr(c)
               for c in text)
    textlines = "\r\n".join(l.strip('\r')
                            for l in "".join(newtext).split('\n'))
    return textlines
[ "def", "fmt_text", "(", "text", ")", ":", "PRINTABLE_CHAR", "=", "set", "(", "list", "(", "range", "(", "ord", "(", "' '", ")", ",", "ord", "(", "'~'", ")", "+", "1", ")", ")", "+", "[", "ord", "(", "'\\r'", ")", ",", "ord", "(", "'\\n'", ")", "]", ")", "newtext", "=", "(", "\"\\\\x{:02X}\"", ".", "format", "(", "c", ")", "if", "c", "not", "in", "PRINTABLE_CHAR", "else", "chr", "(", "c", ")", "for", "c", "in", "text", ")", "textlines", "=", "\"\\r\\n\"", ".", "join", "(", "l", ".", "strip", "(", "'\\r'", ")", "for", "l", "in", "\"\"", ".", "join", "(", "newtext", ")", ".", "split", "(", "'\\n'", ")", ")", "return", "textlines" ]
convert characters that aren't printable to hex format
[ "convert", "characters", "that", "aren", "t", "printable", "to", "hex", "format" ]
python
train
40.7
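fmt_text() expects an iterable of character codes; in Python 3 that means bytes, since iterating bytes yields ints for the PRINTABLE_CHAR membership test. The sample payload below is invented:

# The ESC byte is escaped, line endings are normalised to \r\n:
print(fmt_text(b'ok\x1b[0m\r\ndone'))
# ok\x1B[0m
# done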
ask/carrot
carrot/backends/pystomp.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pystomp.py#L54-L66
def ack(self):
    """Acknowledge this message as being processed.
    This will remove the message from the queue.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.ack(self._frame)
    self._state = "ACK"
[ "def", "ack", "(", "self", ")", ":", "if", "self", ".", "acknowledged", ":", "raise", "self", ".", "MessageStateError", "(", "\"Message already acknowledged with state: %s\"", "%", "self", ".", "_state", ")", "self", ".", "backend", ".", "ack", "(", "self", ".", "_frame", ")", "self", ".", "_state", "=", "\"ACK\"" ]
Acknowledge this message as being processed.
This will remove the message from the queue.

:raises MessageStateError: If the message has already been
    acknowledged/requeued/rejected.
[ "Acknowledge", "this", "message", "as", "being", "processed", ".", "This", "will", "remove", "the", "message", "from", "the", "queue", "." ]
python
train
34.769231
dellis23/ansible-toolkit
ansible_toolkit/vault.py
https://github.com/dellis23/ansible-toolkit/blob/7eb5198e1f68c9a3ca1d129d9e2a52fb3f0e65c5/ansible_toolkit/vault.py#L52-L85
def restore(path, password_file=None):
    """
    Retrieves a file from the atk vault and restores it to its original
    location, re-encrypting it if it has changed.

    :param path: path to original file
    """
    vault = VaultLib(get_vault_password(password_file))
    atk_path = os.path.join(ATK_VAULT, path)

    # Load stored data
    with open(os.path.join(atk_path, 'encrypted'), 'rb') as f:
        old_data = f.read()
    with open(os.path.join(atk_path, 'hash'), 'rb') as f:
        old_hash = f.read()

    # Load new data
    with open(path, 'rb') as f:
        new_data = f.read()
    new_hash = hashlib.sha1(new_data).hexdigest()

    # Determine whether to re-encrypt
    if old_hash != new_hash:
        new_data = vault.encrypt(new_data)
    else:
        new_data = old_data

    # Update file
    with open(path, 'wb') as f:
        f.write(new_data)

    # Clean atk vault
    os.remove(os.path.join(atk_path, 'encrypted'))
    os.remove(os.path.join(atk_path, 'hash'))
[ "def", "restore", "(", "path", ",", "password_file", "=", "None", ")", ":", "vault", "=", "VaultLib", "(", "get_vault_password", "(", "password_file", ")", ")", "atk_path", "=", "os", ".", "path", ".", "join", "(", "ATK_VAULT", ",", "path", ")", "# Load stored data", "with", "open", "(", "os", ".", "path", ".", "join", "(", "atk_path", ",", "'encrypted'", ")", ",", "'rb'", ")", "as", "f", ":", "old_data", "=", "f", ".", "read", "(", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "atk_path", ",", "'hash'", ")", ",", "'rb'", ")", "as", "f", ":", "old_hash", "=", "f", ".", "read", "(", ")", "# Load new data", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "new_data", "=", "f", ".", "read", "(", ")", "new_hash", "=", "hashlib", ".", "sha1", "(", "new_data", ")", ".", "hexdigest", "(", ")", "# Determine whether to re-encrypt", "if", "old_hash", "!=", "new_hash", ":", "new_data", "=", "vault", ".", "encrypt", "(", "new_data", ")", "else", ":", "new_data", "=", "old_data", "# Update file", "with", "open", "(", "path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "new_data", ")", "# Clean atk vault", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "atk_path", ",", "'encrypted'", ")", ")", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "atk_path", ",", "'hash'", ")", ")" ]
Retrieves a file from the atk vault and restores it to its original location, re-encrypting it if it has changed. :param path: path to original file
[ "Retrieves", "a", "file", "from", "the", "atk", "vault", "and", "restores", "it", "to", "its", "original", "location", "re", "-", "encrypting", "it", "if", "it", "has", "changed", "." ]
python
train
28.411765
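The change-detection step in restore() reduces to a SHA-1 comparison; a self-contained sketch of just that decision, with file contents inlined for illustration:

import hashlib

old_hash = hashlib.sha1(b'secret v1').hexdigest()
new_data = b'secret v2'
changed = old_hash != hashlib.sha1(new_data).hexdigest()
# changed is True, so the vaulted copy would be re-encrypted before writing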
dmwm/DBS
Server/Python/src/dbs/web/DBSReaderModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L1659-L1697
def listRunSummaries(self, dataset="", run_num=-1): """ API to list run summaries, like the maximal lumisection in a run. :param dataset: dataset name (Optional) :type dataset: str :param run_num: Run number (Required) :type run_num: str, long, int :rtype: list containing a dictionary with key max_lumi """ if run_num==-1: dbsExceptionHandler("dbsException-invalid-input", "The run_num parameter is mandatory", self.logger.exception) if re.search('[*,%]', dataset): dbsExceptionHandler("dbsException-invalid-input", "No wildcards are allowed in dataset", self.logger.exception) # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when dataset is given in this API. # YG Jan. 16 2019 if ((run_num == -1 or run_num == '-1') and dataset==''): dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input when no dataset is present.", self.logger.exception) conn = None try: conn = self.dbi.connection() return self.dbsRunSummaryListDAO.execute(conn, dataset, run_num) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listRunSummaries. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) finally: if conn: conn.close()
[ "def", "listRunSummaries", "(", "self", ",", "dataset", "=", "\"\"", ",", "run_num", "=", "-", "1", ")", ":", "if", "run_num", "==", "-", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"The run_num parameter is mandatory\"", ",", "self", ".", "logger", ".", "exception", ")", "if", "re", ".", "search", "(", "'[*,%]'", ",", "dataset", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"No wildcards are allowed in dataset\"", ",", "self", ".", "logger", ".", "exception", ")", "# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours", "# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when dataset is given in this API.", "# YG Jan. 16 2019", "if", "(", "(", "run_num", "==", "-", "1", "or", "run_num", "==", "'-1'", ")", "and", "dataset", "==", "''", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Run_num=1 is not a valid input when no dataset is present.\"", ",", "self", ".", "logger", ".", "exception", ")", "conn", "=", "None", "try", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "return", "self", ".", "dbsRunSummaryListDAO", ".", "execute", "(", "conn", ",", "dataset", ",", "run_num", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listRunSummaries. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
API to list run summaries, like the maximal lumisection in a run. :param dataset: dataset name (Optional) :type dataset: str :param run_num: Run number (Required) :type run_num: str, long, int :rtype: list containing a dictionary with key max_lumi
[ "API", "to", "list", "run", "summaries", "like", "the", "maximal", "lumisection", "in", "a", "run", "." ]
python
train
50.128205
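The wildcard guard above is a single regex check; a standalone sketch with the DBS exception helper replaced by a plain ValueError:

import re

def reject_wildcards(dataset):
    # note the character class also rejects ',' as written upstream
    if re.search('[*,%]', dataset):
        raise ValueError('No wildcards are allowed in dataset')

reject_wildcards('/A/B/RAW')    # passes silently
# reject_wildcards('/A/*/RAW')  # would raise ValueError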
fastai/fastai
fastai/data_block.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L285-L287
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList': "Apply `func` to every input to get its label." return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs)
[ "def", "label_from_func", "(", "self", ",", "func", ":", "Callable", ",", "label_cls", ":", "Callable", "=", "None", ",", "*", "*", "kwargs", ")", "->", "'LabelList'", ":", "return", "self", ".", "_label_from_list", "(", "[", "func", "(", "o", ")", "for", "o", "in", "self", ".", "items", "]", ",", "label_cls", "=", "label_cls", ",", "*", "*", "kwargs", ")" ]
Apply `func` to every input to get its label.
[ "Apply", "func", "to", "every", "input", "to", "get", "its", "label", "." ]
python
train
80.666667
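A typical call, labelling every item by a function of its path; item_list and the folder layout are hypothetical, the method signature is as shown above:

# label each image by its parent directory name, e.g. 'cats' or 'dogs'
labelled = item_list.label_from_func(lambda o: o.parent.name)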
eReuse/utils
ereuse_utils/naming.py
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/naming.py#L108-L110
def hid(manufacturer: str, serial_number: str, model: str) -> str: """Computes the HID for the given properties of a device. The HID is suitable for use in a URI.""" return Naming.url_word(manufacturer) + '-' + Naming.url_word(serial_number) + '-' + Naming.url_word(model)
[ "def", "hid", "(", "manufacturer", ":", "str", ",", "serial_number", ":", "str", ",", "model", ":", "str", ")", "->", "str", ":", "return", "Naming", ".", "url_word", "(", "manufacturer", ")", "+", "'-'", "+", "Naming", ".", "url_word", "(", "serial_number", ")", "+", "'-'", "+", "Naming", ".", "url_word", "(", "model", ")" ]
Computes the HID for the given properties of a device. The HID is suitable for use in a URI.
[ "Computes", "the", "HID", "for", "the", "given", "properties", "of", "a", "device", ".", "The", "HID", "is", "suitable", "to", "use", "to", "an", "URI", "." ]
python
train
95.333333
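A sketch of the resulting identifier; the exact slug depends on Naming.url_word, which is not shown here, so the output is only plausible, not guaranteed:

device_hid = Naming.hid('Dell', '5FX40A1', 'Latitude E6440')
# plausibly 'dell-5fx40a1-latitude-e6440' for a lowercasing url_word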
PetrochukM/PyTorch-NLP
torchnlp/encoders/encoder.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/encoders/encoder.py#L43-L58
def decode(self, encoded): """ Decodes an object. Args: encoded (object): Encoded object. Returns: object: Decoded object. """ if self.enforce_reversible: self.enforce_reversible = False if self.encode(self.decode(encoded)) != encoded: raise ValueError('Decoding is not reversible for "%s"' % encoded) self.enforce_reversible = True return encoded
[ "def", "decode", "(", "self", ",", "encoded", ")", ":", "if", "self", ".", "enforce_reversible", ":", "self", ".", "enforce_reversible", "=", "False", "if", "self", ".", "encode", "(", "self", ".", "decode", "(", "encoded", ")", ")", "!=", "encoded", ":", "raise", "ValueError", "(", "'Decoding is not reversible for \"%s\"'", "%", "encoded", ")", "self", ".", "enforce_reversible", "=", "True", "return", "encoded" ]
Decodes an object. Args: encoded (object): Encoded object. Returns: object: Decoded object.
[ "Decodes", "an", "object", "." ]
python
train
28.6875
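The enforce_reversible flag guards the round trip encode(decode(x)) == x; a sketch with a hypothetical identity codec, assuming the base class is importable as torchnlp.encoders.Encoder and that its constructor accepts the enforce_reversible flag used above:

from torchnlp.encoders import Encoder

class IdentityEncoder(Encoder):  # hypothetical, trivially reversible codec
    def encode(self, obj):
        return obj

enc = IdentityEncoder(enforce_reversible=True)
enc.decode('abc')  # passes the check; a lossy codec would raise ValueError here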
Contraz/demosys-py
demosys/context/pyglet/window.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/context/pyglet/window.py#L75-L80
def on_key_release(self, symbol, modifiers): """ Pyglet specific key release callback. Forwards and translates the events to :py:func:`keyboard_event` """ self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers)
[ "def", "on_key_release", "(", "self", ",", "symbol", ",", "modifiers", ")", ":", "self", ".", "keyboard_event", "(", "symbol", ",", "self", ".", "keys", ".", "ACTION_RELEASE", ",", "modifiers", ")" ]
Pyglet specific key release callback. Forwards and translates the events to :py:func:`keyboard_event`
[ "Pyglet", "specific", "key", "release", "callback", ".", "Forwards", "and", "translates", "the", "events", "to", ":", "py", ":", "func", ":", "keyboard_event" ]
python
valid
43.166667
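On the receiving side, keyboard_event gets the translated key, action and modifiers; a hypothetical handler (ESCAPE and close are assumed names, not taken from the source):

def keyboard_event(self, key, action, modifiers):
    # close the window when ESC is released
    if action == self.keys.ACTION_RELEASE and key == self.keys.ESCAPE:
        self.close()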
sibirrer/lenstronomy
lenstronomy/ImSim/MultiBand/multi_frame.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/MultiBand/multi_frame.py#L126-L142
def error_response(self, kwargs_lens, kwargs_ps): """ returns the 1d array of the error estimate corresponding to the data response :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties) """ C_D_response, model_error = [], [] for i in range(self._num_bands): if self._compute_bool[i] is True: kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]] C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps) model_error.append(model_error_i) if C_D_response == []: C_D_response = C_D_response_i else: C_D_response = np.append(C_D_response, C_D_response_i) return C_D_response, model_error
[ "def", "error_response", "(", "self", ",", "kwargs_lens", ",", "kwargs_ps", ")", ":", "C_D_response", ",", "model_error", "=", "[", "]", ",", "[", "]", "for", "i", "in", "range", "(", "self", ".", "_num_bands", ")", ":", "if", "self", ".", "_compute_bool", "[", "i", "]", "is", "True", ":", "kwargs_lens_i", "=", "[", "kwargs_lens", "[", "k", "]", "for", "k", "in", "self", ".", "_idex_lens_list", "[", "i", "]", "]", "C_D_response_i", ",", "model_error_i", "=", "self", ".", "_imageModel_list", "[", "i", "]", ".", "error_response", "(", "kwargs_lens_i", ",", "kwargs_ps", ")", "model_error", ".", "append", "(", "model_error_i", ")", "if", "C_D_response", "==", "[", "]", ":", "C_D_response", "=", "C_D_response_i", "else", ":", "C_D_response", "=", "np", ".", "append", "(", "C_D_response", ",", "C_D_response_i", ")", "return", "C_D_response", ",", "model_error" ]
returns the 1d array of the error estimate corresponding to the data response :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
[ "returns", "the", "1d", "array", "of", "the", "error", "estimate", "corresponding", "to", "the", "data", "response" ]
python
train
50.294118
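The incremental np.append accumulation above is equivalent to concatenating the per-band responses; a compact, runnable restatement:

import numpy as np

band_responses = [np.array([1., 2.]), np.array([3.])]  # hypothetical per-band 1d arrays
flat = np.concatenate(band_responses)
# flat -> array([1., 2., 3.]), matching the loop's np.append result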
ArchiveTeam/wpull
wpull/protocol/ftp/stream.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/ftp/stream.py#L119-L142
def read_reply(self) -> Reply: '''Read a reply from the stream. Returns: .ftp.request.Reply: The reply Coroutine. ''' _logger.debug('Read reply') reply = Reply() while True: line = yield from self._connection.readline() if line[-1:] != b'\n': raise NetworkError('Connection closed.') self._data_event_dispatcher.notify_read(line) reply.parse(line) if reply.code is not None: break return reply
[ "def", "read_reply", "(", "self", ")", "->", "Reply", ":", "_logger", ".", "debug", "(", "'Read reply'", ")", "reply", "=", "Reply", "(", ")", "while", "True", ":", "line", "=", "yield", "from", "self", ".", "_connection", ".", "readline", "(", ")", "if", "line", "[", "-", "1", ":", "]", "!=", "b'\\n'", ":", "raise", "NetworkError", "(", "'Connection closed.'", ")", "self", ".", "_data_event_dispatcher", ".", "notify_read", "(", "line", ")", "reply", ".", "parse", "(", "line", ")", "if", "reply", ".", "code", "is", "not", "None", ":", "break", "return", "reply" ]
Read a reply from the stream. Returns: .ftp.request.Reply: The reply Coroutine.
[ "Read", "a", "reply", "from", "the", "stream", "." ]
python
train
22.708333
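Reply.parse decides when the reply is complete (reply.code becomes non-None); its exact rule is not shown here, but the conventional RFC 959 terminator test looks like this sketch, which is not wpull's implementation:

def is_final_line(line: bytes) -> bool:
    # b'230 Login successful.\r\n' ends a reply; b'230-Welcome\r\n' continues it
    return len(line) >= 4 and line[:3].isdigit() and line[3:4] == b' '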
BD2KGenomics/toil-scripts
src/toil_scripts/bwa_alignment/bwa_alignment.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/bwa_alignment/bwa_alignment.py#L21-L58
def download_reference_files(job, inputs, samples): """ Downloads shared files that are used by all samples for alignment, or generates them if they were not provided. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace inputs: Input arguments (see main) :param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]] """ # Create dictionary to store FileStoreIDs of shared input files shared_ids = {} urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt), ('pac', inputs.pac), ('sa', inputs.sa)] # Alt file is optional and can only be provided, not generated if inputs.alt: urls.append(('alt', inputs.alt)) # Download reference download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G job.addChild(download_ref) shared_ids['ref'] = download_ref.rv() # If FAI is provided, download it. Otherwise, generate it if inputs.fai: shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv() else: faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv()) shared_ids['fai'] = download_ref.addChild(faidx).rv() # If all BWA index files are provided, download them. Otherwise, generate them if all(x[1] for x in urls): for name, url in urls: shared_ids[name] = job.addChildJobFn(download_url_job, url).rv() else: job.fileStore.logToMaster('BWA index files not provided, creating now') bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv()) download_ref.addChild(bwa_index) for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']): shared_ids[name] = bwa_index.rv(x) # Map_job distributes one sample in samples to the download_sample_and_align function job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids)
[ "def", "download_reference_files", "(", "job", ",", "inputs", ",", "samples", ")", ":", "# Create dictionary to store FileStoreIDs of shared input files", "shared_ids", "=", "{", "}", "urls", "=", "[", "(", "'amb'", ",", "inputs", ".", "amb", ")", ",", "(", "'ann'", ",", "inputs", ".", "ann", ")", ",", "(", "'bwt'", ",", "inputs", ".", "bwt", ")", ",", "(", "'pac'", ",", "inputs", ".", "pac", ")", ",", "(", "'sa'", ",", "inputs", ".", "sa", ")", "]", "# Alt file is optional and can only be provided, not generated", "if", "inputs", ".", "alt", ":", "urls", ".", "append", "(", "(", "'alt'", ",", "inputs", ".", "alt", ")", ")", "# Download reference", "download_ref", "=", "job", ".", "wrapJobFn", "(", "download_url_job", ",", "inputs", ".", "ref", ",", "disk", "=", "'3G'", ")", "# Human genomes are typically ~3G", "job", ".", "addChild", "(", "download_ref", ")", "shared_ids", "[", "'ref'", "]", "=", "download_ref", ".", "rv", "(", ")", "# If FAI is provided, download it. Otherwise, generate it", "if", "inputs", ".", "fai", ":", "shared_ids", "[", "'fai'", "]", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "inputs", ".", "fai", ")", ".", "rv", "(", ")", "else", ":", "faidx", "=", "job", ".", "wrapJobFn", "(", "run_samtools_faidx", ",", "download_ref", ".", "rv", "(", ")", ")", "shared_ids", "[", "'fai'", "]", "=", "download_ref", ".", "addChild", "(", "faidx", ")", ".", "rv", "(", ")", "# If all BWA index files are provided, download them. Otherwise, generate them", "if", "all", "(", "x", "[", "1", "]", "for", "x", "in", "urls", ")", ":", "for", "name", ",", "url", "in", "urls", ":", "shared_ids", "[", "name", "]", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "url", ")", ".", "rv", "(", ")", "else", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'BWA index files not provided, creating now'", ")", "bwa_index", "=", "job", ".", "wrapJobFn", "(", "run_bwa_index", ",", "download_ref", ".", "rv", "(", ")", ")", "download_ref", ".", "addChild", "(", "bwa_index", ")", "for", "x", ",", "name", "in", "enumerate", "(", "[", "'amb'", ",", "'ann'", ",", "'bwt'", ",", "'pac'", ",", "'sa'", "]", ")", ":", "shared_ids", "[", "name", "]", "=", "bwa_index", ".", "rv", "(", "x", ")", "# Map_job distributes one sample in samples to the downlaod_sample_and_align function", "job", ".", "addFollowOnJobFn", "(", "map_job", ",", "download_sample_and_align", ",", "samples", ",", "inputs", ",", "shared_ids", ")" ]
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace inputs: Input arguments (see main) :param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
[ "Downloads", "shared", "files", "that", "are", "used", "by", "all", "samples", "for", "alignment", "or", "generates", "them", "if", "they", "were", "not", "provided", "." ]
python
train
51
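The choice between downloading and rebuilding the BWA index hinges on all(x[1] for x in urls); the same test in isolation:

urls = [('amb', 'http://host/ref.amb'), ('ann', None)]  # hypothetical inputs
if all(url for _, url in urls):
    print('every index file provided: download each one')
else:
    print('at least one missing: run bwa index instead')  # taken here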
DiscordBotList/DBL-Python-Library
dbl/http.py
https://github.com/DiscordBotList/DBL-Python-Library/blob/c1461ae0acc644cdeedef8fd6b5e36f76d81c1aa/dbl/http.py#L78-L148
async def request(self, method, url, **kwargs): """Handles requests to the API""" rate_limiter = RateLimiter(max_calls=59, period=60, callback=limited) # handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached. async with rate_limiter: # this works but doesn't 'save' over restart. need a better implementation. if not self.token: raise UnauthorizedDetected('UnauthorizedDetected (status code: 401): No TOKEN provided') headers = { 'User-Agent': self.user_agent, 'Content-Type': 'application/json' } if 'json' in kwargs: kwargs['data'] = to_json(kwargs.pop('json')) kwargs['headers'] = headers headers['Authorization'] = self.token for tries in range(5): async with self.session.request(method, url, **kwargs) as resp: log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), resp.status) data = await json_or_text(resp) if 300 > resp.status >= 200: return data if resp.status == 429: # we are being ratelimited fmt = 'We are being rate limited. Retrying in %.2f seconds (%.3f minutes).' # sleep a bit retry_after = json.loads(resp.headers.get('Retry-After')) mins = retry_after / 60 log.warning(fmt, retry_after, mins) # check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots) is_global = True # is_global = data.get('global', False) if is_global: self._global_over.clear() await asyncio.sleep(retry_after, loop=self.loop) log.debug('Done sleeping for the rate limit. Retrying...') # release the global lock now that the # global rate limit has passed if is_global: self._global_over.set() log.debug('Global rate limit is now over.') continue if resp.status == 400: raise HTTPException(resp, data) elif resp.status == 401: raise Unauthorized(resp, data) elif resp.status == 403: raise Forbidden(resp, data) elif resp.status == 404: raise NotFound(resp, data) else: raise HTTPException(resp, data) # We've run out of retries, raise. raise HTTPException(resp, data)
[ "async", "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "rate_limiter", "=", "RateLimiter", "(", "max_calls", "=", "59", ",", "period", "=", "60", ",", "callback", "=", "limited", ")", "# handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached.", "async", "with", "rate_limiter", ":", "# this works but doesn't 'save' over restart. need a better implementation.", "if", "not", "self", ".", "token", ":", "raise", "UnauthorizedDetected", "(", "'UnauthorizedDetected (status code: 401): No TOKEN provided'", ")", "headers", "=", "{", "'User-Agent'", ":", "self", ".", "user_agent", ",", "'Content-Type'", ":", "'application/json'", "}", "if", "'json'", "in", "kwargs", ":", "kwargs", "[", "'data'", "]", "=", "to_json", "(", "kwargs", ".", "pop", "(", "'json'", ")", ")", "kwargs", "[", "'headers'", "]", "=", "headers", "headers", "[", "'Authorization'", "]", "=", "self", ".", "token", "for", "tries", "in", "range", "(", "5", ")", ":", "async", "with", "self", ".", "session", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", "as", "resp", ":", "log", ".", "debug", "(", "'%s %s with %s has returned %s'", ",", "method", ",", "url", ",", "kwargs", ".", "get", "(", "'data'", ")", ",", "resp", ".", "status", ")", "data", "=", "await", "json_or_text", "(", "resp", ")", "if", "300", ">", "resp", ".", "status", ">=", "200", ":", "return", "data", "if", "resp", ".", "status", "==", "429", ":", "# we are being ratelimited", "fmt", "=", "'We are being rate limited. Retrying in %.2f seconds (%.3f minutes).'", "# sleep a bit", "retry_after", "=", "json", ".", "loads", "(", "resp", ".", "headers", ".", "get", "(", "'Retry-After'", ")", ")", "mins", "=", "retry_after", "/", "60", "log", ".", "warning", "(", "fmt", ",", "retry_after", ",", "mins", ")", "# check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots)", "is_global", "=", "True", "# is_global = data.get('global', False)", "if", "is_global", ":", "self", ".", "_global_over", ".", "clear", "(", ")", "await", "asyncio", ".", "sleep", "(", "retry_after", ",", "loop", "=", "self", ".", "loop", ")", "log", ".", "debug", "(", "'Done sleeping for the rate limit. Retrying...'", ")", "# release the global lock now that the", "# global rate limit has passed", "if", "is_global", ":", "self", ".", "_global_over", ".", "set", "(", ")", "log", ".", "debug", "(", "'Global rate limit is now over.'", ")", "continue", "if", "resp", ".", "status", "==", "400", ":", "raise", "HTTPException", "(", "resp", ",", "data", ")", "elif", "resp", ".", "status", "==", "401", ":", "raise", "Unauthorized", "(", "resp", ",", "data", ")", "elif", "resp", ".", "status", "==", "403", ":", "raise", "Forbidden", "(", "resp", ",", "data", ")", "elif", "resp", ".", "status", "==", "404", ":", "raise", "NotFound", "(", "resp", ",", "data", ")", "else", ":", "raise", "HTTPException", "(", "resp", ",", "data", ")", "# We've run out of retries, raise.", "raise", "HTTPException", "(", "resp", ",", "data", ")" ]
Handles requests to the API
[ "Handles", "requests", "to", "the", "API" ]
python
test
42.380282
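The 429 branch boils down to sleeping for the advertised Retry-After before retrying; a standalone sketch with a hypothetical header value (using the modern asyncio API rather than the loop= keyword above):

import asyncio, json

async def backoff(headers):
    retry_after = json.loads(headers.get('Retry-After'))  # seconds
    await asyncio.sleep(retry_after)

asyncio.run(backoff({'Retry-After': '0.01'}))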
sbrisard/rebin
rebin.py
https://github.com/sbrisard/rebin/blob/a0abc9b6e6f82f3c80fe30129f139f1d54f78471/rebin.py#L88-L146
def rebin(a, factor, func=None): u"""Aggregate data from the input array ``a`` into rectangular tiles. The output array results from tiling ``a`` and applying `func` to each tile. ``factor`` specifies the size of the tiles. More precisely, the returned array ``out`` is such that:: out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...]) If ``factor`` is an integer-like scalar, then ``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a sequence of integer-like scalars, then ``f0 = factor[0]``, ``f1 = factor[1]``, ... and the length of ``factor`` must equal the number of dimensions of ``a``. The reduction function ``func`` must accept an ``axis`` argument. Examples of such functions are - ``numpy.mean`` (default), - ``numpy.sum``, - ``numpy.product``, - ... The following example shows how a (4, 6) array is reduced to a (2, 2) array >>> import numpy >>> from rebin import rebin >>> a = numpy.arange(24).reshape(4, 6) >>> rebin(a, factor=(2, 3), func=numpy.sum) array([[ 24, 42], [ 96, 114]]) If the dimensions of `a` are not integer multiples of the elements of `factor`, the remaining cells are discarded. >>> a = numpy.arange(28).reshape(4, 7) >>> rebin(a, factor=(2, 2), func=numpy.sum) array([[16, 24, 32], [72, 80, 88]]) """ a = np.asarray(a) dim = a.ndim if np.isscalar(factor): factor = dim*(factor,) elif len(factor) != dim: raise ValueError('length of factor must be {} (was {})' .format(dim, len(factor))) if func is None: func = np.mean for f in factor: if f != int(f): raise ValueError('factor must be an int or a tuple of ints ' '(got {})'.format(f)) new_shape = [n//f for n, f in zip(a.shape, factor)]+list(factor) new_strides = [s*f for s, f in zip(a.strides, factor)]+list(a.strides) aa = as_strided(a, shape=new_shape, strides=new_strides) return func(aa, axis=tuple(range(-dim, 0)))
[ "def", "rebin", "(", "a", ",", "factor", ",", "func", "=", "None", ")", ":", "a", "=", "np", ".", "asarray", "(", "a", ")", "dim", "=", "a", ".", "ndim", "if", "np", ".", "isscalar", "(", "factor", ")", ":", "factor", "=", "dim", "*", "(", "factor", ",", ")", "elif", "len", "(", "factor", ")", "!=", "dim", ":", "raise", "ValueError", "(", "'length of factor must be {} (was {})'", ".", "format", "(", "dim", ",", "len", "(", "factor", ")", ")", ")", "if", "func", "is", "None", ":", "func", "=", "np", ".", "mean", "for", "f", "in", "factor", ":", "if", "f", "!=", "int", "(", "f", ")", ":", "raise", "ValueError", "(", "'factor must be an int or a tuple of ints '", "'(got {})'", ".", "format", "(", "f", ")", ")", "new_shape", "=", "[", "n", "//", "f", "for", "n", ",", "f", "in", "zip", "(", "a", ".", "shape", ",", "factor", ")", "]", "+", "list", "(", "factor", ")", "new_strides", "=", "[", "s", "*", "f", "for", "s", ",", "f", "in", "zip", "(", "a", ".", "strides", ",", "factor", ")", "]", "+", "list", "(", "a", ".", "strides", ")", "aa", "=", "as_strided", "(", "a", ",", "shape", "=", "new_shape", ",", "strides", "=", "new_strides", ")", "return", "func", "(", "aa", ",", "axis", "=", "tuple", "(", "range", "(", "-", "dim", ",", "0", ")", ")", ")" ]
u"""Aggregate data from the input array ``a`` into rectangular tiles. The output array results from tiling ``a`` and applying `func` to each tile. ``factor`` specifies the size of the tiles. More precisely, the returned array ``out`` is such that:: out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...]) If ``factor`` is an integer-like scalar, then ``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a sequence of integer-like scalars, then ``f0 = factor[0]``, ``f1 = factor[1]``, ... and the length of ``factor`` must equal the number of dimensions of ``a``. The reduction function ``func`` must accept an ``axis`` argument. Examples of such function are - ``numpy.mean`` (default), - ``numpy.sum``, - ``numpy.product``, - ... The following example shows how a (4, 6) array is reduced to a (2, 2) array >>> import numpy >>> from rebin import rebin >>> a = numpy.arange(24).reshape(4, 6) >>> rebin(a, factor=(2, 3), func=numpy.sum) array([[ 24, 42], [ 96, 114]]) If the elements of `factor` are not integer multiples of the dimensions of `a`, the remainding cells are discarded. >>> rebin(a, factor=(2, 2), func=numpy.sum) array([[16, 24, 32], [72, 80, 88]])
[ "u", "Aggregate", "data", "from", "the", "input", "array", "a", "into", "rectangular", "tiles", "." ]
python
train
34.423729
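A quick run of the defaults, following directly from the code above: a scalar factor broadcasts to every axis, and func falls back to numpy.mean:

import numpy as np
from rebin import rebin

a = np.arange(24).reshape(4, 6)
rebin(a, factor=2)
# array([[ 3.5,  5.5,  7.5],
#        [15.5, 17.5, 19.5]])  -- the mean of each 2x2 tile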
SFDO-Tooling/CumulusCI
cumulusci/core/tasks.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/tasks.py#L166-L172
def _log_begin(self): """ Log the beginning of the task execution """ self.logger.info("Beginning task: %s", self.__class__.__name__) if self.salesforce_task and not self.flow: self.logger.info("%15s %s", "As user:", self.org_config.username) self.logger.info("%15s %s", "In org:", self.org_config.org_id) self.logger.info("")
[ "def", "_log_begin", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Beginning task: %s\"", ",", "self", ".", "__class__", ".", "__name__", ")", "if", "self", ".", "salesforce_task", "and", "not", "self", ".", "flow", ":", "self", ".", "logger", ".", "info", "(", "\"%15s %s\"", ",", "\"As user:\"", ",", "self", ".", "org_config", ".", "username", ")", "self", ".", "logger", ".", "info", "(", "\"%15s %s\"", ",", "\"In org:\"", ",", "self", ".", "org_config", ".", "org_id", ")", "self", ".", "logger", ".", "info", "(", "\"\"", ")" ]
Log the beginning of the task execution
[ "Log", "the", "beginning", "of", "the", "task", "execution" ]
python
train
53.714286
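Given the format strings above, the %15s pad right-aligns the label column; reproduced here with made-up task, user and org values:

print("Beginning task: %s" % "Deploy")
print("%15s %s" % ("As user:", "admin@example.com"))
print("%15s %s" % ("In org:", "00D000000000001"))
# Beginning task: Deploy
#        As user: admin@example.com
#         In org: 00D000000000001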
pytorch/vision
torchvision/transforms/functional.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L212-L246
def resize(img, size, interpolation=Image.BILINEAR): r"""Resize the input PIL Image to the given size. Args: img (PIL Image): Image to be resized. size (sequence or int): Desired output size. If size is a sequence like (h, w), the output size will be matched to this. If size is an int, the smaller edge of the image will be matched to this number maintaining the aspect ratio. i.e., if height > width, then image will be rescaled to :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)` interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR`` Returns: PIL Image: Resized image. """ if not _is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)): raise TypeError('Got inappropriate size arg: {}'.format(size)) if isinstance(size, int): w, h = img.size if (w <= h and w == size) or (h <= w and h == size): return img if w < h: ow = size oh = int(size * h / w) return img.resize((ow, oh), interpolation) else: oh = size ow = int(size * w / h) return img.resize((ow, oh), interpolation) else: return img.resize(size[::-1], interpolation)
[ "def", "resize", "(", "img", ",", "size", ",", "interpolation", "=", "Image", ".", "BILINEAR", ")", ":", "if", "not", "_is_pil_image", "(", "img", ")", ":", "raise", "TypeError", "(", "'img should be PIL Image. Got {}'", ".", "format", "(", "type", "(", "img", ")", ")", ")", "if", "not", "(", "isinstance", "(", "size", ",", "int", ")", "or", "(", "isinstance", "(", "size", ",", "Iterable", ")", "and", "len", "(", "size", ")", "==", "2", ")", ")", ":", "raise", "TypeError", "(", "'Got inappropriate size arg: {}'", ".", "format", "(", "size", ")", ")", "if", "isinstance", "(", "size", ",", "int", ")", ":", "w", ",", "h", "=", "img", ".", "size", "if", "(", "w", "<=", "h", "and", "w", "==", "size", ")", "or", "(", "h", "<=", "w", "and", "h", "==", "size", ")", ":", "return", "img", "if", "w", "<", "h", ":", "ow", "=", "size", "oh", "=", "int", "(", "size", "*", "h", "/", "w", ")", "return", "img", ".", "resize", "(", "(", "ow", ",", "oh", ")", ",", "interpolation", ")", "else", ":", "oh", "=", "size", "ow", "=", "int", "(", "size", "*", "w", "/", "h", ")", "return", "img", ".", "resize", "(", "(", "ow", ",", "oh", ")", ",", "interpolation", ")", "else", ":", "return", "img", ".", "resize", "(", "size", "[", ":", ":", "-", "1", "]", ",", "interpolation", ")" ]
r"""Resize the input PIL Image to the given size. Args: img (PIL Image): Image to be resized. size (sequence or int): Desired output size. If size is a sequence like (h, w), the output size will be matched to this. If size is an int, the smaller edge of the image will be matched to this number maintaing the aspect ratio. i.e, if height > width, then image will be rescaled to :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)` interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR`` Returns: PIL Image: Resized image.
[ "r", "Resize", "the", "input", "PIL", "Image", "to", "the", "given", "size", "." ]
python
test
41.285714
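A worked call exercising the int-size branch: the smaller edge (the height here) becomes 200 and the width scales by the same ratio:

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new('RGB', (400, 300))  # width 400, height 300
out = F.resize(img, 200)
out.size  # (266, 200): ow = int(200 * 400 / 300)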
mitsei/dlkit
dlkit/json_/learning/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/searches.py#L345-L356
def get_proficiencies(self): """Gets the proficiency list resulting from a search. return: (osid.learning.ProficiencyList) - the proficiency list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.ProficiencyList(self._results, runtime=self._runtime)
[ "def", "get_proficiencies", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "ProficiencyList", "(", "self", ".", "_results", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the proficiency list resulting from a search. return: (osid.learning.ProficiencyList) - the proficiency list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "proficiency", "list", "resulting", "from", "a", "search", "." ]
python
train
41.166667
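The one-shot contract means the list can be pulled exactly once per results object; sketched with a hypothetical search_results instance and the dlkit errors module used above:

proficiencies = search_results.get_proficiencies()  # first call succeeds
try:
    search_results.get_proficiencies()              # second call
except errors.IllegalState:
    pass  # 'List has already been retrieved.'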