Dataset columns:
- repo: string (length 7 to 55)
- path: string (length 4 to 223)
- url: string (length 87 to 315)
- code: string (length 75 to 104k)
- code_tokens: list
- docstring: string (length 1 to 46.9k)
- docstring_tokens: list
- language: string (1 distinct value)
- partition: string (3 distinct values)
- avg_line_len: float64 (7.91 to 980)
johnbywater/eventsourcing
eventsourcing/domain/model/array.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/array.py#L372-L397
def calc_parent(self, i, j, h): """ Returns get_big_array and end of span of parent sequence that contains given child. """ N = self.repo.array_size c_i = i c_j = j c_h = h # Calculate the number of the sequence in its row (sequences # with same height), from left to right, starting from 0. c_n = c_i // (N ** c_h) p_n = c_n // N # Position of the child ID in the parent array. p_p = c_n % N # Parent height is child height plus one. p_h = c_h + 1 # Span of sequences in parent row is max size N, to the power of the height. span = N ** p_h # Calculate parent i and j. p_i = p_n * span p_j = p_i + span # Check the parent i,j bounds the child i,j, ie child span is contained by parent span. assert p_i <= c_i, 'i greater on parent than child: {}'.format(p_i, p_j) assert p_j >= c_j, 'j less on parent than child: {}'.format(p_i, p_j) # Return parent i, j, h, p. return p_i, p_j, p_h, p_p
[ "def", "calc_parent", "(", "self", ",", "i", ",", "j", ",", "h", ")", ":", "N", "=", "self", ".", "repo", ".", "array_size", "c_i", "=", "i", "c_j", "=", "j", "c_h", "=", "h", "# Calculate the number of the sequence in its row (sequences", "# with same height), from left to right, starting from 0.", "c_n", "=", "c_i", "//", "(", "N", "**", "c_h", ")", "p_n", "=", "c_n", "//", "N", "# Position of the child ID in the parent array.", "p_p", "=", "c_n", "%", "N", "# Parent height is child height plus one.", "p_h", "=", "c_h", "+", "1", "# Span of sequences in parent row is max size N, to the power of the height.", "span", "=", "N", "**", "p_h", "# Calculate parent i and j.", "p_i", "=", "p_n", "*", "span", "p_j", "=", "p_i", "+", "span", "# Check the parent i,j bounds the child i,j, ie child span is contained by parent span.", "assert", "p_i", "<=", "c_i", ",", "'i greater on parent than child: {}'", ".", "format", "(", "p_i", ",", "p_j", ")", "assert", "p_j", ">=", "c_j", ",", "'j less on parent than child: {}'", ".", "format", "(", "p_i", ",", "p_j", ")", "# Return parent i, j, h, p.", "return", "p_i", ",", "p_j", ",", "p_h", ",", "p_p" ]
Returns the start and end of the span of the parent sequence that contains the given child, along with the parent's height and the child's position within it.
[ "Returns", "get_big_array", "and", "end", "of", "span", "of", "parent", "sequence", "that", "contains", "given", "child", "." ]
python
train
40.884615
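The core of calc_parent above is plain integer arithmetic mapping a child span to its parent. Below is a minimal standalone sketch of that arithmetic, with a made-up array size N and child span (not values from the repository):

N = 4                  # hypothetical array size (self.repo.array_size in the original)
i, j, h = 32, 36, 1    # hypothetical child span and height

c_n = i // (N ** h)        # number of the child sequence in its row: 8
p_n = c_n // N             # number of the parent sequence in its row: 2
p_p = c_n % N              # child's position inside the parent array: 0
p_h = h + 1                # parent height is child height plus one: 2
span = N ** p_h            # width of a parent span: 16
p_i, p_j = p_n * span, p_n * span + span
print(p_i, p_j, p_h, p_p)  # 32 48 2 0  -- the child span [32, 36) lies inside [32, 48)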
tomprince/nomenclature
nomenclature/syscalls.py
https://github.com/tomprince/nomenclature/blob/81af4a590034f75211f028d485c0d83fceda5af2/nomenclature/syscalls.py#L27-L38
def setns(fd, nstype): """ Reassociate thread with a namespace :param fd int: The file descriptor referreing to one of the namespace entries in a :directory::`/proc/<pid>/ns/` directory. :param nstype int: The type of namespace the calling thread should be reasscoiated with. """ res = lib.setns(fd, nstype) if res != 0: _check_error(ffi.errno)
[ "def", "setns", "(", "fd", ",", "nstype", ")", ":", "res", "=", "lib", ".", "setns", "(", "fd", ",", "nstype", ")", "if", "res", "!=", "0", ":", "_check_error", "(", "ffi", ".", "errno", ")" ]
Reassociate thread with a namespace :param fd int: The file descriptor referring to one of the namespace entries in a :directory::`/proc/<pid>/ns/` directory. :param nstype int: The type of namespace the calling thread should be reassociated with.
[ "Reassociate", "thread", "with", "a", "namespace" ]
python
train
32.166667
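setns here is a thin cffi wrapper around the Linux setns(2) syscall. A hedged standalone equivalent using only ctypes is sketched below (Linux-only; the PID and namespace path are illustrative):

import ctypes
import os

libc = ctypes.CDLL("libc.so.6", use_errno=True)

def setns(fd, nstype=0):
    # nstype=0 lets the kernel accept any namespace type for the given fd
    if libc.setns(fd, nstype) != 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))

# Illustrative usage (requires privileges and an existing PID):
# fd = os.open("/proc/1234/ns/net", os.O_RDONLY)
# setns(fd)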
iotile/coretools
iotilebuild/iotile/build/config/site_scons/autobuild.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/autobuild.py#L233-L259
def autobuild_trub_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None, app_info=None, use_safeupdate=False): """Build a trub script that loads given firmware into the given slots. slot_assignments should be a list of tuples in the following form: ("slot X" or "controller", firmware_image_name) The output of this autobuild action will be a trub script in build/output/<file_name> that assigns the given firmware to each slot in the order specified in the slot_assignments list. Args: file_name (str): The name of the output file that we should create. This file name should end in .trub slot_assignments (list of (str, str)): A list of tuples containing the slot name and the firmware image that we should use to build our update script. Optional os_info (tuple(int, str)): A tuple of OS version tag and X.Y version number that will be set as part of the OTA script if included. Optional. sensor_graph (str): Name of sgf file. Optional. app_info (tuple(int, str)): A tuple of App version tag and X.Y version number that will be set as part of the OTA script if included. Optional. use_safeupdate (bool): If True, Enables safemode before the firmware update records, then disables them after the firmware update records. """ build_update_script(file_name, slot_assignments, os_info, sensor_graph, app_info, use_safeupdate)
[ "def", "autobuild_trub_script", "(", "file_name", ",", "slot_assignments", "=", "None", ",", "os_info", "=", "None", ",", "sensor_graph", "=", "None", ",", "app_info", "=", "None", ",", "use_safeupdate", "=", "False", ")", ":", "build_update_script", "(", "file_name", ",", "slot_assignments", ",", "os_info", ",", "sensor_graph", ",", "app_info", ",", "use_safeupdate", ")" ]
Build a trub script that loads given firmware into the given slots. slot_assignments should be a list of tuples in the following form: ("slot X" or "controller", firmware_image_name) The output of this autobuild action will be a trub script in build/output/<file_name> that assigns the given firmware to each slot in the order specified in the slot_assignments list. Args: file_name (str): The name of the output file that we should create. This file name should end in .trub slot_assignments (list of (str, str)): A list of tuples containing the slot name and the firmware image that we should use to build our update script. Optional os_info (tuple(int, str)): A tuple of OS version tag and X.Y version number that will be set as part of the OTA script if included. Optional. sensor_graph (str): Name of sgf file. Optional. app_info (tuple(int, str)): A tuple of App version tag and X.Y version number that will be set as part of the OTA script if included. Optional. use_safeupdate (bool): If True, Enables safemode before the firmware update records, then disables them after the firmware update records.
[ "Build", "a", "trub", "script", "that", "loads", "given", "firmware", "into", "the", "given", "slots", "." ]
python
train
55.703704
Tanganelli/CoAPthon3
coapthon/layers/requestlayer.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/requestlayer.py#L46-L72
def _handle_get(self, transaction): """ Handle GET requests :type transaction: Transaction :param transaction: the transaction that owns the request :rtype : Transaction :return: the edited transaction with the response to the request """ path = str("/" + transaction.request.uri_path) transaction.response = Response() transaction.response.destination = transaction.request.source transaction.response.token = transaction.request.token if path == defines.DISCOVERY_URL: transaction = self._server.resourceLayer.discover(transaction) else: try: resource = self._server.root[path] except KeyError: resource = None if resource is None or path == '/': # Not Found transaction.response.code = defines.Codes.NOT_FOUND.number else: transaction.resource = resource transaction = self._server.resourceLayer.get_resource(transaction) return transaction
[ "def", "_handle_get", "(", "self", ",", "transaction", ")", ":", "path", "=", "str", "(", "\"/\"", "+", "transaction", ".", "request", ".", "uri_path", ")", "transaction", ".", "response", "=", "Response", "(", ")", "transaction", ".", "response", ".", "destination", "=", "transaction", ".", "request", ".", "source", "transaction", ".", "response", ".", "token", "=", "transaction", ".", "request", ".", "token", "if", "path", "==", "defines", ".", "DISCOVERY_URL", ":", "transaction", "=", "self", ".", "_server", ".", "resourceLayer", ".", "discover", "(", "transaction", ")", "else", ":", "try", ":", "resource", "=", "self", ".", "_server", ".", "root", "[", "path", "]", "except", "KeyError", ":", "resource", "=", "None", "if", "resource", "is", "None", "or", "path", "==", "'/'", ":", "# Not Found", "transaction", ".", "response", ".", "code", "=", "defines", ".", "Codes", ".", "NOT_FOUND", ".", "number", "else", ":", "transaction", ".", "resource", "=", "resource", "transaction", "=", "self", ".", "_server", ".", "resourceLayer", ".", "get_resource", "(", "transaction", ")", "return", "transaction" ]
Handle GET requests :type transaction: Transaction :param transaction: the transaction that owns the request :rtype : Transaction :return: the edited transaction with the response to the request
[ "Handle", "GET", "requests" ]
python
train
40.222222
amorison/loam
loam/cli.py
https://github.com/amorison/loam/blob/a566c943a75e068a4510099331a1ddfe5bbbdd94/loam/cli.py#L248-L292
def zsh_complete(self, path, cmd, *cmds, sourceable=False): """Write zsh compdef script. Args: path (path-like): desired path of the compdef script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion. """ grouping = internal.zsh_version() >= (5, 4) path = pathlib.Path(path) firstline = ['#compdef', cmd] firstline.extend(cmds) subcmds = list(self.subcmds.keys()) with path.open('w') as zcf: print(*firstline, end='\n\n', file=zcf) # main function print('function _{} {{'.format(cmd), file=zcf) print('local line', file=zcf) print('_arguments -C', end=BLK, file=zcf) if subcmds: # list of subcommands and their description substrs = ["{}\\:'{}'".format(sub, self.subcmds[sub].help) for sub in subcmds] print('"1:Commands:(({}))"'.format(' '.join(substrs)), end=BLK, file=zcf) self._zsh_comp_command(zcf, None, grouping) if subcmds: print("'*::arg:->args'", file=zcf) print('case $line[1] in', file=zcf) for sub in subcmds: print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd), file=zcf) print('esac', file=zcf) print('}', file=zcf) # all subcommand completion handlers for sub in subcmds: print('\nfunction _{}_{} {{'.format(cmd, sub), file=zcf) print('_arguments', end=BLK, file=zcf) self._zsh_comp_command(zcf, sub, grouping) print('}', file=zcf) if sourceable: print('\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf)
[ "def", "zsh_complete", "(", "self", ",", "path", ",", "cmd", ",", "*", "cmds", ",", "sourceable", "=", "False", ")", ":", "grouping", "=", "internal", ".", "zsh_version", "(", ")", ">=", "(", "5", ",", "4", ")", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "firstline", "=", "[", "'#compdef'", ",", "cmd", "]", "firstline", ".", "extend", "(", "cmds", ")", "subcmds", "=", "list", "(", "self", ".", "subcmds", ".", "keys", "(", ")", ")", "with", "path", ".", "open", "(", "'w'", ")", "as", "zcf", ":", "print", "(", "*", "firstline", ",", "end", "=", "'\\n\\n'", ",", "file", "=", "zcf", ")", "# main function", "print", "(", "'function _{} {{'", ".", "format", "(", "cmd", ")", ",", "file", "=", "zcf", ")", "print", "(", "'local line'", ",", "file", "=", "zcf", ")", "print", "(", "'_arguments -C'", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "if", "subcmds", ":", "# list of subcommands and their description", "substrs", "=", "[", "\"{}\\\\:'{}'\"", ".", "format", "(", "sub", ",", "self", ".", "subcmds", "[", "sub", "]", ".", "help", ")", "for", "sub", "in", "subcmds", "]", "print", "(", "'\"1:Commands:(({}))\"'", ".", "format", "(", "' '", ".", "join", "(", "substrs", ")", ")", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "self", ".", "_zsh_comp_command", "(", "zcf", ",", "None", ",", "grouping", ")", "if", "subcmds", ":", "print", "(", "\"'*::arg:->args'\"", ",", "file", "=", "zcf", ")", "print", "(", "'case $line[1] in'", ",", "file", "=", "zcf", ")", "for", "sub", "in", "subcmds", ":", "print", "(", "'{sub}) _{cmd}_{sub} ;;'", ".", "format", "(", "sub", "=", "sub", ",", "cmd", "=", "cmd", ")", ",", "file", "=", "zcf", ")", "print", "(", "'esac'", ",", "file", "=", "zcf", ")", "print", "(", "'}'", ",", "file", "=", "zcf", ")", "# all subcommand completion handlers", "for", "sub", "in", "subcmds", ":", "print", "(", "'\\nfunction _{}_{} {{'", ".", "format", "(", "cmd", ",", "sub", ")", ",", "file", "=", "zcf", ")", "print", "(", "'_arguments'", ",", "end", "=", "BLK", ",", "file", "=", "zcf", ")", "self", ".", "_zsh_comp_command", "(", "zcf", ",", "sub", ",", "grouping", ")", "print", "(", "'}'", ",", "file", "=", "zcf", ")", "if", "sourceable", ":", "print", "(", "'\\ncompdef _{0} {0}'", ".", "format", "(", "cmd", ")", ",", "*", "cmds", ",", "file", "=", "zcf", ")" ]
Write zsh compdef script. Args: path (path-like): desired path of the compdef script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion.
[ "Write", "zsh", "compdef", "script", "." ]
python
test
46
kiwiz/gkeepapi
gkeepapi/node.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L1464-L1476
def add(self, text, checked=False, sort=None): """Add a new sub item to the list. This item must already be attached to a list. Args: text (str): The text. checked (bool): Whether this item is checked. sort (int): Item id for sorting. """ if self.parent is None: raise exception.InvalidException('Item has no parent') node = self.parent.add(text, checked, sort) self.indent(node) return node
[ "def", "add", "(", "self", ",", "text", ",", "checked", "=", "False", ",", "sort", "=", "None", ")", ":", "if", "self", ".", "parent", "is", "None", ":", "raise", "exception", ".", "InvalidException", "(", "'Item has no parent'", ")", "node", "=", "self", ".", "parent", ".", "add", "(", "text", ",", "checked", ",", "sort", ")", "self", ".", "indent", "(", "node", ")", "return", "node" ]
Add a new sub item to the list. This item must already be attached to a list. Args: text (str): The text. checked (bool): Whether this item is checked. sort (int): Item id for sorting.
[ "Add", "a", "new", "sub", "item", "to", "the", "list", ".", "This", "item", "must", "already", "be", "attached", "to", "a", "list", "." ]
python
train
37.230769
edibledinos/pwnypack
pwnypack/codec.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/codec.py#L79-L172
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None): """ Produce a series of bytestrings that when XORed together end up being equal to ``data`` and only contain characters from the giving ``alphabet``. The initial state (or previous state) can be given as ``iv``. Arguments: data (bytes): The data to recreate as a series of XOR operations. alphabet (bytes): The bytestring containing the allowed characters for the XOR values. If ``None``, all characters except NUL bytes, carriage returns and newlines will be allowed. max_depth (int): The maximum depth to look for a solution. min_depth (int): The minimum depth to look for a solution. iv (bytes): Initialization vector. If ``None``, it will be assumed the operation starts at an all zero string. Returns: A list of bytestrings that, when XOR'ed with ``iv`` (or just eachother if ``iv` is not providede) will be the same as ``data``. Examples: Produce a series of strings that when XORed together will result in the string 'pwnypack' using only ASCII characters in the range 65 to 96: >>> from pwny import * >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97))) [b'````````', b'AAAAABAA', b'QVOXQCBJ'] >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ') 'pwnypack' """ if alphabet is None: alphabet = set(i for i in range(256) if i not in (0, 10, 13)) else: alphabet = set(six.iterbytes(alphabet)) if iv is None: iv = b'\0' * len(data) if len(data) != len(iv): raise ValueError('length of iv differs from data') if not min_depth and data == iv: return [] data = xor(data, iv) # Pre-flight check to see if we have all the bits we need. mask = 0 for ch in alphabet: mask |= ch mask = ~mask # Map all bytes in data into a {byte: [pos...]} dictionary, check # if we have enough bits along the way. data_map_tmpl = {} for i, ch in enumerate(six.iterbytes(data)): if ch & mask: raise ValueError('Alphabet does not contain enough bits.') data_map_tmpl.setdefault(ch, []).append(i) # Let's try to find a solution. for depth in range(max(min_depth, 1), max_depth + 1): # Prepare for round. data_map = data_map_tmpl.copy() results = [[None] * len(data) for _ in range(depth)] for values in itertools.product(*([alphabet] * (depth - 1))): # Prepare cumulative mask for this combination of alphabet. mask = 0 for value in values: mask ^= value for ch in list(data_map): r = ch ^ mask if r in alphabet: # Found a solution for this character, mark the result. pos = data_map.pop(ch) for p in pos: results[0][p] = r for i, value in enumerate(values): results[i + 1][p] = value if not data_map: # Aaaand.. We're done! return [ b''.join(six.int2byte(b) for b in r) for r in results ] # No solution found at this depth. Increase depth, try again. raise ValueError('No solution found.')
[ "def", "find_xor_mask", "(", "data", ",", "alphabet", "=", "None", ",", "max_depth", "=", "3", ",", "min_depth", "=", "0", ",", "iv", "=", "None", ")", ":", "if", "alphabet", "is", "None", ":", "alphabet", "=", "set", "(", "i", "for", "i", "in", "range", "(", "256", ")", "if", "i", "not", "in", "(", "0", ",", "10", ",", "13", ")", ")", "else", ":", "alphabet", "=", "set", "(", "six", ".", "iterbytes", "(", "alphabet", ")", ")", "if", "iv", "is", "None", ":", "iv", "=", "b'\\0'", "*", "len", "(", "data", ")", "if", "len", "(", "data", ")", "!=", "len", "(", "iv", ")", ":", "raise", "ValueError", "(", "'length of iv differs from data'", ")", "if", "not", "min_depth", "and", "data", "==", "iv", ":", "return", "[", "]", "data", "=", "xor", "(", "data", ",", "iv", ")", "# Pre-flight check to see if we have all the bits we need.", "mask", "=", "0", "for", "ch", "in", "alphabet", ":", "mask", "|=", "ch", "mask", "=", "~", "mask", "# Map all bytes in data into a {byte: [pos...]} dictionary, check", "# if we have enough bits along the way.", "data_map_tmpl", "=", "{", "}", "for", "i", ",", "ch", "in", "enumerate", "(", "six", ".", "iterbytes", "(", "data", ")", ")", ":", "if", "ch", "&", "mask", ":", "raise", "ValueError", "(", "'Alphabet does not contain enough bits.'", ")", "data_map_tmpl", ".", "setdefault", "(", "ch", ",", "[", "]", ")", ".", "append", "(", "i", ")", "# Let's try to find a solution.", "for", "depth", "in", "range", "(", "max", "(", "min_depth", ",", "1", ")", ",", "max_depth", "+", "1", ")", ":", "# Prepare for round.", "data_map", "=", "data_map_tmpl", ".", "copy", "(", ")", "results", "=", "[", "[", "None", "]", "*", "len", "(", "data", ")", "for", "_", "in", "range", "(", "depth", ")", "]", "for", "values", "in", "itertools", ".", "product", "(", "*", "(", "[", "alphabet", "]", "*", "(", "depth", "-", "1", ")", ")", ")", ":", "# Prepare cumulative mask for this combination of alphabet.", "mask", "=", "0", "for", "value", "in", "values", ":", "mask", "^=", "value", "for", "ch", "in", "list", "(", "data_map", ")", ":", "r", "=", "ch", "^", "mask", "if", "r", "in", "alphabet", ":", "# Found a solution for this character, mark the result.", "pos", "=", "data_map", ".", "pop", "(", "ch", ")", "for", "p", "in", "pos", ":", "results", "[", "0", "]", "[", "p", "]", "=", "r", "for", "i", ",", "value", "in", "enumerate", "(", "values", ")", ":", "results", "[", "i", "+", "1", "]", "[", "p", "]", "=", "value", "if", "not", "data_map", ":", "# Aaaand.. We're done!", "return", "[", "b''", ".", "join", "(", "six", ".", "int2byte", "(", "b", ")", "for", "b", "in", "r", ")", "for", "r", "in", "results", "]", "# No solution found at this depth. Increase depth, try again.", "raise", "ValueError", "(", "'No solution found.'", ")" ]
Produce a series of bytestrings that when XORed together end up being equal to ``data`` and only contain characters from the given ``alphabet``. The initial state (or previous state) can be given as ``iv``. Arguments: data (bytes): The data to recreate as a series of XOR operations. alphabet (bytes): The bytestring containing the allowed characters for the XOR values. If ``None``, all characters except NUL bytes, carriage returns and newlines will be allowed. max_depth (int): The maximum depth to look for a solution. min_depth (int): The minimum depth to look for a solution. iv (bytes): Initialization vector. If ``None``, it will be assumed the operation starts at an all zero string. Returns: A list of bytestrings that, when XOR'ed with ``iv`` (or just with each other if ``iv`` is not provided) will be the same as ``data``. Examples: Produce a series of strings that when XORed together will result in the string 'pwnypack' using only ASCII characters in the range 65 to 96: >>> from pwny import * >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97))) [b'````````', b'AAAAABAA', b'QVOXQCBJ'] >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ') 'pwnypack'
[ "Produce", "a", "series", "of", "bytestrings", "that", "when", "XORed", "together", "end", "up", "being", "equal", "to", "data", "and", "only", "contain", "characters", "from", "the", "giving", "alphabet", ".", "The", "initial", "state", "(", "or", "previous", "state", ")", "can", "be", "given", "as", "iv", "." ]
python
train
36.670213
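The doctest in this row claims that the three returned masks XOR back to b'pwnypack'. That claim can be checked with a few lines of standard-library Python, without installing pwnypack:

parts = [b'````````', b'AAAAABAA', b'QVOXQCBJ']
out = bytes(a ^ b ^ c for a, b, c in zip(*parts))
print(out)   # b'pwnypack'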
hyperledger/sawtooth-core
cli/sawtooth_cli/admin_command/config.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/admin_command/config.py#L36-L68
def _get_dir(toml_config_setting, sawtooth_home_dir, windows_dir, default_dir): """Determines the directory path based on configuration. Arguments: toml_config_setting (str): The name of the config setting related to the directory which will appear in path.toml. sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME environment variable. For example, for 'data' if the data directory is $SAWTOOTH_HOME/data. windows_dir (str): The windows path relative to the computed base directory. default_dir (str): The default path on Linux. Returns: directory (str): The path. """ conf_file = os.path.join(_get_config_dir(), 'path.toml') if os.path.exists(conf_file): with open(conf_file) as fd: raw_config = fd.read() toml_config = toml.loads(raw_config) if toml_config_setting in toml_config: return toml_config[toml_config_setting] if 'SAWTOOTH_HOME' in os.environ: return os.path.join(os.environ['SAWTOOTH_HOME'], sawtooth_home_dir) if os.name == 'nt': base_dir = \ os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) return os.path.join(base_dir, windows_dir) return default_dir
[ "def", "_get_dir", "(", "toml_config_setting", ",", "sawtooth_home_dir", ",", "windows_dir", ",", "default_dir", ")", ":", "conf_file", "=", "os", ".", "path", ".", "join", "(", "_get_config_dir", "(", ")", ",", "'path.toml'", ")", "if", "os", ".", "path", ".", "exists", "(", "conf_file", ")", ":", "with", "open", "(", "conf_file", ")", "as", "fd", ":", "raw_config", "=", "fd", ".", "read", "(", ")", "toml_config", "=", "toml", ".", "loads", "(", "raw_config", ")", "if", "toml_config_setting", "in", "toml_config", ":", "return", "toml_config", "[", "toml_config_setting", "]", "if", "'SAWTOOTH_HOME'", "in", "os", ".", "environ", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'SAWTOOTH_HOME'", "]", ",", "sawtooth_home_dir", ")", "if", "os", ".", "name", "==", "'nt'", ":", "base_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", ")", "return", "os", ".", "path", ".", "join", "(", "base_dir", ",", "windows_dir", ")", "return", "default_dir" ]
Determines the directory path based on configuration. Arguments: toml_config_setting (str): The name of the config setting related to the directory which will appear in path.toml. sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME environment variable. For example, for 'data' if the data directory is $SAWTOOTH_HOME/data. windows_dir (str): The windows path relative to the computed base directory. default_dir (str): The default path on Linux. Returns: directory (str): The path.
[ "Determines", "the", "directory", "path", "based", "on", "configuration", "." ]
python
train
38.666667
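The useful thing to remember about _get_dir is its precedence order: an explicit path.toml entry wins over SAWTOOTH_HOME, which wins over the built-in default (with a Windows-specific fallback in between). A small sketch of that precedence, with the Windows branch omitted and made-up paths:

import os

def resolve(toml_config, setting, home_subdir, default_dir):
    if setting in toml_config:                       # 1. explicit path.toml entry
        return toml_config[setting]
    if 'SAWTOOTH_HOME' in os.environ:                # 2. $SAWTOOTH_HOME/<subdir>
        return os.path.join(os.environ['SAWTOOTH_HOME'], home_subdir)
    return default_dir                               # 3. built-in default

os.environ['SAWTOOTH_HOME'] = '/opt/sawtooth'
print(resolve({}, 'data_dir', 'data', '/var/lib/sawtooth'))                          # /opt/sawtooth/data
print(resolve({'data_dir': '/srv/data'}, 'data_dir', 'data', '/var/lib/sawtooth'))   # /srv/data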
cuihantao/andes
andes/utils/math.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/utils/math.py#L118-L120
def sort_idx(m, reverse=False): """Return the indices of m in sorted order (default: ascending order)""" return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse)
[ "def", "sort_idx", "(", "m", ",", "reverse", "=", "False", ")", ":", "return", "sorted", "(", "range", "(", "len", "(", "m", ")", ")", ",", "key", "=", "lambda", "k", ":", "m", "[", "k", "]", ",", "reverse", "=", "reverse", ")" ]
Return the indices of m in sorted order (default: ascending order)
[ "Return", "the", "indices", "of", "m", "in", "sorted", "order", "(", "default", ":", "ascending", "order", ")" ]
python
train
58.666667
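A short usage sketch for sort_idx (values made up):

m = [0.3, 1.2, 0.1]
idx = sorted(range(len(m)), key=lambda k: m[k])   # what sort_idx(m) returns
print(idx)                   # [2, 0, 1]
print([m[k] for k in idx])   # [0.1, 0.3, 1.2]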
ravenac95/lxc4u
lxc4u/overlayutils.py
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/overlayutils.py#L62-L67
def meta(self): """Data for loading later""" mount_points = [] for overlay in self.overlays: mount_points.append(overlay.mount_point) return [self.end_dir, self.start_dir, mount_points]
[ "def", "meta", "(", "self", ")", ":", "mount_points", "=", "[", "]", "for", "overlay", "in", "self", ".", "overlays", ":", "mount_points", ".", "append", "(", "overlay", ".", "mount_point", ")", "return", "[", "self", ".", "end_dir", ",", "self", ".", "start_dir", ",", "mount_points", "]" ]
Data for loading later
[ "Data", "for", "loading", "later" ]
python
train
37.333333
google/grr
grr/server/grr_response_server/gui/archive_generator.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/archive_generator.py#L216-L233
def _WriteFileChunk(self, chunk): """Yields binary chunks, respecting archive file headers and footers. Args: chunk: the StreamedFileChunk to be written """ if chunk.chunk_index == 0: # Make sure size of the original file is passed. It's required # when output_writer is StreamingTarWriter. st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0)) target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix) yield self.archive_generator.WriteFileHeader(target_path, st=st) yield self.archive_generator.WriteFileChunk(chunk.data) if chunk.chunk_index == chunk.total_chunks - 1: yield self.archive_generator.WriteFileFooter() self.archived_files.add(chunk.client_path)
[ "def", "_WriteFileChunk", "(", "self", ",", "chunk", ")", ":", "if", "chunk", ".", "chunk_index", "==", "0", ":", "# Make sure size of the original file is passed. It's required", "# when output_writer is StreamingTarWriter.", "st", "=", "os", ".", "stat_result", "(", "(", "0o644", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "chunk", ".", "total_size", ",", "0", ",", "0", ",", "0", ")", ")", "target_path", "=", "_ClientPathToString", "(", "chunk", ".", "client_path", ",", "prefix", "=", "self", ".", "prefix", ")", "yield", "self", ".", "archive_generator", ".", "WriteFileHeader", "(", "target_path", ",", "st", "=", "st", ")", "yield", "self", ".", "archive_generator", ".", "WriteFileChunk", "(", "chunk", ".", "data", ")", "if", "chunk", ".", "chunk_index", "==", "chunk", ".", "total_chunks", "-", "1", ":", "yield", "self", ".", "archive_generator", ".", "WriteFileFooter", "(", ")", "self", ".", "archived_files", ".", "add", "(", "chunk", ".", "client_path", ")" ]
Yields binary chunks, respecting archive file headers and footers. Args: chunk: the StreamedFileChunk to be written
[ "Yields", "binary", "chunks", "respecting", "archive", "file", "headers", "and", "footers", "." ]
python
train
41.722222
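The os.stat_result trick used above builds a fake stat record just so the tar writer sees the file size in the st_size slot (index 6 of the 10-tuple). A quick standalone check, with an illustrative size:

import os

st = os.stat_result((0o644, 0, 0, 0, 0, 0, 4096, 0, 0, 0))   # 4096 stands in for chunk.total_size
print(oct(st.st_mode), st.st_size)   # 0o644 4096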
secdev/scapy
scapy/layers/radius.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/radius.py#L250-L258
def dispatch_hook(cls, _pkt=None, *args, **kargs): """ Returns the right RadiusAttribute class for the given data. """ if _pkt: attr_type = orb(_pkt[0]) return cls.registered_attributes.get(attr_type, cls) return cls
[ "def", "dispatch_hook", "(", "cls", ",", "_pkt", "=", "None", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "if", "_pkt", ":", "attr_type", "=", "orb", "(", "_pkt", "[", "0", "]", ")", "return", "cls", ".", "registered_attributes", ".", "get", "(", "attr_type", ",", "cls", ")", "return", "cls" ]
Returns the right RadiusAttribute class for the given data.
[ "Returns", "the", "right", "RadiusAttribute", "class", "for", "the", "given", "data", "." ]
python
train
30.333333
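dispatch_hook implements scapy's dispatch-by-first-byte pattern: peek at the RADIUS attribute type byte and pick a registered subclass, falling back to the generic class. A toy standalone version of the same lookup (the registry and names below are stand-ins, not scapy's actual classes):

registered = {1: "UserNameAttr", 26: "VendorSpecificAttr"}   # made-up registry

def dispatch(raw, default="GenericAttr"):
    if raw:
        return registered.get(raw[0], default)   # raw[0] is the attribute type byte
    return default

print(dispatch(b"\x01\x06user"))   # UserNameAttr
print(dispatch(b"\xff\x02"))       # GenericAttr
print(dispatch(b""))               # GenericAttr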
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1531-L1587
def count_cores_in_state(self, state, app_id): """Count the number of cores in a given state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` or iterable Count the number of cores currently in this state. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string or an iterable of these, in which case the total count will be returned. """ if (isinstance(state, collections.Iterable) and not isinstance(state, str)): # If the state is iterable then call for each state and return the # sum. return sum(self.count_cores_in_state(s, app_id) for s in state) if isinstance(state, str): try: state = getattr(consts.AppState, state) except AttributeError: # The state name is not present in consts.AppSignal! The next # test will throw an appropriate exception since no string can # be "in" an IntEnum. pass if state not in consts.AppState: raise ValueError( "count_cores_in_state: Unknown state {}".format( repr(state))) # TODO Determine a way to nicely express a way to use the region data # stored in arg3. region = 0x0000ffff # Largest possible machine, level 0 level = (region >> 16) & 0x3 mask = region & 0x0000ffff # Construct the packet arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count] arg2 = ((level << 26) | (1 << 22) | (consts.AppDiagnosticSignal.count << 20) | (state << 16) | (0xff << 8) | app_id) # App mask for 1 app_id = 0xff arg3 = mask # Transmit and return the count return self._send_scp( 255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1
[ "def", "count_cores_in_state", "(", "self", ",", "state", ",", "app_id", ")", ":", "if", "(", "isinstance", "(", "state", ",", "collections", ".", "Iterable", ")", "and", "not", "isinstance", "(", "state", ",", "str", ")", ")", ":", "# If the state is iterable then call for each state and return the", "# sum.", "return", "sum", "(", "self", ".", "count_cores_in_state", "(", "s", ",", "app_id", ")", "for", "s", "in", "state", ")", "if", "isinstance", "(", "state", ",", "str", ")", ":", "try", ":", "state", "=", "getattr", "(", "consts", ".", "AppState", ",", "state", ")", "except", "AttributeError", ":", "# The state name is not present in consts.AppSignal! The next", "# test will throw an appropriate exception since no string can", "# be \"in\" an IntEnum.", "pass", "if", "state", "not", "in", "consts", ".", "AppState", ":", "raise", "ValueError", "(", "\"count_cores_in_state: Unknown state {}\"", ".", "format", "(", "repr", "(", "state", ")", ")", ")", "# TODO Determine a way to nicely express a way to use the region data", "# stored in arg3.", "region", "=", "0x0000ffff", "# Largest possible machine, level 0", "level", "=", "(", "region", ">>", "16", ")", "&", "0x3", "mask", "=", "region", "&", "0x0000ffff", "# Construct the packet", "arg1", "=", "consts", ".", "diagnostic_signal_types", "[", "consts", ".", "AppDiagnosticSignal", ".", "count", "]", "arg2", "=", "(", "(", "level", "<<", "26", ")", "|", "(", "1", "<<", "22", ")", "|", "(", "consts", ".", "AppDiagnosticSignal", ".", "count", "<<", "20", ")", "|", "(", "state", "<<", "16", ")", "|", "(", "0xff", "<<", "8", ")", "|", "app_id", ")", "# App mask for 1 app_id = 0xff", "arg3", "=", "mask", "# Transmit and return the count", "return", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "signal", ",", "arg1", ",", "arg2", ",", "arg3", ")", ".", "arg1" ]
Count the number of cores in a given state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` or iterable Count the number of cores currently in this state. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string or an iterable of these, in which case the total count will be returned.
[ "Count", "the", "number", "of", "cores", "in", "a", "given", "state", "." ]
python
train
43.947368
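The dense part of count_cores_in_state is the bit-packing of arg2. The sketch below shows how the fields land in one 32-bit word; the numeric values for the signal and state are illustrative stand-ins, not the real consts entries:

level, count_signal, state, app_mask, app_id = 0, 1, 7, 0xff, 66   # illustrative values
arg2 = ((level << 26) | (1 << 22) | (count_signal << 20) |
        (state << 16) | (app_mask << 8) | app_id)
print(hex(arg2))   # 0x57ff42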
daknuett/py_register_machine2
core/memory.py
https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/core/memory.py#L51-L61
def program(self, prog, offset = 0): """ .. _program: Write the content of the iterable ``prog`` starting with the optional offset ``offset`` to the device. Invokes program_word_. """ for addr, word in enumerate(prog): self.program_word(offset + addr, word)
[ "def", "program", "(", "self", ",", "prog", ",", "offset", "=", "0", ")", ":", "for", "addr", ",", "word", "in", "enumerate", "(", "prog", ")", ":", "self", ".", "program_word", "(", "offset", "+", "addr", ",", "word", ")" ]
.. _program: Write the content of the iterable ``prog`` starting with the optional offset ``offset`` to the device. Invokes program_word_.
[ "..", "_program", ":" ]
python
train
24.181818
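A tiny usage sketch showing how the offset shifts each word's address (the device itself is left out):

prog = [0x01, 0x02, 0x03]
offset = 0x10
for addr, word in enumerate(prog):
    print(hex(offset + addr), hex(word))   # 0x10 0x1 / 0x11 0x2 / 0x12 0x3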
soasme/rio
rio/setup.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/setup.py#L19-L49
def configure_app(app): """Configure Flask/Celery application. * Rio will find environment variable `RIO_SETTINGS` first:: $ export RIO_SETTINGS=/path/to/settings.cfg $ rio worker * If `RIO_SETTINGS` is missing, Rio will try to load configuration module in `rio.settings` according to another environment variable `RIO_ENV`. Default load `rio.settings.dev`. $ export RIO_ENV=prod $ rio worker """ app.config_from_object('rio.settings.default') if environ.get('RIO_SETTINGS'): app.config_from_envvar('RIO_SETTINGS') return config_map = { 'dev': 'rio.settings.dev', 'stag': 'rio.settings.stag', 'prod': 'rio.settings.prod', 'test': 'rio.settings.test', } rio_env = environ.get('RIO_ENV', 'dev') config = config_map.get(rio_env, config_map['dev']) app.config_from_object(config)
[ "def", "configure_app", "(", "app", ")", ":", "app", ".", "config_from_object", "(", "'rio.settings.default'", ")", "if", "environ", ".", "get", "(", "'RIO_SETTINGS'", ")", ":", "app", ".", "config_from_envvar", "(", "'RIO_SETTINGS'", ")", "return", "config_map", "=", "{", "'dev'", ":", "'rio.settings.dev'", ",", "'stag'", ":", "'rio.settings.stag'", ",", "'prod'", ":", "'rio.settings.prod'", ",", "'test'", ":", "'rio.settings.test'", ",", "}", "rio_env", "=", "environ", ".", "get", "(", "'RIO_ENV'", ",", "'dev'", ")", "config", "=", "config_map", ".", "get", "(", "rio_env", ",", "config_map", "[", "'dev'", "]", ")", "app", ".", "config_from_object", "(", "config", ")" ]
Configure Flask/Celery application. * Rio will find environment variable `RIO_SETTINGS` first:: $ export RIO_SETTINGS=/path/to/settings.cfg $ rio worker * If `RIO_SETTINGS` is missing, Rio will try to load a configuration module in `rio.settings` according to another environment variable `RIO_ENV`, defaulting to `rio.settings.dev`. $ export RIO_ENV=prod $ rio worker
[ "Configure", "Flask", "/", "Celery", "application", "." ]
python
train
28.645161
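The RIO_ENV handling boils down to a dictionary lookup with a 'dev' fallback, which can be exercised without Flask or Celery:

import os

config_map = {
    'dev': 'rio.settings.dev',
    'stag': 'rio.settings.stag',
    'prod': 'rio.settings.prod',
    'test': 'rio.settings.test',
}
rio_env = os.environ.get('RIO_ENV', 'dev')
print(config_map.get(rio_env, config_map['dev']))   # rio.settings.dev unless RIO_ENV says otherwise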
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L744-L751
def get_daemon_stats(self, details=False): """Send a HTTP request to the satellite (GET /get_daemon_stats) :return: Daemon statistics :rtype: dict """ logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable) return self.con.get('stats%s' % ('?details=1' if details else ''))
[ "def", "get_daemon_stats", "(", "self", ",", "details", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Get daemon statistics for %s, %s %s\"", ",", "self", ".", "name", ",", "self", ".", "alive", ",", "self", ".", "reachable", ")", "return", "self", ".", "con", ".", "get", "(", "'stats%s'", "%", "(", "'?details=1'", "if", "details", "else", "''", ")", ")" ]
Send a HTTP request to the satellite (GET /get_daemon_stats) :return: Daemon statistics :rtype: dict
[ "Send", "a", "HTTP", "request", "to", "the", "satellite", "(", "GET", "/", "get_daemon_stats", ")" ]
python
train
43.75
rtlee9/serveit
serveit/log_utils.py
https://github.com/rtlee9/serveit/blob/d97b5fbe56bec78d6c0193d6fd2ea2a0c1cbafdc/serveit/log_utils.py#L8-L12
def get_logger(name): """Get a logger with the specified name.""" logger = logging.getLogger(name) logger.setLevel(getenv('LOGLEVEL', 'INFO')) return logger
[ "def", "get_logger", "(", "name", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "getenv", "(", "'LOGLEVEL'", ",", "'INFO'", ")", ")", "return", "logger" ]
Get a logger with the specified name.
[ "Get", "a", "logger", "with", "the", "specified", "name", "." ]
python
train
33.6
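A short usage sketch (the module name is made up; LOGLEVEL comes from the environment, with 'INFO' as the fallback):

import logging
from os import getenv

logging.basicConfig()                       # give the root logger a handler
logger = logging.getLogger('my.module')     # hypothetical name
logger.setLevel(getenv('LOGLEVEL', 'INFO'))
logger.info('hello from %s', logger.name)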
toumorokoshi/transmute-core
transmute_core/swagger/__init__.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/swagger/__init__.py#L53-L57
def add_func(self, transmute_func, transmute_context): """ add a transmute function's swagger definition to the spec """ swagger_path = transmute_func.get_swagger_path(transmute_context) for p in transmute_func.paths: self.add_path(p, swagger_path)
[ "def", "add_func", "(", "self", ",", "transmute_func", ",", "transmute_context", ")", ":", "swagger_path", "=", "transmute_func", ".", "get_swagger_path", "(", "transmute_context", ")", "for", "p", "in", "transmute_func", ".", "paths", ":", "self", ".", "add_path", "(", "p", ",", "swagger_path", ")" ]
add a transmute function's swagger definition to the spec
[ "add", "a", "transmute", "function", "s", "swagger", "definition", "to", "the", "spec" ]
python
train
56
gbowerman/azurerm
azurerm/container.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L36-L72
def create_container_instance_group(access_token, subscription_id, resource_group, container_group_name, container_list, location, ostype='Linux', port=80, iptype='public'): '''Create a new container group with a list of containers specifified by container_list. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_list (list): A list of container properties. Use create_container_definition to create each container property set. location (str): Azure data center location. E.g. westus. ostype (str): Container operating system type. Linux or Windows. port (int): TCP port number. E.g. 8080. iptype (str): Type of IP address. E.g. public. Returns: HTTP response with JSON body of container group. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) container_group_body = {'location': location} properties = {'osType': ostype} properties['containers'] = container_list ipport = {'protocol': 'TCP'} ipport['port'] = port ipaddress = {'ports': [ipport]} ipaddress['type'] = iptype properties['ipAddress'] = ipaddress container_group_body['properties'] = properties body = json.dumps(container_group_body) return do_put(endpoint, body, access_token)
[ "def", "create_container_instance_group", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "container_group_name", ",", "container_list", ",", "location", ",", "ostype", "=", "'Linux'", ",", "port", "=", "80", ",", "iptype", "=", "'public'", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerInstance/ContainerGroups/'", ",", "container_group_name", ",", "'?api-version='", ",", "CONTAINER_API", "]", ")", "container_group_body", "=", "{", "'location'", ":", "location", "}", "properties", "=", "{", "'osType'", ":", "ostype", "}", "properties", "[", "'containers'", "]", "=", "container_list", "ipport", "=", "{", "'protocol'", ":", "'TCP'", "}", "ipport", "[", "'port'", "]", "=", "port", "ipaddress", "=", "{", "'ports'", ":", "[", "ipport", "]", "}", "ipaddress", "[", "'type'", "]", "=", "iptype", "properties", "[", "'ipAddress'", "]", "=", "ipaddress", "container_group_body", "[", "'properties'", "]", "=", "properties", "body", "=", "json", ".", "dumps", "(", "container_group_body", ")", "return", "do_put", "(", "endpoint", ",", "body", ",", "access_token", ")" ]
Create a new container group with a list of containers specified by container_list. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_list (list): A list of container properties. Use create_container_definition to create each container property set. location (str): Azure data center location. E.g. westus. ostype (str): Container operating system type. Linux or Windows. port (int): TCP port number. E.g. 8080. iptype (str): Type of IP address. E.g. public. Returns: HTTP response with JSON body of container group.
[ "Create", "a", "new", "container", "group", "with", "a", "list", "of", "containers", "specifified", "by", "container_list", "." ]
python
train
48.945946
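For orientation, this is roughly what the request body assembled by create_container_instance_group looks like; the container entry is a made-up placeholder for what create_container_definition would produce:

import json

container_list = [{'name': 'web', 'properties': {'image': 'nginx'}}]   # illustrative
properties = {
    'osType': 'Linux',
    'containers': container_list,
    'ipAddress': {'ports': [{'protocol': 'TCP', 'port': 80}], 'type': 'public'},
}
body = {'location': 'westus', 'properties': properties}
print(json.dumps(body, indent=2))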
googlefonts/glyphsLib
Lib/glyphsLib/builder/user_data.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/user_data.py#L115-L125
def to_glyphs_family_user_data_from_ufo(self, ufo): """Set the GSFont userData from the UFO family-wide lib data.""" target_user_data = self.font.userData try: for key, value in ufo.lib[FONT_USER_DATA_KEY].items(): # Existing values taken from the designspace lib take precedence if key not in target_user_data.keys(): target_user_data[key] = value except KeyError: # No FONT_USER_DATA in ufo.lib pass
[ "def", "to_glyphs_family_user_data_from_ufo", "(", "self", ",", "ufo", ")", ":", "target_user_data", "=", "self", ".", "font", ".", "userData", "try", ":", "for", "key", ",", "value", "in", "ufo", ".", "lib", "[", "FONT_USER_DATA_KEY", "]", ".", "items", "(", ")", ":", "# Existing values taken from the designspace lib take precedence", "if", "key", "not", "in", "target_user_data", ".", "keys", "(", ")", ":", "target_user_data", "[", "key", "]", "=", "value", "except", "KeyError", ":", "# No FONT_USER_DATA in ufo.lib", "pass" ]
Set the GSFont userData from the UFO family-wide lib data.
[ "Set", "the", "GSFont", "userData", "from", "the", "UFO", "family", "-", "wide", "lib", "data", "." ]
python
train
42.818182
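The merge rule here is "copy, but never overwrite an existing key". Shown with plain dicts and made-up keys:

target = {'com.example.existing': 1}                           # pre-existing userData
incoming = {'com.example.existing': 99, 'com.example.new': 2}  # UFO lib data
for key, value in incoming.items():
    if key not in target:
        target[key] = value
print(target)   # {'com.example.existing': 1, 'com.example.new': 2}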
tensorflow/tensorboard
tensorboard/plugins/graph/graph_util.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/graph_util.py#L27-L87
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key): """Safely merge values from `src_proto_list` into `dst_proto_list`. Each element in `dst_proto_list` must be mapped by `get_key` to a key value that is unique within that list; likewise for `src_proto_list`. If an element of `src_proto_list` has the same key as an existing element in `dst_proto_list`, then the elements must also be equal. Args: dst_proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer` into which values should be copied. src_proto_list: A container holding the same kind of values as in `dst_proto_list` from which values should be copied. get_key: A function that takes an element of `dst_proto_list` or `src_proto_list` and returns a key, such that if two elements have the same key then it is required that they be deep-equal. For instance, if `dst_proto_list` is a list of nodes, then `get_key` might be `lambda node: node.name` to indicate that if two nodes have the same name then they must be the same node. All keys must be hashable. Raises: _ProtoListDuplicateKeyError: A proto_list contains items with duplicate keys. _SameKeyDiffContentError: An item with the same key has different contents. """ def _assert_proto_container_unique_keys(proto_list, get_key): """Asserts proto_list to only contains unique keys. Args: proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer`. get_key: A function that takes an element of `proto_list` and returns a hashable key. Raises: _ProtoListDuplicateKeyError: A proto_list contains items with duplicate keys. """ keys = set() for item in proto_list: key = get_key(item) if key in keys: raise _ProtoListDuplicateKeyError(key) keys.add(key) _assert_proto_container_unique_keys(dst_proto_list, get_key) _assert_proto_container_unique_keys(src_proto_list, get_key) key_to_proto = {} for proto in dst_proto_list: key = get_key(proto) key_to_proto[key] = proto for proto in src_proto_list: key = get_key(proto) if key in key_to_proto: if proto != key_to_proto.get(key): raise _SameKeyDiffContentError(key) else: dst_proto_list.add().CopyFrom(proto)
[ "def", "_safe_copy_proto_list_values", "(", "dst_proto_list", ",", "src_proto_list", ",", "get_key", ")", ":", "def", "_assert_proto_container_unique_keys", "(", "proto_list", ",", "get_key", ")", ":", "\"\"\"Asserts proto_list to only contains unique keys.\n\n Args:\n proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer`.\n get_key: A function that takes an element of `proto_list` and returns a\n hashable key.\n\n Raises:\n _ProtoListDuplicateKeyError: A proto_list contains items with duplicate\n keys.\n \"\"\"", "keys", "=", "set", "(", ")", "for", "item", "in", "proto_list", ":", "key", "=", "get_key", "(", "item", ")", "if", "key", "in", "keys", ":", "raise", "_ProtoListDuplicateKeyError", "(", "key", ")", "keys", ".", "add", "(", "key", ")", "_assert_proto_container_unique_keys", "(", "dst_proto_list", ",", "get_key", ")", "_assert_proto_container_unique_keys", "(", "src_proto_list", ",", "get_key", ")", "key_to_proto", "=", "{", "}", "for", "proto", "in", "dst_proto_list", ":", "key", "=", "get_key", "(", "proto", ")", "key_to_proto", "[", "key", "]", "=", "proto", "for", "proto", "in", "src_proto_list", ":", "key", "=", "get_key", "(", "proto", ")", "if", "key", "in", "key_to_proto", ":", "if", "proto", "!=", "key_to_proto", ".", "get", "(", "key", ")", ":", "raise", "_SameKeyDiffContentError", "(", "key", ")", "else", ":", "dst_proto_list", ".", "add", "(", ")", ".", "CopyFrom", "(", "proto", ")" ]
Safely merge values from `src_proto_list` into `dst_proto_list`. Each element in `dst_proto_list` must be mapped by `get_key` to a key value that is unique within that list; likewise for `src_proto_list`. If an element of `src_proto_list` has the same key as an existing element in `dst_proto_list`, then the elements must also be equal. Args: dst_proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer` into which values should be copied. src_proto_list: A container holding the same kind of values as in `dst_proto_list` from which values should be copied. get_key: A function that takes an element of `dst_proto_list` or `src_proto_list` and returns a key, such that if two elements have the same key then it is required that they be deep-equal. For instance, if `dst_proto_list` is a list of nodes, then `get_key` might be `lambda node: node.name` to indicate that if two nodes have the same name then they must be the same node. All keys must be hashable. Raises: _ProtoListDuplicateKeyError: A proto_list contains items with duplicate keys. _SameKeyDiffContentError: An item with the same key has different contents.
[ "Safely", "merge", "values", "from", "src_proto_list", "into", "dst_proto_list", "." ]
python
train
37.606557
delfick/aws_syncr
aws_syncr/amazon/iam.py
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/amazon/iam.py#L128-L156
def modify_attached_policies(self, role_name, new_policies): """Make sure this role has just the new policies""" parts = role_name.split('/', 1) if len(parts) == 2: prefix, name = parts prefix = "/{0}/".format(prefix) else: prefix = "/" name = parts[0] current_attached_policies = [] with self.ignore_missing(): current_attached_policies = self.client.list_attached_role_policies(RoleName=name) current_attached_policies = [p['PolicyArn'] for p in current_attached_policies["AttachedPolicies"]] new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies] changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies)) if changes: with self.catch_boto_400("Couldn't modify attached policies", role=role_name): for policy in new_attached_policies: if policy not in current_attached_policies: for _ in self.change("+", "attached_policy", role=role_name, policy=policy): self.client.attach_role_policy(RoleName=name, PolicyArn=policy) for policy in current_attached_policies: if policy not in new_attached_policies: for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy): self.client.detach_role_policy(RoleName=name, PolicyArn=policy)
[ "def", "modify_attached_policies", "(", "self", ",", "role_name", ",", "new_policies", ")", ":", "parts", "=", "role_name", ".", "split", "(", "'/'", ",", "1", ")", "if", "len", "(", "parts", ")", "==", "2", ":", "prefix", ",", "name", "=", "parts", "prefix", "=", "\"/{0}/\"", ".", "format", "(", "prefix", ")", "else", ":", "prefix", "=", "\"/\"", "name", "=", "parts", "[", "0", "]", "current_attached_policies", "=", "[", "]", "with", "self", ".", "ignore_missing", "(", ")", ":", "current_attached_policies", "=", "self", ".", "client", ".", "list_attached_role_policies", "(", "RoleName", "=", "name", ")", "current_attached_policies", "=", "[", "p", "[", "'PolicyArn'", "]", "for", "p", "in", "current_attached_policies", "[", "\"AttachedPolicies\"", "]", "]", "new_attached_policies", "=", "[", "\"arn:aws:iam::aws:policy/{0}\"", ".", "format", "(", "p", ")", "for", "p", "in", "new_policies", "]", "changes", "=", "list", "(", "Differ", ".", "compare_two_documents", "(", "current_attached_policies", ",", "new_attached_policies", ")", ")", "if", "changes", ":", "with", "self", ".", "catch_boto_400", "(", "\"Couldn't modify attached policies\"", ",", "role", "=", "role_name", ")", ":", "for", "policy", "in", "new_attached_policies", ":", "if", "policy", "not", "in", "current_attached_policies", ":", "for", "_", "in", "self", ".", "change", "(", "\"+\"", ",", "\"attached_policy\"", ",", "role", "=", "role_name", ",", "policy", "=", "policy", ")", ":", "self", ".", "client", ".", "attach_role_policy", "(", "RoleName", "=", "name", ",", "PolicyArn", "=", "policy", ")", "for", "policy", "in", "current_attached_policies", ":", "if", "policy", "not", "in", "new_attached_policies", ":", "for", "_", "in", "self", ".", "change", "(", "\"-\"", ",", "\"attached_policy\"", ",", "role", "=", "role_name", ",", "changes", "=", "changes", ",", "policy", "=", "policy", ")", ":", "self", ".", "client", ".", "detach_role_policy", "(", "RoleName", "=", "name", ",", "PolicyArn", "=", "policy", ")" ]
Make sure this role has just the new policies
[ "Make", "sure", "this", "role", "has", "just", "the", "new", "policies" ]
python
train
53
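The role-name handling at the top of modify_attached_policies splits an optional path prefix off the role name; a standalone illustration with made-up names:

for role_name in ("my-role", "bots/my-role"):
    parts = role_name.split('/', 1)
    if len(parts) == 2:
        prefix, name = "/{0}/".format(parts[0]), parts[1]
    else:
        prefix, name = "/", parts[0]
    print(repr(prefix), repr(name))
# '/' 'my-role'
# '/bots/' 'my-role'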
LogicalDash/LiSE
ELiDE/ELiDE/card.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L805-L810
def on_touch_down(self, touch): """Tell my parent if I've been touched""" if self.parent is None: return if self.collide_point(*touch.pos): self.parent.bar_touched(self, touch)
[ "def", "on_touch_down", "(", "self", ",", "touch", ")", ":", "if", "self", ".", "parent", "is", "None", ":", "return", "if", "self", ".", "collide_point", "(", "*", "touch", ".", "pos", ")", ":", "self", ".", "parent", ".", "bar_touched", "(", "self", ",", "touch", ")" ]
Tell my parent if I've been touched
[ "Tell", "my", "parent", "if", "I", "ve", "been", "touched" ]
python
train
36.5
hydpy-dev/hydpy
hydpy/models/lstream/lstream_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lstream/lstream_model.py#L243-L342
def calc_av_uv_v1(self): """Calculate the flown through area and the wetted perimeter of both forelands. Note that the each foreland lies between the main channel and one outer embankment and that water flowing exactly above the a foreland is contributing to |AV|. The theoretical surface seperating water above the main channel from water above the foreland is not contributing to |UV|, but the surface seperating water above the foreland from water above its outer embankment is contributing to |UV|. Required control parameters: |HM| |BV| |BNV| Required derived parameter: |HV| Required flux sequence: |H| Calculated flux sequence: |AV| |UV| Examples: Generally, right trapezoids are assumed. Here, for simplicity, both forelands are assumed to be symmetrical. Their smaller bases (bottoms) hava a length of 2 meters, their non-vertical legs show an inclination of 1 meter per 4 meters, and their height (depths) is 1 meter. Both forelands lie 1 meter above the main channels bottom. >>> from hydpy.models.lstream import * >>> parameterstep() >>> hm(1.0) >>> bv(2.0) >>> bnv(4.0) >>> derived.hv(1.0) The first example deals with normal flow conditions, where water flows within the main channel completely (|H| < |HM|): >>> fluxes.h = 0.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(0.0, 0.0) >>> fluxes.uv uv(0.0, 0.0) The second example deals with moderate high flow conditions, where water flows over both forelands, but not over their embankments (|HM| < |H| < (|HM| + |HV|)): >>> fluxes.h = 1.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(1.5, 1.5) >>> fluxes.uv uv(4.061553, 4.061553) The third example deals with extreme high flow conditions, where water flows over the both foreland and their outer embankments ((|HM| + |HV|) < |H|): >>> fluxes.h = 2.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(7.0, 7.0) >>> fluxes.uv uv(6.623106, 6.623106) The forth example assures that zero widths or hights of the forelands are handled properly: >>> bv.left = 0.0 >>> derived.hv.right = 0.0 >>> model.calc_av_uv_v1() >>> fluxes.av av(4.0, 3.0) >>> fluxes.uv uv(4.623106, 3.5) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess for i in range(2): if flu.h <= con.hm: flu.av[i] = 0. flu.uv[i] = 0. elif flu.h <= (con.hm+der.hv[i]): flu.av[i] = (flu.h-con.hm)*(con.bv[i]+(flu.h-con.hm)*con.bnv[i]/2.) flu.uv[i] = con.bv[i]+(flu.h-con.hm)*(1.+con.bnv[i]**2)**.5 else: flu.av[i] = (der.hv[i]*(con.bv[i]+der.hv[i]*con.bnv[i]/2.) + ((flu.h-(con.hm+der.hv[i])) * (con.bv[i]+der.hv[i]*con.bnv[i]))) flu.uv[i] = ((con.bv[i])+(der.hv[i]*(1.+con.bnv[i]**2)**.5) + (flu.h-(con.hm+der.hv[i])))
[ "def", "calc_av_uv_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "for", "i", "in", "range", "(", "2", ")", ":", "if", "flu", ".", "h", "<=", "con", ".", "hm", ":", "flu", ".", "av", "[", "i", "]", "=", "0.", "flu", ".", "uv", "[", "i", "]", "=", "0.", "elif", "flu", ".", "h", "<=", "(", "con", ".", "hm", "+", "der", ".", "hv", "[", "i", "]", ")", ":", "flu", ".", "av", "[", "i", "]", "=", "(", "flu", ".", "h", "-", "con", ".", "hm", ")", "*", "(", "con", ".", "bv", "[", "i", "]", "+", "(", "flu", ".", "h", "-", "con", ".", "hm", ")", "*", "con", ".", "bnv", "[", "i", "]", "/", "2.", ")", "flu", ".", "uv", "[", "i", "]", "=", "con", ".", "bv", "[", "i", "]", "+", "(", "flu", ".", "h", "-", "con", ".", "hm", ")", "*", "(", "1.", "+", "con", ".", "bnv", "[", "i", "]", "**", "2", ")", "**", ".5", "else", ":", "flu", ".", "av", "[", "i", "]", "=", "(", "der", ".", "hv", "[", "i", "]", "*", "(", "con", ".", "bv", "[", "i", "]", "+", "der", ".", "hv", "[", "i", "]", "*", "con", ".", "bnv", "[", "i", "]", "/", "2.", ")", "+", "(", "(", "flu", ".", "h", "-", "(", "con", ".", "hm", "+", "der", ".", "hv", "[", "i", "]", ")", ")", "*", "(", "con", ".", "bv", "[", "i", "]", "+", "der", ".", "hv", "[", "i", "]", "*", "con", ".", "bnv", "[", "i", "]", ")", ")", ")", "flu", ".", "uv", "[", "i", "]", "=", "(", "(", "con", ".", "bv", "[", "i", "]", ")", "+", "(", "der", ".", "hv", "[", "i", "]", "*", "(", "1.", "+", "con", ".", "bnv", "[", "i", "]", "**", "2", ")", "**", ".5", ")", "+", "(", "flu", ".", "h", "-", "(", "con", ".", "hm", "+", "der", ".", "hv", "[", "i", "]", ")", ")", ")" ]
Calculate the flown through area and the wetted perimeter of both forelands. Note that the each foreland lies between the main channel and one outer embankment and that water flowing exactly above the a foreland is contributing to |AV|. The theoretical surface seperating water above the main channel from water above the foreland is not contributing to |UV|, but the surface seperating water above the foreland from water above its outer embankment is contributing to |UV|. Required control parameters: |HM| |BV| |BNV| Required derived parameter: |HV| Required flux sequence: |H| Calculated flux sequence: |AV| |UV| Examples: Generally, right trapezoids are assumed. Here, for simplicity, both forelands are assumed to be symmetrical. Their smaller bases (bottoms) hava a length of 2 meters, their non-vertical legs show an inclination of 1 meter per 4 meters, and their height (depths) is 1 meter. Both forelands lie 1 meter above the main channels bottom. >>> from hydpy.models.lstream import * >>> parameterstep() >>> hm(1.0) >>> bv(2.0) >>> bnv(4.0) >>> derived.hv(1.0) The first example deals with normal flow conditions, where water flows within the main channel completely (|H| < |HM|): >>> fluxes.h = 0.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(0.0, 0.0) >>> fluxes.uv uv(0.0, 0.0) The second example deals with moderate high flow conditions, where water flows over both forelands, but not over their embankments (|HM| < |H| < (|HM| + |HV|)): >>> fluxes.h = 1.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(1.5, 1.5) >>> fluxes.uv uv(4.061553, 4.061553) The third example deals with extreme high flow conditions, where water flows over the both foreland and their outer embankments ((|HM| + |HV|) < |H|): >>> fluxes.h = 2.5 >>> model.calc_av_uv_v1() >>> fluxes.av av(7.0, 7.0) >>> fluxes.uv uv(6.623106, 6.623106) The forth example assures that zero widths or hights of the forelands are handled properly: >>> bv.left = 0.0 >>> derived.hv.right = 0.0 >>> model.calc_av_uv_v1() >>> fluxes.av av(4.0, 3.0) >>> fluxes.uv uv(4.623106, 3.5)
[ "Calculate", "the", "flown", "through", "area", "and", "the", "wetted", "perimeter", "of", "both", "forelands", "." ]
python
train
32.26
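The right-trapezoid relations used by calc_av_uv_v1 can be checked independently of HydPy. The following standalone sketch (plain Python, hypothetical helper name, no HydPy dependency) reproduces the moderate-flow doctest values for h = 1.5, hm = 1.0, bv = 2.0, bnv = 4.0, hv = 1.0.

# Standalone check of the right-trapezoid relations from calc_av_uv_v1
# (illustrative sketch; not part of HydPy).
from math import sqrt, isclose

def foreland_av_uv(h, hm, bv, bnv, hv):
    """Flown-through area AV and wetted perimeter UV for one foreland."""
    if h <= hm:                      # water stays in the main channel
        return 0.0, 0.0
    if h <= hm + hv:                 # water on the foreland, below the embankment
        av = (h - hm) * (bv + (h - hm) * bnv / 2.0)
        uv = bv + (h - hm) * sqrt(1.0 + bnv ** 2)
    else:                            # water also above the outer embankment
        av = hv * (bv + hv * bnv / 2.0) + (h - (hm + hv)) * (bv + hv * bnv)
        uv = bv + hv * sqrt(1.0 + bnv ** 2) + (h - (hm + hv))
    return av, uv

av, uv = foreland_av_uv(h=1.5, hm=1.0, bv=2.0, bnv=4.0, hv=1.0)
assert isclose(av, 1.5)                         # matches the doctest: av(1.5, 1.5)
assert isclose(uv, 4.061553, abs_tol=1e-6)      # matches the doctest: uv(4.061553, ...)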
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L11046-L11054
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
    '''
    Response from a GOPRO_COMMAND set request

    cmd_id : Command ID (uint8_t)
    status : Status (uint8_t)
    '''
    return self.send(self.gopro_set_response_encode(cmd_id, status), force_mavlink1=force_mavlink1)
[ "def", "gopro_set_response_send", "(", "self", ",", "cmd_id", ",", "status", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "gopro_set_response_encode", "(", "cmd_id", ",", "status", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Response from a GOPRO_COMMAND set request cmd_id : Command ID (uint8_t) status : Status (uint8_t)
[ "Response", "from", "a", "GOPRO_COMMAND", "set", "request" ]
python
train
44.666667
rigetti/grove
grove/tomography/process_tomography.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/process_tomography.py#L203-L216
def avg_gate_fidelity(self, reference_unitary):
    """
    Compute the average gate fidelity of the estimated process with respect to a unitary
    process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_

    :param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a
     process as `rho -> other*rho*other.dag()`, alternatively a superoperator or
     Pauli-transfer matrix.
    :return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is
     the Hilbert space dimension.
    :rtype: float
    """
    process_fidelity = self.process_fidelity(reference_unitary)
    dimension = self.pauli_basis.ops[0].shape[0]
    return (dimension * process_fidelity + 1.0) / (dimension + 1.0)
[ "def", "avg_gate_fidelity", "(", "self", ",", "reference_unitary", ")", ":", "process_fidelity", "=", "self", ".", "process_fidelity", "(", "reference_unitary", ")", "dimension", "=", "self", ".", "pauli_basis", ".", "ops", "[", "0", "]", ".", "shape", "[", "0", "]", "return", "(", "dimension", "*", "process_fidelity", "+", "1.0", ")", "/", "(", "dimension", "+", "1.0", ")" ]
Compute the average gate fidelity of the estimated process with respect to a unitary process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_ :param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix. :return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the Hilbert space dimension. :rtype: float
[ "Compute", "the", "average", "gate", "fidelity", "of", "the", "estimated", "process", "with", "respect", "to", "a", "unitary", "process", ".", "See", "Chow", "et", "al", ".", "2012", "<https", ":", "//", "doi", ".", "org", "/", "10", ".", "1103", "/", "PhysRevLett", ".", "109", ".", "060501", ">", "_" ]
python
train
56.428571
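The closed-form relation used above, F_avg = (d * F_pro + 1) / (d + 1), is easy to sanity-check on its own. The sketch below (plain Python, no pyQuil/Grove dependency) evaluates the bounds quoted in the docstring for a single qubit (d = 2).

# Illustrative check of the average-gate-fidelity relation (not Grove code).
def avg_from_process_fidelity(process_fidelity, dimension):
    return (dimension * process_fidelity + 1.0) / (dimension + 1.0)

d = 2                                              # single-qubit Hilbert space dimension
print(avg_from_process_fidelity(1.0, d))           # perfect process -> 1.0
print(avg_from_process_fidelity(0.0, d))           # worst case -> 1/(d+1) = 0.333...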
guaix-ucm/pyemir
emirdrp/processing/bardetect.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bardetect.py#L110-L120
def simple_prot(x, start):
    """Find the first peak to the right of start"""

    # start must be >= 1
    for i in range(start, len(x) - 1):
        a, b, c = x[i - 1], x[i], x[i + 1]
        if b - a > 0 and b - c >= 0:
            return i
    else:
        return None
[ "def", "simple_prot", "(", "x", ",", "start", ")", ":", "# start must b >= 1", "for", "i", "in", "range", "(", "start", ",", "len", "(", "x", ")", "-", "1", ")", ":", "a", ",", "b", ",", "c", "=", "x", "[", "i", "-", "1", "]", ",", "x", "[", "i", "]", ",", "x", "[", "i", "+", "1", "]", "if", "b", "-", "a", ">", "0", "and", "b", "-", "c", ">=", "0", ":", "return", "i", "else", ":", "return", "None" ]
Find the first peak to the right of start
[ "Find", "the", "first", "peak", "to", "the", "right", "of", "start" ]
python
train
23.181818
QuantEcon/QuantEcon.py
quantecon/quad.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quad.py#L685-L730
def _qnwcheb1(n, a, b): """ Compute univariate Guass-Checbychev quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwcheb1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002. """ nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n)) # Create temporary arrays to be used in computing weights t1 = np.arange(1, n+1) - 0.5 t2 = np.arange(0.0, n, 2) t3 = np.concatenate((np.array([1.0]), -2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2)))) # compute weights and return weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3 return nodes, weights
[ "def", "_qnwcheb1", "(", "n", ",", "a", ",", "b", ")", ":", "nodes", "=", "(", "b", "+", "a", ")", "/", "2", "-", "(", "b", "-", "a", ")", "/", "2", "*", "np", ".", "cos", "(", "np", ".", "pi", "/", "n", "*", "np", ".", "linspace", "(", "0.5", ",", "n", "-", "0.5", ",", "n", ")", ")", "# Create temporary arrays to be used in computing weights", "t1", "=", "np", ".", "arange", "(", "1", ",", "n", "+", "1", ")", "-", "0.5", "t2", "=", "np", ".", "arange", "(", "0.0", ",", "n", ",", "2", ")", "t3", "=", "np", ".", "concatenate", "(", "(", "np", ".", "array", "(", "[", "1.0", "]", ")", ",", "-", "2.0", "/", "(", "np", ".", "arange", "(", "1.0", ",", "n", "-", "1", ",", "2", ")", "*", "np", ".", "arange", "(", "3.0", ",", "n", "+", "1", ",", "2", ")", ")", ")", ")", "# compute weights and return", "weights", "=", "(", "(", "b", "-", "a", ")", "/", "n", ")", "*", "np", ".", "cos", "(", "np", ".", "pi", "/", "n", "*", "np", ".", "outer", "(", "t1", ",", "t2", ")", ")", "@", "t3", "return", "nodes", ",", "weights" ]
Compute univariate Guass-Checbychev quadrature nodes and weights Parameters ---------- n : int The number of nodes a : int The lower endpoint b : int The upper endpoint Returns ------- nodes : np.ndarray(dtype=float) An n element array of nodes nodes : np.ndarray(dtype=float) An n element array of weights Notes ----- Based of original function ``qnwcheb1`` in CompEcon toolbox by Miranda and Fackler References ---------- Miranda, Mario J, and Paul L Fackler. Applied Computational Economics and Finance, MIT Press, 2002.
[ "Compute", "univariate", "Guass", "-", "Checbychev", "quadrature", "nodes", "and", "weights" ]
python
train
23.586957
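The node formula in _qnwcheb1, x_k = (a + b)/2 - (b - a)/2 * cos(pi * (k - 0.5) / n), places the nodes at Chebyshev points mapped onto [a, b]. A minimal sketch (plain Python, hypothetical helper name, weights omitted) shows the n = 3 nodes on [-1, 1].

# Minimal sketch of the Gauss-Chebyshev node placement used in _qnwcheb1
# (illustrative only; the quadrature weights are not computed here).
from math import cos, pi

def cheb_nodes(n, a, b):
    return [(b + a) / 2 - (b - a) / 2 * cos(pi / n * (k + 0.5)) for k in range(n)]

print(cheb_nodes(3, -1.0, 1.0))
# -> approximately [-0.866, 0.0, 0.866], i.e. -cos(pi/6), -cos(pi/2), -cos(5*pi/6)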
CulturePlex/django-zotero
django_zotero/signals.py
https://github.com/CulturePlex/django-zotero/blob/de31583a80a2bd2459c118fb5aa767a2842e0b00/django_zotero/signals.py#L51-L68
def check_field_multiplicity(tag, previous_tags):
    """
    Check the multiplicity of a 'field' for an object.
    """
    fail = False
    # If the field is single
    if not tag.field.multiple:
        # If the tag is being created...
        if not tag.id:
            # ...and the new field was already included in the previous tags, fail
            fail = previous_tags.filter(field=tag.field)
        # If the tag is being modified...
        else:
            # ...but there is only one previous tag (the one that is being modified),
            # do not fail
            fail = previous_tags.filter(field=tag.field).count() > 1
    return fail
[ "def", "check_field_multiplicity", "(", "tag", ",", "previous_tags", ")", ":", "fail", "=", "False", "#If the field is single", "if", "not", "tag", ".", "field", ".", "multiple", ":", "#If the tag is being created...", "if", "not", "tag", ".", "id", ":", "#... and the new field was already included in the previous tags,", "#fail", "fail", "=", "previous_tags", ".", "filter", "(", "field", "=", "tag", ".", "field", ")", "#If the tag is being modifying...", "else", ":", "#... but there is only one previous tag (the one that is being", "#modifying), do not fail", "fail", "=", "previous_tags", ".", "filter", "(", "field", "=", "tag", ".", "field", ")", ".", "count", "(", ")", ">", "1", "return", "fail" ]
Check the multiplicity of a 'field' for an object.
[ "Check", "the", "multiplicity", "of", "a", "field", "for", "an", "object", "." ]
python
train
36
kgori/treeCl
treeCl/collection.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/collection.py#L137-L139
def records(self):
    """ Returns a list of records in SORT_KEY order """
    return [self._records[i] for i in range(len(self._records))]
[ "def", "records", "(", "self", ")", ":", "return", "[", "self", ".", "_records", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_records", ")", ")", "]" ]
Returns a list of records in SORT_KEY order
[ "Returns", "a", "list", "of", "records", "in", "SORT_KEY", "order" ]
python
train
48.333333
isislovecruft/python-gnupg
pretty_bad_protocol/_parsers.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_parsers.py#L476-L610
def _get_options_group(group=None): """Get a specific group of options which are allowed.""" #: These expect a hexidecimal keyid as their argument, and can be parsed #: with :func:`_is_hex`. hex_options = frozenset(['--check-sigs', '--default-key', '--default-recipient', '--delete-keys', '--delete-secret-keys', '--delete-secret-and-public-keys', '--desig-revoke', '--export', '--export-secret-keys', '--export-secret-subkeys', '--fingerprint', '--gen-revoke', '--hidden-encrypt-to', '--hidden-recipient', '--list-key', '--list-keys', '--list-public-keys', '--list-secret-keys', '--list-sigs', '--recipient', '--recv-keys', '--send-keys', '--edit-key', '--sign-key', ]) #: These options expect value which are left unchecked, though still run #: through :func:`_fix_unsafe`. unchecked_options = frozenset(['--list-options', '--passphrase-fd', '--status-fd', '--verify-options', '--command-fd', ]) #: These have their own parsers and don't really fit into a group other_options = frozenset(['--debug-level', '--keyserver', ]) #: These should have a directory for an argument dir_options = frozenset(['--homedir', ]) #: These expect a keyring or keyfile as their argument keyring_options = frozenset(['--keyring', '--primary-keyring', '--secret-keyring', '--trustdb-name', ]) #: These expect a filename (or the contents of a file as a string) or None #: (meaning that they read from stdin) file_or_none_options = frozenset(['--decrypt', '--decrypt-files', '--encrypt', '--encrypt-files', '--import', '--verify', '--verify-files', '--output', ]) #: These options expect a string. see :func:`_check_preferences`. pref_options = frozenset(['--digest-algo', '--cipher-algo', '--compress-algo', '--compression-algo', '--cert-digest-algo', '--personal-digest-prefs', '--personal-digest-preferences', '--personal-cipher-prefs', '--personal-cipher-preferences', '--personal-compress-prefs', '--personal-compress-preferences', '--pinentry-mode', '--print-md', '--trust-model', ]) #: These options expect no arguments none_options = frozenset(['--allow-loopback-pinentry', '--always-trust', '--armor', '--armour', '--batch', '--check-sigs', '--check-trustdb', '--clearsign', '--debug-all', '--default-recipient-self', '--detach-sign', '--export', '--export-ownertrust', '--export-secret-keys', '--export-secret-subkeys', '--fingerprint', '--fixed-list-mode', '--gen-key', '--import-ownertrust', '--list-config', '--list-key', '--list-keys', '--list-packets', '--list-public-keys', '--list-secret-keys', '--list-sigs', '--lock-multiple', '--lock-never', '--lock-once', '--no-default-keyring', '--no-default-recipient', '--no-emit-version', '--no-options', '--no-tty', '--no-use-agent', '--no-verbose', '--print-mds', '--quiet', '--sign', '--symmetric', '--throw-keyids', '--use-agent', '--verbose', '--version', '--with-colons', '--yes', ]) #: These options expect either None or a hex string hex_or_none_options = hex_options.intersection(none_options) allowed = hex_options.union(unchecked_options, other_options, dir_options, keyring_options, file_or_none_options, pref_options, none_options) if group and group in locals().keys(): return locals()[group]
[ "def", "_get_options_group", "(", "group", "=", "None", ")", ":", "#: These expect a hexidecimal keyid as their argument, and can be parsed", "#: with :func:`_is_hex`.", "hex_options", "=", "frozenset", "(", "[", "'--check-sigs'", ",", "'--default-key'", ",", "'--default-recipient'", ",", "'--delete-keys'", ",", "'--delete-secret-keys'", ",", "'--delete-secret-and-public-keys'", ",", "'--desig-revoke'", ",", "'--export'", ",", "'--export-secret-keys'", ",", "'--export-secret-subkeys'", ",", "'--fingerprint'", ",", "'--gen-revoke'", ",", "'--hidden-encrypt-to'", ",", "'--hidden-recipient'", ",", "'--list-key'", ",", "'--list-keys'", ",", "'--list-public-keys'", ",", "'--list-secret-keys'", ",", "'--list-sigs'", ",", "'--recipient'", ",", "'--recv-keys'", ",", "'--send-keys'", ",", "'--edit-key'", ",", "'--sign-key'", ",", "]", ")", "#: These options expect value which are left unchecked, though still run", "#: through :func:`_fix_unsafe`.", "unchecked_options", "=", "frozenset", "(", "[", "'--list-options'", ",", "'--passphrase-fd'", ",", "'--status-fd'", ",", "'--verify-options'", ",", "'--command-fd'", ",", "]", ")", "#: These have their own parsers and don't really fit into a group", "other_options", "=", "frozenset", "(", "[", "'--debug-level'", ",", "'--keyserver'", ",", "]", ")", "#: These should have a directory for an argument", "dir_options", "=", "frozenset", "(", "[", "'--homedir'", ",", "]", ")", "#: These expect a keyring or keyfile as their argument", "keyring_options", "=", "frozenset", "(", "[", "'--keyring'", ",", "'--primary-keyring'", ",", "'--secret-keyring'", ",", "'--trustdb-name'", ",", "]", ")", "#: These expect a filename (or the contents of a file as a string) or None", "#: (meaning that they read from stdin)", "file_or_none_options", "=", "frozenset", "(", "[", "'--decrypt'", ",", "'--decrypt-files'", ",", "'--encrypt'", ",", "'--encrypt-files'", ",", "'--import'", ",", "'--verify'", ",", "'--verify-files'", ",", "'--output'", ",", "]", ")", "#: These options expect a string. 
see :func:`_check_preferences`.", "pref_options", "=", "frozenset", "(", "[", "'--digest-algo'", ",", "'--cipher-algo'", ",", "'--compress-algo'", ",", "'--compression-algo'", ",", "'--cert-digest-algo'", ",", "'--personal-digest-prefs'", ",", "'--personal-digest-preferences'", ",", "'--personal-cipher-prefs'", ",", "'--personal-cipher-preferences'", ",", "'--personal-compress-prefs'", ",", "'--personal-compress-preferences'", ",", "'--pinentry-mode'", ",", "'--print-md'", ",", "'--trust-model'", ",", "]", ")", "#: These options expect no arguments", "none_options", "=", "frozenset", "(", "[", "'--allow-loopback-pinentry'", ",", "'--always-trust'", ",", "'--armor'", ",", "'--armour'", ",", "'--batch'", ",", "'--check-sigs'", ",", "'--check-trustdb'", ",", "'--clearsign'", ",", "'--debug-all'", ",", "'--default-recipient-self'", ",", "'--detach-sign'", ",", "'--export'", ",", "'--export-ownertrust'", ",", "'--export-secret-keys'", ",", "'--export-secret-subkeys'", ",", "'--fingerprint'", ",", "'--fixed-list-mode'", ",", "'--gen-key'", ",", "'--import-ownertrust'", ",", "'--list-config'", ",", "'--list-key'", ",", "'--list-keys'", ",", "'--list-packets'", ",", "'--list-public-keys'", ",", "'--list-secret-keys'", ",", "'--list-sigs'", ",", "'--lock-multiple'", ",", "'--lock-never'", ",", "'--lock-once'", ",", "'--no-default-keyring'", ",", "'--no-default-recipient'", ",", "'--no-emit-version'", ",", "'--no-options'", ",", "'--no-tty'", ",", "'--no-use-agent'", ",", "'--no-verbose'", ",", "'--print-mds'", ",", "'--quiet'", ",", "'--sign'", ",", "'--symmetric'", ",", "'--throw-keyids'", ",", "'--use-agent'", ",", "'--verbose'", ",", "'--version'", ",", "'--with-colons'", ",", "'--yes'", ",", "]", ")", "#: These options expect either None or a hex string", "hex_or_none_options", "=", "hex_options", ".", "intersection", "(", "none_options", ")", "allowed", "=", "hex_options", ".", "union", "(", "unchecked_options", ",", "other_options", ",", "dir_options", ",", "keyring_options", ",", "file_or_none_options", ",", "pref_options", ",", "none_options", ")", "if", "group", "and", "group", "in", "locals", "(", ")", ".", "keys", "(", ")", ":", "return", "locals", "(", ")", "[", "group", "]" ]
Get a specific group of options which are allowed.
[ "Get", "a", "specific", "group", "of", "options", "which", "are", "allowed", "." ]
python
train
47.2
contentful/contentful-management.py
contentful_management/webhook.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/webhook.py#L94-L114
def to_json(self):
    """
    Returns the JSON representation of the webhook.
    """
    result = super(Webhook, self).to_json()
    result.update({
        'name': self.name,
        'url': self.url,
        'topics': self.topics,
        'httpBasicUsername': self.http_basic_username,
        'headers': self.headers
    })
    if self.filters:
        result.update({'filters': self.filters})
    if self.transformation:
        result.update({'transformation': self.transformation})
    return result
[ "def", "to_json", "(", "self", ")", ":", "result", "=", "super", "(", "Webhook", ",", "self", ")", ".", "to_json", "(", ")", "result", ".", "update", "(", "{", "'name'", ":", "self", ".", "name", ",", "'url'", ":", "self", ".", "url", ",", "'topics'", ":", "self", ".", "topics", ",", "'httpBasicUsername'", ":", "self", ".", "http_basic_username", ",", "'headers'", ":", "self", ".", "headers", "}", ")", "if", "self", ".", "filters", ":", "result", ".", "update", "(", "{", "'filters'", ":", "self", ".", "filters", "}", ")", "if", "self", ".", "transformation", ":", "result", ".", "update", "(", "{", "'transformation'", ":", "self", ".", "transformation", "}", ")", "return", "result" ]
Returns the JSON representation of the webhook.
[ "Returns", "the", "JSON", "representation", "of", "the", "webhook", "." ]
python
train
26.380952
pkgw/pwkit
pwkit/lsqmdl.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lsqmdl.py#L126-L143
def set_data(self, data, invsigma=None):
    """Set the data to be modeled.

    Returns *self*.
    """
    self.data = np.array(data, dtype=np.float, ndmin=1)

    if invsigma is None:
        self.invsigma = np.ones(self.data.shape)
    else:
        i = np.array(invsigma, dtype=np.float)
        self.invsigma = np.broadcast_arrays(self.data, i)[1]  # allow scalar invsigma

    if self.invsigma.shape != self.data.shape:
        raise ValueError('data values and inverse-sigma values must have same shape')

    return self
[ "def", "set_data", "(", "self", ",", "data", ",", "invsigma", "=", "None", ")", ":", "self", ".", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "np", ".", "float", ",", "ndmin", "=", "1", ")", "if", "invsigma", "is", "None", ":", "self", ".", "invsigma", "=", "np", ".", "ones", "(", "self", ".", "data", ".", "shape", ")", "else", ":", "i", "=", "np", ".", "array", "(", "invsigma", ",", "dtype", "=", "np", ".", "float", ")", "self", ".", "invsigma", "=", "np", ".", "broadcast_arrays", "(", "self", ".", "data", ",", "i", ")", "[", "1", "]", "# allow scalar invsigma", "if", "self", ".", "invsigma", ".", "shape", "!=", "self", ".", "data", ".", "shape", ":", "raise", "ValueError", "(", "'data values and inverse-sigma values must have same shape'", ")", "return", "self" ]
Set the data to be modeled. Returns *self*.
[ "Set", "the", "data", "to", "be", "modeled", "." ]
python
train
31.111111
ValvePython/steam
steam/webapi.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/webapi.py#L444-L468
def get(interface, method, version=1,
        apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'],
        caller=None, session=None, params=None):
    """Send GET request to an API endpoint

    .. versionadded:: 0.8.3

    :param interface: interface name
    :type interface: str
    :param method: method name
    :type method: str
    :param version: method version
    :type version: int
    :param apihost: API hostname
    :type apihost: str
    :param https: whether to use HTTPS
    :type https: bool
    :param params: parameters for endpoint
    :type params: dict
    :return: endpoint response
    :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
    """
    url = u"%s://%s/%s/%s/v%s/" % (
        'https' if https else 'http', apihost, interface, method, version)

    return webapi_request(url, 'GET', caller=caller, session=session, params=params)
[ "def", "get", "(", "interface", ",", "method", ",", "version", "=", "1", ",", "apihost", "=", "DEFAULT_PARAMS", "[", "'apihost'", "]", ",", "https", "=", "DEFAULT_PARAMS", "[", "'https'", "]", ",", "caller", "=", "None", ",", "session", "=", "None", ",", "params", "=", "None", ")", ":", "url", "=", "u\"%s://%s/%s/%s/v%s/\"", "%", "(", "'https'", "if", "https", "else", "'http'", ",", "apihost", ",", "interface", ",", "method", ",", "version", ")", "return", "webapi_request", "(", "url", ",", "'GET'", ",", "caller", "=", "caller", ",", "session", "=", "session", ",", "params", "=", "params", ")" ]
Send GET request to an API endpoint .. versionadded:: 0.8.3 :param interface: interface name :type interface: str :param method: method name :type method: str :param version: method version :type version: int :param apihost: API hostname :type apihost: str :param https: whether to use HTTPS :type https: bool :param params: parameters for endpoint :type params: dict :return: endpoint response :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
[ "Send", "GET", "request", "to", "an", "API", "endpoint" ]
python
train
34.76
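The helper above only assembles a versioned URL and delegates to webapi_request. A short sketch of the same string construction (the default apihost value and the interface/method pair are assumptions here, and no request is actually sent) illustrates what gets requested.

# Sketch of the URL layout produced by get() (assumed host; no HTTP request is made).
apihost = 'api.steampowered.com'   # assumed value of DEFAULT_PARAMS['apihost']
https = True
interface, method, version = 'ISteamWebAPIUtil', 'GetServerInfo', 1

url = u"%s://%s/%s/%s/v%s/" % (
    'https' if https else 'http', apihost, interface, method, version)
print(url)   # https://api.steampowered.com/ISteamWebAPIUtil/GetServerInfo/v1/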
troeger/opensubmit
web/opensubmit/models/userprofile.py
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/models/userprofile.py#L124-L144
def move_user_data(primary, secondary): ''' Moves all submissions and other data linked to the secondary user into the primary user. Nothing is deleted here, we just modify foreign user keys. ''' # Update all submission authorships of the secondary to the primary submissions = Submission.objects.filter(authors__id=secondary.pk) for subm in submissions: if subm.submitter == secondary: subm.submitter = primary subm.authors.remove(secondary) subm.authors.add(primary) subm.save() # Transfer course registrations try: for course in secondary.profile.courses.all(): primary.profile.courses.add(course) primary.profile.save() except UserProfile.DoesNotExist: # That's a database consistency problem, but he will go away anyway pass
[ "def", "move_user_data", "(", "primary", ",", "secondary", ")", ":", "# Update all submission authorships of the secondary to the primary", "submissions", "=", "Submission", ".", "objects", ".", "filter", "(", "authors__id", "=", "secondary", ".", "pk", ")", "for", "subm", "in", "submissions", ":", "if", "subm", ".", "submitter", "==", "secondary", ":", "subm", ".", "submitter", "=", "primary", "subm", ".", "authors", ".", "remove", "(", "secondary", ")", "subm", ".", "authors", ".", "add", "(", "primary", ")", "subm", ".", "save", "(", ")", "# Transfer course registrations", "try", ":", "for", "course", "in", "secondary", ".", "profile", ".", "courses", ".", "all", "(", ")", ":", "primary", ".", "profile", ".", "courses", ".", "add", "(", "course", ")", "primary", ".", "profile", ".", "save", "(", ")", "except", "UserProfile", ".", "DoesNotExist", ":", "# That's a database consistency problem, but he will go away anyway", "pass" ]
Moves all submissions and other data linked to the secondary user into the primary user. Nothing is deleted here, we just modify foreign user keys.
[ "Moves", "all", "submissions", "and", "other", "data", "linked", "to", "the", "secondary", "user", "into", "the", "primary", "user", ".", "Nothing", "is", "deleted", "here", "we", "just", "modify", "foreign", "user", "keys", "." ]
python
train
40.428571
jciskey/pygraph
pygraph/predefined_graphs.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/predefined_graphs.py#L217-L233
def build_chvatal_graph():
    """Makes a new Chvatal graph.
    Ref: http://mathworld.wolfram.com/ChvatalGraph.html"""

    # The easiest way to build the Chvatal graph is to start
    # with C12 and add the additional 12 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 7), (1, 9), (2, 5), (2, 11), (3, 7), (3, 9),
        (4, 10), (4, 12), (5, 8), (6, 10), (6, 12), (8, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
[ "def", "build_chvatal_graph", "(", ")", ":", "# The easiest way to build the Chvatal graph is to start", "# with C12 and add the additional 12 edges", "graph", "=", "build_cycle_graph", "(", "12", ")", "edge_tpls", "=", "[", "(", "1", ",", "7", ")", ",", "(", "1", ",", "9", ")", ",", "(", "2", ",", "5", ")", ",", "(", "2", ",", "11", ")", ",", "(", "3", ",", "7", ")", ",", "(", "3", ",", "9", ")", ",", "(", "4", ",", "10", ")", ",", "(", "4", ",", "12", ")", ",", "(", "5", ",", "8", ")", ",", "(", "6", ",", "10", ")", ",", "(", "6", ",", "12", ")", ",", "(", "8", ",", "11", ")", ",", "]", "for", "i", ",", "j", "in", "edge_tpls", ":", "graph", ".", "new_edge", "(", "i", ",", "j", ")", "return", "graph" ]
Makes a new Chvatal graph. Ref: http://mathworld.wolfram.com/ChvatalGraph.html
[ "Makes", "a", "new", "Chvatal", "graph", ".", "Ref", ":", "http", ":", "//", "mathworld", ".", "wolfram", ".", "com", "/", "ChvatalGraph", ".", "html" ]
python
train
27.176471
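The Chvatal graph is 4-regular with 12 vertices and 24 edges, which follows directly from the construction above (a 12-cycle plus 12 chords). The standalone check below recomputes the degrees from the same edge list without needing pygraph.

# Degree check for the Chvatal graph construction (plain Python, no pygraph needed).
from collections import Counter

cycle_edges = [(i, i % 12 + 1) for i in range(1, 13)]          # C12: 1-2, 2-3, ..., 12-1
chords = [(1, 7), (1, 9), (2, 5), (2, 11), (3, 7), (3, 9),
          (4, 10), (4, 12), (5, 8), (6, 10), (6, 12), (8, 11)]

degree = Counter()
for u, v in cycle_edges + chords:
    degree[u] += 1
    degree[v] += 1

assert len(cycle_edges + chords) == 24
assert all(degree[v] == 4 for v in range(1, 13))               # 4-regular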
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L352-L365
def get_container_list(self) -> list:
    """Get list of containers.

    Returns:
        list, all the ids of containers
    """
    # Initialising empty list
    containers = []
    containers_list = self._client.containers.list()
    for c_list in containers_list:
        containers.append(c_list.short_id)

    return containers
[ "def", "get_container_list", "(", "self", ")", "->", "list", ":", "# Initialising empty list", "containers", "=", "[", "]", "containers_list", "=", "self", ".", "_client", ".", "containers", ".", "list", "(", ")", "for", "c_list", "in", "containers_list", ":", "containers", ".", "append", "(", "c_list", ".", "short_id", ")", "return", "containers" ]
Get list of containers. Returns: list, all the ids of containers
[ "Get", "list", "of", "containers", "." ]
python
train
25.857143
saltstack/salt
salt/key.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L352-L359
def gen_signature(self, privkey, pubkey, sig_path):
    '''
    Generate master public-key-signature
    '''
    return salt.crypt.gen_signature(privkey,
                                    pubkey,
                                    sig_path,
                                    self.passphrase)
[ "def", "gen_signature", "(", "self", ",", "privkey", ",", "pubkey", ",", "sig_path", ")", ":", "return", "salt", ".", "crypt", ".", "gen_signature", "(", "privkey", ",", "pubkey", ",", "sig_path", ",", "self", ".", "passphrase", ")" ]
Generate master public-key-signature
[ "Generate", "master", "public", "-", "key", "-", "signature" ]
python
train
39.625
redapple/parslepy
parslepy/base.py
https://github.com/redapple/parslepy/blob/a8bc4c0592824459629018c8f4c6ae3dad6cc3cc/parslepy/base.py#L338-L429
def _compile(self, parselet_node, level=0): """ Build part of the abstract Parsley extraction tree Arguments: parselet_node (dict) -- part of the Parsley tree to compile (can be the root dict/node) level (int) -- current recursion depth (used for debug) """ if self.DEBUG: debug_offset = "".join([" " for x in range(level)]) if self.DEBUG: print(debug_offset, "%s::compile(%s)" % ( self.__class__.__name__, parselet_node)) if isinstance(parselet_node, dict): parselet_tree = ParsleyNode() for k, v in list(parselet_node.items()): # we parse the key raw elements but without much # interpretation (which is done by the SelectorHandler) try: m = self.REGEX_PARSELET_KEY.match(k) if not m: if self.DEBUG: print(debug_offset, "could not parse key", k) raise InvalidKeySyntax(k) except: raise InvalidKeySyntax("Key %s is not valid" % k) key = m.group('key') # by default, fields are required key_required = True operator = m.group('operator') if operator == '?': key_required = False # FIXME: "!" operator not supported (complete array) scope = m.group('scope') # example: get list of H3 tags # { "titles": ["h3"] } # FIXME: should we support multiple selectors in list? # e.g. { "titles": ["h1", "h2", "h3", "h4"] } if isinstance(v, (list, tuple)): v = v[0] iterate = True else: iterate = False # keys in the abstract Parsley trees are of type `ParsleyContext` try: parsley_context = ParsleyContext( key, operator=operator, required=key_required, scope=self.selector_handler.make(scope) if scope else None, iterate=iterate) except SyntaxError: if self.DEBUG: print("Invalid scope:", k, scope) raise if self.DEBUG: print(debug_offset, "current context:", parsley_context) # go deeper in the Parsley tree... try: child_tree = self._compile(v, level=level+1) except SyntaxError: if self.DEBUG: print("Invalid value: ", v) raise except: raise if self.DEBUG: print(debug_offset, "child tree:", child_tree) parselet_tree[parsley_context] = child_tree return parselet_tree # a string leaf should match some kind of selector, # let the selector handler deal with it elif isstr(parselet_node): return self.selector_handler.make(parselet_node) else: raise ValueError( "Unsupported type(%s) for Parselet node <%s>" % ( type(parselet_node), parselet_node))
[ "def", "_compile", "(", "self", ",", "parselet_node", ",", "level", "=", "0", ")", ":", "if", "self", ".", "DEBUG", ":", "debug_offset", "=", "\"\"", ".", "join", "(", "[", "\" \"", "for", "x", "in", "range", "(", "level", ")", "]", ")", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"%s::compile(%s)\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "parselet_node", ")", ")", "if", "isinstance", "(", "parselet_node", ",", "dict", ")", ":", "parselet_tree", "=", "ParsleyNode", "(", ")", "for", "k", ",", "v", "in", "list", "(", "parselet_node", ".", "items", "(", ")", ")", ":", "# we parse the key raw elements but without much", "# interpretation (which is done by the SelectorHandler)", "try", ":", "m", "=", "self", ".", "REGEX_PARSELET_KEY", ".", "match", "(", "k", ")", "if", "not", "m", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"could not parse key\"", ",", "k", ")", "raise", "InvalidKeySyntax", "(", "k", ")", "except", ":", "raise", "InvalidKeySyntax", "(", "\"Key %s is not valid\"", "%", "k", ")", "key", "=", "m", ".", "group", "(", "'key'", ")", "# by default, fields are required", "key_required", "=", "True", "operator", "=", "m", ".", "group", "(", "'operator'", ")", "if", "operator", "==", "'?'", ":", "key_required", "=", "False", "# FIXME: \"!\" operator not supported (complete array)", "scope", "=", "m", ".", "group", "(", "'scope'", ")", "# example: get list of H3 tags", "# { \"titles\": [\"h3\"] }", "# FIXME: should we support multiple selectors in list?", "# e.g. { \"titles\": [\"h1\", \"h2\", \"h3\", \"h4\"] }", "if", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", ":", "v", "=", "v", "[", "0", "]", "iterate", "=", "True", "else", ":", "iterate", "=", "False", "# keys in the abstract Parsley trees are of type `ParsleyContext`", "try", ":", "parsley_context", "=", "ParsleyContext", "(", "key", ",", "operator", "=", "operator", ",", "required", "=", "key_required", ",", "scope", "=", "self", ".", "selector_handler", ".", "make", "(", "scope", ")", "if", "scope", "else", "None", ",", "iterate", "=", "iterate", ")", "except", "SyntaxError", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "\"Invalid scope:\"", ",", "k", ",", "scope", ")", "raise", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"current context:\"", ",", "parsley_context", ")", "# go deeper in the Parsley tree...", "try", ":", "child_tree", "=", "self", ".", "_compile", "(", "v", ",", "level", "=", "level", "+", "1", ")", "except", "SyntaxError", ":", "if", "self", ".", "DEBUG", ":", "print", "(", "\"Invalid value: \"", ",", "v", ")", "raise", "except", ":", "raise", "if", "self", ".", "DEBUG", ":", "print", "(", "debug_offset", ",", "\"child tree:\"", ",", "child_tree", ")", "parselet_tree", "[", "parsley_context", "]", "=", "child_tree", "return", "parselet_tree", "# a string leaf should match some kind of selector,", "# let the selector handler deal with it", "elif", "isstr", "(", "parselet_node", ")", ":", "return", "self", ".", "selector_handler", ".", "make", "(", "parselet_node", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported type(%s) for Parselet node <%s>\"", "%", "(", "type", "(", "parselet_node", ")", ",", "parselet_node", ")", ")" ]
Build part of the abstract Parsley extraction tree Arguments: parselet_node (dict) -- part of the Parsley tree to compile (can be the root dict/node) level (int) -- current recursion depth (used for debug)
[ "Build", "part", "of", "the", "abstract", "Parsley", "extraction", "tree" ]
python
valid
37.586957
danilobellini/audiolazy
audiolazy/lazy_analysis.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_analysis.py#L722-L802
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True): """ Overlap-add algorithm using Numpy arrays. Parameters ---------- blk_sig : An iterable of blocks (sequences), such as the ``Stream.blocks`` result. size : Block size for each ``blk_sig`` element, in samples. hop : Number of samples for two adjacent blocks (defaults to the size). wnd : Windowing function to be applied to each block or any iterable with exactly ``size`` elements. If ``None`` (default), applies a rectangular window. normalize : Flag whether the window should be normalized so that the process could happen in the [-1; 1] range, dividing the window by its hop gain. Default is ``True``. Returns ------- A Stream instance with the blocks overlapped and added. See Also -------- Stream.blocks : Splits the Stream instance into blocks with given size and hop. blocks : Same to Stream.blocks but for without using the Stream class. chain : Lazily joins all iterables given as parameters. chain.from_iterable : Same to ``chain(*data)``, but the ``data`` evaluation is lazy. window : Window/apodization/tapering functions for a given size as a StrategyDict. Note ---- Each block has the window function applied to it and the result is the sum of the blocks without any edge-case special treatment for the first and last few blocks. """ import numpy as np # Finds the size from data, if needed if size is None: blk_sig = Stream(blk_sig) size = len(blk_sig.peek()) if hop is None: hop = size # Find the right windowing function to be applied if wnd is None: wnd = np.ones(size) elif callable(wnd) and not isinstance(wnd, Stream): wnd = wnd(size) if isinstance(wnd, Sequence): wnd = np.array(wnd) elif isinstance(wnd, Iterable): wnd = np.hstack(wnd) else: raise TypeError("Window should be an iterable or a callable") # Normalization to the [-1; 1] range if normalize: steps = Stream(wnd).blocks(hop).map(np.array) gain = np.sum(np.abs(np.vstack(steps)), 0).max() if gain: # If gain is zero, normalization couldn't have any effect wnd = wnd / gain # Can't use "/=" nor "*=" as Numpy would keep datatype # Overlap-add algorithm old = np.zeros(size) for blk in (wnd * blk for blk in blk_sig): blk[:-hop] += old[hop:] for el in blk[:hop]: yield el old = blk for el in old[hop:]: # No more blocks, finish yielding the last one yield el
[ "def", "overlap_add", "(", "blk_sig", ",", "size", "=", "None", ",", "hop", "=", "None", ",", "wnd", "=", "None", ",", "normalize", "=", "True", ")", ":", "import", "numpy", "as", "np", "# Finds the size from data, if needed", "if", "size", "is", "None", ":", "blk_sig", "=", "Stream", "(", "blk_sig", ")", "size", "=", "len", "(", "blk_sig", ".", "peek", "(", ")", ")", "if", "hop", "is", "None", ":", "hop", "=", "size", "# Find the right windowing function to be applied", "if", "wnd", "is", "None", ":", "wnd", "=", "np", ".", "ones", "(", "size", ")", "elif", "callable", "(", "wnd", ")", "and", "not", "isinstance", "(", "wnd", ",", "Stream", ")", ":", "wnd", "=", "wnd", "(", "size", ")", "if", "isinstance", "(", "wnd", ",", "Sequence", ")", ":", "wnd", "=", "np", ".", "array", "(", "wnd", ")", "elif", "isinstance", "(", "wnd", ",", "Iterable", ")", ":", "wnd", "=", "np", ".", "hstack", "(", "wnd", ")", "else", ":", "raise", "TypeError", "(", "\"Window should be an iterable or a callable\"", ")", "# Normalization to the [-1; 1] range", "if", "normalize", ":", "steps", "=", "Stream", "(", "wnd", ")", ".", "blocks", "(", "hop", ")", ".", "map", "(", "np", ".", "array", ")", "gain", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "np", ".", "vstack", "(", "steps", ")", ")", ",", "0", ")", ".", "max", "(", ")", "if", "gain", ":", "# If gain is zero, normalization couldn't have any effect", "wnd", "=", "wnd", "/", "gain", "# Can't use \"/=\" nor \"*=\" as Numpy would keep datatype", "# Overlap-add algorithm", "old", "=", "np", ".", "zeros", "(", "size", ")", "for", "blk", "in", "(", "wnd", "*", "blk", "for", "blk", "in", "blk_sig", ")", ":", "blk", "[", ":", "-", "hop", "]", "+=", "old", "[", "hop", ":", "]", "for", "el", "in", "blk", "[", ":", "hop", "]", ":", "yield", "el", "old", "=", "blk", "for", "el", "in", "old", "[", "hop", ":", "]", ":", "# No more blocks, finish yielding the last one", "yield", "el" ]
Overlap-add algorithm using Numpy arrays. Parameters ---------- blk_sig : An iterable of blocks (sequences), such as the ``Stream.blocks`` result. size : Block size for each ``blk_sig`` element, in samples. hop : Number of samples for two adjacent blocks (defaults to the size). wnd : Windowing function to be applied to each block or any iterable with exactly ``size`` elements. If ``None`` (default), applies a rectangular window. normalize : Flag whether the window should be normalized so that the process could happen in the [-1; 1] range, dividing the window by its hop gain. Default is ``True``. Returns ------- A Stream instance with the blocks overlapped and added. See Also -------- Stream.blocks : Splits the Stream instance into blocks with given size and hop. blocks : Same to Stream.blocks but for without using the Stream class. chain : Lazily joins all iterables given as parameters. chain.from_iterable : Same to ``chain(*data)``, but the ``data`` evaluation is lazy. window : Window/apodization/tapering functions for a given size as a StrategyDict. Note ---- Each block has the window function applied to it and the result is the sum of the blocks without any edge-case special treatment for the first and last few blocks.
[ "Overlap", "-", "add", "algorithm", "using", "Numpy", "arrays", "." ]
python
train
30.283951
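Because overlap_add above leans on AudioLazy's Stream type, a minimal NumPy-only sketch of the same overlap-add idea (rectangular window, toy block list) may help; it is not the library's streaming implementation, just the core accumulation loop.

# Minimal overlap-add sketch with NumPy (illustrative; not the AudioLazy implementation).
import numpy as np

def overlap_add_simple(blocks, size, hop):
    out = np.zeros(hop * (len(blocks) - 1) + size)
    for k, blk in enumerate(blocks):
        out[k * hop:k * hop + size] += blk      # add each windowed block at its hop offset
    return out

blocks = [np.ones(4) * 0.5] * 3                  # three blocks, size 4, already scaled by 1/gain
print(overlap_add_simple(blocks, size=4, hop=2))
# -> [0.5 0.5 1.  1.  1.  1.  0.5 0.5]  (interior samples overlap twice)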
chrisjrn/registrasion
registrasion/controllers/credit_note.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/credit_note.py#L60-L83
def cancellation_fee(self, percentage):
    ''' Generates an invoice with a cancellation fee, and applies
    credit to the invoice.

    percentage (Decimal): The percentage of the credit note to turn into
    a cancellation fee. Must be 0 <= percentage <= 100.
    '''

    # Local import to fix import cycles. Can we do better?
    from .invoice import InvoiceController

    assert(percentage >= 0 and percentage <= 100)

    cancellation_fee = self.credit_note.value * percentage / 100
    due = datetime.timedelta(days=1)
    item = [("Cancellation fee", cancellation_fee)]
    invoice = InvoiceController.manual_invoice(
        self.credit_note.invoice.user, due, item
    )

    if not invoice.is_paid:
        self.apply_to_invoice(invoice)

    return InvoiceController(invoice)
[ "def", "cancellation_fee", "(", "self", ",", "percentage", ")", ":", "# Local import to fix import cycles. Can we do better?", "from", ".", "invoice", "import", "InvoiceController", "assert", "(", "percentage", ">=", "0", "and", "percentage", "<=", "100", ")", "cancellation_fee", "=", "self", ".", "credit_note", ".", "value", "*", "percentage", "/", "100", "due", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "item", "=", "[", "(", "\"Cancellation fee\"", ",", "cancellation_fee", ")", "]", "invoice", "=", "InvoiceController", ".", "manual_invoice", "(", "self", ".", "credit_note", ".", "invoice", ".", "user", ",", "due", ",", "item", ")", "if", "not", "invoice", ".", "is_paid", ":", "self", ".", "apply_to_invoice", "(", "invoice", ")", "return", "InvoiceController", "(", "invoice", ")" ]
Generates an invoice with a cancellation fee, and applies credit to the invoice. percentage (Decimal): The percentage of the credit note to turn into a cancellation fee. Must be 0 <= percentage <= 100.
[ "Generates", "an", "invoice", "with", "a", "cancellation", "fee", "and", "applies", "credit", "to", "the", "invoice", "." ]
python
test
34.75
gbowerman/azurerm
azurerm/acs.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/acs.py#L64-L81
def delete_container_service(access_token, subscription_id, resource_group, service_name):
    '''Delete a named container.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        service_name (str): Name of container service.

    Returns:
        HTTP response.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', resource_group,
                        '/providers/Microsoft.ContainerService/ContainerServices/',
                        service_name,
                        '?api-version=', ACS_API])
    return do_delete(endpoint, access_token)
[ "def", "delete_container_service", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "service_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerService/ContainerServices/'", ",", "service_name", ",", "'?api-version='", ",", "ACS_API", "]", ")", "return", "do_delete", "(", "endpoint", ",", "access_token", ")" ]
Delete a named container. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. Returns: HTTP response.
[ "Delete", "a", "named", "container", "." ]
python
train
41.611111
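The call above is a thin wrapper that builds an Azure Resource Manager REST URL and issues an HTTP DELETE. The sketch below spells out the URL shape with placeholder values; the management host and the ACS_API version string are assumptions here, not values taken from the library.

# Sketch of the DELETE endpoint assembled by delete_container_service (placeholder values).
rm_endpoint = 'https://management.azure.com'     # assumed return value of get_rm_endpoint()
acs_api = '2017-07-01'                           # assumed ACS_API version string
subscription_id = '00000000-0000-0000-0000-000000000000'
resource_group = 'my-resource-group'
service_name = 'my-acs-cluster'

endpoint = ''.join([rm_endpoint,
                    '/subscriptions/', subscription_id,
                    '/resourcegroups/', resource_group,
                    '/providers/Microsoft.ContainerService/ContainerServices/', service_name,
                    '?api-version=', acs_api])
print(endpoint)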
user-cont/conu
conu/backend/docker/image.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/image.py#L342-L355
def tag_image(self, repository=None, tag=None):
    """
    Apply additional tags to the image or even add a new name

    :param repository: str, see constructor
    :param tag: str, see constructor
    :return: instance of DockerImage
    """
    if not (repository or tag):
        raise ValueError("You need to specify either repository or tag.")
    r = repository or self.name
    t = "latest" if not tag else tag
    self.d.tag(image=self.get_full_name(), repository=r, tag=t)
    return DockerImage(r, tag=t)
[ "def", "tag_image", "(", "self", ",", "repository", "=", "None", ",", "tag", "=", "None", ")", ":", "if", "not", "(", "repository", "or", "tag", ")", ":", "raise", "ValueError", "(", "\"You need to specify either repository or tag.\"", ")", "r", "=", "repository", "or", "self", ".", "name", "t", "=", "\"latest\"", "if", "not", "tag", "else", "tag", "self", ".", "d", ".", "tag", "(", "image", "=", "self", ".", "get_full_name", "(", ")", ",", "repository", "=", "r", ",", "tag", "=", "t", ")", "return", "DockerImage", "(", "r", ",", "tag", "=", "t", ")" ]
Apply additional tags to the image or even add a new name :param repository: str, see constructor :param tag: str, see constructor :return: instance of DockerImage
[ "Apply", "additional", "tags", "to", "the", "image", "or", "even", "add", "a", "new", "name" ]
python
train
39.357143
BD2KGenomics/protect
src/protect/mutation_calling/somaticsniper.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L126-L165
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options): """ Run SomaticSniper on the DNA bams. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :return: fsID to the genome-level vcf :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() input_files = { 'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'], 'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'], 'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'], 'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'], 'genome.fa.tar.gz': somaticsniper_options['genome_fasta'], 'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) for key in ('genome.fa', 'genome.fa.fai'): input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir) input_files = {key: docker_path(path) for key, path in input_files.items()} output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf') parameters = ['-f', input_files['genome.fa'], '-F', 'vcf', '-G', '-L', '-q', '1', '-Q', '15', input_files['tumor.bam'], input_files['normal.bam'], docker_path(output_file)] docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) outfile = job.fileStore.writeGlobalFile(output_file) job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient']) return outfile
[ "def", "run_somaticsniper_full", "(", "job", ",", "tumor_bam", ",", "normal_bam", ",", "univ_options", ",", "somaticsniper_options", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "input_files", "=", "{", "'tumor.bam'", ":", "tumor_bam", "[", "'tumor_dna_fix_pg_sorted.bam'", "]", ",", "'tumor.bam.bai'", ":", "tumor_bam", "[", "'tumor_dna_fix_pg_sorted.bam.bai'", "]", ",", "'normal.bam'", ":", "normal_bam", "[", "'normal_dna_fix_pg_sorted.bam'", "]", ",", "'normal.bam.bai'", ":", "normal_bam", "[", "'normal_dna_fix_pg_sorted.bam.bai'", "]", ",", "'genome.fa.tar.gz'", ":", "somaticsniper_options", "[", "'genome_fasta'", "]", ",", "'genome.fa.fai.tar.gz'", ":", "somaticsniper_options", "[", "'genome_fai'", "]", "}", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "False", ")", "for", "key", "in", "(", "'genome.fa'", ",", "'genome.fa.fai'", ")", ":", "input_files", "[", "key", "]", "=", "untargz", "(", "input_files", "[", "key", "+", "'.tar.gz'", "]", ",", "work_dir", ")", "input_files", "=", "{", "key", ":", "docker_path", "(", "path", ")", "for", "key", ",", "path", "in", "input_files", ".", "items", "(", ")", "}", "output_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "'somatic-sniper_full.vcf'", ")", "parameters", "=", "[", "'-f'", ",", "input_files", "[", "'genome.fa'", "]", ",", "'-F'", ",", "'vcf'", ",", "'-G'", ",", "'-L'", ",", "'-q'", ",", "'1'", ",", "'-Q'", ",", "'15'", ",", "input_files", "[", "'tumor.bam'", "]", ",", "input_files", "[", "'normal.bam'", "]", ",", "docker_path", "(", "output_file", ")", "]", "docker_call", "(", "tool", "=", "'somaticsniper'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "dockerhub", "=", "univ_options", "[", "'dockerhub'", "]", ",", "tool_version", "=", "somaticsniper_options", "[", "'version'", "]", ")", "outfile", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output_file", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Ran SomaticSniper on %s successfully'", "%", "univ_options", "[", "'patient'", "]", ")", "return", "outfile" ]
Run SomaticSniper on the DNA bams. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :return: fsID to the genome-level vcf :rtype: toil.fileStore.FileID
[ "Run", "SomaticSniper", "on", "the", "DNA", "bams", "." ]
python
train
49.075
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1613-L1625
def _xml_namespace_strip(root):
    # type: (ET.Element) -> None
    """Strip the XML namespace prefix from all element tags under the given root Element."""
    if '}' not in root.tag:
        return  # Nothing to do, no namespace present

    for element in root.iter():
        if '}' in element.tag:
            element.tag = element.tag.split('}')[1]
        else:  # pragma: no cover
            # We should never get here. If there is a namespace, then the namespace should be
            # included in all elements.
            pass
[ "def", "_xml_namespace_strip", "(", "root", ")", ":", "# type: (ET.Element) -> None", "if", "'}'", "not", "in", "root", ".", "tag", ":", "return", "# Nothing to do, no namespace present", "for", "element", "in", "root", ".", "iter", "(", ")", ":", "if", "'}'", "in", "element", ".", "tag", ":", "element", ".", "tag", "=", "element", ".", "tag", ".", "split", "(", "'}'", ")", "[", "1", "]", "else", ":", "# pragma: no cover", "# We should never get here. If there is a namespace, then the namespace should be", "# included in all elements.", "pass" ]
Strip the XML namespace prefix from all element tags under the given root Element.
[ "Strip", "the", "XML", "namespace", "prefix", "from", "all", "element", "tags", "under", "the", "given", "root", "Element", "." ]
python
train
40.615385
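ElementTree stores a namespaced tag as '{uri}localname', which is why splitting on '}' recovers the local name. A small self-contained demonstration of that representation and of the same stripping loop:

# How ElementTree represents namespaced tags, and what the strip helper relies on.
import xml.etree.ElementTree as ET

root = ET.fromstring('<a:book xmlns:a="urn:example"><a:title>X</a:title></a:book>')
print(root.tag)                        # {urn:example}book
for element in root.iter():
    if '}' in element.tag:
        element.tag = element.tag.split('}')[1]
print([e.tag for e in root.iter()])    # ['book', 'title']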
clalancette/pycdlib
pycdlib/rockridge.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L566-L599
def parse(self, rrstr): # type: (bytes) -> None ''' Parse a Rock Ridge Extensions Reference record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('ER record already initialized!') (su_len, su_entry_version_unused, len_id, len_des, len_src, self.ext_ver) = struct.unpack_from('=BBBBBB', rrstr[:8], 2) # We assume that the caller has already checked the su_entry_version, # so we don't bother. # Ensure that the length isn't crazy if su_len > len(rrstr): raise pycdlibexception.PyCdlibInvalidISO('Length of ER record much too long') # Also ensure that the combination of len_id, len_des, and len_src # doesn't overrun su_len; because of the check above, this means it # can't overrun len(rrstr) either total_length = len_id + len_des + len_src if total_length > su_len: raise pycdlibexception.PyCdlibInvalidISO('Combined length of ER ID, des, and src longer than record') fmtstr = '=%ds%ds%ds' % (len_id, len_des, len_src) (self.ext_id, self.ext_des, self.ext_src) = struct.unpack_from(fmtstr, rrstr, 8) self._initialized = True
[ "def", "parse", "(", "self", ",", "rrstr", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'ER record already initialized!'", ")", "(", "su_len", ",", "su_entry_version_unused", ",", "len_id", ",", "len_des", ",", "len_src", ",", "self", ".", "ext_ver", ")", "=", "struct", ".", "unpack_from", "(", "'=BBBBBB'", ",", "rrstr", "[", ":", "8", "]", ",", "2", ")", "# We assume that the caller has already checked the su_entry_version,", "# so we don't bother.", "# Ensure that the length isn't crazy", "if", "su_len", ">", "len", "(", "rrstr", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Length of ER record much too long'", ")", "# Also ensure that the combination of len_id, len_des, and len_src", "# doesn't overrun su_len; because of the check above, this means it", "# can't overrun len(rrstr) either", "total_length", "=", "len_id", "+", "len_des", "+", "len_src", "if", "total_length", ">", "su_len", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Combined length of ER ID, des, and src longer than record'", ")", "fmtstr", "=", "'=%ds%ds%ds'", "%", "(", "len_id", ",", "len_des", ",", "len_src", ")", "(", "self", ".", "ext_id", ",", "self", ".", "ext_des", ",", "self", ".", "ext_src", ")", "=", "struct", ".", "unpack_from", "(", "fmtstr", ",", "rrstr", ",", "8", ")", "self", ".", "_initialized", "=", "True" ]
Parse a Rock Ridge Extensions Reference record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing.
[ "Parse", "a", "Rock", "Ridge", "Extensions", "Reference", "record", "out", "of", "a", "string", "." ]
python
train
39.411765
persephone-tools/persephone
persephone/experiment.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/experiment.py#L18-L22
def get_exp_dir_num(parent_dir: str) -> int: """ Gets the number of the current experiment directory.""" return max([int(fn.split(".")[0]) for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()] + [-1])
[ "def", "get_exp_dir_num", "(", "parent_dir", ":", "str", ")", "->", "int", ":", "return", "max", "(", "[", "int", "(", "fn", ".", "split", "(", "\".\"", ")", "[", "0", "]", ")", "for", "fn", "in", "os", ".", "listdir", "(", "parent_dir", ")", "if", "fn", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "isdigit", "(", ")", "]", "+", "[", "-", "1", "]", ")" ]
Gets the number of the current experiment directory.
[ "Gets", "the", "number", "of", "the", "current", "experiment", "directory", "." ]
python
train
50
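The persephone helper above scans a parent directory for numerically named entries and returns the highest leading number, or -1 when none exist. A hedged, self-contained sketch of the same computation over an in-memory list of names (the entries are made up):

names = ['0', '1', '2.old', 'notes.txt', '7']

# Mirror the record: keep entries whose part before the first '.' is all digits,
# take the max, and fall back to -1 when nothing matches.
exp_num = max([int(n.split('.')[0]) for n in names if n.split('.')[0].isdigit()] + [-1])
print(exp_num)      # 7
print(exp_num + 1)  # a caller would typically create directory '8' next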
SeleniumHQ/selenium
py/selenium/webdriver/common/action_chains.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/common/action_chains.py#L149-L166
def double_click(self, on_element=None): """ Double-clicks an element. :Args: - on_element: The element to double-click. If None, clicks on current mouse position. """ if on_element: self.move_to_element(on_element) if self._driver.w3c: self.w3c_actions.pointer_action.double_click() for _ in range(4): self.w3c_actions.key_action.pause() else: self._actions.append(lambda: self._driver.execute( Command.DOUBLE_CLICK, {})) return self
[ "def", "double_click", "(", "self", ",", "on_element", "=", "None", ")", ":", "if", "on_element", ":", "self", ".", "move_to_element", "(", "on_element", ")", "if", "self", ".", "_driver", ".", "w3c", ":", "self", ".", "w3c_actions", ".", "pointer_action", ".", "double_click", "(", ")", "for", "_", "in", "range", "(", "4", ")", ":", "self", ".", "w3c_actions", ".", "key_action", ".", "pause", "(", ")", "else", ":", "self", ".", "_actions", ".", "append", "(", "lambda", ":", "self", ".", "_driver", ".", "execute", "(", "Command", ".", "DOUBLE_CLICK", ",", "{", "}", ")", ")", "return", "self" ]
Double-clicks an element. :Args: - on_element: The element to double-click. If None, clicks on current mouse position.
[ "Double", "-", "clicks", "an", "element", "." ]
python
train
33.277778
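A short usage sketch for the ActionChains method above; it assumes a local chromedriver on PATH, uses example.com purely as a placeholder page, and uses the Selenium 3 style locator call matching the era of this record:

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

driver = webdriver.Chrome()                      # assumes chromedriver is installed
driver.get('https://example.com')
heading = driver.find_element_by_tag_name('h1')  # element to double-click

# Chained call: queue the double-click, then perform() sends it to the browser.
ActionChains(driver).double_click(heading).perform()
driver.quit()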
ashleysommer/sanicpluginsframework
spf/plugin.py
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L85-L105
def listener(self, event, *args, **kwargs): """Create a listener from a decorated function. :param event: Event to listen to. :type event: str :param args: captures all of the positional arguments passed in :type args: tuple(Any) :param kwargs: captures the keyword arguments passed in :type kwargs: dict(Any) :return: The function to use as the listener :rtype: fn """ if len(args) == 1 and callable(args[0]): # pragma: no cover raise RuntimeError("Cannot use the @listener decorator without " "arguments") def wrapper(listener_f): if len(kwargs) > 0: listener_f = (listener_f, kwargs) self._listeners[event].append(listener_f) return listener_f return wrapper
[ "def", "listener", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "1", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "# pragma: no cover", "raise", "RuntimeError", "(", "\"Cannot use the @listener decorator without \"", "\"arguments\"", ")", "def", "wrapper", "(", "listener_f", ")", ":", "if", "len", "(", "kwargs", ")", ">", "0", ":", "listener_f", "=", "(", "listener_f", ",", "kwargs", ")", "self", ".", "_listeners", "[", "event", "]", ".", "append", "(", "listener_f", ")", "return", "listener_f", "return", "wrapper" ]
Create a listener from a decorated function. :param event: Event to listen to. :type event: str :param args: captures all of the positional arguments passed in :type args: tuple(Any) :param kwargs: captures the keyword arguments passed in :type kwargs: dict(Any) :return: The function to use as the listener :rtype: fn
[ "Create", "a", "listener", "from", "a", "decorated", "function", ".", ":", "param", "event", ":", "Event", "to", "listen", "to", ".", ":", "type", "event", ":", "str", ":", "param", "args", ":", "captures", "all", "of", "the", "positional", "arguments", "passed", "in", ":", "type", "args", ":", "tuple", "(", "Any", ")", ":", "param", "kwargs", ":", "captures", "the", "keyword", "arguments", "passed", "in", ":", "type", "kwargs", ":", "dict", "(", "Any", ")", ":", "return", ":", "The", "function", "to", "use", "as", "the", "listener", ":", "rtype", ":", "fn" ]
python
train
40.095238
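The plugin method above is a decorator factory that files the decorated function under an event name, optionally bundling keyword arguments with it. A generic, self-contained sketch of that pattern (this is not the sanicpluginsframework API, only the same shape):

from collections import defaultdict

class MiniPlugin:
    def __init__(self):
        self._listeners = defaultdict(list)

    def listener(self, event, **kwargs):
        # Returns a decorator; any kwargs travel alongside the listener function.
        def wrapper(listener_f):
            entry = (listener_f, kwargs) if kwargs else listener_f
            self._listeners[event].append(entry)
            return listener_f
        return wrapper

plugin = MiniPlugin()

@plugin.listener('before_server_start', priority=5)
def setup(app):
    print('setting up', app)

print(plugin._listeners['before_server_start'])  # [(<function setup ...>, {'priority': 5})]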
trolldbois/ctypeslib
ctypeslib/codegen/cursorhandler.py
https://github.com/trolldbois/ctypeslib/blob/2aeb1942a5a32a5cc798c287cd0d9e684a0181a8/ctypeslib/codegen/cursorhandler.py#L159-L178
def FUNCTION_DECL(self, cursor): """Handles function declaration""" # FIXME to UT name = self.get_unique_name(cursor) if self.is_registered(name): return self.get_registered(name) returns = self.parse_cursor_type(cursor.type.get_result()) attributes = [] extern = False obj = typedesc.Function(name, returns, attributes, extern) for arg in cursor.get_arguments(): arg_obj = self.parse_cursor(arg) # if arg_obj is None: # code.interact(local=locals()) obj.add_argument(arg_obj) # code.interact(local=locals()) self.register(name, obj) self.set_location(obj, cursor) self.set_comment(obj, cursor) return obj
[ "def", "FUNCTION_DECL", "(", "self", ",", "cursor", ")", ":", "# FIXME to UT", "name", "=", "self", ".", "get_unique_name", "(", "cursor", ")", "if", "self", ".", "is_registered", "(", "name", ")", ":", "return", "self", ".", "get_registered", "(", "name", ")", "returns", "=", "self", ".", "parse_cursor_type", "(", "cursor", ".", "type", ".", "get_result", "(", ")", ")", "attributes", "=", "[", "]", "extern", "=", "False", "obj", "=", "typedesc", ".", "Function", "(", "name", ",", "returns", ",", "attributes", ",", "extern", ")", "for", "arg", "in", "cursor", ".", "get_arguments", "(", ")", ":", "arg_obj", "=", "self", ".", "parse_cursor", "(", "arg", ")", "# if arg_obj is None:", "# code.interact(local=locals())", "obj", ".", "add_argument", "(", "arg_obj", ")", "# code.interact(local=locals())", "self", ".", "register", "(", "name", ",", "obj", ")", "self", ".", "set_location", "(", "obj", ",", "cursor", ")", "self", ".", "set_comment", "(", "obj", ",", "cursor", ")", "return", "obj" ]
Handles function declaration
[ "Handles", "function", "declaration" ]
python
train
38.05
edx/opaque-keys
opaque_keys/edx/django/models.py
https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/django/models.py#L58-L65
def _strip_object(key): """ Strips branch and version info if the given key supports those attributes. """ if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'): return key.for_branch(None).version_agnostic() else: return key
[ "def", "_strip_object", "(", "key", ")", ":", "if", "hasattr", "(", "key", ",", "'version_agnostic'", ")", "and", "hasattr", "(", "key", ",", "'for_branch'", ")", ":", "return", "key", ".", "for_branch", "(", "None", ")", ".", "version_agnostic", "(", ")", "else", ":", "return", "key" ]
Strips branch and version info if the given key supports those attributes.
[ "Strips", "branch", "and", "version", "info", "if", "the", "given", "key", "supports", "those", "attributes", "." ]
python
train
33.375
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L1933-L1953
def list_policy_versions(policy_name, region=None, key=None, keyid=None, profile=None): ''' List versions of a policy. CLI Example: .. code-block:: bash salt myminion boto_iam.list_policy_versions mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) try: ret = conn.list_policy_versions(policy_arn) return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions') except boto.exception.BotoServerError as e: log.debug(e) log.error('Failed to list versions for IAM policy %s.', policy_name) return []
[ "def", "list_policy_versions", "(", "policy_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "policy_arn", "=", "_get_policy_arn", "(", "policy_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "try", ":", "ret", "=", "conn", ".", "list_policy_versions", "(", "policy_arn", ")", "return", "ret", ".", "get", "(", "'list_policy_versions_response'", ",", "{", "}", ")", ".", "get", "(", "'list_policy_versions_result'", ",", "{", "}", ")", ".", "get", "(", "'versions'", ")", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "log", ".", "error", "(", "'Failed to list versions for IAM policy %s.'", ",", "policy_name", ")", "return", "[", "]" ]
List versions of a policy. CLI Example: .. code-block:: bash salt myminion boto_iam.list_policy_versions mypolicy
[ "List", "versions", "of", "a", "policy", "." ]
python
train
34.714286
TankerHQ/python-cli-ui
cli_ui/__init__.py
https://github.com/TankerHQ/python-cli-ui/blob/4c9928827cea06cf80e6a1f5bd86478d8566863f/cli_ui/__init__.py#L187-L206
def process_tokens( tokens: Sequence[Token], *, end: str = "\n", sep: str = " " ) -> Tuple[str, str]: """ Returns two strings from a list of tokens. One containing ASCII escape codes, the other only the 'normal' characters """ # Flatten the list of tokens in case some of them are of # class UnicodeSequence: flat_tokens = list() # type: List[Token] for token in tokens: if isinstance(token, UnicodeSequence): flat_tokens.extend(token.tuple()) else: flat_tokens.append(token) with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True) without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False) return (with_color, without_color)
[ "def", "process_tokens", "(", "tokens", ":", "Sequence", "[", "Token", "]", ",", "*", ",", "end", ":", "str", "=", "\"\\n\"", ",", "sep", ":", "str", "=", "\" \"", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "# Flatten the list of tokens in case some of them are of", "# class UnicodeSequence:", "flat_tokens", "=", "list", "(", ")", "# type: List[Token]", "for", "token", "in", "tokens", ":", "if", "isinstance", "(", "token", ",", "UnicodeSequence", ")", ":", "flat_tokens", ".", "extend", "(", "token", ".", "tuple", "(", ")", ")", "else", ":", "flat_tokens", ".", "append", "(", "token", ")", "with_color", "=", "_process_tokens", "(", "flat_tokens", ",", "end", "=", "end", ",", "sep", "=", "sep", ",", "color", "=", "True", ")", "without_color", "=", "_process_tokens", "(", "flat_tokens", ",", "end", "=", "end", ",", "sep", "=", "sep", ",", "color", "=", "False", ")", "return", "(", "with_color", ",", "without_color", ")" ]
Returns two strings from a list of tokens. One containing ASCII escape codes, the other only the 'normal' characters
[ "Returns", "two", "strings", "from", "a", "list", "of", "tokens", ".", "One", "containing", "ASCII", "escape", "codes", "the", "other", "only", "the", "normal", "characters" ]
python
train
36.5
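A small usage sketch for the function above; plain strings and cli_ui color objects are valid tokens, so with the package installed this should run (output shown is indicative):

import cli_ui

with_color, without_color = cli_ui.process_tokens(
    [cli_ui.green, 'ok:', 'all', 'tests', 'passed'],
    end='\n', sep=' ',
)
print(repr(without_color))  # 'ok: all tests passed\n'
print(repr(with_color))     # same text, wrapped in ANSI escape codes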
pantsbuild/pants
src/python/pants/backend/jvm/tasks/ivy_resolve.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/ivy_resolve.py#L88-L117
def execute(self): """Resolves the specified confs for the configured targets and returns an iterator over tuples of (conf, jar path). """ if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy': return compile_classpath = self.context.products.get_data('compile_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir)) targets = self.context.targets() if all(not isinstance(target, JarLibrary) for target in targets): if self._report: self.context.log.info("Not generating a report. No resolution performed.") return executor = self.create_java_executor() results = self.resolve(executor=executor, targets=targets, classpath_products=compile_classpath, confs=self.get_options().confs, extra_args=self._args) if self._report: results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts] if not results_with_resolved_artifacts: self.context.log.info("Not generating a report. No resolution performed.") else: for result in results_with_resolved_artifacts: self._generate_ivy_report(result)
[ "def", "execute", "(", "self", ")", ":", "if", "JvmResolveSubsystem", ".", "global_instance", "(", ")", ".", "get_options", "(", ")", ".", "resolver", "!=", "'ivy'", ":", "return", "compile_classpath", "=", "self", ".", "context", ".", "products", ".", "get_data", "(", "'compile_classpath'", ",", "init_func", "=", "ClasspathProducts", ".", "init_func", "(", "self", ".", "get_options", "(", ")", ".", "pants_workdir", ")", ")", "targets", "=", "self", ".", "context", ".", "targets", "(", ")", "if", "all", "(", "not", "isinstance", "(", "target", ",", "JarLibrary", ")", "for", "target", "in", "targets", ")", ":", "if", "self", ".", "_report", ":", "self", ".", "context", ".", "log", ".", "info", "(", "\"Not generating a report. No resolution performed.\"", ")", "return", "executor", "=", "self", ".", "create_java_executor", "(", ")", "results", "=", "self", ".", "resolve", "(", "executor", "=", "executor", ",", "targets", "=", "targets", ",", "classpath_products", "=", "compile_classpath", ",", "confs", "=", "self", ".", "get_options", "(", ")", ".", "confs", ",", "extra_args", "=", "self", ".", "_args", ")", "if", "self", ".", "_report", ":", "results_with_resolved_artifacts", "=", "[", "r", "for", "r", "in", "results", "if", "r", ".", "has_resolved_artifacts", "]", "if", "not", "results_with_resolved_artifacts", ":", "self", ".", "context", ".", "log", ".", "info", "(", "\"Not generating a report. No resolution performed.\"", ")", "else", ":", "for", "result", "in", "results_with_resolved_artifacts", ":", "self", ".", "_generate_ivy_report", "(", "result", ")" ]
Resolves the specified confs for the configured targets and returns an iterator over tuples of (conf, jar path).
[ "Resolves", "the", "specified", "confs", "for", "the", "configured", "targets", "and", "returns", "an", "iterator", "over", "tuples", "of", "(", "conf", "jar", "path", ")", "." ]
python
train
43.333333
Parsl/parsl
parsl/executors/low_latency/executor.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/executors/low_latency/executor.py#L118-L142
def _start_local_queue_process(self): """ TODO: docstring """ comm_q = Queue(maxsize=10) self.queue_proc = Process(target=interchange.starter, args=(comm_q,), kwargs={"client_ports": (self.outgoing_q.port, self.incoming_q.port), "worker_port": self.worker_port, "worker_port_range": self.worker_port_range # TODO: logdir and logging level }) self.queue_proc.start() try: worker_port = comm_q.get(block=True, timeout=120) logger.debug( "Got worker port {} from interchange".format(worker_port)) except queue.Empty: logger.error( "Interchange has not completed initialization in 120s. Aborting") raise Exception("Interchange failed to start") self.worker_task_url = "tcp://{}:{}".format( self.address, worker_port)
[ "def", "_start_local_queue_process", "(", "self", ")", ":", "comm_q", "=", "Queue", "(", "maxsize", "=", "10", ")", "self", ".", "queue_proc", "=", "Process", "(", "target", "=", "interchange", ".", "starter", ",", "args", "=", "(", "comm_q", ",", ")", ",", "kwargs", "=", "{", "\"client_ports\"", ":", "(", "self", ".", "outgoing_q", ".", "port", ",", "self", ".", "incoming_q", ".", "port", ")", ",", "\"worker_port\"", ":", "self", ".", "worker_port", ",", "\"worker_port_range\"", ":", "self", ".", "worker_port_range", "# TODO: logdir and logging level", "}", ")", "self", ".", "queue_proc", ".", "start", "(", ")", "try", ":", "worker_port", "=", "comm_q", ".", "get", "(", "block", "=", "True", ",", "timeout", "=", "120", ")", "logger", ".", "debug", "(", "\"Got worker port {} from interchange\"", ".", "format", "(", "worker_port", ")", ")", "except", "queue", ".", "Empty", ":", "logger", ".", "error", "(", "\"Interchange has not completed initialization in 120s. Aborting\"", ")", "raise", "Exception", "(", "\"Interchange failed to start\"", ")", "self", ".", "worker_task_url", "=", "\"tcp://{}:{}\"", ".", "format", "(", "self", ".", "address", ",", "worker_port", ")" ]
TODO: docstring
[ "TODO", ":", "docstring" ]
python
valid
45.36
learningequality/ricecooker
ricecooker/utils/paths.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/paths.py#L48-L57
def build_path(levels): """ make a linear directory structure from a list of path levels names levels = ["chefdir", "trees", "test"] builds ./chefdir/trees/test/ """ path = os.path.join(*levels) if not dir_exists(path): os.makedirs(path) return path
[ "def", "build_path", "(", "levels", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "*", "levels", ")", "if", "not", "dir_exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "return", "path" ]
make a linear directory structure from a list of path levels names levels = ["chefdir", "trees", "test"] builds ./chefdir/trees/test/
[ "make", "a", "linear", "directory", "structure", "from", "a", "list", "of", "path", "levels", "names", "levels", "=", "[", "chefdir", "trees", "test", "]", "builds", ".", "/", "chefdir", "/", "trees", "/", "test", "/" ]
python
train
28
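Usage sketch for the ricecooker helper above; the import path follows the record's path field and the level names are arbitrary:

from ricecooker.utils.paths import build_path

# Creates ./chefdir/trees/test/ relative to the current working directory
# if it does not exist yet, and returns the joined path either way.
path = build_path(['chefdir', 'trees', 'test'])
print(path)  # chefdir/trees/test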
Josef-Friedrich/phrydy
phrydy/utils.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/utils.py#L42-L58
def displayable_path(path, separator=u'; '): """Attempts to decode a bytestring path to a unicode object for the purpose of displaying it to the user. If the `path` argument is a list or a tuple, the elements are joined with `separator`. """ if isinstance(path, (list, tuple)): return separator.join(displayable_path(p) for p in path) elif isinstance(path, six.text_type): return path elif not isinstance(path, bytes): # A non-string object: just get its unicode representation. return six.text_type(path) try: return path.decode(_fsencoding(), 'ignore') except (UnicodeError, LookupError): return path.decode('utf8', 'ignore')
[ "def", "displayable_path", "(", "path", ",", "separator", "=", "u'; '", ")", ":", "if", "isinstance", "(", "path", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "separator", ".", "join", "(", "displayable_path", "(", "p", ")", "for", "p", "in", "path", ")", "elif", "isinstance", "(", "path", ",", "six", ".", "text_type", ")", ":", "return", "path", "elif", "not", "isinstance", "(", "path", ",", "bytes", ")", ":", "# A non-string object: just get its unicode representation.", "return", "six", ".", "text_type", "(", "path", ")", "try", ":", "return", "path", ".", "decode", "(", "_fsencoding", "(", ")", ",", "'ignore'", ")", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "return", "path", ".", "decode", "(", "'utf8'", ",", "'ignore'", ")" ]
Attempts to decode a bytestring path to a unicode object for the purpose of displaying it to the user. If the `path` argument is a list or a tuple, the elements are joined with `separator`.
[ "Attempts", "to", "decode", "a", "bytestring", "path", "to", "a", "unicode", "object", "for", "the", "purpose", "of", "displaying", "it", "to", "the", "user", ".", "If", "the", "path", "argument", "is", "a", "list", "or", "a", "tuple", "the", "elements", "are", "joined", "with", "separator", "." ]
python
train
40.941176
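Usage sketch for the phrydy helper above (the import path is taken from the record's path field, and the file names are placeholders):

from phrydy.utils import displayable_path

print(displayable_path(b'/music/caf\xc3\xa9.flac'))              # '/music/café.flac' on a UTF-8 system
print(displayable_path(['/a.mp3', b'/b.mp3'], separator=u'; '))  # '/a.mp3; /b.mp3'
print(displayable_path(42))                                      # '42' (non-string falls back to text)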
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3010-L3016
def remove_item(self, *args, **kwargs): """Pass through to provider methods.""" try: self._get_provider_session('assessment_basic_authoring_session').remove_item(*args, **kwargs) except InvalidArgument: self._get_sub_package_provider_session( 'assessment_authoring', 'assessment_part_item_design_session').remove_item(*args, **kwargs)
[ "def", "remove_item", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "_get_provider_session", "(", "'assessment_basic_authoring_session'", ")", ".", "remove_item", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "InvalidArgument", ":", "self", ".", "_get_sub_package_provider_session", "(", "'assessment_authoring'", ",", "'assessment_part_item_design_session'", ")", ".", "remove_item", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pass through to provider methods.
[ "Pass", "through", "to", "provider", "methods", "." ]
python
train
56
tdegeus/cppmat
cppmat/__init__.py
https://github.com/tdegeus/cppmat/blob/0d5031d08ce621a4d3e9c396779d99fd5203aa00/cppmat/__init__.py#L109-L157
def find_eigen(hint=None): r''' Try to find the Eigen library. If successful the include directory is returned. ''' # search with pkgconfig # --------------------- try: import pkgconfig if pkgconfig.installed('eigen3','>3.0.0'): return pkgconfig.parse('eigen3')['include_dirs'][0] except: pass # manual search # ------------- search_dirs = [] if hint is None else hint search_dirs += [ "/usr/local/include/eigen3", "/usr/local/homebrew/include/eigen3", "/opt/local/var/macports/software/eigen3", "/opt/local/include/eigen3", "/usr/include/eigen3", "/usr/include/local", "/usr/include", ] for d in search_dirs: path = os.path.join(d, "Eigen", "Dense") if os.path.exists(path): vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h") if not os.path.exists(vf): continue src = open(vf, "r").read() v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src) v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src) v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src) if not len(v1) or not len(v2) or not len(v3): continue v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0]) print("Found Eigen version {0} in: {1}".format(v, d)) return d return None
[ "def", "find_eigen", "(", "hint", "=", "None", ")", ":", "# search with pkgconfig", "# ---------------------", "try", ":", "import", "pkgconfig", "if", "pkgconfig", ".", "installed", "(", "'eigen3'", ",", "'>3.0.0'", ")", ":", "return", "pkgconfig", ".", "parse", "(", "'eigen3'", ")", "[", "'include_dirs'", "]", "[", "0", "]", "except", ":", "pass", "# manual search", "# -------------", "search_dirs", "=", "[", "]", "if", "hint", "is", "None", "else", "hint", "search_dirs", "+=", "[", "\"/usr/local/include/eigen3\"", ",", "\"/usr/local/homebrew/include/eigen3\"", ",", "\"/opt/local/var/macports/software/eigen3\"", ",", "\"/opt/local/include/eigen3\"", ",", "\"/usr/include/eigen3\"", ",", "\"/usr/include/local\"", ",", "\"/usr/include\"", ",", "]", "for", "d", "in", "search_dirs", ":", "path", "=", "os", ".", "path", ".", "join", "(", "d", ",", "\"Eigen\"", ",", "\"Dense\"", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "vf", "=", "os", ".", "path", ".", "join", "(", "d", ",", "\"Eigen\"", ",", "\"src\"", ",", "\"Core\"", ",", "\"util\"", ",", "\"Macros.h\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "vf", ")", ":", "continue", "src", "=", "open", "(", "vf", ",", "\"r\"", ")", ".", "read", "(", ")", "v1", "=", "re", ".", "findall", "(", "\"#define EIGEN_WORLD_VERSION (.+)\"", ",", "src", ")", "v2", "=", "re", ".", "findall", "(", "\"#define EIGEN_MAJOR_VERSION (.+)\"", ",", "src", ")", "v3", "=", "re", ".", "findall", "(", "\"#define EIGEN_MINOR_VERSION (.+)\"", ",", "src", ")", "if", "not", "len", "(", "v1", ")", "or", "not", "len", "(", "v2", ")", "or", "not", "len", "(", "v3", ")", ":", "continue", "v", "=", "\"{0}.{1}.{2}\"", ".", "format", "(", "v1", "[", "0", "]", ",", "v2", "[", "0", "]", ",", "v3", "[", "0", "]", ")", "print", "(", "\"Found Eigen version {0} in: {1}\"", ".", "format", "(", "v", ",", "d", ")", ")", "return", "d", "return", "None" ]
r''' Try to find the Eigen library. If successful the include directory is returned.
[ "r", "Try", "to", "find", "the", "Eigen", "library", ".", "If", "successful", "the", "include", "directory", "is", "returned", "." ]
python
train
25.857143
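Usage sketch for the record above, typically called from a setup.py to locate the Eigen headers (the hint directory is illustrative):

from cppmat import find_eigen

include_dir = find_eigen(hint=['/opt/eigen/include/eigen3'])
if include_dir is None:
    raise RuntimeError('Eigen headers not found; install libeigen3-dev or pass a hint')
print('using Eigen from', include_dir)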
sleepyfran/itunespy
itunespy/__init__.py
https://github.com/sleepyfran/itunespy/blob/0e7e931b135b5e0daae49ba68e9167ff4ac73eb5/itunespy/__init__.py#L302-L349
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country='US', media='music', entity=None, attribute=None, limit=50): """ Builds the URL to perform the lookup based on the provided data :param id: String. iTunes ID of the artist, album, track, ebook or software :param artist_amg_id: String. All Music Guide ID of the artist :param upc: String. UPCs/EANs :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: The built URL as a string """ built_url = base_lookup_url has_one_argument = False if id is not None: built_url += parameters[6] + str(id) has_one_argument = True if artist_amg_id is not None: if has_one_argument: built_url += ampersand + parameters[7] + artist_amg_id else: built_url += parameters[7] + str(artist_amg_id) has_one_argument = True if upc is not None: if has_one_argument: built_url += ampersand + parameters[8] + upc else: built_url += parameters[8] + str(upc) built_url += ampersand + parameters[1] + country built_url += ampersand + parameters[2] + media if entity is not None: built_url += ampersand + parameters[3] + entity if attribute is not None: built_url += ampersand + parameters[4] + attribute built_url += ampersand + parameters[5] + str(limit) return built_url
[ "def", "_url_lookup_builder", "(", "id", "=", "None", ",", "artist_amg_id", "=", "None", ",", "upc", "=", "None", ",", "country", "=", "'US'", ",", "media", "=", "'music'", ",", "entity", "=", "None", ",", "attribute", "=", "None", ",", "limit", "=", "50", ")", ":", "built_url", "=", "base_lookup_url", "has_one_argument", "=", "False", "if", "id", "is", "not", "None", ":", "built_url", "+=", "parameters", "[", "6", "]", "+", "str", "(", "id", ")", "has_one_argument", "=", "True", "if", "artist_amg_id", "is", "not", "None", ":", "if", "has_one_argument", ":", "built_url", "+=", "ampersand", "+", "parameters", "[", "7", "]", "+", "artist_amg_id", "else", ":", "built_url", "+=", "parameters", "[", "7", "]", "+", "str", "(", "artist_amg_id", ")", "has_one_argument", "=", "True", "if", "upc", "is", "not", "None", ":", "if", "has_one_argument", ":", "built_url", "+=", "ampersand", "+", "parameters", "[", "8", "]", "+", "upc", "else", ":", "built_url", "+=", "parameters", "[", "8", "]", "+", "str", "(", "upc", ")", "built_url", "+=", "ampersand", "+", "parameters", "[", "1", "]", "+", "country", "built_url", "+=", "ampersand", "+", "parameters", "[", "2", "]", "+", "media", "if", "entity", "is", "not", "None", ":", "built_url", "+=", "ampersand", "+", "parameters", "[", "3", "]", "+", "entity", "if", "attribute", "is", "not", "None", ":", "built_url", "+=", "ampersand", "+", "parameters", "[", "4", "]", "+", "attribute", "built_url", "+=", "ampersand", "+", "parameters", "[", "5", "]", "+", "str", "(", "limit", ")", "return", "built_url" ]
Builds the URL to perform the lookup based on the provided data :param id: String. iTunes ID of the artist, album, track, ebook or software :param artist_amg_id: String. All Music Guide ID of the artist :param upc: String. UPCs/EANs :param country: String. The two-letter country code for the store you want to search. For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2 :param media: String. The media type you want to search for. Example: music :param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type. :param limit: Integer. The number of search results you want the iTunes Store to return. :return: The built URL as a string
[ "Builds", "the", "URL", "to", "perform", "the", "lookup", "based", "on", "the", "provided", "data", ":", "param", "id", ":", "String", ".", "iTunes", "ID", "of", "the", "artist", "album", "track", "ebook", "or", "software", ":", "param", "artist_amg_id", ":", "String", ".", "All", "Music", "Guide", "ID", "of", "the", "artist", ":", "param", "upc", ":", "String", ".", "UPCs", "/", "EANs", ":", "param", "country", ":", "String", ".", "The", "two", "-", "letter", "country", "code", "for", "the", "store", "you", "want", "to", "search", ".", "For", "a", "full", "list", "of", "the", "codes", ":", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "%20ISO_3166", "-", "1_alpha", "-", "2", ":", "param", "media", ":", "String", ".", "The", "media", "type", "you", "want", "to", "search", "for", ".", "Example", ":", "music", ":", "param", "entity", ":", "String", ".", "The", "type", "of", "results", "you", "want", "returned", "relative", "to", "the", "specified", "media", "type", ".", "Example", ":", "musicArtist", ".", "Full", "list", ":", "musicArtist", "musicTrack", "album", "musicVideo", "mix", "song", ":", "param", "attribute", ":", "String", ".", "The", "attribute", "you", "want", "to", "search", "for", "in", "the", "stores", "relative", "to", "the", "specified", "media", "type", ".", ":", "param", "limit", ":", "Integer", ".", "The", "number", "of", "search", "results", "you", "want", "the", "iTunes", "Store", "to", "return", ".", ":", "return", ":", "The", "built", "URL", "as", "a", "string" ]
python
train
42.541667
inasafe/inasafe
safe/gui/tools/help/options_help.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/help/options_help.py#L19-L32
def options_help(): """Help message for options dialog. .. versionadded:: 3.2.1 :returns: A message object containing helpful information. :rtype: messaging.message.Message """ message = m.Message() message.add(m.Brand()) message.add(heading()) message.add(content()) return message
[ "def", "options_help", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "m", ".", "Brand", "(", ")", ")", "message", ".", "add", "(", "heading", "(", ")", ")", "message", ".", "add", "(", "content", "(", ")", ")", "return", "message" ]
Help message for options dialog. .. versionadded:: 3.2.1 :returns: A message object containing helpful information. :rtype: messaging.message.Message
[ "Help", "message", "for", "options", "dialog", "." ]
python
train
22.285714
apache/airflow
airflow/models/dagrun.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L232-L239
def get_previous_scheduled_dagrun(self, session=None): """The previous, SCHEDULED DagRun, if there is one""" dag = self.get_dag() return session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == dag.previous_schedule(self.execution_date) ).first()
[ "def", "get_previous_scheduled_dagrun", "(", "self", ",", "session", "=", "None", ")", ":", "dag", "=", "self", ".", "get_dag", "(", ")", "return", "session", ".", "query", "(", "DagRun", ")", ".", "filter", "(", "DagRun", ".", "dag_id", "==", "self", ".", "dag_id", ",", "DagRun", ".", "execution_date", "==", "dag", ".", "previous_schedule", "(", "self", ".", "execution_date", ")", ")", ".", "first", "(", ")" ]
The previous, SCHEDULED DagRun, if there is one
[ "The", "previous", "SCHEDULED", "DagRun", "if", "there", "is", "one" ]
python
test
40.5
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/debugger.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/debugger.py#L153-L162
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""): """Make new_fn have old_fn's doc string. This is particularly useful for the do_... commands that hook into the help system. Adapted from from a comp.lang.python posting by Duncan Booth.""" def wrapper(*args, **kw): return new_fn(*args, **kw) if old_fn.__doc__: wrapper.__doc__ = old_fn.__doc__ + additional_text return wrapper
[ "def", "decorate_fn_with_doc", "(", "new_fn", ",", "old_fn", ",", "additional_text", "=", "\"\"", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "new_fn", "(", "*", "args", ",", "*", "*", "kw", ")", "if", "old_fn", ".", "__doc__", ":", "wrapper", ".", "__doc__", "=", "old_fn", ".", "__doc__", "+", "additional_text", "return", "wrapper" ]
Make new_fn have old_fn's doc string. This is particularly useful for the do_... commands that hook into the help system. Adapted from from a comp.lang.python posting by Duncan Booth.
[ "Make", "new_fn", "have", "old_fn", "s", "doc", "string", ".", "This", "is", "particularly", "useful", "for", "the", "do_", "...", "commands", "that", "hook", "into", "the", "help", "system", ".", "Adapted", "from", "from", "a", "comp", ".", "lang", ".", "python", "posting", "by", "Duncan", "Booth", "." ]
python
test
42.4
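A self-contained demonstration of the docstring-copying helper above; the helper body is repeated here verbatim from the record so the snippet runs on its own, and the do_* names are illustrative:

def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
    def wrapper(*args, **kw):
        return new_fn(*args, **kw)
    if old_fn.__doc__:
        wrapper.__doc__ = old_fn.__doc__ + additional_text
    return wrapper

def do_list(arg):
    """List source code around the current line."""

def do_l(arg):
    return do_list(arg)

# The alias keeps the original help text, plus a note about being an alias.
do_l = decorate_fn_with_doc(do_l, do_list, additional_text="\n(alias for do_list)")
print(do_l.__doc__)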
raiden-network/raiden-contracts
raiden_contracts/deploy/contract_deployer.py
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/contract_deployer.py#L243-L287
def _register_token_network_without_limits( self, token_registry_abi: Dict, token_registry_address: str, token_address: str, channel_participant_deposit_limit: Optional[int], token_network_deposit_limit: Optional[int], ): """Register token with a TokenNetworkRegistry contract with a contracts-version that doesn't require deposit limits in the TokenNetwork constructor. """ if channel_participant_deposit_limit: raise ValueError( 'contracts_version below 0.9.0 does not expect ' 'channel_participant_deposit_limit', ) if token_network_deposit_limit: raise ValueError( 'contracts_version below 0.9.0 does not expect token_network_deposit_limit', ) token_network_registry = self.web3.eth.contract( abi=token_registry_abi, address=token_registry_address, ) version_from_onchain = token_network_registry.functions.contract_version().call() if version_from_onchain != self.contract_manager.version_string: raise RuntimeError( f'got {version_from_onchain} from the chain, expected ' f'{self.contract_manager.version_string} in the deployment data', ) command = token_network_registry.functions.createERC20TokenNetwork( token_address, ) self.transact(command) token_network_address = token_network_registry.functions.token_to_token_networks( token_address, ).call() token_network_address = to_checksum_address(token_network_address) LOG.debug(f'TokenNetwork address: {token_network_address}') return token_network_address
[ "def", "_register_token_network_without_limits", "(", "self", ",", "token_registry_abi", ":", "Dict", ",", "token_registry_address", ":", "str", ",", "token_address", ":", "str", ",", "channel_participant_deposit_limit", ":", "Optional", "[", "int", "]", ",", "token_network_deposit_limit", ":", "Optional", "[", "int", "]", ",", ")", ":", "if", "channel_participant_deposit_limit", ":", "raise", "ValueError", "(", "'contracts_version below 0.9.0 does not expect '", "'channel_participant_deposit_limit'", ",", ")", "if", "token_network_deposit_limit", ":", "raise", "ValueError", "(", "'contracts_version below 0.9.0 does not expect token_network_deposit_limit'", ",", ")", "token_network_registry", "=", "self", ".", "web3", ".", "eth", ".", "contract", "(", "abi", "=", "token_registry_abi", ",", "address", "=", "token_registry_address", ",", ")", "version_from_onchain", "=", "token_network_registry", ".", "functions", ".", "contract_version", "(", ")", ".", "call", "(", ")", "if", "version_from_onchain", "!=", "self", ".", "contract_manager", ".", "version_string", ":", "raise", "RuntimeError", "(", "f'got {version_from_onchain} from the chain, expected '", "f'{self.contract_manager.version_string} in the deployment data'", ",", ")", "command", "=", "token_network_registry", ".", "functions", ".", "createERC20TokenNetwork", "(", "token_address", ",", ")", "self", ".", "transact", "(", "command", ")", "token_network_address", "=", "token_network_registry", ".", "functions", ".", "token_to_token_networks", "(", "token_address", ",", ")", ".", "call", "(", ")", "token_network_address", "=", "to_checksum_address", "(", "token_network_address", ")", "LOG", ".", "debug", "(", "f'TokenNetwork address: {token_network_address}'", ")", "return", "token_network_address" ]
Register token with a TokenNetworkRegistry contract with a contracts-version that doesn't require deposit limits in the TokenNetwork constructor.
[ "Register", "token", "with", "a", "TokenNetworkRegistry", "contract" ]
python
train
39.888889
sprockets/sprockets.clients.statsd
sprockets/clients/statsd/__init__.py
https://github.com/sprockets/sprockets.clients.statsd/blob/34daf6972ebdc5ed1e8fde2ff85b3443b9c04d2c/sprockets/clients/statsd/__init__.py#L166-L181
def _send(key, value, metric_type): """Send the specified value to the statsd daemon via UDP without a direct socket connection. :param str value: The properly formatted statsd counter value """ if STATSD_PREFIX: key = '.'.join([STATSD_PREFIX, key]) try: STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key, value, metric_type).encode(), STATSD_ADDR) except socket.error: LOGGER.exception(SOCKET_ERROR)
[ "def", "_send", "(", "key", ",", "value", ",", "metric_type", ")", ":", "if", "STATSD_PREFIX", ":", "key", "=", "'.'", ".", "join", "(", "[", "STATSD_PREFIX", ",", "key", "]", ")", "try", ":", "STATSD_SOCKET", ".", "sendto", "(", "'{0}:{1}|{2}'", ".", "format", "(", "key", ",", "value", ",", "metric_type", ")", ".", "encode", "(", ")", ",", "STATSD_ADDR", ")", "except", "socket", ".", "error", ":", "LOGGER", ".", "exception", "(", "SOCKET_ERROR", ")" ]
Send the specified value to the statsd daemon via UDP without a direct socket connection. :param str value: The properly formatted statsd counter value
[ "Send", "the", "specified", "value", "to", "the", "statsd", "daemon", "via", "UDP", "without", "a", "direct", "socket", "connection", "." ]
python
train
35.25
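The sprockets helper above formats a metric as "<key>:<value>|<type>" and fires it over UDP. A self-contained sketch of the same wire format using only the standard library (host, port, prefix and metric names are placeholders, not the module's globals):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
prefix = 'myapp'                       # plays the role of STATSD_PREFIX
key, value, metric_type = 'requests.duration', 42, 'ms'

# 'myapp.requests.duration:42|ms' is what a statsd daemon expects on port 8125.
payload = '{0}:{1}|{2}'.format('.'.join([prefix, key]), value, metric_type)
try:
    sock.sendto(payload.encode(), ('127.0.0.1', 8125))
except socket.error as err:
    print('statsd send failed:', err)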
mikedh/trimesh
trimesh/sample.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/sample.py#L13-L69
def sample_surface(mesh, count): """ Sample the surface of a mesh, returning the specified number of points For individual triangle sampling uses this method: http://mathworld.wolfram.com/TrianglePointPicking.html Parameters --------- mesh: Trimesh object count: number of points to return Returns --------- samples: (count,3) points in space on the surface of mesh face_index: (count,) indices of faces for each sampled point """ # len(mesh.faces) float, array of the areas # of each face of the mesh area = mesh.area_faces # total area (float) area_sum = np.sum(area) # cumulative area (len(mesh.faces)) area_cum = np.cumsum(area) face_pick = np.random.random(count) * area_sum face_index = np.searchsorted(area_cum, face_pick) # pull triangles into the form of an origin + 2 vectors tri_origins = mesh.triangles[:, 0] tri_vectors = mesh.triangles[:, 1:].copy() tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3)) # pull the vectors for the faces we are going to sample from tri_origins = tri_origins[face_index] tri_vectors = tri_vectors[face_index] # randomly generate two 0-1 scalar components to multiply edge vectors by random_lengths = np.random.random((len(tri_vectors), 2, 1)) # points will be distributed on a quadrilateral if we use 2 0-1 samples # if the two scalar components sum less than 1.0 the point will be # inside the triangle, so we find vectors longer than 1.0 and # transform them to be inside the triangle random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0 random_lengths[random_test] -= 1.0 random_lengths = np.abs(random_lengths) # multiply triangle edge vectors by the random lengths and sum sample_vector = (tri_vectors * random_lengths).sum(axis=1) # finally, offset by the origin to generate # (n,3) points in space on the triangle samples = sample_vector + tri_origins return samples, face_index
[ "def", "sample_surface", "(", "mesh", ",", "count", ")", ":", "# len(mesh.faces) float, array of the areas", "# of each face of the mesh", "area", "=", "mesh", ".", "area_faces", "# total area (float)", "area_sum", "=", "np", ".", "sum", "(", "area", ")", "# cumulative area (len(mesh.faces))", "area_cum", "=", "np", ".", "cumsum", "(", "area", ")", "face_pick", "=", "np", ".", "random", ".", "random", "(", "count", ")", "*", "area_sum", "face_index", "=", "np", ".", "searchsorted", "(", "area_cum", ",", "face_pick", ")", "# pull triangles into the form of an origin + 2 vectors", "tri_origins", "=", "mesh", ".", "triangles", "[", ":", ",", "0", "]", "tri_vectors", "=", "mesh", ".", "triangles", "[", ":", ",", "1", ":", "]", ".", "copy", "(", ")", "tri_vectors", "-=", "np", ".", "tile", "(", "tri_origins", ",", "(", "1", ",", "2", ")", ")", ".", "reshape", "(", "(", "-", "1", ",", "2", ",", "3", ")", ")", "# pull the vectors for the faces we are going to sample from", "tri_origins", "=", "tri_origins", "[", "face_index", "]", "tri_vectors", "=", "tri_vectors", "[", "face_index", "]", "# randomly generate two 0-1 scalar components to multiply edge vectors by", "random_lengths", "=", "np", ".", "random", ".", "random", "(", "(", "len", "(", "tri_vectors", ")", ",", "2", ",", "1", ")", ")", "# points will be distributed on a quadrilateral if we use 2 0-1 samples", "# if the two scalar components sum less than 1.0 the point will be", "# inside the triangle, so we find vectors longer than 1.0 and", "# transform them to be inside the triangle", "random_test", "=", "random_lengths", ".", "sum", "(", "axis", "=", "1", ")", ".", "reshape", "(", "-", "1", ")", ">", "1.0", "random_lengths", "[", "random_test", "]", "-=", "1.0", "random_lengths", "=", "np", ".", "abs", "(", "random_lengths", ")", "# multiply triangle edge vectors by the random lengths and sum", "sample_vector", "=", "(", "tri_vectors", "*", "random_lengths", ")", ".", "sum", "(", "axis", "=", "1", ")", "# finally, offset by the origin to generate", "# (n,3) points in space on the triangle", "samples", "=", "sample_vector", "+", "tri_origins", "return", "samples", ",", "face_index" ]
Sample the surface of a mesh, returning the specified number of points For individual triangle sampling uses this method: http://mathworld.wolfram.com/TrianglePointPicking.html Parameters --------- mesh: Trimesh object count: number of points to return Returns --------- samples: (count,3) points in space on the surface of mesh face_index: (count,) indices of faces for each sampled point
[ "Sample", "the", "surface", "of", "a", "mesh", "returning", "the", "specified", "number", "of", "points" ]
python
train
34.77193
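Usage sketch for the trimesh sampler above, using a built-in primitive mesh so it runs without external files (return values as in the version of trimesh this record comes from):

import trimesh
from trimesh.sample import sample_surface

mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
points, face_index = sample_surface(mesh, count=500)

print(points.shape)      # (500, 3) points lying on the sphere's surface
print(face_index.shape)  # (500,)  index of the triangle each point was drawn from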
mitsei/dlkit
dlkit/records/assessment/basic/simple_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/simple_records.py#L636-L640
def _init_metadata(self): """stub""" TextAnswerFormRecord._init_metadata(self) FilesAnswerFormRecord._init_metadata(self) super(AnswerTextAndFilesMixin, self)._init_metadata()
[ "def", "_init_metadata", "(", "self", ")", ":", "TextAnswerFormRecord", ".", "_init_metadata", "(", "self", ")", "FilesAnswerFormRecord", ".", "_init_metadata", "(", "self", ")", "super", "(", "AnswerTextAndFilesMixin", ",", "self", ")", ".", "_init_metadata", "(", ")" ]
stub
[ "stub" ]
python
train
40.6
Qiskit/qiskit-terra
qiskit/extensions/standard/rzz.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/extensions/standard/rzz.py#L49-L51
def rzz(self, theta, qubit1, qubit2): """Apply RZZ to circuit.""" return self.append(RZZGate(theta), [qubit1, qubit2], [])
[ "def", "rzz", "(", "self", ",", "theta", ",", "qubit1", ",", "qubit2", ")", ":", "return", "self", ".", "append", "(", "RZZGate", "(", "theta", ")", ",", "[", "qubit1", ",", "qubit2", "]", ",", "[", "]", ")" ]
Apply RZZ to circuit.
[ "Apply", "RZZ", "to", "circuit", "." ]
python
test
42.666667
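Usage sketch for the circuit extension above (Qiskit Terra of the era this record comes from; the rotation angle is arbitrary):

from math import pi
from qiskit import QuantumCircuit

qc = QuantumCircuit(2)
qc.h(0)
qc.rzz(pi / 4, 0, 1)  # two-qubit ZZ rotation by pi/4 between qubits 0 and 1
print(qc.draw())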
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_arrayexpr_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_arrayexpr_parser.py#L61-L68
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext): """ arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; """ from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()] if ctx.ebnfSuffix(): self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix())
[ "def", "visitArrayExpr", "(", "self", ",", "ctx", ":", "jsgParser", ".", "ArrayExprContext", ")", ":", "from", "pyjsg", ".", "parser_impl", ".", "jsg_ebnf_parser", "import", "JSGEbnf", "from", "pyjsg", ".", "parser_impl", ".", "jsg_valuetype_parser", "import", "JSGValueType", "self", ".", "_types", "=", "[", "JSGValueType", "(", "self", ".", "_context", ",", "vt", ")", "for", "vt", "in", "ctx", ".", "valueType", "(", ")", "]", "if", "ctx", ".", "ebnfSuffix", "(", ")", ":", "self", ".", "_ebnf", "=", "JSGEbnf", "(", "self", ".", "_context", ",", "ctx", ".", "ebnfSuffix", "(", ")", ")" ]
arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET;
[ "arrayExpr", ":", "OBRACKET", "valueType", "(", "BAR", "valueType", ")", "*", "ebnfSuffix?", "CBRACKET", ";" ]
python
train
56
a1ezzz/wasp-general
wasp_general/network/primitives.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/primitives.py#L269-L287
def first_address(self, skip_network_address=True): """ Return the first IP address of this network :param skip_network_address: this flag specifies whether this function returns address of the network \ or returns address that follows address of the network (address, that a host could have) :return: WIPV4Address """ bin_address = self.__address.bin_address() bin_address_length = len(bin_address) if self.__mask > (bin_address_length - 2): skip_network_address = False for i in range(bin_address_length - self.__mask): bin_address[self.__mask + i] = 0 if skip_network_address: bin_address[bin_address_length - 1] = 1 return WIPV4Address(bin_address)
[ "def", "first_address", "(", "self", ",", "skip_network_address", "=", "True", ")", ":", "bin_address", "=", "self", ".", "__address", ".", "bin_address", "(", ")", "bin_address_length", "=", "len", "(", "bin_address", ")", "if", "self", ".", "__mask", ">", "(", "bin_address_length", "-", "2", ")", ":", "skip_network_address", "=", "False", "for", "i", "in", "range", "(", "bin_address_length", "-", "self", ".", "__mask", ")", ":", "bin_address", "[", "self", ".", "__mask", "+", "i", "]", "=", "0", "if", "skip_network_address", ":", "bin_address", "[", "bin_address_length", "-", "1", "]", "=", "1", "return", "WIPV4Address", "(", "bin_address", ")" ]
Return the first IP address of this network :param skip_network_address: this flag specifies whether this function returns address of the network \ or returns address that follows address of the network (address, that a host could have) :return: WIPV4Address
[ "Return", "the", "first", "IP", "address", "of", "this", "network" ]
python
train
35.210526
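The wasp_general method above computes the first address of an IPv4 network, optionally skipping the network address itself. The standard library expresses the same idea; a hedged equivalent shown here is not the WIPV4Network API:

import ipaddress

net = ipaddress.ip_network('192.168.10.0/24')
network_address = net.network_address  # 192.168.10.0 (like skip_network_address=False)
first_host = next(net.hosts())         # 192.168.10.1 (like skip_network_address=True)
print(network_address, first_host)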
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L3333-L3363
def onNicknameChange( self, mid=None, author_id=None, changed_for=None, new_nickname=None, thread_id=None, thread_type=ThreadType.USER, ts=None, metadata=None, msg=None, ): """ Called when the client is listening, and somebody changes the nickname of a person :param mid: The action ID :param author_id: The ID of the person who changed the nickname :param changed_for: The ID of the person whom got their nickname changed :param new_nickname: The new nickname :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type thread_type: models.ThreadType """ log.info( "Nickname change from {} in {} ({}) for {}: {}".format( author_id, thread_id, thread_type.name, changed_for, new_nickname ) )
[ "def", "onNicknameChange", "(", "self", ",", "mid", "=", "None", ",", "author_id", "=", "None", ",", "changed_for", "=", "None", ",", "new_nickname", "=", "None", ",", "thread_id", "=", "None", ",", "thread_type", "=", "ThreadType", ".", "USER", ",", "ts", "=", "None", ",", "metadata", "=", "None", ",", "msg", "=", "None", ",", ")", ":", "log", ".", "info", "(", "\"Nickname change from {} in {} ({}) for {}: {}\"", ".", "format", "(", "author_id", ",", "thread_id", ",", "thread_type", ".", "name", ",", "changed_for", ",", "new_nickname", ")", ")" ]
Called when the client is listening, and somebody changes the nickname of a person :param mid: The action ID :param author_id: The ID of the person who changed the nickname :param changed_for: The ID of the person whom got their nickname changed :param new_nickname: The new nickname :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads` :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads` :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data recieved :type thread_type: models.ThreadType
[ "Called", "when", "the", "client", "is", "listening", "and", "somebody", "changes", "the", "nickname", "of", "a", "person" ]
python
train
37.193548
ChristianKuehnel/btlewrap
btlewrap/pygatt.py
https://github.com/ChristianKuehnel/btlewrap/blob/1b7aec934529dcf03f5ecdccd0b09c25c389974f/btlewrap/pygatt.py#L9-L27
def wrap_exception(func: Callable) -> Callable: """Decorator to wrap pygatt exceptions into BluetoothBackendException.""" try: # only do the wrapping if pygatt is installed. # otherwise it's pointless anyway from pygatt.backends.bgapi.exceptions import BGAPIError from pygatt.exceptions import NotConnectedError except ImportError: return func def _func_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except BGAPIError as exception: raise BluetoothBackendException() from exception except NotConnectedError as exception: raise BluetoothBackendException() from exception return _func_wrapper
[ "def", "wrap_exception", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "try", ":", "# only do the wrapping if pygatt is installed.", "# otherwise it's pointless anyway", "from", "pygatt", ".", "backends", ".", "bgapi", ".", "exceptions", "import", "BGAPIError", "from", "pygatt", ".", "exceptions", "import", "NotConnectedError", "except", "ImportError", ":", "return", "func", "def", "_func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "BGAPIError", "as", "exception", ":", "raise", "BluetoothBackendException", "(", ")", "from", "exception", "except", "NotConnectedError", "as", "exception", ":", "raise", "BluetoothBackendException", "(", ")", "from", "exception", "return", "_func_wrapper" ]
Decorator to wrap pygatt exceptions into BluetoothBackendException.
[ "Decorator", "to", "wrap", "pygatt", "exceptions", "into", "BluetoothBackendException", "." ]
python
train
37.210526
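The btlewrap decorator above funnels backend-specific errors into one library exception and degrades to a plain pass-through when the backend is not installed. A generic, self-contained sketch of that pattern (the exception names are illustrative, not the btlewrap API):

class BackendError(Exception):
    """Stand-in for a third-party backend exception."""

class UnifiedBackendException(Exception):
    """Stand-in for the library's own exception type."""

def wrap_exception(func):
    def _func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except BackendError as exception:
            raise UnifiedBackendException() from exception
    return _func_wrapper

@wrap_exception
def read_sensor():
    raise BackendError('device went away')

try:
    read_sensor()
except UnifiedBackendException as err:
    print('caught unified exception, caused by:', err.__cause__)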
MSchnei/pyprf_feature
pyprf_feature/analysis/utils_hrf.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/utils_hrf.py#L203-L263
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol, aryExclCnd=None, varTmpOvsmpl=1000.): """ Creation of condition time courses in temporally upsampled space. Parameters ---------- aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varNumVol : float, positive Number of volumes of the (fMRI) data. aryExclCnd : array 1D array containing condition identifiers for conditions to be excluded varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. Returns ------- aryBxCrOut : np.array, float16 Condition time courses in temporally upsampled space. References: ----- [1] https://github.com/fabianp/hrf_estimation """ if aryExclCnd is not None: for cond in aryExclCnd: aryOns = aryOns[aryCnd != cond] aryDrt = aryDrt[aryCnd != cond] aryCnd = aryCnd[aryCnd != cond] resolution = varTr / float(varTmpOvsmpl) aryCnd = np.asarray(aryCnd) aryOns = np.asarray(aryOns, dtype=np.float) unique_conditions = np.sort(np.unique(aryCnd)) boxcar = [] for c in unique_conditions: tmp = np.zeros(int(varNumVol * varTr/resolution)) onset_c = aryOns[aryCnd == c] duration_c = aryDrt[aryCnd == c] onset_idx = np.round(onset_c / resolution).astype(np.int) duration_idx = np.round(duration_c / resolution).astype(np.int) aux = np.arange(int(varNumVol * varTr/resolution)) for start, dur in zip(onset_idx, duration_idx): lgc = np.logical_and(aux >= start, aux < start + dur) tmp = tmp + lgc assert np.all(np.less(tmp, 2)) boxcar.append(tmp) aryBxCrOut = np.array(boxcar).T if aryBxCrOut.shape[1] == 1: aryBxCrOut = np.squeeze(aryBxCrOut) return aryBxCrOut.astype('float16')
[ "def", "create_boxcar", "(", "aryCnd", ",", "aryOns", ",", "aryDrt", ",", "varTr", ",", "varNumVol", ",", "aryExclCnd", "=", "None", ",", "varTmpOvsmpl", "=", "1000.", ")", ":", "if", "aryExclCnd", "is", "not", "None", ":", "for", "cond", "in", "aryExclCnd", ":", "aryOns", "=", "aryOns", "[", "aryCnd", "!=", "cond", "]", "aryDrt", "=", "aryDrt", "[", "aryCnd", "!=", "cond", "]", "aryCnd", "=", "aryCnd", "[", "aryCnd", "!=", "cond", "]", "resolution", "=", "varTr", "/", "float", "(", "varTmpOvsmpl", ")", "aryCnd", "=", "np", ".", "asarray", "(", "aryCnd", ")", "aryOns", "=", "np", ".", "asarray", "(", "aryOns", ",", "dtype", "=", "np", ".", "float", ")", "unique_conditions", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "aryCnd", ")", ")", "boxcar", "=", "[", "]", "for", "c", "in", "unique_conditions", ":", "tmp", "=", "np", ".", "zeros", "(", "int", "(", "varNumVol", "*", "varTr", "/", "resolution", ")", ")", "onset_c", "=", "aryOns", "[", "aryCnd", "==", "c", "]", "duration_c", "=", "aryDrt", "[", "aryCnd", "==", "c", "]", "onset_idx", "=", "np", ".", "round", "(", "onset_c", "/", "resolution", ")", ".", "astype", "(", "np", ".", "int", ")", "duration_idx", "=", "np", ".", "round", "(", "duration_c", "/", "resolution", ")", ".", "astype", "(", "np", ".", "int", ")", "aux", "=", "np", ".", "arange", "(", "int", "(", "varNumVol", "*", "varTr", "/", "resolution", ")", ")", "for", "start", ",", "dur", "in", "zip", "(", "onset_idx", ",", "duration_idx", ")", ":", "lgc", "=", "np", ".", "logical_and", "(", "aux", ">=", "start", ",", "aux", "<", "start", "+", "dur", ")", "tmp", "=", "tmp", "+", "lgc", "assert", "np", ".", "all", "(", "np", ".", "less", "(", "tmp", ",", "2", ")", ")", "boxcar", ".", "append", "(", "tmp", ")", "aryBxCrOut", "=", "np", ".", "array", "(", "boxcar", ")", ".", "T", "if", "aryBxCrOut", ".", "shape", "[", "1", "]", "==", "1", ":", "aryBxCrOut", "=", "np", ".", "squeeze", "(", "aryBxCrOut", ")", "return", "aryBxCrOut", ".", "astype", "(", "'float16'", ")" ]
Creation of condition time courses in temporally upsampled space. Parameters ---------- aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varNumVol : float, positive Number of volumes of the (fMRI) data. aryExclCnd : array 1D array containing condition identifiers for conditions to be excluded varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. Returns ------- aryBxCrOut : np.array, float16 Condition time courses in temporally upsampled space. References: ----- [1] https://github.com/fabianp/hrf_estimation
[ "Creation", "of", "condition", "time", "courses", "in", "temporally", "upsampled", "space", "." ]
python
train
35.836066
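A minimal usage sketch for the create_boxcar function above, with made-up condition identifiers, onsets and durations; it assumes the function is importable or defined in the current session, and note that it relies on np.float/np.int, so an older NumPy release is assumed.

import numpy as np

# Hypothetical design: two conditions, TR of 2 s, 100 volumes,
# temporal oversampling factor of 10.
aryCnd = np.array([1, 2, 1, 2])             # condition identifiers
aryOns = np.array([0.0, 10.0, 40.0, 70.0])  # onsets in seconds
aryDrt = np.array([5.0, 5.0, 5.0, 5.0])     # durations in seconds

aryBxCr = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2.0, varNumVol=100,
                        varTmpOvsmpl=10.)
print(aryBxCr.shape)  # (1000, 2): upsampled time points x conditions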
pkkid/python-plexapi
plexapi/myplex.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/myplex.py#L600-L611
def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.id = utils.cast(int, data.attrib.get('id')) self.serverId = utils.cast(int, data.attrib.get('serverId')) self.machineIdentifier = data.attrib.get('machineIdentifier') self.name = data.attrib.get('name') self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt')) self.numLibraries = utils.cast(int, data.attrib.get('numLibraries')) self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries')) self.owned = utils.cast(bool, data.attrib.get('owned')) self.pending = utils.cast(bool, data.attrib.get('pending'))
[ "def", "_loadData", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "data", "self", ".", "id", "=", "utils", ".", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'id'", ")", ")", "self", ".", "serverId", "=", "utils", ".", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'serverId'", ")", ")", "self", ".", "machineIdentifier", "=", "data", ".", "attrib", ".", "get", "(", "'machineIdentifier'", ")", "self", ".", "name", "=", "data", ".", "attrib", ".", "get", "(", "'name'", ")", "self", ".", "lastSeenAt", "=", "utils", ".", "toDatetime", "(", "data", ".", "attrib", ".", "get", "(", "'lastSeenAt'", ")", ")", "self", ".", "numLibraries", "=", "utils", ".", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'numLibraries'", ")", ")", "self", ".", "allLibraries", "=", "utils", ".", "cast", "(", "bool", ",", "data", ".", "attrib", ".", "get", "(", "'allLibraries'", ")", ")", "self", ".", "owned", "=", "utils", ".", "cast", "(", "bool", ",", "data", ".", "attrib", ".", "get", "(", "'owned'", ")", ")", "self", ".", "pending", "=", "utils", ".", "cast", "(", "bool", ",", "data", ".", "attrib", ".", "get", "(", "'pending'", ")", ")" ]
Load attribute values from Plex XML response.
[ "Load", "attribute", "values", "from", "Plex", "XML", "response", "." ]
python
train
58.666667
UpCloudLtd/upcloud-python-api
upcloud_api/server.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/server.py#L457-L473
def ensure_started(self): """ Start a server and waits (blocking wait) until it is fully started. """ # server is either starting or stopping (or error) if self.state in ['maintenance', 'error']: self._wait_for_state_change(['stopped', 'started']) if self.state == 'stopped': self.start() self._wait_for_state_change(['started']) if self.state == 'started': return True else: # something went wrong, fail explicitly raise Exception('unknown server state: ' + self.state)
[ "def", "ensure_started", "(", "self", ")", ":", "# server is either starting or stopping (or error)", "if", "self", ".", "state", "in", "[", "'maintenance'", ",", "'error'", "]", ":", "self", ".", "_wait_for_state_change", "(", "[", "'stopped'", ",", "'started'", "]", ")", "if", "self", ".", "state", "==", "'stopped'", ":", "self", ".", "start", "(", ")", "self", ".", "_wait_for_state_change", "(", "[", "'started'", "]", ")", "if", "self", ".", "state", "==", "'started'", ":", "return", "True", "else", ":", "# something went wrong, fail explicitly", "raise", "Exception", "(", "'unknown server state: '", "+", "self", ".", "state", ")" ]
Start a server and waits (blocking wait) until it is fully started.
[ "Start", "a", "server", "and", "waits", "(", "blocking", "wait", ")", "until", "it", "is", "fully", "started", "." ]
python
train
34.823529
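A hedged usage sketch for ensure_started; the CloudManager import path, credentials and server UUID below are placeholder assumptions, and the call blocks until the UpCloud API reports the server as started.

from upcloud_api import CloudManager

manager = CloudManager('api_user', 'password')  # placeholder credentials
server = manager.get_server('00000000-0000-0000-0000-000000000000')  # placeholder UUID

# Starts the server if it is stopped and waits until its state is 'started'.
server.ensure_started()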
timothydmorton/VESPA
vespa/statutils.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/statutils.py#L8-L19
def kdeconf(kde,conf=0.683,xmin=None,xmax=None,npts=500, shortest=True,conftol=0.001,return_max=False): """ Returns desired confidence interval for provided KDE object """ if xmin is None: xmin = kde.dataset.min() if xmax is None: xmax = kde.dataset.max() x = np.linspace(xmin,xmax,npts) return conf_interval(x,kde(x),shortest=shortest,conf=conf, conftol=conftol,return_max=return_max)
[ "def", "kdeconf", "(", "kde", ",", "conf", "=", "0.683", ",", "xmin", "=", "None", ",", "xmax", "=", "None", ",", "npts", "=", "500", ",", "shortest", "=", "True", ",", "conftol", "=", "0.001", ",", "return_max", "=", "False", ")", ":", "if", "xmin", "is", "None", ":", "xmin", "=", "kde", ".", "dataset", ".", "min", "(", ")", "if", "xmax", "is", "None", ":", "xmax", "=", "kde", ".", "dataset", ".", "max", "(", ")", "x", "=", "np", ".", "linspace", "(", "xmin", ",", "xmax", ",", "npts", ")", "return", "conf_interval", "(", "x", ",", "kde", "(", "x", ")", ",", "shortest", "=", "shortest", ",", "conf", "=", "conf", ",", "conftol", "=", "conftol", ",", "return_max", "=", "return_max", ")" ]
Returns desired confidence interval for provided KDE object
[ "Returns", "desired", "confidence", "interval", "for", "provided", "KDE", "object" ]
python
train
37.916667
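A small sketch of how kdeconf might be called, assuming vespa.statutils (and its conf_interval helper) is importable; the sample data is synthetic.

import numpy as np
from scipy.stats import gaussian_kde
from vespa.statutils import kdeconf

samples = np.random.normal(loc=5.0, scale=1.0, size=2000)
kde = gaussian_kde(samples)

# Shortest interval containing ~68.3% of the density by default.
interval = kdeconf(kde, conf=0.683)
print(interval)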
etalab/cada
cada/csv.py
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L60-L74
def from_row(row): '''Create an advice from a CSV row''' subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5] return Advice.objects.create( id=row[0], administration=cleanup(row[1]), type=row[2], session=datetime.strptime(row[4], '%d/%m/%Y'), subject=cleanup(subject), topics=[t.title() for t in cleanup(row[6]).split(', ')], tags=[tag.strip() for tag in row[7].split(',') if tag.strip()], meanings=cleanup(row[8]).replace(' / ', '/').split(', '), part=_part(row[9]), content=cleanup(row[10]), )
[ "def", "from_row", "(", "row", ")", ":", "subject", "=", "(", "row", "[", "5", "]", "[", "0", "]", ".", "upper", "(", ")", "+", "row", "[", "5", "]", "[", "1", ":", "]", ")", "if", "row", "[", "5", "]", "else", "row", "[", "5", "]", "return", "Advice", ".", "objects", ".", "create", "(", "id", "=", "row", "[", "0", "]", ",", "administration", "=", "cleanup", "(", "row", "[", "1", "]", ")", ",", "type", "=", "row", "[", "2", "]", ",", "session", "=", "datetime", ".", "strptime", "(", "row", "[", "4", "]", ",", "'%d/%m/%Y'", ")", ",", "subject", "=", "cleanup", "(", "subject", ")", ",", "topics", "=", "[", "t", ".", "title", "(", ")", "for", "t", "in", "cleanup", "(", "row", "[", "6", "]", ")", ".", "split", "(", "', '", ")", "]", ",", "tags", "=", "[", "tag", ".", "strip", "(", ")", "for", "tag", "in", "row", "[", "7", "]", ".", "split", "(", "','", ")", "if", "tag", ".", "strip", "(", ")", "]", ",", "meanings", "=", "cleanup", "(", "row", "[", "8", "]", ")", ".", "replace", "(", "' / '", ",", "'/'", ")", ".", "split", "(", "', '", ")", ",", "part", "=", "_part", "(", "row", "[", "9", "]", ")", ",", "content", "=", "cleanup", "(", "row", "[", "10", "]", ")", ",", ")" ]
Create an advice from a CSV row
[ "Create", "an", "advice", "from", "a", "CSV", "row" ]
python
train
39.266667
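An illustrative sketch of feeding CSV rows to from_row; it assumes a configured MongoDB connection behind the Advice model, and the file name, delimiter and header row of the export are assumptions as well.

import csv

from cada.csv import from_row

with open('advices.csv') as fobj:
    reader = csv.reader(fobj, delimiter=';')  # delimiter is an assumption
    next(reader)                              # skip an assumed header row
    for row in reader:
        advice = from_row(row)                # creates and saves an Advice document
        print(advice.id)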
earlzo/hfut
hfut/shortcut.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/hfut/shortcut.py#L47-L57
def get_class_students(self, xqdm, kcdm, jxbh): """ 教学班查询, 查询指定教学班的所有学生 @structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]} :param xqdm: 学期代码 :param kcdm: 课程代码 :param jxbh: 教学班号 """ return self.query(GetClassStudents(xqdm, kcdm, jxbh))
[ "def", "get_class_students", "(", "self", ",", "xqdm", ",", "kcdm", ",", "jxbh", ")", ":", "return", "self", ".", "query", "(", "GetClassStudents", "(", "xqdm", ",", "kcdm", ",", "jxbh", ")", ")" ]
Teaching class query: retrieve all students of the specified teaching class

@structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}

:param xqdm: semester code
:param kcdm: course code
:param jxbh: teaching class number
[ "教学班查询", "查询指定教学班的所有学生" ]
python
train
27.909091
rosenbrockc/fortpy
fortpy/debug.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/debug.py#L43-L51
def increase_indent(func): """Decorator for makin """ def wrapper(*args, **kwargs): global _debug_indent _debug_indent += 1 result = func(*args, **kwargs) _debug_indent -= 1 return result return wrapper
[ "def", "increase_indent", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "_debug_indent", "_debug_indent", "+=", "1", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "_debug_indent", "-=", "1", "return", "result", "return", "wrapper" ]
Decorator for makin
[ "Decorator", "for", "makin" ]
python
train
27.333333
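The decorator above just increments a module-level counter around the wrapped call; the following self-contained sketch of the same pattern (not the fortpy module itself) shows how nested calls track their depth.

_debug_indent = 0

def increase_indent(func):
    def wrapper(*args, **kwargs):
        global _debug_indent
        _debug_indent += 1
        result = func(*args, **kwargs)
        _debug_indent -= 1
        return result
    return wrapper

@increase_indent
def inner():
    print('  ' * _debug_indent + 'inner body')   # depth 2 when called from outer

@increase_indent
def outer():
    print('  ' * _debug_indent + 'outer body')   # depth 1
    inner()

outer()
# prints:
#   outer body
#     inner body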
Opentrons/opentrons
api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L1068-L1148
def move(self, target, home_flagged_axes=False): ''' Move to the `target` Smoothieware coordinate, along any of the size axes, XYZABC. target: dict dict setting the coordinate that Smoothieware will be at when `move()` returns. `target` keys are the axis in upper-case, and the values are the coordinate in millimeters (float) home_flagged_axes: boolean (default=False) If set to `True`, each axis included within the target coordinate may be homed before moving, determined by Smoothieware's internal homing-status flags (`True` means it has already homed). All axes' flags are set to `False` by Smoothieware under three conditions: 1) Smoothieware boots or resets, 2) if a HALT gcode or signal is sent, or 3) a homing/limitswitch error occured. ''' from numpy import isclose self.run_flag.wait() def valid_movement(coords, axis): return not ( (axis in DISABLE_AXES) or (coords is None) or isclose(coords, self.position[axis]) ) def create_coords_list(coords_dict): return [ axis + str(round(coords, GCODE_ROUNDING_PRECISION)) for axis, coords in sorted(coords_dict.items()) if valid_movement(coords, axis) ] backlash_target = target.copy() backlash_target.update({ axis: value + PLUNGER_BACKLASH_MM for axis, value in sorted(target.items()) if axis in 'BC' and self.position[axis] < value }) target_coords = create_coords_list(target) backlash_coords = create_coords_list(backlash_target) if target_coords: non_moving_axes = ''.join([ ax for ax in AXES if ax not in target.keys() ]) self.dwell_axes(non_moving_axes) self.activate_axes(target.keys()) # include the current-setting gcodes within the moving gcode string # to reduce latency, since we're setting current so much command = self._generate_current_command() if backlash_coords != target_coords: command += ' ' + GCODES['MOVE'] + ''.join(backlash_coords) command += ' ' + GCODES['MOVE'] + ''.join(target_coords) try: for axis in target.keys(): self.engaged_axes[axis] = True if home_flagged_axes: self.home_flagged_axes(''.join(list(target.keys()))) log.debug("move: {}".format(command)) # TODO (andy) a movement's timeout should be calculated by # how long the movement is expected to take. A default timeout # of 30 seconds prevents any movements that take longer self._send_command(command, timeout=DEFAULT_MOVEMENT_TIMEOUT) finally: # dwell pipette motors because they get hot plunger_axis_moved = ''.join(set('BC') & set(target.keys())) if plunger_axis_moved: self.dwell_axes(plunger_axis_moved) self._set_saved_current() self._update_position(target)
[ "def", "move", "(", "self", ",", "target", ",", "home_flagged_axes", "=", "False", ")", ":", "from", "numpy", "import", "isclose", "self", ".", "run_flag", ".", "wait", "(", ")", "def", "valid_movement", "(", "coords", ",", "axis", ")", ":", "return", "not", "(", "(", "axis", "in", "DISABLE_AXES", ")", "or", "(", "coords", "is", "None", ")", "or", "isclose", "(", "coords", ",", "self", ".", "position", "[", "axis", "]", ")", ")", "def", "create_coords_list", "(", "coords_dict", ")", ":", "return", "[", "axis", "+", "str", "(", "round", "(", "coords", ",", "GCODE_ROUNDING_PRECISION", ")", ")", "for", "axis", ",", "coords", "in", "sorted", "(", "coords_dict", ".", "items", "(", ")", ")", "if", "valid_movement", "(", "coords", ",", "axis", ")", "]", "backlash_target", "=", "target", ".", "copy", "(", ")", "backlash_target", ".", "update", "(", "{", "axis", ":", "value", "+", "PLUNGER_BACKLASH_MM", "for", "axis", ",", "value", "in", "sorted", "(", "target", ".", "items", "(", ")", ")", "if", "axis", "in", "'BC'", "and", "self", ".", "position", "[", "axis", "]", "<", "value", "}", ")", "target_coords", "=", "create_coords_list", "(", "target", ")", "backlash_coords", "=", "create_coords_list", "(", "backlash_target", ")", "if", "target_coords", ":", "non_moving_axes", "=", "''", ".", "join", "(", "[", "ax", "for", "ax", "in", "AXES", "if", "ax", "not", "in", "target", ".", "keys", "(", ")", "]", ")", "self", ".", "dwell_axes", "(", "non_moving_axes", ")", "self", ".", "activate_axes", "(", "target", ".", "keys", "(", ")", ")", "# include the current-setting gcodes within the moving gcode string", "# to reduce latency, since we're setting current so much", "command", "=", "self", ".", "_generate_current_command", "(", ")", "if", "backlash_coords", "!=", "target_coords", ":", "command", "+=", "' '", "+", "GCODES", "[", "'MOVE'", "]", "+", "''", ".", "join", "(", "backlash_coords", ")", "command", "+=", "' '", "+", "GCODES", "[", "'MOVE'", "]", "+", "''", ".", "join", "(", "target_coords", ")", "try", ":", "for", "axis", "in", "target", ".", "keys", "(", ")", ":", "self", ".", "engaged_axes", "[", "axis", "]", "=", "True", "if", "home_flagged_axes", ":", "self", ".", "home_flagged_axes", "(", "''", ".", "join", "(", "list", "(", "target", ".", "keys", "(", ")", ")", ")", ")", "log", ".", "debug", "(", "\"move: {}\"", ".", "format", "(", "command", ")", ")", "# TODO (andy) a movement's timeout should be calculated by", "# how long the movement is expected to take. A default timeout", "# of 30 seconds prevents any movements that take longer", "self", ".", "_send_command", "(", "command", ",", "timeout", "=", "DEFAULT_MOVEMENT_TIMEOUT", ")", "finally", ":", "# dwell pipette motors because they get hot", "plunger_axis_moved", "=", "''", ".", "join", "(", "set", "(", "'BC'", ")", "&", "set", "(", "target", ".", "keys", "(", ")", ")", ")", "if", "plunger_axis_moved", ":", "self", ".", "dwell_axes", "(", "plunger_axis_moved", ")", "self", ".", "_set_saved_current", "(", ")", "self", ".", "_update_position", "(", "target", ")" ]
Move to the `target` Smoothieware coordinate, along any of the size axes, XYZABC. target: dict dict setting the coordinate that Smoothieware will be at when `move()` returns. `target` keys are the axis in upper-case, and the values are the coordinate in millimeters (float) home_flagged_axes: boolean (default=False) If set to `True`, each axis included within the target coordinate may be homed before moving, determined by Smoothieware's internal homing-status flags (`True` means it has already homed). All axes' flags are set to `False` by Smoothieware under three conditions: 1) Smoothieware boots or resets, 2) if a HALT gcode or signal is sent, or 3) a homing/limitswitch error occured.
[ "Move", "to", "the", "target", "Smoothieware", "coordinate", "along", "any", "of", "the", "size", "axes", "XYZABC", "." ]
python
train
40.901235
LordSputnik/mutagen
mutagen/ogg.py
https://github.com/LordSputnik/mutagen/blob/38e62c8dc35c72b16554f5dbe7c0fde91acc3411/mutagen/ogg.py#L394-L443
def find_last(fileobj, serial): """Find the last page of the stream 'serial'. If the file is not multiplexed this function is fast. If it is, it must read the whole the stream. This finds the last page in the actual file object, or the last page in the stream (with eos set), whichever comes first. """ # For non-muxed streams, look at the last page. try: fileobj.seek(-256*256, 2) except IOError: # The file is less than 64k in length. fileobj.seek(0) data = fileobj.read() try: index = data.rindex(b"OggS") except ValueError: raise error("unable to find final Ogg header") bytesobj = cBytesIO(data[index:]) best_page = None try: page = OggPage(bytesobj) except error: pass else: if page.serial == serial: if page.last: return page else: best_page = page else: best_page = None # The stream is muxed, so use the slow way. fileobj.seek(0) try: page = OggPage(fileobj) while not page.last: page = OggPage(fileobj) while page.serial != serial: page = OggPage(fileobj) best_page = page return page except error: return best_page except EOFError: return best_page
[ "def", "find_last", "(", "fileobj", ",", "serial", ")", ":", "# For non-muxed streams, look at the last page.", "try", ":", "fileobj", ".", "seek", "(", "-", "256", "*", "256", ",", "2", ")", "except", "IOError", ":", "# The file is less than 64k in length.", "fileobj", ".", "seek", "(", "0", ")", "data", "=", "fileobj", ".", "read", "(", ")", "try", ":", "index", "=", "data", ".", "rindex", "(", "b\"OggS\"", ")", "except", "ValueError", ":", "raise", "error", "(", "\"unable to find final Ogg header\"", ")", "bytesobj", "=", "cBytesIO", "(", "data", "[", "index", ":", "]", ")", "best_page", "=", "None", "try", ":", "page", "=", "OggPage", "(", "bytesobj", ")", "except", "error", ":", "pass", "else", ":", "if", "page", ".", "serial", "==", "serial", ":", "if", "page", ".", "last", ":", "return", "page", "else", ":", "best_page", "=", "page", "else", ":", "best_page", "=", "None", "# The stream is muxed, so use the slow way.", "fileobj", ".", "seek", "(", "0", ")", "try", ":", "page", "=", "OggPage", "(", "fileobj", ")", "while", "not", "page", ".", "last", ":", "page", "=", "OggPage", "(", "fileobj", ")", "while", "page", ".", "serial", "!=", "serial", ":", "page", "=", "OggPage", "(", "fileobj", ")", "best_page", "=", "page", "return", "page", "except", "error", ":", "return", "best_page", "except", "EOFError", ":", "return", "best_page" ]
Find the last page of the stream 'serial'. If the file is not multiplexed this function is fast. If it is, it must read the whole the stream. This finds the last page in the actual file object, or the last page in the stream (with eos set), whichever comes first.
[ "Find", "the", "last", "page", "of", "the", "stream", "serial", "." ]
python
test
30.28
Terrance/SkPy
skpy/main.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/main.py#L120-L129
def setMood(self, mood): """ Update the activity message for the current user. Args: mood (str): new mood message """ self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}}) self.user.mood = SkypeUser.Mood(plain=mood) if mood else None
[ "def", "setMood", "(", "self", ",", "mood", ")", ":", "self", ".", "conn", "(", "\"POST\"", ",", "\"{0}/users/{1}/profile/partial\"", ".", "format", "(", "SkypeConnection", ".", "API_USER", ",", "self", ".", "userId", ")", ",", "auth", "=", "SkypeConnection", ".", "Auth", ".", "SkypeToken", ",", "json", "=", "{", "\"payload\"", ":", "{", "\"mood\"", ":", "mood", "or", "\"\"", "}", "}", ")", "self", ".", "user", ".", "mood", "=", "SkypeUser", ".", "Mood", "(", "plain", "=", "mood", ")", "if", "mood", "else", "None" ]
Update the activity message for the current user. Args: mood (str): new mood message
[ "Update", "the", "activity", "message", "for", "the", "current", "user", "." ]
python
test
42.4
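A usage sketch for setMood; the import path is the project's documented top-level entry point, the credentials are placeholders, and the call performs a live Skype API request.

from skpy import Skype

sk = Skype('user@example.com', 'password')  # placeholder credentials
sk.setMood('Out at lunch, back at 2pm')
print(sk.user.mood)  # SkypeUser.Mood built from the new message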
pytroll/satpy
satpy/writers/__init__.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/writers/__init__.py#L111-L142
def configs_for_writer(writer=None, ppp_config_dir=None): """Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files """ search_paths = (ppp_config_dir,) if ppp_config_dir else tuple() if writer is not None: if not isinstance(writer, (list, tuple)): writer = [writer] # given a config filename or writer name config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer] else: writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths) config_files = set(writer_configs) for config_file in config_files: config_basename = os.path.basename(config_file) writer_configs = config_search_paths( os.path.join("writers", config_basename), *search_paths) if not writer_configs: LOG.warning("No writer configs found for '%s'", writer) continue yield writer_configs
[ "def", "configs_for_writer", "(", "writer", "=", "None", ",", "ppp_config_dir", "=", "None", ")", ":", "search_paths", "=", "(", "ppp_config_dir", ",", ")", "if", "ppp_config_dir", "else", "tuple", "(", ")", "if", "writer", "is", "not", "None", ":", "if", "not", "isinstance", "(", "writer", ",", "(", "list", ",", "tuple", ")", ")", ":", "writer", "=", "[", "writer", "]", "# given a config filename or writer name", "config_files", "=", "[", "w", "if", "w", ".", "endswith", "(", "'.yaml'", ")", "else", "w", "+", "'.yaml'", "for", "w", "in", "writer", "]", "else", ":", "writer_configs", "=", "glob_config", "(", "os", ".", "path", ".", "join", "(", "'writers'", ",", "'*.yaml'", ")", ",", "*", "search_paths", ")", "config_files", "=", "set", "(", "writer_configs", ")", "for", "config_file", "in", "config_files", ":", "config_basename", "=", "os", ".", "path", ".", "basename", "(", "config_file", ")", "writer_configs", "=", "config_search_paths", "(", "os", ".", "path", ".", "join", "(", "\"writers\"", ",", "config_basename", ")", ",", "*", "search_paths", ")", "if", "not", "writer_configs", ":", "LOG", ".", "warning", "(", "\"No writer configs found for '%s'\"", ",", "writer", ")", "continue", "yield", "writer_configs" ]
Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files
[ "Generator", "of", "writer", "configuration", "files", "for", "one", "or", "more", "writers" ]
python
train
37.90625
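A small sketch of iterating the generator above; 'geotiff' is an example writer name and assumes that writer's YAML config ships with the installed satpy.

from satpy.writers import configs_for_writer

# Each yielded item is a list of YAML config file paths for one writer.
for config_files in configs_for_writer(writer='geotiff'):
    print(config_files)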
sampottinger/pycotracer
pycotracer/mongo_aggregator.py
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/mongo_aggregator.py#L132-L147
def update_expenditure_entry(database, entry): """Update a record of a expenditure report in the provided database. @param db: The MongoDB database to operate on. The expenditures collection will be used from this database. @type db: pymongo.database.Database @param entry: The entry to insert into the database, updating the entry with the same recordID if one exists. @type entry: dict """ entry = clean_entry(entry) database.expenditures.update( {'recordID': entry['recordID']}, {'$set': entry}, upsert=True )
[ "def", "update_expenditure_entry", "(", "database", ",", "entry", ")", ":", "entry", "=", "clean_entry", "(", "entry", ")", "database", ".", "expenditures", ".", "update", "(", "{", "'recordID'", ":", "entry", "[", "'recordID'", "]", "}", ",", "{", "'$set'", ":", "entry", "}", ",", "upsert", "=", "True", ")" ]
Update a record of a expenditure report in the provided database. @param db: The MongoDB database to operate on. The expenditures collection will be used from this database. @type db: pymongo.database.Database @param entry: The entry to insert into the database, updating the entry with the same recordID if one exists. @type entry: dict
[ "Update", "a", "record", "of", "a", "expenditure", "report", "in", "the", "provided", "database", "." ]
python
train
35.875
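A hedged sketch of calling update_expenditure_entry with a pymongo database handle; the connection URI, database name and the entry fields other than recordID are illustrative assumptions, and the legacy Collection.update call used above implies an older pymongo (3.x or earlier).

import pymongo

from pycotracer.mongo_aggregator import update_expenditure_entry

client = pymongo.MongoClient('mongodb://localhost:27017')
database = client['cotracer']  # assumed database name

entry = {
    'recordID': '2013-12345',          # upsert key
    'amount': 150.0,                   # illustrative fields
    'committee': 'Example Committee',
}
update_expenditure_entry(database, entry)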
richardliaw/track
track/convenience.py
https://github.com/richardliaw/track/blob/7ac42ea34e5c1d7bb92fd813e938835a06a63fc7/track/convenience.py#L10-L31
def absl_flags(): """ Extracts absl-py flags that the user has specified and outputs their key-value mapping. By default, extracts only those flags in the current __package__ and mainfile. Useful to put into a trial's param_map. """ # TODO: need same thing for argparse flags_dict = flags.FLAGS.flags_by_module_dict() # only include parameters from modules the user probably cares about def _relevant_module(module_name): if __package__ and __package__ in module_name: return True if module_name == sys.argv[0]: return True return False return { flag.name: flag.value for module, flags in flags_dict.items() for flag in flags if _relevant_module(module)}
[ "def", "absl_flags", "(", ")", ":", "# TODO: need same thing for argparse", "flags_dict", "=", "flags", ".", "FLAGS", ".", "flags_by_module_dict", "(", ")", "# only include parameters from modules the user probably cares about", "def", "_relevant_module", "(", "module_name", ")", ":", "if", "__package__", "and", "__package__", "in", "module_name", ":", "return", "True", "if", "module_name", "==", "sys", ".", "argv", "[", "0", "]", ":", "return", "True", "return", "False", "return", "{", "flag", ".", "name", ":", "flag", ".", "value", "for", "module", ",", "flags", "in", "flags_dict", ".", "items", "(", ")", "for", "flag", "in", "flags", "if", "_relevant_module", "(", "module", ")", "}" ]
Extracts absl-py flags that the user has specified and outputs their key-value mapping. By default, extracts only those flags in the current __package__ and mainfile. Useful to put into a trial's param_map.
[ "Extracts", "absl", "-", "py", "flags", "that", "the", "user", "has", "specified", "and", "outputs", "their", "key", "-", "value", "mapping", "." ]
python
train
33.954545
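A sketch of how absl_flags might be used from a script's own main function; it assumes the script defines its flags via absl and imports the helper from track.convenience as in the path above.

from absl import app, flags

from track.convenience import absl_flags

flags.DEFINE_integer('batch_size', 32, 'training batch size')
flags.DEFINE_float('lr', 0.01, 'learning rate')


def main(_):
    # Collect this script's flag values into a plain dict,
    # e.g. to store as a trial's param_map.
    params = absl_flags()
    print(params)  # expected to include {'batch_size': ..., 'lr': ...}


if __name__ == '__main__':
    app.run(main)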
spyder-ide/spyder
spyder/plugins/onlinehelp/widgets.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/onlinehelp/widgets.py#L80-L84
def initialize(self): """Start pydoc server""" QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) QApplication.processEvents() self.start_server()
[ "def", "initialize", "(", "self", ")", ":", "QApplication", ".", "setOverrideCursor", "(", "QCursor", "(", "Qt", ".", "WaitCursor", ")", ")", "QApplication", ".", "processEvents", "(", ")", "self", ".", "start_server", "(", ")" ]
Start pydoc server
[ "Start", "pydoc", "server" ]
python
train
36.4
aws/aws-encryption-sdk-python
decrypt_oracle/src/aws_encryption_sdk_decrypt_oracle/key_providers/counting.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/decrypt_oracle/src/aws_encryption_sdk_decrypt_oracle/key_providers/counting.py#L80-L99
def _decrypt_data_key( self, encrypted_data_key: EncryptedDataKey, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text] ) -> DataKey: """Decrypt an encrypted data key and return the plaintext. :param data_key: Encrypted data key :type data_key: aws_encryption_sdk.structures.EncryptedDataKey :param algorithm: Algorithm object which directs how this Master Key will encrypt the data key :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param dict encryption_context: Encryption context to use in decryption :returns: Data key containing decrypted data key :rtype: aws_encryption_sdk.structures.DataKey :raises DecryptKeyError: if Master Key is unable to decrypt data key """ if encrypted_data_key.encrypted_data_key != self._encrypted_data_key: raise DecryptKeyError( 'Master Key "{provider}" unable to decrypt data key'.format(provider=self.key_provider) ) return self._generate_data_key(algorithm, encryption_context)
[ "def", "_decrypt_data_key", "(", "self", ",", "encrypted_data_key", ":", "EncryptedDataKey", ",", "algorithm", ":", "AlgorithmSuite", ",", "encryption_context", ":", "Dict", "[", "Text", ",", "Text", "]", ")", "->", "DataKey", ":", "if", "encrypted_data_key", ".", "encrypted_data_key", "!=", "self", ".", "_encrypted_data_key", ":", "raise", "DecryptKeyError", "(", "'Master Key \"{provider}\" unable to decrypt data key'", ".", "format", "(", "provider", "=", "self", ".", "key_provider", ")", ")", "return", "self", ".", "_generate_data_key", "(", "algorithm", ",", "encryption_context", ")" ]
Decrypt an encrypted data key and return the plaintext. :param data_key: Encrypted data key :type data_key: aws_encryption_sdk.structures.EncryptedDataKey :param algorithm: Algorithm object which directs how this Master Key will encrypt the data key :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param dict encryption_context: Encryption context to use in decryption :returns: Data key containing decrypted data key :rtype: aws_encryption_sdk.structures.DataKey :raises DecryptKeyError: if Master Key is unable to decrypt data key
[ "Decrypt", "an", "encrypted", "data", "key", "and", "return", "the", "plaintext", "." ]
python
train
53.55
pandas-dev/pandas
pandas/io/json/normalize.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/normalize.py#L99-L286
def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, errors='raise', sep='.'): """ Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table meta_prefix : string, default None record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... 
['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 """ def _pull_field(js, spec): result = js if isinstance(spec, list): for field in spec: result = result[field] else: result = result[spec] return result if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob if isinstance(data, dict): data = [data] if record_path is None: if any([isinstance(x, dict) for x in y.values()] for y in data): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] lengths = [] meta_vals = defaultdict(list) if not isinstance(sep, str): sep = str(sep) meta_keys = [sep.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for val, key in zip(meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_field(obj, path[0]) # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: try: meta_val = _pull_field(obj, val[level:]) except KeyError as e: if errors == 'ignore': meta_val = np.nan else: raise KeyError("Try running with " "errors='ignore' as key " "{err} is not always present" .format(err=e)) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename( columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError('Conflicting metadata name {name}, ' 'need distinguishing prefix '.format(name=k)) # forcing dtype to object to avoid the metadata being casted to string result[k] = np.array(v, dtype=object).repeat(lengths) return result
[ "def", "json_normalize", "(", "data", ",", "record_path", "=", "None", ",", "meta", "=", "None", ",", "meta_prefix", "=", "None", ",", "record_prefix", "=", "None", ",", "errors", "=", "'raise'", ",", "sep", "=", "'.'", ")", ":", "def", "_pull_field", "(", "js", ",", "spec", ")", ":", "result", "=", "js", "if", "isinstance", "(", "spec", ",", "list", ")", ":", "for", "field", "in", "spec", ":", "result", "=", "result", "[", "field", "]", "else", ":", "result", "=", "result", "[", "spec", "]", "return", "result", "if", "isinstance", "(", "data", ",", "list", ")", "and", "not", "data", ":", "return", "DataFrame", "(", ")", "# A bit of a hackjob", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "[", "data", "]", "if", "record_path", "is", "None", ":", "if", "any", "(", "[", "isinstance", "(", "x", ",", "dict", ")", "for", "x", "in", "y", ".", "values", "(", ")", "]", "for", "y", "in", "data", ")", ":", "# naive normalization, this is idempotent for flat records", "# and potentially will inflate the data considerably for", "# deeply nested structures:", "# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}", "#", "# TODO: handle record value which are lists, at least error", "# reasonably", "data", "=", "nested_to_record", "(", "data", ",", "sep", "=", "sep", ")", "return", "DataFrame", "(", "data", ")", "elif", "not", "isinstance", "(", "record_path", ",", "list", ")", ":", "record_path", "=", "[", "record_path", "]", "if", "meta", "is", "None", ":", "meta", "=", "[", "]", "elif", "not", "isinstance", "(", "meta", ",", "list", ")", ":", "meta", "=", "[", "meta", "]", "meta", "=", "[", "m", "if", "isinstance", "(", "m", ",", "list", ")", "else", "[", "m", "]", "for", "m", "in", "meta", "]", "# Disastrously inefficient for now", "records", "=", "[", "]", "lengths", "=", "[", "]", "meta_vals", "=", "defaultdict", "(", "list", ")", "if", "not", "isinstance", "(", "sep", ",", "str", ")", ":", "sep", "=", "str", "(", "sep", ")", "meta_keys", "=", "[", "sep", ".", "join", "(", "val", ")", "for", "val", "in", "meta", "]", "def", "_recursive_extract", "(", "data", ",", "path", ",", "seen_meta", ",", "level", "=", "0", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "[", "data", "]", "if", "len", "(", "path", ")", ">", "1", ":", "for", "obj", "in", "data", ":", "for", "val", ",", "key", "in", "zip", "(", "meta", ",", "meta_keys", ")", ":", "if", "level", "+", "1", "==", "len", "(", "val", ")", ":", "seen_meta", "[", "key", "]", "=", "_pull_field", "(", "obj", ",", "val", "[", "-", "1", "]", ")", "_recursive_extract", "(", "obj", "[", "path", "[", "0", "]", "]", ",", "path", "[", "1", ":", "]", ",", "seen_meta", ",", "level", "=", "level", "+", "1", ")", "else", ":", "for", "obj", "in", "data", ":", "recs", "=", "_pull_field", "(", "obj", ",", "path", "[", "0", "]", ")", "# For repeating the metadata later", "lengths", ".", "append", "(", "len", "(", "recs", ")", ")", "for", "val", ",", "key", "in", "zip", "(", "meta", ",", "meta_keys", ")", ":", "if", "level", "+", "1", ">", "len", "(", "val", ")", ":", "meta_val", "=", "seen_meta", "[", "key", "]", "else", ":", "try", ":", "meta_val", "=", "_pull_field", "(", "obj", ",", "val", "[", "level", ":", "]", ")", "except", "KeyError", "as", "e", ":", "if", "errors", "==", "'ignore'", ":", "meta_val", "=", "np", ".", "nan", "else", ":", "raise", "KeyError", "(", "\"Try running with \"", "\"errors='ignore' as key \"", "\"{err} is not always present\"", ".", "format", "(", "err", "=", "e", ")", ")", 
"meta_vals", "[", "key", "]", ".", "append", "(", "meta_val", ")", "records", ".", "extend", "(", "recs", ")", "_recursive_extract", "(", "data", ",", "record_path", ",", "{", "}", ",", "level", "=", "0", ")", "result", "=", "DataFrame", "(", "records", ")", "if", "record_prefix", "is", "not", "None", ":", "result", "=", "result", ".", "rename", "(", "columns", "=", "lambda", "x", ":", "\"{p}{c}\"", ".", "format", "(", "p", "=", "record_prefix", ",", "c", "=", "x", ")", ")", "# Data types, a problem", "for", "k", ",", "v", "in", "meta_vals", ".", "items", "(", ")", ":", "if", "meta_prefix", "is", "not", "None", ":", "k", "=", "meta_prefix", "+", "k", "if", "k", "in", "result", ":", "raise", "ValueError", "(", "'Conflicting metadata name {name}, '", "'need distinguishing prefix '", ".", "format", "(", "name", "=", "k", ")", ")", "# forcing dtype to object to avoid the metadata being casted to string", "result", "[", "k", "]", "=", "np", ".", "array", "(", "v", ",", "dtype", "=", "object", ")", ".", "repeat", "(", "lengths", ")", "return", "result" ]
Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table meta_prefix : string, default None record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... ['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2
[ "Normalize", "semi", "-", "structured", "JSON", "data", "into", "a", "flat", "table", "." ]
python
train
35.553191
nuagenetworks/bambou
bambou/nurest_connection.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_connection.py#L427-L440
def start(self): """ Make an HTTP request with a specific method """ # TODO : Use Timeout here and _ignore_request_idle from .nurest_session import NURESTSession session = NURESTSession.get_current_session() if self.async: thread = threading.Thread(target=self._make_request, kwargs={'session': session}) thread.is_daemon = False thread.start() return self.transaction_id return self._make_request(session=session)
[ "def", "start", "(", "self", ")", ":", "# TODO : Use Timeout here and _ignore_request_idle", "from", ".", "nurest_session", "import", "NURESTSession", "session", "=", "NURESTSession", ".", "get_current_session", "(", ")", "if", "self", ".", "async", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_make_request", ",", "kwargs", "=", "{", "'session'", ":", "session", "}", ")", "thread", ".", "is_daemon", "=", "False", "thread", ".", "start", "(", ")", "return", "self", ".", "transaction_id", "return", "self", ".", "_make_request", "(", "session", "=", "session", ")" ]
Make an HTTP request with a specific method
[ "Make", "an", "HTTP", "request", "with", "a", "specific", "method" ]
python
train
35.714286
bretth/woven
woven/webservers.py
https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/webservers.py#L84-L97
def _ls_sites(path): """ List only sites in the domain_sites() to ensure we co-exist with other projects """ with cd(path): sites = run('ls').split('\n') doms = [d.name for d in domain_sites()] dom_sites = [] for s in sites: ds = s.split('-')[0] ds = ds.replace('_','.') if ds in doms and s not in dom_sites: dom_sites.append(s) return dom_sites
[ "def", "_ls_sites", "(", "path", ")", ":", "with", "cd", "(", "path", ")", ":", "sites", "=", "run", "(", "'ls'", ")", ".", "split", "(", "'\\n'", ")", "doms", "=", "[", "d", ".", "name", "for", "d", "in", "domain_sites", "(", ")", "]", "dom_sites", "=", "[", "]", "for", "s", "in", "sites", ":", "ds", "=", "s", ".", "split", "(", "'-'", ")", "[", "0", "]", "ds", "=", "ds", ".", "replace", "(", "'_'", ",", "'.'", ")", "if", "ds", "in", "doms", "and", "s", "not", "in", "dom_sites", ":", "dom_sites", ".", "append", "(", "s", ")", "return", "dom_sites" ]
List only sites in the domain_sites() to ensure we co-exist with other projects
[ "List", "only", "sites", "in", "the", "domain_sites", "()", "to", "ensure", "we", "co", "-", "exist", "with", "other", "projects" ]
python
train
31.214286
decryptus/sonicprobe
sonicprobe/libs/daemonize.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/daemonize.py#L186-L234
def daemonize(): """ Daemonize the program, ie. make it run in the "background", detach it from its controlling terminal and from its controlling process group session. NOTES: - This function also umask(0) and chdir("/") - stdin, stdout, and stderr are redirected from/to /dev/null SEE ALSO: http://www.unixguide.net/unix/programming/1.7.shtml """ try: pid = os.fork() if pid > 0: os._exit(0) # pylint: disable-msg=W0212 except OSError, e: log.exception("first fork() failed: %d (%s)", e.errno, e.strerror) sys.exit(1) os.setsid() os.umask(0) os.chdir("/") try: pid = os.fork() if pid > 0: os._exit(0) # pylint: disable-msg=W0212 except OSError, e: log.exception("second fork() failed: %d (%s)", e.errno, e.strerror) sys.exit(1) try: devnull_fd = os.open(os.devnull, os.O_RDWR) for stdf in (sys.__stdout__, sys.__stderr__): try: stdf.flush() except Exception: # pylint: disable-msg=W0703,W0704 pass for stdf in (sys.__stdin__, sys.__stdout__, sys.__stderr__): try: os.dup2(devnull_fd, stdf.fileno()) except OSError: # pylint: disable-msg=W0704 pass except Exception: # pylint: disable-msg=W0703 log.exception("error during file descriptor redirection")
[ "def", "daemonize", "(", ")", ":", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", ">", "0", ":", "os", ".", "_exit", "(", "0", ")", "# pylint: disable-msg=W0212", "except", "OSError", ",", "e", ":", "log", ".", "exception", "(", "\"first fork() failed: %d (%s)\"", ",", "e", ".", "errno", ",", "e", ".", "strerror", ")", "sys", ".", "exit", "(", "1", ")", "os", ".", "setsid", "(", ")", "os", ".", "umask", "(", "0", ")", "os", ".", "chdir", "(", "\"/\"", ")", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", ">", "0", ":", "os", ".", "_exit", "(", "0", ")", "# pylint: disable-msg=W0212", "except", "OSError", ",", "e", ":", "log", ".", "exception", "(", "\"second fork() failed: %d (%s)\"", ",", "e", ".", "errno", ",", "e", ".", "strerror", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "devnull_fd", "=", "os", ".", "open", "(", "os", ".", "devnull", ",", "os", ".", "O_RDWR", ")", "for", "stdf", "in", "(", "sys", ".", "__stdout__", ",", "sys", ".", "__stderr__", ")", ":", "try", ":", "stdf", ".", "flush", "(", ")", "except", "Exception", ":", "# pylint: disable-msg=W0703,W0704", "pass", "for", "stdf", "in", "(", "sys", ".", "__stdin__", ",", "sys", ".", "__stdout__", ",", "sys", ".", "__stderr__", ")", ":", "try", ":", "os", ".", "dup2", "(", "devnull_fd", ",", "stdf", ".", "fileno", "(", ")", ")", "except", "OSError", ":", "# pylint: disable-msg=W0704", "pass", "except", "Exception", ":", "# pylint: disable-msg=W0703", "log", ".", "exception", "(", "\"error during file descriptor redirection\"", ")" ]
Daemonize the program, ie. make it run in the "background", detach it from its controlling terminal and from its controlling process group session. NOTES: - This function also umask(0) and chdir("/") - stdin, stdout, and stderr are redirected from/to /dev/null SEE ALSO: http://www.unixguide.net/unix/programming/1.7.shtml
[ "Daemonize", "the", "program", "ie", ".", "make", "it", "run", "in", "the", "background", "detach", "it", "from", "its", "controlling", "terminal", "and", "from", "its", "controlling", "process", "group", "session", "." ]
python
train
29.204082
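A usage sketch for daemonize; the module above is written for Python 2 (note the `except OSError, e` syntax), so a Python 2 interpreter is assumed, and the PID file path is a placeholder.

import os
import time

from sonicprobe.libs.daemonize import daemonize

daemonize()  # double fork, setsid, chdir('/'), stdio redirected to /dev/null

with open('/tmp/mydaemon.pid', 'w') as pidfile:  # record the daemon's PID
    pidfile.write(str(os.getpid()))

while True:        # the daemon's main loop
    time.sleep(60)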
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/workflow/workflow_real_time_statistics.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/workflow/workflow_real_time_statistics.py#L92-L106
def get_instance(self, payload): """ Build an instance of WorkflowRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance """ return WorkflowRealTimeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], workflow_sid=self._solution['workflow_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "WorkflowRealTimeStatisticsInstance", "(", "self", ".", "_version", ",", "payload", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'workspace_sid'", "]", ",", "workflow_sid", "=", "self", ".", "_solution", "[", "'workflow_sid'", "]", ",", ")" ]
Build an instance of WorkflowRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
[ "Build", "an", "instance", "of", "WorkflowRealTimeStatisticsInstance" ]
python
train
43.066667
raiden-network/raiden
raiden/transfer/node.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/node.py#L1183-L1232
def is_transaction_invalidated(transaction, state_change): """ True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated. """ # Most transactions cannot be invalidated by others. These are: # # - close transactions # - settle transactions # - batch unlocks # # Deposits and withdraws are invalidated by the close, but these are not # made atomic through the WAL. is_our_failed_update_transfer = ( isinstance(state_change, ContractReceiveChannelSettled) and isinstance(transaction, ContractSendChannelUpdateTransfer) and state_change.token_network_identifier == transaction.token_network_identifier and state_change.channel_identifier == transaction.channel_identifier ) if is_our_failed_update_transfer: return True return False
[ "def", "is_transaction_invalidated", "(", "transaction", ",", "state_change", ")", ":", "# Most transactions cannot be invalidated by others. These are:", "#", "# - close transactions", "# - settle transactions", "# - batch unlocks", "#", "# Deposits and withdraws are invalidated by the close, but these are not", "# made atomic through the WAL.", "is_our_failed_update_transfer", "=", "(", "isinstance", "(", "state_change", ",", "ContractReceiveChannelSettled", ")", "and", "isinstance", "(", "transaction", ",", "ContractSendChannelUpdateTransfer", ")", "and", "state_change", ".", "token_network_identifier", "==", "transaction", ".", "token_network_identifier", "and", "state_change", ".", "channel_identifier", "==", "transaction", ".", "channel_identifier", ")", "if", "is_our_failed_update_transfer", ":", "return", "True", "return", "False" ]
True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated.
[ "True", "if", "the", "transaction", "is", "made", "invalid", "by", "state_change", "." ]
python
train
41.66
awslabs/serverless-application-model
samtranslator/model/sam_resources.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/sam_resources.py#L186-L246
def _construct_role(self, managed_policy_map): """Constructs a Lambda execution role based on this SAM function's Policies property. :returns: the generated IAM Role :rtype: model.iam.IAMRole """ execution_role = IAMRole(self.logical_id + 'Role', attributes=self.get_passthrough_resource_attributes()) execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy() managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')] if self.Tracing: managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess')) function_policies = FunctionPolicies({"Policies": self.Policies}, # No support for policy templates in the "core" policy_template_processor=None) policy_documents = [] if self.DeadLetterQueue: policy_documents.append(IAMRolePolicies.dead_letter_queue_policy( self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']], self.DeadLetterQueue['TargetArn'])) for index, policy_entry in enumerate(function_policies.get()): if policy_entry.type is PolicyTypes.POLICY_STATEMENT: policy_documents.append({ 'PolicyName': execution_role.logical_id + 'Policy' + str(index), 'PolicyDocument': policy_entry.data }) elif policy_entry.type is PolicyTypes.MANAGED_POLICY: # There are three options: # Managed Policy Name (string): Try to convert to Managed Policy ARN # Managed Policy Arn (string): Insert it directly into the list # Intrinsic Function (dict): Insert it directly into the list # # When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice # policy_arn = policy_entry.data if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map: policy_arn = managed_policy_map[policy_entry.data] # De-Duplicate managed policy arns before inserting. Mainly useful # when customer specifies a managed policy which is already inserted # by SAM, such as AWSLambdaBasicExecutionRole if policy_arn not in managed_policy_arns: managed_policy_arns.append(policy_arn) else: # Policy Templates are not supported here in the "core" raise InvalidResourceException( self.logical_id, "Policy at index {} in the 'Policies' property is not valid".format(index)) execution_role.ManagedPolicyArns = list(managed_policy_arns) execution_role.Policies = policy_documents or None execution_role.PermissionsBoundary = self.PermissionsBoundary return execution_role
[ "def", "_construct_role", "(", "self", ",", "managed_policy_map", ")", ":", "execution_role", "=", "IAMRole", "(", "self", ".", "logical_id", "+", "'Role'", ",", "attributes", "=", "self", ".", "get_passthrough_resource_attributes", "(", ")", ")", "execution_role", ".", "AssumeRolePolicyDocument", "=", "IAMRolePolicies", ".", "lambda_assume_role_policy", "(", ")", "managed_policy_arns", "=", "[", "ArnGenerator", ".", "generate_aws_managed_policy_arn", "(", "'service-role/AWSLambdaBasicExecutionRole'", ")", "]", "if", "self", ".", "Tracing", ":", "managed_policy_arns", ".", "append", "(", "ArnGenerator", ".", "generate_aws_managed_policy_arn", "(", "'AWSXrayWriteOnlyAccess'", ")", ")", "function_policies", "=", "FunctionPolicies", "(", "{", "\"Policies\"", ":", "self", ".", "Policies", "}", ",", "# No support for policy templates in the \"core\"", "policy_template_processor", "=", "None", ")", "policy_documents", "=", "[", "]", "if", "self", ".", "DeadLetterQueue", ":", "policy_documents", ".", "append", "(", "IAMRolePolicies", ".", "dead_letter_queue_policy", "(", "self", ".", "dead_letter_queue_policy_actions", "[", "self", ".", "DeadLetterQueue", "[", "'Type'", "]", "]", ",", "self", ".", "DeadLetterQueue", "[", "'TargetArn'", "]", ")", ")", "for", "index", ",", "policy_entry", "in", "enumerate", "(", "function_policies", ".", "get", "(", ")", ")", ":", "if", "policy_entry", ".", "type", "is", "PolicyTypes", ".", "POLICY_STATEMENT", ":", "policy_documents", ".", "append", "(", "{", "'PolicyName'", ":", "execution_role", ".", "logical_id", "+", "'Policy'", "+", "str", "(", "index", ")", ",", "'PolicyDocument'", ":", "policy_entry", ".", "data", "}", ")", "elif", "policy_entry", ".", "type", "is", "PolicyTypes", ".", "MANAGED_POLICY", ":", "# There are three options:", "# Managed Policy Name (string): Try to convert to Managed Policy ARN", "# Managed Policy Arn (string): Insert it directly into the list", "# Intrinsic Function (dict): Insert it directly into the list", "#", "# When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice", "#", "policy_arn", "=", "policy_entry", ".", "data", "if", "isinstance", "(", "policy_entry", ".", "data", ",", "string_types", ")", "and", "policy_entry", ".", "data", "in", "managed_policy_map", ":", "policy_arn", "=", "managed_policy_map", "[", "policy_entry", ".", "data", "]", "# De-Duplicate managed policy arns before inserting. Mainly useful", "# when customer specifies a managed policy which is already inserted", "# by SAM, such as AWSLambdaBasicExecutionRole", "if", "policy_arn", "not", "in", "managed_policy_arns", ":", "managed_policy_arns", ".", "append", "(", "policy_arn", ")", "else", ":", "# Policy Templates are not supported here in the \"core\"", "raise", "InvalidResourceException", "(", "self", ".", "logical_id", ",", "\"Policy at index {} in the 'Policies' property is not valid\"", ".", "format", "(", "index", ")", ")", "execution_role", ".", "ManagedPolicyArns", "=", "list", "(", "managed_policy_arns", ")", "execution_role", ".", "Policies", "=", "policy_documents", "or", "None", "execution_role", ".", "PermissionsBoundary", "=", "self", ".", "PermissionsBoundary", "return", "execution_role" ]
Constructs a Lambda execution role based on this SAM function's Policies property. :returns: the generated IAM Role :rtype: model.iam.IAMRole
[ "Constructs", "a", "Lambda", "execution", "role", "based", "on", "this", "SAM", "function", "s", "Policies", "property", "." ]
python
train
50.590164
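A minimal stand-alone sketch of the managed-policy resolution and de-duplication step performed by _construct_role above. The helper name, the policy map and the sample ARNs are illustrative only and do not come from the SAM translator itself.

def resolve_managed_policies(policy_entries, managed_policy_map, base_arns):
    # Names known to the map become ARNs; ARNs and intrinsic-function dicts
    # pass through unchanged; each ARN is inserted at most once.
    arns = list(base_arns)
    for entry in policy_entries:
        arn = entry
        if isinstance(entry, str) and entry in managed_policy_map:
            arn = managed_policy_map[entry]
        if arn not in arns:
            arns.append(arn)
    return arns

base = ["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"]
mapping = {"AmazonS3ReadOnlyAccess": "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"}
print(resolve_managed_policies(
    ["AmazonS3ReadOnlyAccess",
     "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"],
    mapping, base))
# The S3 policy ARN appears exactly once, after the basic execution role ARN.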
mattja/nsim
nsim/nsim.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/nsim.py#L1594-L1636
def _tosubs(self, ixlist): """Maps a list of integer indices to sub-indices. ixlist can contain repeated indices and does not need to be sorted. Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a list of lists of subindices m (one list for each subsim in ss). """ n = len(ixlist) N = self._n ss = [] ms = [] if n == 0: return ss, ms j = 0 # the position in ixlist currently being processed ix = ixlist[j] if ix >= N or ix < -N: raise IndexError( 'index %d out of bounds for list of %d sims' % (ix, N)) if ix < 0: ix += N while j < n: for s in range(0, self._n): low = self._si[s] high = self._si[s + 1] if ix >= low and ix < high: ss.append(s) msj = [ix - low] j += 1 while j < n: ix = ixlist[j] if ix >= N or ix < -N: raise IndexError( 'index %d out of bounds for list of %d sims' % ( ix, N)) if ix < 0: ix += N if ix < low or ix >= high: break msj.append(ix - low) j += 1 ms.append(msj) if ix < low: break return ss, ms
[ "def", "_tosubs", "(", "self", ",", "ixlist", ")", ":", "n", "=", "len", "(", "ixlist", ")", "N", "=", "self", ".", "_n", "ss", "=", "[", "]", "ms", "=", "[", "]", "if", "n", "==", "0", ":", "return", "ss", ",", "ms", "j", "=", "0", "# the position in ixlist currently being processed", "ix", "=", "ixlist", "[", "j", "]", "if", "ix", ">=", "N", "or", "ix", "<", "-", "N", ":", "raise", "IndexError", "(", "'index %d out of bounds for list of %d sims'", "%", "(", "ix", ",", "N", ")", ")", "if", "ix", "<", "0", ":", "ix", "+=", "N", "while", "j", "<", "n", ":", "for", "s", "in", "range", "(", "0", ",", "self", ".", "_n", ")", ":", "low", "=", "self", ".", "_si", "[", "s", "]", "high", "=", "self", ".", "_si", "[", "s", "+", "1", "]", "if", "ix", ">=", "low", "and", "ix", "<", "high", ":", "ss", ".", "append", "(", "s", ")", "msj", "=", "[", "ix", "-", "low", "]", "j", "+=", "1", "while", "j", "<", "n", ":", "ix", "=", "ixlist", "[", "j", "]", "if", "ix", ">=", "N", "or", "ix", "<", "-", "N", ":", "raise", "IndexError", "(", "'index %d out of bounds for list of %d sims'", "%", "(", "ix", ",", "N", ")", ")", "if", "ix", "<", "0", ":", "ix", "+=", "N", "if", "ix", "<", "low", "or", "ix", ">=", "high", ":", "break", "msj", ".", "append", "(", "ix", "-", "low", ")", "j", "+=", "1", "ms", ".", "append", "(", "msj", ")", "if", "ix", "<", "low", ":", "break", "return", "ss", ",", "ms" ]
Maps a list of integer indices to sub-indices. ixlist can contain repeated indices and does not need to be sorted. Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a list of lists of subindices m (one list for each subsim in ss).
[ "Maps", "a", "list", "of", "integer", "indices", "to", "sub", "-", "indices", ".", "ixlist", "can", "contain", "repeated", "indices", "and", "does", "not", "need", "to", "be", "sorted", ".", "Returns", "pair", "(", "ss", "ms", ")", "where", "ss", "is", "a", "list", "of", "subsim", "numbers", "and", "ms", "is", "a", "list", "of", "lists", "of", "subindices", "m", "(", "one", "list", "for", "each", "subsim", "in", "ss", ")", "." ]
python
train
36.488372
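For intuition, here is a simplified stand-alone version of the same flat-index-to-subindex mapping. The boundary list stands in for the class's internal _si attribute and is made up for the example; the helper is not part of nsim's API.

from bisect import bisect_right

def to_subindices(ixlist, boundaries):
    # boundaries holds the cumulative start index of each subsim, e.g.
    # [0, 3, 7, 10] means subsim 0 holds flat indices 0-2, subsim 1 holds
    # 3-6 and subsim 2 holds 7-9.
    N = boundaries[-1]
    out = []
    for ix in ixlist:
        if ix >= N or ix < -N:
            raise IndexError('index %d out of bounds for %d items' % (ix, N))
        if ix < 0:
            ix += N
        s = bisect_right(boundaries, ix) - 1   # which subsim the index falls in
        out.append((s, ix - boundaries[s]))    # subindex within that subsim
    return out

print(to_subindices([0, 4, 9, -1], [0, 3, 7, 10]))
# [(0, 0), (1, 1), (2, 2), (2, 2)]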
romanorac/discomll
discomll/classification/naivebayes.py
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/naivebayes.py#L17-L57
def map_fit(interface, state, label, inp): """ Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns number of values and it calculates mean and variance for every feature. For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs: label, feature index, feature values. """ import numpy as np combiner = {} # combiner used for joining of intermediate pairs out = interface.output(0) # all outputted pairs have the same output label for row in inp: # for every row in data chunk row = row.strip().split(state["delimiter"]) # split row if len(row) > 1: # check if row is empty for i, j in enumerate(state["X_indices"]): # for defined features if row[j] not in state["missing_vals"]: # check missing values # creates a pair - label, feature index pair = row[state["y_index"]] + state["delimiter"] + str(j) if state["X_meta"][i] == "c": # continuous features if pair in combiner: # convert to float and store value combiner[pair].append(np.float32(row[j])) else: combiner[pair] = [np.float32(row[j])] else: # discrete features # add feature value to pair pair += state["delimiter"] + row[j] # increase counts of current pair combiner[pair] = combiner.get(pair, 0) + 1 # increase label counts combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1 for k, v in combiner.iteritems(): # all pairs in combiner are output if len(k.split(state["delimiter"])) == 2: # continous features # number of elements, partial mean and variance out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32))) else: # discrete features and labels out.add(k, v)
[ "def", "map_fit", "(", "interface", ",", "state", ",", "label", ",", "inp", ")", ":", "import", "numpy", "as", "np", "combiner", "=", "{", "}", "# combiner used for joining of intermediate pairs", "out", "=", "interface", ".", "output", "(", "0", ")", "# all outputted pairs have the same output label", "for", "row", "in", "inp", ":", "# for every row in data chunk", "row", "=", "row", ".", "strip", "(", ")", ".", "split", "(", "state", "[", "\"delimiter\"", "]", ")", "# split row", "if", "len", "(", "row", ")", ">", "1", ":", "# check if row is empty", "for", "i", ",", "j", "in", "enumerate", "(", "state", "[", "\"X_indices\"", "]", ")", ":", "# for defined features", "if", "row", "[", "j", "]", "not", "in", "state", "[", "\"missing_vals\"", "]", ":", "# check missing values", "# creates a pair - label, feature index", "pair", "=", "row", "[", "state", "[", "\"y_index\"", "]", "]", "+", "state", "[", "\"delimiter\"", "]", "+", "str", "(", "j", ")", "if", "state", "[", "\"X_meta\"", "]", "[", "i", "]", "==", "\"c\"", ":", "# continuous features", "if", "pair", "in", "combiner", ":", "# convert to float and store value", "combiner", "[", "pair", "]", ".", "append", "(", "np", ".", "float32", "(", "row", "[", "j", "]", ")", ")", "else", ":", "combiner", "[", "pair", "]", "=", "[", "np", ".", "float32", "(", "row", "[", "j", "]", ")", "]", "else", ":", "# discrete features", "# add feature value to pair", "pair", "+=", "state", "[", "\"delimiter\"", "]", "+", "row", "[", "j", "]", "# increase counts of current pair", "combiner", "[", "pair", "]", "=", "combiner", ".", "get", "(", "pair", ",", "0", ")", "+", "1", "# increase label counts", "combiner", "[", "row", "[", "state", "[", "\"y_index\"", "]", "]", "]", "=", "combiner", ".", "get", "(", "row", "[", "state", "[", "\"y_index\"", "]", "]", ",", "0", ")", "+", "1", "for", "k", ",", "v", "in", "combiner", ".", "iteritems", "(", ")", ":", "# all pairs in combiner are output", "if", "len", "(", "k", ".", "split", "(", "state", "[", "\"delimiter\"", "]", ")", ")", "==", "2", ":", "# continous features", "# number of elements, partial mean and variance", "out", ".", "add", "(", "k", ",", "(", "np", ".", "size", "(", "v", ")", ",", "np", ".", "mean", "(", "v", ",", "dtype", "=", "np", ".", "float32", ")", ",", "np", ".", "var", "(", "v", ",", "dtype", "=", "np", ".", "float32", ")", ")", ")", "else", ":", "# discrete features and labels", "out", ".", "add", "(", "k", ",", "v", ")" ]
The function counts occurrences of feature values for every row in a given data chunk. For continuous features it returns the number of values and calculates the mean and variance of every feature. For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs: label, feature index, feature value.
[ "Function", "counts", "occurrences", "of", "feature", "values", "for", "every", "row", "in", "given", "data", "chunk", ".", "For", "continuous", "features", "it", "returns", "number", "of", "values", "and", "it", "calculates", "mean", "and", "variance", "for", "every", "feature", ".", "For", "discrete", "features", "it", "counts", "occurrences", "of", "labels", "and", "values", "for", "every", "feature", ".", "It", "returns", "occurrences", "of", "pairs", ":", "label", "feature", "index", "feature", "values", "." ]
python
train
52.780488
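Outside of a Disco job, the combiner construction in map_fit can be sketched as a plain function over an in-memory list of rows. The column layout and argument names below stand in for the job's state dictionary and are hypothetical.

import numpy as np

def build_combiner(rows, y_index, x_indices, x_meta, delimiter=","):
    combiner = {}
    for row in rows:
        label = row[y_index]
        for i, j in enumerate(x_indices):
            pair = label + delimiter + str(j)             # label, feature index
            if x_meta[i] == "c":                          # continuous feature
                combiner.setdefault(pair, []).append(np.float32(row[j]))
            else:                                         # discrete feature
                pair += delimiter + row[j]
                combiner[pair] = combiner.get(pair, 0) + 1
        combiner[label] = combiner.get(label, 0) + 1      # label counts
    return combiner

rows = [["5.1", "red", "yes"], ["4.9", "blue", "yes"], ["6.0", "red", "no"]]
print(build_combiner(rows, y_index=2, x_indices=[0, 1], x_meta=["c", "d"]))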
brandonxiang/geojson-python-utils
geojson_utils/merger.py
https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/merger.py#L22-L55
def simplify_other(major, minor, dist): """ Simplify the point featurecollection of poi with another point features accoording by distance. Attention: point featurecollection only Keyword arguments: major -- major geojson minor -- minor geojson dist -- distance return a geojson featurecollection with two parts of featurecollection """ result = deepcopy(major) if major['type'] == 'FeatureCollection' and minor['type'] == 'FeatureCollection': arc = dist/6371000*180/math.pi*2 for minorfeature in minor['features']: minorgeom = minorfeature['geometry'] minorlng = minorgeom['coordinates'][0] minorlat = minorgeom['coordinates'][1] is_accept = True for mainfeature in major['features']: maingeom = mainfeature['geometry'] mainlng = maingeom['coordinates'][0] mainlat = maingeom['coordinates'][1] if abs(minorlat-mainlat) <= arc and abs(minorlng-mainlng) <= arc: distance = point_distance(maingeom, minorgeom) if distance < dist: is_accept = False break if is_accept: result["features"].append(minorfeature) return result
[ "def", "simplify_other", "(", "major", ",", "minor", ",", "dist", ")", ":", "result", "=", "deepcopy", "(", "major", ")", "if", "major", "[", "'type'", "]", "==", "'FeatureCollection'", "and", "minor", "[", "'type'", "]", "==", "'FeatureCollection'", ":", "arc", "=", "dist", "/", "6371000", "*", "180", "/", "math", ".", "pi", "*", "2", "for", "minorfeature", "in", "minor", "[", "'features'", "]", ":", "minorgeom", "=", "minorfeature", "[", "'geometry'", "]", "minorlng", "=", "minorgeom", "[", "'coordinates'", "]", "[", "0", "]", "minorlat", "=", "minorgeom", "[", "'coordinates'", "]", "[", "1", "]", "is_accept", "=", "True", "for", "mainfeature", "in", "major", "[", "'features'", "]", ":", "maingeom", "=", "mainfeature", "[", "'geometry'", "]", "mainlng", "=", "maingeom", "[", "'coordinates'", "]", "[", "0", "]", "mainlat", "=", "maingeom", "[", "'coordinates'", "]", "[", "1", "]", "if", "abs", "(", "minorlat", "-", "mainlat", ")", "<=", "arc", "and", "abs", "(", "minorlng", "-", "mainlng", ")", "<=", "arc", ":", "distance", "=", "point_distance", "(", "maingeom", ",", "minorgeom", ")", "if", "distance", "<", "dist", ":", "is_accept", "=", "False", "break", "if", "is_accept", ":", "result", "[", "\"features\"", "]", ".", "append", "(", "minorfeature", ")", "return", "result" ]
Simplify the point featurecollection of POIs with another set of point features according to distance.
Attention: point featurecollections only

Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance

return a geojson featurecollection combining the two parts of featurecollection
[ "Simplify", "the", "point", "featurecollection", "of", "poi", "with", "another", "point", "features", "accoording", "by", "distance", ".", "Attention", ":", "point", "featurecollection", "only" ]
python
train
38.676471
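A rough usage sketch of the same distance-based de-duplication with two tiny FeatureCollections. The haversine helper is a stand-in for the package's point_distance, and the coordinates and 1000 m threshold are made up.

import math

def haversine_m(p1, p2):
    # Great-circle distance in metres between two (lng, lat) points.
    lng1, lat1 = map(math.radians, p1)
    lng2, lat2 = map(math.radians, p2)
    a = (math.sin((lat2 - lat1) / 2) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lng2 - lng1) / 2) ** 2)
    return 2 * 6371000 * math.asin(math.sqrt(a))

def point(lng, lat):
    return {"type": "Feature",
            "geometry": {"type": "Point", "coordinates": [lng, lat]}}

major = {"type": "FeatureCollection", "features": [point(116.0, 39.9)]}
minor = {"type": "FeatureCollection",
         "features": [point(116.0001, 39.9001),   # roughly 14 m away: dropped
                      point(116.1, 39.9)]}        # roughly 8.5 km away: kept

kept = [f for f in minor["features"]
        if all(haversine_m(f["geometry"]["coordinates"],
                           m["geometry"]["coordinates"]) >= 1000
               for m in major["features"])]
print(len(kept))  # 1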
airspeed-velocity/asv
asv/feed.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/feed.py#L201-L217
def _get_id(owner, date, content): """ Generate an unique Atom id for the given content """ h = hashlib.sha256() # Hash still contains the original project url, keep as is h.update("github.com/spacetelescope/asv".encode('utf-8')) for x in content: if x is None: h.update(",".encode('utf-8')) else: h.update(x.encode('utf-8')) h.update(",".encode('utf-8')) if date is None: date = datetime.datetime(1970, 1, 1) return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest())
[ "def", "_get_id", "(", "owner", ",", "date", ",", "content", ")", ":", "h", "=", "hashlib", ".", "sha256", "(", ")", "# Hash still contains the original project url, keep as is", "h", ".", "update", "(", "\"github.com/spacetelescope/asv\"", ".", "encode", "(", "'utf-8'", ")", ")", "for", "x", "in", "content", ":", "if", "x", "is", "None", ":", "h", ".", "update", "(", "\",\"", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "h", ".", "update", "(", "x", ".", "encode", "(", "'utf-8'", ")", ")", "h", ".", "update", "(", "\",\"", ".", "encode", "(", "'utf-8'", ")", ")", "if", "date", "is", "None", ":", "date", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "return", "\"tag:{0},{1}:/{2}\"", ".", "format", "(", "owner", ",", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "h", ".", "hexdigest", "(", ")", ")" ]
Generate a unique Atom id for the given content
[ "Generate", "an", "unique", "Atom", "id", "for", "the", "given", "content" ]
python
train
33.411765
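The tag-URI scheme is easy to reproduce standalone for a quick sanity check; the owner, content tuple and date below are made up.

import datetime
import hashlib

h = hashlib.sha256()
h.update("github.com/spacetelescope/asv".encode("utf-8"))
for x in ("benchmark.time_example", None, "some-commit-hash"):
    if x is None:
        h.update(b",")
    else:
        h.update(x.encode("utf-8"))
        h.update(b",")

date = datetime.datetime(2024, 1, 15)
print("tag:{0},{1}:/{2}".format("example-owner",
                                date.strftime("%Y-%m-%d"),
                                h.hexdigest()))
# -> tag:example-owner,2024-01-15:/<sha256 hex digest>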
ChrisCummins/labm8
fs.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/fs.py#L281-L301
def lsfiles(root=".", **kwargs): """ Return only files from a directory listing. Arguments: root (str): Path to directory. Can be relative or absolute. **kwargs: Any additional arguments to be passed to ls(). Returns: list of str: A list of file paths. Raises: OSError: If root directory does not exist. """ paths = ls(root=root, **kwargs) if isfile(root): return paths return [_path for _path in paths if isfile(path(root, _path))]
[ "def", "lsfiles", "(", "root", "=", "\".\"", ",", "*", "*", "kwargs", ")", ":", "paths", "=", "ls", "(", "root", "=", "root", ",", "*", "*", "kwargs", ")", "if", "isfile", "(", "root", ")", ":", "return", "paths", "return", "[", "_path", "for", "_path", "in", "paths", "if", "isfile", "(", "path", "(", "root", ",", "_path", ")", ")", "]" ]
Return only files from a directory listing. Arguments: root (str): Path to directory. Can be relative or absolute. **kwargs: Any additional arguments to be passed to ls(). Returns: list of str: A list of file paths. Raises: OSError: If root directory does not exist.
[ "Return", "only", "files", "from", "a", "directory", "listing", "." ]
python
train
23.52381
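For readers who only want the behaviour without labm8, an equivalent one-off using the standard library; pass whatever directory you need.

import os

def list_files(root="."):
    # Keep only regular files from a directory listing.
    if os.path.isfile(root):
        return [root]
    return sorted(p for p in os.listdir(root)
                  if os.path.isfile(os.path.join(root, p)))

print(list_files("."))  # e.g. ['README.md', 'setup.py'], depending on the cwd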
alevinval/scheduling
scheduling/scheduler.py
https://github.com/alevinval/scheduling/blob/127239712c0b73b929ca19b4b5c2855eebb7fcf0/scheduling/scheduler.py#L41-L46
def run(self): """ Run the schedule """ self.main_task.thread.start() self.main_task.thread.join()
[ "def", "run", "(", "self", ")", ":", "self", ".", "main_task", ".", "thread", ".", "start", "(", ")", "self", ".", "main_task", ".", "thread", ".", "join", "(", ")" ]
Run the schedule
[ "Run", "the", "schedule" ]
python
train
22.166667
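The run/join pattern above is plain standard-library threading; a minimal stand-alone illustration (the task body is made up):

import threading
import time

def main_task():
    # Stand-in for the scheduler's main task.
    time.sleep(0.1)
    print("main task finished")

thread = threading.Thread(target=main_task)
thread.start()   # kick off the schedule
thread.join()    # block until the whole schedule has run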