Dataset schema:

  repo              string, length 7 to 55
  path              string, length 4 to 223
  url               string, length 87 to 315
  code              string, length 75 to 104k
  code_tokens       list
  docstring         string, length 1 to 46.9k
  docstring_tokens  list
  language          string, 1 class
  partition         string, 3 classes
  avg_line_len      float64, 7.91 to 980
saltstack/salt
salt/fileclient.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L1304-L1326
def __hash_and_stat_file(self, path, saltenv='base'):
    '''
    Common code for hashing and stating files
    '''
    try:
        path = self._check_proto(path)
    except MinionError as err:
        if not os.path.isfile(path):
            log.warning(
                'specified file %s is not present to generate hash: %s',
                path, err
            )
            return {}, None
        else:
            ret = {}
            hash_type = self.opts.get('hash_type', 'md5')
            ret['hsum'] = salt.utils.hashutils.get_hash(path, form=hash_type)
            ret['hash_type'] = hash_type
            return ret
    load = {'path': path,
            'saltenv': saltenv,
            'cmd': '_file_hash'}
    return self.channel.send(load)
[ "def", "__hash_and_stat_file", "(", "self", ",", "path", ",", "saltenv", "=", "'base'", ")", ":", "try", ":", "path", "=", "self", ".", "_check_proto", "(", "path", ")", "except", "MinionError", "as", "err", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "log", ".", "warning", "(", "'specified file %s is not present to generate hash: %s'", ",", "path", ",", "err", ")", "return", "{", "}", ",", "None", "else", ":", "ret", "=", "{", "}", "hash_type", "=", "self", ".", "opts", ".", "get", "(", "'hash_type'", ",", "'md5'", ")", "ret", "[", "'hsum'", "]", "=", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "path", ",", "form", "=", "hash_type", ")", "ret", "[", "'hash_type'", "]", "=", "hash_type", "return", "ret", "load", "=", "{", "'path'", ":", "path", ",", "'saltenv'", ":", "saltenv", ",", "'cmd'", ":", "'_file_hash'", "}", "return", "self", ".", "channel", ".", "send", "(", "load", ")" ]
Common code for hashing and stating files
[ "Common", "code", "for", "hashing", "and", "stating", "files" ]
python
train
35.826087
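The local-path branch of the row above hashes a file on disk with a configurable hash type. A minimal standalone sketch of that idea using only hashlib from the standard library; the helper name and defaults are illustrative, not part of Salt's API:

import hashlib
import os

def hash_local_file(path, hash_type='md5', chunk_size=65536):
    """Return {'hsum': ..., 'hash_type': ...} for a local file, or {} if it is missing."""
    if not os.path.isfile(path):
        return {}
    digest = hashlib.new(hash_type)
    with open(path, 'rb') as handle:
        # Read in chunks so large files do not have to fit in memory.
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return {'hsum': digest.hexdigest(), 'hash_type': hash_type}

print(hash_local_file(__file__, 'sha256'))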
bionikspoon/pureyaml
pureyaml/_compat/total_ordering.py
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/_compat/total_ordering.py#L100-L105
def _lt_from_ge(self, other):
    """Return a < b.  Computed by @total_ordering from (not a >= b)."""
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result
[ "def", "_lt_from_ge", "(", "self", ",", "other", ")", ":", "op_result", "=", "self", ".", "__ge__", "(", "other", ")", "if", "op_result", "is", "NotImplemented", ":", "return", "NotImplemented", "return", "not", "op_result" ]
Return a < b. Computed by @total_ordering from (not a >= b).
[ "Return", "a", "<", "b", ".", "Computed", "by" ]
python
train
37
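The helper above is one of the comparisons a total_ordering backport derives from __ge__. With the standard library's functools.total_ordering the same derivation looks like this; the Version class is only an illustration:

from functools import total_ordering

@total_ordering
class Version:
    """Orderable wrapper around an integer; only __eq__ and __ge__ are written by hand."""
    def __init__(self, number):
        self.number = number

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.number == other.number

    def __ge__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.number >= other.number

# total_ordering fills in __lt__, __le__ and __gt__, so a < b works even though
# only >= and == were defined: a < b is computed as (not a >= b).
assert Version(1) < Version(2)
assert Version(2) > Version(1)
assert Version(1) <= Version(1)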
quantumlib/Cirq
examples/phase_estimator.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/examples/phase_estimator.py#L63-L82
def _decompose_(self, qubits):
    """A quantum circuit (QFT_inv) with the following structure.

    ---H--@-------@--------@----------------------------------------------
          |       |        |
    ------@^-0.5--+--------+---------H--@-------@-------------------------
                  |        |            |       |
    --------------@^-0.25--+------------@^-0.5--+---------H--@------------
                           |                    |            |
    -----------------------@^-0.125-------------@^-0.25------@^-0.5---H---

    The number of qubits can be arbitrary.
    """
    qubits = list(qubits)
    while len(qubits) > 0:
        q_head = qubits.pop(0)
        yield cirq.H(q_head)
        for i, qubit in enumerate(qubits):
            yield (cirq.CZ**(-1/2.0**(i+1)))(qubit, q_head)
[ "def", "_decompose_", "(", "self", ",", "qubits", ")", ":", "qubits", "=", "list", "(", "qubits", ")", "while", "len", "(", "qubits", ")", ">", "0", ":", "q_head", "=", "qubits", ".", "pop", "(", "0", ")", "yield", "cirq", ".", "H", "(", "q_head", ")", "for", "i", ",", "qubit", "in", "enumerate", "(", "qubits", ")", ":", "yield", "(", "cirq", ".", "CZ", "**", "(", "-", "1", "/", "2.0", "**", "(", "i", "+", "1", ")", ")", ")", "(", "qubit", ",", "q_head", ")" ]
A quantum circuit (QFT_inv) with the following structure. ---H--@-------@--------@---------------------------------------------- | | | ------@^-0.5--+--------+---------H--@-------@------------------------- | | | | --------------@^-0.25--+------------@^-0.5--+---------H--@------------ | | | -----------------------@^-0.125-------------@^-0.25------@^-0.5---H--- The number of qubits can be arbitrary.
[ "A", "quantum", "circuit", "(", "QFT_inv", ")", "with", "the", "following", "structure", "." ]
python
train
42.6
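The generator above yields the inverse-QFT gates head qubit first. A small sketch of collecting such a decomposition into a circuit with Cirq's public API, assuming a Cirq version where cirq.Circuit accepts an operation generator directly; qft_inv_ops is a standalone rewrite for illustration, not the class from the example file:

import cirq

def qft_inv_ops(qubits):
    """Yield H on each qubit followed by controlled-Z rotations of halving angle."""
    qubits = list(qubits)
    while qubits:
        q_head = qubits.pop(0)
        yield cirq.H(q_head)
        for i, qubit in enumerate(qubits):
            yield (cirq.CZ ** (-1 / 2.0 ** (i + 1)))(qubit, q_head)

qubits = cirq.LineQubit.range(4)
circuit = cirq.Circuit(qft_inv_ops(qubits))
print(circuit)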
FujiMakoto/AgentML
agentml/parser/init/__init__.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/init/__init__.py#L16-L25
def _parse_substitutions(self, element):
    """
    Parse word substitutions
    :param element: The XML Element object
    :type  element: etree._Element
    """
    subs = element.findall('sub')

    for sub in subs:
        self.agentml.set_substitution(attribute(sub, 'word'), sub.text)
[ "def", "_parse_substitutions", "(", "self", ",", "element", ")", ":", "subs", "=", "element", ".", "findall", "(", "'sub'", ")", "for", "sub", "in", "subs", ":", "self", ".", "agentml", ".", "set_substitution", "(", "attribute", "(", "sub", ",", "'word'", ")", ",", "sub", ".", "text", ")" ]
Parse word substitutions :param element: The XML Element object :type element: etree._Element
[ "Parse", "word", "substitutions", ":", "param", "element", ":", "The", "XML", "Element", "object", ":", "type", "element", ":", "etree", ".", "_Element" ]
python
train
31.4
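The parser above walks <sub> elements and registers each word substitution. A minimal standalone equivalent using only xml.etree.ElementTree; AgentML's attribute() helper is replaced by Element.get here, and the sample document is invented:

import xml.etree.ElementTree as etree

document = etree.fromstring("""
<init>
    <sub word="u">you</sub>
    <sub word="r">are</sub>
</init>
""")

substitutions = {}
for sub in document.findall('sub'):
    # 'word' is the token being substituted; the element text is the replacement.
    substitutions[sub.get('word')] = sub.text

print(substitutions)  # {'u': 'you', 'r': 'are'}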
astropy/astropy-helpers
astropy_helpers/commands/build_ext.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/commands/build_ext.py#L15-L37
def should_build_with_cython(previous_cython_version, is_release):
    """
    Returns the previously used Cython version (or 'unknown' if not
    previously built) if Cython should be used to build extension modules
    from pyx files.
    """

    # Only build with Cython if, of course, Cython is installed, we're in a
    # development version (i.e. not release) or the Cython-generated source
    # files haven't been created yet (cython_version == 'unknown'). The latter
    # case can happen even when release is True if checking out a release tag
    # from the repository
    have_cython = False
    try:
        from Cython import __version__ as cython_version  # noqa
        have_cython = True
    except ImportError:
        pass

    if have_cython and (not is_release or previous_cython_version == 'unknown'):
        return cython_version
    else:
        return False
[ "def", "should_build_with_cython", "(", "previous_cython_version", ",", "is_release", ")", ":", "# Only build with Cython if, of course, Cython is installed, we're in a", "# development version (i.e. not release) or the Cython-generated source", "# files haven't been created yet (cython_version == 'unknown'). The latter", "# case can happen even when release is True if checking out a release tag", "# from the repository", "have_cython", "=", "False", "try", ":", "from", "Cython", "import", "__version__", "as", "cython_version", "# noqa", "have_cython", "=", "True", "except", "ImportError", ":", "pass", "if", "have_cython", "and", "(", "not", "is_release", "or", "previous_cython_version", "==", "'unknown'", ")", ":", "return", "cython_version", "else", ":", "return", "False" ]
Returns the previously used Cython version (or 'unknown' if not previously built) if Cython should be used to build extension modules from pyx files.
[ "Returns", "the", "previously", "used", "Cython", "version", "(", "or", "unknown", "if", "not", "previously", "built", ")", "if", "Cython", "should", "be", "used", "to", "build", "extension", "modules", "from", "pyx", "files", "." ]
python
train
37.521739
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L289-L302
def readsGenerator(self, request):
    """
    Returns a generator over the (read, nextPageToken) pairs defined
    by the specified request
    """
    if not request.reference_id:
        raise exceptions.UnmappedReadsNotSupported()
    if len(request.read_group_ids) < 1:
        raise exceptions.BadRequestException(
            "At least one readGroupId must be specified")
    elif len(request.read_group_ids) == 1:
        return self._readsGeneratorSingle(request)
    else:
        return self._readsGeneratorMultiple(request)
[ "def", "readsGenerator", "(", "self", ",", "request", ")", ":", "if", "not", "request", ".", "reference_id", ":", "raise", "exceptions", ".", "UnmappedReadsNotSupported", "(", ")", "if", "len", "(", "request", ".", "read_group_ids", ")", "<", "1", ":", "raise", "exceptions", ".", "BadRequestException", "(", "\"At least one readGroupId must be specified\"", ")", "elif", "len", "(", "request", ".", "read_group_ids", ")", "==", "1", ":", "return", "self", ".", "_readsGeneratorSingle", "(", "request", ")", "else", ":", "return", "self", ".", "_readsGeneratorMultiple", "(", "request", ")" ]
Returns a generator over the (read, nextPageToken) pairs defined by the specified request
[ "Returns", "a", "generator", "over", "the", "(", "read", "nextPageToken", ")", "pairs", "defined", "by", "the", "specified", "request" ]
python
train
41
itsnauman/termrule
tr/termrule.py
https://github.com/itsnauman/termrule/blob/62b8cc7e9a7fc4476ccdaf84fe2685eb529dc48c/tr/termrule.py#L108-L125
def tr(self, args, color=None):
    """
    Method to print ASCII patterns to terminal
    """
    width = self._term_size()[1]
    if not args:
        if color is not None:
            print(self._echo("#" * width, color))
        else:
            print(self._echo("#" * width, "green"))
    else:
        for each_symbol in args:
            chars = len(each_symbol)
            number_chars = width // chars
            if color is not None:
                print(self._echo(each_symbol * number_chars, color))
            else:
                print(each_symbol * number_chars)
[ "def", "tr", "(", "self", ",", "args", ",", "color", "=", "None", ")", ":", "width", "=", "self", ".", "_term_size", "(", ")", "[", "1", "]", "if", "not", "args", ":", "if", "color", "is", "not", "None", ":", "print", "(", "self", ".", "_echo", "(", "\"#\"", "*", "width", ",", "color", ")", ")", "else", ":", "print", "(", "self", ".", "_echo", "(", "\"#\"", "*", "width", ",", "\"green\"", ")", ")", "else", ":", "for", "each_symbol", "in", "args", ":", "chars", "=", "len", "(", "each_symbol", ")", "number_chars", "=", "width", "//", "chars", "if", "color", "is", "not", "None", ":", "print", "(", "self", ".", "_echo", "(", "each_symbol", "*", "number_chars", ",", "color", ")", ")", "else", ":", "print", "(", "each_symbol", "*", "number_chars", ")" ]
Method to print ASCII patterns to terminal
[ "Method", "to", "print", "ASCII", "patterns", "to", "terminal" ]
python
train
35.222222
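The rule printer above divides the terminal width by the symbol length and repeats the symbol to fill the line. A dependency-free sketch of the same idea using shutil.get_terminal_size from the standard library; the colour handling from termrule is omitted:

import shutil

def rule(symbol="#"):
    """Print a horizontal rule made of `symbol`, sized to the current terminal."""
    width = shutil.get_terminal_size(fallback=(80, 24)).columns
    repeats = width // len(symbol)
    print(symbol * repeats)

rule()        # ###### ... across the whole line
rule("=-")    # =-=-=- ... the pair repeated as many times as fits the width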
python-xlib/python-xlib
Xlib/ext/nvcontrol.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/ext/nvcontrol.py#L63-L73
def query_string_attribute(self, target, display_mask, attr):
    """Return the value of a string attribute"""
    reply = NVCtrlQueryStringAttributeReplyRequest(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        target_id=target.id(),
        target_type=target.type(),
        display_mask=display_mask,
        attr=attr)
    if not reply._data.get('flags'):
        return None
    return str(reply._data.get('string')).strip('\0')
[ "def", "query_string_attribute", "(", "self", ",", "target", ",", "display_mask", ",", "attr", ")", ":", "reply", "=", "NVCtrlQueryStringAttributeReplyRequest", "(", "display", "=", "self", ".", "display", ",", "opcode", "=", "self", ".", "display", ".", "get_extension_major", "(", "extname", ")", ",", "target_id", "=", "target", ".", "id", "(", ")", ",", "target_type", "=", "target", ".", "type", "(", ")", ",", "display_mask", "=", "display_mask", ",", "attr", "=", "attr", ")", "if", "not", "reply", ".", "_data", ".", "get", "(", "'flags'", ")", ":", "return", "None", "return", "str", "(", "reply", ".", "_data", ".", "get", "(", "'string'", ")", ")", ".", "strip", "(", "'\\0'", ")" ]
Return the value of a string attribute
[ "Return", "the", "value", "of", "a", "string", "attribute" ]
python
train
61.545455
wright-group/WrightTools
WrightTools/collection/_collection.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/collection/_collection.py#L75-L103
def create_collection(self, name="collection", position=None, **kwargs):
    """Create a new child colleciton.

    Parameters
    ----------
    name : string
        Unique identifier.
    position : integer (optional)
        Location to insert. Default is None (append).
    kwargs
        Additional arguments to child collection instantiation.

    Returns
    -------
    WrightTools Collection
        New child.
    """
    if name in self.item_names:
        wt_exceptions.ObjectExistsWarning.warn(name)
        return self[name]
    collection = Collection(
        filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
    )
    if position is not None:
        self.attrs["item_names"] = np.insert(
            self.attrs["item_names"][:-1], position, collection.natural_name.encode()
        )
    setattr(self, name, collection)
    return collection
[ "def", "create_collection", "(", "self", ",", "name", "=", "\"collection\"", ",", "position", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "name", "in", "self", ".", "item_names", ":", "wt_exceptions", ".", "ObjectExistsWarning", ".", "warn", "(", "name", ")", "return", "self", "[", "name", "]", "collection", "=", "Collection", "(", "filepath", "=", "self", ".", "filepath", ",", "parent", "=", "self", ".", "name", ",", "name", "=", "name", ",", "edit_local", "=", "True", ",", "*", "*", "kwargs", ")", "if", "position", "is", "not", "None", ":", "self", ".", "attrs", "[", "\"item_names\"", "]", "=", "np", ".", "insert", "(", "self", ".", "attrs", "[", "\"item_names\"", "]", "[", ":", "-", "1", "]", ",", "position", ",", "collection", ".", "natural_name", ".", "encode", "(", ")", ")", "setattr", "(", "self", ",", "name", ",", "collection", ")", "return", "collection" ]
Create a new child colleciton. Parameters ---------- name : string Unique identifier. position : integer (optional) Location to insert. Default is None (append). kwargs Additional arguments to child collection instantiation. Returns ------- WrightTools Collection New child.
[ "Create", "a", "new", "child", "colleciton", "." ]
python
train
33.310345
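The position handling above relies on numpy.insert to splice the new collection name into the stored item_names array. A tiny example of that call in isolation; the byte-string names mirror how HDF5 attributes are commonly stored and the values are made up:

import numpy as np

item_names = np.array([b"alpha", b"beta", b"gamma"])

# Drop the trailing entry and insert the new name at index 1,
# mirroring attrs["item_names"][:-1] in create_collection above.
updated = np.insert(item_names[:-1], 1, b"delta")
print(updated)  # [b'alpha' b'delta' b'beta']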
chrippa/python-librtmp
librtmp/rtmp.py
https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L172-L191
def connect(self, packet=None):
    """Connect to the server.

    :param packet: RTMPPacket, this packet will be sent instead
                   of the regular "connect" packet.

    Raises :exc:`RTMPError` if the connect attempt fails.
    """
    if isinstance(packet, RTMPPacket):
        packet = packet.packet
    else:
        packet = ffi.NULL

    res = librtmp.RTMP_Connect(self.rtmp, packet)
    if res < 1:
        raise RTMPError("Failed to connect")

    return RTMPCall(self, 1.0)
[ "def", "connect", "(", "self", ",", "packet", "=", "None", ")", ":", "if", "isinstance", "(", "packet", ",", "RTMPPacket", ")", ":", "packet", "=", "packet", ".", "packet", "else", ":", "packet", "=", "ffi", ".", "NULL", "res", "=", "librtmp", ".", "RTMP_Connect", "(", "self", ".", "rtmp", ",", "packet", ")", "if", "res", "<", "1", ":", "raise", "RTMPError", "(", "\"Failed to connect\"", ")", "return", "RTMPCall", "(", "self", ",", "1.0", ")" ]
Connect to the server. :param packet: RTMPPacket, this packet will be sent instead of the regular "connect" packet. Raises :exc:`RTMPError` if the connect attempt fails.
[ "Connect", "to", "the", "server", "." ]
python
train
26.5
earwig/mwparserfromhell
mwparserfromhell/parser/tokenizer.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/parser/tokenizer.py#L705-L717
def _handle_tag_text(self, text):
    """Handle regular *text* inside of an HTML open tag."""
    next = self._read(1)
    if not self._can_recurse() or text not in self.MARKERS:
        self._emit_text(text)
    elif text == next == "{":
        self._parse_template_or_argument()
    elif text == next == "[":
        self._parse_wikilink()
    elif text == "<":
        self._parse_tag()
    else:
        self._emit_text(text)
[ "def", "_handle_tag_text", "(", "self", ",", "text", ")", ":", "next", "=", "self", ".", "_read", "(", "1", ")", "if", "not", "self", ".", "_can_recurse", "(", ")", "or", "text", "not", "in", "self", ".", "MARKERS", ":", "self", ".", "_emit_text", "(", "text", ")", "elif", "text", "==", "next", "==", "\"{\"", ":", "self", ".", "_parse_template_or_argument", "(", ")", "elif", "text", "==", "next", "==", "\"[\"", ":", "self", ".", "_parse_wikilink", "(", ")", "elif", "text", "==", "\"<\"", ":", "self", ".", "_parse_tag", "(", ")", "else", ":", "self", ".", "_emit_text", "(", "text", ")" ]
Handle regular *text* inside of an HTML open tag.
[ "Handle", "regular", "*", "text", "*", "inside", "of", "an", "HTML", "open", "tag", "." ]
python
train
35.846154
couchbase/couchbase-python-client
couchbase/subdocument.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/subdocument.py#L34-L46
def _gen_3spec(op, path, xattr=False):
    """
    Returns a Spec tuple suitable for passing to the underlying C extension.
    This variant is called for operations that lack an input value.

    :param str path: The path to fetch
    :param bool xattr: Whether this is an extended attribute
    :return: a spec suitable for passing to the underlying C extension
    """
    flags = 0
    if xattr:
        flags |= _P.SDSPEC_F_XATTR
    return Spec(op, path, flags)
[ "def", "_gen_3spec", "(", "op", ",", "path", ",", "xattr", "=", "False", ")", ":", "flags", "=", "0", "if", "xattr", ":", "flags", "|=", "_P", ".", "SDSPEC_F_XATTR", "return", "Spec", "(", "op", ",", "path", ",", "flags", ")" ]
Returns a Spec tuple suitable for passing to the underlying C extension. This variant is called for operations that lack an input value. :param str path: The path to fetch :param bool xattr: Whether this is an extended attribute :return: a spec suitable for passing to the underlying C extension
[ "Returns", "a", "Spec", "tuple", "suitable", "for", "passing", "to", "the", "underlying", "C", "extension", ".", "This", "variant", "is", "called", "for", "operations", "that", "lack", "an", "input", "value", "." ]
python
train
35
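The spec builder above packs an option into a bit flag before constructing the tuple handed to the C extension. A generic sketch of the same flags-plus-namedtuple pattern; Spec and the flag constant here are stand-ins, not the couchbase definitions:

from collections import namedtuple

Spec = namedtuple("Spec", ["op", "path", "flags"])

# Hypothetical flag value for illustration; the real constant lives in the C extension.
SDSPEC_F_XATTR = 0x04

def gen_spec(op, path, xattr=False):
    """Build a Spec tuple, OR-ing in the xattr flag when requested."""
    flags = 0
    if xattr:
        flags |= SDSPEC_F_XATTR
    return Spec(op, path, flags)

print(gen_spec("get", "name"))                  # Spec(op='get', path='name', flags=0)
print(gen_spec("get", "meta.rev", xattr=True))  # flags=4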
janpipek/physt
physt/compat/geant4.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/compat/geant4.py#L43-L49
def _get(pseudodict, key, single=True):
    """Helper method for getting values from "multi-dict"s"""
    matches = [item[1] for item in pseudodict if item[0] == key]
    if single:
        return matches[0]
    else:
        return matches
[ "def", "_get", "(", "pseudodict", ",", "key", ",", "single", "=", "True", ")", ":", "matches", "=", "[", "item", "[", "1", "]", "for", "item", "in", "pseudodict", "if", "item", "[", "0", "]", "==", "key", "]", "if", "single", ":", "return", "matches", "[", "0", "]", "else", ":", "return", "matches" ]
Helper method for getting values from "multi-dict"s
[ "Helper", "method", "for", "getting", "values", "from", "multi", "-", "dict", "s" ]
python
train
33.428571
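The helper above treats a list of (key, value) pairs as a "multi-dict" in which a key may repeat. A small usage sketch with invented data:

def get_from_pairs(pseudodict, key, single=True):
    """Return the first value for `key`, or all of them when single=False."""
    matches = [value for k, value in pseudodict if k == key]
    return matches[0] if single else matches

pairs = [("bins", 10), ("range", (0, 1)), ("bins", 20)]
print(get_from_pairs(pairs, "bins"))                # 10 (first match only)
print(get_from_pairs(pairs, "bins", single=False))  # [10, 20]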
spacetelescope/stsci.tools
lib/stsci/tools/for2to3.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/for2to3.py#L53-L71
def tobytes(s, encoding='ascii'):
    """ Convert string s to the 'bytes' type, in all Pythons, even
    back before Python 2.6.  What 'str' means varies by PY3K or not.
    In Pythons before 3.0, this is technically the same as the str type
    in terms of the character data in memory. """
    # NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
    # NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
    if PY3K:
        if isinstance(s, bytes):
            return s
        else:
            return s.encode(encoding)
    else:
        # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
        # but handle if unicode is passed
        if isinstance(s, unicode):
            return s.encode(encoding)
        else:
            return s
[ "def", "tobytes", "(", "s", ",", "encoding", "=", "'ascii'", ")", ":", "# NOTE: after we abandon 2.5, we might simply instead use \"bytes(s)\"", "# NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'", "if", "PY3K", ":", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "s", "else", ":", "return", "s", ".", "encode", "(", "encoding", ")", "else", ":", "# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes", "# but handle if unicode is passed", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", ".", "encode", "(", "encoding", ")", "else", ":", "return", "s" ]
Convert string s to the 'bytes' type, in all Pythons, even back before Python 2.6. What 'str' means varies by PY3K or not. In Pythons before 3.0, this is technically the same as the str type in terms of the character data in memory.
[ "Convert", "string", "s", "to", "the", "bytes", "type", "in", "all", "Pythons", "even", "back", "before", "Python", "2", ".", "6", ".", "What", "str", "means", "varies", "by", "PY3K", "or", "not", ".", "In", "Pythons", "before", "3", ".", "0", "this", "is", "technically", "the", "same", "as", "the", "str", "type", "in", "terms", "of", "the", "character", "data", "in", "memory", "." ]
python
train
41.210526
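On Python 3 the same conversion reduces to passing bytes through and encoding str, since the unicode branch no longer applies. A short sketch of that simplified form (not part of stsci.tools):

def tobytes_py3(s, encoding='ascii'):
    """Python-3-only version: pass bytes through unchanged, encode str."""
    if isinstance(s, bytes):
        return s
    return s.encode(encoding)

assert tobytes_py3(b"abc") == b"abc"
assert tobytes_py3("abc") == b"abc"
assert tobytes_py3("héllo", encoding="utf-8") == b"h\xc3\xa9llo"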
drdoctr/doctr
doctr/travis.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L49-L95
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
    """
    Decrypts the deploy key and configures it with ssh

    The key is assumed to be encrypted as keypath + key_ext, and the
    encryption key is assumed to be set in the environment variable
    ``env_name``. If ``env_name`` is not set, it falls back to
    ``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.

    If keypath + key_ext does not exist, it falls back to
    ``github_deploy_key.enc`` for backwards compatibility.
    """
    key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None))
    if not key:
        raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error."
                           .format(env_name=env_name))

    # Legacy keyfile name
    if (not os.path.isfile(keypath + key_ext) and
            os.path.isfile('github_deploy_key' + key_ext)):
        keypath = 'github_deploy_key'

    key_filename = os.path.basename(keypath)
    key = key.encode('utf-8')
    decrypt_file(keypath + key_ext, key)

    key_path = os.path.expanduser("~/.ssh/" + key_filename)
    os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
    os.rename(keypath, key_path)

    with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
        f.write("Host github.com"
                ' IdentityFile "%s"'
                " LogLevel ERROR\n" % key_path)

    # start ssh-agent and add key to it
    # info from SSH agent has to be put into the environment
    agent_info = subprocess.check_output(['ssh-agent', '-s'])
    agent_info = agent_info.decode('utf-8')
    agent_info = agent_info.split()

    AUTH_SOCK = agent_info[0].split('=')[1][:-1]
    AGENT_PID = agent_info[3].split('=')[1][:-1]

    os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
    os.putenv('SSH_AGENT_PID', AGENT_PID)

    run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)])
[ "def", "setup_deploy_key", "(", "keypath", "=", "'github_deploy_key'", ",", "key_ext", "=", "'.enc'", ",", "env_name", "=", "'DOCTR_DEPLOY_ENCRYPTION_KEY'", ")", ":", "key", "=", "os", ".", "environ", ".", "get", "(", "env_name", ",", "os", ".", "environ", ".", "get", "(", "\"DOCTR_DEPLOY_ENCRYPTION_KEY\"", ",", "None", ")", ")", "if", "not", "key", ":", "raise", "RuntimeError", "(", "\"{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error.\"", ".", "format", "(", "env_name", "=", "env_name", ")", ")", "# Legacy keyfile name", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "keypath", "+", "key_ext", ")", "and", "os", ".", "path", ".", "isfile", "(", "'github_deploy_key'", "+", "key_ext", ")", ")", ":", "keypath", "=", "'github_deploy_key'", "key_filename", "=", "os", ".", "path", ".", "basename", "(", "keypath", ")", "key", "=", "key", ".", "encode", "(", "'utf-8'", ")", "decrypt_file", "(", "keypath", "+", "key_ext", ",", "key", ")", "key_path", "=", "os", ".", "path", ".", "expanduser", "(", "\"~/.ssh/\"", "+", "key_filename", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "expanduser", "(", "\"~/.ssh\"", ")", ",", "exist_ok", "=", "True", ")", "os", ".", "rename", "(", "keypath", ",", "key_path", ")", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "\"~/.ssh/config\"", ")", ",", "'a'", ")", "as", "f", ":", "f", ".", "write", "(", "\"Host github.com\"", "' IdentityFile \"%s\"'", "\" LogLevel ERROR\\n\"", "%", "key_path", ")", "# start ssh-agent and add key to it", "# info from SSH agent has to be put into the environment", "agent_info", "=", "subprocess", ".", "check_output", "(", "[", "'ssh-agent'", ",", "'-s'", "]", ")", "agent_info", "=", "agent_info", ".", "decode", "(", "'utf-8'", ")", "agent_info", "=", "agent_info", ".", "split", "(", ")", "AUTH_SOCK", "=", "agent_info", "[", "0", "]", ".", "split", "(", "'='", ")", "[", "1", "]", "[", ":", "-", "1", "]", "AGENT_PID", "=", "agent_info", "[", "3", "]", ".", "split", "(", "'='", ")", "[", "1", "]", "[", ":", "-", "1", "]", "os", ".", "putenv", "(", "'SSH_AUTH_SOCK'", ",", "AUTH_SOCK", ")", "os", ".", "putenv", "(", "'SSH_AGENT_PID'", ",", "AGENT_PID", ")", "run", "(", "[", "'ssh-add'", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.ssh/'", "+", "key_filename", ")", "]", ")" ]
Decrypts the deploy key and configures it with ssh The key is assumed to be encrypted as keypath + key_ext, and the encryption key is assumed to be set in the environment variable ``env_name``. If ``env_name`` is not set, it falls back to ``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility. If keypath + key_ext does not exist, it falls back to ``github_deploy_key.enc`` for backwards compatibility.
[ "Decrypts", "the", "deploy", "key", "and", "configures", "it", "with", "ssh" ]
python
train
42.93617
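The ssh-agent handling above depends on the textual form of `ssh-agent -s` output: after splitting on whitespace, the socket assignment is the first token and the PID assignment the fourth, each ending with a semicolon. A sketch of that parsing against a canned sample string; the path and PID are invented:

# Sample of what `ssh-agent -s` prints; values are made up for illustration.
sample = (
    "SSH_AUTH_SOCK=/tmp/ssh-abc123/agent.4242; export SSH_AUTH_SOCK;\n"
    "SSH_AGENT_PID=4243; export SSH_AGENT_PID;\n"
    "echo Agent pid 4243;\n"
)

tokens = sample.split()
auth_sock = tokens[0].split('=')[1][:-1]   # strip the trailing ';'
agent_pid = tokens[3].split('=')[1][:-1]

print(auth_sock)  # /tmp/ssh-abc123/agent.4242
print(agent_pid)  # 4243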
flo-compbio/genometools
genometools/ontology/util.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ontology/util.py#L53-L59
def download_release(download_file, release=None):
    """Downloads the "go-basic.obo" file for the specified release."""
    if release is None:
        release = get_latest_release()
    url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release
    #download_file = 'go-basic_%s.obo' % release
    misc.http_download(url, download_file)
[ "def", "download_release", "(", "download_file", ",", "release", "=", "None", ")", ":", "if", "release", "is", "None", ":", "release", "=", "get_latest_release", "(", ")", "url", "=", "'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo'", "%", "release", "#download_file = 'go-basic_%s.obo' % release", "misc", ".", "http_download", "(", "url", ",", "download_file", ")" ]
Downloads the "go-basic.obo" file for the specified release.
[ "Downloads", "the", "go", "-", "basic", ".", "obo", "file", "for", "the", "specified", "release", "." ]
python
train
53
arviz-devs/arviz
arviz/plots/ppcplot.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/ppcplot.py#L20-L480
def plot_ppc( data, kind="density", alpha=None, mean=True, figsize=None, textsize=None, data_pairs=None, var_names=None, coords=None, flatten=None, flatten_pp=None, num_pp_samples=None, random_seed=None, jitter=None, animated=False, animation_kwargs=None, legend=True, ): """ Plot for posterior predictive checks. Parameters ---------- data : az.InferenceData object InferenceData object containing the observed and posterior predictive data. kind : str Type of plot to display (density, cumulative, or scatter). Defaults to density. alpha : float Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density and cumulative, for scatter defaults to 0.7 mean : bool Whether or not to plot the mean posterior predictive distribution. Defaults to True figsize : tuple Figure size. If None it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize. data_pairs : dict Dictionary containing relations between observed data and posterior predictive data. Dictionary structure: Key = data var_name Value = posterior predictive var_name For example, `data_pairs = {'y' : 'y_hat'}` If None, it will assume that the observed data and the posterior predictive data have the same variable name. var_names : list List of variables to be plotted. Defaults to all observed variables in the model if None. coords : dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten : list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. flatten_pp : list List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`. num_pp_samples : int The number of posterior predictive samples to plot. For `kind` = 'scatter' and `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7 unless defined otherwise. Otherwise it defaults to all provided samples. random_seed : int Random number generator seed passed to numpy.random.seed to allow reproducibility of the plot. By default, no seed will be provided and the plot will change each call if a random sample is specified by `num_pp_samples`. jitter : float If kind is "scatter", jitter will add random uniform noise to the height of the ppc samples and observed data. By default 0. animated : bool Create an animation of one posterior predictive sample per frame. Defaults to False. animation_kwargs : dict Keywords passed to `animation.FuncAnimation`. legend : bool Add legend to figure. By default True. Returns ------- axes : matplotlib axes Examples -------- Plot the observed data KDE overlaid on posterior predictive KDEs. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('radon') >>> az.plot_ppc(data) Plot the overlay with empirical CDFs. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='cumulative') Use the coords and flatten parameters to plot selected variable dimensions across multiple plots. .. 
plot:: :context: close-figs >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[]) Plot the overlay using a stacked scatter plot that is particularly useful when the sample sizes are small. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='scatter', flatten=[], >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']}) Plot random posterior predictive sub-samples. .. plot:: :context: close-figs >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7) """ for group in ("posterior_predictive", "observed_data"): if not hasattr(data, group): raise TypeError( '`data` argument must have the group "{group}" for ppcplot'.format(group=group) ) if kind.lower() not in ("density", "cumulative", "scatter"): raise TypeError("`kind` argument must be either `density`, `cumulative`, or `scatter`") if data_pairs is None: data_pairs = {} if animation_kwargs is None: animation_kwargs = {} if platform.system() == "Linux": animation_kwargs.setdefault("blit", True) else: animation_kwargs.setdefault("blit", False) if animated and animation_kwargs["blit"] and platform.system() != "Linux": _log.warning( "If you experience problems rendering the animation try setting" "`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)" ) if alpha is None: if animated: alpha = 1 else: if kind.lower() == "scatter": alpha = 0.7 else: alpha = 0.2 if jitter is None: jitter = 0.0 assert jitter >= 0.0 observed = data.observed_data posterior_predictive = data.posterior_predictive if var_names is None: var_names = observed.data_vars var_names = _var_names(var_names, observed) pp_var_names = [data_pairs.get(var, var) for var in var_names] if flatten_pp is None and flatten is None: flatten_pp = list(posterior_predictive.dims.keys()) elif flatten_pp is None: flatten_pp = flatten if flatten is None: flatten = list(observed.dims.keys()) if coords is None: coords = {} if random_seed is not None: np.random.seed(random_seed) total_pp_samples = posterior_predictive.sizes["chain"] * posterior_predictive.sizes["draw"] if num_pp_samples is None: if kind == "scatter" and not animated: num_pp_samples = min(5, total_pp_samples) else: num_pp_samples = total_pp_samples if ( not isinstance(num_pp_samples, Integral) or num_pp_samples < 1 or num_pp_samples > total_pp_samples ): raise TypeError( "`num_pp_samples` must be an integer between 1 and " + "{limit}.".format(limit=total_pp_samples) ) pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False) for key in coords.keys(): coords[key] = np.where(np.in1d(observed[key], coords[key]))[0] obs_plotters = list( xarray_var_iter( observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True ) ) pp_plotters = list( xarray_var_iter( posterior_predictive.isel(coords), var_names=pp_var_names, skip_dims=set(flatten_pp), combined=True, ) ) length_plotters = len(obs_plotters) rows, cols = default_grid(length_plotters) (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size( figsize, textsize, rows, cols ) fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize) for i, ax in enumerate(axes): var_name, selection, obs_vals = obs_plotters[i] pp_var_name, _, pp_vals = pp_plotters[i] dtype = posterior_predictive[pp_var_name].dtype.kind # flatten non-specified dimensions obs_vals = obs_vals.flatten() pp_vals = pp_vals.reshape(total_pp_samples, -1) pp_sampled_vals = pp_vals[pp_sample_ix] if kind == "density": plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * 
linewidth} if dtype == "i": plot_kwargs["drawstyle"] = "steps-pre" ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if dtype == "f": plot_kde( obs_vals, label="Observed {}".format(var_name), plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3}, fill_kwargs={"alpha": 0}, ax=ax, legend=legend, ) else: nbins = round(len(obs_vals) ** 0.5) hist, bin_edges = np.histogram(obs_vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, label="Observed {}".format(var_name), color="k", linewidth=linewidth, zorder=3, drawstyle=plot_kwargs["drawstyle"], ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs ) else: # run plot_kde manually with one plot call pp_densities = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() if dtype == "f": pp_density, lower, upper = _fast_kde(vals) pp_x = np.linspace(lower, upper, len(pp_density)) pp_densities.extend([pp_x, pp_density]) else: nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) pp_densities.extend([bin_edges, hist]) ax.plot(*pp_densities, **plot_kwargs) if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": "C0", "linestyle": "--", "linewidth": linewidth, "zorder": 2, }, label="Posterior predictive mean {}".format(pp_var_name), ax=ax, legend=legend, ) else: vals = pp_vals.flatten() nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=2, linestyle="--", drawstyle=plot_kwargs["drawstyle"], ) ax.tick_params(labelsize=xt_labelsize) ax.set_yticks([]) elif kind == "cumulative": drawstyle = "default" if dtype == "f" else "steps-pre" ax.plot( *_empirical_cdf(obs_vals), color="k", linewidth=linewidth, label="Observed {}".format(var_name), drawstyle=drawstyle, zorder=3 ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, kind=kind, alpha=alpha, drawstyle=drawstyle, linewidth=linewidth, ) else: # run plot_kde manually with one plot call pp_densities = [] for vals in pp_sampled_vals: vals = np.array([vals]).flatten() pp_x, pp_density = _empirical_cdf(vals) pp_densities.extend([pp_x, pp_density]) ax.plot( *pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth ) ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name)) if mean: ax.plot( *_empirical_cdf(pp_vals.flatten()), color="C0", linestyle="--", linewidth=linewidth, drawstyle=drawstyle, label="Posterior predictive mean {}".format(pp_var_name) ) ax.set_yticks([0, 0.5, 1]) elif kind == "scatter": if mean: if dtype == "f": plot_kde( pp_vals.flatten(), plot_kwargs={ "color": "C0", "linestyle": "--", "linewidth": linewidth, "zorder": 3, }, label="Posterior predictive mean {}".format(pp_var_name), ax=ax, legend=legend, ) else: vals = pp_vals.flatten() nbins = round(len(vals) ** 0.5) hist, bin_edges = np.histogram(vals, bins=nbins, density=True) hist = np.concatenate((hist[:1], hist)) ax.plot( bin_edges, hist, color="C0", linewidth=linewidth, label="Posterior predictive mean {}".format(pp_var_name), zorder=3, linestyle="--", drawstyle="steps-pre", ) _, limit = ax.get_ylim() limit *= 1.05 y_rows = np.linspace(0, limit, num_pp_samples + 1) jitter_scale = y_rows[1] - y_rows[0] scale_low = 0 scale_high = 
jitter_scale * jitter obs_yvals = np.zeros_like(obs_vals, dtype=np.float64) if jitter: obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals)) ax.plot( obs_vals, obs_yvals, "o", color="C0", markersize=markersize, alpha=alpha, label="Observed {}".format(var_name), zorder=4, ) if animated: animate, init = _set_animation( pp_sampled_vals, ax, kind=kind, height=y_rows.mean() * 0.5, markersize=markersize, ) else: for vals, y in zip(pp_sampled_vals, y_rows[1:]): vals = np.ravel(vals) yvals = np.full_like(vals, y, dtype=np.float64) if jitter: yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals)) ax.plot( vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha ) ax.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name)) ax.set_yticks([]) if var_name != pp_var_name: xlabel = "{} / {}".format(var_name, pp_var_name) else: xlabel = var_name ax.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize) if legend: if i == 0: ax.legend(fontsize=xt_labelsize * 0.75) else: ax.legend([]) if animated: ani = animation.FuncAnimation( fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs ) return axes, ani else: return axes
[ "def", "plot_ppc", "(", "data", ",", "kind", "=", "\"density\"", ",", "alpha", "=", "None", ",", "mean", "=", "True", ",", "figsize", "=", "None", ",", "textsize", "=", "None", ",", "data_pairs", "=", "None", ",", "var_names", "=", "None", ",", "coords", "=", "None", ",", "flatten", "=", "None", ",", "flatten_pp", "=", "None", ",", "num_pp_samples", "=", "None", ",", "random_seed", "=", "None", ",", "jitter", "=", "None", ",", "animated", "=", "False", ",", "animation_kwargs", "=", "None", ",", "legend", "=", "True", ",", ")", ":", "for", "group", "in", "(", "\"posterior_predictive\"", ",", "\"observed_data\"", ")", ":", "if", "not", "hasattr", "(", "data", ",", "group", ")", ":", "raise", "TypeError", "(", "'`data` argument must have the group \"{group}\" for ppcplot'", ".", "format", "(", "group", "=", "group", ")", ")", "if", "kind", ".", "lower", "(", ")", "not", "in", "(", "\"density\"", ",", "\"cumulative\"", ",", "\"scatter\"", ")", ":", "raise", "TypeError", "(", "\"`kind` argument must be either `density`, `cumulative`, or `scatter`\"", ")", "if", "data_pairs", "is", "None", ":", "data_pairs", "=", "{", "}", "if", "animation_kwargs", "is", "None", ":", "animation_kwargs", "=", "{", "}", "if", "platform", ".", "system", "(", ")", "==", "\"Linux\"", ":", "animation_kwargs", ".", "setdefault", "(", "\"blit\"", ",", "True", ")", "else", ":", "animation_kwargs", ".", "setdefault", "(", "\"blit\"", ",", "False", ")", "if", "animated", "and", "animation_kwargs", "[", "\"blit\"", "]", "and", "platform", ".", "system", "(", ")", "!=", "\"Linux\"", ":", "_log", ".", "warning", "(", "\"If you experience problems rendering the animation try setting\"", "\"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)\"", ")", "if", "alpha", "is", "None", ":", "if", "animated", ":", "alpha", "=", "1", "else", ":", "if", "kind", ".", "lower", "(", ")", "==", "\"scatter\"", ":", "alpha", "=", "0.7", "else", ":", "alpha", "=", "0.2", "if", "jitter", "is", "None", ":", "jitter", "=", "0.0", "assert", "jitter", ">=", "0.0", "observed", "=", "data", ".", "observed_data", "posterior_predictive", "=", "data", ".", "posterior_predictive", "if", "var_names", "is", "None", ":", "var_names", "=", "observed", ".", "data_vars", "var_names", "=", "_var_names", "(", "var_names", ",", "observed", ")", "pp_var_names", "=", "[", "data_pairs", ".", "get", "(", "var", ",", "var", ")", "for", "var", "in", "var_names", "]", "if", "flatten_pp", "is", "None", "and", "flatten", "is", "None", ":", "flatten_pp", "=", "list", "(", "posterior_predictive", ".", "dims", ".", "keys", "(", ")", ")", "elif", "flatten_pp", "is", "None", ":", "flatten_pp", "=", "flatten", "if", "flatten", "is", "None", ":", "flatten", "=", "list", "(", "observed", ".", "dims", ".", "keys", "(", ")", ")", "if", "coords", "is", "None", ":", "coords", "=", "{", "}", "if", "random_seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "random_seed", ")", "total_pp_samples", "=", "posterior_predictive", ".", "sizes", "[", "\"chain\"", "]", "*", "posterior_predictive", ".", "sizes", "[", "\"draw\"", "]", "if", "num_pp_samples", "is", "None", ":", "if", "kind", "==", "\"scatter\"", "and", "not", "animated", ":", "num_pp_samples", "=", "min", "(", "5", ",", "total_pp_samples", ")", "else", ":", "num_pp_samples", "=", "total_pp_samples", "if", "(", "not", "isinstance", "(", "num_pp_samples", ",", "Integral", ")", "or", "num_pp_samples", "<", "1", "or", "num_pp_samples", ">", "total_pp_samples", ")", ":", "raise", 
"TypeError", "(", "\"`num_pp_samples` must be an integer between 1 and \"", "+", "\"{limit}.\"", ".", "format", "(", "limit", "=", "total_pp_samples", ")", ")", "pp_sample_ix", "=", "np", ".", "random", ".", "choice", "(", "total_pp_samples", ",", "size", "=", "num_pp_samples", ",", "replace", "=", "False", ")", "for", "key", "in", "coords", ".", "keys", "(", ")", ":", "coords", "[", "key", "]", "=", "np", ".", "where", "(", "np", ".", "in1d", "(", "observed", "[", "key", "]", ",", "coords", "[", "key", "]", ")", ")", "[", "0", "]", "obs_plotters", "=", "list", "(", "xarray_var_iter", "(", "observed", ".", "isel", "(", "coords", ")", ",", "skip_dims", "=", "set", "(", "flatten", ")", ",", "var_names", "=", "var_names", ",", "combined", "=", "True", ")", ")", "pp_plotters", "=", "list", "(", "xarray_var_iter", "(", "posterior_predictive", ".", "isel", "(", "coords", ")", ",", "var_names", "=", "pp_var_names", ",", "skip_dims", "=", "set", "(", "flatten_pp", ")", ",", "combined", "=", "True", ",", ")", ")", "length_plotters", "=", "len", "(", "obs_plotters", ")", "rows", ",", "cols", "=", "default_grid", "(", "length_plotters", ")", "(", "figsize", ",", "ax_labelsize", ",", "_", ",", "xt_labelsize", ",", "linewidth", ",", "markersize", ")", "=", "_scale_fig_size", "(", "figsize", ",", "textsize", ",", "rows", ",", "cols", ")", "fig", ",", "axes", "=", "_create_axes_grid", "(", "length_plotters", ",", "rows", ",", "cols", ",", "figsize", "=", "figsize", ")", "for", "i", ",", "ax", "in", "enumerate", "(", "axes", ")", ":", "var_name", ",", "selection", ",", "obs_vals", "=", "obs_plotters", "[", "i", "]", "pp_var_name", ",", "_", ",", "pp_vals", "=", "pp_plotters", "[", "i", "]", "dtype", "=", "posterior_predictive", "[", "pp_var_name", "]", ".", "dtype", ".", "kind", "# flatten non-specified dimensions", "obs_vals", "=", "obs_vals", ".", "flatten", "(", ")", "pp_vals", "=", "pp_vals", ".", "reshape", "(", "total_pp_samples", ",", "-", "1", ")", "pp_sampled_vals", "=", "pp_vals", "[", "pp_sample_ix", "]", "if", "kind", "==", "\"density\"", ":", "plot_kwargs", "=", "{", "\"color\"", ":", "\"C5\"", ",", "\"alpha\"", ":", "alpha", ",", "\"linewidth\"", ":", "0.5", "*", "linewidth", "}", "if", "dtype", "==", "\"i\"", ":", "plot_kwargs", "[", "\"drawstyle\"", "]", "=", "\"steps-pre\"", "ax", ".", "plot", "(", "[", "]", ",", "color", "=", "\"C5\"", ",", "label", "=", "\"Posterior predictive {}\"", ".", "format", "(", "pp_var_name", ")", ")", "if", "dtype", "==", "\"f\"", ":", "plot_kde", "(", "obs_vals", ",", "label", "=", "\"Observed {}\"", ".", "format", "(", "var_name", ")", ",", "plot_kwargs", "=", "{", "\"color\"", ":", "\"k\"", ",", "\"linewidth\"", ":", "linewidth", ",", "\"zorder\"", ":", "3", "}", ",", "fill_kwargs", "=", "{", "\"alpha\"", ":", "0", "}", ",", "ax", "=", "ax", ",", "legend", "=", "legend", ",", ")", "else", ":", "nbins", "=", "round", "(", "len", "(", "obs_vals", ")", "**", "0.5", ")", "hist", ",", "bin_edges", "=", "np", ".", "histogram", "(", "obs_vals", ",", "bins", "=", "nbins", ",", "density", "=", "True", ")", "hist", "=", "np", ".", "concatenate", "(", "(", "hist", "[", ":", "1", "]", ",", "hist", ")", ")", "ax", ".", "plot", "(", "bin_edges", ",", "hist", ",", "label", "=", "\"Observed {}\"", ".", "format", "(", "var_name", ")", ",", "color", "=", "\"k\"", ",", "linewidth", "=", "linewidth", ",", "zorder", "=", "3", ",", "drawstyle", "=", "plot_kwargs", "[", "\"drawstyle\"", "]", ",", ")", "if", "animated", ":", "animate", ",", "init", "=", "_set_animation", "(", 
"pp_sampled_vals", ",", "ax", ",", "dtype", "=", "dtype", ",", "kind", "=", "kind", ",", "plot_kwargs", "=", "plot_kwargs", ")", "else", ":", "# run plot_kde manually with one plot call", "pp_densities", "=", "[", "]", "for", "vals", "in", "pp_sampled_vals", ":", "vals", "=", "np", ".", "array", "(", "[", "vals", "]", ")", ".", "flatten", "(", ")", "if", "dtype", "==", "\"f\"", ":", "pp_density", ",", "lower", ",", "upper", "=", "_fast_kde", "(", "vals", ")", "pp_x", "=", "np", ".", "linspace", "(", "lower", ",", "upper", ",", "len", "(", "pp_density", ")", ")", "pp_densities", ".", "extend", "(", "[", "pp_x", ",", "pp_density", "]", ")", "else", ":", "nbins", "=", "round", "(", "len", "(", "vals", ")", "**", "0.5", ")", "hist", ",", "bin_edges", "=", "np", ".", "histogram", "(", "vals", ",", "bins", "=", "nbins", ",", "density", "=", "True", ")", "hist", "=", "np", ".", "concatenate", "(", "(", "hist", "[", ":", "1", "]", ",", "hist", ")", ")", "pp_densities", ".", "extend", "(", "[", "bin_edges", ",", "hist", "]", ")", "ax", ".", "plot", "(", "*", "pp_densities", ",", "*", "*", "plot_kwargs", ")", "if", "mean", ":", "if", "dtype", "==", "\"f\"", ":", "plot_kde", "(", "pp_vals", ".", "flatten", "(", ")", ",", "plot_kwargs", "=", "{", "\"color\"", ":", "\"C0\"", ",", "\"linestyle\"", ":", "\"--\"", ",", "\"linewidth\"", ":", "linewidth", ",", "\"zorder\"", ":", "2", ",", "}", ",", "label", "=", "\"Posterior predictive mean {}\"", ".", "format", "(", "pp_var_name", ")", ",", "ax", "=", "ax", ",", "legend", "=", "legend", ",", ")", "else", ":", "vals", "=", "pp_vals", ".", "flatten", "(", ")", "nbins", "=", "round", "(", "len", "(", "vals", ")", "**", "0.5", ")", "hist", ",", "bin_edges", "=", "np", ".", "histogram", "(", "vals", ",", "bins", "=", "nbins", ",", "density", "=", "True", ")", "hist", "=", "np", ".", "concatenate", "(", "(", "hist", "[", ":", "1", "]", ",", "hist", ")", ")", "ax", ".", "plot", "(", "bin_edges", ",", "hist", ",", "color", "=", "\"C0\"", ",", "linewidth", "=", "linewidth", ",", "label", "=", "\"Posterior predictive mean {}\"", ".", "format", "(", "pp_var_name", ")", ",", "zorder", "=", "2", ",", "linestyle", "=", "\"--\"", ",", "drawstyle", "=", "plot_kwargs", "[", "\"drawstyle\"", "]", ",", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "xt_labelsize", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "elif", "kind", "==", "\"cumulative\"", ":", "drawstyle", "=", "\"default\"", "if", "dtype", "==", "\"f\"", "else", "\"steps-pre\"", "ax", ".", "plot", "(", "*", "_empirical_cdf", "(", "obs_vals", ")", ",", "color", "=", "\"k\"", ",", "linewidth", "=", "linewidth", ",", "label", "=", "\"Observed {}\"", ".", "format", "(", "var_name", ")", ",", "drawstyle", "=", "drawstyle", ",", "zorder", "=", "3", ")", "if", "animated", ":", "animate", ",", "init", "=", "_set_animation", "(", "pp_sampled_vals", ",", "ax", ",", "kind", "=", "kind", ",", "alpha", "=", "alpha", ",", "drawstyle", "=", "drawstyle", ",", "linewidth", "=", "linewidth", ",", ")", "else", ":", "# run plot_kde manually with one plot call", "pp_densities", "=", "[", "]", "for", "vals", "in", "pp_sampled_vals", ":", "vals", "=", "np", ".", "array", "(", "[", "vals", "]", ")", ".", "flatten", "(", ")", "pp_x", ",", "pp_density", "=", "_empirical_cdf", "(", "vals", ")", "pp_densities", ".", "extend", "(", "[", "pp_x", ",", "pp_density", "]", ")", "ax", ".", "plot", "(", "*", "pp_densities", ",", "alpha", "=", "alpha", ",", "color", "=", "\"C5\"", ",", "drawstyle", "=", "drawstyle", ",", "linewidth", "=", 
"linewidth", ")", "ax", ".", "plot", "(", "[", "]", ",", "color", "=", "\"C5\"", ",", "label", "=", "\"Posterior predictive {}\"", ".", "format", "(", "pp_var_name", ")", ")", "if", "mean", ":", "ax", ".", "plot", "(", "*", "_empirical_cdf", "(", "pp_vals", ".", "flatten", "(", ")", ")", ",", "color", "=", "\"C0\"", ",", "linestyle", "=", "\"--\"", ",", "linewidth", "=", "linewidth", ",", "drawstyle", "=", "drawstyle", ",", "label", "=", "\"Posterior predictive mean {}\"", ".", "format", "(", "pp_var_name", ")", ")", "ax", ".", "set_yticks", "(", "[", "0", ",", "0.5", ",", "1", "]", ")", "elif", "kind", "==", "\"scatter\"", ":", "if", "mean", ":", "if", "dtype", "==", "\"f\"", ":", "plot_kde", "(", "pp_vals", ".", "flatten", "(", ")", ",", "plot_kwargs", "=", "{", "\"color\"", ":", "\"C0\"", ",", "\"linestyle\"", ":", "\"--\"", ",", "\"linewidth\"", ":", "linewidth", ",", "\"zorder\"", ":", "3", ",", "}", ",", "label", "=", "\"Posterior predictive mean {}\"", ".", "format", "(", "pp_var_name", ")", ",", "ax", "=", "ax", ",", "legend", "=", "legend", ",", ")", "else", ":", "vals", "=", "pp_vals", ".", "flatten", "(", ")", "nbins", "=", "round", "(", "len", "(", "vals", ")", "**", "0.5", ")", "hist", ",", "bin_edges", "=", "np", ".", "histogram", "(", "vals", ",", "bins", "=", "nbins", ",", "density", "=", "True", ")", "hist", "=", "np", ".", "concatenate", "(", "(", "hist", "[", ":", "1", "]", ",", "hist", ")", ")", "ax", ".", "plot", "(", "bin_edges", ",", "hist", ",", "color", "=", "\"C0\"", ",", "linewidth", "=", "linewidth", ",", "label", "=", "\"Posterior predictive mean {}\"", ".", "format", "(", "pp_var_name", ")", ",", "zorder", "=", "3", ",", "linestyle", "=", "\"--\"", ",", "drawstyle", "=", "\"steps-pre\"", ",", ")", "_", ",", "limit", "=", "ax", ".", "get_ylim", "(", ")", "limit", "*=", "1.05", "y_rows", "=", "np", ".", "linspace", "(", "0", ",", "limit", ",", "num_pp_samples", "+", "1", ")", "jitter_scale", "=", "y_rows", "[", "1", "]", "-", "y_rows", "[", "0", "]", "scale_low", "=", "0", "scale_high", "=", "jitter_scale", "*", "jitter", "obs_yvals", "=", "np", ".", "zeros_like", "(", "obs_vals", ",", "dtype", "=", "np", ".", "float64", ")", "if", "jitter", ":", "obs_yvals", "+=", "np", ".", "random", ".", "uniform", "(", "low", "=", "scale_low", ",", "high", "=", "scale_high", ",", "size", "=", "len", "(", "obs_vals", ")", ")", "ax", ".", "plot", "(", "obs_vals", ",", "obs_yvals", ",", "\"o\"", ",", "color", "=", "\"C0\"", ",", "markersize", "=", "markersize", ",", "alpha", "=", "alpha", ",", "label", "=", "\"Observed {}\"", ".", "format", "(", "var_name", ")", ",", "zorder", "=", "4", ",", ")", "if", "animated", ":", "animate", ",", "init", "=", "_set_animation", "(", "pp_sampled_vals", ",", "ax", ",", "kind", "=", "kind", ",", "height", "=", "y_rows", ".", "mean", "(", ")", "*", "0.5", ",", "markersize", "=", "markersize", ",", ")", "else", ":", "for", "vals", ",", "y", "in", "zip", "(", "pp_sampled_vals", ",", "y_rows", "[", "1", ":", "]", ")", ":", "vals", "=", "np", ".", "ravel", "(", "vals", ")", "yvals", "=", "np", ".", "full_like", "(", "vals", ",", "y", ",", "dtype", "=", "np", ".", "float64", ")", "if", "jitter", ":", "yvals", "+=", "np", ".", "random", ".", "uniform", "(", "low", "=", "scale_low", ",", "high", "=", "scale_high", ",", "size", "=", "len", "(", "vals", ")", ")", "ax", ".", "plot", "(", "vals", ",", "yvals", ",", "\"o\"", ",", "zorder", "=", "2", ",", "color", "=", "\"C5\"", ",", "markersize", "=", "markersize", ",", "alpha", "=", "alpha", ")", "ax", 
".", "plot", "(", "[", "]", ",", "\"C5o\"", ",", "label", "=", "\"Posterior predictive {}\"", ".", "format", "(", "pp_var_name", ")", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "if", "var_name", "!=", "pp_var_name", ":", "xlabel", "=", "\"{} / {}\"", ".", "format", "(", "var_name", ",", "pp_var_name", ")", "else", ":", "xlabel", "=", "var_name", "ax", ".", "set_xlabel", "(", "make_label", "(", "xlabel", ",", "selection", ")", ",", "fontsize", "=", "ax_labelsize", ")", "if", "legend", ":", "if", "i", "==", "0", ":", "ax", ".", "legend", "(", "fontsize", "=", "xt_labelsize", "*", "0.75", ")", "else", ":", "ax", ".", "legend", "(", "[", "]", ")", "if", "animated", ":", "ani", "=", "animation", ".", "FuncAnimation", "(", "fig", ",", "animate", ",", "np", ".", "arange", "(", "0", ",", "num_pp_samples", ")", ",", "init_func", "=", "init", ",", "*", "*", "animation_kwargs", ")", "return", "axes", ",", "ani", "else", ":", "return", "axes" ]
Plot for posterior predictive checks. Parameters ---------- data : az.InferenceData object InferenceData object containing the observed and posterior predictive data. kind : str Type of plot to display (density, cumulative, or scatter). Defaults to density. alpha : float Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density and cumulative, for scatter defaults to 0.7 mean : bool Whether or not to plot the mean posterior predictive distribution. Defaults to True figsize : tuple Figure size. If None it will be defined automatically. textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize. data_pairs : dict Dictionary containing relations between observed data and posterior predictive data. Dictionary structure: Key = data var_name Value = posterior predictive var_name For example, `data_pairs = {'y' : 'y_hat'}` If None, it will assume that the observed data and the posterior predictive data have the same variable name. var_names : list List of variables to be plotted. Defaults to all observed variables in the model if None. coords : dict Dictionary mapping dimensions to selected coordinates to be plotted. Dimensions without a mapping specified will include all coordinates for that dimension. Defaults to including all coordinates for all dimensions if None. flatten : list List of dimensions to flatten in observed_data. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. flatten_pp : list List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates specified in the coords argument. Defaults to flattening all of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`. num_pp_samples : int The number of posterior predictive samples to plot. For `kind` = 'scatter' and `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7 unless defined otherwise. Otherwise it defaults to all provided samples. random_seed : int Random number generator seed passed to numpy.random.seed to allow reproducibility of the plot. By default, no seed will be provided and the plot will change each call if a random sample is specified by `num_pp_samples`. jitter : float If kind is "scatter", jitter will add random uniform noise to the height of the ppc samples and observed data. By default 0. animated : bool Create an animation of one posterior predictive sample per frame. Defaults to False. animation_kwargs : dict Keywords passed to `animation.FuncAnimation`. legend : bool Add legend to figure. By default True. Returns ------- axes : matplotlib axes Examples -------- Plot the observed data KDE overlaid on posterior predictive KDEs. .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('radon') >>> az.plot_ppc(data) Plot the overlay with empirical CDFs. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='cumulative') Use the coords and flatten parameters to plot selected variable dimensions across multiple plots. .. plot:: :context: close-figs >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[]) Plot the overlay using a stacked scatter plot that is particularly useful when the sample sizes are small. .. plot:: :context: close-figs >>> az.plot_ppc(data, kind='scatter', flatten=[], >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']}) Plot random posterior predictive sub-samples. .. plot:: :context: close-figs >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
[ "Plot", "for", "posterior", "predictive", "checks", "." ]
python
train
35.442516
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3333-L3393
def greater_equal(lhs, rhs): """Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs, otherwise return 0(false). Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray Output array of boolean values. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> (x >= 1).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (x >= y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.greater_equal(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (z >= y).asnumpy() array([[ 1., 1.], [ 0., 1.]], dtype=float32) """ # pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_greater_equal, lambda x, y: 1 if x >= y else 0, _internal._greater_equal_scalar, _internal._lesser_equal_scalar)
[ "def", "greater_equal", "(", "lhs", ",", "rhs", ")", ":", "# pylint: disable= no-member, protected-access", "return", "_ufunc_helper", "(", "lhs", ",", "rhs", ",", "op", ".", "broadcast_greater_equal", ",", "lambda", "x", ",", "y", ":", "1", "if", "x", ">=", "y", "else", "0", ",", "_internal", ".", "_greater_equal_scalar", ",", "_internal", ".", "_lesser_equal_scalar", ")" ]
Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs, otherwise return 0(false). Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be compared. rhs : scalar or mxnet.ndarray.array Second array to be compared. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray Output array of boolean values. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> (x >= 1).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (x >= y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.greater_equal(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (z >= y).asnumpy() array([[ 1., 1.], [ 0., 1.]], dtype=float32)
[ "Returns", "the", "result", "of", "element", "-", "wise", "**", "greater", "than", "or", "equal", "to", "**", "(", ">", "=", ")", "comparison", "operation", "with", "broadcasting", "." ]
python
train
30.540984
databio/pypiper
pypiper/ngstk.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1706-L1715
def get_peak_number(self, sample): """ Counts number of peaks from a sample's peak file. :param pipelines.Sample sample: Sample object with "peaks" attribute. """ proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE) out, err = proc.communicate() sample["peakNumber"] = re.sub("\D.*", "", out) return sample
[ "def", "get_peak_number", "(", "self", ",", "sample", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "\"wc\"", ",", "\"-l\"", ",", "sample", ".", "peaks", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "proc", ".", "communicate", "(", ")", "sample", "[", "\"peakNumber\"", "]", "=", "re", ".", "sub", "(", "\"\\D.*\"", ",", "\"\"", ",", "out", ")", "return", "sample" ]
Counts number of peaks from a sample's peak file. :param pipelines.Sample sample: Sample object with "peaks" attribute.
[ "Counts", "number", "of", "peaks", "from", "a", "sample", "s", "peak", "file", "." ]
python
train
38.5
google/grr
grr/core/grr_response_core/lib/config_lib.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/config_lib.py#L1509-L1514
def DEFINE_choice(self, name, default, choices, help, constant=False): """A helper for defining choice string options.""" self.AddOption( type_info.Choice( name=name, default=default, choices=choices, description=help), constant=constant)
[ "def", "DEFINE_choice", "(", "self", ",", "name", ",", "default", ",", "choices", ",", "help", ",", "constant", "=", "False", ")", ":", "self", ".", "AddOption", "(", "type_info", ".", "Choice", "(", "name", "=", "name", ",", "default", "=", "default", ",", "choices", "=", "choices", ",", "description", "=", "help", ")", ",", "constant", "=", "constant", ")" ]
A helper for defining choice string options.
[ "A", "helper", "for", "defining", "choice", "string", "options", "." ]
python
train
44.833333
heuer/segno
segno/encoder.py
https://github.com/heuer/segno/blob/64d912a2bd17d0b5ff3e8b5d37098edfc663c2b3/segno/encoder.py#L1206-L1242
def make_matrix(version, reserve_regions=True, add_timing=True): """\ Creates a matrix of the provided `size` (w x h) initialized with the (illegal) value 0x2. The "timing pattern" is already added to the matrix and the version and format areas are initialized with 0x0. :param int version: The (Micro) QR Code version :rtype: tuple of bytearrays """ size = calc_matrix_size(version) row = [0x2] * size matrix = tuple([bytearray(row) for i in range(size)]) if reserve_regions: if version > 6: # Reserve version pattern areas for i in range(6): # Upper right matrix[i][-11] = 0x0 matrix[i][-10] = 0x0 matrix[i][-9] = 0x0 # Lower left matrix[-11][i] = 0x0 matrix[-10][i] = 0x0 matrix[-9][i] = 0x0 # Reserve format pattern areas for i in range(9): matrix[i][8] = 0x0 # Upper left matrix[8][i] = 0x0 # Upper bottom if version > 0: matrix[-i][8] = 0x0 # Bottom left matrix[8][- i] = 0x0 # Upper right if add_timing: # ISO/IEC 18004:2015 -- 6.3.5 Timing pattern (page 17) add_timing_pattern(matrix, version < 1) return matrix
[ "def", "make_matrix", "(", "version", ",", "reserve_regions", "=", "True", ",", "add_timing", "=", "True", ")", ":", "size", "=", "calc_matrix_size", "(", "version", ")", "row", "=", "[", "0x2", "]", "*", "size", "matrix", "=", "tuple", "(", "[", "bytearray", "(", "row", ")", "for", "i", "in", "range", "(", "size", ")", "]", ")", "if", "reserve_regions", ":", "if", "version", ">", "6", ":", "# Reserve version pattern areas", "for", "i", "in", "range", "(", "6", ")", ":", "# Upper right", "matrix", "[", "i", "]", "[", "-", "11", "]", "=", "0x0", "matrix", "[", "i", "]", "[", "-", "10", "]", "=", "0x0", "matrix", "[", "i", "]", "[", "-", "9", "]", "=", "0x0", "# Lower left", "matrix", "[", "-", "11", "]", "[", "i", "]", "=", "0x0", "matrix", "[", "-", "10", "]", "[", "i", "]", "=", "0x0", "matrix", "[", "-", "9", "]", "[", "i", "]", "=", "0x0", "# Reserve format pattern areas", "for", "i", "in", "range", "(", "9", ")", ":", "matrix", "[", "i", "]", "[", "8", "]", "=", "0x0", "# Upper left", "matrix", "[", "8", "]", "[", "i", "]", "=", "0x0", "# Upper bottom", "if", "version", ">", "0", ":", "matrix", "[", "-", "i", "]", "[", "8", "]", "=", "0x0", "# Bottom left", "matrix", "[", "8", "]", "[", "-", "i", "]", "=", "0x0", "# Upper right", "if", "add_timing", ":", "# ISO/IEC 18004:2015 -- 6.3.5 Timing pattern (page 17)", "add_timing_pattern", "(", "matrix", ",", "version", "<", "1", ")", "return", "matrix" ]
\ Creates a matrix of the provided `size` (w x h) initialized with the (illegal) value 0x2. The "timing pattern" is already added to the matrix and the version and format areas are initialized with 0x0. :param int version: The (Micro) QR Code version :rtype: tuple of bytearrays
[ "\\", "Creates", "a", "matrix", "of", "the", "provided", "size", "(", "w", "x", "h", ")", "initialized", "with", "the", "(", "illegal", ")", "value", "0x2", "." ]
python
train
35.27027
programa-stic/barf-project
barf/core/smt/smttranslator.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/smt/smttranslator.py#L251-L257
def _translate_dst_oprnd(self, operand): """Translate destination operand to a SMT expression. """ if isinstance(operand, ReilRegisterOperand): return self._translate_dst_register_oprnd(operand) else: raise Exception("Invalid operand type")
[ "def", "_translate_dst_oprnd", "(", "self", ",", "operand", ")", ":", "if", "isinstance", "(", "operand", ",", "ReilRegisterOperand", ")", ":", "return", "self", ".", "_translate_dst_register_oprnd", "(", "operand", ")", "else", ":", "raise", "Exception", "(", "\"Invalid operand type\"", ")" ]
Translate destination operand to a SMT expression.
[ "Translate", "destination", "operand", "to", "a", "SMT", "expression", "." ]
python
train
41.428571
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/util.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/util.py#L56-L97
def separate_groups(groups, key, total): """Separate the group into overloaded and under-loaded groups. The revised over-loaded groups increases the choice space for future selection of most suitable group based on search criteria. For example: Given the groups (a:4, b:4, c:3, d:2) where the number represents the number of elements for each group. smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded and 'd' as under-loaded. separate-groups combines 'a' with 'b' as over-loaded, allowing to select between these two groups to transfer the element to 'd'. :param groups: list of groups :param key: function to retrieve element count from group :param total: total number of elements to distribute :returns: sorted lists of over loaded (descending) and under loaded (ascending) group """ optimum, extra = compute_optimum(len(groups), total) over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total) # If every group is optimal return if not extra: return over_loaded, under_loaded # Some groups in optimal may have a number of elements that is optimum + 1. # In this case they should be considered over_loaded. potential_under_loaded = [ group for group in optimal if key(group) == optimum ] potential_over_loaded = [ group for group in optimal if key(group) > optimum ] revised_under_loaded = under_loaded + potential_under_loaded revised_over_loaded = over_loaded + potential_over_loaded return ( sorted(revised_over_loaded, key=key, reverse=True), sorted(revised_under_loaded, key=key), )
[ "def", "separate_groups", "(", "groups", ",", "key", ",", "total", ")", ":", "optimum", ",", "extra", "=", "compute_optimum", "(", "len", "(", "groups", ")", ",", "total", ")", "over_loaded", ",", "under_loaded", ",", "optimal", "=", "_smart_separate_groups", "(", "groups", ",", "key", ",", "total", ")", "# If every group is optimal return", "if", "not", "extra", ":", "return", "over_loaded", ",", "under_loaded", "# Some groups in optimal may have a number of elements that is optimum + 1.", "# In this case they should be considered over_loaded.", "potential_under_loaded", "=", "[", "group", "for", "group", "in", "optimal", "if", "key", "(", "group", ")", "==", "optimum", "]", "potential_over_loaded", "=", "[", "group", "for", "group", "in", "optimal", "if", "key", "(", "group", ")", ">", "optimum", "]", "revised_under_loaded", "=", "under_loaded", "+", "potential_under_loaded", "revised_over_loaded", "=", "over_loaded", "+", "potential_over_loaded", "return", "(", "sorted", "(", "revised_over_loaded", ",", "key", "=", "key", ",", "reverse", "=", "True", ")", ",", "sorted", "(", "revised_under_loaded", ",", "key", "=", "key", ")", ",", ")" ]
Separate the group into overloaded and under-loaded groups. The revised over-loaded groups increases the choice space for future selection of most suitable group based on search criteria. For example: Given the groups (a:4, b:4, c:3, d:2) where the number represents the number of elements for each group. smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded and 'd' as under-loaded. separate-groups combines 'a' with 'b' as over-loaded, allowing to select between these two groups to transfer the element to 'd'. :param groups: list of groups :param key: function to retrieve element count from group :param total: total number of elements to distribute :returns: sorted lists of over loaded (descending) and under loaded (ascending) group
[ "Separate", "the", "group", "into", "overloaded", "and", "under", "-", "loaded", "groups", "." ]
python
train
39.857143
pre-commit/pre-commit-mirror-maker
pre_commit_mirror_maker/main.py
https://github.com/pre-commit/pre-commit-mirror-maker/blob/8bafa3b87e67d291d5a747f0137b921a170a1723/pre_commit_mirror_maker/main.py#L12-L28
def split_by_commas(maybe_s: str) -> Tuple[str, ...]: """Split a string by commas, but allow escaped commas. - If maybe_s is falsey, returns an empty tuple - Ignore backslashed commas """ if not maybe_s: return () parts: List[str] = [] split_by_backslash = maybe_s.split(r'\,') for split_by_backslash_part in split_by_backslash: splitby_comma = split_by_backslash_part.split(',') if parts: parts[-1] += ',' + splitby_comma[0] else: parts.append(splitby_comma[0]) parts.extend(splitby_comma[1:]) return tuple(parts)
[ "def", "split_by_commas", "(", "maybe_s", ":", "str", ")", "->", "Tuple", "[", "str", ",", "...", "]", ":", "if", "not", "maybe_s", ":", "return", "(", ")", "parts", ":", "List", "[", "str", "]", "=", "[", "]", "split_by_backslash", "=", "maybe_s", ".", "split", "(", "r'\\,'", ")", "for", "split_by_backslash_part", "in", "split_by_backslash", ":", "splitby_comma", "=", "split_by_backslash_part", ".", "split", "(", "','", ")", "if", "parts", ":", "parts", "[", "-", "1", "]", "+=", "','", "+", "splitby_comma", "[", "0", "]", "else", ":", "parts", ".", "append", "(", "splitby_comma", "[", "0", "]", ")", "parts", ".", "extend", "(", "splitby_comma", "[", "1", ":", "]", ")", "return", "tuple", "(", "parts", ")" ]
Split a string by commas, but allow escaped commas. - If maybe_s is falsey, returns an empty tuple - Ignore backslashed commas
[ "Split", "a", "string", "by", "commas", "but", "allow", "escaped", "commas", ".", "-", "If", "maybe_s", "is", "falsey", "returns", "an", "empty", "tuple", "-", "Ignore", "backslashed", "commas" ]
python
train
35.176471
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5332-L5406
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
[ "def", "combine_first", "(", "self", ",", "other", ")", ":", "import", "pandas", ".", "core", ".", "computation", ".", "expressions", "as", "expressions", "def", "extract_values", "(", "arr", ")", ":", "# Does two things:", "# 1. maybe gets the values from the Series / Index", "# 2. convert datelike to i8", "if", "isinstance", "(", "arr", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "arr", "=", "arr", ".", "_values", "if", "needs_i8_conversion", "(", "arr", ")", ":", "if", "is_extension_array_dtype", "(", "arr", ".", "dtype", ")", ":", "arr", "=", "arr", ".", "asi8", "else", ":", "arr", "=", "arr", ".", "view", "(", "'i8'", ")", "return", "arr", "def", "combiner", "(", "x", ",", "y", ")", ":", "mask", "=", "isna", "(", "x", ")", "if", "isinstance", "(", "mask", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "mask", "=", "mask", ".", "_values", "x_values", "=", "extract_values", "(", "x", ")", "y_values", "=", "extract_values", "(", "y", ")", "# If the column y in other DataFrame is not in first DataFrame,", "# just return y_values.", "if", "y", ".", "name", "not", "in", "self", ".", "columns", ":", "return", "y_values", "return", "expressions", ".", "where", "(", "mask", ",", "y_values", ",", "x_values", ")", "return", "self", ".", "combine", "(", "other", ",", "combiner", ",", "overwrite", "=", "False", ")" ]
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
[ "Update", "null", "elements", "with", "value", "in", "the", "same", "location", "in", "other", "." ]
python
train
31.053333
johntruckenbrodt/spatialist
spatialist/raster.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/raster.py#L590-L607
def is_valid(self): """ Check image integrity. Tries to compute the checksum for each raster layer and returns False if this fails. See this forum entry: `How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_. Returns ------- bool is the file valid? """ for i in range(self.raster.RasterCount): try: checksum = self.raster.GetRasterBand(i + 1).Checksum() except RuntimeError: return False return True
[ "def", "is_valid", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "raster", ".", "RasterCount", ")", ":", "try", ":", "checksum", "=", "self", ".", "raster", ".", "GetRasterBand", "(", "i", "+", "1", ")", ".", "Checksum", "(", ")", "except", "RuntimeError", ":", "return", "False", "return", "True" ]
Check image integrity. Tries to compute the checksum for each raster layer and returns False if this fails. See this forum entry: `How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_. Returns ------- bool is the file valid?
[ "Check", "image", "integrity", ".", "Tries", "to", "compute", "the", "checksum", "for", "each", "raster", "layer", "and", "returns", "False", "if", "this", "fails", ".", "See", "this", "forum", "entry", ":", "How", "to", "check", "if", "image", "is", "valid?", "<https", ":", "//", "lists", ".", "osgeo", ".", "org", "/", "pipermail", "/", "gdal", "-", "dev", "/", "2013", "-", "November", "/", "037520", ".", "html", ">", "_", "." ]
python
train
32.833333
benspaulding/django-faq
faq/views/normal.py
https://github.com/benspaulding/django-faq/blob/9a744e7c1943fd05bfa42c84b2ce003367c58e6e/faq/views/normal.py#L11-L30
def topic_detail(request, slug): """ A detail view of a Topic Templates: :template:`faq/topic_detail.html` Context: topic An :model:`faq.Topic` object. question_list A list of all published :model:`faq.Question` objects that relate to the given :model:`faq.Topic`. """ extra_context = { 'question_list': Question.objects.published().filter(topic__slug=slug), } return object_detail(request, queryset=Topic.objects.published(), extra_context=extra_context, template_object_name='topic', slug=slug)
[ "def", "topic_detail", "(", "request", ",", "slug", ")", ":", "extra_context", "=", "{", "'question_list'", ":", "Question", ".", "objects", ".", "published", "(", ")", ".", "filter", "(", "topic__slug", "=", "slug", ")", ",", "}", "return", "object_detail", "(", "request", ",", "queryset", "=", "Topic", ".", "objects", ".", "published", "(", ")", ",", "extra_context", "=", "extra_context", ",", "template_object_name", "=", "'topic'", ",", "slug", "=", "slug", ")" ]
A detail view of a Topic Templates: :template:`faq/topic_detail.html` Context: topic An :model:`faq.Topic` object. question_list A list of all published :model:`faq.Question` objects that relate to the given :model:`faq.Topic`.
[ "A", "detail", "view", "of", "a", "Topic" ]
python
train
29.4
PmagPy/PmagPy
programs/demag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L6067-L6153
def update_selection(self): """ Convenience function update display (figures, text boxes and statistics windows) with a new selection of specimen """ self.clear_boxes() # commented out to allow propogation of higher level viewing state self.clear_high_level_pars() if self.UPPER_LEVEL_SHOW != "specimens": self.mean_type_box.SetValue("None") # -------------------------- # check if the coordinate system in the window exists (if not change to "specimen" coordinate system) # -------------------------- coordinate_system = self.coordinates_box.GetValue() if coordinate_system == 'tilt-corrected' and \ len(self.Data[self.s]['zijdblock_tilt']) == 0: self.coordinates_box.SetStringSelection('specimen') elif coordinate_system == 'geographic' and \ len(self.Data[self.s]['zijdblock_geo']) == 0: self.coordinates_box.SetStringSelection("specimen") if coordinate_system != self.coordinates_box.GetValue() and self.ie_open: self.ie.coordinates_box.SetStringSelection( self.coordinates_box.GetValue()) self.ie.update_editor() coordinate_system = self.coordinates_box.GetValue() self.COORDINATE_SYSTEM = coordinate_system # -------------------------- # update treatment list # -------------------------- self.update_bounds_boxes() # -------------------------- # update high level boxes # -------------------------- high_level = self.level_box.GetValue() old_string = self.level_names.GetValue() new_string = old_string if high_level == 'sample': if self.s in self.Data_hierarchy['sample_of_specimen']: new_string = self.Data_hierarchy['sample_of_specimen'][self.s] else: new_string = '' if high_level == 'site': if self.s in self.Data_hierarchy['site_of_specimen']: new_string = self.Data_hierarchy['site_of_specimen'][self.s] else: new_string = '' if high_level == 'location': if self.s in self.Data_hierarchy['location_of_specimen']: new_string = self.Data_hierarchy['location_of_specimen'][self.s] else: new_string = '' self.level_names.SetValue(new_string) if self.ie_open and new_string != old_string: self.ie.level_names.SetValue(new_string) self.ie.on_select_level_name(-1, True) # -------------------------- # update PCA box # -------------------------- self.update_PCA_box() # update warning self.generate_warning_text() self.update_warning_box() # update choices in the fit box self.update_fit_boxes() self.update_mean_fit_box() # measurements text box self.Add_text() # draw figures if self.current_fit: self.draw_figure(self.s, False) else: self.draw_figure(self.s, True) # update high level stats self.update_high_level_stats() # redraw interpretations self.update_GUI_with_new_interpretation()
[ "def", "update_selection", "(", "self", ")", ":", "self", ".", "clear_boxes", "(", ")", "# commented out to allow propogation of higher level viewing state", "self", ".", "clear_high_level_pars", "(", ")", "if", "self", ".", "UPPER_LEVEL_SHOW", "!=", "\"specimens\"", ":", "self", ".", "mean_type_box", ".", "SetValue", "(", "\"None\"", ")", "# --------------------------", "# check if the coordinate system in the window exists (if not change to \"specimen\" coordinate system)", "# --------------------------", "coordinate_system", "=", "self", ".", "coordinates_box", ".", "GetValue", "(", ")", "if", "coordinate_system", "==", "'tilt-corrected'", "and", "len", "(", "self", ".", "Data", "[", "self", ".", "s", "]", "[", "'zijdblock_tilt'", "]", ")", "==", "0", ":", "self", ".", "coordinates_box", ".", "SetStringSelection", "(", "'specimen'", ")", "elif", "coordinate_system", "==", "'geographic'", "and", "len", "(", "self", ".", "Data", "[", "self", ".", "s", "]", "[", "'zijdblock_geo'", "]", ")", "==", "0", ":", "self", ".", "coordinates_box", ".", "SetStringSelection", "(", "\"specimen\"", ")", "if", "coordinate_system", "!=", "self", ".", "coordinates_box", ".", "GetValue", "(", ")", "and", "self", ".", "ie_open", ":", "self", ".", "ie", ".", "coordinates_box", ".", "SetStringSelection", "(", "self", ".", "coordinates_box", ".", "GetValue", "(", ")", ")", "self", ".", "ie", ".", "update_editor", "(", ")", "coordinate_system", "=", "self", ".", "coordinates_box", ".", "GetValue", "(", ")", "self", ".", "COORDINATE_SYSTEM", "=", "coordinate_system", "# --------------------------", "# update treatment list", "# --------------------------", "self", ".", "update_bounds_boxes", "(", ")", "# --------------------------", "# update high level boxes", "# --------------------------", "high_level", "=", "self", ".", "level_box", ".", "GetValue", "(", ")", "old_string", "=", "self", ".", "level_names", ".", "GetValue", "(", ")", "new_string", "=", "old_string", "if", "high_level", "==", "'sample'", ":", "if", "self", ".", "s", "in", "self", ".", "Data_hierarchy", "[", "'sample_of_specimen'", "]", ":", "new_string", "=", "self", ".", "Data_hierarchy", "[", "'sample_of_specimen'", "]", "[", "self", ".", "s", "]", "else", ":", "new_string", "=", "''", "if", "high_level", "==", "'site'", ":", "if", "self", ".", "s", "in", "self", ".", "Data_hierarchy", "[", "'site_of_specimen'", "]", ":", "new_string", "=", "self", ".", "Data_hierarchy", "[", "'site_of_specimen'", "]", "[", "self", ".", "s", "]", "else", ":", "new_string", "=", "''", "if", "high_level", "==", "'location'", ":", "if", "self", ".", "s", "in", "self", ".", "Data_hierarchy", "[", "'location_of_specimen'", "]", ":", "new_string", "=", "self", ".", "Data_hierarchy", "[", "'location_of_specimen'", "]", "[", "self", ".", "s", "]", "else", ":", "new_string", "=", "''", "self", ".", "level_names", ".", "SetValue", "(", "new_string", ")", "if", "self", ".", "ie_open", "and", "new_string", "!=", "old_string", ":", "self", ".", "ie", ".", "level_names", ".", "SetValue", "(", "new_string", ")", "self", ".", "ie", ".", "on_select_level_name", "(", "-", "1", ",", "True", ")", "# --------------------------", "# update PCA box", "# --------------------------", "self", ".", "update_PCA_box", "(", ")", "# update warning", "self", ".", "generate_warning_text", "(", ")", "self", ".", "update_warning_box", "(", ")", "# update choices in the fit box", "self", ".", "update_fit_boxes", "(", ")", "self", ".", "update_mean_fit_box", "(", ")", "# measurements text box", "self", ".", "Add_text", "(", ")", "# draw figures", "if", "self", ".", "current_fit", ":", "self", ".", "draw_figure", "(", "self", ".", "s", ",", "False", ")", "else", ":", "self", ".", "draw_figure", "(", "self", ".", "s", ",", "True", ")", "# update high level stats", "self", ".", "update_high_level_stats", "(", ")", "# redraw interpretations", "self", ".", "update_GUI_with_new_interpretation", "(", ")" ]
Convenience function update display (figures, text boxes and statistics windows) with a new selection of specimen
[ "Convenience", "function", "update", "display", "(", "figures", "text", "boxes", "and", "statistics", "windows", ")", "with", "a", "new", "selection", "of", "specimen" ]
python
train
37.356322
mickybart/python-atlasbroker
atlasbroker/service.py
https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L74-L93
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: """Provision the new instance see openbrokerapi documentation Returns: ProvisionedServiceSpec """ if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER: # Provision the instance on an Existing Atlas Cluster # Find or create the instance instance = self._backend.find(instance_id) # Create the instance if needed return self._backend.create(instance, service_details.parameters, existing=True) # Plan not supported raise ErrPlanUnsupported(service_details.plan_id)
[ "def", "provision", "(", "self", ",", "instance_id", ":", "str", ",", "service_details", ":", "ProvisionDetails", ",", "async_allowed", ":", "bool", ")", "->", "ProvisionedServiceSpec", ":", "if", "service_details", ".", "plan_id", "==", "self", ".", "_backend", ".", "config", ".", "UUID_PLANS_EXISTING_CLUSTER", ":", "# Provision the instance on an Existing Atlas Cluster", "# Find or create the instance", "instance", "=", "self", ".", "_backend", ".", "find", "(", "instance_id", ")", "# Create the instance if needed", "return", "self", ".", "_backend", ".", "create", "(", "instance", ",", "service_details", ".", "parameters", ",", "existing", "=", "True", ")", "# Plan not supported", "raise", "ErrPlanUnsupported", "(", "service_details", ".", "plan_id", ")" ]
Provision the new instance see openbrokerapi documentation Returns: ProvisionedServiceSpec
[ "Provision", "the", "new", "instance", "see", "openbrokerapi", "documentation", "Returns", ":", "ProvisionedServiceSpec" ]
python
train
39
dfm/george
george/solvers/basic.py
https://github.com/dfm/george/blob/44819680036387625ee89f81c55104f3c1600759/george/solvers/basic.py#L89-L102
def dot_solve(self, y): r""" Compute the inner product of a vector with the inverse of the covariance matrix applied to itself: .. math:: y\,K^{-1}\,y Args: y (ndarray[nsamples]): The vector :math:`y`. """ return np.dot(y.T, cho_solve(self._factor, y))
[ "def", "dot_solve", "(", "self", ",", "y", ")", ":", "return", "np", ".", "dot", "(", "y", ".", "T", ",", "cho_solve", "(", "self", ".", "_factor", ",", "y", ")", ")" ]
r""" Compute the inner product of a vector with the inverse of the covariance matrix applied to itself: .. math:: y\,K^{-1}\,y Args: y (ndarray[nsamples]): The vector :math:`y`.
[ "r", "Compute", "the", "inner", "product", "of", "a", "vector", "with", "the", "inverse", "of", "the", "covariance", "matrix", "applied", "to", "itself", ":" ]
python
train
23.071429
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1049-L1074
def _extract_services_list_helper(services): """Extract a OrderedDict of {service: [ports]} of the supplied services for use by the other functions. The services object can either be: - None : no services were passed (an empty dict is returned) - a list of strings - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - An array of [{'service': service_name, ...}, ...] @param services: see above @returns OrderedDict(service: [ports], ...) """ if services is None: return {} if isinstance(services, dict): services = services.values() # either extract the list of services from the dictionary, or if # it is a simple string, use that. i.e. works with mixed lists. _s = OrderedDict() for s in services: if isinstance(s, dict) and 'service' in s: _s[s['service']] = s.get('ports', []) if isinstance(s, str): _s[s] = [] return _s
[ "def", "_extract_services_list_helper", "(", "services", ")", ":", "if", "services", "is", "None", ":", "return", "{", "}", "if", "isinstance", "(", "services", ",", "dict", ")", ":", "services", "=", "services", ".", "values", "(", ")", "# either extract the list of services from the dictionary, or if", "# it is a simple string, use that. i.e. works with mixed lists.", "_s", "=", "OrderedDict", "(", ")", "for", "s", "in", "services", ":", "if", "isinstance", "(", "s", ",", "dict", ")", "and", "'service'", "in", "s", ":", "_s", "[", "s", "[", "'service'", "]", "]", "=", "s", ".", "get", "(", "'ports'", ",", "[", "]", ")", "if", "isinstance", "(", "s", ",", "str", ")", ":", "_s", "[", "s", "]", "=", "[", "]", "return", "_s" ]
Extract a OrderedDict of {service: [ports]} of the supplied services for use by the other functions. The services object can either be: - None : no services were passed (an empty dict is returned) - a list of strings - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - An array of [{'service': service_name, ...}, ...] @param services: see above @returns OrderedDict(service: [ports], ...)
[ "Extract", "a", "OrderedDict", "of", "{", "service", ":", "[", "ports", "]", "}", "of", "the", "supplied", "services", "for", "use", "by", "the", "other", "functions", "." ]
python
train
36.730769
lmjohns3/theanets
theanets/feedforward.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L363-L374
def monitors(self, **kwargs): '''Return expressions that should be computed to monitor training. Returns ------- monitors : list of (name, expression) pairs A list of named monitor expressions to compute for this network. ''' monitors = super(Classifier, self).monitors(**kwargs) regs = regularizers.from_kwargs(self, **kwargs) outputs, _ = self.build_graph(regs) return monitors + [('acc', self.losses[0].accuracy(outputs))]
[ "def", "monitors", "(", "self", ",", "*", "*", "kwargs", ")", ":", "monitors", "=", "super", "(", "Classifier", ",", "self", ")", ".", "monitors", "(", "*", "*", "kwargs", ")", "regs", "=", "regularizers", ".", "from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", "outputs", ",", "_", "=", "self", ".", "build_graph", "(", "regs", ")", "return", "monitors", "+", "[", "(", "'acc'", ",", "self", ".", "losses", "[", "0", "]", ".", "accuracy", "(", "outputs", ")", ")", "]" ]
Return expressions that should be computed to monitor training. Returns ------- monitors : list of (name, expression) pairs A list of named monitor expressions to compute for this network.
[ "Return", "expressions", "that", "should", "be", "computed", "to", "monitor", "training", "." ]
python
test
41.583333
bitesofcode/projexui
projexui/widgets/xtoolbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbutton.py#L94-L110
def blink(self, state=True): """ Starts or stops the blinking state for this button. This only works for when the toolbutton is in Shadowed or Colored mode. :param state | <bool> :return <bool> | success """ if self._blinking == state: return True elif not self.graphicsEffect(): return False else: self._blinking = state if state: self.startTimer(self.blinkInterval())
[ "def", "blink", "(", "self", ",", "state", "=", "True", ")", ":", "if", "self", ".", "_blinking", "==", "state", ":", "return", "True", "elif", "not", "self", ".", "graphicsEffect", "(", ")", ":", "return", "False", "else", ":", "self", ".", "_blinking", "=", "state", "if", "state", ":", "self", ".", "startTimer", "(", "self", ".", "blinkInterval", "(", ")", ")" ]
Starts or stops the blinking state for this button. This only works for when the toolbutton is in Shadowed or Colored mode. :param state | <bool> :return <bool> | success
[ "Starts", "or", "stops", "the", "blinking", "state", "for", "this", "button", ".", "This", "only", "works", "for", "when", "the", "toolbutton", "is", "in", "Shadowed", "or", "Colored", "mode", ".", ":", "param", "state", "|", "<bool", ">", ":", "return", "<bool", ">", "|", "success" ]
python
train
31.352941
apache/incubator-mxnet
python/mxnet/module/python_module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/python_module.py#L285-L302
def forward(self, data_batch, is_train=None): """Forward computation. Here we do nothing but to keep a reference to the scores and the labels so that we can do backward computation. Parameters ---------- data_batch : DataBatch Could be anything with similar API implemented. is_train : bool Default is ``None``, which means `is_train` takes the value of ``self.for_training``. """ self._scores = data_batch.data[0] if is_train is None: is_train = self.for_training if is_train: self._labels = data_batch.label[0]
[ "def", "forward", "(", "self", ",", "data_batch", ",", "is_train", "=", "None", ")", ":", "self", ".", "_scores", "=", "data_batch", ".", "data", "[", "0", "]", "if", "is_train", "is", "None", ":", "is_train", "=", "self", ".", "for_training", "if", "is_train", ":", "self", ".", "_labels", "=", "data_batch", ".", "label", "[", "0", "]" ]
Forward computation. Here we do nothing but to keep a reference to the scores and the labels so that we can do backward computation. Parameters ---------- data_batch : DataBatch Could be anything with similar API implemented. is_train : bool Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
[ "Forward", "computation", ".", "Here", "we", "do", "nothing", "but", "to", "keep", "a", "reference", "to", "the", "scores", "and", "the", "labels", "so", "that", "we", "can", "do", "backward", "computation", "." ]
python
train
34.777778
tanghaibao/goatools
goatools/cli/compare_gos.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/compare_gos.py#L214-L218
def get_grouped(self, go_ntsets, go_all, gosubdag, **kws): """Get Grouped object.""" kws_grpd = {k:v for k, v in kws.items() if k in Grouped.kws_dict} kws_grpd['go2nt'] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag) return Grouped(gosubdag, self.godag.version, **kws_grpd)
[ "def", "get_grouped", "(", "self", ",", "go_ntsets", ",", "go_all", ",", "gosubdag", ",", "*", "*", "kws", ")", ":", "kws_grpd", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kws", ".", "items", "(", ")", "if", "k", "in", "Grouped", ".", "kws_dict", "}", "kws_grpd", "[", "'go2nt'", "]", "=", "self", ".", "_init_go2ntpresent", "(", "go_ntsets", ",", "go_all", ",", "gosubdag", ")", "return", "Grouped", "(", "gosubdag", ",", "self", ".", "godag", ".", "version", ",", "*", "*", "kws_grpd", ")" ]
Get Grouped object.
[ "Get", "Grouped", "object", "." ]
python
train
61.6
sjkingo/python-freshdesk
freshdesk/v2/api.py
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L461-L464
def _delete(self, url): """Wrapper around request.delete() to use the API prefix. Returns a JSON response.""" req = self._session.delete(self._api_prefix + url) return self._action(req)
[ "def", "_delete", "(", "self", ",", "url", ")", ":", "req", "=", "self", ".", "_session", ".", "delete", "(", "self", ".", "_api_prefix", "+", "url", ")", "return", "self", ".", "_action", "(", "req", ")" ]
Wrapper around request.delete() to use the API prefix. Returns a JSON response.
[ "Wrapper", "around", "request", ".", "delete", "()", "to", "use", "the", "API", "prefix", ".", "Returns", "a", "JSON", "response", "." ]
python
train
51.5
mcieslik-mctp/papy
src/papy/graph.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/graph.py#L228-L237
def rank_width(self): """ Returns the width of each rank in the graph. #TODO """ rank_width = defaultdict(int) node_rank = self.node_rank() for rank in node_rank.values(): rank_width[rank] += 1 return dict(rank_width)
[ "def", "rank_width", "(", "self", ")", ":", "rank_width", "=", "defaultdict", "(", "int", ")", "node_rank", "=", "self", ".", "node_rank", "(", ")", "for", "rank", "in", "node_rank", ".", "values", "(", ")", ":", "rank_width", "[", "rank", "]", "+=", "1", "return", "dict", "(", "rank_width", ")" ]
Returns the width of each rank in the graph. #TODO
[ "Returns", "the", "width", "of", "each", "rank", "in", "the", "graph", ".", "#TODO" ]
python
train
28.5
couchbase/couchbase-python-client
couchbase/connstr.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/connstr.py#L156-L170
def _build_connstr(host, port, bucket): """ Converts a 1.x host:port specification to a connection string """ hostlist = [] if isinstance(host, (tuple, list)): for curhost in host: if isinstance(curhost, (list, tuple)): hostlist.append(_fmthost(*curhost)) else: hostlist.append(curhost) else: hostlist.append(_fmthost(host, port)) return 'http://{0}/{1}'.format(','.join(hostlist), bucket)
[ "def", "_build_connstr", "(", "host", ",", "port", ",", "bucket", ")", ":", "hostlist", "=", "[", "]", "if", "isinstance", "(", "host", ",", "(", "tuple", ",", "list", ")", ")", ":", "for", "curhost", "in", "host", ":", "if", "isinstance", "(", "curhost", ",", "(", "list", ",", "tuple", ")", ")", ":", "hostlist", ".", "append", "(", "_fmthost", "(", "*", "curhost", ")", ")", "else", ":", "hostlist", ".", "append", "(", "curhost", ")", "else", ":", "hostlist", ".", "append", "(", "_fmthost", "(", "host", ",", "port", ")", ")", "return", "'http://{0}/{1}'", ".", "format", "(", "','", ".", "join", "(", "hostlist", ")", ",", "bucket", ")" ]
Converts a 1.x host:port specification to a connection string
[ "Converts", "a", "1", ".", "x", "host", ":", "port", "specification", "to", "a", "connection", "string" ]
python
train
31.733333
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L866-L909
def get_manylinux_wheel_url(self, package_name, package_version): """ For a given package name, returns a link to the download URL, else returns None. Related: https://github.com/Miserlou/Zappa/issues/398 Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae This function downloads metadata JSON of `package_name` from Pypi and examines if the package has a manylinux wheel. This function also caches the JSON file so that we don't have to poll Pypi every time. """ cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info') if not os.path.isdir(cached_pypi_info_dir): os.makedirs(cached_pypi_info_dir) # Even though the metadata is for the package, we save it in a # filename that includes the package's version. This helps in # invalidating the cached file if the user moves to a different # version of the package. # Related: https://github.com/Miserlou/Zappa/issues/899 json_file = '{0!s}-{1!s}.json'.format(package_name, package_version) json_file_path = os.path.join(cached_pypi_info_dir, json_file) if os.path.exists(json_file_path): with open(json_file_path, 'rb') as metafile: data = json.load(metafile) else: url = 'https://pypi.python.org/pypi/{}/json'.format(package_name) try: res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5))) data = res.json() except Exception as e: # pragma: no cover return None with open(json_file_path, 'wb') as metafile: jsondata = json.dumps(data) metafile.write(bytes(jsondata, "utf-8")) if package_version not in data['releases']: return None for f in data['releases'][package_version]: if f['filename'].endswith(self.manylinux_wheel_file_suffix): return f['url'] return None
[ "def", "get_manylinux_wheel_url", "(", "self", ",", "package_name", ",", "package_version", ")", ":", "cached_pypi_info_dir", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "'cached_pypi_info'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "cached_pypi_info_dir", ")", ":", "os", ".", "makedirs", "(", "cached_pypi_info_dir", ")", "# Even though the metadata is for the package, we save it in a", "# filename that includes the package's version. This helps in", "# invalidating the cached file if the user moves to a different", "# version of the package.", "# Related: https://github.com/Miserlou/Zappa/issues/899", "json_file", "=", "'{0!s}-{1!s}.json'", ".", "format", "(", "package_name", ",", "package_version", ")", "json_file_path", "=", "os", ".", "path", ".", "join", "(", "cached_pypi_info_dir", ",", "json_file", ")", "if", "os", ".", "path", ".", "exists", "(", "json_file_path", ")", ":", "with", "open", "(", "json_file_path", ",", "'rb'", ")", "as", "metafile", ":", "data", "=", "json", ".", "load", "(", "metafile", ")", "else", ":", "url", "=", "'https://pypi.python.org/pypi/{}/json'", ".", "format", "(", "package_name", ")", "try", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "float", "(", "os", ".", "environ", ".", "get", "(", "'PIP_TIMEOUT'", ",", "1.5", ")", ")", ")", "data", "=", "res", ".", "json", "(", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "return", "None", "with", "open", "(", "json_file_path", ",", "'wb'", ")", "as", "metafile", ":", "jsondata", "=", "json", ".", "dumps", "(", "data", ")", "metafile", ".", "write", "(", "bytes", "(", "jsondata", ",", "\"utf-8\"", ")", ")", "if", "package_version", "not", "in", "data", "[", "'releases'", "]", ":", "return", "None", "for", "f", "in", "data", "[", "'releases'", "]", "[", "package_version", "]", ":", "if", "f", "[", "'filename'", "]", ".", "endswith", "(", "self", ".", "manylinux_wheel_file_suffix", ")", ":", "return", "f", "[", "'url'", "]", "return", "None" ]
For a given package name, returns a link to the download URL, else returns None. Related: https://github.com/Miserlou/Zappa/issues/398 Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae This function downloads metadata JSON of `package_name` from Pypi and examines if the package has a manylinux wheel. This function also caches the JSON file so that we don't have to poll Pypi every time.
[ "For", "a", "given", "package", "name", "returns", "a", "link", "to", "the", "download", "URL", "else", "returns", "None", "." ]
python
train
46.545455
ministryofjustice/money-to-prisoners-common
build_tasks.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/build_tasks.py#L140-L173
def set_version(context: Context, version=None, bump=False): """ Updates the version of MTP-common """ if bump and version: raise TaskError('You cannot bump and set a specific version') if bump: from mtp_common import VERSION version = list(VERSION) version[-1] += 1 else: try: version = list(map(int, version.split('.'))) assert len(version) == 3 except (AttributeError, ValueError, AssertionError): raise TaskError('Version must be in the form N.N.N') dotted_version = '.'.join(map(str, version)) replacements = [ (r'^VERSION =.*$', 'VERSION = (%s)' % ', '.join(map(str, version)), 'mtp_common/__init__.py'), (r'^ "version":.*$', ' "version": "%s",' % dotted_version, 'package.json'), ] for search, replacement, path in replacements: with open(os.path.join(root_path, path)) as f: content = f.read() content = re.sub(search, replacement, content, flags=re.MULTILINE) with open(os.path.join(root_path, path), 'w') as f: f.write(content) context.debug('Updated version to %s' % dotted_version)
[ "def", "set_version", "(", "context", ":", "Context", ",", "version", "=", "None", ",", "bump", "=", "False", ")", ":", "if", "bump", "and", "version", ":", "raise", "TaskError", "(", "'You cannot bump and set a specific version'", ")", "if", "bump", ":", "from", "mtp_common", "import", "VERSION", "version", "=", "list", "(", "VERSION", ")", "version", "[", "-", "1", "]", "+=", "1", "else", ":", "try", ":", "version", "=", "list", "(", "map", "(", "int", ",", "version", ".", "split", "(", "'.'", ")", ")", ")", "assert", "len", "(", "version", ")", "==", "3", "except", "(", "AttributeError", ",", "ValueError", ",", "AssertionError", ")", ":", "raise", "TaskError", "(", "'Version must be in the form N.N.N'", ")", "dotted_version", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "version", ")", ")", "replacements", "=", "[", "(", "r'^VERSION =.*$'", ",", "'VERSION = (%s)'", "%", "', '", ".", "join", "(", "map", "(", "str", ",", "version", ")", ")", ",", "'mtp_common/__init__.py'", ")", ",", "(", "r'^ \"version\":.*$'", ",", "' \"version\": \"%s\",'", "%", "dotted_version", ",", "'package.json'", ")", ",", "]", "for", "search", ",", "replacement", ",", "path", "in", "replacements", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "root_path", ",", "path", ")", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "content", "=", "re", ".", "sub", "(", "search", ",", "replacement", ",", "content", ",", "flags", "=", "re", ".", "MULTILINE", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "root_path", ",", "path", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "content", ")", "context", ".", "debug", "(", "'Updated version to %s'", "%", "dotted_version", ")" ]
Updates the version of MTP-common
[ "Updates", "the", "version", "of", "MTP", "-", "common" ]
python
train
35.058824
apache/airflow
airflow/contrib/operators/gcp_transfer_operator.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_transfer_operator.py#L106-L110
def _convert_date_to_dict(field_date): """ Convert native python ``datetime.date`` object to a format supported by the API """ return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
[ "def", "_convert_date_to_dict", "(", "field_date", ")", ":", "return", "{", "DAY", ":", "field_date", ".", "day", ",", "MONTH", ":", "field_date", ".", "month", ",", "YEAR", ":", "field_date", ".", "year", "}" ]
Convert native python ``datetime.date`` object to a format supported by the API
[ "Convert", "native", "python", "datetime", ".", "date", "object", "to", "a", "format", "supported", "by", "the", "API" ]
python
test
46.4
jsommers/switchyard
switchyard/lib/topo/topobuild.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/topo/topobuild.py#L393-L412
def union(self, other, rename=False): ''' Union/add two topologies together to form a larger topology. If rename is False, the method assumes that node names don't clash (i.e., you've called addNodeLabelPrefix or you've explicitly chosen names to avoid clashes). If rename is True, nodes/links are relabeled such that the new "prefix" for each node is the graph name (i.e., for graph name A, node h1 is renamed A_h1). This method returns a new Topology object and does not modify either topology used for unioning. ''' if rename: self.nxgraph = Topology.__relabel_graph(self.__nxgraph, self.name) other.nxgraph = Topology.__relabel_graph(other.__nxgraph, other.name) nxgraph = nx.union(self.nxgraph, other.nxgraph, name="{}_{}".format(self.name, other.name)) newtopo = Topology(nxgraph=nxgraph, name="{}_{}".format(self.name, other.name)) return newtopo
[ "def", "union", "(", "self", ",", "other", ",", "rename", "=", "False", ")", ":", "if", "rename", ":", "self", ".", "nxgraph", "=", "Topology", ".", "__relabel_graph", "(", "self", ".", "__nxgraph", ",", "self", ".", "name", ")", "other", ".", "nxgraph", "=", "Topology", ".", "__relabel_graph", "(", "other", ".", "__nxgraph", ",", "other", ".", "name", ")", "nxgraph", "=", "nx", ".", "union", "(", "self", ".", "nxgraph", ",", "other", ".", "nxgraph", ",", "name", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "other", ".", "name", ")", ")", "newtopo", "=", "Topology", "(", "nxgraph", "=", "nxgraph", ",", "name", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "other", ".", "name", ")", ")", "return", "newtopo" ]
Union/add two topologies together to form a larger topology. If rename is False, the method assumes that node names don't clash (i.e., you've called addNodeLabelPrefix or you've explicitly chosen names to avoid clashes). If rename is True, nodes/links are relabeled such that the new "prefix" for each node is the graph name (i.e., for graph name A, node h1 is renamed A_h1). This method returns a new Topology object and does not modify either topology used for unioning.
[ "Union", "/", "add", "two", "topologies", "together", "to", "form", "a", "larger", "topology", "." ]
python
train
49.6
mkouhei/bootstrap-py
bootstrap_py/control.py
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L39-L47
def check_repository_existence(params): """Check repository existence. :param argparse.Namespace params: parameters """ repodir = os.path.join(params.outdir, params.name) if os.path.isdir(repodir): raise Conflict( 'Package repository "{0}" has already exists.'.format(repodir))
[ "def", "check_repository_existence", "(", "params", ")", ":", "repodir", "=", "os", ".", "path", ".", "join", "(", "params", ".", "outdir", ",", "params", ".", "name", ")", "if", "os", ".", "path", ".", "isdir", "(", "repodir", ")", ":", "raise", "Conflict", "(", "'Package repository \"{0}\" has already exists.'", ".", "format", "(", "repodir", ")", ")" ]
Check repository existence. :param argparse.Namespace params: parameters
[ "Check", "repository", "existence", "." ]
python
train
34.444444
googleads/googleads-python-lib
googleads/common.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/common.py#L1096-L1110
def egress(self, envelope, http_headers, operation, binding_options): """Overriding the egress function to set our headers. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. binding_options: An options dict for the SOAP binding. Returns: A tuple of the envelope and headers. """ custom_headers = self._header_handler.GetHTTPHeaders() http_headers.update(custom_headers) return envelope, http_headers
[ "def", "egress", "(", "self", ",", "envelope", ",", "http_headers", ",", "operation", ",", "binding_options", ")", ":", "custom_headers", "=", "self", ".", "_header_handler", ".", "GetHTTPHeaders", "(", ")", "http_headers", ".", "update", "(", "custom_headers", ")", "return", "envelope", ",", "http_headers" ]
Overriding the egress function to set our headers. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. binding_options: An options dict for the SOAP binding. Returns: A tuple of the envelope and headers.
[ "Overriding", "the", "egress", "function", "to", "set", "our", "headers", "." ]
python
train
35.933333
developersociety/django-glitter
glitter/blocks/video/validators.py
https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/blocks/video/validators.py#L45-L48
def validate_url(value): """ Validate url. """ if not re.match(VIMEO_URL_RE, value) and not re.match(YOUTUBE_URL_RE, value): raise ValidationError('Invalid URL - only Youtube, Vimeo can be used.')
[ "def", "validate_url", "(", "value", ")", ":", "if", "not", "re", ".", "match", "(", "VIMEO_URL_RE", ",", "value", ")", "and", "not", "re", ".", "match", "(", "YOUTUBE_URL_RE", ",", "value", ")", ":", "raise", "ValidationError", "(", "'Invalid URL - only Youtube, Vimeo can be used.'", ")" ]
Validate url.
[ "Validate", "url", "." ]
python
train
52.25
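The VIMEO_URL_RE and YOUTUBE_URL_RE patterns are not shown in this record, so the sketch below substitutes illustrative regexes of its own; it demonstrates the whitelist-by-regex idea, not django-glitter's actual expressions.

import re

# Illustrative patterns only -- not the regexes used by django-glitter.
VIMEO_URL_RE = re.compile(r'^https?://(www\.)?vimeo\.com/\d+')
YOUTUBE_URL_RE = re.compile(r'^https?://(www\.)?(youtube\.com/watch\?v=|youtu\.be/)[\w-]+')

def validate_url(value):
    # Accept a URL only if it matches one of the whitelisted providers.
    if not VIMEO_URL_RE.match(value) and not YOUTUBE_URL_RE.match(value):
        raise ValueError('Invalid URL - only Youtube, Vimeo can be used.')

validate_url('https://vimeo.com/76979871')      # passes
validate_url('https://youtu.be/dQw4w9WgXcQ')    # passes
# validate_url('https://example.com/video')     # would raise ValueError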
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L339-L346
def does_not_contain_duplicates(self): """Asserts that val is iterable and does not contain any duplicate items.""" try: if len(self.val) == len(set(self.val)): return self except TypeError: raise TypeError('val is not iterable') self._err('Expected <%s> to not contain duplicates, but did.' % self.val)
[ "def", "does_not_contain_duplicates", "(", "self", ")", ":", "try", ":", "if", "len", "(", "self", ".", "val", ")", "==", "len", "(", "set", "(", "self", ".", "val", ")", ")", ":", "return", "self", "except", "TypeError", ":", "raise", "TypeError", "(", "'val is not iterable'", ")", "self", ".", "_err", "(", "'Expected <%s> to not contain duplicates, but did.'", "%", "self", ".", "val", ")" ]
Asserts that val is iterable and does not contain any duplicate items.
[ "Asserts", "that", "val", "is", "iterable", "and", "does", "not", "contain", "any", "duplicate", "items", "." ]
python
valid
46
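A standalone sketch of the same duplicate check outside the assertpy fluent API. It assumes hashable items, which is also what the len(set(...)) comparison in the original implies.

def assert_no_duplicates(val):
    # A collection has no duplicates iff converting it to a set preserves its length.
    try:
        unique = set(val)
    except TypeError:
        raise TypeError('val is not iterable or its items are not hashable')
    if len(val) != len(unique):
        raise AssertionError('Expected <%s> to not contain duplicates, but did.' % (val,))

assert_no_duplicates([1, 2, 3])     # passes silently
# assert_no_duplicates([1, 2, 2])   # would raise AssertionError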
caseyjlaw/rtpipe
rtpipe/parsecal.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecal.py#L413-L422
def flagants(self, threshold=50): """ Flags solutions with amplitude more than threshold larger than median. """ # identify very low gain amps not already flagged badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0] if len(badsols): self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.' % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols])) for sol in badsols: self.flagged[sol] = True
[ "def", "flagants", "(", "self", ",", "threshold", "=", "50", ")", ":", "# identify very low gain amps not already flagged", "badsols", "=", "n", ".", "where", "(", "(", "n", ".", "median", "(", "self", ".", "amp", ")", "/", "self", ".", "amp", ">", "threshold", ")", "&", "(", "self", ".", "flagged", "==", "False", ")", ")", "[", "0", "]", "if", "len", "(", "badsols", ")", ":", "self", ".", "logger", ".", "info", "(", "'Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.'", "%", "(", "str", "(", "badsols", ")", ",", "self", ".", "mjd", "[", "badsols", "]", ",", "self", ".", "antname", "[", "badsols", "]", ",", "self", ".", "ifid", "[", "badsols", "]", ")", ")", "for", "sol", "in", "badsols", ":", "self", ".", "flagged", "[", "sol", "]", "=", "True" ]
Flags solutions with amplitude more than threshold larger than median.
[ "Flags", "solutions", "with", "amplitude", "more", "than", "threshold", "larger", "than", "median", "." ]
python
train
56.1
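A numpy-only sketch of the flagging rule used by flagants above, assuming amp is an array of gain amplitudes and flagged a boolean mask of the same shape; the logging and per-solution metadata lookups are omitted.

import numpy as np

def flag_low_gain(amp, flagged, threshold=50):
    # A solution is "bad" when the median amplitude is more than `threshold`
    # times larger than its own amplitude and it has not been flagged yet.
    badsols = np.where((np.median(amp) / amp > threshold) & (~flagged))[0]
    flagged[badsols] = True
    return badsols

amp = np.array([1.0, 1.1, 0.001, 0.9])        # third solution has a very low gain
flagged = np.zeros_like(amp, dtype=bool)
print(flag_low_gain(amp, flagged))            # [2]
print(flagged)                                # [False False  True False]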
bachiraoun/pysimplelog
SimpleLog.py
https://github.com/bachiraoun/pysimplelog/blob/2681ed5b1b8d7e66c3fff3ec3cca2b14ac571238/SimpleLog.py#L872-L883
def set_log_type_name(self, logType, name): """ Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name. """ assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType assert isinstance(name, basestring), "name must be a string" name = str(name) self.__logTypeNames[logType] = name
[ "def", "set_log_type_name", "(", "self", ",", "logType", ",", "name", ")", ":", "assert", "logType", "in", "self", ".", "__logTypeStdoutFlags", ".", "keys", "(", ")", ",", "\"logType '%s' not defined\"", "%", "logType", "assert", "isinstance", "(", "name", ",", "basestring", ")", ",", "\"name must be a string\"", "name", "=", "str", "(", "name", ")", "self", ".", "__logTypeNames", "[", "logType", "]", "=", "name" ]
Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name.
[ "Set", "a", "logtype", "name", "." ]
python
train
37.25
manahl/arctic
arctic/chunkstore/chunkstore.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L306-L404
def write(self, symbol, item, metadata=None, chunker=DateChunker(), audit=None, **kwargs): """ Writes data from item to symbol in the database Parameters ---------- symbol: str the symbol that will be used to reference the written data item: Dataframe or Series the data to write the database metadata: ? optional per symbol metadata chunker: Object of type Chunker A chunker that chunks the data in item audit: dict audit information kwargs: optional keyword args that are passed to the chunker. Includes: chunk_size: used by chunker to break data into discrete chunks. see specific chunkers for more information about this param. func: function function to apply to each chunk before writing. Function can not modify the date column. """ if not isinstance(item, (DataFrame, Series)): raise Exception("Can only chunk DataFrames and Series") self._arctic_lib.check_quota() previous_shas = [] doc = {} meta = {} doc[SYMBOL] = symbol doc[LEN] = len(item) doc[SERIALIZER] = self.serializer.TYPE doc[CHUNKER] = chunker.TYPE doc[USERMETA] = metadata sym = self._get_symbol_info(symbol) if sym: previous_shas = set([Binary(x[SHA]) for x in self._collection.find({SYMBOL: symbol}, projection={SHA: True, '_id': False}, )]) ops = [] meta_ops = [] chunk_count = 0 for start, end, chunk_size, record in chunker.to_chunks(item, **kwargs): chunk_count += 1 data = self.serializer.serialize(record) doc[CHUNK_SIZE] = chunk_size doc[METADATA] = {'columns': data[METADATA][COLUMNS] if COLUMNS in data[METADATA] else ''} meta = data[METADATA] for i in xrange(int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)): chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])} chunk[SEGMENT] = i chunk[START] = meta[START] = start chunk[END] = meta[END] = end chunk[SYMBOL] = meta[SYMBOL] = symbol dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')] chunk[SHA] = self._checksum(dates, chunk[DATA]) meta_ops.append(pymongo.ReplaceOne({SYMBOL: symbol, START: start, END: end}, meta, upsert=True)) if chunk[SHA] not in previous_shas: ops.append(pymongo.UpdateOne({SYMBOL: symbol, START: start, END: end, SEGMENT: chunk[SEGMENT]}, {'$set': chunk}, upsert=True)) else: # already exists, dont need to update in mongo previous_shas.remove(chunk[SHA]) if ops: self._collection.bulk_write(ops, ordered=False) if meta_ops: self._mdata.bulk_write(meta_ops, ordered=False) doc[CHUNK_COUNT] = chunk_count doc[APPEND_COUNT] = 0 if previous_shas: mongo_retry(self._collection.delete_many)({SYMBOL: symbol, SHA: {'$in': list(previous_shas)}}) mongo_retry(self._symbols.update_one)({SYMBOL: symbol}, {'$set': doc}, upsert=True) if audit is not None: audit['symbol'] = symbol audit['action'] = 'write' audit['chunks'] = chunk_count self._audit.insert_one(audit)
[ "def", "write", "(", "self", ",", "symbol", ",", "item", ",", "metadata", "=", "None", ",", "chunker", "=", "DateChunker", "(", ")", ",", "audit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "item", ",", "(", "DataFrame", ",", "Series", ")", ")", ":", "raise", "Exception", "(", "\"Can only chunk DataFrames and Series\"", ")", "self", ".", "_arctic_lib", ".", "check_quota", "(", ")", "previous_shas", "=", "[", "]", "doc", "=", "{", "}", "meta", "=", "{", "}", "doc", "[", "SYMBOL", "]", "=", "symbol", "doc", "[", "LEN", "]", "=", "len", "(", "item", ")", "doc", "[", "SERIALIZER", "]", "=", "self", ".", "serializer", ".", "TYPE", "doc", "[", "CHUNKER", "]", "=", "chunker", ".", "TYPE", "doc", "[", "USERMETA", "]", "=", "metadata", "sym", "=", "self", ".", "_get_symbol_info", "(", "symbol", ")", "if", "sym", ":", "previous_shas", "=", "set", "(", "[", "Binary", "(", "x", "[", "SHA", "]", ")", "for", "x", "in", "self", ".", "_collection", ".", "find", "(", "{", "SYMBOL", ":", "symbol", "}", ",", "projection", "=", "{", "SHA", ":", "True", ",", "'_id'", ":", "False", "}", ",", ")", "]", ")", "ops", "=", "[", "]", "meta_ops", "=", "[", "]", "chunk_count", "=", "0", "for", "start", ",", "end", ",", "chunk_size", ",", "record", "in", "chunker", ".", "to_chunks", "(", "item", ",", "*", "*", "kwargs", ")", ":", "chunk_count", "+=", "1", "data", "=", "self", ".", "serializer", ".", "serialize", "(", "record", ")", "doc", "[", "CHUNK_SIZE", "]", "=", "chunk_size", "doc", "[", "METADATA", "]", "=", "{", "'columns'", ":", "data", "[", "METADATA", "]", "[", "COLUMNS", "]", "if", "COLUMNS", "in", "data", "[", "METADATA", "]", "else", "''", "}", "meta", "=", "data", "[", "METADATA", "]", "for", "i", "in", "xrange", "(", "int", "(", "len", "(", "data", "[", "DATA", "]", ")", "/", "MAX_CHUNK_SIZE", "+", "1", ")", ")", ":", "chunk", "=", "{", "DATA", ":", "Binary", "(", "data", "[", "DATA", "]", "[", "i", "*", "MAX_CHUNK_SIZE", ":", "(", "i", "+", "1", ")", "*", "MAX_CHUNK_SIZE", "]", ")", "}", "chunk", "[", "SEGMENT", "]", "=", "i", "chunk", "[", "START", "]", "=", "meta", "[", "START", "]", "=", "start", "chunk", "[", "END", "]", "=", "meta", "[", "END", "]", "=", "end", "chunk", "[", "SYMBOL", "]", "=", "meta", "[", "SYMBOL", "]", "=", "symbol", "dates", "=", "[", "chunker", ".", "chunk_to_str", "(", "start", ")", ",", "chunker", ".", "chunk_to_str", "(", "end", ")", ",", "str", "(", "chunk", "[", "SEGMENT", "]", ")", ".", "encode", "(", "'ascii'", ")", "]", "chunk", "[", "SHA", "]", "=", "self", ".", "_checksum", "(", "dates", ",", "chunk", "[", "DATA", "]", ")", "meta_ops", ".", "append", "(", "pymongo", ".", "ReplaceOne", "(", "{", "SYMBOL", ":", "symbol", ",", "START", ":", "start", ",", "END", ":", "end", "}", ",", "meta", ",", "upsert", "=", "True", ")", ")", "if", "chunk", "[", "SHA", "]", "not", "in", "previous_shas", ":", "ops", ".", "append", "(", "pymongo", ".", "UpdateOne", "(", "{", "SYMBOL", ":", "symbol", ",", "START", ":", "start", ",", "END", ":", "end", ",", "SEGMENT", ":", "chunk", "[", "SEGMENT", "]", "}", ",", "{", "'$set'", ":", "chunk", "}", ",", "upsert", "=", "True", ")", ")", "else", ":", "# already exists, dont need to update in mongo", "previous_shas", ".", "remove", "(", "chunk", "[", "SHA", "]", ")", "if", "ops", ":", "self", ".", "_collection", ".", "bulk_write", "(", "ops", ",", "ordered", "=", "False", ")", "if", "meta_ops", ":", "self", ".", "_mdata", ".", "bulk_write", "(", "meta_ops", ",", "ordered", "=", "False", ")", "doc", "[", 
"CHUNK_COUNT", "]", "=", "chunk_count", "doc", "[", "APPEND_COUNT", "]", "=", "0", "if", "previous_shas", ":", "mongo_retry", "(", "self", ".", "_collection", ".", "delete_many", ")", "(", "{", "SYMBOL", ":", "symbol", ",", "SHA", ":", "{", "'$in'", ":", "list", "(", "previous_shas", ")", "}", "}", ")", "mongo_retry", "(", "self", ".", "_symbols", ".", "update_one", ")", "(", "{", "SYMBOL", ":", "symbol", "}", ",", "{", "'$set'", ":", "doc", "}", ",", "upsert", "=", "True", ")", "if", "audit", "is", "not", "None", ":", "audit", "[", "'symbol'", "]", "=", "symbol", "audit", "[", "'action'", "]", "=", "'write'", "audit", "[", "'chunks'", "]", "=", "chunk_count", "self", ".", "_audit", ".", "insert_one", "(", "audit", ")" ]
Writes data from item to symbol in the database Parameters ---------- symbol: str the symbol that will be used to reference the written data item: Dataframe or Series the data to write to the database metadata: ? optional per symbol metadata chunker: Object of type Chunker A chunker that chunks the data in item audit: dict audit information kwargs: optional keyword args that are passed to the chunker. Includes: chunk_size: used by chunker to break data into discrete chunks. see specific chunkers for more information about this param. func: function function to apply to each chunk before writing. Function cannot modify the date column.
[ "Writes", "data", "from", "item", "to", "symbol", "in", "the", "database" ]
python
train
41.767677
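The core idea in the write() method above is content-addressed deduplication: each chunk gets a SHA over its date range plus its serialized bytes, and chunks whose SHA already exists in the store are skipped. Below is a minimal, MongoDB-free sketch of that idea; the chunk format and the plain-dict store are made up for illustration and are not arctic's actual schema.

import hashlib

def checksum(start, end, payload):
    # Hash the chunk's date range together with its bytes, as write() does.
    sha = hashlib.sha1()
    sha.update(start.encode() + end.encode() + payload)
    return sha.hexdigest()

def write_chunks(store, symbol, chunks):
    """store: dict mapping (symbol, start, end) -> (sha, payload); chunks: iterable of (start, end, payload)."""
    previous_shas = {sha for sha, _ in store.values()}
    written = 0
    for start, end, payload in chunks:
        sha = checksum(start, end, payload)
        if sha in previous_shas:
            continue                      # unchanged chunk: nothing to rewrite
        store[(symbol, start, end)] = (sha, payload)
        written += 1
    return written

store = {}
data = [('2020-01', '2020-02', b'abc'), ('2020-02', '2020-03', b'def')]
print(write_chunks(store, 'SYM', data))   # 2 -- both chunks are new
print(write_chunks(store, 'SYM', data))   # 0 -- identical content is skipped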
edx/edx-enterprise
enterprise/admin/forms.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/forms.py#L388-L398
def clean(self): """ Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field. """ cleaned_data = super(EnterpriseCustomerAdminForm, self).clean() if 'catalog' in cleaned_data and not cleaned_data['catalog']: cleaned_data['catalog'] = None return cleaned_data
[ "def", "clean", "(", "self", ")", ":", "cleaned_data", "=", "super", "(", "EnterpriseCustomerAdminForm", ",", "self", ")", ".", "clean", "(", ")", "if", "'catalog'", "in", "cleaned_data", "and", "not", "cleaned_data", "[", "'catalog'", "]", ":", "cleaned_data", "[", "'catalog'", "]", "=", "None", "return", "cleaned_data" ]
Clean form fields prior to database entry. In this case, the major cleaning operation is substituting a None value for a blank value in the Catalog field.
[ "Clean", "form", "fields", "prior", "to", "database", "entry", "." ]
python
valid
38.454545
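A tiny stand-alone sketch of the substitution the clean() method performs, using a plain dict in place of Django's cleaned_data for illustration.

def clean_catalog(cleaned_data):
    # An empty/blank 'catalog' value is normalised to None before saving.
    if 'catalog' in cleaned_data and not cleaned_data['catalog']:
        cleaned_data['catalog'] = None
    return cleaned_data

print(clean_catalog({'catalog': ''}))        # {'catalog': None}
print(clean_catalog({'catalog': 'uuid-1'}))  # {'catalog': 'uuid-1'}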
fastai/fastai
fastai/torch_core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L377-L381
def flatten_check(out:Tensor, targ:Tensor) -> Tensor: "Check that `out` and `targ` have the same number of elements and flatten them." out,targ = out.contiguous().view(-1),targ.contiguous().view(-1) assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}." return out,targ
[ "def", "flatten_check", "(", "out", ":", "Tensor", ",", "targ", ":", "Tensor", ")", "->", "Tensor", ":", "out", ",", "targ", "=", "out", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", ",", "targ", ".", "contiguous", "(", ")", ".", "view", "(", "-", "1", ")", "assert", "len", "(", "out", ")", "==", "len", "(", "targ", ")", ",", "f\"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}.\"", "return", "out", ",", "targ" ]
Check that `out` and `targ` have the same number of elements and flatten them.
[ "Check", "that", "out", "and", "targ", "have", "the", "same", "number", "of", "elements", "and", "flatten", "them", "." ]
python
train
71.6
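A runnable restatement of flatten_check with a small driver, assuming PyTorch is installed; it mirrors the one-liner above so the shape bookkeeping is visible in isolation.

import torch

def flatten_check(out, targ):
    # Flatten both tensors and make sure they describe the same number of elements.
    out, targ = out.contiguous().view(-1), targ.contiguous().view(-1)
    assert len(out) == len(targ), (
        "Expected output and target to have the same number of elements "
        "but got {} and {}.".format(len(out), len(targ)))
    return out, targ

preds = torch.zeros(4, 3)
labels = torch.zeros(12)
flat_preds, flat_labels = flatten_check(preds, labels)
print(flat_preds.shape, flat_labels.shape)   # torch.Size([12]) torch.Size([12])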
knipknap/SpiffWorkflow
SpiffWorkflow/task.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/task.py#L584-L590
def complete(self): """ Called by the associated task to let us know that its state has changed (e.g. from FUTURE to COMPLETED.) """ self._set_state(self.COMPLETED) return self.task_spec._on_complete(self)
[ "def", "complete", "(", "self", ")", ":", "self", ".", "_set_state", "(", "self", ".", "COMPLETED", ")", "return", "self", ".", "task_spec", ".", "_on_complete", "(", "self", ")" ]
Called by the associated task to let us know that its state has changed (e.g. from FUTURE to COMPLETED.)
[ "Called", "by", "the", "associated", "task", "to", "let", "us", "know", "that", "its", "state", "has", "changed", "(", "e", ".", "g", ".", "from", "FUTURE", "to", "COMPLETED", ".", ")" ]
python
valid
35.285714
Zitrax/nose-dep
nosedep.py
https://github.com/Zitrax/nose-dep/blob/fd29c95e0e5eb2dbd821f6566b72dfcf42631226/nosedep.py#L195-L198
def split_on_condition(seq, condition): """Split a sequence into two iterables without looping twice""" l1, l2 = tee((condition(item), item) for item in seq) return (i for p, i in l1 if p), (i for p, i in l2 if not p)
[ "def", "split_on_condition", "(", "seq", ",", "condition", ")", ":", "l1", ",", "l2", "=", "tee", "(", "(", "condition", "(", "item", ")", ",", "item", ")", "for", "item", "in", "seq", ")", "return", "(", "i", "for", "p", ",", "i", "in", "l1", "if", "p", ")", ",", "(", "i", "for", "p", ",", "i", "in", "l2", "if", "not", "p", ")" ]
Split a sequence into two iterables without looping twice
[ "Split", "a", "sequence", "into", "two", "iterables", "without", "looping", "twice" ]
python
train
56.5
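A usage sketch of the tee-based partition above. It reproduces the helper verbatim with a small driver; note that each returned generator is consumed independently, and itertools.tee is what lets both branches share a single pass over the source.

from itertools import tee

def split_on_condition(seq, condition):
    # Pair every item with its predicate result, duplicate the stream with tee,
    # then keep matching items in one branch and the rest in the other.
    l1, l2 = tee((condition(item), item) for item in seq)
    return (i for p, i in l1 if p), (i for p, i in l2 if not p)

evens, odds = split_on_condition(range(10), lambda n: n % 2 == 0)
print(list(evens))   # [0, 2, 4, 6, 8]
print(list(odds))    # [1, 3, 5, 7, 9]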
gabstopper/smc-python
smc/elements/situations.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/situations.py#L291-L302
def parameter_values(self): """ Parameter values for this inspection situation. This correlate to the the situation_context. :rtype: list(SituationParameterValue) """ for param in self.data.get('parameter_values', []): cache = ElementCache(data=self.make_request(href=param)) name = '{}'.format(cache.type.title()).replace('_', '') yield type(name, (SituationParameterValue,), { 'data': cache})(name=cache.name, type=cache.type, href=param)
[ "def", "parameter_values", "(", "self", ")", ":", "for", "param", "in", "self", ".", "data", ".", "get", "(", "'parameter_values'", ",", "[", "]", ")", ":", "cache", "=", "ElementCache", "(", "data", "=", "self", ".", "make_request", "(", "href", "=", "param", ")", ")", "name", "=", "'{}'", ".", "format", "(", "cache", ".", "type", ".", "title", "(", ")", ")", ".", "replace", "(", "'_'", ",", "''", ")", "yield", "type", "(", "name", ",", "(", "SituationParameterValue", ",", ")", ",", "{", "'data'", ":", "cache", "}", ")", "(", "name", "=", "cache", ".", "name", ",", "type", "=", "cache", ".", "type", ",", "href", "=", "param", ")" ]
Parameter values for this inspection situation. This correlates to the situation_context. :rtype: list(SituationParameterValue)
[ "Parameter", "values", "for", "this", "inspection", "situation", ".", "This", "correlate", "to", "the", "the", "situation_context", ".", ":", "rtype", ":", "list", "(", "SituationParameterValue", ")" ]
python
train
44.833333
ronaldguillen/wave
wave/serializers.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1078-L1124
def build_standard_field(self, field_name, model_field): """ Create regular model fields. """ field_mapping = ClassLookupDict(self.serializer_field_mapping) field_class = field_mapping[model_field] field_kwargs = get_field_kwargs(field_name, model_field) if 'choices' in field_kwargs: # Fields with choices get coerced into `ChoiceField` # instead of using their regular typed field. field_class = self.serializer_choice_field # Some model fields may introduce kwargs that would not be valid # for the choice field. We need to strip these out. # Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES) valid_kwargs = set(( 'read_only', 'write_only', 'required', 'default', 'initial', 'source', 'label', 'help_text', 'style', 'error_messages', 'validators', 'allow_null', 'allow_blank', 'choices' )) for key in list(field_kwargs.keys()): if key not in valid_kwargs: field_kwargs.pop(key) if not issubclass(field_class, ModelField): # `model_field` is only valid for the fallback case of # `ModelField`, which is used when no other typed field # matched to the model field. field_kwargs.pop('model_field', None) if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField): # `allow_blank` is only valid for textual fields. field_kwargs.pop('allow_blank', None) if postgres_fields and isinstance(model_field, postgres_fields.ArrayField): # Populate the `child` argument on `ListField` instances generated # for the PostgrSQL specfic `ArrayField`. child_model_field = model_field.base_field child_field_class, child_field_kwargs = self.build_standard_field( 'child', child_model_field ) field_kwargs['child'] = child_field_class(**child_field_kwargs) return field_class, field_kwargs
[ "def", "build_standard_field", "(", "self", ",", "field_name", ",", "model_field", ")", ":", "field_mapping", "=", "ClassLookupDict", "(", "self", ".", "serializer_field_mapping", ")", "field_class", "=", "field_mapping", "[", "model_field", "]", "field_kwargs", "=", "get_field_kwargs", "(", "field_name", ",", "model_field", ")", "if", "'choices'", "in", "field_kwargs", ":", "# Fields with choices get coerced into `ChoiceField`", "# instead of using their regular typed field.", "field_class", "=", "self", ".", "serializer_choice_field", "# Some model fields may introduce kwargs that would not be valid", "# for the choice field. We need to strip these out.", "# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)", "valid_kwargs", "=", "set", "(", "(", "'read_only'", ",", "'write_only'", ",", "'required'", ",", "'default'", ",", "'initial'", ",", "'source'", ",", "'label'", ",", "'help_text'", ",", "'style'", ",", "'error_messages'", ",", "'validators'", ",", "'allow_null'", ",", "'allow_blank'", ",", "'choices'", ")", ")", "for", "key", "in", "list", "(", "field_kwargs", ".", "keys", "(", ")", ")", ":", "if", "key", "not", "in", "valid_kwargs", ":", "field_kwargs", ".", "pop", "(", "key", ")", "if", "not", "issubclass", "(", "field_class", ",", "ModelField", ")", ":", "# `model_field` is only valid for the fallback case of", "# `ModelField`, which is used when no other typed field", "# matched to the model field.", "field_kwargs", ".", "pop", "(", "'model_field'", ",", "None", ")", "if", "not", "issubclass", "(", "field_class", ",", "CharField", ")", "and", "not", "issubclass", "(", "field_class", ",", "ChoiceField", ")", ":", "# `allow_blank` is only valid for textual fields.", "field_kwargs", ".", "pop", "(", "'allow_blank'", ",", "None", ")", "if", "postgres_fields", "and", "isinstance", "(", "model_field", ",", "postgres_fields", ".", "ArrayField", ")", ":", "# Populate the `child` argument on `ListField` instances generated", "# for the PostgrSQL specfic `ArrayField`.", "child_model_field", "=", "model_field", ".", "base_field", "child_field_class", ",", "child_field_kwargs", "=", "self", ".", "build_standard_field", "(", "'child'", ",", "child_model_field", ")", "field_kwargs", "[", "'child'", "]", "=", "child_field_class", "(", "*", "*", "child_field_kwargs", ")", "return", "field_class", ",", "field_kwargs" ]
Create regular model fields.
[ "Create", "regular", "model", "fields", "." ]
python
train
46.021277
Spinmob/spinmob
_data.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L1022-L1055
def pop_header(self, hkey, ignore_error=False): """ This will remove and return the specified header value. Parameters ---------- hkey Header key you wish to pop. You can specify either a key string or an index. ignore_error=False Whether to quietly ignore any errors (i.e., hkey not found). """ # try the integer approach first to allow negative values if type(hkey) is not str: try: return self.headers.pop(self.hkeys.pop(hkey)) except: if not ignore_error: print("ERROR: pop_header() could not find hkey "+str(hkey)) return None else: try: # find the key integer and pop it hkey = self.hkeys.index(hkey) # pop it! return self.headers.pop(self.hkeys.pop(hkey)) except: if not ignore_error: print("ERROR: pop_header() could not find hkey "+str(hkey)) return
[ "def", "pop_header", "(", "self", ",", "hkey", ",", "ignore_error", "=", "False", ")", ":", "# try the integer approach first to allow negative values", "if", "type", "(", "hkey", ")", "is", "not", "str", ":", "try", ":", "return", "self", ".", "headers", ".", "pop", "(", "self", ".", "hkeys", ".", "pop", "(", "hkey", ")", ")", "except", ":", "if", "not", "ignore_error", ":", "print", "(", "\"ERROR: pop_header() could not find hkey \"", "+", "str", "(", "hkey", ")", ")", "return", "None", "else", ":", "try", ":", "# find the key integer and pop it", "hkey", "=", "self", ".", "hkeys", ".", "index", "(", "hkey", ")", "# pop it!", "return", "self", ".", "headers", ".", "pop", "(", "self", ".", "hkeys", ".", "pop", "(", "hkey", ")", ")", "except", ":", "if", "not", "ignore_error", ":", "print", "(", "\"ERROR: pop_header() could not find hkey \"", "+", "str", "(", "hkey", ")", ")", "return" ]
This will remove and return the specified header value. Parameters ---------- hkey Header key you wish to pop. You can specify either a key string or an index. ignore_error=False Whether to quietly ignore any errors (i.e., hkey not found).
[ "This", "will", "remove", "and", "return", "the", "specified", "header", "value", "." ]
python
train
32.352941
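A self-contained sketch of the key-or-index lookup pattern used by pop_header, assuming the object keeps a parallel list of keys (hkeys) and a dict of values (headers) as the Spinmob databox does; the functions and sample data below are hypothetical.

def pop_header(hkeys, headers, hkey, ignore_error=False):
    # Accept either an integer index (including negative ones) or a key string.
    try:
        if not isinstance(hkey, str):
            return headers.pop(hkeys.pop(hkey))              # index path
        return headers.pop(hkeys.pop(hkeys.index(hkey)))     # key-name path
    except (IndexError, ValueError, KeyError):
        if not ignore_error:
            print("ERROR: pop_header() could not find hkey " + str(hkey))
        return None

hkeys = ['run', 'temperature']
headers = {'run': 7, 'temperature': 4.2}
print(pop_header(hkeys, headers, 'temperature'))   # 4.2
print(pop_header(hkeys, headers, -1))              # 7 (last remaining key)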
klmitch/turnstile
turnstile/limits.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L509-L513
def messages(self): """Return remaining messages before limiting.""" return int(math.floor(((self.limit.unit_value - self.level) / self.limit.unit_value) * self.limit.value))
[ "def", "messages", "(", "self", ")", ":", "return", "int", "(", "math", ".", "floor", "(", "(", "(", "self", ".", "limit", ".", "unit_value", "-", "self", ".", "level", ")", "/", "self", ".", "limit", ".", "unit_value", ")", "*", "self", ".", "limit", ".", "value", ")", ")" ]
Return remaining messages before limiting.
[ "Return", "remaining", "messages", "before", "limiting", "." ]
python
train
43.6
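A worked example of the remaining-messages formula above: with a limit of value=10 messages per unit_value=60 s bucket and a current level of 45 s of usage, floor(((60 - 45) / 60) * 10) = 2 messages remain. The standalone sketch below uses hypothetical argument names mirroring the limit/level attributes.

import math

def remaining_messages(unit_value, level, value):
    # Fraction of the bucket still unused, scaled to the message allowance.
    return int(math.floor(((unit_value - level) / unit_value) * value))

print(remaining_messages(unit_value=60.0, level=45.0, value=10))   # 2
print(remaining_messages(unit_value=60.0, level=30.0, value=10))   # 5
print(remaining_messages(unit_value=60.0, level=60.0, value=10))   # 0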
bastibe/SoundFile
soundfile.py
https://github.com/bastibe/SoundFile/blob/161e930da9c9ea76579b6ee18a131e10bca8a605/soundfile.py#L1273-L1282
def _check_buffer(self, data, ctype): """Convert buffer to cdata and check for valid size.""" assert ctype in _ffi_types.values() if not isinstance(data, bytes): data = _ffi.from_buffer(data) frames, remainder = divmod(len(data), self.channels * _ffi.sizeof(ctype)) if remainder: raise ValueError("Data size must be a multiple of frame size") return data, frames
[ "def", "_check_buffer", "(", "self", ",", "data", ",", "ctype", ")", ":", "assert", "ctype", "in", "_ffi_types", ".", "values", "(", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "_ffi", ".", "from_buffer", "(", "data", ")", "frames", ",", "remainder", "=", "divmod", "(", "len", "(", "data", ")", ",", "self", ".", "channels", "*", "_ffi", ".", "sizeof", "(", "ctype", ")", ")", "if", "remainder", ":", "raise", "ValueError", "(", "\"Data size must be a multiple of frame size\"", ")", "return", "data", ",", "frames" ]
Convert buffer to cdata and check for valid size.
[ "Convert", "buffer", "to", "cdata", "and", "check", "for", "valid", "size", "." ]
python
train
46
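A cffi-free sketch of the size check performed above: the raw byte length must be an exact multiple of channels times bytes-per-sample, and the quotient is the frame count. Here itemsize stands in for _ffi.sizeof(ctype), so this is an illustration of the arithmetic rather than SoundFile's actual buffer handling.

def check_buffer(data, channels, itemsize):
    # One frame = `channels` samples, each `itemsize` bytes wide.
    frames, remainder = divmod(len(data), channels * itemsize)
    if remainder:
        raise ValueError("Data size must be a multiple of frame size")
    return frames

stereo_float32 = bytes(8 * 2 * 4)            # 8 frames, 2 channels, 4-byte samples
print(check_buffer(stereo_float32, 2, 4))    # 8
# check_buffer(bytes(10), 2, 4)              # would raise ValueError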
Alignak-monitoring/alignak
alignak/http/arbiter_interface.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/arbiter_interface.py#L820-L955
def realms(self, details=False): """Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict """ def get_realm_info(realm, realms, satellites, details=False): """Get the realm and its children information :return: None """ res = { "name": realm.get_name(), "level": realm.level, "hosts": realm.members, "hostgroups": realm.group_members, "children": {}, "satellites": { } } for child in realm.realm_members: child = realms.find_by_name(child) if not child: continue realm_infos = get_realm_info(child, realms, satellites, details=details) res['children'][child.get_name()] = realm_infos for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']: res["satellites"][sat_type + 's'] = [] sats = realm.get_potential_satellites_by_type(satellites, sat_type) for sat in sats: if details: res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json() else: res["satellites"][sat_type + 's'].append(sat.name) return res if details is not False: details = bool(details) # Report our daemons states, but only if a dispatcher and the configuration is loaded if not getattr(self.app, 'dispatcher', None) or not getattr(self.app, 'conf', None): return {'_status': u'ERR', '_message': "Not yet available. Please come back later."} res = {} higher_realms = [realm for realm in self.app.conf.realms if realm.level == 0] for realm in higher_realms: res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms, self.app.dispatcher.all_daemons_links) return res
[ "def", "realms", "(", "self", ",", "details", "=", "False", ")", ":", "def", "get_realm_info", "(", "realm", ",", "realms", ",", "satellites", ",", "details", "=", "False", ")", ":", "\"\"\"Get the realm and its children information\n\n :return: None\n \"\"\"", "res", "=", "{", "\"name\"", ":", "realm", ".", "get_name", "(", ")", ",", "\"level\"", ":", "realm", ".", "level", ",", "\"hosts\"", ":", "realm", ".", "members", ",", "\"hostgroups\"", ":", "realm", ".", "group_members", ",", "\"children\"", ":", "{", "}", ",", "\"satellites\"", ":", "{", "}", "}", "for", "child", "in", "realm", ".", "realm_members", ":", "child", "=", "realms", ".", "find_by_name", "(", "child", ")", "if", "not", "child", ":", "continue", "realm_infos", "=", "get_realm_info", "(", "child", ",", "realms", ",", "satellites", ",", "details", "=", "details", ")", "res", "[", "'children'", "]", "[", "child", ".", "get_name", "(", ")", "]", "=", "realm_infos", "for", "sat_type", "in", "[", "'scheduler'", ",", "'reactionner'", ",", "'broker'", ",", "'receiver'", ",", "'poller'", "]", ":", "res", "[", "\"satellites\"", "]", "[", "sat_type", "+", "'s'", "]", "=", "[", "]", "sats", "=", "realm", ".", "get_potential_satellites_by_type", "(", "satellites", ",", "sat_type", ")", "for", "sat", "in", "sats", ":", "if", "details", ":", "res", "[", "\"satellites\"", "]", "[", "sat_type", "+", "'s'", "]", "[", "sat", ".", "name", "]", "=", "sat", ".", "give_satellite_json", "(", ")", "else", ":", "res", "[", "\"satellites\"", "]", "[", "sat_type", "+", "'s'", "]", ".", "append", "(", "sat", ".", "name", ")", "return", "res", "if", "details", "is", "not", "False", ":", "details", "=", "bool", "(", "details", ")", "# Report our daemons states, but only if a dispatcher and the configuration is loaded", "if", "not", "getattr", "(", "self", ".", "app", ",", "'dispatcher'", ",", "None", ")", "or", "not", "getattr", "(", "self", ".", "app", ",", "'conf'", ",", "None", ")", ":", "return", "{", "'_status'", ":", "u'ERR'", ",", "'_message'", ":", "\"Not yet available. Please come back later.\"", "}", "res", "=", "{", "}", "higher_realms", "=", "[", "realm", "for", "realm", "in", "self", ".", "app", ".", "conf", ".", "realms", "if", "realm", ".", "level", "==", "0", "]", "for", "realm", "in", "higher_realms", ":", "res", "[", "realm", ".", "get_name", "(", ")", "]", "=", "get_realm_info", "(", "realm", ",", "self", ".", "app", ".", "conf", ".", "realms", ",", "self", ".", "app", ".", "dispatcher", ".", "all_daemons_links", ")", "return", "res" ]
Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict
[ "Return", "the", "realms", "/", "satellites", "configuration" ]
python
train
39.397059
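A plain-dict sketch of the recursive realm walk performed by get_realm_info above, with made-up realm records standing in for alignak's Realm objects; satellites and host members are omitted to keep the recursion visible.

def realm_info(realm, realms_by_name):
    # Walk the realm tree depth-first, collecting a nested summary per realm.
    info = {'name': realm['name'], 'level': realm['level'], 'children': {}}
    for child_name in realm.get('realm_members', []):
        child = realms_by_name.get(child_name)
        if child is None:
            continue
        info['children'][child_name] = realm_info(child, realms_by_name)
    return info

realms_by_name = {
    'All':   {'name': 'All',   'level': 0, 'realm_members': ['North']},
    'North': {'name': 'North', 'level': 1, 'realm_members': []},
}
print(realm_info(realms_by_name['All'], realms_by_name))
# {'name': 'All', 'level': 0, 'children': {'North': {'name': 'North', 'level': 1, 'children': {}}}}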
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_heliplane.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_heliplane.py#L31-L100
def mavlink_packet(self, msg): '''handle an incoming mavlink packet''' type = msg.get_type() master = self.master # add some status fields if type in [ 'RC_CHANNELS' ]: ilock = self.get_rc_input(msg, self.interlock_channel) if ilock <= 0: self.console.set_status('ILOCK', 'ILOCK:--', fg='grey', row=4) elif ilock >= 1800: self.console.set_status('ILOCK', 'ILOCK:ON', fg='red', row=4) else: self.console.set_status('ILOCK', 'ILOCK:OFF', fg='green', row=4) override = self.get_rc_input(msg, self.override_channel) if override <= 0: self.console.set_status('OVR', 'OVR:--', fg='grey', row=4) elif override >= 1800: self.console.set_status('OVR', 'OVR:ON', fg='red', row=4) else: self.console.set_status('OVR', 'OVR:OFF', fg='green', row=4) zeroi = self.get_rc_input(msg, self.zero_I_channel) if zeroi <= 0: self.console.set_status('ZEROI', 'ZEROI:--', fg='grey', row=4) elif zeroi >= 1800: self.console.set_status('ZEROI', 'ZEROI:ON', fg='red', row=4) else: self.console.set_status('ZEROI', 'ZEROI:OFF', fg='green', row=4) novtol = self.get_rc_input(msg, self.no_vtol_channel) if novtol <= 0: self.console.set_status('NOVTOL', 'NOVTOL:--', fg='grey', row=4) elif novtol >= 1800: self.console.set_status('NOVTOL', 'NOVTOL:ON', fg='red', row=4) else: self.console.set_status('NOVTOL', 'NOVTOL:OFF', fg='green', row=4) if type in [ 'SERVO_OUTPUT_RAW' ]: rsc = self.get_pwm_output(msg, self.rsc_out_channel) if rsc <= 0: self.console.set_status('RSC', 'RSC:--', fg='grey', row=4) elif rsc <= 1200: self.console.set_status('RSC', 'RSC:%u' % rsc, fg='red', row=4) elif rsc <= 1600: self.console.set_status('RSC', 'RSC:%u' % rsc, fg='orange', row=4) else: self.console.set_status('RSC', 'RSC:%u' % rsc, fg='green', row=4) thr = self.get_pwm_output(msg, self.fwd_thr_channel) if thr <= 0: self.console.set_status('FTHR', 'FTHR:--', fg='grey', row=4) elif thr <= 1100: self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='red', row=4) elif thr <= 1500: self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='orange', row=4) else: self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='green', row=4) if type in [ 'RPM' ]: rpm = msg.rpm1 if rpm < 1000: rpm_colour = 'red' elif rpm < 2000: rpm_colour = 'orange' else: rpm_colour = 'green' self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4)
[ "def", "mavlink_packet", "(", "self", ",", "msg", ")", ":", "type", "=", "msg", ".", "get_type", "(", ")", "master", "=", "self", ".", "master", "# add some status fields", "if", "type", "in", "[", "'RC_CHANNELS'", "]", ":", "ilock", "=", "self", ".", "get_rc_input", "(", "msg", ",", "self", ".", "interlock_channel", ")", "if", "ilock", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'ILOCK'", ",", "'ILOCK:--'", ",", "fg", "=", "'grey'", ",", "row", "=", "4", ")", "elif", "ilock", ">=", "1800", ":", "self", ".", "console", ".", "set_status", "(", "'ILOCK'", ",", "'ILOCK:ON'", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'ILOCK'", ",", "'ILOCK:OFF'", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "override", "=", "self", ".", "get_rc_input", "(", "msg", ",", "self", ".", "override_channel", ")", "if", "override", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'OVR'", ",", "'OVR:--'", ",", "fg", "=", "'grey'", ",", "row", "=", "4", ")", "elif", "override", ">=", "1800", ":", "self", ".", "console", ".", "set_status", "(", "'OVR'", ",", "'OVR:ON'", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'OVR'", ",", "'OVR:OFF'", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "zeroi", "=", "self", ".", "get_rc_input", "(", "msg", ",", "self", ".", "zero_I_channel", ")", "if", "zeroi", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'ZEROI'", ",", "'ZEROI:--'", ",", "fg", "=", "'grey'", ",", "row", "=", "4", ")", "elif", "zeroi", ">=", "1800", ":", "self", ".", "console", ".", "set_status", "(", "'ZEROI'", ",", "'ZEROI:ON'", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'ZEROI'", ",", "'ZEROI:OFF'", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "novtol", "=", "self", ".", "get_rc_input", "(", "msg", ",", "self", ".", "no_vtol_channel", ")", "if", "novtol", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'NOVTOL'", ",", "'NOVTOL:--'", ",", "fg", "=", "'grey'", ",", "row", "=", "4", ")", "elif", "novtol", ">=", "1800", ":", "self", ".", "console", ".", "set_status", "(", "'NOVTOL'", ",", "'NOVTOL:ON'", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'NOVTOL'", ",", "'NOVTOL:OFF'", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "if", "type", "in", "[", "'SERVO_OUTPUT_RAW'", "]", ":", "rsc", "=", "self", ".", "get_pwm_output", "(", "msg", ",", "self", ".", "rsc_out_channel", ")", "if", "rsc", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'RSC'", ",", "'RSC:--'", ",", "fg", "=", "'grey'", ",", "row", "=", "4", ")", "elif", "rsc", "<=", "1200", ":", "self", ".", "console", ".", "set_status", "(", "'RSC'", ",", "'RSC:%u'", "%", "rsc", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "elif", "rsc", "<=", "1600", ":", "self", ".", "console", ".", "set_status", "(", "'RSC'", ",", "'RSC:%u'", "%", "rsc", ",", "fg", "=", "'orange'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'RSC'", ",", "'RSC:%u'", "%", "rsc", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "thr", "=", "self", ".", "get_pwm_output", "(", "msg", ",", "self", ".", "fwd_thr_channel", ")", "if", "thr", "<=", "0", ":", "self", ".", "console", ".", "set_status", "(", "'FTHR'", ",", "'FTHR:--'", ",", "fg", "=", 
"'grey'", ",", "row", "=", "4", ")", "elif", "thr", "<=", "1100", ":", "self", ".", "console", ".", "set_status", "(", "'FTHR'", ",", "'FTHR:%u'", "%", "thr", ",", "fg", "=", "'red'", ",", "row", "=", "4", ")", "elif", "thr", "<=", "1500", ":", "self", ".", "console", ".", "set_status", "(", "'FTHR'", ",", "'FTHR:%u'", "%", "thr", ",", "fg", "=", "'orange'", ",", "row", "=", "4", ")", "else", ":", "self", ".", "console", ".", "set_status", "(", "'FTHR'", ",", "'FTHR:%u'", "%", "thr", ",", "fg", "=", "'green'", ",", "row", "=", "4", ")", "if", "type", "in", "[", "'RPM'", "]", ":", "rpm", "=", "msg", ".", "rpm1", "if", "rpm", "<", "1000", ":", "rpm_colour", "=", "'red'", "elif", "rpm", "<", "2000", ":", "rpm_colour", "=", "'orange'", "else", ":", "rpm_colour", "=", "'green'", "self", ".", "console", ".", "set_status", "(", "'RPM'", ",", "'RPM: %u'", "%", "rpm", ",", "fg", "=", "rpm_colour", ",", "row", "=", "4", ")" ]
handle an incoming mavlink packet
[ "handle", "an", "incoming", "mavlink", "packet" ]
python
train
43.671429
dmwm/DBS
Server/Python/src/dbs/web/DBSReaderModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L800-L850
def listBlockSummaries(self, block_name="", dataset="", detail=False): """ API that returns summary information like total size and total number of events in a dataset or a list of blocks :param block_name: list block summaries for block_name(s) :type block_name: str, list :param dataset: list block summaries for all blocks in dataset :type dataset: str :param detail: list summary by block names if detail=True, default=False :type detail: str, bool :returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided """ if bool(dataset)+bool(block_name)!=1: dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "Dataset or block_names must be specified at a time.") if block_name and isinstance(block_name, basestring): try: block_name = [str(block_name)] except: dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ") for this_block_name in block_name: if re.search("[*, %]", this_block_name): dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No wildcards are allowed in block_name list") if re.search("[*, %]", dataset): dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No wildcards are allowed in dataset") data = [] try: with self.dbi.connection() as conn: data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) for item in data: yield item
[ "def", "listBlockSummaries", "(", "self", ",", "block_name", "=", "\"\"", ",", "dataset", "=", "\"\"", ",", "detail", "=", "False", ")", ":", "if", "bool", "(", "dataset", ")", "+", "bool", "(", "block_name", ")", "!=", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "dbsExceptionCode", "[", "\"dbsException-invalid-input2\"", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"Dataset or block_names must be specified at a time.\"", ")", "if", "block_name", "and", "isinstance", "(", "block_name", ",", "basestring", ")", ":", "try", ":", "block_name", "=", "[", "str", "(", "block_name", ")", "]", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Invalid block_name for listBlockSummaries. \"", ")", "for", "this_block_name", "in", "block_name", ":", "if", "re", ".", "search", "(", "\"[*, %]\"", ",", "this_block_name", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "dbsExceptionCode", "[", "\"dbsException-invalid-input2\"", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"No wildcards are allowed in block_name list\"", ")", "if", "re", ".", "search", "(", "\"[*, %]\"", ",", "dataset", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "dbsExceptionCode", "[", "\"dbsException-invalid-input2\"", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"No wildcards are allowed in dataset\"", ")", "data", "=", "[", "]", "try", ":", "with", "self", ".", "dbi", ".", "connection", "(", ")", "as", "conn", ":", "data", "=", "self", ".", "dbsBlockSummaryListDAO", ".", "execute", "(", "conn", ",", "block_name", ",", "dataset", ",", "detail", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listBlockSummaries. %s\\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "for", "item", "in", "data", ":", "yield", "item" ]
API that returns summary information like total size and total number of events in a dataset or a list of blocks :param block_name: list block summaries for block_name(s) :type block_name: str, list :param dataset: list block summaries for all blocks in dataset :type dataset: str :param detail: list summary by block names if detail=True, default=False :type detail: str, bool :returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
[ "API", "that", "returns", "summary", "information", "like", "total", "size", "and", "total", "number", "of", "events", "in", "a", "dataset", "or", "a", "list", "of", "blocks" ]
python
train
51.764706
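The first guard in listBlockSummaries is the exactly-one-of check bool(dataset) + bool(block_name) != 1, which rejects both the neither-given and the both-given cases with a single comparison. A tiny sketch of that pattern, with a generic ValueError in place of the DBS exception handler:

def require_exactly_one(dataset="", block_name=""):
    # bool() maps empty strings/lists to 0 and non-empty ones to 1,
    # so the sum is 1 only when exactly one argument was supplied.
    if bool(dataset) + bool(block_name) != 1:
        raise ValueError("Dataset or block_name must be specified, but not both.")

require_exactly_one(dataset="/A/B/C")                              # ok
# require_exactly_one()                                            # would raise ValueError
# require_exactly_one(dataset="/A/B/C", block_name="/A/B/C#1")     # would raise ValueError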
gamechanger/dusty
dusty/systems/docker/compose.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/systems/docker/compose.py#L64-L93
def _compose_restart(services): """Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318""" def _restart_container(client, container): log_to_client('Restarting {}'.format(get_canonical_container_name(container))) client.restart(container['Id'], timeout=1) assembled_specs = get_assembled_specs() if services == []: services = [spec.name for spec in assembled_specs.get_apps_and_services()] logging.info('Restarting service containers from list: {}'.format(services)) client = get_docker_client() for service in services: container = get_container_for_app_or_service(service, include_exited=True) if container is None: log_to_client('No container found for {}'.format(service)) continue stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs) if stopped_linked_containers: log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format( stopped_linked_containers, service)) else: _restart_container(client, container)
[ "def", "_compose_restart", "(", "services", ")", ":", "def", "_restart_container", "(", "client", ",", "container", ")", ":", "log_to_client", "(", "'Restarting {}'", ".", "format", "(", "get_canonical_container_name", "(", "container", ")", ")", ")", "client", ".", "restart", "(", "container", "[", "'Id'", "]", ",", "timeout", "=", "1", ")", "assembled_specs", "=", "get_assembled_specs", "(", ")", "if", "services", "==", "[", "]", ":", "services", "=", "[", "spec", ".", "name", "for", "spec", "in", "assembled_specs", ".", "get_apps_and_services", "(", ")", "]", "logging", ".", "info", "(", "'Restarting service containers from list: {}'", ".", "format", "(", "services", ")", ")", "client", "=", "get_docker_client", "(", ")", "for", "service", "in", "services", ":", "container", "=", "get_container_for_app_or_service", "(", "service", ",", "include_exited", "=", "True", ")", "if", "container", "is", "None", ":", "log_to_client", "(", "'No container found for {}'", ".", "format", "(", "service", ")", ")", "continue", "stopped_linked_containers", "=", "_check_stopped_linked_containers", "(", "container", ",", "assembled_specs", ")", "if", "stopped_linked_containers", ":", "log_to_client", "(", "'No running containers {0}, which are linked to by {1}. Cannot restart {1}'", ".", "format", "(", "stopped_linked_containers", ",", "service", ")", ")", "else", ":", "_restart_container", "(", "client", ",", "container", ")" ]
Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318
[ "Well", "this", "is", "annoying", ".", "Compose", "1", ".", "2", "shipped", "with", "the", "restart", "functionality", "fucking", "broken", "so", "we", "can", "t", "set", "a", "faster", "timeout", "than", "10", "seconds", "(", "which", "is", "way", "too", "long", ")", "using", "Compose", ".", "We", "are", "therefore", "resigned", "to", "trying", "to", "hack", "this", "together", "ourselves", ".", "Lame", "." ]
python
valid
48.4
aiidateam/aiida-codtools
aiida_codtools/cli/workflows/cif_clean.py
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/cli/workflows/cif_clean.py#L46-L176
def launch_cif_clean(cif_filter, cif_select, group_cif_raw, group_cif_clean, group_structure, group_workchain, node, max_entries, skip_check, parse_engine, daemon): """Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes. It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if successful, will be added to the `group-structure` group. """ # pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches import inspect from datetime import datetime from aiida import orm from aiida.engine import launch from aiida.plugins import DataFactory, WorkflowFactory from aiida_codtools.common.cli import echo_utc from aiida_codtools.common.resources import get_default_options from aiida_codtools.common.utils import get_input_node CifData = DataFactory('cif') # pylint: disable=invalid-name CifCleanWorkChain = WorkflowFactory('codtools.cif_clean') # pylint: disable=invalid-name # Collect the dictionary of not None parameters passed to the launch script and print to screen local_vars = locals() launch_paramaters = {} for arg in inspect.getargspec(launch_cif_clean.callback).args: # pylint: disable=deprecated-method if arg in local_vars and local_vars[arg]: launch_paramaters[arg] = local_vars[arg] click.echo('=' * 80) click.echo('Starting on {}'.format(datetime.utcnow().isoformat())) click.echo('Launch parameters: {}'.format(launch_paramaters)) click.echo('-' * 80) if group_cif_raw is not None: # Get CifData nodes that should actually be submitted according to the input filters builder = orm.QueryBuilder() builder.append(orm.Group, filters={'id': {'==': group_cif_raw.pk}}, tag='group') if skip_check: builder.append(CifData, with_group='group', project=['*']) else: # Get CifData nodes that already have an associated workchain node in the `group_workchain` group. 
submitted = orm.QueryBuilder() submitted.append(orm.WorkChainNode, tag='workchain') submitted.append(orm.Group, filters={'id': {'==': group_workchain.pk}}, with_node='workchain') submitted.append(orm.CifData, with_outgoing='workchain', tag='data', project=['id']) submitted_nodes = set(pk for entry in submitted.all() for pk in entry) if submitted_nodes: filters = {'id': {'!in': submitted_nodes}} else: filters = {} # Get all CifData nodes that are not included in the submitted node list builder.append(CifData, with_group='group', filters=filters, project=['*']) if max_entries is not None: builder.limit(int(max_entries)) nodes = [entry[0] for entry in builder.all()] elif node is not None: nodes = [node] else: raise click.BadParameter('you have to specify either --group-cif-raw or --node') counter = 0 node_cif_filter_parameters = get_input_node(orm.Dict, { 'fix-syntax-errors': True, 'use-c-parser': True, 'use-datablocks-without-coordinates': True, }) node_cif_select_parameters = get_input_node(orm.Dict, { 'canonicalize-tag-names': True, 'dont-treat-dots-as-underscores': True, 'invert': True, 'tags': '_publ_author_name,_citation_journal_abbrev', 'use-c-parser': True, }) node_options = get_input_node(orm.Dict, get_default_options()) node_parse_engine = get_input_node(orm.Str, parse_engine) node_site_tolerance = get_input_node(orm.Float, 5E-4) node_symprec = get_input_node(orm.Float, 5E-3) for cif in nodes: inputs = { 'cif': cif, 'cif_filter': cif_filter, 'cif_select': cif_select, 'cif_filter_parameters': node_cif_filter_parameters, 'cif_select_parameters': node_cif_select_parameters, 'options': node_options, 'parse_engine': node_parse_engine, 'site_tolerance': node_site_tolerance, 'symprec': node_symprec, } if group_cif_clean is not None: inputs['group_cif'] = group_cif_clean if group_structure is not None: inputs['group_structure'] = group_structure if daemon: workchain = launch.submit(CifCleanWorkChain, **inputs) echo_utc('CifData<{}> submitting: {}<{}>'.format(cif.pk, CifCleanWorkChain.__name__, workchain.pk)) else: echo_utc('CifData<{}> running: {}'.format(cif.pk, CifCleanWorkChain.__name__)) _, workchain = launch.run_get_node(CifCleanWorkChain, **inputs) if group_workchain is not None: group_workchain.add_nodes([workchain]) counter += 1 if max_entries is not None and counter >= max_entries: break click.echo('-' * 80) click.echo('Submitted {} new workchains'.format(counter)) click.echo('Stopping on {}'.format(datetime.utcnow().isoformat())) click.echo('=' * 80)
[ "def", "launch_cif_clean", "(", "cif_filter", ",", "cif_select", ",", "group_cif_raw", ",", "group_cif_clean", ",", "group_structure", ",", "group_workchain", ",", "node", ",", "max_entries", ",", "skip_check", ",", "parse_engine", ",", "daemon", ")", ":", "# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches", "import", "inspect", "from", "datetime", "import", "datetime", "from", "aiida", "import", "orm", "from", "aiida", ".", "engine", "import", "launch", "from", "aiida", ".", "plugins", "import", "DataFactory", ",", "WorkflowFactory", "from", "aiida_codtools", ".", "common", ".", "cli", "import", "echo_utc", "from", "aiida_codtools", ".", "common", ".", "resources", "import", "get_default_options", "from", "aiida_codtools", ".", "common", ".", "utils", "import", "get_input_node", "CifData", "=", "DataFactory", "(", "'cif'", ")", "# pylint: disable=invalid-name", "CifCleanWorkChain", "=", "WorkflowFactory", "(", "'codtools.cif_clean'", ")", "# pylint: disable=invalid-name", "# Collect the dictionary of not None parameters passed to the launch script and print to screen", "local_vars", "=", "locals", "(", ")", "launch_paramaters", "=", "{", "}", "for", "arg", "in", "inspect", ".", "getargspec", "(", "launch_cif_clean", ".", "callback", ")", ".", "args", ":", "# pylint: disable=deprecated-method", "if", "arg", "in", "local_vars", "and", "local_vars", "[", "arg", "]", ":", "launch_paramaters", "[", "arg", "]", "=", "local_vars", "[", "arg", "]", "click", ".", "echo", "(", "'='", "*", "80", ")", "click", ".", "echo", "(", "'Starting on {}'", ".", "format", "(", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ")", ")", "click", ".", "echo", "(", "'Launch parameters: {}'", ".", "format", "(", "launch_paramaters", ")", ")", "click", ".", "echo", "(", "'-'", "*", "80", ")", "if", "group_cif_raw", "is", "not", "None", ":", "# Get CifData nodes that should actually be submitted according to the input filters", "builder", "=", "orm", ".", "QueryBuilder", "(", ")", "builder", ".", "append", "(", "orm", ".", "Group", ",", "filters", "=", "{", "'id'", ":", "{", "'=='", ":", "group_cif_raw", ".", "pk", "}", "}", ",", "tag", "=", "'group'", ")", "if", "skip_check", ":", "builder", ".", "append", "(", "CifData", ",", "with_group", "=", "'group'", ",", "project", "=", "[", "'*'", "]", ")", "else", ":", "# Get CifData nodes that already have an associated workchain node in the `group_workchain` group.", "submitted", "=", "orm", ".", "QueryBuilder", "(", ")", "submitted", ".", "append", "(", "orm", ".", "WorkChainNode", ",", "tag", "=", "'workchain'", ")", "submitted", ".", "append", "(", "orm", ".", "Group", ",", "filters", "=", "{", "'id'", ":", "{", "'=='", ":", "group_workchain", ".", "pk", "}", "}", ",", "with_node", "=", "'workchain'", ")", "submitted", ".", "append", "(", "orm", ".", "CifData", ",", "with_outgoing", "=", "'workchain'", ",", "tag", "=", "'data'", ",", "project", "=", "[", "'id'", "]", ")", "submitted_nodes", "=", "set", "(", "pk", "for", "entry", "in", "submitted", ".", "all", "(", ")", "for", "pk", "in", "entry", ")", "if", "submitted_nodes", ":", "filters", "=", "{", "'id'", ":", "{", "'!in'", ":", "submitted_nodes", "}", "}", "else", ":", "filters", "=", "{", "}", "# Get all CifData nodes that are not included in the submitted node list", "builder", ".", "append", "(", "CifData", ",", "with_group", "=", "'group'", ",", "filters", "=", "filters", ",", "project", "=", "[", "'*'", "]", ")", "if", "max_entries", "is", "not", 
"None", ":", "builder", ".", "limit", "(", "int", "(", "max_entries", ")", ")", "nodes", "=", "[", "entry", "[", "0", "]", "for", "entry", "in", "builder", ".", "all", "(", ")", "]", "elif", "node", "is", "not", "None", ":", "nodes", "=", "[", "node", "]", "else", ":", "raise", "click", ".", "BadParameter", "(", "'you have to specify either --group-cif-raw or --node'", ")", "counter", "=", "0", "node_cif_filter_parameters", "=", "get_input_node", "(", "orm", ".", "Dict", ",", "{", "'fix-syntax-errors'", ":", "True", ",", "'use-c-parser'", ":", "True", ",", "'use-datablocks-without-coordinates'", ":", "True", ",", "}", ")", "node_cif_select_parameters", "=", "get_input_node", "(", "orm", ".", "Dict", ",", "{", "'canonicalize-tag-names'", ":", "True", ",", "'dont-treat-dots-as-underscores'", ":", "True", ",", "'invert'", ":", "True", ",", "'tags'", ":", "'_publ_author_name,_citation_journal_abbrev'", ",", "'use-c-parser'", ":", "True", ",", "}", ")", "node_options", "=", "get_input_node", "(", "orm", ".", "Dict", ",", "get_default_options", "(", ")", ")", "node_parse_engine", "=", "get_input_node", "(", "orm", ".", "Str", ",", "parse_engine", ")", "node_site_tolerance", "=", "get_input_node", "(", "orm", ".", "Float", ",", "5E-4", ")", "node_symprec", "=", "get_input_node", "(", "orm", ".", "Float", ",", "5E-3", ")", "for", "cif", "in", "nodes", ":", "inputs", "=", "{", "'cif'", ":", "cif", ",", "'cif_filter'", ":", "cif_filter", ",", "'cif_select'", ":", "cif_select", ",", "'cif_filter_parameters'", ":", "node_cif_filter_parameters", ",", "'cif_select_parameters'", ":", "node_cif_select_parameters", ",", "'options'", ":", "node_options", ",", "'parse_engine'", ":", "node_parse_engine", ",", "'site_tolerance'", ":", "node_site_tolerance", ",", "'symprec'", ":", "node_symprec", ",", "}", "if", "group_cif_clean", "is", "not", "None", ":", "inputs", "[", "'group_cif'", "]", "=", "group_cif_clean", "if", "group_structure", "is", "not", "None", ":", "inputs", "[", "'group_structure'", "]", "=", "group_structure", "if", "daemon", ":", "workchain", "=", "launch", ".", "submit", "(", "CifCleanWorkChain", ",", "*", "*", "inputs", ")", "echo_utc", "(", "'CifData<{}> submitting: {}<{}>'", ".", "format", "(", "cif", ".", "pk", ",", "CifCleanWorkChain", ".", "__name__", ",", "workchain", ".", "pk", ")", ")", "else", ":", "echo_utc", "(", "'CifData<{}> running: {}'", ".", "format", "(", "cif", ".", "pk", ",", "CifCleanWorkChain", ".", "__name__", ")", ")", "_", ",", "workchain", "=", "launch", ".", "run_get_node", "(", "CifCleanWorkChain", ",", "*", "*", "inputs", ")", "if", "group_workchain", "is", "not", "None", ":", "group_workchain", ".", "add_nodes", "(", "[", "workchain", "]", ")", "counter", "+=", "1", "if", "max_entries", "is", "not", "None", "and", "counter", ">=", "max_entries", ":", "break", "click", ".", "echo", "(", "'-'", "*", "80", ")", "click", ".", "echo", "(", "'Submitted {} new workchains'", ".", "format", "(", "counter", ")", ")", "click", ".", "echo", "(", "'Stopping on {}'", ".", "format", "(", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ")", ")", "click", ".", "echo", "(", "'='", "*", "80", ")" ]
Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes. It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if successful, will be added to the `group-structure` group.
[ "Run", "the", "CifCleanWorkChain", "on", "the", "entries", "in", "a", "group", "with", "raw", "imported", "CifData", "nodes", "." ]
python
train
40.427481
djgagne/hagelslag
hagelslag/evaluation/ProbabilityMetrics.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ProbabilityMetrics.py#L108-L118
def merge(self, other_roc): """ Ingest the values of another DistributedROC object into this one and update the statistics inplace. Args: other_roc: another DistributedROC object. """ if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds): self.contingency_tables += other_roc.contingency_tables else: print("Input table thresholds do not match.")
[ "def", "merge", "(", "self", ",", "other_roc", ")", ":", "if", "other_roc", ".", "thresholds", ".", "size", "==", "self", ".", "thresholds", ".", "size", "and", "np", ".", "all", "(", "other_roc", ".", "thresholds", "==", "self", ".", "thresholds", ")", ":", "self", ".", "contingency_tables", "+=", "other_roc", ".", "contingency_tables", "else", ":", "print", "(", "\"Input table thresholds do not match.\"", ")" ]
Ingest the values of another DistributedROC object into this one and update the statistics inplace. Args: other_roc: another DistributedROC object.
[ "Ingest", "the", "values", "of", "another", "DistributedROC", "object", "into", "this", "one", "and", "update", "the", "statistics", "inplace", "." ]
python
train
42.909091
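A minimal standalone sketch of the threshold check and table accumulation that merge performs above, written against plain numpy arrays rather than DistributedROC objects; the array shapes and example values are illustrative assumptions, not taken from hagelslag.

import numpy as np

def merge_contingency(thresholds_a, tables_a, thresholds_b, tables_b):
    # Accumulate the second set of contingency tables into the first
    # only when both were built over the same probability thresholds.
    if thresholds_a.size == thresholds_b.size and np.all(thresholds_a == thresholds_b):
        return tables_a + tables_b
    print("Input table thresholds do not match.")
    return tables_a

thresholds = np.arange(0.0, 1.1, 0.1)
tables_a = np.ones((thresholds.size, 4))      # hits, misses, false alarms, correct negatives
tables_b = 2 * np.ones((thresholds.size, 4))
merged = merge_contingency(thresholds, tables_a, thresholds, tables_b)
print(merged[0])                              # -> [3. 3. 3. 3.]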
Legobot/Legobot
Legobot/Connectors/Discord.py
https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Discord.py#L109-L122
def create_message(self, channel_id, text): """ Sends a message to a Discord channel or user via REST API Args: channel_id (string): ID of destingation Discord channel text (string): Content of message """ baseurl = self.rest_baseurl + \ '/channels/{}/messages'.format(channel_id) requests.post(baseurl, headers=self.headers, data=json.dumps({'content': text}))
[ "def", "create_message", "(", "self", ",", "channel_id", ",", "text", ")", ":", "baseurl", "=", "self", ".", "rest_baseurl", "+", "'/channels/{}/messages'", ".", "format", "(", "channel_id", ")", "requests", ".", "post", "(", "baseurl", ",", "headers", "=", "self", ".", "headers", ",", "data", "=", "json", ".", "dumps", "(", "{", "'content'", ":", "text", "}", ")", ")" ]
Sends a message to a Discord channel or user via REST API

Args:
    channel_id (string): ID of destination Discord channel
    text (string): Content of message
[ "Sends", "a", "message", "to", "a", "Discord", "channel", "or", "user", "via", "REST", "API" ]
python
train
34.142857
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L399-L408
def OSPFNeighborState_NeighborState(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream") NeighborState = ET.SubElement(OSPFNeighborState, "NeighborState") NeighborState.text = kwargs.pop('NeighborState') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "OSPFNeighborState_NeighborState", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "OSPFNeighborState", "=", "ET", ".", "SubElement", "(", "config", ",", "\"OSPFNeighborState\"", ",", "xmlns", "=", "\"http://brocade.com/ns/brocade-notification-stream\"", ")", "NeighborState", "=", "ET", ".", "SubElement", "(", "OSPFNeighborState", ",", "\"NeighborState\"", ")", "NeighborState", ".", "text", "=", "kwargs", ".", "pop", "(", "'NeighborState'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
47.6
jobovy/galpy
galpy/potential/verticalPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/verticalPotential.py#L129-L184
def toVerticalPotential(Pot,R,phi=None): """ NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT) """ Pot= flatten(Pot) if _APY_LOADED: if isinstance(R,units.Quantity): if hasattr(Pot,'_ro'): R= R.to(units.kpc).value/Pot._ro else: R= R.to(units.kpc).value/Pot[0]._ro if isinstance(phi,units.Quantity): phi= phi.to(units.rad).value if isinstance(Pot,list): out= [] for pot in Pot: if isinstance(pot,linearPotential): out.append(pot) elif isinstance(pot,Potential): out.append(verticalPotential(pot,R,phi=phi)) elif isinstance(pot,planarPotential): raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential") else: raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances") return out elif isinstance(Pot,Potential): return verticalPotential(Pot,R,phi=phi) elif isinstance(Pot,linearPotential): return Pot elif isinstance(Pot,planarPotential): raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential") else: raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances")
[ "def", "toVerticalPotential", "(", "Pot", ",", "R", ",", "phi", "=", "None", ")", ":", "Pot", "=", "flatten", "(", "Pot", ")", "if", "_APY_LOADED", ":", "if", "isinstance", "(", "R", ",", "units", ".", "Quantity", ")", ":", "if", "hasattr", "(", "Pot", ",", "'_ro'", ")", ":", "R", "=", "R", ".", "to", "(", "units", ".", "kpc", ")", ".", "value", "/", "Pot", ".", "_ro", "else", ":", "R", "=", "R", ".", "to", "(", "units", ".", "kpc", ")", ".", "value", "/", "Pot", "[", "0", "]", ".", "_ro", "if", "isinstance", "(", "phi", ",", "units", ".", "Quantity", ")", ":", "phi", "=", "phi", ".", "to", "(", "units", ".", "rad", ")", ".", "value", "if", "isinstance", "(", "Pot", ",", "list", ")", ":", "out", "=", "[", "]", "for", "pot", "in", "Pot", ":", "if", "isinstance", "(", "pot", ",", "linearPotential", ")", ":", "out", ".", "append", "(", "pot", ")", "elif", "isinstance", "(", "pot", ",", "Potential", ")", ":", "out", ".", "append", "(", "verticalPotential", "(", "pot", ",", "R", ",", "phi", "=", "phi", ")", ")", "elif", "isinstance", "(", "pot", ",", "planarPotential", ")", ":", "raise", "PotentialError", "(", "\"Input to 'toVerticalPotential' cannot be a planarPotential\"", ")", "else", ":", "raise", "PotentialError", "(", "\"Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances\"", ")", "return", "out", "elif", "isinstance", "(", "Pot", ",", "Potential", ")", ":", "return", "verticalPotential", "(", "Pot", ",", "R", ",", "phi", "=", "phi", ")", "elif", "isinstance", "(", "Pot", ",", "linearPotential", ")", ":", "return", "Pot", "elif", "isinstance", "(", "Pot", ",", "planarPotential", ")", ":", "raise", "PotentialError", "(", "\"Input to 'toVerticalPotential' cannot be a planarPotential\"", ")", "else", ":", "raise", "PotentialError", "(", "\"Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances\"", ")" ]
NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT)
[ "NAME", ":" ]
python
train
33.053571
tcalmant/ipopo
pelix/ipopo/core.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/core.py#L560-L595
def _register_factory(self, factory_name, factory, override): # type: (str, type, bool) -> None """ Registers a component factory :param factory_name: The name of the factory :param factory: The factory class object :param override: If true, previous factory is overridden, else an exception is risen if a previous factory with that name already exists :raise ValueError: The factory name already exists or is invalid :raise TypeError: Invalid factory type """ if not factory_name or not is_string(factory_name): raise ValueError("A factory name must be a non-empty string") if not inspect.isclass(factory): raise TypeError( "Invalid factory class '{0}'".format(type(factory).__name__) ) with self.__factories_lock: if factory_name in self.__factories: if override: _logger.info("Overriding factory '%s'", factory_name) else: raise ValueError( "'{0}' factory already exist".format(factory_name) ) self.__factories[factory_name] = factory # Trigger an event self._fire_ipopo_event( constants.IPopoEvent.REGISTERED, factory_name )
[ "def", "_register_factory", "(", "self", ",", "factory_name", ",", "factory", ",", "override", ")", ":", "# type: (str, type, bool) -> None", "if", "not", "factory_name", "or", "not", "is_string", "(", "factory_name", ")", ":", "raise", "ValueError", "(", "\"A factory name must be a non-empty string\"", ")", "if", "not", "inspect", ".", "isclass", "(", "factory", ")", ":", "raise", "TypeError", "(", "\"Invalid factory class '{0}'\"", ".", "format", "(", "type", "(", "factory", ")", ".", "__name__", ")", ")", "with", "self", ".", "__factories_lock", ":", "if", "factory_name", "in", "self", ".", "__factories", ":", "if", "override", ":", "_logger", ".", "info", "(", "\"Overriding factory '%s'\"", ",", "factory_name", ")", "else", ":", "raise", "ValueError", "(", "\"'{0}' factory already exist\"", ".", "format", "(", "factory_name", ")", ")", "self", ".", "__factories", "[", "factory_name", "]", "=", "factory", "# Trigger an event", "self", ".", "_fire_ipopo_event", "(", "constants", ".", "IPopoEvent", ".", "REGISTERED", ",", "factory_name", ")" ]
Registers a component factory

:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
                 exception is raised if a previous factory with that name
                 already exists
:raise ValueError: The factory name already exists or is invalid
:raise TypeError: Invalid factory type
[ "Registers", "a", "component", "factory" ]
python
train
38.555556
phaethon/kamene
kamene/packet.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/packet.py#L415-L424
def pdfdump(self, filename=None, **kargs): """pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called.""" canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=".pdf") canvas.writePDFfile(fname) subprocess.Popen([conf.prog.pdfreader, fname+".pdf"]) else: canvas.writePDFfile(filename)
[ "def", "pdfdump", "(", "self", ",", "filename", "=", "None", ",", "*", "*", "kargs", ")", ":", "canvas", "=", "self", ".", "canvas_dump", "(", "*", "*", "kargs", ")", "if", "filename", "is", "None", ":", "fname", "=", "get_temp_file", "(", "autoext", "=", "\".pdf\"", ")", "canvas", ".", "writePDFfile", "(", "fname", ")", "subprocess", ".", "Popen", "(", "[", "conf", ".", "prog", ".", "pdfreader", ",", "fname", "+", "\".pdf\"", "]", ")", "else", ":", "canvas", ".", "writePDFfile", "(", "filename", ")" ]
pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called.
[ "pdfdump", "(", "filename", "=", "None", "layer_shift", "=", "0", "rebuild", "=", "1", ")", "Creates", "a", "PDF", "file", "describing", "a", "packet", ".", "If", "filename", "is", "not", "provided", "a", "temporary", "file", "is", "created", "and", "xpdf", "is", "called", "." ]
python
train
50.3
coordt/django-alphabetfilter
alphafilter/templatetags/alphafilter.py
https://github.com/coordt/django-alphabetfilter/blob/a7bc21c0ea985c2021a4668241bf643c615c6c1f/alphafilter/templatetags/alphafilter.py#L177-L202
def qs_alphabet_filter(parser, token): """ The parser/tokenizer for the queryset alphabet filter. {% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %} {% qs_alphabet_filter objects lastname myapp/template.html %} The template name is optional and uses alphafilter/alphabet.html if not specified """ bits = token.split_contents() if len(bits) == 3: return AlphabetFilterNode(bits[1], bits[2]) elif len(bits) == 4: if "=" in bits[3]: key, val = bits[3].split('=') return AlphabetFilterNode(bits[1], bits[2], strip_params=val) else: return AlphabetFilterNode(bits[1], bits[2], template_name=bits[3]) elif len(bits) == 5: key, val = bits[4].split('=') return AlphabetFilterNode(bits[1], bits[2], bits[3], bits[4]) else: raise TemplateSyntaxError("%s is called with a queryset and field " "name, and optionally a template." % bits[0])
[ "def", "qs_alphabet_filter", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "==", "3", ":", "return", "AlphabetFilterNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "2", "]", ")", "elif", "len", "(", "bits", ")", "==", "4", ":", "if", "\"=\"", "in", "bits", "[", "3", "]", ":", "key", ",", "val", "=", "bits", "[", "3", "]", ".", "split", "(", "'='", ")", "return", "AlphabetFilterNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "2", "]", ",", "strip_params", "=", "val", ")", "else", ":", "return", "AlphabetFilterNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "2", "]", ",", "template_name", "=", "bits", "[", "3", "]", ")", "elif", "len", "(", "bits", ")", "==", "5", ":", "key", ",", "val", "=", "bits", "[", "4", "]", ".", "split", "(", "'='", ")", "return", "AlphabetFilterNode", "(", "bits", "[", "1", "]", ",", "bits", "[", "2", "]", ",", "bits", "[", "3", "]", ",", "bits", "[", "4", "]", ")", "else", ":", "raise", "TemplateSyntaxError", "(", "\"%s is called with a queryset and field \"", "\"name, and optionally a template.\"", "%", "bits", "[", "0", "]", ")" ]
The parser/tokenizer for the queryset alphabet filter. {% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %} {% qs_alphabet_filter objects lastname myapp/template.html %} The template name is optional and uses alphafilter/alphabet.html if not specified
[ "The", "parser", "/", "tokenizer", "for", "the", "queryset", "alphabet", "filter", "." ]
python
train
39.192308
shoebot/shoebot
shoebot/sbio/shell.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/sbio/shell.py#L234-L239
def do_rewind(self, line): """ rewind """ self.print_response("Rewinding from frame %s to 0" % self.bot._frame) self.bot._frame = 0
[ "def", "do_rewind", "(", "self", ",", "line", ")", ":", "self", ".", "print_response", "(", "\"Rewinding from frame %s to 0\"", "%", "self", ".", "bot", ".", "_frame", ")", "self", ".", "bot", ".", "_frame", "=", "0" ]
rewind
[ "rewind" ]
python
valid
27.666667
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/servicebus_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/servicebus_client.py#L433-L469
def list_sessions(self, updated_since=None, max_results=100, skip=0, **kwargs): """List session IDs. List the Session IDs with pending messages in the queue where the state of the session has been updated since the timestamp provided. If no timestamp is provided, all will be returned. If the state of a session has never been set, it will not be returned regardless of whether there are messages pending. :param updated_since: The UTC datetime from which to return updated pending Session IDs. :type updated_since: datetime.datetime :param max_results: The maximum number of Session IDs to return. Default value is 100. :type max_results: int :param skip: The page value to jump to. Default value is 0. :type skip: int :rtype: list[str] Example: .. literalinclude:: ../examples/test_examples.py :start-after: [START list_sessions_service_bus] :end-before: [END list_sessions_service_bus] :language: python :dedent: 4 :caption: Get the Ids of session which have messages pending in the queue """ if self.entity and not self.requires_session: raise ValueError("This is not a sessionful entity.") message = { 'last-updated-time': updated_since or datetime.datetime.utcfromtimestamp(0), 'skip': types.AMQPInt(skip), 'top': types.AMQPInt(max_results), } with BaseHandler(self.entity_uri, self.auth_config, debug=self.debug, **kwargs) as handler: return handler._mgmt_request_response( # pylint: disable=protected-access REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION, message, mgmt_handlers.list_sessions_op)
[ "def", "list_sessions", "(", "self", ",", "updated_since", "=", "None", ",", "max_results", "=", "100", ",", "skip", "=", "0", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "entity", "and", "not", "self", ".", "requires_session", ":", "raise", "ValueError", "(", "\"This is not a sessionful entity.\"", ")", "message", "=", "{", "'last-updated-time'", ":", "updated_since", "or", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "0", ")", ",", "'skip'", ":", "types", ".", "AMQPInt", "(", "skip", ")", ",", "'top'", ":", "types", ".", "AMQPInt", "(", "max_results", ")", ",", "}", "with", "BaseHandler", "(", "self", ".", "entity_uri", ",", "self", ".", "auth_config", ",", "debug", "=", "self", ".", "debug", ",", "*", "*", "kwargs", ")", "as", "handler", ":", "return", "handler", ".", "_mgmt_request_response", "(", "# pylint: disable=protected-access", "REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION", ",", "message", ",", "mgmt_handlers", ".", "list_sessions_op", ")" ]
List session IDs. List the Session IDs with pending messages in the queue where the state of the session has been updated since the timestamp provided. If no timestamp is provided, all will be returned. If the state of a session has never been set, it will not be returned regardless of whether there are messages pending. :param updated_since: The UTC datetime from which to return updated pending Session IDs. :type updated_since: datetime.datetime :param max_results: The maximum number of Session IDs to return. Default value is 100. :type max_results: int :param skip: The page value to jump to. Default value is 0. :type skip: int :rtype: list[str] Example: .. literalinclude:: ../examples/test_examples.py :start-after: [START list_sessions_service_bus] :end-before: [END list_sessions_service_bus] :language: python :dedent: 4 :caption: Get the Ids of session which have messages pending in the queue
[ "List", "session", "IDs", "." ]
python
test
49.135135
JohannesBuchner/jbopt
jbopt/mcmc.py
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/mcmc.py#L65-L112
def mcmc(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400, stdevs=0.1, start = 0.5, **problem): """ **Metropolis Hastings MCMC** with automatic step width adaption. Burnin period is also used to guess steps. :param nburn: number of burnin steps :param stdevs: step widths to start with """ if 'seed' in problem: numpy.random.seed(problem['seed']) n_params = len(parameter_names) def like(cube): cube = numpy.array(cube) if (cube <= 1e-10).any() or (cube >= 1-1e-10).any(): return -1e100 params = transform(cube) return loglikelihood(params) start = start + numpy.zeros(n_params) stdevs = stdevs + numpy.zeros(n_params) def compute_stepwidths(chain): return numpy.std(chain, axis=0) / 3 import matplotlib.pyplot as plt plt.figure(figsize=(7, 7)) steps = numpy.array([0.1]*(n_params)) print 'burn-in (1/2)...' chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / 2, adapt=True) steps = compute_stepwidths(chain) print 'burn-in (2/2)...' chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nburn / 2, adapt=True) steps = compute_stepwidths(chain) print 'recording chain ...' chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nsteps) chain = numpy.array(chain) i = numpy.argmax(prob) final = chain[-1] print 'postprocessing...' chain = numpy.array([transform(params) for params in chain]) return dict(start=chain[-1], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method='Metropolis MCMC')
[ "def", "mcmc", "(", "transform", ",", "loglikelihood", ",", "parameter_names", ",", "nsteps", "=", "40000", ",", "nburn", "=", "400", ",", "stdevs", "=", "0.1", ",", "start", "=", "0.5", ",", "*", "*", "problem", ")", ":", "if", "'seed'", "in", "problem", ":", "numpy", ".", "random", ".", "seed", "(", "problem", "[", "'seed'", "]", ")", "n_params", "=", "len", "(", "parameter_names", ")", "def", "like", "(", "cube", ")", ":", "cube", "=", "numpy", ".", "array", "(", "cube", ")", "if", "(", "cube", "<=", "1e-10", ")", ".", "any", "(", ")", "or", "(", "cube", ">=", "1", "-", "1e-10", ")", ".", "any", "(", ")", ":", "return", "-", "1e100", "params", "=", "transform", "(", "cube", ")", "return", "loglikelihood", "(", "params", ")", "start", "=", "start", "+", "numpy", ".", "zeros", "(", "n_params", ")", "stdevs", "=", "stdevs", "+", "numpy", ".", "zeros", "(", "n_params", ")", "def", "compute_stepwidths", "(", "chain", ")", ":", "return", "numpy", ".", "std", "(", "chain", ",", "axis", "=", "0", ")", "/", "3", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "figure", "(", "figsize", "=", "(", "7", ",", "7", ")", ")", "steps", "=", "numpy", ".", "array", "(", "[", "0.1", "]", "*", "(", "n_params", ")", ")", "print", "'burn-in (1/2)...'", "chain", ",", "prob", ",", "_", ",", "steps_", "=", "mcmc_advance", "(", "start", ",", "steps", ",", "like", ",", "nsteps", "=", "nburn", "/", "2", ",", "adapt", "=", "True", ")", "steps", "=", "compute_stepwidths", "(", "chain", ")", "print", "'burn-in (2/2)...'", "chain", ",", "prob", ",", "_", ",", "steps_", "=", "mcmc_advance", "(", "chain", "[", "-", "1", "]", ",", "steps", ",", "like", ",", "nsteps", "=", "nburn", "/", "2", ",", "adapt", "=", "True", ")", "steps", "=", "compute_stepwidths", "(", "chain", ")", "print", "'recording chain ...'", "chain", ",", "prob", ",", "_", ",", "steps_", "=", "mcmc_advance", "(", "chain", "[", "-", "1", "]", ",", "steps", ",", "like", ",", "nsteps", "=", "nsteps", ")", "chain", "=", "numpy", ".", "array", "(", "chain", ")", "i", "=", "numpy", ".", "argmax", "(", "prob", ")", "final", "=", "chain", "[", "-", "1", "]", "print", "'postprocessing...'", "chain", "=", "numpy", ".", "array", "(", "[", "transform", "(", "params", ")", "for", "params", "in", "chain", "]", ")", "return", "dict", "(", "start", "=", "chain", "[", "-", "1", "]", ",", "maximum", "=", "chain", "[", "i", "]", ",", "seeds", "=", "[", "final", ",", "chain", "[", "i", "]", "]", ",", "chain", "=", "chain", ",", "method", "=", "'Metropolis MCMC'", ")" ]
**Metropolis Hastings MCMC** with automatic step width adaptation. Burn-in period is also used to guess steps.

:param nburn: number of burn-in steps
:param stdevs: step widths to start with
[ "**", "Metropolis", "Hastings", "MCMC", "**" ]
python
valid
30.958333
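A hedged usage sketch for the mcmc routine above, following only the signature and return keys shown in the record; the toy Gaussian problem and the jbopt.mcmc import path (derived from the record's path field) are assumptions, and because the module uses Python 2 print statements the call assumes a Python 2 environment with jbopt and matplotlib available.

import numpy

from jbopt.mcmc import mcmc   # assumed import path, from the record's path field

parameter_names = ['a', 'b']

def transform(cube):
    # map the unit hypercube onto the physical parameter range [-5, 5]
    return 10.0 * numpy.asarray(cube) - 5.0

def loglikelihood(params):
    # simple Gaussian log-likelihood centred on the origin
    return -0.5 * float(numpy.sum(numpy.asarray(params) ** 2))

result = mcmc(transform, loglikelihood, parameter_names,
              nsteps=4000, nburn=400, seed=1)
print(result['maximum'])      # 'maximum', 'chain', 'start' are keys returned by the record's code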
CalebBell/fluids
fluids/two_phase.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/two_phase.py#L1539-L1634
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1): r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. ''' G_tp = m/(pi/4*D**2) # Actual Liquid flow v_l = m*(1-x)/rhol/(pi/4*D**2) Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D) fd_l = friction_factor(Re=Re_l, eD=roughness/D) dP_l = fd_l*L/D*(0.5*rhol*v_l**2) # Actual gas flow v_g = m*x/rhog/(pi/4*D**2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) fd_g = friction_factor(Re=Re_g, eD=roughness/D) dP_g = fd_g*L/D*(0.5*rhog*v_g**2) X = (dP_l/dP_g)**0.5 if G_tp >= 200: phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45 else: # Liquid-only flow; Re_lo is oddly needed v_lo = m/rhol/(pi/4*D**2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1 phi_g2 = 1 + C*X + X**2 return dP_g*phi_g2
[ "def", "Wang_Chiang_Lu", "(", "m", ",", "x", ",", "rhol", ",", "rhog", ",", "mul", ",", "mug", ",", "D", ",", "roughness", "=", "0", ",", "L", "=", "1", ")", ":", "G_tp", "=", "m", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "# Actual Liquid flow", "v_l", "=", "m", "*", "(", "1", "-", "x", ")", "/", "rhol", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_l", "=", "Reynolds", "(", "V", "=", "v_l", ",", "rho", "=", "rhol", ",", "mu", "=", "mul", ",", "D", "=", "D", ")", "fd_l", "=", "friction_factor", "(", "Re", "=", "Re_l", ",", "eD", "=", "roughness", "/", "D", ")", "dP_l", "=", "fd_l", "*", "L", "/", "D", "*", "(", "0.5", "*", "rhol", "*", "v_l", "**", "2", ")", "# Actual gas flow", "v_g", "=", "m", "*", "x", "/", "rhog", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_g", "=", "Reynolds", "(", "V", "=", "v_g", ",", "rho", "=", "rhog", ",", "mu", "=", "mug", ",", "D", "=", "D", ")", "fd_g", "=", "friction_factor", "(", "Re", "=", "Re_g", ",", "eD", "=", "roughness", "/", "D", ")", "dP_g", "=", "fd_g", "*", "L", "/", "D", "*", "(", "0.5", "*", "rhog", "*", "v_g", "**", "2", ")", "X", "=", "(", "dP_l", "/", "dP_g", ")", "**", "0.5", "if", "G_tp", ">=", "200", ":", "phi_g2", "=", "1", "+", "9.397", "*", "X", "**", "0.62", "+", "0.564", "*", "X", "**", "2.45", "else", ":", "# Liquid-only flow; Re_lo is oddly needed", "v_lo", "=", "m", "/", "rhol", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Re_lo", "=", "Reynolds", "(", "V", "=", "v_lo", ",", "rho", "=", "rhol", ",", "mu", "=", "mul", ",", "D", "=", "D", ")", "C", "=", "0.000004566", "*", "X", "**", "0.128", "*", "Re_lo", "**", "0.938", "*", "(", "rhol", "/", "rhog", ")", "**", "-", "2.15", "*", "(", "mul", "/", "mug", ")", "**", "5.1", "phi_g2", "=", "1", "+", "C", "*", "X", "+", "X", "**", "2", "return", "dP_g", "*", "phi_g2" ]
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
[ "r", "Calculates", "two", "-", "phase", "pressure", "drop", "with", "the", "Wang", "Chiang", "and", "Lu", "(", "1997", ")", "correlation", "given", "in", "[", "1", "]", "_", "and", "reviewed", "in", "[", "2", "]", "_", "and", "[", "3", "]", "_", "." ]
python
train
32.510417
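A short usage sketch around the docstring example above, assuming the fluids package is installed and importing from fluids.two_phase as in the record's path; the second call simply lowers the mass flow so G falls below 200 kg/m^2/s and the low-flux branch is exercised (its output is not quoted because the record gives no reference value).

from fluids.two_phase import Wang_Chiang_Lu   # assumed import path, from the record's path field

# Case from the docstring: G >= 200 kg/m^2/s, so the high-flux correlation is used.
dP = Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
                    mug=14E-6, D=0.05, roughness=0, L=1)
print(dP)        # 448.29981978639154 per the docstring example

# A lower mass flow rate drives G_tp below 200 and switches to the 1 + C*X + X^2 form.
dP_low = Wang_Chiang_Lu(m=0.05, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
                        mug=14E-6, D=0.05, roughness=0, L=1)
print(dP_low)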
vbwagner/ctypescrypto
ctypescrypto/digest.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/digest.py#L117-L135
def update(self, data, length=None): """ Hashes given byte string @param data - string to hash @param length - if not specifed, entire string is hashed, otherwise only first length bytes """ if self.digest_finalized: raise DigestError("No updates allowed") if not isinstance(data, bintype): raise TypeError("A byte string is expected") if length is None: length = len(data) elif length > len(data): raise ValueError("Specified length is greater than length of data") result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length) if result != 1: raise DigestError("Unable to update digest")
[ "def", "update", "(", "self", ",", "data", ",", "length", "=", "None", ")", ":", "if", "self", ".", "digest_finalized", ":", "raise", "DigestError", "(", "\"No updates allowed\"", ")", "if", "not", "isinstance", "(", "data", ",", "bintype", ")", ":", "raise", "TypeError", "(", "\"A byte string is expected\"", ")", "if", "length", "is", "None", ":", "length", "=", "len", "(", "data", ")", "elif", "length", ">", "len", "(", "data", ")", ":", "raise", "ValueError", "(", "\"Specified length is greater than length of data\"", ")", "result", "=", "libcrypto", ".", "EVP_DigestUpdate", "(", "self", ".", "ctx", ",", "c_char_p", "(", "data", ")", ",", "length", ")", "if", "result", "!=", "1", ":", "raise", "DigestError", "(", "\"Unable to update digest\"", ")" ]
Hashes given byte string
@param data - string to hash
@param length - if not specified, entire string is hashed,
otherwise only first length bytes
[ "Hashes", "given", "byte", "string" ]
python
train
39.157895
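A standard-library analogue of the guard logic in update above, using hashlib in place of the libcrypto binding so the sketch runs without ctypescrypto; the class is an illustrative stand-in, not part of that package.

import hashlib

class SimpleDigest:
    def __init__(self, algorithm="sha256"):
        self._hash = hashlib.new(algorithm)
        self.finalized = False

    def update(self, data, length=None):
        # Same guard ordering as the ctypescrypto method: refuse updates after
        # finalization, require bytes, and honour an optional prefix length.
        if self.finalized:
            raise RuntimeError("No updates allowed")
        if not isinstance(data, bytes):
            raise TypeError("A byte string is expected")
        if length is None:
            length = len(data)
        elif length > len(data):
            raise ValueError("Specified length is greater than length of data")
        self._hash.update(data[:length])

    def digest(self):
        self.finalized = True
        return self._hash.hexdigest()

d = SimpleDigest()
d.update(b"hello world", length=5)   # hashes only b"hello"
print(d.digest())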
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/scene/events.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/events.py#L50-L57
def press_event(self): """ The mouse press event that initiated a mouse drag, if any. """ if self.mouse_event.press_event is None: return None ev = self.copy() ev.mouse_event = self.mouse_event.press_event return ev
[ "def", "press_event", "(", "self", ")", ":", "if", "self", ".", "mouse_event", ".", "press_event", "is", "None", ":", "return", "None", "ev", "=", "self", ".", "copy", "(", ")", "ev", ".", "mouse_event", "=", "self", ".", "mouse_event", ".", "press_event", "return", "ev" ]
The mouse press event that initiated a mouse drag, if any.
[ "The", "mouse", "press", "event", "that", "initiated", "a", "mouse", "drag", "if", "any", "." ]
python
train
33.5
tensorflow/probability
tensorflow_probability/python/sts/regression.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/regression.py#L40-L47
def _observe_timeseries_fn(timeseries): """Build an observation_noise_fn that observes a Tensor timeseries.""" def observation_noise_fn(t): current_slice = timeseries[..., t, :] return tfd.MultivariateNormalDiag( loc=current_slice, scale_diag=tf.zeros_like(current_slice)) return observation_noise_fn
[ "def", "_observe_timeseries_fn", "(", "timeseries", ")", ":", "def", "observation_noise_fn", "(", "t", ")", ":", "current_slice", "=", "timeseries", "[", "...", ",", "t", ",", ":", "]", "return", "tfd", ".", "MultivariateNormalDiag", "(", "loc", "=", "current_slice", ",", "scale_diag", "=", "tf", ".", "zeros_like", "(", "current_slice", ")", ")", "return", "observation_noise_fn" ]
Build an observation_noise_fn that observes a Tensor timeseries.
[ "Build", "an", "observation_noise_fn", "that", "observes", "a", "Tensor", "timeseries", "." ]
python
test
40.375
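A plain-numpy analogue of the closure pattern in _observe_timeseries_fn, returning the per-timestep slice directly instead of wrapping it in a TFP distribution; the shapes and values are illustrative assumptions.

import numpy as np

def observe_timeseries_fn(timeseries):
    # Capture the series in a closure and hand back a function of the timestep,
    # mirroring the structure of the TFP helper (which wraps the slice in a
    # zero-scale MultivariateNormalDiag instead of returning it directly).
    def observation_fn(t):
        return timeseries[..., t, :]
    return observation_fn

series = np.arange(12.0).reshape(4, 3)   # 4 timesteps, 3 observed dimensions
observe = observe_timeseries_fn(series)
print(observe(2))                        # -> [6. 7. 8.]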
SBRG/ssbio
ssbio/protein/structure/utils/foldx.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L216-L244
def get_ddG_results(self): """Parse the results from BuildModel and get the delta delta G's. A positive ddG means that the mutation(s) is destabilzing, negative means stabilizing. - highly stabilising (ΔΔG < −1.84 kcal/mol); - stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol); - slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol); - neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol); - slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol); - destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol); - highly destabilising (ΔΔG > +1.84 kcal/mol). Returns: dict: Dictionary of mutation group to predicted ddG. """ foldx_avg_df = self.df_mutation_ddG_avg foldx_avg_ddG = {} results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values() for r in results: ident = r['Pdb'].split('_')[-1] ddG = r['total energy'] ddG_sd = r['SD'] foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd) return foldx_avg_ddG
[ "def", "get_ddG_results", "(", "self", ")", ":", "foldx_avg_df", "=", "self", ".", "df_mutation_ddG_avg", "foldx_avg_ddG", "=", "{", "}", "results", "=", "foldx_avg_df", "[", "[", "'Pdb'", ",", "'total energy'", ",", "'SD'", "]", "]", ".", "T", ".", "to_dict", "(", ")", ".", "values", "(", ")", "for", "r", "in", "results", ":", "ident", "=", "r", "[", "'Pdb'", "]", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", "ddG", "=", "r", "[", "'total energy'", "]", "ddG_sd", "=", "r", "[", "'SD'", "]", "foldx_avg_ddG", "[", "self", ".", "mutation_index_to_group", "[", "int", "(", "ident", ")", "]", "]", "=", "(", "ddG", ",", "ddG_sd", ")", "return", "foldx_avg_ddG" ]
Parse the results from BuildModel and get the delta delta G's. A positive ddG means that the mutation(s) is destabilizing, negative means stabilizing.

- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).

Returns:
    dict: Dictionary of mutation group to predicted ddG.
[ "Parse", "the", "results", "from", "BuildModel", "and", "get", "the", "delta", "delta", "G", "s", "." ]
python
train
39.413793
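A self-contained sketch of the DataFrame-to-dict step inside get_ddG_results, using a hand-made pandas frame; the column names and the Pdb-suffix parsing follow the record, while the mutation-group mapping and the numbers are illustrative assumptions.

import pandas as pd

# Stand-in for self.df_mutation_ddG_avg with the columns the method reads.
foldx_avg_df = pd.DataFrame({
    'Pdb': ['WT_model_1', 'WT_model_2'],
    'total energy': [1.2, -0.8],
    'SD': [0.1, 0.2],
})

# Stand-in for self.mutation_index_to_group (index parsed from the Pdb name).
mutation_index_to_group = {1: 'A10G', 2: 'L25P'}

foldx_avg_ddG = {}
for r in foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values():
    ident = r['Pdb'].split('_')[-1]
    foldx_avg_ddG[mutation_index_to_group[int(ident)]] = (r['total energy'], r['SD'])

print(foldx_avg_ddG)   # {'A10G': (1.2, 0.1), 'L25P': (-0.8, 0.2)}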
robehickman/simple-http-file-sync
shttpfs/versioned_storage.py
https://github.com/robehickman/simple-http-file-sync/blob/fa29b3ee58e9504e1d3ddfc0c14047284bf9921d/shttpfs/versioned_storage.py#L46-L64
def build_dir_tree(self, files): """ Convert a flat file dict into the tree format used for storage """ def helper(split_files): this_dir = {'files' : {}, 'dirs' : {}} dirs = defaultdict(list) for fle in split_files: index = fle[0]; fileinfo = fle[1] if len(index) == 1: fileinfo['path'] = index[0] # store only the file name instead of the whole path this_dir['files'][fileinfo['path']] = fileinfo elif len(index) > 1: dirs[index[0]].append((index[1:], fileinfo)) for name,info in dirs.iteritems(): this_dir['dirs'][name] = helper(info) return this_dir return helper([(name.split('/')[1:], file_info) for name, file_info in files.iteritems()])
[ "def", "build_dir_tree", "(", "self", ",", "files", ")", ":", "def", "helper", "(", "split_files", ")", ":", "this_dir", "=", "{", "'files'", ":", "{", "}", ",", "'dirs'", ":", "{", "}", "}", "dirs", "=", "defaultdict", "(", "list", ")", "for", "fle", "in", "split_files", ":", "index", "=", "fle", "[", "0", "]", "fileinfo", "=", "fle", "[", "1", "]", "if", "len", "(", "index", ")", "==", "1", ":", "fileinfo", "[", "'path'", "]", "=", "index", "[", "0", "]", "# store only the file name instead of the whole path", "this_dir", "[", "'files'", "]", "[", "fileinfo", "[", "'path'", "]", "]", "=", "fileinfo", "elif", "len", "(", "index", ")", ">", "1", ":", "dirs", "[", "index", "[", "0", "]", "]", ".", "append", "(", "(", "index", "[", "1", ":", "]", ",", "fileinfo", ")", ")", "for", "name", ",", "info", "in", "dirs", ".", "iteritems", "(", ")", ":", "this_dir", "[", "'dirs'", "]", "[", "name", "]", "=", "helper", "(", "info", ")", "return", "this_dir", "return", "helper", "(", "[", "(", "name", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", ",", "file_info", ")", "for", "name", ",", "file_info", "in", "files", ".", "iteritems", "(", ")", "]", ")" ]
Convert a flat file dict into the tree format used for storage
[ "Convert", "a", "flat", "file", "dict", "into", "the", "tree", "format", "used", "for", "storage" ]
python
train
44.105263
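A Python 3 standalone sketch of the flat-path-to-tree conversion performed by build_dir_tree (the record itself targets Python 2 via iteritems); the example file dict is an illustrative assumption.

from collections import defaultdict

def build_dir_tree(files):
    # files maps '/dir/sub/name' -> file-info dict; build nested dirs/files.
    def helper(split_files):
        this_dir = {'files': {}, 'dirs': {}}
        dirs = defaultdict(list)
        for index, fileinfo in split_files:
            if len(index) == 1:
                fileinfo['path'] = index[0]        # keep only the file name
                this_dir['files'][fileinfo['path']] = fileinfo
            elif len(index) > 1:
                dirs[index[0]].append((index[1:], fileinfo))
        for name, info in dirs.items():
            this_dir['dirs'][name] = helper(info)
        return this_dir

    return helper([(name.split('/')[1:], info) for name, info in files.items()])

tree = build_dir_tree({
    '/readme.txt': {'hash': 'aa'},
    '/src/main.py': {'hash': 'bb'},
})
print(sorted(tree['files']))                    # ['readme.txt']
print(sorted(tree['dirs']['src']['files']))     # ['main.py']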
JohnVinyard/featureflow
featureflow/extractor.py
https://github.com/JohnVinyard/featureflow/blob/7731487b00e38fa4f58c88b7881870fda2d69fdb/featureflow/extractor.py#L189-L218
def version(self): """ Compute the version identifier for this functional node using the func code and local names. Optionally, also allow closed-over variable values to affect the version number when closure_fingerprint is specified """ try: f = self.func.__call__.__code__ except AttributeError: f = self.func.__code__ h = md5() h.update(f.co_code) h.update(str(f.co_names).encode()) try: closure = self.func.__closure__ except AttributeError: return h.hexdigest() if closure is None or self.closure_fingerprint is None: return h.hexdigest() d = dict( (name, cell.cell_contents) for name, cell in zip(f.co_freevars, closure)) h.update(self.closure_fingerprint(d).encode()) return h.hexdigest()
[ "def", "version", "(", "self", ")", ":", "try", ":", "f", "=", "self", ".", "func", ".", "__call__", ".", "__code__", "except", "AttributeError", ":", "f", "=", "self", ".", "func", ".", "__code__", "h", "=", "md5", "(", ")", "h", ".", "update", "(", "f", ".", "co_code", ")", "h", ".", "update", "(", "str", "(", "f", ".", "co_names", ")", ".", "encode", "(", ")", ")", "try", ":", "closure", "=", "self", ".", "func", ".", "__closure__", "except", "AttributeError", ":", "return", "h", ".", "hexdigest", "(", ")", "if", "closure", "is", "None", "or", "self", ".", "closure_fingerprint", "is", "None", ":", "return", "h", ".", "hexdigest", "(", ")", "d", "=", "dict", "(", "(", "name", ",", "cell", ".", "cell_contents", ")", "for", "name", ",", "cell", "in", "zip", "(", "f", ".", "co_freevars", ",", "closure", ")", ")", "h", ".", "update", "(", "self", ".", "closure_fingerprint", "(", "d", ")", ".", "encode", "(", ")", ")", "return", "h", ".", "hexdigest", "(", ")" ]
Compute the version identifier for this functional node using the func code and local names. Optionally, also allow closed-over variable values to affect the version number when closure_fingerprint is specified
[ "Compute", "the", "version", "identifier", "for", "this", "functional", "node", "using", "the", "func", "code", "and", "local", "names", ".", "Optionally", "also", "allow", "closed", "-", "over", "variable", "values", "to", "affect", "the", "version", "number", "when", "closure_fingerprint", "is", "specified" ]
python
train
29.733333
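A trimmed standalone sketch of the fingerprinting idea in the version property above: hash the function's bytecode and referenced names with md5 so that editing the body or renaming a dependency changes the identifier; closure handling is omitted and the example function is illustrative.

from hashlib import md5

def code_version(func):
    # Hash the compiled bytecode plus the global/attribute names the function
    # references; any change to either yields a new version string.
    code = func.__code__
    h = md5()
    h.update(code.co_code)
    h.update(str(code.co_names).encode())
    return h.hexdigest()

def double(x):
    return 2 * x

print(code_version(double))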
brainiak/brainiak
brainiak/funcalign/srm.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L789-L846
def _srm(self, data): """Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response. """ subjects = len(data) self.random_state_ = np.random.RandomState(self.rand_seed) random_states = [ np.random.RandomState(self.random_state_.randint(2 ** 32)) for i in range(len(data))] # Initialization step: initialize the outputs with initial values, # voxels with the number of voxels in each subject. w, _ = _init_w_transforms(data, self.features, random_states) shared_response = self._compute_shared_response(data, w) if logger.isEnabledFor(logging.INFO): # Calculate the current objective function value objective = self._objective_function(data, w, shared_response) logger.info('Objective function %f' % objective) # Main loop of the algorithm for iteration in range(self.n_iter): logger.info('Iteration %d' % (iteration + 1)) # Update each subject's mapping transform W_i: for subject in range(subjects): a_subject = data[subject].dot(shared_response.T) perturbation = np.zeros(a_subject.shape) np.fill_diagonal(perturbation, 0.001) u_subject, _, v_subject = np.linalg.svd( a_subject + perturbation, full_matrices=False) w[subject] = u_subject.dot(v_subject) # Update the shared response: shared_response = self._compute_shared_response(data, w) if logger.isEnabledFor(logging.INFO): # Calculate the current objective function value objective = self._objective_function(data, w, shared_response) logger.info('Objective function %f' % objective) return w, shared_response
[ "def", "_srm", "(", "self", ",", "data", ")", ":", "subjects", "=", "len", "(", "data", ")", "self", ".", "random_state_", "=", "np", ".", "random", ".", "RandomState", "(", "self", ".", "rand_seed", ")", "random_states", "=", "[", "np", ".", "random", ".", "RandomState", "(", "self", ".", "random_state_", ".", "randint", "(", "2", "**", "32", ")", ")", "for", "i", "in", "range", "(", "len", "(", "data", ")", ")", "]", "# Initialization step: initialize the outputs with initial values,", "# voxels with the number of voxels in each subject.", "w", ",", "_", "=", "_init_w_transforms", "(", "data", ",", "self", ".", "features", ",", "random_states", ")", "shared_response", "=", "self", ".", "_compute_shared_response", "(", "data", ",", "w", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "# Calculate the current objective function value", "objective", "=", "self", ".", "_objective_function", "(", "data", ",", "w", ",", "shared_response", ")", "logger", ".", "info", "(", "'Objective function %f'", "%", "objective", ")", "# Main loop of the algorithm", "for", "iteration", "in", "range", "(", "self", ".", "n_iter", ")", ":", "logger", ".", "info", "(", "'Iteration %d'", "%", "(", "iteration", "+", "1", ")", ")", "# Update each subject's mapping transform W_i:", "for", "subject", "in", "range", "(", "subjects", ")", ":", "a_subject", "=", "data", "[", "subject", "]", ".", "dot", "(", "shared_response", ".", "T", ")", "perturbation", "=", "np", ".", "zeros", "(", "a_subject", ".", "shape", ")", "np", ".", "fill_diagonal", "(", "perturbation", ",", "0.001", ")", "u_subject", ",", "_", ",", "v_subject", "=", "np", ".", "linalg", ".", "svd", "(", "a_subject", "+", "perturbation", ",", "full_matrices", "=", "False", ")", "w", "[", "subject", "]", "=", "u_subject", ".", "dot", "(", "v_subject", ")", "# Update the shared response:", "shared_response", "=", "self", ".", "_compute_shared_response", "(", "data", ",", "w", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "# Calculate the current objective function value", "objective", "=", "self", ".", "_objective_function", "(", "data", ",", "w", ",", "shared_response", ")", "logger", ".", "info", "(", "'Objective function %f'", "%", "objective", ")", "return", "w", ",", "shared_response" ]
Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response.
[ "Expectation", "-", "Maximization", "algorithm", "for", "fitting", "the", "probabilistic", "SRM", "." ]
python
train
39
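The per-subject update inside the EM loop above is an orthogonal Procrustes step; a small numpy sketch of just that step, with random matrices standing in for a subject's data and the shared response (shapes are illustrative assumptions).

import numpy as np

rng = np.random.RandomState(0)
voxels, features, samples = 50, 5, 100
data_i = rng.randn(voxels, samples)            # one subject's data
shared_response = rng.randn(features, samples)

# W_i = argmin ||data_i - W S||_F subject to W^T W = I (orthogonal Procrustes):
# take the SVD of data_i @ S^T and set W = U @ Vt.
a_subject = data_i.dot(shared_response.T)
u, _, vt = np.linalg.svd(a_subject, full_matrices=False)
w_i = u.dot(vt)

print(w_i.shape)                                          # (50, 5)
print(np.allclose(w_i.T.dot(w_i), np.eye(features)))      # True: columns are orthonormal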
saltstack/salt
salt/modules/opkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L336-L356
def _parse_reported_packages_from_install_output(output): ''' Parses the output of "opkg install" to determine what packages would have been installed by an operation run with the --noaction flag. We are looking for lines like: Installing <package> (<version>) on <target> or Upgrading <package> from <oldVersion> to <version> on root ''' reported_pkgs = {} install_pattern = re.compile(r'Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)') upgrade_pattern = re.compile(r'Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)') for line in salt.utils.itertools.split(output, '\n'): match = install_pattern.match(line) if match is None: match = upgrade_pattern.match(line) if match: reported_pkgs[match.group('package')] = match.group('version') return reported_pkgs
[ "def", "_parse_reported_packages_from_install_output", "(", "output", ")", ":", "reported_pkgs", "=", "{", "}", "install_pattern", "=", "re", ".", "compile", "(", "r'Installing\\s(?P<package>.*?)\\s\\((?P<version>.*?)\\)\\son\\s(?P<target>.*?)'", ")", "upgrade_pattern", "=", "re", ".", "compile", "(", "r'Upgrading\\s(?P<package>.*?)\\sfrom\\s(?P<oldVersion>.*?)\\sto\\s(?P<version>.*?)\\son\\s(?P<target>.*?)'", ")", "for", "line", "in", "salt", ".", "utils", ".", "itertools", ".", "split", "(", "output", ",", "'\\n'", ")", ":", "match", "=", "install_pattern", ".", "match", "(", "line", ")", "if", "match", "is", "None", ":", "match", "=", "upgrade_pattern", ".", "match", "(", "line", ")", "if", "match", ":", "reported_pkgs", "[", "match", ".", "group", "(", "'package'", ")", "]", "=", "match", ".", "group", "(", "'version'", ")", "return", "reported_pkgs" ]
Parses the output of "opkg install" to determine what packages would have been installed by an operation run with the --noaction flag. We are looking for lines like: Installing <package> (<version>) on <target> or Upgrading <package> from <oldVersion> to <version> on root
[ "Parses", "the", "output", "of", "opkg", "install", "to", "determine", "what", "packages", "would", "have", "been", "installed", "by", "an", "operation", "run", "with", "the", "--", "noaction", "flag", "." ]
python
train
43.857143
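A standalone sketch of the parsing above, run on two sample lines in the formats the docstring describes; the regular expressions are copied from the record and the package names are illustrative.

import re

install_pattern = re.compile(
    r'Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)')
upgrade_pattern = re.compile(
    r'Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)')

output = (
    "Installing vim (8.0-1) on root\n"
    "Upgrading curl from 7.52.1-1 to 7.52.1-2 on root"
)

reported_pkgs = {}
for line in output.split('\n'):
    match = install_pattern.match(line) or upgrade_pattern.match(line)
    if match:
        reported_pkgs[match.group('package')] = match.group('version')

print(reported_pkgs)   # {'vim': '8.0-1', 'curl': '7.52.1-2'}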
AtomHash/evernode
evernode/classes/form_data.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/form_data.py#L63-L78
def parse(self, fail_callback=None): """ Parse text fields and file fields for values and files """ # get text fields for field in self.field_arguments: self.values[field['name']] = self.__get_value(field['name']) if self.values[field['name']] is None and field['required']: if fail_callback is not None: fail_callback() self.__invalid_request(field['error']) # get file fields for file in self.file_arguments: self.files[file['name']] = self.__get_file(file) if self.files[file['name']] is None and file['required']: if fail_callback is not None: fail_callback() self.__invalid_request(file['error'])
[ "def", "parse", "(", "self", ",", "fail_callback", "=", "None", ")", ":", "# get text fields\r", "for", "field", "in", "self", ".", "field_arguments", ":", "self", ".", "values", "[", "field", "[", "'name'", "]", "]", "=", "self", ".", "__get_value", "(", "field", "[", "'name'", "]", ")", "if", "self", ".", "values", "[", "field", "[", "'name'", "]", "]", "is", "None", "and", "field", "[", "'required'", "]", ":", "if", "fail_callback", "is", "not", "None", ":", "fail_callback", "(", ")", "self", ".", "__invalid_request", "(", "field", "[", "'error'", "]", ")", "# get file fields\r", "for", "file", "in", "self", ".", "file_arguments", ":", "self", ".", "files", "[", "file", "[", "'name'", "]", "]", "=", "self", ".", "__get_file", "(", "file", ")", "if", "self", ".", "files", "[", "file", "[", "'name'", "]", "]", "is", "None", "and", "file", "[", "'required'", "]", ":", "if", "fail_callback", "is", "not", "None", ":", "fail_callback", "(", ")", "self", ".", "__invalid_request", "(", "file", "[", "'error'", "]", ")" ]
Parse text fields and file fields for values and files
[ "Parse", "text", "fields", "and", "file", "fields", "for", "values", "and", "files" ]
python
train
49.5625
readbeyond/aeneas
aeneas/syncmap/smfsmil.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/syncmap/smfsmil.py#L55-L89
def parse(self, input_text, syncmap): """ Read from SMIL file. Limitations: 1. parses only ``<par>`` elements, in order 2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected) 3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated """ from lxml import etree smil_ns = "{http://www.w3.org/ns/SMIL}" root = etree.fromstring(gf.safe_bytes(input_text)) for par in root.iter(smil_ns + "par"): for child in par: if child.tag == (smil_ns + "text"): identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1]) elif child.tag == (smil_ns + "audio"): begin_text = child.get("clipBegin") if ":" in begin_text: begin = gf.time_from_hhmmssmmm(begin_text) else: begin = gf.time_from_ssmmm(begin_text) end_text = child.get("clipEnd") if ":" in end_text: end = gf.time_from_hhmmssmmm(end_text) else: end = gf.time_from_ssmmm(end_text) # TODO read text from additional text_file? self._add_fragment( syncmap=syncmap, identifier=identifier, lines=[u""], begin=begin, end=end )
[ "def", "parse", "(", "self", ",", "input_text", ",", "syncmap", ")", ":", "from", "lxml", "import", "etree", "smil_ns", "=", "\"{http://www.w3.org/ns/SMIL}\"", "root", "=", "etree", ".", "fromstring", "(", "gf", ".", "safe_bytes", "(", "input_text", ")", ")", "for", "par", "in", "root", ".", "iter", "(", "smil_ns", "+", "\"par\"", ")", ":", "for", "child", "in", "par", ":", "if", "child", ".", "tag", "==", "(", "smil_ns", "+", "\"text\"", ")", ":", "identifier", "=", "gf", ".", "safe_unicode", "(", "gf", ".", "split_url", "(", "child", ".", "get", "(", "\"src\"", ")", ")", "[", "1", "]", ")", "elif", "child", ".", "tag", "==", "(", "smil_ns", "+", "\"audio\"", ")", ":", "begin_text", "=", "child", ".", "get", "(", "\"clipBegin\"", ")", "if", "\":\"", "in", "begin_text", ":", "begin", "=", "gf", ".", "time_from_hhmmssmmm", "(", "begin_text", ")", "else", ":", "begin", "=", "gf", ".", "time_from_ssmmm", "(", "begin_text", ")", "end_text", "=", "child", ".", "get", "(", "\"clipEnd\"", ")", "if", "\":\"", "in", "end_text", ":", "end", "=", "gf", ".", "time_from_hhmmssmmm", "(", "end_text", ")", "else", ":", "end", "=", "gf", ".", "time_from_ssmmm", "(", "end_text", ")", "# TODO read text from additional text_file?", "self", ".", "_add_fragment", "(", "syncmap", "=", "syncmap", ",", "identifier", "=", "identifier", ",", "lines", "=", "[", "u\"\"", "]", ",", "begin", "=", "begin", ",", "end", "=", "end", ")" ]
Read from SMIL file. Limitations: 1. parses only ``<par>`` elements, in order 2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected) 3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
[ "Read", "from", "SMIL", "file", "." ]
python
train
41.657143
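A standalone sketch of the SMIL structure the aeneas parse() record above consumes, using only lxml as the source does. The sample document, src value, and clip times below are invented for illustration; they are not taken from the aeneas test suite.

```python
# Minimal SMIL fragment with one <par> holding a <text> and an <audio> child,
# the only elements the parser above inspects.
from lxml import etree

SMIL_NS = "{http://www.w3.org/ns/SMIL}"
sample = b"""<smil xmlns="http://www.w3.org/ns/SMIL">
  <body>
    <par>
      <text src="page.xhtml#f001"/>
      <audio src="audio.mp3" clipBegin="00:00:00.000" clipEnd="00:00:01.500"/>
    </par>
  </body>
</smil>"""

root = etree.fromstring(sample)
for par in root.iter(SMIL_NS + "par"):
    text = par.find(SMIL_NS + "text")
    audio = par.find(SMIL_NS + "audio")
    print(text.get("src"), audio.get("clipBegin"), audio.get("clipEnd"))
# -> page.xhtml#f001 00:00:00.000 00:00:01.500
```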
Tinche/cattrs
src/cattr/multistrategy_dispatch.py
https://github.com/Tinche/cattrs/blob/481bc9bdb69b2190d699b54f331c8c5c075506d5/src/cattr/multistrategy_dispatch.py#L46-L52
def register_func_list(self, func_and_handler): """ register a function to determine if the handle should be used for the type """ for func, handler in func_and_handler: self._function_dispatch.register(func, handler) self.dispatch.cache_clear()
[ "def", "register_func_list", "(", "self", ",", "func_and_handler", ")", ":", "for", "func", ",", "handler", "in", "func_and_handler", ":", "self", ".", "_function_dispatch", ".", "register", "(", "func", ",", "handler", ")", "self", ".", "dispatch", ".", "cache_clear", "(", ")" ]
register a function to determine if the handler should be used for the type
[ "register", "a", "function", "to", "determine", "if", "the", "handler", "should", "be", "used", "for", "the", "type" ]
python
train
42.142857
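register_func_list() in the record above expects an iterable of (predicate, handler) pairs. A small sketch of that calling convention follows; the predicate and handler are stand-ins, and the dispatcher instance is assumed to be a MultiStrategyDispatch created elsewhere by cattr, so the final call is left commented.

```python
# Shape of the func_and_handler argument: each entry pairs a predicate over a
# type with the handler to use when the predicate matches.
def is_frozenset_type(cls):
    return cls is frozenset

def structure_frozenset(data, cls):
    return frozenset(data)

pairs = [(is_frozenset_type, structure_frozenset)]
# dispatcher.register_func_list(pairs)  # dispatcher: a cattr MultiStrategyDispatch
```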
takluyver/backcall
backcall/backcall.py
https://github.com/takluyver/backcall/blob/cff13f5e4bd2a2af82fc5174e38cca0f9b7c21d2/backcall/backcall.py#L27-L109
def callback_prototype(prototype): """Decorator to process a callback prototype. A callback prototype is a function whose signature includes all the values that will be passed by the callback API in question. The original function will be returned, with a ``prototype.adapt`` attribute which can be used to prepare third party callbacks. """ protosig = signature(prototype) positional, keyword = [], [] for name, param in protosig.parameters.items(): if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD): raise TypeError("*args/**kwargs not supported in prototypes") if (param.default is not Parameter.empty) \ or (param.kind == Parameter.KEYWORD_ONLY): keyword.append(name) else: positional.append(name) kwargs = dict.fromkeys(keyword) def adapt(callback): """Introspect and prepare a third party callback.""" sig = signature(callback) try: # XXX: callback can have extra optional parameters - OK? sig.bind(*positional, **kwargs) return callback except TypeError: pass # Match up arguments unmatched_pos = positional[:] unmatched_kw = kwargs.copy() unrecognised = [] # TODO: unrecognised parameters with default values - OK? for name, param in sig.parameters.items(): # print(name, param.kind) #DBG if param.kind == Parameter.POSITIONAL_ONLY: if len(unmatched_pos) > 0: unmatched_pos.pop(0) else: unrecognised.append(name) elif param.kind == Parameter.POSITIONAL_OR_KEYWORD: if (param.default is not Parameter.empty) and (name in unmatched_kw): unmatched_kw.pop(name) elif len(unmatched_pos) > 0: unmatched_pos.pop(0) else: unrecognised.append(name) elif param.kind == Parameter.VAR_POSITIONAL: unmatched_pos = [] elif param.kind == Parameter.KEYWORD_ONLY: if name in unmatched_kw: unmatched_kw.pop(name) else: unrecognised.append(name) else: # VAR_KEYWORD unmatched_kw = {} # print(unmatched_pos, unmatched_kw, unrecognised) #DBG if unrecognised: raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised)) n_positional = len(positional) - len(unmatched_pos) @wraps(callback) def adapted(*args, **kwargs): """Wrapper for third party callbacks that discards excess arguments""" # print(args, kwargs) args = args[:n_positional] for name in unmatched_kw: # XXX: Could name not be in kwargs? kwargs.pop(name) # print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw) return callback(*args, **kwargs) return adapted prototype.adapt = adapt return prototype
[ "def", "callback_prototype", "(", "prototype", ")", ":", "protosig", "=", "signature", "(", "prototype", ")", "positional", ",", "keyword", "=", "[", "]", ",", "[", "]", "for", "name", ",", "param", "in", "protosig", ".", "parameters", ".", "items", "(", ")", ":", "if", "param", ".", "kind", "in", "(", "Parameter", ".", "VAR_POSITIONAL", ",", "Parameter", ".", "VAR_KEYWORD", ")", ":", "raise", "TypeError", "(", "\"*args/**kwargs not supported in prototypes\"", ")", "if", "(", "param", ".", "default", "is", "not", "Parameter", ".", "empty", ")", "or", "(", "param", ".", "kind", "==", "Parameter", ".", "KEYWORD_ONLY", ")", ":", "keyword", ".", "append", "(", "name", ")", "else", ":", "positional", ".", "append", "(", "name", ")", "kwargs", "=", "dict", ".", "fromkeys", "(", "keyword", ")", "def", "adapt", "(", "callback", ")", ":", "\"\"\"Introspect and prepare a third party callback.\"\"\"", "sig", "=", "signature", "(", "callback", ")", "try", ":", "# XXX: callback can have extra optional parameters - OK?", "sig", ".", "bind", "(", "*", "positional", ",", "*", "*", "kwargs", ")", "return", "callback", "except", "TypeError", ":", "pass", "# Match up arguments", "unmatched_pos", "=", "positional", "[", ":", "]", "unmatched_kw", "=", "kwargs", ".", "copy", "(", ")", "unrecognised", "=", "[", "]", "# TODO: unrecognised parameters with default values - OK?", "for", "name", ",", "param", "in", "sig", ".", "parameters", ".", "items", "(", ")", ":", "# print(name, param.kind) #DBG", "if", "param", ".", "kind", "==", "Parameter", ".", "POSITIONAL_ONLY", ":", "if", "len", "(", "unmatched_pos", ")", ">", "0", ":", "unmatched_pos", ".", "pop", "(", "0", ")", "else", ":", "unrecognised", ".", "append", "(", "name", ")", "elif", "param", ".", "kind", "==", "Parameter", ".", "POSITIONAL_OR_KEYWORD", ":", "if", "(", "param", ".", "default", "is", "not", "Parameter", ".", "empty", ")", "and", "(", "name", "in", "unmatched_kw", ")", ":", "unmatched_kw", ".", "pop", "(", "name", ")", "elif", "len", "(", "unmatched_pos", ")", ">", "0", ":", "unmatched_pos", ".", "pop", "(", "0", ")", "else", ":", "unrecognised", ".", "append", "(", "name", ")", "elif", "param", ".", "kind", "==", "Parameter", ".", "VAR_POSITIONAL", ":", "unmatched_pos", "=", "[", "]", "elif", "param", ".", "kind", "==", "Parameter", ".", "KEYWORD_ONLY", ":", "if", "name", "in", "unmatched_kw", ":", "unmatched_kw", ".", "pop", "(", "name", ")", "else", ":", "unrecognised", ".", "append", "(", "name", ")", "else", ":", "# VAR_KEYWORD", "unmatched_kw", "=", "{", "}", "# print(unmatched_pos, unmatched_kw, unrecognised) #DBG", "if", "unrecognised", ":", "raise", "TypeError", "(", "\"Function {!r} had unmatched arguments: {}\"", ".", "format", "(", "callback", ",", "unrecognised", ")", ")", "n_positional", "=", "len", "(", "positional", ")", "-", "len", "(", "unmatched_pos", ")", "@", "wraps", "(", "callback", ")", "def", "adapted", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper for third party callbacks that discards excess arguments\"\"\"", "# print(args, kwargs)", "args", "=", "args", "[", ":", "n_positional", "]", "for", "name", "in", "unmatched_kw", ":", "# XXX: Could name not be in kwargs?", "kwargs", ".", "pop", "(", "name", ")", "# print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw)", "return", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "adapted", "prototype", ".", "adapt", "=", "adapt", "return", "prototype" ]
Decorator to process a callback prototype. A callback prototype is a function whose signature includes all the values that will be passed by the callback API in question. The original function will be returned, with a ``prototype.adapt`` attribute which can be used to prepare third party callbacks.
[ "Decorator", "to", "process", "a", "callback", "prototype", ".", "A", "callback", "prototype", "is", "a", "function", "whose", "signature", "includes", "all", "the", "values", "that", "will", "be", "passed", "by", "the", "callback", "API", "in", "question", ".", "The", "original", "function", "will", "be", "returned", "with", "a", "prototype", ".", "adapt", "attribute", "which", "can", "be", "used", "to", "prepare", "third", "party", "callbacks", "." ]
python
train
38.120482
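A hedged usage sketch for callback_prototype, based on the docstring and adapt() logic in the record above. The prototype, handler, and argument values are invented, and the import path is assumed from the backcall/backcall.py layout shown.

```python
from backcall import callback_prototype  # import path assumed

@callback_prototype
def on_event(source, payload, verbose=False):
    """Prototype only: declares every argument a callback may receive."""

def simple_handler(source):
    print("event from", source)

# adapt() wraps simple_handler so the extra arguments are silently dropped.
handler = on_event.adapt(simple_handler)
handler("sensor-1", {"reading": 42}, verbose=True)  # -> "event from sensor-1"
```

The prototype itself is returned unchanged by the decorator, so on_event can still be called directly if needed.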
rakanalh/pocket-api
pocket/__init__.py
https://github.com/rakanalh/pocket-api/blob/d8222dd34e3aa5e545f9b8ba407fa277c734ab82/pocket/__init__.py#L329-L360
def _make_exception(self, response): """ In case of exception, construct the exception object that holds all important values returned by the response. :return: The exception instance :rtype: PocketException """ headers = response.headers limit_headers = [] if 'X-Limit-User-Limit' in headers: limit_headers = [ headers['X-Limit-User-Limit'], headers['X-Limit-User-Remaining'], headers['X-Limit-User-Reset'], headers['X-Limit-Key-Limit'], headers['X-Limit-Key-Remaining'], headers['X-Limit-Key-Reset'] ] x_error_code = int(headers['X-Error-Code']) exc = PocketException if x_error_code in self.auth_error_codes: exc = PocketAutException return exc( response.status_code, x_error_code, headers['X-Error'], *limit_headers )
[ "def", "_make_exception", "(", "self", ",", "response", ")", ":", "headers", "=", "response", ".", "headers", "limit_headers", "=", "[", "]", "if", "'X-Limit-User-Limit'", "in", "headers", ":", "limit_headers", "=", "[", "headers", "[", "'X-Limit-User-Limit'", "]", ",", "headers", "[", "'X-Limit-User-Remaining'", "]", ",", "headers", "[", "'X-Limit-User-Reset'", "]", ",", "headers", "[", "'X-Limit-Key-Limit'", "]", ",", "headers", "[", "'X-Limit-Key-Remaining'", "]", ",", "headers", "[", "'X-Limit-Key-Reset'", "]", "]", "x_error_code", "=", "int", "(", "headers", "[", "'X-Error-Code'", "]", ")", "exc", "=", "PocketException", "if", "x_error_code", "in", "self", ".", "auth_error_codes", ":", "exc", "=", "PocketAutException", "return", "exc", "(", "response", ".", "status_code", ",", "x_error_code", ",", "headers", "[", "'X-Error'", "]", ",", "*", "limit_headers", ")" ]
In case of exception, construct the exception object that holds all important values returned by the response. :return: The exception instance :rtype: PocketException
[ "In", "case", "of", "exception", "construct", "the", "exception", "object", "that", "holds", "all", "important", "values", "returned", "by", "the", "response", ".", ":", "return", ":", "The", "exception", "instance", ":", "rtype", ":", "PocketException" ]
python
train
31.03125
MolSSI-BSE/basis_set_exchange
basis_set_exchange/api.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/api.py#L481-L490
def _basis_notes_path(name, data_dir): '''Form a path to the notes for a basis set''' data_dir = fix_data_dir(data_dir) bs_data = _get_basis_metadata(name, data_dir) # the notes file is the same as the base file name, with a .notes extension filebase = bs_data['basename'] file_path = os.path.join(data_dir, filebase + '.notes') return file_path
[ "def", "_basis_notes_path", "(", "name", ",", "data_dir", ")", ":", "data_dir", "=", "fix_data_dir", "(", "data_dir", ")", "bs_data", "=", "_get_basis_metadata", "(", "name", ",", "data_dir", ")", "# the notes file is the same as the base file name, with a .notes extension", "filebase", "=", "bs_data", "[", "'basename'", "]", "file_path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "filebase", "+", "'.notes'", ")", "return", "file_path" ]
Form a path to the notes for a basis set
[ "Form", "a", "path", "to", "the", "notes", "for", "a", "basis", "set" ]
python
train
36.6
inveniosoftware/invenio-records-rest
invenio_records_rest/schemas/fields/trimmedstring.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/schemas/fields/trimmedstring.py#L19-L22
def _deserialize(self, value, attr, data): """Deserialize string value.""" value = super(TrimmedString, self)._deserialize(value, attr, data) return value.strip()
[ "def", "_deserialize", "(", "self", ",", "value", ",", "attr", ",", "data", ")", ":", "value", "=", "super", "(", "TrimmedString", ",", "self", ")", ".", "_deserialize", "(", "value", ",", "attr", ",", "data", ")", "return", "value", ".", "strip", "(", ")" ]
Deserialize string value.
[ "Deserialize", "string", "value", "." ]
python
train
45.75
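A minimal sketch of the TrimmedString field in a marshmallow 2.x schema, the API generation matching the _deserialize(value, attr, data) signature in the record above. The schema, field name, and input value are invented, and the import path is assumed from the package layout shown.

```python
from marshmallow import Schema
from invenio_records_rest.schemas.fields import TrimmedString  # path assumed

class TitleSchema(Schema):
    title = TrimmedString()

# marshmallow 2.x load() returns an UnmarshalResult with .data and .errors
result = TitleSchema().load({"title": "  Deep Learning  "})
print(result.data["title"])  # -> "Deep Learning" (surrounding whitespace removed)
```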
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L177-L189
def police_priority_map_exceed_map_pri3_exceed(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer") name_key = ET.SubElement(police_priority_map, "name") name_key.text = kwargs.pop('name') exceed = ET.SubElement(police_priority_map, "exceed") map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed") map_pri3_exceed.text = kwargs.pop('map_pri3_exceed') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "police_priority_map_exceed_map_pri3_exceed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-policer\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "exceed", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"exceed\"", ")", "map_pri3_exceed", "=", "ET", ".", "SubElement", "(", "exceed", ",", "\"map-pri3-exceed\"", ")", "map_pri3_exceed", ".", "text", "=", "kwargs", ".", "pop", "(", "'map_pri3_exceed'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
49.153846
wdm0006/git-pandas
gitpandas/utilities/plotting.py
https://github.com/wdm0006/git-pandas/blob/e56b817b1d66b8296d1d5e703d5db0e181d25899/gitpandas/utilities/plotting.py#L22-L73
def plot_punchcard(df, metric='lines', title='punchcard', by=None): """ Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard :param df: :param metric: :param title: :return: """ if not HAS_MPL: raise ImportError('Must have matplotlib installed to use the plotting functions') # find how many plots we are making if by is not None: unique_vals = set(df[by].values.tolist()) else: unique_vals = ['foo'] for idx, val in enumerate(unique_vals): if by is not None: sub_df = df[df[by] == val] else: sub_df = df fig = plt.figure(figsize=(8, title and 3 or 2.5), facecolor='#ffffff') ax = fig.add_subplot('111', axisbg='#ffffff') fig.subplots_adjust(left=0.06, bottom=0.04, right=0.98, top=0.95) if by is not None: ax.set_title(title + ' (%s)' % (str(val), ), y=0.96).set_color('#333333') else: ax.set_title(title, y=0.96).set_color('#333333') ax.set_frame_on(False) ax.scatter(sub_df['hour_of_day'], sub_df['day_of_week'], s=sub_df[metric], c='#333333', edgecolor='#333333') for line in ax.get_xticklines() + ax.get_yticklines(): line.set_alpha(0.0) dist = -0.8 ax.plot([dist, 23.5], [dist, dist], c='#555555') ax.plot([dist, dist], [dist, 6.4], c='#555555') ax.set_xlim(-1, 24) ax.set_ylim(-0.9, 6.9) ax.set_yticks(range(7)) for tx in ax.set_yticklabels(['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']): tx.set_color('#555555') tx.set_size('x-small') ax.set_xticks(range(24)) for tx in ax.set_xticklabels(['%02d' % x for x in range(24)]): tx.set_color('#555555') tx.set_size('x-small') ax.set_aspect('equal') if idx + 1 == len(unique_vals): plt.show(block=True) else: plt.show(block=False)
[ "def", "plot_punchcard", "(", "df", ",", "metric", "=", "'lines'", ",", "title", "=", "'punchcard'", ",", "by", "=", "None", ")", ":", "if", "not", "HAS_MPL", ":", "raise", "ImportError", "(", "'Must have matplotlib installed to use the plotting functions'", ")", "# find how many plots we are making", "if", "by", "is", "not", "None", ":", "unique_vals", "=", "set", "(", "df", "[", "by", "]", ".", "values", ".", "tolist", "(", ")", ")", "else", ":", "unique_vals", "=", "[", "'foo'", "]", "for", "idx", ",", "val", "in", "enumerate", "(", "unique_vals", ")", ":", "if", "by", "is", "not", "None", ":", "sub_df", "=", "df", "[", "df", "[", "by", "]", "==", "val", "]", "else", ":", "sub_df", "=", "df", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "title", "and", "3", "or", "2.5", ")", ",", "facecolor", "=", "'#ffffff'", ")", "ax", "=", "fig", ".", "add_subplot", "(", "'111'", ",", "axisbg", "=", "'#ffffff'", ")", "fig", ".", "subplots_adjust", "(", "left", "=", "0.06", ",", "bottom", "=", "0.04", ",", "right", "=", "0.98", ",", "top", "=", "0.95", ")", "if", "by", "is", "not", "None", ":", "ax", ".", "set_title", "(", "title", "+", "' (%s)'", "%", "(", "str", "(", "val", ")", ",", ")", ",", "y", "=", "0.96", ")", ".", "set_color", "(", "'#333333'", ")", "else", ":", "ax", ".", "set_title", "(", "title", ",", "y", "=", "0.96", ")", ".", "set_color", "(", "'#333333'", ")", "ax", ".", "set_frame_on", "(", "False", ")", "ax", ".", "scatter", "(", "sub_df", "[", "'hour_of_day'", "]", ",", "sub_df", "[", "'day_of_week'", "]", ",", "s", "=", "sub_df", "[", "metric", "]", ",", "c", "=", "'#333333'", ",", "edgecolor", "=", "'#333333'", ")", "for", "line", "in", "ax", ".", "get_xticklines", "(", ")", "+", "ax", ".", "get_yticklines", "(", ")", ":", "line", ".", "set_alpha", "(", "0.0", ")", "dist", "=", "-", "0.8", "ax", ".", "plot", "(", "[", "dist", ",", "23.5", "]", ",", "[", "dist", ",", "dist", "]", ",", "c", "=", "'#555555'", ")", "ax", ".", "plot", "(", "[", "dist", ",", "dist", "]", ",", "[", "dist", ",", "6.4", "]", ",", "c", "=", "'#555555'", ")", "ax", ".", "set_xlim", "(", "-", "1", ",", "24", ")", "ax", ".", "set_ylim", "(", "-", "0.9", ",", "6.9", ")", "ax", ".", "set_yticks", "(", "range", "(", "7", ")", ")", "for", "tx", "in", "ax", ".", "set_yticklabels", "(", "[", "'Mon'", ",", "'Tues'", ",", "'Wed'", ",", "'Thurs'", ",", "'Fri'", ",", "'Sat'", ",", "'Sun'", "]", ")", ":", "tx", ".", "set_color", "(", "'#555555'", ")", "tx", ".", "set_size", "(", "'x-small'", ")", "ax", ".", "set_xticks", "(", "range", "(", "24", ")", ")", "for", "tx", "in", "ax", ".", "set_xticklabels", "(", "[", "'%02d'", "%", "x", "for", "x", "in", "range", "(", "24", ")", "]", ")", ":", "tx", ".", "set_color", "(", "'#555555'", ")", "tx", ".", "set_size", "(", "'x-small'", ")", "ax", ".", "set_aspect", "(", "'equal'", ")", "if", "idx", "+", "1", "==", "len", "(", "unique_vals", ")", ":", "plt", ".", "show", "(", "block", "=", "True", ")", "else", ":", "plt", ".", "show", "(", "block", "=", "False", ")" ]
Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard :param df: :param metric: :param title: :return:
[ "Uses", "modified", "plotting", "code", "from", "https", ":", "//", "bitbucket", ".", "org", "/", "birkenfeld", "/", "hgpunchcard" ]
python
train
37.403846
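A hedged usage sketch for plot_punchcard() from the record above: the DataFrame below is synthetic but carries the columns the function reads ('hour_of_day', 'day_of_week', and the metric column). matplotlib must be installed, and note that the axisbg keyword used in the source is only accepted by older matplotlib releases.

```python
import pandas as pd
from gitpandas.utilities.plotting import plot_punchcard  # module path from this record

# Synthetic punchcard data: three commits on Monday, Wednesday, and Friday.
df = pd.DataFrame({
    'hour_of_day': [9, 14, 22],
    'day_of_week': [0, 2, 4],   # Mon, Wed, Fri
    'lines': [120, 400, 60],    # drives marker size
})
plot_punchcard(df, metric='lines', title='commits by hour')
```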
deschler/django-modeltranslation
modeltranslation/fields.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/fields.py#L406-L412
def cache_name(self): """ Used in django 1.x """ lang = get_language() cache = build_localized_fieldname(self.accessor, lang) return "_%s_cache" % cache
[ "def", "cache_name", "(", "self", ")", ":", "lang", "=", "get_language", "(", ")", "cache", "=", "build_localized_fieldname", "(", "self", ".", "accessor", ",", "lang", ")", "return", "\"_%s_cache\"", "%", "cache" ]
Used in django 1.x
[ "Used", "in", "django", "1", ".", "x" ]
python
train
27.714286
DLR-RM/RAFCON
source/rafcon/core/states/execution_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/execution_state.py#L104-L130
def _execute(self, execute_inputs, execute_outputs, backward_execution=False): """Calls the custom execute function of the script.py of the state """ self._script.build_module() outcome_item = self._script.execute(self, execute_inputs, execute_outputs, backward_execution) # in the case of backward execution the outcome is not relevant if backward_execution: return # If the state was preempted, the state must be left on the preempted outcome if self.preempted: return Outcome(-2, "preempted") # Outcome id was returned if outcome_item in self.outcomes: return self.outcomes[outcome_item] # Outcome name was returned for outcome_id, outcome in self.outcomes.items(): if outcome.name == outcome_item: return self.outcomes[outcome_id] logger.error("Returned outcome of {0} not existing: {1}".format(self, outcome_item)) return Outcome(-1, "aborted")
[ "def", "_execute", "(", "self", ",", "execute_inputs", ",", "execute_outputs", ",", "backward_execution", "=", "False", ")", ":", "self", ".", "_script", ".", "build_module", "(", ")", "outcome_item", "=", "self", ".", "_script", ".", "execute", "(", "self", ",", "execute_inputs", ",", "execute_outputs", ",", "backward_execution", ")", "# in the case of backward execution the outcome is not relevant", "if", "backward_execution", ":", "return", "# If the state was preempted, the state must be left on the preempted outcome", "if", "self", ".", "preempted", ":", "return", "Outcome", "(", "-", "2", ",", "\"preempted\"", ")", "# Outcome id was returned", "if", "outcome_item", "in", "self", ".", "outcomes", ":", "return", "self", ".", "outcomes", "[", "outcome_item", "]", "# Outcome name was returned", "for", "outcome_id", ",", "outcome", "in", "self", ".", "outcomes", ".", "items", "(", ")", ":", "if", "outcome", ".", "name", "==", "outcome_item", ":", "return", "self", ".", "outcomes", "[", "outcome_id", "]", "logger", ".", "error", "(", "\"Returned outcome of {0} not existing: {1}\"", ".", "format", "(", "self", ",", "outcome_item", ")", ")", "return", "Outcome", "(", "-", "1", ",", "\"aborted\"", ")" ]
Calls the custom execute function of the script.py of the state
[ "Calls", "the", "custom", "execute", "function", "of", "the", "script", ".", "py", "of", "the", "state" ]
python
train
37.259259
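_execute() in the RAFCON record above accepts either an outcome id or an outcome name as the script's return value. Below is a hypothetical RAFCON state script illustrating both styles; the function signature follows RAFCON's usual script template and the outcome name is invented.

```python
def execute(self, inputs, outputs, gvm):
    # gvm is the global variable manager; unused in this sketch
    outputs["doubled"] = inputs.get("value", 0) * 2
    return "done"   # resolved by outcome name against self.outcomes
    # return 0      # returning an outcome id would be resolved directly
```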
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L704-L769
def length_of_national_destination_code(numobj): """Return length of the national destination code code for a number. Gets the length of the national destination code (NDC) from the PhoneNumber object passed in, so that clients could use it to split a national significant number into NDC and subscriber number. The NDC of a phone number is normally the first group of digit(s) right after the country calling code when the number is formatted in the international format, if there is a subscriber number part that follows. N.B.: similar to an area code, not all numbers have an NDC! An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("18002530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ndc_len = phonenumbers.length_of_national_destination_code(numobj) >>> if ndc_len > 0: ... national_destination_code = nsn[:ndc_len] ... subscriber_number = nsn[ndc_len:] ... else: ... national_destination_code = "" ... subscriber_number = nsn Refer to the unittests to see the difference between this function and length_of_geographical_area_code. Arguments: numobj -- The PhoneNumber object to find the length of the NDC from. Returns the length of NDC of the PhoneNumber object passed in, which could be zero. """ if numobj.extension is not None: # We don't want to alter the object given to us, but we don't want to # include the extension when we format it, so we copy it and clear the # extension here. copied_numobj = PhoneNumber() copied_numobj.merge_from(numobj) copied_numobj.extension = None else: copied_numobj = numobj nsn = format_number(copied_numobj, PhoneNumberFormat.INTERNATIONAL) number_groups = re.split(NON_DIGITS_PATTERN, nsn) # The pattern will start with "+COUNTRY_CODE " so the first group will # always be the empty string (before the + symbol) and the second group # will be the country calling code. The third group will be area code if # it is not the last group. if len(number_groups) <= 3: return 0 if number_type(numobj) == PhoneNumberType.MOBILE: # For example Argentinian mobile numbers, when formatted in the # international format, are in the form of +54 9 NDC XXXX... As a # result, we take the length of the third group (NDC) and add the # length of the second group (which is the mobile token), which also # forms part of the national significant number. This assumes that # the mobile token is always formatted separately from the rest of the # phone number. mobile_token = country_mobile_token(numobj.country_code) if mobile_token != U_EMPTY_STRING: return len(number_groups[2]) + len(number_groups[3]) return len(number_groups[2])
[ "def", "length_of_national_destination_code", "(", "numobj", ")", ":", "if", "numobj", ".", "extension", "is", "not", "None", ":", "# We don't want to alter the object given to us, but we don't want to", "# include the extension when we format it, so we copy it and clear the", "# extension here.", "copied_numobj", "=", "PhoneNumber", "(", ")", "copied_numobj", ".", "merge_from", "(", "numobj", ")", "copied_numobj", ".", "extension", "=", "None", "else", ":", "copied_numobj", "=", "numobj", "nsn", "=", "format_number", "(", "copied_numobj", ",", "PhoneNumberFormat", ".", "INTERNATIONAL", ")", "number_groups", "=", "re", ".", "split", "(", "NON_DIGITS_PATTERN", ",", "nsn", ")", "# The pattern will start with \"+COUNTRY_CODE \" so the first group will", "# always be the empty string (before the + symbol) and the second group", "# will be the country calling code. The third group will be area code if", "# it is not the last group.", "if", "len", "(", "number_groups", ")", "<=", "3", ":", "return", "0", "if", "number_type", "(", "numobj", ")", "==", "PhoneNumberType", ".", "MOBILE", ":", "# For example Argentinian mobile numbers, when formatted in the", "# international format, are in the form of +54 9 NDC XXXX... As a", "# result, we take the length of the third group (NDC) and add the", "# length of the second group (which is the mobile token), which also", "# forms part of the national significant number. This assumes that", "# the mobile token is always formatted separately from the rest of the", "# phone number.", "mobile_token", "=", "country_mobile_token", "(", "numobj", ".", "country_code", ")", "if", "mobile_token", "!=", "U_EMPTY_STRING", ":", "return", "len", "(", "number_groups", "[", "2", "]", ")", "+", "len", "(", "number_groups", "[", "3", "]", ")", "return", "len", "(", "number_groups", "[", "2", "]", ")" ]
Return length of the national destination code code for a number. Gets the length of the national destination code (NDC) from the PhoneNumber object passed in, so that clients could use it to split a national significant number into NDC and subscriber number. The NDC of a phone number is normally the first group of digit(s) right after the country calling code when the number is formatted in the international format, if there is a subscriber number part that follows. N.B.: similar to an area code, not all numbers have an NDC! An example of how this could be used: >>> import phonenumbers >>> numobj = phonenumbers.parse("18002530000", "US") >>> nsn = phonenumbers.national_significant_number(numobj) >>> ndc_len = phonenumbers.length_of_national_destination_code(numobj) >>> if ndc_len > 0: ... national_destination_code = nsn[:ndc_len] ... subscriber_number = nsn[ndc_len:] ... else: ... national_destination_code = "" ... subscriber_number = nsn Refer to the unittests to see the difference between this function and length_of_geographical_area_code. Arguments: numobj -- The PhoneNumber object to find the length of the NDC from. Returns the length of NDC of the PhoneNumber object passed in, which could be zero.
[ "Return", "length", "of", "the", "national", "destination", "code", "code", "for", "a", "number", "." ]
python
train
43.833333
bkg/greenwich
greenwich/raster.py
https://github.com/bkg/greenwich/blob/57ec644dadfe43ce0ecf2cfd32a2de71e0c8c141/greenwich/raster.py#L474-L488
def get_offset(self, envelope): """Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size). Arguments: envelope -- coordinate extent tuple or Envelope """ if isinstance(envelope, collections.Sequence): envelope = Envelope(envelope) if not (self.envelope.contains(envelope) or self.envelope.intersects(envelope)): raise ValueError('Envelope does not intersect with this extent') coords = self.affine.transform((envelope.ul, envelope.lr)) nxy = [(min(dest, size) - origin) or 1 for size, origin, dest in zip(self.size, *coords)] return coords[0] + tuple(nxy)
[ "def", "get_offset", "(", "self", ",", "envelope", ")", ":", "if", "isinstance", "(", "envelope", ",", "collections", ".", "Sequence", ")", ":", "envelope", "=", "Envelope", "(", "envelope", ")", "if", "not", "(", "self", ".", "envelope", ".", "contains", "(", "envelope", ")", "or", "self", ".", "envelope", ".", "intersects", "(", "envelope", ")", ")", ":", "raise", "ValueError", "(", "'Envelope does not intersect with this extent'", ")", "coords", "=", "self", ".", "affine", ".", "transform", "(", "(", "envelope", ".", "ul", ",", "envelope", ".", "lr", ")", ")", "nxy", "=", "[", "(", "min", "(", "dest", ",", "size", ")", "-", "origin", ")", "or", "1", "for", "size", ",", "origin", ",", "dest", "in", "zip", "(", "self", ".", "size", ",", "*", "coords", ")", "]", "return", "coords", "[", "0", "]", "+", "tuple", "(", "nxy", ")" ]
Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size). Arguments: envelope -- coordinate extent tuple or Envelope
[ "Returns", "a", "4", "-", "tuple", "pixel", "window", "(", "x_offset", "y_offset", "x_size", "y_size", ")", "." ]
python
test
45.466667
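A heavily hedged sketch of get_offset() from the record above: the method reads self.envelope, self.affine, and self.size, which suggests it lives on greenwich's raster object, but that class is not shown in this record, so the open call, file name, and coordinates below are assumptions.

```python
from greenwich.raster import Raster  # import location assumed

with Raster('elevation.tif') as ras:
    # (min_x, min_y, max_x, max_y) in the raster's spatial reference system
    x_off, y_off, x_size, y_size = ras.get_offset((-120.0, 38.0, -119.5, 38.5))
    print(x_off, y_off, x_size, y_size)
```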
push-things/wallabag_api
wallabag_api/wallabag.py
https://github.com/push-things/wallabag_api/blob/8d1e10a6ebc03d1ac9af2b38b57eb69f29b4216e/wallabag_api/wallabag.py#L208-L220
async def get_entry(self, entry): """ GET /api/entries/{entry}.{_format} Retrieve a single entry :param entry: \w+ an integer The Entry ID :return data related to the ext """ params = {'access_token': self.token} url = '/api/entries/{entry}.{ext}'.format(entry=entry, ext=self.format) return await self.query(url, "get", **params)
[ "async", "def", "get_entry", "(", "self", ",", "entry", ")", ":", "params", "=", "{", "'access_token'", ":", "self", ".", "token", "}", "url", "=", "'/api/entries/{entry}.{ext}'", ".", "format", "(", "entry", "=", "entry", ",", "ext", "=", "self", ".", "format", ")", "return", "await", "self", ".", "query", "(", "url", ",", "\"get\"", ",", "*", "*", "params", ")" ]
GET /api/entries/{entry}.{_format} Retrieve a single entry :param entry: \w+ an integer The Entry ID :return data related to the ext
[ "GET", "/", "api", "/", "entries", "/", "{", "entry", "}", ".", "{", "_format", "}" ]
python
train
34
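get_entry() in the wallabag_api record above is a coroutine, so it has to be awaited. The sketch below assumes client is an already authenticated API object from wallabag_api exposing this method; its construction is not shown in this record and is therefore left out.

```python
import asyncio

async def show_entry(client, entry_id):
    # client.get_entry issues GET /api/entries/{entry}.{format}
    entry = await client.get_entry(entry_id)
    print(entry)

# asyncio.get_event_loop().run_until_complete(show_entry(client, 42))
```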
boolangery/py-lua-parser
luaparser/builder.py
https://github.com/boolangery/py-lua-parser/blob/578f2bf75f6f84c4b52c2affba56a4ec569d7ce7/luaparser/builder.py#L793-L815
def parse_func_body(self): """If success, return a tuple (args, body)""" self.save() self._expected = [] if self.next_is_rc(Tokens.OPAR, False): # do not render right hidden self.handle_hidden_right() # render hidden after new level args = self.parse_param_list() if args is not None: # may be an empty table if self.next_is_rc(Tokens.CPAR, False): # do not render right hidden self.handle_hidden_right() # render hidden after new level body = self.parse_block() if body: self._expected = [] token = self.next_is_rc(Tokens.END, False) if token: body.stop_char = token.stop self.success() return args, body else: self.abort() else: self.abort() return self.failure()
[ "def", "parse_func_body", "(", "self", ")", ":", "self", ".", "save", "(", ")", "self", ".", "_expected", "=", "[", "]", "if", "self", ".", "next_is_rc", "(", "Tokens", ".", "OPAR", ",", "False", ")", ":", "# do not render right hidden", "self", ".", "handle_hidden_right", "(", ")", "# render hidden after new level", "args", "=", "self", ".", "parse_param_list", "(", ")", "if", "args", "is", "not", "None", ":", "# may be an empty table", "if", "self", ".", "next_is_rc", "(", "Tokens", ".", "CPAR", ",", "False", ")", ":", "# do not render right hidden", "self", ".", "handle_hidden_right", "(", ")", "# render hidden after new level", "body", "=", "self", ".", "parse_block", "(", ")", "if", "body", ":", "self", ".", "_expected", "=", "[", "]", "token", "=", "self", ".", "next_is_rc", "(", "Tokens", ".", "END", ",", "False", ")", "if", "token", ":", "body", ".", "stop_char", "=", "token", ".", "stop", "self", ".", "success", "(", ")", "return", "args", ",", "body", "else", ":", "self", ".", "abort", "(", ")", "else", ":", "self", ".", "abort", "(", ")", "return", "self", ".", "failure", "(", ")" ]
If success, return a tuple (args, body)
[ "If", "success", "return", "a", "tuple", "(", "args", "body", ")" ]
python
train
45.391304
yero13/na3x
na3x/validation/validator.py
https://github.com/yero13/na3x/blob/b31ef801ea574081125020a7d0f9c4242f8f8b02/na3x/validation/validator.py#L261-L273
def no_intersection(to_validate, constraint, violation_cfg): """ Returns violation message if validated and constraint sets have no intersection :param to_validate: :param constraint: :param violation_cfg: :return: """ if len(constraint) == 0 or len(set(constraint).intersection(to_validate)) > 0: return None else: violation_cfg[Check.CFG_KEY_VIOLATION_MSG] = violation_cfg[Check.CFG_KEY_VIOLATION_MSG].format(constraint) return violation_cfg
[ "def", "no_intersection", "(", "to_validate", ",", "constraint", ",", "violation_cfg", ")", ":", "if", "len", "(", "constraint", ")", "==", "0", "or", "len", "(", "set", "(", "constraint", ")", ".", "intersection", "(", "to_validate", ")", ")", ">", "0", ":", "return", "None", "else", ":", "violation_cfg", "[", "Check", ".", "CFG_KEY_VIOLATION_MSG", "]", "=", "violation_cfg", "[", "Check", ".", "CFG_KEY_VIOLATION_MSG", "]", ".", "format", "(", "constraint", ")", "return", "violation_cfg" ]
Returns violation message if validated and constraint sets have no intersection :param to_validate: :param constraint: :param violation_cfg: :return:
[ "Returns", "violation", "message", "if", "validated", "and", "constraint", "sets", "have", "no", "intersection", ":", "param", "to_validate", ":", ":", "param", "constraint", ":", ":", "param", "violation_cfg", ":", ":", "return", ":" ]
python
train
37.769231
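A usage sketch for no_intersection() from the record above. The violation message text is invented, and importing Check from the same module is an assumption based on the reference to Check.CFG_KEY_VIOLATION_MSG in the function body.

```python
from na3x.validation.validator import no_intersection, Check  # path from this record

cfg = {Check.CFG_KEY_VIOLATION_MSG: 'value must share at least one item with {}'}

print(no_intersection(['a', 'b'], ['b', 'c'], cfg))  # -> None, the sets intersect
print(no_intersection(['a'], ['x', 'y'], cfg))       # -> cfg with the message formatted
```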
FutunnOpen/futuquant
futuquant/trade/trade_query.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/trade/trade_query.py#L259-L284
def pack_req(cls, trd_side, order_type, price, qty, code, adjust_limit, trd_env, sec_mkt_str, acc_id, trd_mkt, conn_id): """Convert from user request for place order to PLS request""" from futuquant.common.pb.Trd_PlaceOrder_pb2 import Request req = Request() serial_no = get_unique_id32() req.c2s.packetID.serialNo = serial_no req.c2s.packetID.connID = conn_id req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env] req.c2s.header.accID = acc_id req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt] req.c2s.trdSide = TRD_SIDE_MAP[trd_side] req.c2s.orderType = ORDER_TYPE_MAP[order_type] req.c2s.code = code req.c2s.qty = qty req.c2s.price = price req.c2s.adjustPrice = adjust_limit != 0 req.c2s.adjustSideAndLimit = adjust_limit proto_qot_mkt = MKT_MAP.get(sec_mkt_str, Qot_Common_pb2.QotMarket_Unknown) proto_trd_sec_mkt = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(proto_qot_mkt, Trd_Common_pb2.TrdSecMarket_Unknown) req.c2s.secMarket = proto_trd_sec_mkt return pack_pb_req(req, ProtoId.Trd_PlaceOrder, conn_id, serial_no)
[ "def", "pack_req", "(", "cls", ",", "trd_side", ",", "order_type", ",", "price", ",", "qty", ",", "code", ",", "adjust_limit", ",", "trd_env", ",", "sec_mkt_str", ",", "acc_id", ",", "trd_mkt", ",", "conn_id", ")", ":", "from", "futuquant", ".", "common", ".", "pb", ".", "Trd_PlaceOrder_pb2", "import", "Request", "req", "=", "Request", "(", ")", "serial_no", "=", "get_unique_id32", "(", ")", "req", ".", "c2s", ".", "packetID", ".", "serialNo", "=", "serial_no", "req", ".", "c2s", ".", "packetID", ".", "connID", "=", "conn_id", "req", ".", "c2s", ".", "header", ".", "trdEnv", "=", "TRD_ENV_MAP", "[", "trd_env", "]", "req", ".", "c2s", ".", "header", ".", "accID", "=", "acc_id", "req", ".", "c2s", ".", "header", ".", "trdMarket", "=", "TRD_MKT_MAP", "[", "trd_mkt", "]", "req", ".", "c2s", ".", "trdSide", "=", "TRD_SIDE_MAP", "[", "trd_side", "]", "req", ".", "c2s", ".", "orderType", "=", "ORDER_TYPE_MAP", "[", "order_type", "]", "req", ".", "c2s", ".", "code", "=", "code", "req", ".", "c2s", ".", "qty", "=", "qty", "req", ".", "c2s", ".", "price", "=", "price", "req", ".", "c2s", ".", "adjustPrice", "=", "adjust_limit", "!=", "0", "req", ".", "c2s", ".", "adjustSideAndLimit", "=", "adjust_limit", "proto_qot_mkt", "=", "MKT_MAP", ".", "get", "(", "sec_mkt_str", ",", "Qot_Common_pb2", ".", "QotMarket_Unknown", ")", "proto_trd_sec_mkt", "=", "QOT_MARKET_TO_TRD_SEC_MARKET_MAP", ".", "get", "(", "proto_qot_mkt", ",", "Trd_Common_pb2", ".", "TrdSecMarket_Unknown", ")", "req", ".", "c2s", ".", "secMarket", "=", "proto_trd_sec_mkt", "return", "pack_pb_req", "(", "req", ",", "ProtoId", ".", "Trd_PlaceOrder", ",", "conn_id", ",", "serial_no", ")" ]
Convert from user request for place order to PLS request
[ "Convert", "from", "user", "request", "for", "place", "order", "to", "PLS", "request" ]
python
train
47.038462
mbedmicro/pyOCD
pyocd/__main__.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/__main__.py#L463-L485
def do_erase(self): """! @brief Handle 'erase' subcommand.""" self._increase_logging(["pyocd.tools.loader", "pyocd"]) session = ConnectHelper.session_with_chosen_probe( project_dir=self._args.project_dir, config_file=self._args.config, user_script=self._args.script, no_config=self._args.no_config, pack=self._args.pack, unique_id=self._args.unique_id, target_override=self._args.target_override, frequency=self._args.frequency, blocking=False, **convert_session_options(self._args.options)) if session is None: sys.exit(1) with session: mode = self._args.erase_mode or loader.FlashEraser.Mode.SECTOR eraser = loader.FlashEraser(session, mode) addresses = flatten_args(self._args.addresses) eraser.erase(addresses)
[ "def", "do_erase", "(", "self", ")", ":", "self", ".", "_increase_logging", "(", "[", "\"pyocd.tools.loader\"", ",", "\"pyocd\"", "]", ")", "session", "=", "ConnectHelper", ".", "session_with_chosen_probe", "(", "project_dir", "=", "self", ".", "_args", ".", "project_dir", ",", "config_file", "=", "self", ".", "_args", ".", "config", ",", "user_script", "=", "self", ".", "_args", ".", "script", ",", "no_config", "=", "self", ".", "_args", ".", "no_config", ",", "pack", "=", "self", ".", "_args", ".", "pack", ",", "unique_id", "=", "self", ".", "_args", ".", "unique_id", ",", "target_override", "=", "self", ".", "_args", ".", "target_override", ",", "frequency", "=", "self", ".", "_args", ".", "frequency", ",", "blocking", "=", "False", ",", "*", "*", "convert_session_options", "(", "self", ".", "_args", ".", "options", ")", ")", "if", "session", "is", "None", ":", "sys", ".", "exit", "(", "1", ")", "with", "session", ":", "mode", "=", "self", ".", "_args", ".", "erase_mode", "or", "loader", ".", "FlashEraser", ".", "Mode", ".", "SECTOR", "eraser", "=", "loader", ".", "FlashEraser", "(", "session", ",", "mode", ")", "addresses", "=", "flatten_args", "(", "self", ".", "_args", ".", "addresses", ")", "eraser", ".", "erase", "(", "addresses", ")" ]
! @brief Handle 'erase' subcommand.
[ "!" ]
python
train
47.565217
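A programmatic sketch mirroring what do_erase() in the record above does, restricted to the calls that appear in it. The loader import location and the address range string format are assumptions; check them against the pyocd version in use.

```python
from pyocd.core.helpers import ConnectHelper
from pyocd.flash import loader  # location assumed to match the loader.FlashEraser reference

# Auto-select a connected debug probe without blocking if none is attached.
session = ConnectHelper.session_with_chosen_probe(blocking=False)
if session is not None:
    with session:
        eraser = loader.FlashEraser(session, loader.FlashEraser.Mode.SECTOR)
        eraser.erase(["0x8000-0x10000"])  # address range format assumed
```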