Dataset schema (column: type, observed value range):

  repo              string (length 7 to 55)
  path              string (length 4 to 223)
  url               string (length 87 to 315)
  code              string (length 75 to 104k)
  code_tokens       list
  docstring         string (length 1 to 46.9k)
  docstring_tokens  list
  language          string (1 class: python)
  partition         string (3 values: train / valid / test)
  avg_line_len      float64 (7.91 to 980)
Yelp/kafka-utils
kafka_utils/util/__init__.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L76-L85
def positive_nonzero_int(string):
    """Convert string to positive integer greater than zero."""
    error_msg = 'Positive non-zero integer required, {string} given.'.format(string=string)
    try:
        value = int(string)
    except ValueError:
        raise ArgumentTypeError(error_msg)
    if value <= 0:
        raise ArgumentTypeError(error_msg)
    return value
[ "def", "positive_nonzero_int", "(", "string", ")", ":", "error_msg", "=", "'Positive non-zero integer required, {string} given.'", ".", "format", "(", "string", "=", "string", ")", "try", ":", "value", "=", "int", "(", "string", ")", "except", "ValueError", ":", "raise", "ArgumentTypeError", "(", "error_msg", ")", "if", "value", "<=", "0", ":", "raise", "ArgumentTypeError", "(", "error_msg", ")", "return", "value" ]
Convert string to positive integer greater than zero.
[ "Convert", "string", "to", "positive", "integer", "greater", "than", "zero", "." ]
python
train
36.2
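positive_nonzero_int raises argparse.ArgumentTypeError, so its natural role is as an argparse type= validator; a minimal usage sketch (the --partitions flag is hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--partitions', type=positive_nonzero_int)
args = parser.parse_args(['--partitions', '3'])   # args.partitions == 3
# parser.parse_args(['--partitions', '0']) exits with the error message above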
mozilla/mozdownload
mozdownload/utils.py
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/utils.py#L12-L18
def urljoin(*fragments):
    """Concatenate multi part strings into urls."""
    # Strip any existing trailing slashes from all fragments except the last one
    parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
    parts.append(fragments[-1])
    return '/'.join(parts)
[ "def", "urljoin", "(", "*", "fragments", ")", ":", "# Strip possible already existent final slashes of fragments except for the last one", "parts", "=", "[", "fragment", ".", "rstrip", "(", "'/'", ")", "for", "fragment", "in", "fragments", "[", ":", "len", "(", "fragments", ")", "-", "1", "]", "]", "parts", ".", "append", "(", "fragments", "[", "-", "1", "]", ")", "return", "'/'", ".", "join", "(", "parts", ")" ]
Concatenate multi part strings into urls.
[ "Concatenate", "multi", "part", "strings", "into", "urls", "." ]
python
train
43
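Because every fragment except the last is stripped of trailing slashes before joining, duplicate separators collapse while a deliberate trailing slash on the final fragment survives; for example:

urljoin('https://example.com/', 'pub/', 'firefox')   # 'https://example.com/pub/firefox'
urljoin('https://example.com', 'pub', 'firefox/')    # 'https://example.com/pub/firefox/'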
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/werkzeug/wrappers.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/werkzeug/wrappers.py#L370-L380
def _get_stream_for_parsing(self):
    """This is the same as accessing :attr:`stream` with the difference
    that if it finds cached data from calling :meth:`get_data` first it
    will create a new stream out of the cached data.

    .. versionadded:: 0.9.3
    """
    cached_data = getattr(self, '_cached_data', None)
    if cached_data is not None:
        return BytesIO(cached_data)
    return self.stream
[ "def", "_get_stream_for_parsing", "(", "self", ")", ":", "cached_data", "=", "getattr", "(", "self", ",", "'_cached_data'", ",", "None", ")", "if", "cached_data", "is", "not", "None", ":", "return", "BytesIO", "(", "cached_data", ")", "return", "self", ".", "stream" ]
This is the same as accessing :attr:`stream` with the difference that if it finds cached data from calling :meth:`get_data` first it will create a new stream out of the cached data. .. versionadded:: 0.9.3
[ "This", "is", "the", "same", "as", "accessing", ":", "attr", ":", "stream", "with", "the", "difference", "that", "if", "it", "finds", "cached", "data", "from", "calling", ":", "meth", ":", "get_data", "first", "it", "will", "create", "a", "new", "stream", "out", "of", "the", "cached", "data", "." ]
python
test
39.909091
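The cache check matters once :meth:`get_data` has drained and cached the underlying stream; a hedged sketch, with req standing in for a hypothetical request object:

body = req.get_data()                   # reads the stream and caches it as _cached_data
req._get_stream_for_parsing().read()    # served from BytesIO(cached_data), not the drained stream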
saltstack/salt
salt/states/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L3068-L3088
def _get_recurse_set(recurse):
    '''
    Convert *recurse* definition to a set of strings.

    Raises TypeError or ValueError when *recurse* has wrong structure.
    '''
    if not recurse:
        return set()
    if not isinstance(recurse, list):
        raise TypeError('"recurse" must be formed as a list of strings')
    try:
        recurse_set = set(recurse)
    except TypeError:  # non-hashable elements
        recurse_set = None
    if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
        raise ValueError('Types for "recurse" limited to {0}.'.format(
            ', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
    if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
        raise ValueError('Must not specify "recurse" options "ignore_files"'
                         ' and "ignore_dirs" at the same time.')
    return recurse_set
[ "def", "_get_recurse_set", "(", "recurse", ")", ":", "if", "not", "recurse", ":", "return", "set", "(", ")", "if", "not", "isinstance", "(", "recurse", ",", "list", ")", ":", "raise", "TypeError", "(", "'\"recurse\" must be formed as a list of strings'", ")", "try", ":", "recurse_set", "=", "set", "(", "recurse", ")", "except", "TypeError", ":", "# non-hashable elements", "recurse_set", "=", "None", "if", "recurse_set", "is", "None", "or", "not", "set", "(", "_RECURSE_TYPES", ")", ">=", "recurse_set", ":", "raise", "ValueError", "(", "'Types for \"recurse\" limited to {0}.'", ".", "format", "(", "', '", ".", "join", "(", "'\"{0}\"'", ".", "format", "(", "rtype", ")", "for", "rtype", "in", "_RECURSE_TYPES", ")", ")", ")", "if", "'ignore_files'", "in", "recurse_set", "and", "'ignore_dirs'", "in", "recurse_set", ":", "raise", "ValueError", "(", "'Must not specify \"recurse\" options \"ignore_files\"'", "' and \"ignore_dirs\" at the same time.'", ")", "return", "recurse_set" ]
Convert *recurse* definition to a set of strings. Raises TypeError or ValueError when *recurse* has wrong structure.
[ "Converse", "*", "recurse", "*", "definition", "to", "a", "set", "of", "strings", "." ]
python
train
41.666667
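A hedged usage sketch; _RECURSE_TYPES is a module-level list in salt/states/file.py, assumed here to contain at least 'user', 'mode', 'ignore_files' and 'ignore_dirs':

_get_recurse_set(None)                              # set()
_get_recurse_set(['user', 'mode'])                  # {'user', 'mode'}
_get_recurse_set('user')                            # TypeError: not a list
_get_recurse_set(['ignore_files', 'ignore_dirs'])   # ValueError: mutually exclusive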
Crunch-io/crunch-cube
src/cr/cube/cube_slice.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/cube_slice.py#L552-L564
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None):
    """Indices of columns where p < alpha for column-comparison t-tests.

    Returns an array of tuples of columns that are significant at p < alpha,
    from a series of pairwise t-tests. The `only_larger` argument (default
    True) restricts the indices to the direction of the test statistic; if
    False, indices of values *significantly smaller* than each cell are
    indicated as well.
    """
    return PairwiseSignificance(
        self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims
    ).pairwise_indices
[ "def", "pairwise_indices", "(", "self", ",", "alpha", "=", "0.05", ",", "only_larger", "=", "True", ",", "hs_dims", "=", "None", ")", ":", "return", "PairwiseSignificance", "(", "self", ",", "alpha", "=", "alpha", ",", "only_larger", "=", "only_larger", ",", "hs_dims", "=", "hs_dims", ")", ".", "pairwise_indices" ]
Indices of columns where p < alpha for column-comparison t-tests. Returns an array of tuples of columns that are significant at p < alpha, from a series of pairwise t-tests. The only_larger argument (default True) restricts the indices to the direction of the test statistic; if False, indices of values *significantly smaller* than each cell are indicated as well.
[ "Indices", "of", "columns", "where", "p", "<", "alpha", "for", "column", "-", "comparison", "t", "-", "tests" ]
python
train
45.923077
basecrm/basecrm-python
basecrm/http_client.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/http_client.py#L62-L73
def put(self, url, body=None, **kwargs):
    """
    Send a PUT request.

    :param str url: Sub URL for the request. You MUST NOT include the base url or the api version prefix.
    :param dict body: (optional) Dictionary of body attributes that will be wrapped with an envelope and JSON encoded.
    :param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`.

    :return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text)
    :rtype: tuple
    """
    return self.request('put', url, body=body, **kwargs)
[ "def", "put", "(", "self", ",", "url", ",", "body", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "request", "(", "'put'", ",", "url", ",", "body", "=", "body", ",", "*", "*", "kwargs", ")" ]
Send a PUT request. :param str url: Sub URL for the request. You MUST NOT include the base url or the api version prefix. :param dict body: (optional) Dictionary of body attributes that will be wrapped with an envelope and JSON encoded. :param dict **kwargs: (optional) Other parameters which are directly passed to :func:`requests.request`. :return: Tuple of three elements: (http status code, headers, response - either parsed json or plain text) :rtype: tuple
[ "Send", "a", "PUT", "request", "." ]
python
train
52.083333
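A hedged call sketch, with client standing in for a configured basecrm HttpClient and a hypothetical resource path:

status_code, headers, data = client.put('/contacts/1', body={'name': 'Ada Lovelace'})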
jobovy/galpy
galpy/util/bovy_coords.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L906-L950
def XYZ_to_galcenrect(X, Y, Z, Xsun=1., Zsun=0., _extra_rot=True):
    """
    NAME:
       XYZ_to_galcenrect
    PURPOSE:
       transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates
    INPUT:
       X - X
       Y - Y
       Z - Z
       Xsun - cylindrical distance to the GC
       Zsun - Sun's height above the midplane
       _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
    OUTPUT:
       (Xg, Yg, Zg)
    HISTORY:
       2010-09-24 - Written - Bovy (NYU)
       2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
       2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
    """
    if _extra_rot:
        X, Y, Z = nu.dot(galcen_extra_rot, nu.array([X, Y, Z]))
    dgc = nu.sqrt(Xsun**2. + Zsun**2.)
    costheta, sintheta = Xsun / dgc, Zsun / dgc
    return nu.dot(nu.array([[costheta, 0., -sintheta],
                            [0., 1., 0.],
                            [sintheta, 0., costheta]]),
                  nu.array([-X + dgc, Y, nu.sign(Xsun) * Z])).T
[ "def", "XYZ_to_galcenrect", "(", "X", ",", "Y", ",", "Z", ",", "Xsun", "=", "1.", ",", "Zsun", "=", "0.", ",", "_extra_rot", "=", "True", ")", ":", "if", "_extra_rot", ":", "X", ",", "Y", ",", "Z", "=", "nu", ".", "dot", "(", "galcen_extra_rot", ",", "nu", ".", "array", "(", "[", "X", ",", "Y", ",", "Z", "]", ")", ")", "dgc", "=", "nu", ".", "sqrt", "(", "Xsun", "**", "2.", "+", "Zsun", "**", "2.", ")", "costheta", ",", "sintheta", "=", "Xsun", "/", "dgc", ",", "Zsun", "/", "dgc", "return", "nu", ".", "dot", "(", "nu", ".", "array", "(", "[", "[", "costheta", ",", "0.", ",", "-", "sintheta", "]", ",", "[", "0.", ",", "1.", ",", "0.", "]", ",", "[", "sintheta", ",", "0.", ",", "costheta", "]", "]", ")", ",", "nu", ".", "array", "(", "[", "-", "X", "+", "dgc", ",", "Y", ",", "nu", ".", "sign", "(", "Xsun", ")", "*", "Z", "]", ")", ")", ".", "T" ]
NAME: XYZ_to_galcenrect PURPOSE: transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates INPUT: X - X Y - Y Z - Z Xsun - cylindrical distance to the GC Zsun - Sun's height above the midplane _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition OUTPUT: (Xg, Yg, Zg) HISTORY: 2010-09-24 - Written - Bovy (NYU) 2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT) 2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
[ "NAME", ":" ]
python
train
25.133333
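A sanity check that follows directly from the code: with the extra rotation disabled, the solar position (0, 0, 0) maps to (Xsun, 0, Zsun), since the rotation matrix applied to [dgc, 0, 0] gives [costheta*dgc, 0, sintheta*dgc]:

XYZ_to_galcenrect(0., 0., 0., Xsun=1., Zsun=0., _extra_rot=False)   # array([1., 0., 0.])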
svenevs/exhale
exhale/parse.py
https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/parse.py#L22-L167
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
    '''
    .. note::

       This method does not cover all possible input doxygen types!  This means
       that when an unsupported / unrecognized doxygen tag appears in the xml
       listing, the **raw xml will appear on the file page being documented**.
       This traverser is greedily designed to work for what testing revealed as
       the *bare minimum* required.  **Please** see the :ref:`Doxygen ALIASES
       <doxygen_aliases>` section for how to bypass invalid documentation coming
       from Exhale.

    Recursive traverser method to parse the input parsed xml tree and convert
    the nodes into raw reStructuredText from the input doxygen format.  **Not
    all doxygen markup types are handled**.  The current supported doxygen xml
    markup tags are:

    - ``para``
    - ``orderedlist``
    - ``itemizedlist``
    - ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
    - ``formula``
    - ``ref``
    - ``emphasis`` (e.g., using `em`_)
    - ``computeroutput`` (e.g., using `c`_)
    - ``bold`` (e.g., using `b`_)

    .. _em: http://www.doxygen.nl/manual/commands.html#cmdem
    .. _c:  http://www.doxygen.nl/manual/commands.html#cmdc
    .. _b:  http://www.doxygen.nl/manual/commands.html#cmdb

    The goal of this method is to "explode" input ``xml`` data into raw
    reStructuredText to put at the top of the file pages.  Wielding beautiful
    soup, this essentially means that you need to expand every non ``para`` tag
    into a ``para``.  So if an ordered list appears in the xml, then the raw
    listing must be built up from the child nodes.  After this is finished,
    though, the :meth:`bs4.BeautifulSoup.get_text` method will happily remove
    all remaining ``para`` tags to produce the final reStructuredText
    **provided that** the original "exploded" tags (such as the ordered list
    definition and its ``listitem`` children) have been *removed* from the
    soup.

    **Parameters**

    ``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
        The text root object that is calling this method.  This parameter is
        necessary in order to retrieve / convert the doxygen ``\\ref SomeClass``
        tag and link it to the appropriate node page.  The ``textRoot`` object
        is not modified by executing this method.

    ``currentTag`` (:class:`bs4.element.Tag`)
        The current xml tag being processed, either to have its contents
        directly modified or unraveled.

    ``level`` (int)
        .. warning::

           This variable does **not** represent "recursion depth" (as one
           would typically see with a variable like this)!

        The **block** level of indentation currently being parsed.  Because we
        are parsing a tree in order to generate raw reStructuredText code, we
        need to maintain a notion of "block level".  This means tracking when
        there are nested structures such as a list within a list:

        .. code-block:: rst

           1. This is an outer ordered list.

               - There is a nested unordered list.
               - It is a child of the outer list.

           2. This is another item in the outer list.

        The outer ordered (numbers ``1`` and ``2``) list is at indentation
        level ``0``, and the inner unordered (``-``) list is at indentation
        level ``1``.  Meaning that level is used as

        .. code-block:: py

           indent = " " * level
           # ... later ...
           some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)

        to indent the ordered / unordered lists accordingly.
    '''
    if not currentTag:
        return

    if prefix:
        currentTag.insert_before(prefix)
    if postfix:
        currentTag.insert_after(postfix)

    children = currentTag.findChildren(recursive=False)
    indent = " " * level
    if currentTag.name == "orderedlist":
        idx = 1
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
            idx += 1
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "itemizedlist":
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "verbatim":
        # TODO: find relevant section in breathe.sphinxrenderer and include the versions
        # for both leading /// as well as just plain embed:rst.
        leading_asterisk = "embed:rst:leading-asterisk\n*"
        if currentTag.string.startswith(leading_asterisk):
            cont = currentTag.string.replace(leading_asterisk, "")
            cont = textwrap.dedent(cont.replace("\n*", "\n"))
            currentTag.string = cont
    elif currentTag.name == "formula":
        currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
    elif currentTag.name == "ref":
        signal = None
        if "refid" not in currentTag.attrs:
            signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(
                currentTag.attrs
            )
        else:
            refid = currentTag.attrs["refid"]
            if refid not in textRoot.node_by_refid:
                signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
            else:
                currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)

        if signal:  # << verboseBuild
            utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
    elif currentTag.name == "emphasis":
        currentTag.string = "*{0}*".format(currentTag.string)
    elif currentTag.name == "computeroutput":
        currentTag.string = "``{0}``".format(currentTag.string)
    elif currentTag.name == "bold":
        currentTag.string = "**{0}**".format(currentTag.string)
    else:
        ctr = 0
        for child in children:
            c_prefix = None
            c_postfix = None
            if ctr > 0 and child.name == "para":
                c_prefix = "\n{0}".format(indent)

            walk(textRoot, child, level, c_prefix, c_postfix)

            ctr += 1
[ "def", "walk", "(", "textRoot", ",", "currentTag", ",", "level", ",", "prefix", "=", "None", ",", "postfix", "=", "None", ",", "unwrapUntilPara", "=", "False", ")", ":", "if", "not", "currentTag", ":", "return", "if", "prefix", ":", "currentTag", ".", "insert_before", "(", "prefix", ")", "if", "postfix", ":", "currentTag", ".", "insert_after", "(", "postfix", ")", "children", "=", "currentTag", ".", "findChildren", "(", "recursive", "=", "False", ")", "indent", "=", "\" \"", "*", "level", "if", "currentTag", ".", "name", "==", "\"orderedlist\"", ":", "idx", "=", "1", "for", "child", "in", "children", ":", "walk", "(", "textRoot", ",", "child", ",", "level", "+", "1", ",", "\"\\n{0}{1}. \"", ".", "format", "(", "indent", ",", "idx", ")", ",", "None", ",", "True", ")", "idx", "+=", "1", "child", ".", "unwrap", "(", ")", "currentTag", ".", "unwrap", "(", ")", "elif", "currentTag", ".", "name", "==", "\"itemizedlist\"", ":", "for", "child", "in", "children", ":", "walk", "(", "textRoot", ",", "child", ",", "level", "+", "1", ",", "\"\\n{0}- \"", ".", "format", "(", "indent", ")", ",", "None", ",", "True", ")", "child", ".", "unwrap", "(", ")", "currentTag", ".", "unwrap", "(", ")", "elif", "currentTag", ".", "name", "==", "\"verbatim\"", ":", "# TODO: find relevant section in breathe.sphinxrenderer and include the versions", "# for both leading /// as well as just plain embed:rst.", "leading_asterisk", "=", "\"embed:rst:leading-asterisk\\n*\"", "if", "currentTag", ".", "string", ".", "startswith", "(", "leading_asterisk", ")", ":", "cont", "=", "currentTag", ".", "string", ".", "replace", "(", "leading_asterisk", ",", "\"\"", ")", "cont", "=", "textwrap", ".", "dedent", "(", "cont", ".", "replace", "(", "\"\\n*\"", ",", "\"\\n\"", ")", ")", "currentTag", ".", "string", "=", "cont", "elif", "currentTag", ".", "name", "==", "\"formula\"", ":", "currentTag", ".", "string", "=", "\":math:`{0}`\"", ".", "format", "(", "currentTag", ".", "string", "[", "1", ":", "-", "1", "]", ")", "elif", "currentTag", ".", "name", "==", "\"ref\"", ":", "signal", "=", "None", "if", "\"refid\"", "not", "in", "currentTag", ".", "attrs", ":", "signal", "=", "\"No 'refid' in `ref` tag attributes of file documentation. 
Attributes were: {0}\"", ".", "format", "(", "currentTag", ".", "attrs", ")", "else", ":", "refid", "=", "currentTag", ".", "attrs", "[", "\"refid\"", "]", "if", "refid", "not", "in", "textRoot", ".", "node_by_refid", ":", "signal", "=", "\"Found unknown 'refid' of [{0}] in file level documentation.\"", ".", "format", "(", "refid", ")", "else", ":", "currentTag", ".", "string", "=", "\":ref:`{0}`\"", ".", "format", "(", "textRoot", ".", "node_by_refid", "[", "refid", "]", ".", "link_name", ")", "if", "signal", ":", "# << verboseBuild", "utils", ".", "verbose_log", "(", "signal", ",", "utils", ".", "AnsiColors", ".", "BOLD_YELLOW", ")", "elif", "currentTag", ".", "name", "==", "\"emphasis\"", ":", "currentTag", ".", "string", "=", "\"*{0}*\"", ".", "format", "(", "currentTag", ".", "string", ")", "elif", "currentTag", ".", "name", "==", "\"computeroutput\"", ":", "currentTag", ".", "string", "=", "\"``{0}``\"", ".", "format", "(", "currentTag", ".", "string", ")", "elif", "currentTag", ".", "name", "==", "\"bold\"", ":", "currentTag", ".", "string", "=", "\"**{0}**\"", ".", "format", "(", "currentTag", ".", "string", ")", "else", ":", "ctr", "=", "0", "for", "child", "in", "children", ":", "c_prefix", "=", "None", "c_postfix", "=", "None", "if", "ctr", ">", "0", "and", "child", ".", "name", "==", "\"para\"", ":", "c_prefix", "=", "\"\\n{0}\"", ".", "format", "(", "indent", ")", "walk", "(", "textRoot", ",", "child", ",", "level", ",", "c_prefix", ",", "c_postfix", ")", "ctr", "+=", "1" ]
.. note:: This method does not cover all possible input doxygen types! This means that when an unsupported / unrecognized doxygen tag appears in the xml listing, the **raw xml will appear on the file page being documented**. This traverser is greedily designed to work for what testing revealed as the *bare minimum* required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section for how to bypass invalid documentation coming from Exhale. Recursive traverser method to parse the input parsed xml tree and convert the nodes into raw reStructuredText from the input doxygen format. **Not all doxygen markup types are handled**. The current supported doxygen xml markup tags are: - ``para`` - ``orderedlist`` - ``itemizedlist`` - ``verbatim`` (specifically: ``embed:rst:leading-asterisk``) - ``formula`` - ``ref`` - ``emphasis`` (e.g., using `em`_) - ``computeroutput`` (e.g., using `c`_) - ``bold`` (e.g., using `b`_) .. _em: http://www.doxygen.nl/manual/commands.html#cmdem .. _c: http://www.doxygen.nl/manual/commands.html#cmdc .. _b: http://www.doxygen.nl/manual/commands.html#cmdb The goal of this method is to "explode" input ``xml`` data into raw reStructuredText to put at the top of the file pages. Wielding beautiful soup, this essentially means that you need to expand every non ``para`` tag into a ``para``. So if an ordered list appears in the xml, then the raw listing must be built up from the child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text` method will happily remove all remaining ``para`` tags to produce the final reStructuredText **provided that** the original "exploded" tags (such as the ordered list definition and its ``listitem`` children) have been *removed* from the soup. **Parameters** ``textRoot`` (:class:`~exhale.graph.ExhaleRoot`) The text root object that is calling this method. This parameter is necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag and link it to the appropriate node page. The ``textRoot`` object is not modified by executing this method. ``currentTag`` (:class:`bs4.element.Tag`) The current xml tag being processed, either to have its contents directly modified or unraveled. ``level`` (int) .. warning:: This variable does **not** represent "recursion depth" (as one would typically see with a variable like this)! The **block** level of indentation currently being parsed. Because we are parsing a tree in order to generate raw reStructuredText code, we need to maintain a notion of "block level". This means tracking when there are nested structures such as a list within a list: .. code-block:: rst 1. This is an outer ordered list. - There is a nested unordered list. - It is a child of the outer list. 2. This is another item in the outer list. The outer ordered (numbers ``1`` and ``2``) list is at indentation level ``0``, and the inner unordered (``-``) list is at indentation level ``1``. Meaning that level is used as .. code-block:: py indent = " " * level # ... later ... some_text = "\\n{indent}{text}".format(indent=indent, text=some_text) to indent the ordered / unordered lists accordingly.
[ "..", "note", "::" ]
python
train
42.863014
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L1608-L1613
def list_dscp_marking_rules(self, policy_id, retrieve_all=True, **_params):
    """Fetches a list of all DSCP marking rules for the given policy."""
    return self.list('dscp_marking_rules',
                     self.qos_dscp_marking_rules_path % policy_id,
                     retrieve_all, **_params)
[ "def", "list_dscp_marking_rules", "(", "self", ",", "policy_id", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "list", "(", "'dscp_marking_rules'", ",", "self", ".", "qos_dscp_marking_rules_path", "%", "policy_id", ",", "retrieve_all", ",", "*", "*", "_params", ")" ]
Fetches a list of all DSCP marking rules for the given policy.
[ "Fetches", "a", "list", "of", "all", "DSCP", "marking", "rules", "for", "the", "given", "policy", "." ]
python
train
57.833333
inveniosoftware/invenio-deposit
invenio_deposit/api.py
https://github.com/inveniosoftware/invenio-deposit/blob/f243ea1d01ab0a3bc92ade3262d1abdd2bc32447/invenio_deposit/api.py#L204-L206
def commit(self, *args, **kwargs):
    """Store changes on current instance in database and index it."""
    return super(Deposit, self).commit(*args, **kwargs)
[ "def", "commit", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "Deposit", ",", "self", ")", ".", "commit", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Store changes on current instance in database and index it.
[ "Store", "changes", "on", "current", "instance", "in", "database", "and", "index", "it", "." ]
python
valid
55.333333
cltk/cltk
cltk/corpus/arabic/utils/pyarabic/araby.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/arabic/utils/pyarabic/araby.py#L1068-L1086
def tokenize(text=""):
    """
    Tokenize text into words.

    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    if text == '':
        return []
    else:
        # split tokens
        mylist = TOKEN_PATTERN.split(text)
        # don't remove newline \n
        mylist = [TOKEN_REPLACE.sub('', x) for x in mylist if x]
        # remove empty substring
        mylist = [x for x in mylist if x]
        return mylist
[ "def", "tokenize", "(", "text", "=", "\"\"", ")", ":", "if", "text", "==", "''", ":", "return", "[", "]", "else", ":", "# split tokens", "mylist", "=", "TOKEN_PATTERN", ".", "split", "(", "text", ")", "# don't remove newline \\n", "mylist", "=", "[", "TOKEN_REPLACE", ".", "sub", "(", "''", ",", "x", ")", "for", "x", "in", "mylist", "if", "x", "]", "# remove empty substring", "mylist", "=", "[", "x", "for", "x", "in", "mylist", "if", "x", "]", "return", "mylist" ]
Tokenize text into words. @param text: the input text. @type text: unicode. @return: list of words. @rtype: list.
[ "Tokenize", "text", "into", "words", "." ]
python
train
24.421053
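A hedged usage sketch; TOKEN_PATTERN and TOKEN_REPLACE are module-level regexes in araby.py, assumed here to split on whitespace and strip punctuation respectively:

tokenize(u"العربية لغة جميلة")   # [u'العربية', u'لغة', u'جميلة']
tokenize()                        # [] (the default argument is the empty string)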
mental32/spotify.py
spotify/models/player.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L74-L83
async def pause(self, *, device: Optional[SomeDevice] = None):
    """Pause playback on the user’s account.

    Parameters
    ----------
    device : Optional[:obj:`SomeDevice`]
        The Device object or id of the device this command is targeting.
        If not supplied, the user’s currently active device is the target.
    """
    await self._user.http.pause_playback(device_id=str(device))
[ "async", "def", "pause", "(", "self", ",", "*", ",", "device", ":", "Optional", "[", "SomeDevice", "]", "=", "None", ")", ":", "await", "self", ".", "_user", ".", "http", ".", "pause_playback", "(", "device_id", "=", "str", "(", "device", ")", ")" ]
Pause playback on the user’s account. Parameters ---------- device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
[ "Pause", "playback", "on", "the", "user’s", "account", "." ]
python
test
42.2
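A hedged async usage sketch; player is assumed to be a Player instance obtained from a connected spotify.py user, and some_device a Device object:

await player.pause()                     # targets the user's currently active device
await player.pause(device=some_device)   # targets a specific device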
kivy/python-for-android
pythonforandroid/recipes/openssl/__init__.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/recipes/openssl/__init__.py#L88-L93
def include_flags(self, arch):
    '''Returns a string with the include folders'''
    openssl_includes = join(self.get_build_dir(arch.arch), 'include')
    return (' -I' + openssl_includes +
            ' -I' + join(openssl_includes, 'internal') +
            ' -I' + join(openssl_includes, 'openssl'))
[ "def", "include_flags", "(", "self", ",", "arch", ")", ":", "openssl_includes", "=", "join", "(", "self", ".", "get_build_dir", "(", "arch", ".", "arch", ")", ",", "'include'", ")", "return", "(", "' -I'", "+", "openssl_includes", "+", "' -I'", "+", "join", "(", "openssl_includes", ",", "'internal'", ")", "+", "' -I'", "+", "join", "(", "openssl_includes", ",", "'openssl'", ")", ")" ]
Returns a string with the include folders
[ "Returns", "a", "string", "with", "the", "include", "folders" ]
python
train
53
Polyconseil/django-cid
cid/locals.py
https://github.com/Polyconseil/django-cid/blob/43415c8bbc91aa03983384072dbc1d2ecdeb2852/cid/locals.py#L15-L28
def get_cid():
    """Return the currently set correlation id (if any).

    If no correlation id has been set and ``CID_GENERATE`` is enabled
    in the settings, a new correlation id is set and returned.

    FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)`
    We want the simplest thing here and let `generate_new_cid` do the job.
    """
    cid = getattr(_thread_locals, 'CID', None)
    if cid is None and getattr(settings, 'CID_GENERATE', False):
        cid = str(uuid.uuid4())
        set_cid(cid)
    return cid
[ "def", "get_cid", "(", ")", ":", "cid", "=", "getattr", "(", "_thread_locals", ",", "'CID'", ",", "None", ")", "if", "cid", "is", "None", "and", "getattr", "(", "settings", ",", "'CID_GENERATE'", ",", "False", ")", ":", "cid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "set_cid", "(", "cid", ")", "return", "cid" ]
Return the currently set correlation id (if any). If no correlation id has been set and ``CID_GENERATE`` is enabled in the settings, a new correlation id is set and returned. FIXME (dbaty): in version 2, just `return getattr(_thread_locals, 'CID', None)` We want the simplest thing here and let `generate_new_cid` do the job.
[ "Return", "the", "currently", "set", "correlation", "id", "(", "if", "any", ")", "." ]
python
train
38.571429
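The id lives in thread-local storage, so each thread sees its own value; a minimal sketch using set_cid, the companion setter in cid.locals:

from cid.locals import get_cid, set_cid

set_cid('req-42')
assert get_cid() == 'req-42'
# with no cid set and CID_GENERATE = True in the Django settings,
# get_cid() would instead mint, store and return a fresh uuid4 string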
crunchyroll/ef-open
efopen/ef_aws_resolver.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L199-L216
def ec2_security_group_security_group_id(self, lookup, default=None):
    """
    Args:
      lookup: the friendly name of a security group to look up
      default: the optional value to return if lookup failed; returns None if not set

    Returns:
      Security group ID if target found or default/None if no match
    """
    try:
        response = EFAwsResolver.__CLIENTS["ec2"].describe_security_groups(Filters=[{
            'Name': 'group-name',
            'Values': [lookup]
        }])
    except:
        return default
    if len(response["SecurityGroups"]) > 0:
        return response["SecurityGroups"][0]["GroupId"]
    else:
        return default
[ "def", "ec2_security_group_security_group_id", "(", "self", ",", "lookup", ",", "default", "=", "None", ")", ":", "try", ":", "response", "=", "EFAwsResolver", ".", "__CLIENTS", "[", "\"ec2\"", "]", ".", "describe_security_groups", "(", "Filters", "=", "[", "{", "'Name'", ":", "'group-name'", ",", "'Values'", ":", "[", "lookup", "]", "}", "]", ")", "except", ":", "return", "default", "if", "len", "(", "response", "[", "\"SecurityGroups\"", "]", ")", ">", "0", ":", "return", "response", "[", "\"SecurityGroups\"", "]", "[", "0", "]", "[", "\"GroupId\"", "]", "else", ":", "return", "default" ]
Args: lookup: the friendly name of a security group to look up default: the optional value to return if lookup failed; returns None if not set Returns: Security group ID if target found or default/None if no match
[ "Args", ":", "lookup", ":", "the", "friendly", "name", "of", "a", "security", "group", "to", "look", "up", "default", ":", "the", "optional", "value", "to", "return", "if", "lookup", "failed", ";", "returns", "None", "if", "not", "set", "Returns", ":", "Security", "group", "ID", "if", "target", "found", "or", "default", "/", "None", "if", "no", "match" ]
python
train
34.444444
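A hedged lookup sketch; resolver stands in for an EFAwsResolver whose EC2 client has been initialized:

sg_id = resolver.ec2_security_group_security_group_id('staging-web', default='sg-00000000')
# first matching GroupId, or 'sg-00000000' if the lookup fails or finds nothing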
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5393-L5400
def _trim_front(strings):
    """
    Trims leading spaces that are common to all strings.
    """
    trimmed = strings
    while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
        trimmed = [x[1:] for x in trimmed]
    return trimmed
[ "def", "_trim_front", "(", "strings", ")", ":", "trimmed", "=", "strings", "while", "len", "(", "strings", ")", ">", "0", "and", "all", "(", "x", "[", "0", "]", "==", "' '", "for", "x", "in", "trimmed", ")", ":", "trimmed", "=", "[", "x", "[", "1", ":", "]", "for", "x", "in", "trimmed", "]", "return", "trimmed" ]
Trims leading spaces that are common to all strings.
[ "Trims", "zeros", "and", "decimal", "points", "." ]
python
train
27.5
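A worked example of the loop: one column of leading spaces is removed per iteration, until some string no longer starts with a space:

_trim_front([' 1.0', ' 2.5'])   # ['1.0', '2.5']
_trim_front(['  a', ' b'])      # [' a', 'b']  (stops once 'b' has no leading space)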
edx/edx-enterprise
enterprise/api_client/discovery.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api_client/discovery.py#L331-L345
def get_program_course_keys(self, program_uuid):
    """
    Get a list of the course IDs (not course run IDs) contained in the program.

    Arguments:
        program_uuid (str): Program UUID in string form

    Returns:
        list(str): List of course keys in string form that are included in the program
    """
    program_details = self.get_program_by_uuid(program_uuid)
    if not program_details:
        return []
    return [course['key'] for course in program_details.get('courses', [])]
[ "def", "get_program_course_keys", "(", "self", ",", "program_uuid", ")", ":", "program_details", "=", "self", ".", "get_program_by_uuid", "(", "program_uuid", ")", "if", "not", "program_details", ":", "return", "[", "]", "return", "[", "course", "[", "'key'", "]", "for", "course", "in", "program_details", ".", "get", "(", "'courses'", ",", "[", "]", ")", "]" ]
Get a list of the course IDs (not course run IDs) contained in the program. Arguments: program_uuid (str): Program UUID in string form Returns: list(str): List of course keys in string form that are included in the program
[ "Get", "a", "list", "of", "the", "course", "IDs", "(", "not", "course", "run", "IDs", ")", "contained", "in", "the", "program", "." ]
python
valid
35.4
Microsoft/nni
tools/nni_trial_tool/hdfsClientUtility.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_trial_tool/hdfsClientUtility.py#L69-L91
def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient):
    '''Copy directory from local to HDFS'''
    if not os.path.exists(localDirectory):
        raise Exception('Local Directory does not exist!')
    hdfsClient.mkdirs(hdfsDirectory)
    result = True
    for file in os.listdir(localDirectory):
        file_path = os.path.join(localDirectory, file)
        if os.path.isdir(file_path):
            hdfs_directory = os.path.join(hdfsDirectory, file)
            try:
                result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient)
            except Exception as exception:
                nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception)))
                result = False
        else:
            hdfs_file_path = os.path.join(hdfsDirectory, file)
            try:
                result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient)
            except Exception as exception:
                nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception)))
                result = False
    return result
[ "def", "copyDirectoryToHdfs", "(", "localDirectory", ",", "hdfsDirectory", ",", "hdfsClient", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "localDirectory", ")", ":", "raise", "Exception", "(", "'Local Directory does not exist!'", ")", "hdfsClient", ".", "mkdirs", "(", "hdfsDirectory", ")", "result", "=", "True", "for", "file", "in", "os", ".", "listdir", "(", "localDirectory", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "localDirectory", ",", "file", ")", "if", "os", ".", "path", ".", "isdir", "(", "file_path", ")", ":", "hdfs_directory", "=", "os", ".", "path", ".", "join", "(", "hdfsDirectory", ",", "file", ")", "try", ":", "result", "=", "result", "and", "copyDirectoryToHdfs", "(", "file_path", ",", "hdfs_directory", ",", "hdfsClient", ")", "except", "Exception", "as", "exception", ":", "nni_log", "(", "LogType", ".", "Error", ",", "'Copy local directory {0} to hdfs directory {1} error: {2}'", ".", "format", "(", "file_path", ",", "hdfs_directory", ",", "str", "(", "exception", ")", ")", ")", "result", "=", "False", "else", ":", "hdfs_file_path", "=", "os", ".", "path", ".", "join", "(", "hdfsDirectory", ",", "file", ")", "try", ":", "result", "=", "result", "and", "copyFileToHdfs", "(", "file_path", ",", "hdfs_file_path", ",", "hdfsClient", ")", "except", "Exception", "as", "exception", ":", "nni_log", "(", "LogType", ".", "Error", ",", "'Copy local file {0} to hdfs {1} error: {2}'", ".", "format", "(", "file_path", ",", "hdfs_file_path", ",", "str", "(", "exception", ")", ")", ")", "result", "=", "False", "return", "result" ]
Copy directory from local to HDFS
[ "Copy", "directory", "from", "local", "to", "HDFS" ]
python
train
51.826087
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L159-L164
def jsonnumincrby(self, name, path, number):
    """
    Increments the numeric (integer or floating point) JSON value under
    ``path`` at key ``name`` by the provided ``number``
    """
    return self.execute_command('JSON.NUMINCRBY', name, str_path(path), self._encode(number))
[ "def", "jsonnumincrby", "(", "self", ",", "name", ",", "path", ",", "number", ")", ":", "return", "self", ".", "execute_command", "(", "'JSON.NUMINCRBY'", ",", "name", ",", "str_path", "(", "path", ")", ",", "self", ".", "_encode", "(", "number", ")", ")" ]
Increments the numeric (integer or floating point) JSON value under ``path`` at key ``name`` by the provided ``number``
[ "Increments", "the", "numeric", "(", "integer", "or", "floating", "point", ")", "JSON", "value", "under", "path", "at", "key", "name", "by", "the", "provided", "number" ]
python
train
49.5
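A usage sketch in the style of the rejson-py README (the key name 'num' is illustrative):

from rejson import Client, Path

rj = Client(host='localhost', port=6379)
rj.jsonset('num', Path.rootPath(), 1)
rj.jsonnumincrby('num', Path.rootPath(), 2)     # -> 3
rj.jsonnumincrby('num', Path.rootPath(), 0.5)   # -> 3.5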
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L1190-L1232
def create_tasks(self, wfk_file, scr_input):
    """
    Create the SCR tasks and register them in self.

    Args:
        wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
        scr_input: Input for the screening calculation.
    """
    assert len(self) == 0
    wfk_file = self.wfk_file = os.path.abspath(wfk_file)

    # Build a temporary work in the tmpdir that will use a shell manager
    # to run ABINIT in order to get the list of q-points for the screening.
    shell_manager = self.manager.to_shell_manager(mpi_procs=1)

    w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)

    fake_input = scr_input.deepcopy()
    fake_task = w.register(fake_input)
    w.allocate()
    w.build()

    # Create the symbolic link and add the magic value
    # nqptdm = -1 to the input to get the list of q-points.
    fake_task.inlink_file(wfk_file)
    fake_task.set_vars({"nqptdm": -1})
    fake_task.start_and_wait()

    # Parse the section with the q-points
    with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
        qpoints = reader.read_value("reduced_coordinates_of_kpoints")
        # print("qpoints")

    # Now we can register the task for the different q-points
    for qpoint in qpoints:
        qptdm_input = scr_input.deepcopy()
        qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
        new_task = self.register_scr_task(qptdm_input, manager=self.manager)

        # Add the garbage collector.
        if self.flow.gc is not None:
            new_task.set_gc(self.flow.gc)

    self.allocate()
[ "def", "create_tasks", "(", "self", ",", "wfk_file", ",", "scr_input", ")", ":", "assert", "len", "(", "self", ")", "==", "0", "wfk_file", "=", "self", ".", "wfk_file", "=", "os", ".", "path", ".", "abspath", "(", "wfk_file", ")", "# Build a temporary work in the tmpdir that will use a shell manager", "# to run ABINIT in order to get the list of q-points for the screening.", "shell_manager", "=", "self", ".", "manager", ".", "to_shell_manager", "(", "mpi_procs", "=", "1", ")", "w", "=", "Work", "(", "workdir", "=", "self", ".", "tmpdir", ".", "path_join", "(", "\"_qptdm_run\"", ")", ",", "manager", "=", "shell_manager", ")", "fake_input", "=", "scr_input", ".", "deepcopy", "(", ")", "fake_task", "=", "w", ".", "register", "(", "fake_input", ")", "w", ".", "allocate", "(", ")", "w", ".", "build", "(", ")", "# Create the symbolic link and add the magic value", "# nqpdm = -1 to the input to get the list of q-points.", "fake_task", ".", "inlink_file", "(", "wfk_file", ")", "fake_task", ".", "set_vars", "(", "{", "\"nqptdm\"", ":", "-", "1", "}", ")", "fake_task", ".", "start_and_wait", "(", ")", "# Parse the section with the q-points", "with", "NetcdfReader", "(", "fake_task", ".", "outdir", ".", "has_abiext", "(", "\"qptdms.nc\"", ")", ")", "as", "reader", ":", "qpoints", "=", "reader", ".", "read_value", "(", "\"reduced_coordinates_of_kpoints\"", ")", "#print(\"qpoints)", "# Now we can register the task for the different q-points", "for", "qpoint", "in", "qpoints", ":", "qptdm_input", "=", "scr_input", ".", "deepcopy", "(", ")", "qptdm_input", ".", "set_vars", "(", "nqptdm", "=", "1", ",", "qptdm", "=", "qpoint", ")", "new_task", "=", "self", ".", "register_scr_task", "(", "qptdm_input", ",", "manager", "=", "self", ".", "manager", ")", "# Add the garbage collector.", "if", "self", ".", "flow", ".", "gc", "is", "not", "None", ":", "new_task", ".", "set_gc", "(", "self", ".", "flow", ".", "gc", ")", "self", ".", "allocate", "(", ")" ]
Create the SCR tasks and register them in self. Args: wfk_file: Path to the ABINIT WFK file to use for the computation of the screening. scr_input: Input for the screening calculation.
[ "Create", "the", "SCR", "tasks", "and", "register", "them", "in", "self", "." ]
python
train
39.093023
log2timeline/dfvfs
dfvfs/vfs/file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L82-L103
def Close(self):
    """Closes the file system.

    Raises:
      IOError: if the file system object was not opened or the close failed.
      OSError: if the file system object was not opened or the close failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    if not self._is_cached:
        close_file_system = True
    elif self._resolver_context.ReleaseFileSystem(self):
        self._is_cached = False
        close_file_system = True
    else:
        close_file_system = False

    if close_file_system:
        self._Close()
        self._is_open = False
        self._path_spec = None
[ "def", "Close", "(", "self", ")", ":", "if", "not", "self", ".", "_is_open", ":", "raise", "IOError", "(", "'Not opened.'", ")", "if", "not", "self", ".", "_is_cached", ":", "close_file_system", "=", "True", "elif", "self", ".", "_resolver_context", ".", "ReleaseFileSystem", "(", "self", ")", ":", "self", ".", "_is_cached", "=", "False", "close_file_system", "=", "True", "else", ":", "close_file_system", "=", "False", "if", "close_file_system", ":", "self", ".", "_Close", "(", ")", "self", ".", "_is_open", "=", "False", "self", ".", "_path_spec", "=", "None" ]
Closes the file system. Raises: IOError: if the file system object was not opened or the close failed. OSError: if the file system object was not opened or the close failed.
[ "Closes", "the", "file", "system", "." ]
python
train
26.636364
pyapi-gitlab/pyapi-gitlab
gitlab/__init__.py
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L1637-L1653
def searchproject(self, search, page=1, per_page=20):
    """
    Search for projects by name which are accessible to the authenticated user

    :param search: Query to search for
    :param page: Page number
    :param per_page: Records per page
    :return: list of results
    """
    data = {'page': page, 'per_page': per_page}
    request = requests.get("{0}/{1}".format(self.search_url, search),
                           params=data, verify=self.verify_ssl,
                           auth=self.auth, headers=self.headers,
                           timeout=self.timeout)
    if request.status_code == 200:
        return request.json()
    else:
        return False
[ "def", "searchproject", "(", "self", ",", "search", ",", "page", "=", "1", ",", "per_page", "=", "20", ")", ":", "data", "=", "{", "'page'", ":", "page", ",", "'per_page'", ":", "per_page", "}", "request", "=", "requests", ".", "get", "(", "\"{0}/{1}\"", ".", "format", "(", "self", ".", "search_url", ",", "search", ")", ",", "params", "=", "data", ",", "verify", "=", "self", ".", "verify_ssl", ",", "auth", "=", "self", ".", "auth", ",", "headers", "=", "self", ".", "headers", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "request", ".", "status_code", "==", "200", ":", "return", "request", ".", "json", "(", ")", "else", ":", "return", "False" ]
Search for projects by name which are accessible to the authenticated user :param search: Query to search for :param page: Page number :param per_page: Records per page :return: list of results
[ "Search", "for", "projects", "by", "name", "which", "are", "accessible", "to", "the", "authenticated", "user" ]
python
train
39
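A hedged call sketch; gl stands in for an authenticated gitlab.Gitlab instance from this library:

projects = gl.searchproject('infra', page=1, per_page=10)
if projects is False:
    print('search failed (non-200 response)')
else:
    names = [p['name'] for p in projects]   # assumes the usual GitLab project JSON shape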
apache/spark
python/pyspark/mllib/linalg/distributed.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1051-L1071
def blocks(self):
    """
    The RDD of sub-matrix blocks
    ((blockRowIndex, blockColIndex), sub-matrix) that form this
    distributed matrix.

    >>> mat = BlockMatrix(
    ...     sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
    ...                     ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
    >>> blocks = mat.blocks
    >>> blocks.first()
    ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
    """
    # We use DataFrames for serialization of sub-matrix blocks
    # from Java, so we first convert the RDD of blocks to a
    # DataFrame on the Scala/Java side. Then we map each Row in
    # the DataFrame back to a sub-matrix block on this side.
    blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
    blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
    return blocks
[ "def", "blocks", "(", "self", ")", ":", "# We use DataFrames for serialization of sub-matrix blocks", "# from Java, so we first convert the RDD of blocks to a", "# DataFrame on the Scala/Java side. Then we map each Row in", "# the DataFrame back to a sub-matrix block on this side.", "blocks_df", "=", "callMLlibFunc", "(", "\"getMatrixBlocks\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "blocks", "=", "blocks_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "(", "(", "row", "[", "0", "]", "[", "0", "]", ",", "row", "[", "0", "]", "[", "1", "]", ")", ",", "row", "[", "1", "]", ")", ")", "return", "blocks" ]
The RDD of sub-matrix blocks ((blockRowIndex, blockColIndex), sub-matrix) that form this distributed matrix. >>> mat = BlockMatrix( ... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2) >>> blocks = mat.blocks >>> blocks.first() ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
[ "The", "RDD", "of", "sub", "-", "matrix", "blocks", "((", "blockRowIndex", "blockColIndex", ")", "sub", "-", "matrix", ")", "that", "form", "this", "distributed", "matrix", "." ]
python
train
45.47619
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/s3.py#L197-L235
def remove(self, path, recursive=True):
    """
    Remove a file or directory from S3.

    :param path: File or directory to remove
    :param recursive: Boolean indicator to remove object and children
    :return: Boolean indicator denoting success of the removal of 1 or more files
    """
    if not self.exists(path):
        logger.debug('Could not delete %s; path does not exist', path)
        return False

    (bucket, key) = self._path_to_bucket_and_key(path)
    s3_bucket = self.s3.Bucket(bucket)

    # root
    if self._is_root(key):
        raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)

    # file
    if self._exists(bucket, key):
        self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
        logger.debug('Deleting %s from bucket %s', key, bucket)
        return True

    if self.isdir(path) and not recursive:
        raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)

    delete_key_list = [{'Key': obj.key} for obj in
                       s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]

    # delete the directory marker file if it exists
    if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
        delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})

    if len(delete_key_list) > 0:
        # delete in batches of at most 1000 keys, the S3 API limit per request
        n = 1000
        for i in range(0, len(delete_key_list), n):
            self.s3.meta.client.delete_objects(Bucket=bucket,
                                               Delete={'Objects': delete_key_list[i: i + n]})
        return True
    return False
[ "def", "remove", "(", "self", ",", "path", ",", "recursive", "=", "True", ")", ":", "if", "not", "self", ".", "exists", "(", "path", ")", ":", "logger", ".", "debug", "(", "'Could not delete %s; path does not exist'", ",", "path", ")", "return", "False", "(", "bucket", ",", "key", ")", "=", "self", ".", "_path_to_bucket_and_key", "(", "path", ")", "s3_bucket", "=", "self", ".", "s3", ".", "Bucket", "(", "bucket", ")", "# root", "if", "self", ".", "_is_root", "(", "key", ")", ":", "raise", "InvalidDeleteException", "(", "'Cannot delete root of bucket at path %s'", "%", "path", ")", "# file", "if", "self", ".", "_exists", "(", "bucket", ",", "key", ")", ":", "self", ".", "s3", ".", "meta", ".", "client", ".", "delete_object", "(", "Bucket", "=", "bucket", ",", "Key", "=", "key", ")", "logger", ".", "debug", "(", "'Deleting %s from bucket %s'", ",", "key", ",", "bucket", ")", "return", "True", "if", "self", ".", "isdir", "(", "path", ")", "and", "not", "recursive", ":", "raise", "InvalidDeleteException", "(", "'Path %s is a directory. Must use recursive delete'", "%", "path", ")", "delete_key_list", "=", "[", "{", "'Key'", ":", "obj", ".", "key", "}", "for", "obj", "in", "s3_bucket", ".", "objects", ".", "filter", "(", "Prefix", "=", "self", ".", "_add_path_delimiter", "(", "key", ")", ")", "]", "# delete the directory marker file if it exists", "if", "self", ".", "_exists", "(", "bucket", ",", "'{}{}'", ".", "format", "(", "key", ",", "S3_DIRECTORY_MARKER_SUFFIX_0", ")", ")", ":", "delete_key_list", ".", "append", "(", "{", "'Key'", ":", "'{}{}'", ".", "format", "(", "key", ",", "S3_DIRECTORY_MARKER_SUFFIX_0", ")", "}", ")", "if", "len", "(", "delete_key_list", ")", ">", "0", ":", "n", "=", "1000", "for", "i", "in", "range", "(", "0", ",", "len", "(", "delete_key_list", ")", ",", "n", ")", ":", "self", ".", "s3", ".", "meta", ".", "client", ".", "delete_objects", "(", "Bucket", "=", "bucket", ",", "Delete", "=", "{", "'Objects'", ":", "delete_key_list", "[", "i", ":", "i", "+", "n", "]", "}", ")", "return", "True", "return", "False" ]
Remove a file or directory from S3. :param path: File or directory to remove :param recursive: Boolean indicator to remove object and children :return: Boolean indicator denoting success of the removal of 1 or more files
[ "Remove", "a", "file", "or", "directory", "from", "S3", ".", ":", "param", "path", ":", "File", "or", "directory", "to", "remove", ":", "param", "recursive", ":", "Boolean", "indicator", "to", "remove", "object", "and", "children", ":", "return", ":", "Boolean", "indicator", "denoting", "success", "of", "the", "removal", "of", "1", "or", "more", "files" ]
python
train
42.179487
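A hedged usage sketch; S3Client is this module's client class, and the bucket/prefix are hypothetical:

from luigi.contrib.s3 import S3Client

client = S3Client()   # credentials resolved from luigi config or the environment
client.remove('s3://my-bucket/reports/2019/', recursive=True)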
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L487-L521
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
    """Change some fields of a dataset.

    Use ``fields`` to specify which fields to update. At least one field
    must be provided. If a field is listed in ``fields`` and is ``None`` in
    ``dataset``, it will be deleted.

    If ``dataset.etag`` is not ``None``, the update will only succeed if
    the dataset on the server has the same ETag. Thus reading a dataset
    with ``get_dataset``, changing its fields, and then passing it to
    ``update_dataset`` will ensure that the changes will only be saved if
    no modifications to the dataset occurred since the read.

    Args:
        dataset (google.cloud.bigquery.dataset.Dataset):
            The dataset to update.
        fields (Sequence[str]):
            The properties of ``dataset`` to change (e.g. "friendly_name").
        retry (google.api_core.retry.Retry, optional):
            How to retry the RPC.

    Returns:
        google.cloud.bigquery.dataset.Dataset:
            The modified ``Dataset`` instance.
    """
    partial = dataset._build_resource(fields)
    if dataset.etag is not None:
        headers = {"If-Match": dataset.etag}
    else:
        headers = None
    api_response = self._call_api(
        retry, method="PATCH", path=dataset.path, data=partial, headers=headers
    )
    return Dataset.from_api_repr(api_response)
[ "def", "update_dataset", "(", "self", ",", "dataset", ",", "fields", ",", "retry", "=", "DEFAULT_RETRY", ")", ":", "partial", "=", "dataset", ".", "_build_resource", "(", "fields", ")", "if", "dataset", ".", "etag", "is", "not", "None", ":", "headers", "=", "{", "\"If-Match\"", ":", "dataset", ".", "etag", "}", "else", ":", "headers", "=", "None", "api_response", "=", "self", ".", "_call_api", "(", "retry", ",", "method", "=", "\"PATCH\"", ",", "path", "=", "dataset", ".", "path", ",", "data", "=", "partial", ",", "headers", "=", "headers", ")", "return", "Dataset", ".", "from_api_repr", "(", "api_response", ")" ]
Change some fields of a dataset. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``dataset``, it will be deleted. If ``dataset.etag`` is not ``None``, the update will only succeed if the dataset on the server has the same ETag. Thus reading a dataset with ``get_dataset``, changing its fields, and then passing it to ``update_dataset`` will ensure that the changes will only be saved if no modifications to the dataset occurred since the read. Args: dataset (google.cloud.bigquery.dataset.Dataset): The dataset to update. fields (Sequence[str]): The properties of ``dataset`` to change (e.g. "friendly_name"). retry (google.api_core.retry.Retry, optional): How to retry the RPC. Returns: google.cloud.bigquery.dataset.Dataset: The modified ``Dataset`` instance.
[ "Change", "some", "fields", "of", "a", "dataset", "." ]
python
train
42.057143
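A sketch of the read-modify-write pattern the docstring describes (dataset_ref is a hypothetical DatasetReference):

dataset = client.get_dataset(dataset_ref)
dataset.description = 'Curated marketing data'
dataset = client.update_dataset(dataset, ['description'])   # ETag guards against lost updates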
gwastro/pycbc
pycbc/tmpltbank/option_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/option_utils.py#L1184-L1206
def check_ethinca_against_bank_params(ethincaParams, metricParams):
    """
    Cross-check the ethinca and bank layout metric calculation parameters
    and set the ethinca metric PN order equal to the bank PN order if not
    previously set.

    Parameters
    ----------
    ethincaParams: instance of ethincaParameters
    metricParams: instance of metricParameters
    """
    if ethincaParams.doEthinca:
        if metricParams.f0 != metricParams.fLow:
            raise ValueError("If calculating ethinca metric, f0 and f-low "
                             "must be equal!")
        if ethincaParams.fLow is not None and (
                ethincaParams.fLow != metricParams.fLow):
            raise ValueError("Ethinca metric calculation does not currently "
                             "support a f-low value different from the bank "
                             "metric!")
        if ethincaParams.pnOrder is None:
            ethincaParams.pnOrder = metricParams.pnOrder
    else:
        pass
[ "def", "check_ethinca_against_bank_params", "(", "ethincaParams", ",", "metricParams", ")", ":", "if", "ethincaParams", ".", "doEthinca", ":", "if", "metricParams", ".", "f0", "!=", "metricParams", ".", "fLow", ":", "raise", "ValueError", "(", "\"If calculating ethinca metric, f0 and f-low \"", "\"must be equal!\"", ")", "if", "ethincaParams", ".", "fLow", "is", "not", "None", "and", "(", "ethincaParams", ".", "fLow", "!=", "metricParams", ".", "fLow", ")", ":", "raise", "ValueError", "(", "\"Ethinca metric calculation does not currently \"", "\"support a f-low value different from the bank \"", "\"metric!\"", ")", "if", "ethincaParams", ".", "pnOrder", "is", "None", ":", "ethincaParams", ".", "pnOrder", "=", "metricParams", ".", "pnOrder", "else", ":", "pass" ]
Cross-check the ethinca and bank layout metric calculation parameters and set the ethinca metric PN order equal to the bank PN order if not previously set. Parameters ---------- ethincaParams: instance of ethincaParameters metricParams: instance of metricParameters
[ "Cross", "-", "check", "the", "ethinca", "and", "bank", "layout", "metric", "calculation", "parameters", "and", "set", "the", "ethinca", "metric", "PN", "order", "equal", "to", "the", "bank", "PN", "order", "if", "not", "previously", "set", "." ]
python
train
42.434783
hyperledger/indy-plenum
plenum/common/message_processor.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/message_processor.py#L18-L30
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False): """ Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed """ reason = "" if not reason else " because {}".format(reason) logMethod("{} discarding message {}{}".format(self, msg, reason), extra={"cli": cliOutput})
[ "def", "discard", "(", "self", ",", "msg", ",", "reason", ",", "logMethod", "=", "logging", ".", "error", ",", "cliOutput", "=", "False", ")", ":", "reason", "=", "\"\"", "if", "not", "reason", "else", "\" because {}\"", ".", "format", "(", "reason", ")", "logMethod", "(", "\"{} discarding message {}{}\"", ".", "format", "(", "self", ",", "msg", ",", "reason", ")", ",", "extra", "=", "{", "\"cli\"", ":", "cliOutput", "}", ")" ]
Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed
[ "Discard", "a", "message", "and", "log", "a", "reason", "using", "the", "specified", "logMethod", "." ]
python
train
47.384615
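A usage sketch, assuming the method above is mixed into a class whose __str__ names the node:

import logging

class Node(MessageProcessor):  # MessageProcessor provides discard()
    def __str__(self):
        return 'Node1'

Node().discard({'op': 'PING'}, 'connection not yet established',
               logMethod=logging.warning)
# logs: Node1 discarding message {'op': 'PING'} because connection not yet established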
materialsproject/pymatgen
pymatgen/io/vasp/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1119-L1138
def automatic_density_by_vol(structure, kppvol, force_gamma=False): """ Returns an automatic Kpoint object based on a structure and a kpoint density per inverse Angstrom^3 of reciprocal cell. Algorithm: Same as automatic_density() Args: structure (Structure): Input structure kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell force_gamma (bool): Force a gamma centered mesh Returns: Kpoints """ vol = structure.lattice.reciprocal_lattice.volume kppa = kppvol * vol * structure.num_sites return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
[ "def", "automatic_density_by_vol", "(", "structure", ",", "kppvol", ",", "force_gamma", "=", "False", ")", ":", "vol", "=", "structure", ".", "lattice", ".", "reciprocal_lattice", ".", "volume", "kppa", "=", "kppvol", "*", "vol", "*", "structure", ".", "num_sites", "return", "Kpoints", ".", "automatic_density", "(", "structure", ",", "kppa", ",", "force_gamma", "=", "force_gamma", ")" ]
Returns an automatic Kpoint object based on a structure and a kpoint density per inverse Angstrom^3 of reciprocal cell. Algorithm: Same as automatic_density() Args: structure (Structure): Input structure kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell force_gamma (bool): Force a gamma centered mesh Returns: Kpoints
[ "Returns", "an", "automatic", "Kpoint", "object", "based", "on", "a", "structure", "and", "a", "kpoint", "density", "per", "inverse", "Angstrom^3", "of", "reciprocal", "cell", "." ]
python
train
37
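Example call; import paths follow recent pymatgen releases, and the NaCl cell and density are illustrative:

from pymatgen.core import Lattice, Structure
from pymatgen.io.vasp.inputs import Kpoints

structure = Structure(Lattice.cubic(5.64), ["Na", "Cl"],
                      [[0, 0, 0], [0.5, 0.5, 0.5]])
kpoints = Kpoints.automatic_density_by_vol(structure, kppvol=100)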
dagster-io/dagster
python_modules/dagster/dagster/core/execution.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L141-L160
def result_for_solid(self, name):
        '''Get a :py:class:`SolidExecutionResult` for a given solid name.
        '''
        check.str_param(name, 'name')

        if not self.pipeline.has_solid(name):
            raise DagsterInvariantViolationError(
                'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
                    name=name, pipeline=self.pipeline.display_name
                )
            )

        if name not in self.solid_result_dict:
            raise DagsterInvariantViolationError(
                'Did not find result for solid {name} in pipeline execution result'.format(
                    name=name
                )
            )

        return self.solid_result_dict[name]
[ "def", "result_for_solid", "(", "self", ",", "name", ")", ":", "check", ".", "str_param", "(", "name", ",", "'name'", ")", "if", "not", "self", ".", "pipeline", ".", "has_solid", "(", "name", ")", ":", "raise", "DagsterInvariantViolationError", "(", "'Try to get result for solid {name} in {pipeline}. No such solid.'", ".", "format", "(", "name", "=", "name", ",", "pipeline", "=", "self", ".", "pipeline", ".", "display_name", ")", ")", "if", "name", "not", "in", "self", ".", "solid_result_dict", ":", "raise", "DagsterInvariantViolationError", "(", "'Did not find result for solid {name} in pipeline execution result'", ".", "format", "(", "name", "=", "name", ")", ")", "return", "self", ".", "solid_result_dict", "[", "name", "]" ]
Get a :py:class:`SolidExecutionResult` for a given solid name.
[ "Get", "a", ":", "py", ":", "class", ":", "SolidExecutionResult", "for", "a", "given", "solid", "name", "." ]
python
test
36.1
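Typical use with the dagster API of this era; the pipeline and solid names are hypothetical:

from dagster import execute_pipeline

result = execute_pipeline(define_my_pipeline())  # returns a pipeline execution result
add_two = result.result_for_solid('add_two')     # a SolidExecutionResult
# an unknown solid name raises DagsterInvariantViolationError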
saltstack/salt
salt/modules/zypperpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zypperpkg.py#L214-L228
def _is_error(self):
        '''
        Is this an error code?

        :return:
        '''
        if self.exit_code:
            msg = self.SUCCESS_EXIT_CODES.get(self.exit_code)
            if msg:
                log.info(msg)
            msg = self.WARNING_EXIT_CODES.get(self.exit_code)
            if msg:
                log.warning(msg)

        return self.exit_code not in self.SUCCESS_EXIT_CODES and self.exit_code not in self.WARNING_EXIT_CODES
[ "def", "_is_error", "(", "self", ")", ":", "if", "self", ".", "exit_code", ":", "msg", "=", "self", ".", "SUCCESS_EXIT_CODES", ".", "get", "(", "self", ".", "exit_code", ")", "if", "msg", ":", "log", ".", "info", "(", "msg", ")", "msg", "=", "self", ".", "WARNING_EXIT_CODES", ".", "get", "(", "self", ".", "exit_code", ")", "if", "msg", ":", "log", ".", "warning", "(", "msg", ")", "return", "self", ".", "exit_code", "not", "in", "self", ".", "SUCCESS_EXIT_CODES", "and", "self", ".", "exit_code", "not", "in", "self", ".", "WARNING_EXIT_CODES" ]
Is this an error code?

        :return:
[ "Is", "this", "is", "an", "error", "code?" ]
python
train
29.866667
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L1125-L1149
def displayMousePosition(xOffset=0, yOffset=0): """This function is meant to be run from the command line. It will automatically display the location and RGB of the mouse cursor.""" print('Press Ctrl-C to quit.') if xOffset != 0 or yOffset != 0: print('xOffset: %s yOffset: %s' % (xOffset, yOffset)) resolution = size() try: while True: # Get and print the mouse coordinates. x, y = position() positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4) if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]: pixelColor = ('NaN', 'NaN', 'NaN') else: pixelColor = pyscreeze.screenshot().getpixel((x, y)) positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3) positionStr += ', ' + str(pixelColor[1]).rjust(3) positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')' sys.stdout.write(positionStr) sys.stdout.write('\b' * len(positionStr)) sys.stdout.flush() except KeyboardInterrupt: sys.stdout.write('\n') sys.stdout.flush()
[ "def", "displayMousePosition", "(", "xOffset", "=", "0", ",", "yOffset", "=", "0", ")", ":", "print", "(", "'Press Ctrl-C to quit.'", ")", "if", "xOffset", "!=", "0", "or", "yOffset", "!=", "0", ":", "print", "(", "'xOffset: %s yOffset: %s'", "%", "(", "xOffset", ",", "yOffset", ")", ")", "resolution", "=", "size", "(", ")", "try", ":", "while", "True", ":", "# Get and print the mouse coordinates.", "x", ",", "y", "=", "position", "(", ")", "positionStr", "=", "'X: '", "+", "str", "(", "x", "-", "xOffset", ")", ".", "rjust", "(", "4", ")", "+", "' Y: '", "+", "str", "(", "y", "-", "yOffset", ")", ".", "rjust", "(", "4", ")", "if", "(", "x", "-", "xOffset", ")", "<", "0", "or", "(", "y", "-", "yOffset", ")", "<", "0", "or", "(", "x", "-", "xOffset", ")", ">=", "resolution", "[", "0", "]", "or", "(", "y", "-", "yOffset", ")", ">=", "resolution", "[", "1", "]", ":", "pixelColor", "=", "(", "'NaN'", ",", "'NaN'", ",", "'NaN'", ")", "else", ":", "pixelColor", "=", "pyscreeze", ".", "screenshot", "(", ")", ".", "getpixel", "(", "(", "x", ",", "y", ")", ")", "positionStr", "+=", "' RGB: ('", "+", "str", "(", "pixelColor", "[", "0", "]", ")", ".", "rjust", "(", "3", ")", "positionStr", "+=", "', '", "+", "str", "(", "pixelColor", "[", "1", "]", ")", ".", "rjust", "(", "3", ")", "positionStr", "+=", "', '", "+", "str", "(", "pixelColor", "[", "2", "]", ")", ".", "rjust", "(", "3", ")", "+", "')'", "sys", ".", "stdout", ".", "write", "(", "positionStr", ")", "sys", ".", "stdout", ".", "write", "(", "'\\b'", "*", "len", "(", "positionStr", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
This function is meant to be run from the command line. It will automatically display the location and RGB of the mouse cursor.
[ "This", "function", "is", "meant", "to", "be", "run", "from", "the", "command", "line", ".", "It", "will", "automatically", "display", "the", "location", "and", "RGB", "of", "the", "mouse", "cursor", "." ]
python
train
48.12
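Invocation is a single call; the printed coordinates below are only an example:

import pyautogui

pyautogui.displayMousePosition()
# prints a live line such as "X:  812 Y:  407 RGB: (255, 255, 255)" until Ctrl-C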
RudolfCardinal/pythonlib
cardinal_pythonlib/wsgi/reverse_proxied_mw.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/wsgi/reverse_proxied_mw.py#L241-L255
def necessary(self) -> bool:
        """
        Is any special handling (e.g. the addition of
        :class:`ReverseProxiedMiddleware`) necessary for this config?
        """
        return any([
            self.trusted_proxy_headers,
            self.http_host,
            self.remote_addr,
            self.script_name,
            self.server_name,
            self.server_port,
            self.url_scheme,
            self.rewrite_path_info,
        ])
[ "def", "necessary", "(", "self", ")", "->", "bool", ":", "return", "any", "(", "[", "self", ".", "trusted_proxy_headers", ",", "self", ".", "http_host", ",", "self", ".", "remote_addr", ",", "self", ".", "script_name", ",", "self", ".", "server_name", ",", "self", ".", "server_port", ",", "self", ".", "url_scheme", ",", "self", ".", "rewrite_path_info", ",", "]", ")" ]
Is any special handling (e.g. the addition of
        :class:`ReverseProxiedMiddleware`) necessary for this config?
[ "Is", "any", "special", "handling", "(", "e", ".", "g", ".", "the", "addition", "of", ":", "class", ":", "ReverseProxiedMiddleware", ")", "necessary", "for", "thie", "config?" ]
python
train
29.8
rocky/python3-trepan
trepan/bwprocessor/main.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/bwprocessor/main.py#L410-L445
def _populate_commands(self):
        """ Create an instance of each of the debugger
        commands. Commands are found by importing files in the
        directory 'command'. Some files are excluded via an array set
        in __init__.  For each of the remaining files, we import them
        and scan for class names inside those files and for each class
        name, we will create an instance of that class. The set of
        DebuggerCommand class instances form the set of possible debugger
        commands."""
        cmd_instances = []
        from trepan.bwprocessor import command as Mcommand
        eval_cmd_template = 'command_mod.%s(self)'
        for mod_name in Mcommand.__modules__:
            import_name = "command." + mod_name
            try:
                command_mod = getattr(__import__(import_name), mod_name)
            except:
                print('Error importing %s: %s' % (mod_name, sys.exc_info()[0]))
                continue

            classnames = [ tup[0] for tup in
                           inspect.getmembers(command_mod, inspect.isclass)
                           if ('DebuggerCommand' != tup[0] and
                               tup[0].endswith('Command')) ]
            for classname in classnames:
                eval_cmd = eval_cmd_template % classname
                try:
                    instance = eval(eval_cmd)
                    cmd_instances.append(instance)
                except:
                    print('Error loading %s from %s: %s' % (classname, mod_name, sys.exc_info()[0]))
                    pass
                pass
            pass
        return cmd_instances
[ "def", "_populate_commands", "(", "self", ")", ":", "cmd_instances", "=", "[", "]", "from", "trepan", ".", "bwprocessor", "import", "command", "as", "Mcommand", "eval_cmd_template", "=", "'command_mod.%s(self)'", "for", "mod_name", "in", "Mcommand", ".", "__modules__", ":", "import_name", "=", "\"command.\"", "+", "mod_name", "try", ":", "command_mod", "=", "getattr", "(", "__import__", "(", "import_name", ")", ",", "mod_name", ")", "except", ":", "print", "(", "'Error importing %s: %s'", "%", "(", "mod_name", ",", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", ")", "continue", "classnames", "=", "[", "tup", "[", "0", "]", "for", "tup", "in", "inspect", ".", "getmembers", "(", "command_mod", ",", "inspect", ".", "isclass", ")", "if", "(", "'DebuggerCommand'", "!=", "tup", "[", "0", "]", "and", "tup", "[", "0", "]", ".", "endswith", "(", "'Command'", ")", ")", "]", "for", "classname", "in", "classnames", ":", "eval_cmd", "=", "eval_cmd_template", "%", "classname", "try", ":", "instance", "=", "eval", "(", "eval_cmd", ")", "cmd_instances", ".", "append", "(", "instance", ")", "except", ":", "print", "(", "'Error loading %s from %s: %s'", "%", "(", "classname", ",", "mod_name", ",", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", ")", "pass", "pass", "pass", "return", "cmd_instances" ]
Create an instance of each of the debugger commands. Commands are found by importing files in the directory 'command'. Some files are excluded via an array set in __init__.  For each of the remaining files, we import them and scan for class names inside those files and for each class name, we will create an instance of that class. The set of DebuggerCommand class instances form the set of possible debugger commands.
[ "Create", "an", "instance", "of", "each", "of", "the", "debugger", "commands", ".", "Commands", "are", "found", "by", "importing", "files", "in", "the", "directory", "command", ".", "Some", "files", "are", "excluded", "via", "an", "array", "set", "in", "__init__", ".", "For", "each", "of", "the", "remaining", "files", "we", "import", "them", "and", "scan", "for", "class", "names", "inside", "those", "files", "and", "for", "each", "class", "name", "we", "will", "create", "an", "instance", "of", "that", "class", ".", "The", "set", "of", "DebuggerCommand", "class", "instances", "form", "set", "of", "possible", "debugger", "commands", "." ]
python
test
45.333333
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L1225-L1258
def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.
        """

        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get('events', [])

        if not isinstance(events, list): # pragma: no cover
            print("Events must be supplied as a list.")
            return

        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            function_arn = function_response['Configuration']['FunctionArn']
        except botocore.exceptions.ClientError as e: # pragma: no cover
            raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                                 "Proceeding to unschedule CloudWatch based events.".format(self.api_stage))

        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
            )

        # Remove async task SNS
        if self.stage_config.get('async_source', None) == 'sns' \
                and self.stage_config.get('async_resources', True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo('SNS Topic removed: %s' % ', '.join(removed_arns))
[ "def", "unschedule", "(", "self", ")", ":", "# Run even if events are not defined to remove previously existing ones (thus default to []).", "events", "=", "self", ".", "stage_config", ".", "get", "(", "'events'", ",", "[", "]", ")", "if", "not", "isinstance", "(", "events", ",", "list", ")", ":", "# pragma: no cover", "print", "(", "\"Events must be supplied as a list.\"", ")", "return", "function_arn", "=", "None", "try", ":", "function_response", "=", "self", ".", "zappa", ".", "lambda_client", ".", "get_function", "(", "FunctionName", "=", "self", ".", "lambda_name", ")", "function_arn", "=", "function_response", "[", "'Configuration'", "]", "[", "'FunctionArn'", "]", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "e", ":", "# pragma: no cover", "raise", "ClickException", "(", "\"Function does not exist, you should deploy first. Ex: zappa deploy {}. \"", "\"Proceeding to unschedule CloudWatch based events.\"", ".", "format", "(", "self", ".", "api_stage", ")", ")", "print", "(", "\"Unscheduling..\"", ")", "self", ".", "zappa", ".", "unschedule_events", "(", "lambda_name", "=", "self", ".", "lambda_name", ",", "lambda_arn", "=", "function_arn", ",", "events", "=", "events", ",", ")", "# Remove async task SNS", "if", "self", ".", "stage_config", ".", "get", "(", "'async_source'", ",", "None", ")", "==", "'sns'", "and", "self", ".", "stage_config", ".", "get", "(", "'async_resources'", ",", "True", ")", ":", "removed_arns", "=", "self", ".", "zappa", ".", "remove_async_sns_topic", "(", "self", ".", "lambda_name", ")", "click", ".", "echo", "(", "'SNS Topic removed: %s'", "%", "', '", ".", "join", "(", "removed_arns", ")", ")" ]
Given a list of scheduled functions,
        tear down their regular execution.
[ "Given", "a", "a", "list", "of", "scheduled", "functions", "tear", "down", "their", "regular", "execution", "." ]
python
train
41
theolind/pymysensors
mysensors/ota.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/ota.py#L130-L157
def respond_fw_config(self, msg): """Respond to a firmware config request.""" (req_fw_type, req_fw_ver, req_blocks, req_crc, bloader_ver) = fw_hex_to_int(msg.payload, 5) _LOGGER.debug( 'Received firmware config request with firmware type %s, ' 'firmware version %s, %s blocks, CRC %s, bootloader %s', req_fw_type, req_fw_ver, req_blocks, req_crc, bloader_ver) fw_type, fw_ver, fware = self._get_fw( msg, (self.requested, self.unstarted)) if fware is None: return None if fw_type != req_fw_type: _LOGGER.warning( 'Firmware type %s of update is not identical to existing ' 'firmware type %s for node %s', fw_type, req_fw_type, msg.node_id) _LOGGER.info( 'Updating node %s to firmware type %s version %s from type %s ' 'version %s', msg.node_id, fw_type, fw_ver, req_fw_type, req_fw_ver) msg = msg.copy(sub_type=self._const.Stream.ST_FIRMWARE_CONFIG_RESPONSE) msg.payload = fw_int_to_hex( fw_type, fw_ver, fware['blocks'], fware['crc']) return msg
[ "def", "respond_fw_config", "(", "self", ",", "msg", ")", ":", "(", "req_fw_type", ",", "req_fw_ver", ",", "req_blocks", ",", "req_crc", ",", "bloader_ver", ")", "=", "fw_hex_to_int", "(", "msg", ".", "payload", ",", "5", ")", "_LOGGER", ".", "debug", "(", "'Received firmware config request with firmware type %s, '", "'firmware version %s, %s blocks, CRC %s, bootloader %s'", ",", "req_fw_type", ",", "req_fw_ver", ",", "req_blocks", ",", "req_crc", ",", "bloader_ver", ")", "fw_type", ",", "fw_ver", ",", "fware", "=", "self", ".", "_get_fw", "(", "msg", ",", "(", "self", ".", "requested", ",", "self", ".", "unstarted", ")", ")", "if", "fware", "is", "None", ":", "return", "None", "if", "fw_type", "!=", "req_fw_type", ":", "_LOGGER", ".", "warning", "(", "'Firmware type %s of update is not identical to existing '", "'firmware type %s for node %s'", ",", "fw_type", ",", "req_fw_type", ",", "msg", ".", "node_id", ")", "_LOGGER", ".", "info", "(", "'Updating node %s to firmware type %s version %s from type %s '", "'version %s'", ",", "msg", ".", "node_id", ",", "fw_type", ",", "fw_ver", ",", "req_fw_type", ",", "req_fw_ver", ")", "msg", "=", "msg", ".", "copy", "(", "sub_type", "=", "self", ".", "_const", ".", "Stream", ".", "ST_FIRMWARE_CONFIG_RESPONSE", ")", "msg", ".", "payload", "=", "fw_int_to_hex", "(", "fw_type", ",", "fw_ver", ",", "fware", "[", "'blocks'", "]", ",", "fware", "[", "'crc'", "]", ")", "return", "msg" ]
Respond to a firmware config request.
[ "Respond", "to", "a", "firmware", "config", "request", "." ]
python
train
42.892857
SuperCowPowers/workbench
workbench/server/bro/bro_log_reader.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/bro/bro_log_reader.py#L20-L48
def read_log(self, logfile): """The read_log method returns a memory efficient generator for rows in a Bro log. Usage: rows = my_bro_reader.read_log(logfile) for row in rows: do something with row Args: logfile: The Bro Log file. """ # Make sure we're at the beginning logfile.seek(0) # First parse the header of the bro log field_names, _ = self._parse_bro_header(logfile) # Note: SO stupid to write a csv reader, but csv.DictReader on Bro # files was doing something weird with generator output that # affected zeroRPC and gave 'could not route _zpc_more' error. # So wrote my own, put a sleep at the end, seems to fix it. while 1: _line = next(logfile).strip() if not _line.startswith('#close'): yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter)))) else: time.sleep(.1) # Give time for zeroRPC to finish messages break
[ "def", "read_log", "(", "self", ",", "logfile", ")", ":", "# Make sure we're at the beginning", "logfile", ".", "seek", "(", "0", ")", "# First parse the header of the bro log", "field_names", ",", "_", "=", "self", ".", "_parse_bro_header", "(", "logfile", ")", "# Note: SO stupid to write a csv reader, but csv.DictReader on Bro", "# files was doing something weird with generator output that", "# affected zeroRPC and gave 'could not route _zpc_more' error.", "# So wrote my own, put a sleep at the end, seems to fix it.", "while", "1", ":", "_line", "=", "next", "(", "logfile", ")", ".", "strip", "(", ")", "if", "not", "_line", ".", "startswith", "(", "'#close'", ")", ":", "yield", "self", ".", "_cast_dict", "(", "dict", "(", "zip", "(", "field_names", ",", "_line", ".", "split", "(", "self", ".", "delimiter", ")", ")", ")", ")", "else", ":", "time", ".", "sleep", "(", ".1", ")", "# Give time for zeroRPC to finish messages", "break" ]
The read_log method returns a memory efficient generator for rows in a Bro log. Usage: rows = my_bro_reader.read_log(logfile) for row in rows: do something with row Args: logfile: The Bro Log file.
[ "The", "read_log", "method", "returns", "a", "memory", "efficient", "generator", "for", "rows", "in", "a", "Bro", "log", "." ]
python
train
37.206897
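Usage per the docstring above; the reader class name and the printed field names are assumptions based on the module path and typical Bro conn.log content:

my_bro_reader = BroLogReader()
with open('conn.log') as logfile:  # a Bro/Zeek log with '#fields' header lines
    for row in my_bro_reader.read_log(logfile):
        print(row['id.orig_h'], row['id.resp_h'])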
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/treemodel.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/treemodel.py#L374-L389
def set_parent(self, parent): """Set the parent of the treeitem :param parent: parent treeitem :type parent: :class:`TreeItem` | None :returns: None :rtype: None :raises: None """ if self._parent == parent: return if self._parent: self._parent.remove_child(self) self._parent = parent if parent: parent.add_child(self)
[ "def", "set_parent", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "_parent", "==", "parent", ":", "return", "if", "self", ".", "_parent", ":", "self", ".", "_parent", ".", "remove_child", "(", "self", ")", "self", ".", "_parent", "=", "parent", "if", "parent", ":", "parent", ".", "add_child", "(", "self", ")" ]
Set the parent of the treeitem :param parent: parent treeitem :type parent: :class:`TreeItem` | None :returns: None :rtype: None :raises: None
[ "Set", "the", "parent", "of", "the", "treeitem" ]
python
train
26.75
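A short re-parenting sketch; the TreeItem constructor signature shown is an assumption:

root = TreeItem(None)    # hypothetical construction with data=None
child = TreeItem(None)
child.set_parent(root)   # root.add_child(child) is called internally
child.set_parent(None)   # detaches child from root again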
fastai/fastai
fastai/tabular/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/tabular/transform.py#L43-L53
def add_cyclic_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False, add_linear:bool=False): "Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`." make_date(df, field_name) field = df[field_name] prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name)) series = field.apply(partial(cyclic_dt_features, time=time, add_linear=add_linear)) columns = [prefix + c for c in cyclic_dt_feat_names(time, add_linear)] df_feats = pd.DataFrame([item for item in series], columns=columns, index=series.index) df = pd.concat([df, df_feats], axis=1) if drop: df.drop(field_name, axis=1, inplace=True) return df
[ "def", "add_cyclic_datepart", "(", "df", ":", "DataFrame", ",", "field_name", ":", "str", ",", "prefix", ":", "str", "=", "None", ",", "drop", ":", "bool", "=", "True", ",", "time", ":", "bool", "=", "False", ",", "add_linear", ":", "bool", "=", "False", ")", ":", "make_date", "(", "df", ",", "field_name", ")", "field", "=", "df", "[", "field_name", "]", "prefix", "=", "ifnone", "(", "prefix", ",", "re", ".", "sub", "(", "'[Dd]ate$'", ",", "''", ",", "field_name", ")", ")", "series", "=", "field", ".", "apply", "(", "partial", "(", "cyclic_dt_features", ",", "time", "=", "time", ",", "add_linear", "=", "add_linear", ")", ")", "columns", "=", "[", "prefix", "+", "c", "for", "c", "in", "cyclic_dt_feat_names", "(", "time", ",", "add_linear", ")", "]", "df_feats", "=", "pd", ".", "DataFrame", "(", "[", "item", "for", "item", "in", "series", "]", ",", "columns", "=", "columns", ",", "index", "=", "series", ".", "index", ")", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "df_feats", "]", ",", "axis", "=", "1", ")", "if", "drop", ":", "df", ".", "drop", "(", "field_name", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "return", "df" ]
Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`.
[ "Helper", "function", "that", "adds", "trigonometric", "date", "/", "time", "features", "to", "a", "date", "in", "the", "column", "field_name", "of", "df", "." ]
python
train
65.090909
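Example; the generated column names come from cyclic_dt_feat_names (cos/sin pairs such as weekday_cos and weekday_sin):

import pandas as pd

df = pd.DataFrame({'date': pd.date_range('2019-01-01', periods=3), 'y': [1, 2, 3]})
df = add_cyclic_datepart(df, 'date')  # appends the cyclic features and drops 'date'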
instacart/lore
lore/estimators/naive.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/estimators/naive.py#L29-L36
def fit(self, x, y, **kwargs): """ Fit a naive model :param x: Predictors to use for fitting the data (this will not be used in naive models) :param y: Outcome """ self.mean = numpy.mean(y) return {}
[ "def", "fit", "(", "self", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "self", ".", "mean", "=", "numpy", ".", "mean", "(", "y", ")", "return", "{", "}" ]
Fit a naive model :param x: Predictors to use for fitting the data (this will not be used in naive models) :param y: Outcome
[ "Fit", "a", "naive", "model", ":", "param", "x", ":", "Predictors", "to", "use", "for", "fitting", "the", "data", "(", "this", "will", "not", "be", "used", "in", "naive", "models", ")", ":", "param", "y", ":", "Outcome" ]
python
train
31
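A sketch of the fitted state; the estimator class name Naive is assumed for the class defined in lore.estimators.naive:

import numpy

model = Naive()
model.fit(x=None, y=numpy.array([1.0, 2.0, 3.0]))  # x is ignored by design
assert model.mean == 2.0  # the mean is the only state the naive model keeps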
bioidiap/bob.ip.facedetect
bob/ip/facedetect/train/TrainingSet.py
https://github.com/bioidiap/bob.ip.facedetect/blob/601da5141ca7302ad36424d1421b33190ba46779/bob/ip/facedetect/train/TrainingSet.py#L441-L457
def feature_extractor(self):
    """feature_extractor() -> extractor

    Returns the feature extractor used to extract the positive and negative features.

    This feature extractor is stored to file while the :py:meth:`extract` method runs, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.

    **Returns:**

    ``extractor`` : :py:class:`FeatureExtractor`
      The feature extractor used to extract the features stored in the ``feature_directory``
    """
    extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")

    if not os.path.exists(extractor_file):
      raise IOError("Could not find extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?" % extractor_file)

    hdf5 = bob.io.base.HDF5File(extractor_file)
    return FeatureExtractor(hdf5)
[ "def", "feature_extractor", "(", "self", ")", ":", "extractor_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "feature_directory", ",", "\"Extractor.hdf5\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "extractor_file", ")", ":", "raise", "IOError", "(", "\"Could not found extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?\"", "%", "extractor_file", ")", "hdf5", "=", "bob", ".", "io", ".", "base", ".", "HDF5File", "(", "extractor_file", ")", "return", "FeatureExtractor", "(", "hdf5", ")" ]
feature_extractor() -> extractor

    Returns the feature extractor used to extract the positive and negative features.

    This feature extractor is stored to file while the :py:meth:`extract` method runs, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.

    **Returns:**

    ``extractor`` : :py:class:`FeatureExtractor`
      The feature extractor used to extract the features stored in the ``feature_directory``
[ "feature_extractor", "()", "-", ">", "extractor" ]
python
train
52.823529
theosysbio/means
src/means/approximation/mea/raw_to_central.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/approximation/mea/raw_to_central.py#L12-L65
def raw_to_central(n_counter, species, k_counter): """ Expresses central moments in terms of raw moments (and other central moments). Based on equation 8 in the paper: .. math:: \mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that are equivalent to :math:`\mu_i` in the paper. The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained from k_counter as it contains the symbols for raw moments. :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param species: the symbols for species means :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of central moments expressed in terms of raw moment """ # create empty output central_in_terms_of_raw = [] # This loop loops through the ::math::`[n_1, ..., n_d]` vectors of the sums in the beginning of the equation # i.e. :math:`\sum_{k1=0}^n_1 ... \sum_{kd=0}^n_d` part of the equation. # Note, this is not the sum over k's in that equation, or at least I think its not for n_iter in n_counter: #loop through all n1,...,nd combinations # nothing to do for 0th order central moment if n_iter.order == 0: continue # n_vec is the vector ::math::`[n_1, ... n_d]` in equation 8 n_vec = n_iter.n_vector # k_lower contains the elements of `k_counter` that are lower than or equal to the current n_vec # This generates the list of possible k values to satisfy ns in the equation. # `k_vec` iterators bellow are the vector ::math::`[k_1, ..., k_d]` k_lower = [k for k in k_counter if n_iter >= k] # (n k) binomial term in equation 9 n_choose_k_vec = [make_k_chose_e(k_vec.n_vector, n_vec) for k_vec in k_lower] # (-1)^(n-k) term in equation 9 minus_one_pow_n_min_k_vec = [_make_min_one_pow_n_minus_k(n_vec, k_vec.n_vector) for k_vec in k_lower ] # alpha term in equation 9 alpha_vec = [_make_alpha(n_vec, k_vec.n_vector, species) for k_vec in k_lower] # beta term in equation 9 beta_vec = [k_vec.symbol for k_vec in k_lower] # let us multiply all terms product = [(n * m * a * b) for (n, m, a, b) in zip(n_choose_k_vec, minus_one_pow_n_min_k_vec, alpha_vec, beta_vec)] # and store the product central_in_terms_of_raw.append(sum(product)) return sp.Matrix(central_in_terms_of_raw)
[ "def", "raw_to_central", "(", "n_counter", ",", "species", ",", "k_counter", ")", ":", "# create empty output", "central_in_terms_of_raw", "=", "[", "]", "# This loop loops through the ::math::`[n_1, ..., n_d]` vectors of the sums in the beginning of the equation", "# i.e. :math:`\\sum_{k1=0}^n_1 ... \\sum_{kd=0}^n_d` part of the equation.", "# Note, this is not the sum over k's in that equation, or at least I think its not", "for", "n_iter", "in", "n_counter", ":", "#loop through all n1,...,nd combinations", "# nothing to do for 0th order central moment", "if", "n_iter", ".", "order", "==", "0", ":", "continue", "# n_vec is the vector ::math::`[n_1, ... n_d]` in equation 8", "n_vec", "=", "n_iter", ".", "n_vector", "# k_lower contains the elements of `k_counter` that are lower than or equal to the current n_vec", "# This generates the list of possible k values to satisfy ns in the equation.", "# `k_vec` iterators bellow are the vector ::math::`[k_1, ..., k_d]`", "k_lower", "=", "[", "k", "for", "k", "in", "k_counter", "if", "n_iter", ">=", "k", "]", "# (n k) binomial term in equation 9", "n_choose_k_vec", "=", "[", "make_k_chose_e", "(", "k_vec", ".", "n_vector", ",", "n_vec", ")", "for", "k_vec", "in", "k_lower", "]", "# (-1)^(n-k) term in equation 9", "minus_one_pow_n_min_k_vec", "=", "[", "_make_min_one_pow_n_minus_k", "(", "n_vec", ",", "k_vec", ".", "n_vector", ")", "for", "k_vec", "in", "k_lower", "]", "# alpha term in equation 9", "alpha_vec", "=", "[", "_make_alpha", "(", "n_vec", ",", "k_vec", ".", "n_vector", ",", "species", ")", "for", "k_vec", "in", "k_lower", "]", "# beta term in equation 9", "beta_vec", "=", "[", "k_vec", ".", "symbol", "for", "k_vec", "in", "k_lower", "]", "# let us multiply all terms", "product", "=", "[", "(", "n", "*", "m", "*", "a", "*", "b", ")", "for", "(", "n", ",", "m", ",", "a", ",", "b", ")", "in", "zip", "(", "n_choose_k_vec", ",", "minus_one_pow_n_min_k_vec", ",", "alpha_vec", ",", "beta_vec", ")", "]", "# and store the product", "central_in_terms_of_raw", ".", "append", "(", "sum", "(", "product", ")", ")", "return", "sp", ".", "Matrix", "(", "central_in_terms_of_raw", ")" ]
Expresses central moments in terms of raw moments (and other central moments). Based on equation 8 in the paper: .. math:: \mathbf{M_{x^n}} = \sum_{k_1=0}^{n_1} ... \sum_{k_d=0}^{n_d} \mathbf{{n \choose k}} (-1)^{\mathbf{n-k}} \mu^{\mathbf{n-k}} \langle \mathbf{x^k} \\rangle The term :math:`\mu^{\mathbf{n-k}}`, so called alpha term is expressed with respect to `species` values that are equivalent to :math:`\mu_i` in the paper. The last term, the beta term, :math:`\langle \mathbf{x^n} \\rangle` is simply obtained from k_counter as it contains the symbols for raw moments. :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param species: the symbols for species means :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of central moments expressed in terms of raw moment
[ "Expresses", "central", "moments", "in", "terms", "of", "raw", "moments", "(", "and", "other", "central", "moments", ")", ".", "Based", "on", "equation", "8", "in", "the", "paper", ":" ]
python
train
52.12963
EconForge/dolo
trash/dolo/misc/symbolic_interactive.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/misc/symbolic_interactive.py#L227-L255
def def_variables(s):
    """
    Declare symbolic variables in the calling namespace.

    Accepts a string of whitespace- or comma-separated names (names
    containing '@' are declared as IndexedSymbol instances) or a list
    of names; the new symbols are injected into the caller's globals,
    appended to the global ``variables_order`` list and returned.
    """

    frame = inspect.currentframe().f_back
    try:
        if isinstance(s,str):
            s = re.split('\s|,', s)
        res = []
        for t in s:
            # skip empty string
            if not t:
                continue
            if t.count("@") > 0:
                sym = IndexedSymbol(t,Variable)
                t = t.strip('@')
            else:
                sym = Variable(t)
            frame.f_globals[t] = sym
            res.append(sym)
        if frame.f_globals.get('variables_order'):
            # we should avoid declaring symbols twice!
            frame.f_globals['variables_order'].extend(res)
        else:
            frame.f_globals['variables_order'] = res
        return res
    finally:
        del frame
[ "def", "def_variables", "(", "s", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", "try", ":", "if", "isinstance", "(", "s", ",", "str", ")", ":", "s", "=", "re", ".", "split", "(", "'\\s|,'", ",", "s", ")", "res", "=", "[", "]", "for", "t", "in", "s", ":", "# skip empty stringG", "if", "not", "t", ":", "continue", "if", "t", ".", "count", "(", "\"@\"", ")", ">", "0", ":", "sym", "=", "IndexedSymbol", "(", "t", ",", "Variable", ")", "t", "=", "t", ".", "strip", "(", "'@'", ")", "else", ":", "sym", "=", "Variable", "(", "t", ")", "frame", ".", "f_globals", "[", "t", "]", "=", "sym", "res", ".", "append", "(", "sym", ")", "if", "frame", ".", "f_globals", ".", "get", "(", "'variables_order'", ")", ":", "# we should avoid to declare symbols twice !", "frame", ".", "f_globals", "[", "'variables_order'", "]", ".", "extend", "(", "res", ")", "else", ":", "frame", ".", "f_globals", "[", "'variables_order'", "]", "=", "res", "return", "res", "finally", ":", "del", "frame" ]
Declare symbolic variables in the calling namespace.

Accepts a string of whitespace- or comma-separated names (names containing '@' are declared as IndexedSymbol instances) or a list of names; the new symbols are injected into the caller's globals, appended to the global ``variables_order`` list and returned.
[ "blabla" ]
python
train
26.551724
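Example of the declaration side effects, run at module level so the caller's frame is writable:

def_variables('k c y')
# k, c and y are now Variable instances in the calling namespace, and the
# module-level list variables_order records them in declaration order.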
rjdkmr/do_x3dna
dnaMD/dnaMD/dnaEY.py
https://github.com/rjdkmr/do_x3dna/blob/fe910335eefcada76737f9e7cd6f25036cd32ab6/dnaMD/dnaMD/dnaEY.py#L853-L952
def _calcEnergyBendStretchTwist(self, diff, es, which): r"""Calculate energy for ``esType='BST'`` using a difference vector. It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame. Parameters ---------- diff : numpy.ndarray Array of difference between minimum and current parameter values. .. math:: \mathbf{x} = \begin{bmatrix} (\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0) \end{bmatrix} es : numpy.ndarray Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix. which : str For which type of motions, energy will be calculated. see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords. Return ------ energy : float Deformation free energy value """ if which not in self.enGlobalTypes: raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format( which, self.enGlobalTypes)) energy = None if which == 'full': temp = np.matrix(diff) energy = 0.5 * ((temp * es) * temp.T) energy = energy[0,0] if which == 'diag': energy = 0.5 * ((diff[0] ** 2 * es[0][0]) + (diff[1] ** 2 * es[1][1]) + (diff[2] ** 2 * es[2][2]) + (diff[3] ** 2 * es[3][3])) if which == 'bend': energy = 0.5 * ((diff[0] ** 2 * es[0][0]) + (diff[1] ** 2 * es[1][1]) + (diff[0] * diff[1] * es[0][1])) if which == 'b1': energy = 0.5 * (diff[0] ** 2 * es[0][0]) if which == 'b2': energy = 0.5 * (diff[1] ** 2 * es[1][1]) if which == 'stretch': energy = 0.5 * (diff[2] ** 2 * es[2][2]) if which == 'twist': energy = 0.5 * (diff[3] ** 2 * es[3][3]) if which == 'st_coupling': energy = 0.5 * (diff[2] * diff[3] * es[2][3]) if which == 'bs_coupling': energy = 0.5 * ((diff[0] * diff[2] * es[0][2]) + (diff[1] * diff[2] * es[1][2])) if which == 'bt_coupling': energy = 0.5 * ((diff[0] * diff[3] * es[0][3]) + (diff[1] * diff[3] * es[1][3])) if which == 'bb_coupling': energy = 0.5 * (diff[0] * diff[1] * es[0][1]) if which == 'st': energy = 0.5 * ((diff[0] ** 2 * es[0][0]) + (diff[1] ** 2 * es[1][1]) + (diff[2] ** 2 * es[2][2]) + (diff[3] ** 2 * es[3][3]) + (diff[2] * diff[3] * es[2][3])) if which == 'bs': energy = 0.5 * ((diff[0] ** 2 * es[0][0]) + (diff[1] ** 2 * es[1][1]) + (diff[2] ** 2 * es[2][2]) + (diff[0] * diff[2] * es[0][2]) + (diff[1] * diff[2] * es[1][2])) if which == 'bt': energy = 0.5 * ((diff[0] ** 2 * es[0][0]) + (diff[1] ** 2 * es[1][1]) + (diff[3] ** 2 * es[3][3]) + (diff[0] * diff[3] * es[0][3]) + (diff[1] * diff[3] * es[1][3])) return energy
[ "def", "_calcEnergyBendStretchTwist", "(", "self", ",", "diff", ",", "es", ",", "which", ")", ":", "if", "which", "not", "in", "self", ".", "enGlobalTypes", ":", "raise", "ValueError", "(", "'{0} is not a supported energy keywords.\\n Use any of the following: \\n {1}'", ".", "format", "(", "which", ",", "self", ".", "enGlobalTypes", ")", ")", "energy", "=", "None", "if", "which", "==", "'full'", ":", "temp", "=", "np", ".", "matrix", "(", "diff", ")", "energy", "=", "0.5", "*", "(", "(", "temp", "*", "es", ")", "*", "temp", ".", "T", ")", "energy", "=", "energy", "[", "0", ",", "0", "]", "if", "which", "==", "'diag'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", "]", "[", "0", "]", ")", "+", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "+", "(", "diff", "[", "2", "]", "**", "2", "*", "es", "[", "2", "]", "[", "2", "]", ")", "+", "(", "diff", "[", "3", "]", "**", "2", "*", "es", "[", "3", "]", "[", "3", "]", ")", ")", "if", "which", "==", "'bend'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", "]", "[", "0", "]", ")", "+", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "+", "(", "diff", "[", "0", "]", "*", "diff", "[", "1", "]", "*", "es", "[", "0", "]", "[", "1", "]", ")", ")", "if", "which", "==", "'b1'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", "]", "[", "0", "]", ")", "if", "which", "==", "'b2'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "if", "which", "==", "'stretch'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "2", "]", "**", "2", "*", "es", "[", "2", "]", "[", "2", "]", ")", "if", "which", "==", "'twist'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "3", "]", "**", "2", "*", "es", "[", "3", "]", "[", "3", "]", ")", "if", "which", "==", "'st_coupling'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "2", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "2", "]", "[", "3", "]", ")", "if", "which", "==", "'bs_coupling'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "*", "diff", "[", "2", "]", "*", "es", "[", "0", "]", "[", "2", "]", ")", "+", "(", "diff", "[", "1", "]", "*", "diff", "[", "2", "]", "*", "es", "[", "1", "]", "[", "2", "]", ")", ")", "if", "which", "==", "'bt_coupling'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "0", "]", "[", "3", "]", ")", "+", "(", "diff", "[", "1", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "1", "]", "[", "3", "]", ")", ")", "if", "which", "==", "'bb_coupling'", ":", "energy", "=", "0.5", "*", "(", "diff", "[", "0", "]", "*", "diff", "[", "1", "]", "*", "es", "[", "0", "]", "[", "1", "]", ")", "if", "which", "==", "'st'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", "]", "[", "0", "]", ")", "+", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "+", "(", "diff", "[", "2", "]", "**", "2", "*", "es", "[", "2", "]", "[", "2", "]", ")", "+", "(", "diff", "[", "3", "]", "**", "2", "*", "es", "[", "3", "]", "[", "3", "]", ")", "+", "(", "diff", "[", "2", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "2", "]", "[", "3", "]", ")", ")", "if", "which", "==", "'bs'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", 
"]", "[", "0", "]", ")", "+", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "+", "(", "diff", "[", "2", "]", "**", "2", "*", "es", "[", "2", "]", "[", "2", "]", ")", "+", "(", "diff", "[", "0", "]", "*", "diff", "[", "2", "]", "*", "es", "[", "0", "]", "[", "2", "]", ")", "+", "(", "diff", "[", "1", "]", "*", "diff", "[", "2", "]", "*", "es", "[", "1", "]", "[", "2", "]", ")", ")", "if", "which", "==", "'bt'", ":", "energy", "=", "0.5", "*", "(", "(", "diff", "[", "0", "]", "**", "2", "*", "es", "[", "0", "]", "[", "0", "]", ")", "+", "(", "diff", "[", "1", "]", "**", "2", "*", "es", "[", "1", "]", "[", "1", "]", ")", "+", "(", "diff", "[", "3", "]", "**", "2", "*", "es", "[", "3", "]", "[", "3", "]", ")", "+", "(", "diff", "[", "0", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "0", "]", "[", "3", "]", ")", "+", "(", "diff", "[", "1", "]", "*", "diff", "[", "3", "]", "*", "es", "[", "1", "]", "[", "3", "]", ")", ")", "return", "energy" ]
r"""Calculate energy for ``esType='BST'`` using a difference vector. It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame. Parameters ---------- diff : numpy.ndarray Array of difference between minimum and current parameter values. .. math:: \mathbf{x} = \begin{bmatrix} (\theta^{x}_{i} - \theta^{x}_0) & (\theta^{y}_{i} - \theta^{y}_0) & (L_i - L_0) & (\phi_i - \phi_0) \end{bmatrix} es : numpy.ndarray Elastic matrix. See in :meth:`dnaEY.getStretchTwistBendModulus` about elastic matrix. which : str For which type of motions, energy will be calculated. see ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords. Return ------ energy : float Deformation free energy value
[ "r", "Calculate", "energy", "for", "esType", "=", "BST", "using", "a", "difference", "vector", "." ]
python
train
35.54
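The which='full' branch above is the quadratic form 0.5 * x^T E x; a numeric illustration with made-up values:

import numpy as np

es = np.diag([1.0, 1.0, 50.0, 25.0])          # illustrative 4x4 elastic matrix
diff = np.array([0.02, -0.01, 0.3, 0.05])     # [d-theta_x, d-theta_y, d-L, d-phi]
energy_full = 0.5 * diff @ es @ diff          # matches the which='full' branch
energy_twist = 0.5 * diff[3] ** 2 * es[3, 3]  # matches the which='twist' branch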
tompollard/tableone
tableone.py
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L675-L700
def _create_cat_table(self,data): """ Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables. """ table = self.cat_describe['t1_summary'].copy() # add the total count of null values across all levels isnull = data[self._categorical].isnull().sum().to_frame(name='isnull') isnull.index.rename('variable', inplace=True) try: table = table.join(isnull) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(isnull) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
[ "def", "_create_cat_table", "(", "self", ",", "data", ")", ":", "table", "=", "self", ".", "cat_describe", "[", "'t1_summary'", "]", ".", "copy", "(", ")", "# add the total count of null values across all levels", "isnull", "=", "data", "[", "self", ".", "_categorical", "]", ".", "isnull", "(", ")", ".", "sum", "(", ")", ".", "to_frame", "(", "name", "=", "'isnull'", ")", "isnull", ".", "index", ".", "rename", "(", "'variable'", ",", "inplace", "=", "True", ")", "try", ":", "table", "=", "table", ".", "join", "(", "isnull", ")", "except", "TypeError", ":", "# if columns form a CategoricalIndex, need to convert to string first", "table", ".", "columns", "=", "table", ".", "columns", ".", "astype", "(", "str", ")", "table", "=", "table", ".", "join", "(", "isnull", ")", "# add pval column", "if", "self", ".", "_pval", "and", "self", ".", "_pval_adjust", ":", "table", "=", "table", ".", "join", "(", "self", ".", "_significance_table", "[", "[", "'pval (adjusted)'", ",", "'ptest'", "]", "]", ")", "elif", "self", ".", "_pval", ":", "table", "=", "table", ".", "join", "(", "self", ".", "_significance_table", "[", "[", "'pval'", ",", "'ptest'", "]", "]", ")", "return", "table" ]
Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables.
[ "Create", "table", "one", "for", "categorical", "data", "." ]
python
train
37.692308
BlueBrain/NeuroM
neurom/check/runner.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/runner.py#L53-L71
def run(self, path): '''Test a bunch of files and return a summary JSON report''' SEPARATOR = '=' * 40 summary = {} res = True for _f in utils.get_files_by_path(path): L.info(SEPARATOR) status, summ = self._check_file(_f) res &= status if summ is not None: summary.update(summ) L.info(SEPARATOR) status = 'PASS' if res else 'FAIL' return {'files': summary, 'STATUS': status}
[ "def", "run", "(", "self", ",", "path", ")", ":", "SEPARATOR", "=", "'='", "*", "40", "summary", "=", "{", "}", "res", "=", "True", "for", "_f", "in", "utils", ".", "get_files_by_path", "(", "path", ")", ":", "L", ".", "info", "(", "SEPARATOR", ")", "status", ",", "summ", "=", "self", ".", "_check_file", "(", "_f", ")", "res", "&=", "status", "if", "summ", "is", "not", "None", ":", "summary", ".", "update", "(", "summ", ")", "L", ".", "info", "(", "SEPARATOR", ")", "status", "=", "'PASS'", "if", "res", "else", "'FAIL'", "return", "{", "'files'", ":", "summary", ",", "'STATUS'", ":", "status", "}" ]
Test a bunch of files and return a summary JSON report
[ "Test", "a", "bunch", "of", "files", "and", "return", "a", "summary", "JSON", "report" ]
python
train
25.736842
wmayner/pyphi
pyphi/partition.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L387-L439
def mip_bipartitions(mechanism, purview, node_labels=None):
    r"""Return a generator of all |small_phi| bipartitions of a mechanism
    over a purview.

    Excludes all bipartitions where one half is entirely empty, *e.g*::

         A    ∅
        ─── ✕ ───
         B    ∅

    is not valid, but ::

         A    ∅
        ─── ✕ ───
         ∅    B

    is.

    Args:
        mechanism (tuple[int]): The mechanism to partition
        purview (tuple[int]): The purview to partition

    Yields:
        Bipartition: Where each bipartition is::

            bipart[0].mechanism   bipart[1].mechanism
            ─────────────────── ✕ ───────────────────
            bipart[0].purview     bipart[1].purview

    Example:
        >>> mechanism = (0,)
        >>> purview = (2, 3)
        >>> for partition in mip_bipartitions(mechanism, purview):
        ...     print(partition, '\n')  # doctest: +NORMALIZE_WHITESPACE
        ∅     0
        ─── ✕ ───
        2     3
        <BLANKLINE>
        ∅     0
        ─── ✕ ───
        3     2
        <BLANKLINE>
        ∅     0
        ─── ✕ ───
        2,3   ∅
    """
    numerators = bipartition(mechanism)
    denominators = directed_bipartition(purview)

    for n, d in product(numerators, denominators):
        if (n[0] or d[0]) and (n[1] or d[1]):
            yield Bipartition(Part(n[0], d[0]), Part(n[1], d[1]),
                              node_labels=node_labels)
[ "def", "mip_bipartitions", "(", "mechanism", ",", "purview", ",", "node_labels", "=", "None", ")", ":", "numerators", "=", "bipartition", "(", "mechanism", ")", "denominators", "=", "directed_bipartition", "(", "purview", ")", "for", "n", ",", "d", "in", "product", "(", "numerators", ",", "denominators", ")", ":", "if", "(", "n", "[", "0", "]", "or", "d", "[", "0", "]", ")", "and", "(", "n", "[", "1", "]", "or", "d", "[", "1", "]", ")", ":", "yield", "Bipartition", "(", "Part", "(", "n", "[", "0", "]", ",", "d", "[", "0", "]", ")", ",", "Part", "(", "n", "[", "1", "]", ",", "d", "[", "1", "]", ")", ",", "node_labels", "=", "node_labels", ")" ]
r"""Return an generator of all |small_phi| bipartitions of a mechanism over a purview. Excludes all bipartitions where one half is entirely empty, *e.g*:: A ∅ ─── ✕ ─── B ∅ is not valid, but :: A ∅ ─── ✕ ─── ∅ B is. Args: mechanism (tuple[int]): The mechanism to partition purview (tuple[int]): The purview to partition Yields: Bipartition: Where each bipartition is:: bipart[0].mechanism bipart[1].mechanism ─────────────────── ✕ ─────────────────── bipart[0].purview bipart[1].purview Example: >>> mechanism = (0,) >>> purview = (2, 3) >>> for partition in mip_bipartitions(mechanism, purview): ... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE ∅ 0 ─── ✕ ─── 2 3 <BLANKLINE> ∅ 0 ─── ✕ ─── 3 2 <BLANKLINE> ∅ 0 ─── ✕ ─── 2,3 ∅
[ "r", "Return", "an", "generator", "of", "all", "|small_phi|", "bipartitions", "of", "a", "mechanism", "over", "a", "purview", "." ]
python
train
26.188679
frejanordsiek/GeminiMotorDrive
GeminiMotorDrive/__init__.py
https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/__init__.py#L41-L85
def get_driver(driver='ASCII_RS232', *args, **keywords): """ Gets a driver for a Parker Motion Gemini drive. Gets and connects a particular driver in ``drivers`` to a Parker Motion Gemini GV-6 or GT-6 servo/stepper motor drive. The only driver currently supported is the ``'ASCII_RS232'`` driver which corresponds to ``drivers.ASCII_RS232``. Parameters ---------- driver : str, optional The driver to communicate to the particular driver with, which includes the hardware connection and possibly the communications protocol. The only driver currently supported is the ``'ASCII_RS232'`` driver which corresponds to ``drivers.ASCII_RS232``. *args : additional positional arguments Additional positional arguments to pass onto the constructor for the driver. **keywords : additional keyword arguments Additional keyword arguments to pass onto the constructor for the driver. Returns ------- drivers : drivers The connected drivers class that is connected to the drive. Raises ------ NotImplementedError If the `driver` is not supported. See Also -------- drivers drivers.ASCII_RS232 """ if driver.upper() == 'ASCII_RS232': return drivers.ASCII_RS232(*args, **keywords) else: raise NotImplementedError('Driver not supported: ' + str(driver))
[ "def", "get_driver", "(", "driver", "=", "'ASCII_RS232'", ",", "*", "args", ",", "*", "*", "keywords", ")", ":", "if", "driver", ".", "upper", "(", ")", "==", "'ASCII_RS232'", ":", "return", "drivers", ".", "ASCII_RS232", "(", "*", "args", ",", "*", "*", "keywords", ")", "else", ":", "raise", "NotImplementedError", "(", "'Driver not supported: '", "+", "str", "(", "driver", ")", ")" ]
Gets a driver for a Parker Motion Gemini drive. Gets and connects a particular driver in ``drivers`` to a Parker Motion Gemini GV-6 or GT-6 servo/stepper motor drive. The only driver currently supported is the ``'ASCII_RS232'`` driver which corresponds to ``drivers.ASCII_RS232``. Parameters ---------- driver : str, optional The driver to communicate to the particular driver with, which includes the hardware connection and possibly the communications protocol. The only driver currently supported is the ``'ASCII_RS232'`` driver which corresponds to ``drivers.ASCII_RS232``. *args : additional positional arguments Additional positional arguments to pass onto the constructor for the driver. **keywords : additional keyword arguments Additional keyword arguments to pass onto the constructor for the driver. Returns ------- drivers : drivers The connected drivers class that is connected to the drive. Raises ------ NotImplementedError If the `driver` is not supported. See Also -------- drivers drivers.ASCII_RS232
[ "Gets", "a", "driver", "for", "a", "Parker", "Motion", "Gemini", "drive", "." ]
python
train
31.866667
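Call pattern from the docstring; extra positional arguments go straight to drivers.ASCII_RS232, so the serial port argument shown is an assumption:

import GeminiMotorDrive

drive = GeminiMotorDrive.get_driver('ASCII_RS232', '/dev/ttyUSB0')
# GeminiMotorDrive.get_driver('GPIB') would raise NotImplementedError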
ppb/pursuedpybear
ppb/scenes.py
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/ppb/scenes.py#L171-L190
def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator: """ Get an iterator of GameObjects by kind or tag. kind: Any type. Pass to get a subset of contained GameObjects with the given type. tag: Any Hashable object. Pass to get a subset of contained GameObjects with the given tag. Pass both kind and tag to get objects that are both that type and that tag. Examples: scene.get(type=MyGameObject) scene.get(tag="red") scene.get(type=MyGameObject, tag="red") """ return self.game_objects.get(kind=kind, tag=tag, **kwargs)
[ "def", "get", "(", "self", ",", "*", ",", "kind", ":", "Type", "=", "None", ",", "tag", ":", "Hashable", "=", "None", ",", "*", "*", "kwargs", ")", "->", "Iterator", ":", "return", "self", ".", "game_objects", ".", "get", "(", "kind", "=", "kind", ",", "tag", "=", "tag", ",", "*", "*", "kwargs", ")" ]
Get an iterator of GameObjects by kind or tag. kind: Any type. Pass to get a subset of contained GameObjects with the given type. tag: Any Hashable object. Pass to get a subset of contained GameObjects with the given tag. Pass both kind and tag to get objects that are both that type and that tag. Examples: scene.get(type=MyGameObject) scene.get(tag="red") scene.get(type=MyGameObject, tag="red")
[ "Get", "an", "iterator", "of", "GameObjects", "by", "kind", "or", "tag", "." ]
python
train
33.1
MolSSI-BSE/basis_set_exchange
basis_set_exchange/sort.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/sort.py#L218-L238
def sort_references_dict(refs): """Sorts a reference dictionary into a standard order The keys of the references are also sorted, and the keys for the data for each reference are put in a more canonical order. """ if _use_odict: refs_sorted = OrderedDict() else: refs_sorted = dict() # We insert this first, That is ok - it will be overwritten # with the sorted version later refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema'] # This sorts the entries by reference key (author1985a, etc) for k, v in sorted(refs.items()): refs_sorted[k] = sort_single_reference(v) return refs_sorted
[ "def", "sort_references_dict", "(", "refs", ")", ":", "if", "_use_odict", ":", "refs_sorted", "=", "OrderedDict", "(", ")", "else", ":", "refs_sorted", "=", "dict", "(", ")", "# We insert this first, That is ok - it will be overwritten", "# with the sorted version later", "refs_sorted", "[", "'molssi_bse_schema'", "]", "=", "refs", "[", "'molssi_bse_schema'", "]", "# This sorts the entries by reference key (author1985a, etc)", "for", "k", ",", "v", "in", "sorted", "(", "refs", ".", "items", "(", ")", ")", ":", "refs_sorted", "[", "k", "]", "=", "sort_single_reference", "(", "v", ")", "return", "refs_sorted" ]
Sorts a reference dictionary into a standard order The keys of the references are also sorted, and the keys for the data for each reference are put in a more canonical order.
[ "Sorts", "a", "reference", "dictionary", "into", "a", "standard", "order" ]
python
train
30.904762
nedbat/django_coverage_plugin
django_coverage_plugin/plugin.py
https://github.com/nedbat/django_coverage_plugin/blob/0072737c0ea5a1ca6b9f046af4947de191f13804/django_coverage_plugin/plugin.py#L50-L89
def check_debug(): """Check that Django's template debugging is enabled. Django's built-in "template debugging" records information the plugin needs to do its work. Check that the setting is correct, and raise an exception if it is not. Returns True if the debug check was performed, False otherwise """ from django.conf import settings if not settings.configured: return False # I _think_ this check is all that's needed and the 3 "hasattr" checks # below can be removed, but it's not clear how to verify that from django.apps import apps if not apps.ready: return False # django.template.backends.django gets loaded lazily, so return false # until they've been loaded if not hasattr(django.template, "backends"): return False if not hasattr(django.template.backends, "django"): return False if not hasattr(django.template.backends.django, "DjangoTemplates"): raise DjangoTemplatePluginException("Can't use non-Django templates.") for engine in django.template.engines.all(): if not isinstance(engine, django.template.backends.django.DjangoTemplates): raise DjangoTemplatePluginException( "Can't use non-Django templates." ) if not engine.engine.debug: raise DjangoTemplatePluginException( "Template debugging must be enabled in settings." ) return True
[ "def", "check_debug", "(", ")", ":", "from", "django", ".", "conf", "import", "settings", "if", "not", "settings", ".", "configured", ":", "return", "False", "# I _think_ this check is all that's needed and the 3 \"hasattr\" checks", "# below can be removed, but it's not clear how to verify that", "from", "django", ".", "apps", "import", "apps", "if", "not", "apps", ".", "ready", ":", "return", "False", "# django.template.backends.django gets loaded lazily, so return false", "# until they've been loaded", "if", "not", "hasattr", "(", "django", ".", "template", ",", "\"backends\"", ")", ":", "return", "False", "if", "not", "hasattr", "(", "django", ".", "template", ".", "backends", ",", "\"django\"", ")", ":", "return", "False", "if", "not", "hasattr", "(", "django", ".", "template", ".", "backends", ".", "django", ",", "\"DjangoTemplates\"", ")", ":", "raise", "DjangoTemplatePluginException", "(", "\"Can't use non-Django templates.\"", ")", "for", "engine", "in", "django", ".", "template", ".", "engines", ".", "all", "(", ")", ":", "if", "not", "isinstance", "(", "engine", ",", "django", ".", "template", ".", "backends", ".", "django", ".", "DjangoTemplates", ")", ":", "raise", "DjangoTemplatePluginException", "(", "\"Can't use non-Django templates.\"", ")", "if", "not", "engine", ".", "engine", ".", "debug", ":", "raise", "DjangoTemplatePluginException", "(", "\"Template debugging must be enabled in settings.\"", ")", "return", "True" ]
Check that Django's template debugging is enabled. Django's built-in "template debugging" records information the plugin needs to do its work. Check that the setting is correct, and raise an exception if it is not. Returns True if the debug check was performed, False otherwise
[ "Check", "that", "Django", "s", "template", "debugging", "is", "enabled", "." ]
python
train
35.875
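A hedged usage sketch of the check above; it assumes a Django project whose DJANGO_SETTINGS_MODULE is already set, and the import path follows this record's module path.

import django
from django_coverage_plugin.plugin import check_debug

django.setup()                       # requires DJANGO_SETTINGS_MODULE
if check_debug():
    print("Template debugging is enabled; template tracing can proceed.")
else:
    print("Settings or app registry not ready; check was skipped.")
# Note: misconfigured engines raise DjangoTemplatePluginException instead.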
bcbio/bcbio-nextgen
bcbio/graph/graph.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/graph/graph.py#L140-L155
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None, outlier_stddev=None): """Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers. """ series = series or [] delta_series = delta_series or [] graph = calc_deltas(data_frame, delta_series) for s in series + delta_series: if smoothing: graph[s] = graph[s].resample(smoothing) if outlier_stddev: graph[s] = remove_outliers(graph[s], outlier_stddev) return graph[series + delta_series]
[ "def", "prep_for_graph", "(", "data_frame", ",", "series", "=", "None", ",", "delta_series", "=", "None", ",", "smoothing", "=", "None", ",", "outlier_stddev", "=", "None", ")", ":", "series", "=", "series", "or", "[", "]", "delta_series", "=", "delta_series", "or", "[", "]", "graph", "=", "calc_deltas", "(", "data_frame", ",", "delta_series", ")", "for", "s", "in", "series", "+", "delta_series", ":", "if", "smoothing", ":", "graph", "[", "s", "]", "=", "graph", "[", "s", "]", ".", "resample", "(", "smoothing", ")", "if", "outlier_stddev", ":", "graph", "[", "s", "]", "=", "remove_outliers", "(", "graph", "[", "s", "]", ",", "outlier_stddev", ")", "return", "graph", "[", "series", "+", "delta_series", "]" ]
Prepare a dataframe for graphing by calculating deltas for series that need them, resampling, and removing outliers.
[ "Prepare", "a", "dataframe", "for", "graphing", "by", "calculating", "deltas", "for", "series", "that", "need", "them", "resampling", "and", "removing", "outliers", "." ]
python
train
37.375
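A hedged usage sketch with a toy DataFrame; it assumes prep_for_graph and its helpers (calc_deltas, remove_outliers) are importable, and the string-argument resample call reflects the older pandas API this module targets.

import pandas as pd

idx = pd.date_range("2019-01-01", periods=6, freq="T")
frame = pd.DataFrame({"cpu": [1.0, 2.0, 2.0, 3.0, 50.0, 3.0]}, index=idx)

# Smooth into 2-minute buckets and drop points beyond 2 standard deviations.
graph = prep_for_graph(frame, series=["cpu"], smoothing="2T",
                       outlier_stddev=2)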
slickqa/python-client
slickqa/micromodels/fields.py
https://github.com/slickqa/python-client/blob/1d36b4977cd4140d7d24917cab2b3f82b60739c2/slickqa/micromodels/fields.py#L96-L105
def to_python(self): """The string ``'True'`` (case insensitive) will be converted to ``True``, as will any positive integers. """ if isinstance(self.data, str): return self.data.strip().lower() == 'true' if isinstance(self.data, int): return self.data > 0 return bool(self.data)
[ "def", "to_python", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "data", ",", "str", ")", ":", "return", "self", ".", "data", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'true'", "if", "isinstance", "(", "self", ".", "data", ",", "int", ")", ":", "return", "self", ".", "data", ">", "0", "return", "bool", "(", "self", ".", "data", ")" ]
The string ``'True'`` (case insensitive) will be converted to ``True``, as will any positive integers.
[ "The", "string", "True", "(", "case", "insensitive", ")", "will", "be", "converted", "to", "True", "as", "will", "any", "positive", "integers", "." ]
python
train
34.3
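The conversion rules above restated as a free function, so the edge cases are easy to exercise (a standalone sketch, not the micromodels field itself).

def to_bool(data):
    if isinstance(data, str):
        return data.strip().lower() == 'true'
    if isinstance(data, int):
        return data > 0
    return bool(data)

assert to_bool(" TRUE ") is True
assert to_bool("yes") is False       # only the literal word 'true' counts
assert to_bool(3) is True
assert to_bool(-3) is False          # non-positive ints are False
assert to_bool([1]) is True          # everything else falls back to bool()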
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L460-L473
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list") extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num") extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num') ext_community_action = ET.SubElement(extcommunity_list, "ext-community-action") ext_community_action.text = kwargs.pop('ext_community_action') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "hide_ext_community_list_holder", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"hide-ext-community-list-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "extcommunity_list", "=", "ET", ".", "SubElement", "(", "hide_ext_community_list_holder", ",", "\"extcommunity-list\"", ")", "extcommunity_list_num_key", "=", "ET", ".", "SubElement", "(", "extcommunity_list", ",", "\"extcommunity-list-num\"", ")", "extcommunity_list_num_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'extcommunity_list_num'", ")", "ext_community_action", "=", "ET", ".", "SubElement", "(", "extcommunity_list", ",", "\"ext-community-action\"", ")", "ext_community_action", ".", "text", "=", "kwargs", ".", "pop", "(", "'ext_community_action'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
64.857143
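A hedged sketch of the XML payload the auto-generated method above assembles, built directly with ElementTree; the two leaf values are illustrative.

import xml.etree.ElementTree as ET

config = ET.Element("config")
ip = ET.SubElement(config, "ip",
                   xmlns="urn:brocade.com:mgmt:brocade-common-def")
holder = ET.SubElement(ip, "hide-ext-community-list-holder",
                       xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
ext_list = ET.SubElement(holder, "extcommunity-list")
ET.SubElement(ext_list, "extcommunity-list-num").text = "100"     # illustrative
ET.SubElement(ext_list, "ext-community-action").text = "permit"   # illustrative
print(ET.tostring(config).decode())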
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12333-L12345
def resource_request_send(self, request_id, uri_type, uri, transfer_type, storage, force_mavlink1=False): ''' The autopilot is requesting a resource (file, binary, other type of data) request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t) uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t) uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t) transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t) storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t) ''' return self.send(self.resource_request_encode(request_id, uri_type, uri, transfer_type, storage), force_mavlink1=force_mavlink1)
[ "def", "resource_request_send", "(", "self", ",", "request_id", ",", "uri_type", ",", "uri", ",", "transfer_type", ",", "storage", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "resource_request_encode", "(", "request_id", ",", "uri_type", ",", "uri", ",", "transfer_type", ",", "storage", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
The autopilot is requesting a resource (file, binary, other type of data) request_id : Request ID. This ID should be re-used when sending back URI contents (uint8_t) uri_type : The type of requested URI. 0 = a file via URL. 1 = a UAVCAN binary (uint8_t) uri : The requested unique resource identifier (URI). It is not necessarily a straight domain name (depends on the URI type enum) (uint8_t) transfer_type : The way the autopilot wants to receive the URI. 0 = MAVLink FTP. 1 = binary stream. (uint8_t) storage : The storage path the autopilot wants the URI to be stored in. Will only be valid if the transfer_type has a storage associated (e.g. MAVLink FTP). (uint8_t)
[ "The", "autopilot", "is", "requesting", "a", "resource", "(", "file", "binary", "other", "type", "of", "data", ")" ]
python
train
88.230769
spyder-ide/conda-manager
conda_manager/api/manager_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/manager_api.py#L239-L244
def update_repodata(self, channels=None): """Update repodata from channels or use condarc channels if None.""" norm_channels = self.conda_get_condarc_channels(channels=channels, normalize=True) repodata_urls = self._set_repo_urls_from_channels(norm_channels) self._check_repos(repodata_urls)
[ "def", "update_repodata", "(", "self", ",", "channels", "=", "None", ")", ":", "norm_channels", "=", "self", ".", "conda_get_condarc_channels", "(", "channels", "=", "channels", ",", "normalize", "=", "True", ")", "repodata_urls", "=", "self", ".", "_set_repo_urls_from_channels", "(", "norm_channels", ")", "self", ".", "_check_repos", "(", "repodata_urls", ")" ]
Update repodata from channels or use condarc channels if None.
[ "Update", "repodata", "from", "channels", "or", "use", "condarc", "channels", "if", "None", "." ]
python
train
62.333333
juicer/juicer
juicer/utils/__init__.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L70-L83
def _user_config_file(): """ Check that the config file is present and readable. If not, copy a template in place. """ config_file = Constants.USER_CONFIG if os.path.exists(config_file) and os.access(config_file, os.R_OK): return config_file elif os.path.exists(config_file) and not os.access(config_file, os.R_OK): raise IOError("Can not read %s" % config_file) else: shutil.copy(Constants.EXAMPLE_USER_CONFIG, config_file) raise JuicerConfigError("Default config file created.\nCheck man 5 juicer.conf.")
[ "def", "_user_config_file", "(", ")", ":", "config_file", "=", "Constants", ".", "USER_CONFIG", "if", "os", ".", "path", ".", "exists", "(", "config_file", ")", "and", "os", ".", "access", "(", "config_file", ",", "os", ".", "R_OK", ")", ":", "return", "config_file", "elif", "os", ".", "path", ".", "exists", "(", "config_file", ")", "and", "not", "os", ".", "access", "(", "config_file", ",", "os", ".", "R_OK", ")", ":", "raise", "IOError", "(", "\"Can not read %s\"", "%", "config_file", ")", "else", ":", "shutil", ".", "copy", "(", "Constants", ".", "EXAMPLE_USER_CONFIG", ",", "config_file", ")", "raise", "JuicerConfigError", "(", "\"Default config file created.\\nCheck man 5 juicer.conf.\"", ")" ]
Check that the config file is present and readable. If not, copy a template in place.
[ "Check", "that", "the", "config", "file", "is", "present", "and", "readable", ".", "If", "not", "copy", "a", "template", "in", "place", "." ]
python
train
39.857143
BlueBrain/hpcbench
hpcbench/benchmark/ior.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L227-L241
def pre_execute(self, execution, context): """Make sure the named directory is created if possible""" path = self._fspath if path: path = path.format( benchmark=context.benchmark, api=execution['category'], **execution.get('metas', {}) ) if self.clean_path: shutil.rmtree(path, ignore_errors=True) if execution['metas']['file_mode'] == 'onefile': path = osp.dirname(path) if not osp.exists(path): os.makedirs(path)
[ "def", "pre_execute", "(", "self", ",", "execution", ",", "context", ")", ":", "path", "=", "self", ".", "_fspath", "if", "path", ":", "path", "=", "path", ".", "format", "(", "benchmark", "=", "context", ".", "benchmark", ",", "api", "=", "execution", "[", "'category'", "]", ",", "*", "*", "execution", ".", "get", "(", "'metas'", ",", "{", "}", ")", ")", "if", "self", ".", "clean_path", ":", "shutil", ".", "rmtree", "(", "path", ",", "ignore_errors", "=", "True", ")", "if", "execution", "[", "'metas'", "]", "[", "'file_mode'", "]", "==", "'onefile'", ":", "path", "=", "osp", ".", "dirname", "(", "path", ")", "if", "not", "osp", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
Make sure the named directory is created if possible
[ "Make", "sure", "the", "named", "directory", "is", "created", "if", "possible" ]
python
train
38.666667
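A hedged sketch of how the path template expands before any directory is created; the template string and metas keys here are illustrative, not fixed by the benchmark.

path_template = "/scratch/{benchmark}/{api}/{file_mode}"   # hypothetical
metas = {"file_mode": "onefile"}

path = path_template.format(benchmark="ior-run", api="POSIX", **metas)
print(path)   # /scratch/ior-run/POSIX/onefile
# With file_mode == 'onefile', the method above creates only the parent
# directory, since the last path component names a file.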
chrisrink10/basilisp
src/basilisp/lang/reader.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/reader.py#L1029-L1059
def read( stream, resolver: Resolver = None, data_readers: DataReaders = None, eof: Any = EOF, is_eof_error: bool = False, ) -> Iterable[ReaderForm]: """Read the contents of a stream as a Lisp expression. Callers may optionally specify a namespace resolver, which will be used to adjudicate the fully-qualified name of symbols appearing inside of a syntax quote. Callers may optionally specify a map of custom data readers that will be used to resolve values in reader macros. Data reader tags specified by callers must be namespaced symbols; non-namespaced symbols are reserved by the reader. Data reader functions must be functions taking one argument and returning a value. The caller is responsible for closing the input stream.""" reader = StreamReader(stream) ctx = ReaderContext(reader, resolver=resolver, data_readers=data_readers, eof=eof) while True: expr = _read_next(ctx) if expr is ctx.eof: if is_eof_error: raise EOFError return if expr is COMMENT or isinstance(expr, Comment): continue yield expr
[ "def", "read", "(", "stream", ",", "resolver", ":", "Resolver", "=", "None", ",", "data_readers", ":", "DataReaders", "=", "None", ",", "eof", ":", "Any", "=", "EOF", ",", "is_eof_error", ":", "bool", "=", "False", ",", ")", "->", "Iterable", "[", "ReaderForm", "]", ":", "reader", "=", "StreamReader", "(", "stream", ")", "ctx", "=", "ReaderContext", "(", "reader", ",", "resolver", "=", "resolver", ",", "data_readers", "=", "data_readers", ",", "eof", "=", "eof", ")", "while", "True", ":", "expr", "=", "_read_next", "(", "ctx", ")", "if", "expr", "is", "ctx", ".", "eof", ":", "if", "is_eof_error", ":", "raise", "EOFError", "return", "if", "expr", "is", "COMMENT", "or", "isinstance", "(", "expr", ",", "Comment", ")", ":", "continue", "yield", "expr" ]
Read the contents of a stream as a Lisp expression. Callers may optionally specify a namespace resolver, which will be used to adjudicate the fully-qualified name of symbols appearing inside of a syntax quote. Callers may optionally specify a map of custom data readers that will be used to resolve values in reader macros. Data reader tags specified by callers must be namespaced symbols; non-namespaced symbols are reserved by the reader. Data reader functions must be functions taking one argument and returning a value. The caller is responsible for closing the input stream.
[ "Read", "the", "contents", "of", "a", "stream", "as", "a", "Lisp", "expression", "." ]
python
test
36.774194
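A hedged usage sketch; it assumes basilisp is installed and the reader module is importable under the path shown in this record.

import io
from basilisp.lang import reader

forms = list(reader.read(io.StringIO("(+ 1 2) ; a comment\n:kw")))
print(forms)   # the list form and the keyword; the comment is skipped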
revelc/pyaccumulo
pyaccumulo/proxy/AccumuloProxy.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1393-L1403
def importDirectory(self, login, tableName, importDir, failureDir, setTime): """ Parameters: - login - tableName - importDir - failureDir - setTime """ self.send_importDirectory(login, tableName, importDir, failureDir, setTime) self.recv_importDirectory()
[ "def", "importDirectory", "(", "self", ",", "login", ",", "tableName", ",", "importDir", ",", "failureDir", ",", "setTime", ")", ":", "self", ".", "send_importDirectory", "(", "login", ",", "tableName", ",", "importDir", ",", "failureDir", ",", "setTime", ")", "self", ".", "recv_importDirectory", "(", ")" ]
Parameters: - login - tableName - importDir - failureDir - setTime
[ "Parameters", ":", "-", "login", "-", "tableName", "-", "importDir", "-", "failureDir", "-", "setTime" ]
python
train
26.363636
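A hedged usage sketch: `client` is assumed to be a connected AccumuloProxy client and `login` an opaque token from a prior authentication call; the table and paths are illustrative.

# Bulk-import pre-built files into 'mytable'; failed files land in the
# failures directory. setTime=True stamps entries at import time.
client.importDirectory(login, "mytable", "/bulk/files", "/bulk/failures", True)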
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/apps/ipcontrollerapp.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/ipcontrollerapp.py#L234-L266
def load_config_from_json(self): """load config from existing json connector files.""" c = self.config self.log.debug("loading config from JSON") # load from engine config fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) key = cfg['exec_key'] # json gives unicode, Session.key wants bytes c.Session.key = key.encode('ascii') xport,addr = cfg['url'].split('://') c.HubFactory.engine_transport = xport ip,ports = addr.split(':') c.HubFactory.engine_ip = ip c.HubFactory.regport = int(ports) self.location = cfg['location'] if not self.engine_ssh_server: self.engine_ssh_server = cfg['ssh'] # load client config fname = os.path.join(self.profile_dir.security_dir, self.client_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys" xport,addr = cfg['url'].split('://') c.HubFactory.client_transport = xport ip,ports = addr.split(':') c.HubFactory.client_ip = ip if not self.ssh_server: self.ssh_server = cfg['ssh'] assert int(ports) == c.HubFactory.regport, "regport mismatch"
[ "def", "load_config_from_json", "(", "self", ")", ":", "c", "=", "self", ".", "config", "self", ".", "log", ".", "debug", "(", "\"loading config from JSON\"", ")", "# load from engine config", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "profile_dir", ".", "security_dir", ",", "self", ".", "engine_json_file", ")", "self", ".", "log", ".", "info", "(", "\"loading connection info from %s\"", ",", "fname", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "cfg", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "key", "=", "cfg", "[", "'exec_key'", "]", "# json gives unicode, Session.key wants bytes", "c", ".", "Session", ".", "key", "=", "key", ".", "encode", "(", "'ascii'", ")", "xport", ",", "addr", "=", "cfg", "[", "'url'", "]", ".", "split", "(", "'://'", ")", "c", ".", "HubFactory", ".", "engine_transport", "=", "xport", "ip", ",", "ports", "=", "addr", ".", "split", "(", "':'", ")", "c", ".", "HubFactory", ".", "engine_ip", "=", "ip", "c", ".", "HubFactory", ".", "regport", "=", "int", "(", "ports", ")", "self", ".", "location", "=", "cfg", "[", "'location'", "]", "if", "not", "self", ".", "engine_ssh_server", ":", "self", ".", "engine_ssh_server", "=", "cfg", "[", "'ssh'", "]", "# load client config", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "profile_dir", ".", "security_dir", ",", "self", ".", "client_json_file", ")", "self", ".", "log", ".", "info", "(", "\"loading connection info from %s\"", ",", "fname", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "cfg", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "assert", "key", "==", "cfg", "[", "'exec_key'", "]", ",", "\"exec_key mismatch between engine and client keys\"", "xport", ",", "addr", "=", "cfg", "[", "'url'", "]", ".", "split", "(", "'://'", ")", "c", ".", "HubFactory", ".", "client_transport", "=", "xport", "ip", ",", "ports", "=", "addr", ".", "split", "(", "':'", ")", "c", ".", "HubFactory", ".", "client_ip", "=", "ip", "if", "not", "self", ".", "ssh_server", ":", "self", ".", "ssh_server", "=", "cfg", "[", "'ssh'", "]", "assert", "int", "(", "ports", ")", "==", "c", ".", "HubFactory", ".", "regport", ",", "\"regport mismatch\"" ]
load config from existing json connector files.
[ "load", "config", "from", "existing", "json", "connector", "files", "." ]
python
test
45.090909
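A hedged, standalone sketch of the connector-file shape this method consumes and the URL split it performs; field values are illustrative.

import json

cfg = json.loads('{"exec_key": "abc", "url": "tcp://127.0.0.1:9001", '
                 '"location": "10.0.0.5", "ssh": ""}')
xport, addr = cfg["url"].split("://")
ip, ports = addr.split(":")
print(xport, ip, int(ports))   # tcp 127.0.0.1 9001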
stevelittlefish/littlefish
littlefish/attackprotect.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/attackprotect.py#L61-L80
def service(self): """ Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds. """ with self.lock: # Decrement / remove all attempts for key in list(self.attempts.keys()): log.debug('Decrementing count for %s' % key) if key in self.attempts: if self.attempts[key] <= 1: del self.attempts[key] else: self.attempts[key] -= 1 # Remove expired locks now = datetime.datetime.utcnow() for key in list(self.locks.keys()): if key in self.locks and self.locks[key] < now: log.info('Expiring login lock for %s' % key) del self.locks[key]
[ "def", "service", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "# Decrement / remove all attempts", "for", "key", "in", "list", "(", "self", ".", "attempts", ".", "keys", "(", ")", ")", ":", "log", ".", "debug", "(", "'Decrementing count for %s'", "%", "key", ")", "if", "key", "in", "self", ".", "attempts", ":", "if", "self", ".", "attempts", "[", "key", "]", "<=", "1", ":", "del", "self", ".", "attempts", "[", "key", "]", "else", ":", "self", ".", "attempts", "[", "key", "]", "-=", "1", "# Remove expired locks", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "for", "key", "in", "list", "(", "self", ".", "locks", ".", "keys", "(", ")", ")", ":", "if", "key", "in", "self", ".", "locks", "and", "self", ".", "locks", "[", "key", "]", "<", "now", ":", "log", ".", "info", "(", "'Expiring login lock for %s'", "%", "key", ")", "del", "self", ".", "locks", "[", "key", "]" ]
Decrease the countdowns, and remove any expired locks. Should be called once every <decrease_every> seconds.
[ "Decrease", "the", "countdowns", "and", "remove", "any", "expired", "locks", ".", "Should", "be", "called", "once", "every", "<decrease_every", ">", "seconds", "." ]
python
test
41.55
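The decay behavior above in isolation, using plain dicts (a standalone, runnable sketch).

import datetime

attempts = {"1.2.3.4": 2, "5.6.7.8": 1}
locks = {"1.2.3.4": datetime.datetime.utcnow() - datetime.timedelta(seconds=1)}

for key in list(attempts):
    if attempts[key] <= 1:
        del attempts[key]          # countdown reached zero
    else:
        attempts[key] -= 1

now = datetime.datetime.utcnow()
for key in list(locks):
    if locks[key] < now:
        del locks[key]             # lock has expired

print(attempts)   # {'1.2.3.4': 1}
print(locks)      # {}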
jantman/awslimitchecker
awslimitchecker/runner.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/runner.py#L73-L217
def parse_args(self, argv): """ parse arguments/options :param argv: argument list to parse, usually ``sys.argv[1:]`` :type argv: list :returns: parsed arguments :rtype: :py:class:`argparse.Namespace` """ desc = 'Report on AWS service limits and usage via boto3, optionally ' \ 'warn about any services with usage nearing or exceeding their' \ ' limits. For further help, see ' \ '<http://awslimitchecker.readthedocs.org/>' # ###### IMPORTANT license notice ########## # Pursuant to Sections 5(b) and 13 of the GNU Affero General Public # License, version 3, this notice MUST NOT be removed, and MUST be # displayed to ALL USERS of this software, even if they interact with # it remotely over a network. # # See the "Development" section of the awslimitchecker documentation # (docs/source/development.rst or # <http://awslimitchecker.readthedocs.org/en/latest/development.html> ) # for further information. # ###### IMPORTANT license notice ########## epilog = 'awslimitchecker is AGPLv3-licensed Free Software. Anyone ' \ 'using this program, even remotely over a network, is ' \ 'entitled to a copy of the source code. Use `--version` for ' \ 'information on the source code location.' p = argparse.ArgumentParser(description=desc, epilog=epilog) p.add_argument('-S', '--service', action='store', nargs='*', help='perform action for only the specified service name' '; see -s|--list-services for valid names') p.add_argument('--skip-service', action='append', default=[], dest='skip_service', help='avoid performing actions for the specified service' ' name; see -s|--list-services for valid names') p.add_argument('--skip-check', action='append', default=[], dest='skip_check', help='avoid performing actions for the specified check' ' name') p.add_argument('-s', '--list-services', action='store_true', default=False, help='print a list of all AWS service types that ' 'awslimitchecker knows how to check') p.add_argument('-l', '--list-limits', action='store_true', default=False, help='print all AWS effective limits in "service_name/' 'limit_name" format') p.add_argument('--list-defaults', action='store_true', default=False, help='print all AWS default limits in "service_name/' 'limit_name" format') p.add_argument('-L', '--limit', action=StoreKeyValuePair, help='override a single AWS limit, specified in ' '"service_name/limit_name=value" format; can be ' 'specified multiple times.') p.add_argument('-u', '--show-usage', action='store_true', default=False, help='find and print the current usage of all AWS ' 'services with known limits') p.add_argument('--iam-policy', action='store_true', default=False, help='output a JSON serialized IAM Policy ' 'listing the required permissions for ' 'awslimitchecker to run correctly.') p.add_argument('-W', '--warning-threshold', action='store', type=int, default=80, help='default warning threshold (percentage of ' 'limit); default: 80') p.add_argument('-C', '--critical-threshold', action='store', type=int, default=99, help='default critical threshold (percentage of ' 'limit); default: 99') p.add_argument('-P', '--profile', action='store', dest='profile_name', type=str, default=None, help='Name of profile in the AWS cross-sdk credentials ' 'file to use credentials from; similar to the ' 'corresponding awscli option') p.add_argument('-A', '--sts-account-id', action='store', type=str, default=None, help='for use with STS, the Account ID of the ' 'destination account (account to assume a role in)') p.add_argument('-R', '--sts-account-role', action='store', type=str, default=None, 
help='for use with STS, the name of the IAM role to ' 'assume') p.add_argument('-E', '--external-id', action='store', type=str, default=None, help='External ID to use when assuming ' 'a role via STS') p.add_argument('-M', '--mfa-serial-number', action='store', type=str, default=None, help='MFA Serial Number to use when ' 'assuming a role via STS') p.add_argument('-T', '--mfa-token', action='store', type=str, default=None, help='MFA Token to use when assuming ' 'a role via STS') p.add_argument('-r', '--region', action='store', type=str, default=None, help='AWS region name to connect to; required for STS') p.add_argument('--skip-ta', action='store_true', default=False, help='do not attempt to pull *any* information on limits' ' from Trusted Advisor') g = p.add_mutually_exclusive_group() g.add_argument('--ta-refresh-wait', dest='ta_refresh_wait', action='store_true', default=False, help='If applicable, refresh all Trusted Advisor ' 'limit-related checks, and wait for the refresh to' ' complete before continuing.') g.add_argument('--ta-refresh-trigger', dest='ta_refresh_trigger', action='store_true', default=False, help='If applicable, trigger refreshes for all Trusted ' 'Advisor limit-related checks, but do not wait for ' 'them to finish refreshing; trigger the refresh ' 'and continue on (useful to ensure checks are ' 'refreshed before the next scheduled run).') g.add_argument('--ta-refresh-older', dest='ta_refresh_older', action='store', type=int, default=None, help='If applicable, trigger refreshes for all Trusted ' 'Advisor limit-related checks with results more ' 'than this number of seconds old. Wait for the ' 'refresh to complete before continuing.') p.add_argument('--ta-refresh-timeout', dest='ta_refresh_timeout', type=int, action='store', default=None, help='If waiting for TA checks to refresh, wait up to ' 'this number of seconds before continuing on ' 'anyway.') p.add_argument('--no-color', action='store_true', default=False, help='do not colorize output') p.add_argument('--no-check-version', action='store_false', default=True, dest='check_version', help='do not check latest version at startup') p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0, help='verbose output. specify twice for debug-level ' 'output.') p.add_argument('-V', '--version', dest='version', action='store_true', default=False, help='print version number and exit.') args = p.parse_args(argv) args.ta_refresh_mode = None if args.ta_refresh_wait: args.ta_refresh_mode = 'wait' elif args.ta_refresh_trigger: args.ta_refresh_mode = 'trigger' elif args.ta_refresh_older is not None: args.ta_refresh_mode = args.ta_refresh_older return args
[ "def", "parse_args", "(", "self", ",", "argv", ")", ":", "desc", "=", "'Report on AWS service limits and usage via boto3, optionally '", "'warn about any services with usage nearing or exceeding their'", "' limits. For further help, see '", "'<http://awslimitchecker.readthedocs.org/>'", "# ###### IMPORTANT license notice ##########", "# Pursuant to Sections 5(b) and 13 of the GNU Affero General Public", "# License, version 3, this notice MUST NOT be removed, and MUST be", "# displayed to ALL USERS of this software, even if they interact with", "# it remotely over a network.", "#", "# See the \"Development\" section of the awslimitchecker documentation", "# (docs/source/development.rst or", "# <http://awslimitchecker.readthedocs.org/en/latest/development.html> )", "# for further information.", "# ###### IMPORTANT license notice ##########", "epilog", "=", "'awslimitchecker is AGPLv3-licensed Free Software. Anyone '", "'using this program, even remotely over a network, is '", "'entitled to a copy of the source code. Use `--version` for '", "'information on the source code location.'", "p", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "desc", ",", "epilog", "=", "epilog", ")", "p", ".", "add_argument", "(", "'-S'", ",", "'--service'", ",", "action", "=", "'store'", ",", "nargs", "=", "'*'", ",", "help", "=", "'perform action for only the specified service name'", "'; see -s|--list-services for valid names'", ")", "p", ".", "add_argument", "(", "'--skip-service'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ",", "dest", "=", "'skip_service'", ",", "help", "=", "'avoid performing actions for the specified service'", "' name; see -s|--list-services for valid names'", ")", "p", ".", "add_argument", "(", "'--skip-check'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ",", "dest", "=", "'skip_check'", ",", "help", "=", "'avoid performing actions for the specified check'", "' name'", ")", "p", ".", "add_argument", "(", "'-s'", ",", "'--list-services'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'print a list of all AWS service types that '", "'awslimitchecker knows how to check'", ")", "p", ".", "add_argument", "(", "'-l'", ",", "'--list-limits'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'print all AWS effective limits in \"service_name/'", "'limit_name\" format'", ")", "p", ".", "add_argument", "(", "'--list-defaults'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'print all AWS default limits in \"service_name/'", "'limit_name\" format'", ")", "p", ".", "add_argument", "(", "'-L'", ",", "'--limit'", ",", "action", "=", "StoreKeyValuePair", ",", "help", "=", "'override a single AWS limit, specified in '", "'\"service_name/limit_name=value\" format; can be '", "'specified multiple times.'", ")", "p", ".", "add_argument", "(", "'-u'", ",", "'--show-usage'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'find and print the current usage of all AWS '", "'services with known limits'", ")", "p", ".", "add_argument", "(", "'--iam-policy'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'output a JSON serialized IAM Policy '", "'listing the required permissions for '", "'awslimitchecker to run correctly.'", ")", "p", ".", "add_argument", "(", "'-W'", ",", "'--warning-threshold'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "default", 
"=", "80", ",", "help", "=", "'default warning threshold (percentage of '", "'limit); default: 80'", ")", "p", ".", "add_argument", "(", "'-C'", ",", "'--critical-threshold'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "default", "=", "99", ",", "help", "=", "'default critical threshold (percentage of '", "'limit); default: 99'", ")", "p", ".", "add_argument", "(", "'-P'", ",", "'--profile'", ",", "action", "=", "'store'", ",", "dest", "=", "'profile_name'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'Name of profile in the AWS cross-sdk credentials '", "'file to use credentials from; similar to the '", "'corresponding awscli option'", ")", "p", ".", "add_argument", "(", "'-A'", ",", "'--sts-account-id'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'for use with STS, the Account ID of the '", "'destination account (account to assume a role in)'", ")", "p", ".", "add_argument", "(", "'-R'", ",", "'--sts-account-role'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'for use with STS, the name of the IAM role to '", "'assume'", ")", "p", ".", "add_argument", "(", "'-E'", ",", "'--external-id'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'External ID to use when assuming '", "'a role via STS'", ")", "p", ".", "add_argument", "(", "'-M'", ",", "'--mfa-serial-number'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'MFA Serial Number to use when '", "'assuming a role via STS'", ")", "p", ".", "add_argument", "(", "'-T'", ",", "'--mfa-token'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'MFA Token to use when assuming '", "'a role via STS'", ")", "p", ".", "add_argument", "(", "'-r'", ",", "'--region'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'AWS region name to connect to; required for STS'", ")", "p", ".", "add_argument", "(", "'--skip-ta'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'do not attempt to pull *any* information on limits'", "' from Trusted Advisor'", ")", "g", "=", "p", ".", "add_mutually_exclusive_group", "(", ")", "g", ".", "add_argument", "(", "'--ta-refresh-wait'", ",", "dest", "=", "'ta_refresh_wait'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'If applicable, refresh all Trusted Advisor '", "'limit-related checks, and wait for the refresh to'", "' complete before continuing.'", ")", "g", ".", "add_argument", "(", "'--ta-refresh-trigger'", ",", "dest", "=", "'ta_refresh_trigger'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'If applicable, trigger refreshes for all Trusted '", "'Advisor limit-related checks, but do not wait for '", "'them to finish refreshing; trigger the refresh '", "'and continue on (useful to ensure checks are '", "'refreshed before the next scheduled run).'", ")", "g", ".", "add_argument", "(", "'--ta-refresh-older'", ",", "dest", "=", "'ta_refresh_older'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "'If applicable, trigger refreshes for all Trusted '", "'Advisor limit-related checks with results more '", "'than this number of seconds old. 
Wait for the '", "'refresh to complete before continuing.'", ")", "p", ".", "add_argument", "(", "'--ta-refresh-timeout'", ",", "dest", "=", "'ta_refresh_timeout'", ",", "type", "=", "int", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "help", "=", "'If waiting for TA checks to refresh, wait up to '", "'this number of seconds before continuing on '", "'anyway.'", ")", "p", ".", "add_argument", "(", "'--no-color'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'do not colorize output'", ")", "p", ".", "add_argument", "(", "'--no-check-version'", ",", "action", "=", "'store_false'", ",", "default", "=", "True", ",", "dest", "=", "'check_version'", ",", "help", "=", "'do not check latest version at startup'", ")", "p", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "dest", "=", "'verbose'", ",", "action", "=", "'count'", ",", "default", "=", "0", ",", "help", "=", "'verbose output. specify twice for debug-level '", "'output.'", ")", "p", ".", "add_argument", "(", "'-V'", ",", "'--version'", ",", "dest", "=", "'version'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'print version number and exit.'", ")", "args", "=", "p", ".", "parse_args", "(", "argv", ")", "args", ".", "ta_refresh_mode", "=", "None", "if", "args", ".", "ta_refresh_wait", ":", "args", ".", "ta_refresh_mode", "=", "'wait'", "elif", "args", ".", "ta_refresh_trigger", ":", "args", ".", "ta_refresh_mode", "=", "'trigger'", "elif", "args", ".", "ta_refresh_older", "is", "not", "None", ":", "args", ".", "ta_refresh_mode", "=", "args", ".", "ta_refresh_older", "return", "args" ]
parse arguments/options :param argv: argument list to parse, usually ``sys.argv[1:]`` :type argv: list :returns: parsed arguments :rtype: :py:class:`argparse.Namespace`
[ "parse", "arguments", "/", "options" ]
python
train
58.97931
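A hedged usage sketch parsing a representative invocation; Runner as the owning class name is an assumption based on this module's path.

runner = Runner()   # assumed class name in awslimitchecker/runner.py
args = runner.parse_args(['-l', '-W', '75', '--skip-ta'])
print(args.list_limits)         # True
print(args.warning_threshold)   # 75
print(args.skip_ta)             # True
print(args.ta_refresh_mode)     # None: no refresh flag was given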
cloud-custodian/cloud-custodian
c7n/mu.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/mu.py#L133-L152
def add_directory(self, path, ignore=None): """Add ``*.py`` files under the directory ``path`` to the archive. """ for root, dirs, files in os.walk(path): arc_prefix = os.path.relpath(root, os.path.dirname(path)) # py3 remove pyc cache dirs. if '__pycache__' in dirs: dirs.remove('__pycache__') for f in files: dest_path = os.path.join(arc_prefix, f) # ignore specific files if ignore and ignore(dest_path): continue if f.endswith('.pyc') or f.endswith('.c'): continue f_path = os.path.join(root, f) self.add_file(f_path, dest_path)
[ "def", "add_directory", "(", "self", ",", "path", ",", "ignore", "=", "None", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "arc_prefix", "=", "os", ".", "path", ".", "relpath", "(", "root", ",", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "# py3 remove pyc cache dirs.", "if", "'__pycache__'", "in", "dirs", ":", "dirs", ".", "remove", "(", "'__pycache__'", ")", "for", "f", "in", "files", ":", "dest_path", "=", "os", ".", "path", ".", "join", "(", "arc_prefix", ",", "f", ")", "# ignore specific files", "if", "ignore", "and", "ignore", "(", "dest_path", ")", ":", "continue", "if", "f", ".", "endswith", "(", "'.pyc'", ")", "or", "f", ".", "endswith", "(", "'.c'", ")", ":", "continue", "f_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "self", ".", "add_file", "(", "f_path", ",", "dest_path", ")" ]
Add ``*.py`` files under the directory ``path`` to the archive.
[ "Add", "*", ".", "py", "files", "under", "the", "directory", "path", "to", "the", "archive", "." ]
python
train
37
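A hedged usage sketch; PythonPackageArchive is the archive class this method belongs to in c7n.mu, and the path and ignore rule are illustrative.

from c7n.mu import PythonPackageArchive

archive = PythonPackageArchive()
archive.add_directory('/path/to/mypkg',                        # illustrative
                      ignore=lambda dest: dest.endswith('_test.py'))
archive.close()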
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2084-L2110
def insert_record(self, table: str, fields: Sequence[str], values: Sequence[Any], update_on_duplicate_key: bool = False) -> int: """Inserts a record into database, table "table", using the list of fieldnames and the list of values. Returns the new PK (or None).""" self.ensure_db_open() if len(fields) != len(values): raise AssertionError("Field/value mismatch") if update_on_duplicate_key: sql = get_sql_insert_or_update(table, fields, self.get_delims()) else: sql = get_sql_insert(table, fields, self.get_delims()) sql = self.localize_sql(sql) log.debug("About to insert_record with SQL template: " + sql) try: cursor = self.db.cursor() debug_sql(sql, values) cursor.execute(sql, values) # ... binds the placeholders (?, %s) to values in the process new_pk = get_pk_of_last_insert(cursor) log.debug("Record inserted.") return new_pk except: # nopep8 log.exception("insert_record: Failed to insert record.") raise
[ "def", "insert_record", "(", "self", ",", "table", ":", "str", ",", "fields", ":", "Sequence", "[", "str", "]", ",", "values", ":", "Sequence", "[", "Any", "]", ",", "update_on_duplicate_key", ":", "bool", "=", "False", ")", "->", "int", ":", "self", ".", "ensure_db_open", "(", ")", "if", "len", "(", "fields", ")", "!=", "len", "(", "values", ")", ":", "raise", "AssertionError", "(", "\"Field/value mismatch\"", ")", "if", "update_on_duplicate_key", ":", "sql", "=", "get_sql_insert_or_update", "(", "table", ",", "fields", ",", "self", ".", "get_delims", "(", ")", ")", "else", ":", "sql", "=", "get_sql_insert", "(", "table", ",", "fields", ",", "self", ".", "get_delims", "(", ")", ")", "sql", "=", "self", ".", "localize_sql", "(", "sql", ")", "log", ".", "debug", "(", "\"About to insert_record with SQL template: \"", "+", "sql", ")", "try", ":", "cursor", "=", "self", ".", "db", ".", "cursor", "(", ")", "debug_sql", "(", "sql", ",", "values", ")", "cursor", ".", "execute", "(", "sql", ",", "values", ")", "# ... binds the placeholders (?, %s) to values in the process", "new_pk", "=", "get_pk_of_last_insert", "(", "cursor", ")", "log", ".", "debug", "(", "\"Record inserted.\"", ")", "return", "new_pk", "except", ":", "# nopep8", "log", ".", "exception", "(", "\"insert_record: Failed to insert record.\"", ")", "raise" ]
Inserts a record into database, table "table", using the list of fieldnames and the list of values. Returns the new PK (or None).
[ "Inserts", "a", "record", "into", "database", "table", "table", "using", "the", "list", "of", "fieldnames", "and", "the", "list", "of", "values", ".", "Returns", "the", "new", "PK", "(", "or", "None", ")", "." ]
python
train
44.481481
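A hedged usage sketch: `db` is assumed to be a connected rnc_db database object, and the table and fields are illustrative.

new_pk = db.insert_record(
    "patient",                          # illustrative table
    ["forename", "surname"],
    ["Ada", "Lovelace"],
    update_on_duplicate_key=False,
)
print(new_pk)   # PK the database assigned to the new row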
rackerlabs/simpl
simpl/config.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/config.py#L385-L389
def prog(self): """Program name.""" if not self._prog: self._prog = self._parser.prog return self._prog
[ "def", "prog", "(", "self", ")", ":", "if", "not", "self", ".", "_prog", ":", "self", ".", "_prog", "=", "self", ".", "_parser", ".", "prog", "return", "self", ".", "_prog" ]
Program name.
[ "Program", "name", "." ]
python
train
27
fracpete/python-weka-wrapper3
python/weka/core/classes.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L1683-L1705
def from_commandline(cmdline, classname=None): """ Creates an OptionHandler based on the provided commandline string. :param cmdline: the commandline string to use :type cmdline: str :param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation) :type classname: str :return: the generated option handler instance :rtype: object """ params = split_options(cmdline) cls = params[0] params = params[1:] handler = OptionHandler(javabridge.static_call( "Lweka/core/Utils;", "forName", "(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;", javabridge.class_for_name("java.lang.Object"), cls, params)) if classname is None: return handler else: c = get_class(classname) return c(jobject=handler.jobject)
[ "def", "from_commandline", "(", "cmdline", ",", "classname", "=", "None", ")", ":", "params", "=", "split_options", "(", "cmdline", ")", "cls", "=", "params", "[", "0", "]", "params", "=", "params", "[", "1", ":", "]", "handler", "=", "OptionHandler", "(", "javabridge", ".", "static_call", "(", "\"Lweka/core/Utils;\"", ",", "\"forName\"", ",", "\"(Ljava/lang/Class;Ljava/lang/String;[Ljava/lang/String;)Ljava/lang/Object;\"", ",", "javabridge", ".", "class_for_name", "(", "\"java.lang.Object\"", ")", ",", "cls", ",", "params", ")", ")", "if", "classname", "is", "None", ":", "return", "handler", "else", ":", "c", "=", "get_class", "(", "classname", ")", "return", "c", "(", "jobject", "=", "handler", ".", "jobject", ")" ]
Creates an OptionHandler based on the provided commandline string. :param cmdline: the commandline string to use :type cmdline: str :param classname: the classname of the wrapper to return other than OptionHandler (in dot-notation) :type classname: str :return: the generated option handler instance :rtype: object
[ "Creates", "an", "OptionHandler", "based", "on", "the", "provided", "commandline", "string", "." ]
python
train
36.73913
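A hedged usage sketch; it assumes python-weka-wrapper3 is installed with a JVM started via weka.core.jvm, and the J48 command line is illustrative.

import weka.core.jvm as jvm
from weka.core.classes import from_commandline

jvm.start()
classifier = from_commandline(
    "weka.classifiers.trees.J48 -C 0.25 -M 2",
    classname="weka.classifiers.Classifier")
print(classifier.to_commandline())
jvm.stop()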
nats-io/python-nats
nats/io/client.py
https://github.com/nats-io/python-nats/blob/4a409319c409e7e55ce8377b64b406375c5f455b/nats/io/client.py#L812-L820
def auto_unsubscribe(self, sid, limit=1): """ Sends an UNSUB command to the server. Unsubscribe is one of the basic building blocks in order to be able to define request/response semantics via pub/sub by announcing to the server limited interest a priori. """ if self.is_draining: raise ErrConnectionDraining yield self._unsubscribe(sid, limit)
[ "def", "auto_unsubscribe", "(", "self", ",", "sid", ",", "limit", "=", "1", ")", ":", "if", "self", ".", "is_draining", ":", "raise", "ErrConnectionDraining", "yield", "self", ".", "_unsubscribe", "(", "sid", ",", "limit", ")" ]
Sends an UNSUB command to the server. Unsubscribe is one of the basic building blocks in order to be able to define request/response semantics via pub/sub by announcing to the server limited interest a priori.
[ "Sends", "an", "UNSUB", "command", "to", "the", "server", ".", "Unsubscribe", "is", "one", "of", "the", "basic", "building", "blocks", "in", "order", "to", "be", "able", "to", "define", "request", "/", "response", "semantics", "via", "pub", "/", "sub", "by", "announcing", "the", "server", "limited", "interest", "a", "priori", "." ]
python
train
44.666667
pydanny/cookiecutter-django
hooks/post_gen_project.py
https://github.com/pydanny/cookiecutter-django/blob/bb9b482e96d1966e20745eeea87a8aa10ed1c861/hooks/post_gen_project.py#L107-L129
def generate_random_string( length, using_digits=False, using_ascii_letters=False, using_punctuation=False ): """ Example: opting for a 50 symbol-long [a-z][A-Z][0-9] string would yield log_2((26+26+10)^50) ~= 298 bit strength. """ if not using_sysrandom: return None symbols = [] if using_digits: symbols += string.digits if using_ascii_letters: symbols += string.ascii_letters if using_punctuation: all_punctuation = set(string.punctuation) # These symbols can cause issues in environment variables unsuitable = {"'", '"', "\\", "$"} suitable = all_punctuation.difference(unsuitable) symbols += "".join(suitable) return "".join([random.choice(symbols) for _ in range(length)])
[ "def", "generate_random_string", "(", "length", ",", "using_digits", "=", "False", ",", "using_ascii_letters", "=", "False", ",", "using_punctuation", "=", "False", ")", ":", "if", "not", "using_sysrandom", ":", "return", "None", "symbols", "=", "[", "]", "if", "using_digits", ":", "symbols", "+=", "string", ".", "digits", "if", "using_ascii_letters", ":", "symbols", "+=", "string", ".", "ascii_letters", "if", "using_punctuation", ":", "all_punctuation", "=", "set", "(", "string", ".", "punctuation", ")", "# These symbols can cause issues in environment variables", "unsuitable", "=", "{", "\"'\"", ",", "'\"'", ",", "\"\\\\\"", ",", "\"$\"", "}", "suitable", "=", "all_punctuation", ".", "difference", "(", "unsuitable", ")", "symbols", "+=", "\"\"", ".", "join", "(", "suitable", ")", "return", "\"\"", ".", "join", "(", "[", "random", ".", "choice", "(", "symbols", ")", "for", "_", "in", "range", "(", "length", ")", "]", ")" ]
Example: opting for a 50 symbol-long [a-z][A-Z][0-9] string would yield log_2((26+26+10)^50) ~= 298 bit strength.
[ "Example", ":", "opting", "out", "for", "50", "symbol", "-", "long", "[", "a", "-", "z", "]", "[", "A", "-", "Z", "]", "[", "0", "-", "9", "]", "string", "would", "yield", "log_2", "((", "26", "+", "26", "+", "50", ")", "^50", ")", "~", "=", "334", "bit", "strength", "." ]
python
test
33.913043
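A hedged usage sketch of the hook function above; it must run inside the hook's module so `using_sysrandom` and the `random`/`string` imports are in scope.

secret = generate_random_string(
    50, using_digits=True, using_ascii_letters=True)
# 62 symbols over 50 positions gives ~298 bits of entropy; None is
# returned when no system RNG is available (using_sysrandom is False).
print(secret)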
wbond/asn1crypto
asn1crypto/x509.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L1541-L1565
def url(self): """ :return: None or a unicode string of the distribution point's URL """ if self._url is False: self._url = None name = self['distribution_point'] if name.name != 'full_name': raise ValueError(unwrap( ''' CRL distribution points that are relative to the issuer are not supported ''' )) for general_name in name.chosen: if general_name.name == 'uniform_resource_identifier': url = general_name.native if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')): self._url = url break return self._url
[ "def", "url", "(", "self", ")", ":", "if", "self", ".", "_url", "is", "False", ":", "self", ".", "_url", "=", "None", "name", "=", "self", "[", "'distribution_point'", "]", "if", "name", ".", "name", "!=", "'full_name'", ":", "raise", "ValueError", "(", "unwrap", "(", "'''\n CRL distribution points that are relative to the issuer are\n not supported\n '''", ")", ")", "for", "general_name", "in", "name", ".", "chosen", ":", "if", "general_name", ".", "name", "==", "'uniform_resource_identifier'", ":", "url", "=", "general_name", ".", "native", "if", "url", ".", "lower", "(", ")", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ",", "'ldap://'", ",", "'ldaps://'", ")", ")", ":", "self", ".", "_url", "=", "url", "break", "return", "self", ".", "_url" ]
:return: None or a unicode string of the distribution point's URL
[ ":", "return", ":", "None", "or", "a", "unicode", "string", "of", "the", "distribution", "point", "s", "URL" ]
python
train
32.96
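A hedged usage sketch pulling CRL distribution URLs off a parsed certificate; the file path is illustrative.

from asn1crypto import x509

with open("cert.der", "rb") as f:               # illustrative path
    cert = x509.Certificate.load(f.read())

for dist_point in cert.crl_distribution_points:
    print(dist_point.url)   # None when no http/https/ldap(s) URL is present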
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1456-L1471
def in_fill(self, x, y): """Tests whether the given point is inside the area that would be affected by a :meth:`fill` operation given the current path and filling parameters. Surface dimensions and clipping are not taken into account. See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`. :param x: X coordinate of the point to test :param y: Y coordinate of the point to test :type x: float :type y: float :returns: A boolean. """ return bool(cairo.cairo_in_fill(self._pointer, x, y))
[ "def", "in_fill", "(", "self", ",", "x", ",", "y", ")", ":", "return", "bool", "(", "cairo", ".", "cairo_in_fill", "(", "self", ".", "_pointer", ",", "x", ",", "y", ")", ")" ]
Tests whether the given point is inside the area that would be affected by a :meth:`fill` operation given the current path and filling parameters. Surface dimensions and clipping are not taken into account. See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`. :param x: X coordinate of the point to test :param y: Y coordinate of the point to test :type x: float :type y: float :returns: A boolean.
[ "Tests", "whether", "the", "given", "point", "is", "inside", "the", "area", "that", "would", "be", "affected", "by", "a", ":", "meth", ":", "fill", "operation", "given", "the", "current", "path", "and", "filling", "parameters", ".", "Surface", "dimensions", "and", "clipping", "are", "not", "taken", "into", "account", "." ]
python
train
36.375
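A hedged usage sketch hit-testing two points against a rectangle path; it assumes cairocffi is installed.

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
context = cairo.Context(surface)
context.rectangle(10, 10, 50, 50)

print(context.in_fill(30, 30))   # True: inside the rectangle
print(context.in_fill(90, 90))   # False: outside it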
openvax/mhctools
mhctools/input_file_formats.py
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/input_file_formats.py#L26-L61
def create_input_peptides_files( peptides, max_peptides_per_file=None, group_by_length=False): """ Creates one or more files containing one peptide per line, returns names of files. """ if group_by_length: peptide_lengths = {len(p) for p in peptides} peptide_groups = {l: [] for l in peptide_lengths} for p in peptides: peptide_groups[len(p)].append(p) else: peptide_groups = {"": peptides} file_names = [] for key, group in peptide_groups.items(): n_peptides = len(group) if not max_peptides_per_file: max_peptides_per_file = n_peptides input_file = None for i, p in enumerate(group): if i % max_peptides_per_file == 0: if input_file is not None: file_names.append(input_file.name) input_file.close() input_file = make_writable_tempfile( prefix_number=i // max_peptides_per_file, prefix_name=key, suffix=".txt") input_file.write("%s\n" % p) if input_file is not None: file_names.append(input_file.name) input_file.close() return file_names
[ "def", "create_input_peptides_files", "(", "peptides", ",", "max_peptides_per_file", "=", "None", ",", "group_by_length", "=", "False", ")", ":", "if", "group_by_length", ":", "peptide_lengths", "=", "{", "len", "(", "p", ")", "for", "p", "in", "peptides", "}", "peptide_groups", "=", "{", "l", ":", "[", "]", "for", "l", "in", "peptide_lengths", "}", "for", "p", "in", "peptides", ":", "peptide_groups", "[", "len", "(", "p", ")", "]", ".", "append", "(", "p", ")", "else", ":", "peptide_groups", "=", "{", "\"\"", ":", "peptides", "}", "file_names", "=", "[", "]", "for", "key", ",", "group", "in", "peptide_groups", ".", "items", "(", ")", ":", "n_peptides", "=", "len", "(", "group", ")", "if", "not", "max_peptides_per_file", ":", "max_peptides_per_file", "=", "n_peptides", "input_file", "=", "None", "for", "i", ",", "p", "in", "enumerate", "(", "group", ")", ":", "if", "i", "%", "max_peptides_per_file", "==", "0", ":", "if", "input_file", "is", "not", "None", ":", "file_names", ".", "append", "(", "input_file", ".", "name", ")", "input_file", ".", "close", "(", ")", "input_file", "=", "make_writable_tempfile", "(", "prefix_number", "=", "i", "//", "max_peptides_per_file", ",", "prefix_name", "=", "key", ",", "suffix", "=", "\".txt\"", ")", "input_file", ".", "write", "(", "\"%s\\n\"", "%", "p", ")", "if", "input_file", "is", "not", "None", ":", "file_names", ".", "append", "(", "input_file", ".", "name", ")", "input_file", ".", "close", "(", ")", "return", "file_names" ]
Creates one or more files containing one peptide per line, returns names of files.
[ "Creates", "one", "or", "more", "files", "containing", "one", "peptide", "per", "line", "returns", "names", "of", "files", "." ]
python
valid
34.5
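A hedged usage sketch: three peptides grouped into one temp file per distinct length; the sequences are illustrative.

paths = create_input_peptides_files(
    ["SIINFEKL", "SLYNTVATL", "AAAWYLWEV"],
    group_by_length=True)
for p in paths:
    print(p)   # one file of 8-mers, one file of 9-mers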
saltstack/salt
salt/states/esxi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L869-L1025
def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to its default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret
[ "def", "syslog_configured", "(", "name", ",", "syslog_configs", ",", "firewall", "=", "True", ",", "reset_service", "=", "True", ",", "reset_syslog_config", "=", "False", ",", "reset_configs", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "esxi_cmd", "=", "'esxi.cmd'", "host", "=", "__pillar__", "[", "'proxy'", "]", "[", "'host'", "]", "if", "reset_syslog_config", ":", "if", "not", "reset_configs", ":", "reset_configs", "=", "'all'", "# Only run the command if not using test=True", "if", "not", "__opts__", "[", "'test'", "]", ":", "reset", "=", "__salt__", "[", "esxi_cmd", "]", "(", "'reset_syslog_config'", ",", "syslog_config", "=", "reset_configs", ")", ".", "get", "(", "host", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "reset", ")", ":", "if", "isinstance", "(", "val", ",", "bool", ")", ":", "continue", "if", "not", "val", ".", "get", "(", "'success'", ")", ":", "msg", "=", "val", ".", "get", "(", "'message'", ")", "if", "not", "msg", ":", "msg", "=", "'There was an error resetting a syslog config \\'{0}\\'.'", "'Please check debug logs.'", ".", "format", "(", "val", ")", "ret", "[", "'comment'", "]", "=", "'Error: {0}'", ".", "format", "(", "msg", ")", "return", "ret", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "'reset_syslog_config'", ":", "{", "'old'", ":", "''", ",", "'new'", ":", "reset_configs", "}", "}", ")", "current_firewall", "=", "__salt__", "[", "esxi_cmd", "]", "(", "'get_firewall_status'", ")", ".", "get", "(", "host", ")", "error", "=", "current_firewall", ".", "get", "(", "'Error'", ")", "if", "error", ":", "ret", "[", "'comment'", "]", "=", "'Error: {0}'", ".", "format", "(", "error", ")", "return", "ret", "current_firewall", "=", "current_firewall", ".", "get", "(", "'rulesets'", ")", ".", "get", "(", "'syslog'", ")", "if", "current_firewall", "!=", "firewall", ":", "# Only run the command if not using test=True", "if", "not", "__opts__", "[", "'test'", "]", ":", "enabled", "=", "__salt__", "[", "esxi_cmd", "]", "(", "'enable_firewall_ruleset'", ",", "ruleset_enable", "=", "firewall", ",", "ruleset_name", "=", "'syslog'", ")", ".", "get", "(", "host", ")", "if", "enabled", ".", "get", "(", "'retcode'", ")", "!=", "0", ":", "err", "=", "enabled", ".", "get", "(", "'stderr'", ")", "out", "=", "enabled", ".", "get", "(", "'stdout'", ")", "ret", "[", "'comment'", "]", "=", "'Error: {0}'", ".", "format", "(", "err", "if", "err", "else", "out", ")", "return", "ret", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "'firewall'", ":", "{", "'old'", ":", "current_firewall", ",", "'new'", ":", "firewall", "}", "}", ")", "current_syslog_config", "=", "__salt__", "[", "esxi_cmd", "]", "(", "'get_syslog_config'", ")", ".", "get", "(", "host", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "syslog_configs", ")", ":", "# The output of get_syslog_config has different keys than the keys", "# Used to set syslog_config values. 
We need to look them up first.", "try", ":", "lookup_key", "=", "_lookup_syslog_config", "(", "key", ")", "except", "KeyError", ":", "ret", "[", "'comment'", "]", "=", "'\\'{0}\\' is not a valid config variable.'", ".", "format", "(", "key", ")", "return", "ret", "current_val", "=", "current_syslog_config", "[", "lookup_key", "]", "if", "six", ".", "text_type", "(", "current_val", ")", "!=", "six", ".", "text_type", "(", "val", ")", ":", "# Only run the command if not using test=True", "if", "not", "__opts__", "[", "'test'", "]", ":", "response", "=", "__salt__", "[", "esxi_cmd", "]", "(", "'set_syslog_config'", ",", "syslog_config", "=", "key", ",", "config_value", "=", "val", ",", "firewall", "=", "firewall", ",", "reset_service", "=", "reset_service", ")", ".", "get", "(", "host", ")", "success", "=", "response", ".", "get", "(", "key", ")", ".", "get", "(", "'success'", ")", "if", "not", "success", ":", "msg", "=", "response", ".", "get", "(", "key", ")", ".", "get", "(", "'message'", ")", "if", "not", "msg", ":", "msg", "=", "'There was an error setting syslog config \\'{0}\\'. '", "'Please check debug logs.'", ".", "format", "(", "key", ")", "ret", "[", "'comment'", "]", "=", "msg", "return", "ret", "if", "not", "ret", "[", "'changes'", "]", ".", "get", "(", "'syslog_config'", ")", ":", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "'syslog_config'", ":", "{", "}", "}", ")", "ret", "[", "'changes'", "]", "[", "'syslog_config'", "]", ".", "update", "(", "{", "key", ":", "{", "'old'", ":", "current_val", ",", "'new'", ":", "val", "}", "}", ")", "ret", "[", "'result'", "]", "=", "True", "if", "ret", "[", "'changes'", "]", "==", "{", "}", ":", "ret", "[", "'comment'", "]", "=", "'Syslog is already in the desired state.'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Syslog state will change.'", "return", "ret" ]
Ensures the specified syslog configuration parameters. By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout
[ "Ensures", "the", "specified", "syslog", "configuration", "parameters", ".", "By", "default", "this", "state", "will", "reset", "the", "syslog", "service", "after", "any", "new", "or", "changed", "parameters", "are", "set", "successfully", "." ]
python
train
40.808917
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L826-L848
def __get_doi(pub): """ Get DOI from this ONE publication entry. :param dict pub: Single publication entry :return: """ doi = "" # Doi location: d["pub"][idx]["identifier"][0]["id"] try: doi = pub["DOI"][0]["id"] doi = clean_doi(doi) except KeyError: logger_lpd_noaa.info("get_dois: KeyError: missing a doi key") except Exception: logger_lpd_noaa.info("get_dois: Exception: something went wrong") # if we received a doi that's a list, we want to concat into a single string if isinstance(doi, list): if len(doi) == 1: doi = doi[0] else: doi = ", ".join(doi) return doi
[ "def", "__get_doi", "(", "pub", ")", ":", "doi", "=", "\"\"", "# Doi location: d[\"pub\"][idx][\"identifier\"][0][\"id\"]", "try", ":", "doi", "=", "pub", "[", "\"DOI\"", "]", "[", "0", "]", "[", "\"id\"", "]", "doi", "=", "clean_doi", "(", "doi", ")", "except", "KeyError", ":", "logger_lpd_noaa", ".", "info", "(", "\"get_dois: KeyError: missing a doi key\"", ")", "except", "Exception", ":", "logger_lpd_noaa", ".", "info", "(", "\"get_dois: Exception: something went wrong\"", ")", "# if we received a doi that's a list, we want to concat into a single string", "if", "isinstance", "(", "doi", ",", "list", ")", ":", "if", "len", "(", "doi", ")", "==", "1", ":", "doi", "=", "doi", "[", "0", "]", "else", ":", "\", \"", ".", "join", "(", "doi", ")", "return", "doi" ]
Get DOI from this ONE publication entry. :param dict pub: Single publication entry :return:
[ "Get", "DOI", "from", "this", "ONE", "publication", "entry", ".", ":", "param", "dict", "pub", ":", "Single", "publication", "entry", ":", "return", ":" ]
python
train
32.565217
aio-libs/aioredis
aioredis/commands/geo.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/geo.py#L93-L132
def georadiusbymember(self, key, member, radius, unit='m', *, with_dist=False, with_hash=False, with_coord=False, count=None, sort=None, encoding=_NOTSET): """Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member. Return value follows Redis convention: * if none of ``WITH*`` flags are set -- list of strings returned: >>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km') [b"Palermo", b"Catania"] * if any flag (or all) is set -- list of named tuples returned: >>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km', ... with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] :raises TypeError: radius is not float or int :raises TypeError: count is not int :raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft`` :raises ValueError: if sort not equal ``ASC`` or ``DESC`` :rtype: list[str] or list[GeoMember] """ args = validate_georadius_options( radius, unit, with_dist, with_hash, with_coord, count, sort ) fut = self.execute( b'GEORADIUSBYMEMBER', key, member, radius, unit, *args, encoding=encoding) if with_dist or with_hash or with_coord: return wait_convert(fut, make_geomember, with_dist=with_dist, with_hash=with_hash, with_coord=with_coord) return fut
[ "def", "georadiusbymember", "(", "self", ",", "key", ",", "member", ",", "radius", ",", "unit", "=", "'m'", ",", "*", ",", "with_dist", "=", "False", ",", "with_hash", "=", "False", ",", "with_coord", "=", "False", ",", "count", "=", "None", ",", "sort", "=", "None", ",", "encoding", "=", "_NOTSET", ")", ":", "args", "=", "validate_georadius_options", "(", "radius", ",", "unit", ",", "with_dist", ",", "with_hash", ",", "with_coord", ",", "count", ",", "sort", ")", "fut", "=", "self", ".", "execute", "(", "b'GEORADIUSBYMEMBER'", ",", "key", ",", "member", ",", "radius", ",", "unit", ",", "*", "args", ",", "encoding", "=", "encoding", ")", "if", "with_dist", "or", "with_hash", "or", "with_coord", ":", "return", "wait_convert", "(", "fut", ",", "make_geomember", ",", "with_dist", "=", "with_dist", ",", "with_hash", "=", "with_hash", ",", "with_coord", "=", "with_coord", ")", "return", "fut" ]
Query a sorted set representing a geospatial index to fetch members matching a given maximum distance from a member. Return value follows Redis convention: * if none of ``WITH*`` flags are set -- list of strings returned: >>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km') [b"Palermo", b"Catania"] * if any flag (or all) is set -- list of named tuples returned: >>> await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km', ... with_dist=True) [GeoMember(name=b"Palermo", dist=190.4424, hash=None, coord=None), GeoMember(name=b"Catania", dist=56.4413, hash=None, coord=None)] :raises TypeError: radius is not float or int :raises TypeError: count is not int :raises ValueError: if unit not equal ``m``, ``km``, ``mi`` or ``ft`` :raises ValueError: if sort not equal ``ASC`` or ``DESC`` :rtype: list[str] or list[GeoMember]
[ "Query", "a", "sorted", "set", "representing", "a", "geospatial", "index", "to", "fetch", "members", "matching", "a", "given", "maximum", "distance", "from", "a", "member", "." ]
python
train
43.75
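An end-to-end sketch, assuming aioredis 1.x and a local Redis server with geo commands; the key and coordinates are illustrative:

import asyncio
import aioredis

async def demo():
    redis = await aioredis.create_redis('redis://localhost')
    await redis.geoadd('Sicily', 13.361389, 38.115556, 'Palermo')
    await redis.geoadd('Sicily', 15.087269, 37.502669, 'Catania')
    members = await redis.georadiusbymember('Sicily', 'Palermo', 200, 'km', with_dist=True)
    print(members)  # GeoMember tuples with the dist field populated
    redis.close()
    await redis.wait_closed()

asyncio.get_event_loop().run_until_complete(demo())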
bioidiap/gridtk
gridtk/generator.py
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/generator.py#L22-L39
def _ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=dict): '''Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts ''' class OrderedLoader(Loader): pass def construct_mapping(loader, node): loader.flatten_mapping(node) return object_pairs_hook(loader.construct_pairs(node)) OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping) return yaml.load(stream, OrderedLoader)
[ "def", "_ordered_load", "(", "stream", ",", "Loader", "=", "yaml", ".", "Loader", ",", "object_pairs_hook", "=", "dict", ")", ":", "class", "OrderedLoader", "(", "Loader", ")", ":", "pass", "def", "construct_mapping", "(", "loader", ",", "node", ")", ":", "loader", ".", "flatten_mapping", "(", "node", ")", "return", "object_pairs_hook", "(", "loader", ".", "construct_pairs", "(", "node", ")", ")", "OrderedLoader", ".", "add_constructor", "(", "yaml", ".", "resolver", ".", "BaseResolver", ".", "DEFAULT_MAPPING_TAG", ",", "construct_mapping", ")", "return", "yaml", ".", "load", "(", "stream", ",", "OrderedLoader", ")" ]
Loads the contents of the YAML stream into :py:class:`collections.OrderedDict`'s See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
[ "Loads", "the", "contents", "of", "the", "YAML", "stream", "into", ":", "py", ":", "class", ":", "collections", ".", "OrderedDict", "s" ]
python
train
32.055556
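A small check of what the helper enables, calling the function above with PyYAML's SafeLoader; with an OrderedDict hook, mapping keys come back in file order:

import collections, io
import yaml

text = io.StringIO("banana: 1\napple: 2\ncherry: 3")
doc = _ordered_load(text, yaml.SafeLoader, collections.OrderedDict)
print(list(doc))  # ['banana', 'apple', 'cherry'] -- file order preserved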
minhhoit/yacms
yacms/pages/admin.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/admin.py#L110-L120
def _maintain_parent(self, request, response): """ Maintain the parent ID in the querystring for response_add and response_change. """ location = response._headers.get("location") parent = request.GET.get("parent") if parent and location and "?" not in location[1]: url = "%s?parent=%s" % (location[1], parent) return HttpResponseRedirect(url) return response
[ "def", "_maintain_parent", "(", "self", ",", "request", ",", "response", ")", ":", "location", "=", "response", ".", "_headers", ".", "get", "(", "\"location\"", ")", "parent", "=", "request", ".", "GET", ".", "get", "(", "\"parent\"", ")", "if", "parent", "and", "location", "and", "\"?\"", "not", "in", "location", "[", "1", "]", ":", "url", "=", "\"%s?parent=%s\"", "%", "(", "location", "[", "1", "]", ",", "parent", ")", "return", "HttpResponseRedirect", "(", "url", ")", "return", "response" ]
Maintain the parent ID in the querystring for response_add and response_change.
[ "Maintain", "the", "parent", "ID", "in", "the", "querystring", "for", "response_add", "and", "response_change", "." ]
python
train
39.727273
Basic-Components/msgpack-rpc-protocol
python/pymprpc/client/sync.py
https://github.com/Basic-Components/msgpack-rpc-protocol/blob/7983ace5d5cfd7214df6803f9b1de458df5fe3b1/python/pymprpc/client/sync.py#L355-L377
def _make_query(self, ID: str, methodname: str, *args: Any, **kwargs: Any): """Wrap the request ID, method name, and call parameters into request data. Parameters: ID (str): - the task ID methodname (str): - name of the method to call args (Any): - positional arguments for the method to call kwargs (Any): - keyword arguments for the method to call Return: (Dict[str, Any]) : - the request as a Python dict """ query = { "MPRPC": self.VERSION, "ID": ID, "METHOD": methodname, "RETURN": True, "ARGS": args, "KWARGS": kwargs } print(query) return query
[ "def", "_make_query", "(", "self", ",", "ID", ":", "str", ",", "methodname", ":", "str", ",", "*", "args", ":", "Any", ",", "*", "*", "kwargs", ":", "Any", ")", ":", "query", "=", "{", "\"MPRPC\"", ":", "self", ".", "VERSION", ",", "\"ID\"", ":", "ID", ",", "\"METHOD\"", ":", "methodname", ",", "\"RETURN\"", ":", "True", ",", "\"ARGS\"", ":", "args", ",", "\"KWARGS\"", ":", "kwargs", "}", "print", "(", "query", ")", "return", "query" ]
Wrap the request ID, method name, and call parameters into request data. Parameters: ID (str): - the task ID methodname (str): - name of the method to call args (Any): - positional arguments for the method to call kwargs (Any): - keyword arguments for the method to call Return: (Dict[str, Any]) : - the request as a Python dict
[ "将调用请求的ID", "方法名", "参数包装为请求数据", "." ]
python
train
25.130435
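For illustration, the dict that a call like _make_query('abc123', 'add', 1, 2) builds, assuming the client's VERSION is '0.1' (a hypothetical value):

query = {
    "MPRPC": "0.1",    # protocol version (assumed value of self.VERSION)
    "ID": "abc123",    # task ID
    "METHOD": "add",   # method to invoke on the server
    "RETURN": True,    # a return value is expected
    "ARGS": (1, 2),    # positional arguments
    "KWARGS": {}       # keyword arguments
}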
Nukesor/pueue
pueue/daemon/daemon.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L404-L443
def pause(self, payload): """Pause the daemon and all processes or only specific processes.""" # Pause specific processes, if `keys` is given in the payload if payload.get('keys'): succeeded = [] failed = [] for key in payload.get('keys'): success = self.process_handler.pause_process(key) if success: succeeded.append(str(key)) else: failed.append(str(key)) message = '' if len(succeeded) > 0: message += 'Paused processes: {}.'.format(', '.join(succeeded)) status = 'success' if len(failed) > 0: message += '\nNo running process for keys: {}'.format(', '.join(failed)) status = 'error' answer = {'message': message.strip(), 'status': status} # Pause all processes and the daemon else: if payload.get('wait'): self.paused = True answer = {'message': 'Pausing daemon, but waiting for processes to finish.', 'status': 'success'} else: self.process_handler.pause_all() if not self.paused: self.paused = True answer = {'message': 'Daemon and all processes paused.', 'status': 'success'} else: answer = {'message': 'Daemon already paused, pausing all processes anyway.', 'status': 'success'} return answer
[ "def", "pause", "(", "self", ",", "payload", ")", ":", "# Pause specific processes, if `keys` is given in the payload", "if", "payload", ".", "get", "(", "'keys'", ")", ":", "succeeded", "=", "[", "]", "failed", "=", "[", "]", "for", "key", "in", "payload", ".", "get", "(", "'keys'", ")", ":", "success", "=", "self", ".", "process_handler", ".", "pause_process", "(", "key", ")", "if", "success", ":", "succeeded", ".", "append", "(", "str", "(", "key", ")", ")", "else", ":", "failed", ".", "append", "(", "str", "(", "key", ")", ")", "message", "=", "''", "if", "len", "(", "succeeded", ")", ">", "0", ":", "message", "+=", "'Paused processes: {}.'", ".", "format", "(", "', '", ".", "join", "(", "succeeded", ")", ")", "status", "=", "'success'", "if", "len", "(", "failed", ")", ">", "0", ":", "message", "+=", "'\\nNo running process for keys: {}'", ".", "format", "(", "', '", ".", "join", "(", "failed", ")", ")", "status", "=", "'error'", "answer", "=", "{", "'message'", ":", "message", ".", "strip", "(", ")", ",", "'status'", ":", "status", "}", "# Pause all processes and the daemon", "else", ":", "if", "payload", ".", "get", "(", "'wait'", ")", ":", "self", ".", "paused", "=", "True", "answer", "=", "{", "'message'", ":", "'Pausing daemon, but waiting for processes to finish.'", ",", "'status'", ":", "'success'", "}", "else", ":", "self", ".", "process_handler", ".", "pause_all", "(", ")", "if", "not", "self", ".", "paused", ":", "self", ".", "paused", "=", "True", "answer", "=", "{", "'message'", ":", "'Daemon and all processes paused.'", ",", "'status'", ":", "'success'", "}", "else", ":", "answer", "=", "{", "'message'", ":", "'Daemon already paused, pausing all processes anyway.'", ",", "'status'", ":", "'success'", "}", "return", "answer" ]
Pause the daemon and all processes or only specific processes.
[ "Start", "the", "daemon", "and", "all", "processes", "or", "only", "specific", "processes", "." ]
python
train
40
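Sketches of the two payload shapes the handler accepts; the daemon instance and job keys are hypothetical:

# pause only jobs 2 and 5
answer = daemon.pause({'keys': [2, 5]})
# pause the whole daemon, but let running processes finish first
answer = daemon.pause({'wait': True})
print(answer['status'], answer['message'])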
rootpy/rootpy
rootpy/plotting/hist.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L1245-L1258
def get_sum_w2(self, ix, iy=0, iz=0): """ Obtain the true number of entries in the bin weighted by w^2 """ if self.GetSumw2N() == 0: raise RuntimeError( "Attempting to access Sumw2 in histogram " "where weights were not stored") xl = self.nbins(axis=0, overflow=True) yl = self.nbins(axis=1, overflow=True) idx = xl * yl * iz + xl * iy + ix if not 0 <= idx < self.GetSumw2N(): raise IndexError("bin index out of range") return self.GetSumw2().At(idx)
[ "def", "get_sum_w2", "(", "self", ",", "ix", ",", "iy", "=", "0", ",", "iz", "=", "0", ")", ":", "if", "self", ".", "GetSumw2N", "(", ")", "==", "0", ":", "raise", "RuntimeError", "(", "\"Attempting to access Sumw2 in histogram \"", "\"where weights were not stored\"", ")", "xl", "=", "self", ".", "nbins", "(", "axis", "=", "0", ",", "overflow", "=", "True", ")", "yl", "=", "self", ".", "nbins", "(", "axis", "=", "1", ",", "overflow", "=", "True", ")", "idx", "=", "xl", "*", "yl", "*", "iz", "+", "xl", "*", "iy", "+", "ix", "if", "not", "0", "<=", "idx", "<", "self", ".", "GetSumw2N", "(", ")", ":", "raise", "IndexError", "(", "\"bin index out of range\"", ")", "return", "self", ".", "GetSumw2", "(", ")", ".", "At", "(", "idx", ")" ]
Obtain the true number of entries in the bin weighted by w^2
[ "Obtain", "the", "true", "number", "of", "entries", "in", "the", "bin", "weighted", "by", "w^2" ]
python
train
40.357143
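A worked instance of the flattened Sumw2 index computed above; the bin counts are illustrative:

xl, yl = 12, 7        # bins along x and y, including under/overflow
ix, iy, iz = 3, 2, 1  # target bin coordinates
idx = xl * yl * iz + xl * iy + ix
print(idx)            # 84 + 24 + 3 = 111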
saltstack/salt
salt/utils/stringutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L289-L313
def human_to_bytes(size): ''' Given a human-readable byte string (e.g. 2G, 30M), return the number of bytes. Will return 0 if the argument has unexpected form. .. versionadded:: 2018.3.0 ''' sbytes = size[:-1] unit = size[-1] if sbytes.isdigit(): sbytes = int(sbytes) if unit == 'P': sbytes *= 1125899906842624 elif unit == 'T': sbytes *= 1099511627776 elif unit == 'G': sbytes *= 1073741824 elif unit == 'M': sbytes *= 1048576 else: sbytes = 0 else: sbytes = 0 return sbytes
[ "def", "human_to_bytes", "(", "size", ")", ":", "sbytes", "=", "size", "[", ":", "-", "1", "]", "unit", "=", "size", "[", "-", "1", "]", "if", "sbytes", ".", "isdigit", "(", ")", ":", "sbytes", "=", "int", "(", "sbytes", ")", "if", "unit", "==", "'P'", ":", "sbytes", "*=", "1125899906842624", "elif", "unit", "==", "'T'", ":", "sbytes", "*=", "1099511627776", "elif", "unit", "==", "'G'", ":", "sbytes", "*=", "1073741824", "elif", "unit", "==", "'M'", ":", "sbytes", "*=", "1048576", "else", ":", "sbytes", "=", "0", "else", ":", "sbytes", "=", "0", "return", "sbytes" ]
Given a human-readable byte string (e.g. 2G, 30M), return the number of bytes. Will return 0 if the argument has unexpected form. .. versionadded:: 2018.3.0
[ "Given", "a", "human", "-", "readable", "byte", "string", "(", "e", ".", "g", ".", "2G", "30M", ")", "return", "the", "number", "of", "bytes", ".", "Will", "return", "0", "if", "the", "argument", "has", "unexpected", "form", "." ]
python
train
24.52
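A quick check of the conversion as written; note that unrecognized suffixes (including bare numbers) fall through to 0:

print(human_to_bytes('2G'))   # 2147483648 (2 * 1024**3)
print(human_to_bytes('30M'))  # 31457280 (30 * 1024**2)
print(human_to_bytes('4T'))   # 4398046511104
print(human_to_bytes('512'))  # 0 -- no recognized unit suffix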
saltstack/salt
salt/queues/sqlite_queue.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L211-L244
def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue and return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) itemlist = _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
[ "def", "pop", "(", "queue", ",", "quantity", "=", "1", ",", "is_runner", "=", "False", ")", ":", "cmd", "=", "'SELECT name FROM {0}'", ".", "format", "(", "queue", ")", "if", "quantity", "!=", "'all'", ":", "try", ":", "quantity", "=", "int", "(", "quantity", ")", "except", "ValueError", "as", "exc", ":", "error_txt", "=", "(", "'Quantity must be an integer or \"all\".\\n'", "'Error: \"{0}\".'", ".", "format", "(", "exc", ")", ")", "raise", "SaltInvocationError", "(", "error_txt", ")", "cmd", "=", "''", ".", "join", "(", "[", "cmd", ",", "' LIMIT {0}'", ".", "format", "(", "quantity", ")", "]", ")", "log", ".", "debug", "(", "'SQL Query: %s'", ",", "cmd", ")", "con", "=", "_conn", "(", "queue", ")", "items", "=", "[", "]", "with", "con", ":", "cur", "=", "con", ".", "cursor", "(", ")", "result", "=", "cur", ".", "execute", "(", "cmd", ")", ".", "fetchall", "(", ")", "if", "result", ":", "items", "=", "[", "item", "[", "0", "]", "for", "item", "in", "result", "]", "itemlist", "=", "'\",\"'", ".", "join", "(", "items", ")", "_quote_escape", "(", "itemlist", ")", "del_cmd", "=", "'''DELETE FROM {0} WHERE name IN (\"{1}\")'''", ".", "format", "(", "queue", ",", "itemlist", ")", "log", ".", "debug", "(", "'SQL Query: %s'", ",", "del_cmd", ")", "cur", ".", "execute", "(", "del_cmd", ")", "con", ".", "commit", "(", ")", "if", "is_runner", ":", "items", "=", "[", "salt", ".", "utils", ".", "json", ".", "loads", "(", "item", "[", "0", "]", ".", "replace", "(", "\"'\"", ",", "'\"'", ")", ")", "for", "item", "in", "result", "]", "log", ".", "info", "(", "items", ")", "return", "items" ]
Pop one or more or all items from the queue and return them.
[ "Pop", "one", "or", "more", "or", "all", "items", "from", "the", "queue", "return", "them", "." ]
python
train
33.294118
ArangoDB-Community/pyArango
pyArango/document.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/document.py#L59-L80
def validateField(self, field) : """Validate a field""" if field not in self.validators and not self.collection._validation['allow_foreign_fields'] : raise SchemaViolation(self.collection.__class__, field) if field in self.store: if isinstance(self.store[field], DocumentStore) : return self[field].validate() if field in self.patchStore : return self.validators[field].validate(self.patchStore[field]) else : try : return self.validators[field].validate(self.store[field]) except ValidationError as e: raise ValidationError( "'%s' -> %s" % ( field, str(e)) ) except AttributeError: if isinstance(self.validators[field], dict) and not isinstance(self.store[field], dict) : raise ValueError("Validator expected a sub document for field '%s', got '%s' instead" % (field, self.store[field]) ) else : raise return True
[ "def", "validateField", "(", "self", ",", "field", ")", ":", "if", "field", "not", "in", "self", ".", "validators", "and", "not", "self", ".", "collection", ".", "_validation", "[", "'allow_foreign_fields'", "]", ":", "raise", "SchemaViolation", "(", "self", ".", "collection", ".", "__class__", ",", "field", ")", "if", "field", "in", "self", ".", "store", ":", "if", "isinstance", "(", "self", ".", "store", "[", "field", "]", ",", "DocumentStore", ")", ":", "return", "self", "[", "field", "]", ".", "validate", "(", ")", "if", "field", "in", "self", ".", "patchStore", ":", "return", "self", ".", "validators", "[", "field", "]", ".", "validate", "(", "self", ".", "patchStore", "[", "field", "]", ")", "else", ":", "try", ":", "return", "self", ".", "validators", "[", "field", "]", ".", "validate", "(", "self", ".", "store", "[", "field", "]", ")", "except", "ValidationError", "as", "e", ":", "raise", "ValidationError", "(", "\"'%s' -> %s\"", "%", "(", "field", ",", "str", "(", "e", ")", ")", ")", "except", "AttributeError", ":", "if", "isinstance", "(", "self", ".", "validators", "[", "field", "]", ",", "dict", ")", "and", "not", "isinstance", "(", "self", ".", "store", "[", "field", "]", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Validator expected a sub document for field '%s', got '%s' instead\"", "%", "(", "field", ",", "self", ".", "store", "[", "field", "]", ")", ")", "else", ":", "raise", "return", "True" ]
Validate a field
[ "Validatie", "a", "field" ]
python
train
49.818182
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L184-L229
def get_condition(self): """ Determines the condition to be used in the condition part of the join sql. :return: The condition for the join clause :rtype: str or None """ if self.condition: return self.condition if type(self.left_table) is ModelTable and type(self.right_table) is ModelTable: # loop through fields to find the field for this model # check if this join type is for a related field for field in self.get_all_related_objects(self.right_table): related_model = field.model if hasattr(field, 'related_model'): related_model = field.related_model if related_model == self.left_table.model: table_join_field = field.field.column # self.table_join_name = field.get_accessor_name() condition = '{0}.{1} = {2}.{3}'.format( self.right_table.get_identifier(), self.right_table.model._meta.pk.name, self.left_table.get_identifier(), table_join_field, ) return condition # check if this join type is for a foreign key for field in self.right_table.model._meta.fields: if ( field.get_internal_type() == 'OneToOneField' or field.get_internal_type() == 'ForeignKey' ): if field.remote_field.model == self.left_table.model: table_join_field = field.column # self.table_join_name = field.name condition = '{0}.{1} = {2}.{3}'.format( self.right_table.get_identifier(), table_join_field, self.left_table.get_identifier(), self.left_table.model._meta.pk.name ) return condition return None
[ "def", "get_condition", "(", "self", ")", ":", "if", "self", ".", "condition", ":", "return", "self", ".", "condition", "if", "type", "(", "self", ".", "right_table", ")", "is", "ModelTable", "and", "type", "(", "self", ".", "right_table", ")", "is", "ModelTable", ":", "# loop through fields to find the field for this model", "# check if this join type is for a related field", "for", "field", "in", "self", ".", "get_all_related_objects", "(", "self", ".", "right_table", ")", ":", "related_model", "=", "field", ".", "model", "if", "hasattr", "(", "field", ",", "'related_model'", ")", ":", "related_model", "=", "field", ".", "related_model", "if", "related_model", "==", "self", ".", "left_table", ".", "model", ":", "table_join_field", "=", "field", ".", "field", ".", "column", "# self.table_join_name = field.get_accessor_name()", "condition", "=", "'{0}.{1} = {2}.{3}'", ".", "format", "(", "self", ".", "right_table", ".", "get_identifier", "(", ")", ",", "self", ".", "right_table", ".", "model", ".", "_meta", ".", "pk", ".", "name", ",", "self", ".", "left_table", ".", "get_identifier", "(", ")", ",", "table_join_field", ",", ")", "return", "condition", "# check if this join type is for a foreign key", "for", "field", "in", "self", ".", "right_table", ".", "model", ".", "_meta", ".", "fields", ":", "if", "(", "field", ".", "get_internal_type", "(", ")", "==", "'OneToOneField'", "or", "field", ".", "get_internal_type", "(", ")", "==", "'ForeignKey'", ")", ":", "if", "field", ".", "remote_field", ".", "model", "==", "self", ".", "left_table", ".", "model", ":", "table_join_field", "=", "field", ".", "column", "# self.table_join_name = field.name", "condition", "=", "'{0}.{1} = {2}.{3}'", ".", "format", "(", "self", ".", "right_table", ".", "get_identifier", "(", ")", ",", "table_join_field", ",", "self", ".", "left_table", ".", "get_identifier", "(", ")", ",", "self", ".", "left_table", ".", "model", ".", "_meta", ".", "pk", ".", "name", ")", "return", "condition", "return", "None" ]
Determines the condition to be used in the condition part of the join sql. :return: The condition for the join clause :rtype: str or None
[ "Determines", "the", "condition", "to", "be", "used", "in", "the", "condition", "part", "of", "the", "join", "sql", "." ]
python
train
44.956522
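For illustration, the string the foreign-key branch would produce for a hypothetical Order model (right table) holding an account foreign key to Account (left table):

condition = '{0}.{1} = {2}.{3}'.format('order', 'account_id', 'account', 'id')
print(condition)  # order.account_id = account.id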
SHTOOLS/SHTOOLS
pyshtools/shclasses/shgravgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shgravgrid.py#L176-L227
def plot_theta(self, colorbar=True, cb_orientation='vertical', cb_label='$g_\\theta$, m s$^{-2}$', ax=None, show=True, fname=None, **kwargs): """ Plot the theta component of the gravity field. Usage ----- x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname, **kwargs]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods. """ if ax is None: fig, axes = self.theta.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=False, **kwargs) if show: fig.show() if fname is not None: fig.savefig(fname) return fig, axes else: self.theta.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, ax=ax, **kwargs)
[ "def", "plot_theta", "(", "self", ",", "colorbar", "=", "True", ",", "cb_orientation", "=", "'vertical'", ",", "cb_label", "=", "'$g_\\\\theta$, m s$^{-2}$'", ",", "ax", "=", "None", ",", "show", "=", "True", ",", "fname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "fig", ",", "axes", "=", "self", ".", "theta", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "show", "=", "False", ",", "*", "*", "kwargs", ")", "if", "show", ":", "fig", ".", "show", "(", ")", "if", "fname", "is", "not", "None", ":", "fig", ".", "savefig", "(", "fname", ")", "return", "fig", ",", "axes", "else", ":", "self", ".", "theta", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")" ]
Plot the theta component of the gravity field. Usage ----- x.plot_theta([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname, **kwargs]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$g_\\theta$, m s$^{-2}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods.
[ "Plot", "the", "theta", "component", "of", "the", "gravity", "field", "." ]
python
train
42.653846
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/project.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/project.py#L1136-L1142
def constant(self, name, value): """Declare and set a project global constant. Project global constants are normal variables but should not be changed. They are applied to every child Jamfile.""" assert is_iterable_typed(name, basestring) assert is_iterable_typed(value, basestring) self.registry.current().add_constant(name[0], value)
[ "def", "constant", "(", "self", ",", "name", ",", "value", ")", ":", "assert", "is_iterable_typed", "(", "name", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "value", ",", "basestring", ")", "self", ".", "registry", ".", "current", "(", ")", ".", "add_constant", "(", "name", "[", "0", "]", ",", "value", ")" ]
Declare and set a project global constant. Project global constants are normal variables but should not be changed. They are applied to every child Jamfile.
[ "Declare", "and", "set", "a", "project", "global", "constant", ".", "Project", "global", "constants", "are", "normal", "variables", "but", "should", "not", "be", "changed", ".", "They", "are", "applied", "to", "every", "child", "Jamfile", "." ]
python
train
53.857143
eyeseast/python-tablefu
table_fu/formatting.py
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L10-L19
def _saferound(value, decimal_places): """ Rounds a float value off to the desired precision """ try: f = float(value) except ValueError: return '' format = '%%.%df' % decimal_places return format % f
[ "def", "_saferound", "(", "value", ",", "decimal_places", ")", ":", "try", ":", "f", "=", "float", "(", "value", ")", "except", "ValueError", ":", "return", "''", "format", "=", "'%%.%df'", "%", "decimal_places", "return", "format", "%", "f" ]
Rounds a float value off to the desired precision
[ "Rounds", "a", "float", "value", "off", "to", "the", "desired", "precision" ]
python
train
23.5
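The helper in action; the inputs are arbitrary:

print(_saferound('3.14159', 2))  # '3.14'
print(_saferound(2, 3))          # '2.000'
print(_saferound('n/a', 2))      # '' -- unparseable values become an empty string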
biocore/burrito
burrito/util.py
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L390-L397
def _absolute(self, path): """ Convert a filename to an absolute path """ path = FilePath(path) if isabs(path): return path else: # these are both Path objects, so joining with + is acceptable return self.WorkingDir + path
[ "def", "_absolute", "(", "self", ",", "path", ")", ":", "path", "=", "FilePath", "(", "path", ")", "if", "isabs", "(", "path", ")", ":", "return", "path", "else", ":", "# these are both Path objects, so joining with + is acceptable", "return", "self", ".", "WorkingDir", "+", "path" ]
Convert a filename to an absolute path
[ "Convert", "a", "filename", "to", "an", "absolute", "path" ]
python
train
35.375
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2115-L2127
def default(self, statement: Statement) -> Optional[bool]: """Executed when the command given isn't a recognized command implemented by a do_* method. :param statement: Statement object with parsed input """ if self.default_to_shell: if 'shell' not in self.exclude_from_history: self.history.append(statement) return self.do_shell(statement.command_and_args) else: err_msg = self.default_error.format(statement.command) self.decolorized_write(sys.stderr, "{}\n".format(err_msg))
[ "def", "default", "(", "self", ",", "statement", ":", "Statement", ")", "->", "Optional", "[", "bool", "]", ":", "if", "self", ".", "default_to_shell", ":", "if", "'shell'", "not", "in", "self", ".", "exclude_from_history", ":", "self", ".", "history", ".", "append", "(", "statement", ")", "return", "self", ".", "do_shell", "(", "statement", ".", "command_and_args", ")", "else", ":", "err_msg", "=", "self", ".", "default_error", ".", "format", "(", "statement", ".", "command", ")", "self", ".", "decolorized_write", "(", "sys", ".", "stderr", ",", "\"{}\\n\"", ".", "format", "(", "err_msg", ")", ")" ]
Executed when the command given isn't a recognized command implemented by a do_* method. :param statement: Statement object with parsed input
[ "Executed", "when", "the", "command", "given", "isn", "t", "a", "recognized", "command", "implemented", "by", "a", "do_", "*", "method", "." ]
python
train
44
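A minimal sketch of opting into the shell fallback described above, using the default_to_shell attribute referenced in the method:

import cmd2

class ShellFallbackApp(cmd2.Cmd):
    def __init__(self):
        super().__init__()
        self.default_to_shell = True  # unknown commands are handed to the OS shell

if __name__ == '__main__':
    ShellFallbackApp().cmdloop()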
cltl/KafNafParserPy
KafNafParserPy/constituency_data.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/constituency_data.py#L308-L317
def get_terminals_as_list(self): """ Returns all the terminal objects as a list @rtype: L{Cterminal} @return: terminal objects as list """ terminalList = [] for t_node in self.__get_t_nodes(): terminalList.append(Cterminal(t_node)) return terminalList
[ "def", "get_terminals_as_list", "(", "self", ")", ":", "terminalList", "=", "[", "]", "for", "t_node", "in", "self", ".", "__get_t_nodes", "(", ")", ":", "terminalList", ".", "append", "(", "Cterminal", "(", "t_node", ")", ")", "return", "terminalList" ]
Returns all the terminal objects as a list @rtype: L{Cterminal} @return: terminal objects as list
[ "Iterator", "that", "returns", "all", "the", "terminal", "objects" ]
python
train
32.2
abingham/docopt-subcommands
docopt_subcommands/__init__.py
https://github.com/abingham/docopt-subcommands/blob/4b5cd75bb8eed01f9405345446ca58e9a29d67ad/docopt_subcommands/__init__.py#L17-L69
def main(program=None, version=None, doc_template=None, commands=None, argv=None, exit_at_end=True): """Top-level driver for creating subcommand-based programs. Args: program: The name of your program. version: The version string for your program. doc_template: The top-level docstring template for your program. If `None`, a standard default version is applied. commands: A `Subcommands` instance. argv: The command-line arguments to parse. If `None`, this defaults to `sys.argv[1:]` exit_at_end: Whether to call `sys.exit()` at the end of the function. There are two ways to use this function. First, you can pass `program`, `version`, and `doc_template`, in which case `docopt_subcommands` will use these arguments along with the subcommands registered with `command()` to define your program. The second way to use this function is to pass in a `Subcommands` object via the `commands` argument. In this case the `program`, `version`, and `doc_template` arguments are ignored, and the `Subcommands` instance takes precedence. In both cases the `argv` argument can be used to specify the arguments to be parsed. """ if commands is None: if program is None: raise ValueError( '`program` required if subcommand object not provided') if version is None: raise ValueError( '`version` required if subcommand object not provided') commands = Subcommands(program, version, doc_template=doc_template) for name, handler in _commands: commands.add_command(handler, name) if argv is None: argv = sys.argv[1:] result = commands(argv) if exit_at_end: sys.exit(result) else: return result
[ "def", "main", "(", "program", "=", "None", ",", "version", "=", "None", ",", "doc_template", "=", "None", ",", "commands", "=", "None", ",", "argv", "=", "None", ",", "exit_at_end", "=", "True", ")", ":", "if", "commands", "is", "None", ":", "if", "program", "is", "None", ":", "raise", "ValueError", "(", "'`program` required if subcommand object not provided'", ")", "if", "version", "is", "None", ":", "raise", "ValueError", "(", "'`version` required if subcommand object not provided'", ")", "commands", "=", "Subcommands", "(", "program", ",", "version", ",", "doc_template", "=", "doc_template", ")", "for", "name", ",", "handler", "in", "_commands", ":", "commands", ".", "add_command", "(", "handler", ",", "name", ")", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "result", "=", "commands", "(", "argv", ")", "if", "exit_at_end", ":", "sys", ".", "exit", "(", "result", ")", "else", ":", "return", "result" ]
Top-level driver for creating subcommand-based programs. Args: program: The name of your program. version: The version string for your program. doc_template: The top-level docstring template for your program. If `None`, a standard default version is applied. commands: A `Subcommands` instance. argv: The command-line arguments to parse. If `None`, this defaults to `sys.argv[1:]` exit_at_end: Whether to call `sys.exit()` at the end of the function. There are two ways to use this function. First, you can pass `program`, `version`, and `doc_template`, in which case `docopt_subcommands` will use these arguments along with the subcommands registered with `command()` to define your program. The second way to use this function is to pass in a `Subcommands` object via the `commands` argument. In this case the `program`, `version`, and `doc_template` arguments are ignored, and the `Subcommands` instance takes precedence. In both cases the `argv` argument can be used to specify the arguments to be parsed.
[ "Top", "-", "level", "driver", "for", "creating", "subcommand", "-", "based", "programs", "." ]
python
train
35.962264
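A sketch of the first usage style, assuming the package exposes the command() decorator mentioned in the docstring; the handler signature and usage-docstring convention shown are assumptions, not confirmed by this function:

import docopt_subcommands as dsc

@dsc.command()
def foo(args):
    """usage: {program} foo <name>

    Say foo to a name.
    """
    print('Foo, {}!'.format(args['<name>']))

dsc.main(program='demo', version='demo 0.1')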
numenta/nupic
src/nupic/data/file_record_stream.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/file_record_stream.py#L609-L659
def _updateSequenceInfo(self, r): """Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file. """ # Get current sequence id (if any) newSequence = False sequenceId = (r[self._sequenceIdIdx] if self._sequenceIdIdx is not None else None) if sequenceId != self._currSequence: # verify that the new sequence didn't show up before if sequenceId in self._sequences: raise Exception('Broken sequence: %s, record: %s' % \ (sequenceId, r)) # add the finished sequence to the set of sequence self._sequences.add(self._currSequence) self._currSequence = sequenceId # Verify that the reset is consistent (if there is one) if self._resetIdx: assert r[self._resetIdx] == 1 newSequence = True else: # Check the reset reset = False if self._resetIdx: reset = r[self._resetIdx] if reset == 1: newSequence = True # If it's still the same old sequence make sure the time flows forward if not newSequence: if self._timeStampIdx and self._currTime is not None: t = r[self._timeStampIdx] if t < self._currTime: raise Exception('No time travel. Early timestamp for record: %s' % r) if self._timeStampIdx: self._currTime = r[self._timeStampIdx]
[ "def", "_updateSequenceInfo", "(", "self", ",", "r", ")", ":", "# Get current sequence id (if any)", "newSequence", "=", "False", "sequenceId", "=", "(", "r", "[", "self", ".", "_sequenceIdIdx", "]", "if", "self", ".", "_sequenceIdIdx", "is", "not", "None", "else", "None", ")", "if", "sequenceId", "!=", "self", ".", "_currSequence", ":", "# verify that the new sequence didn't show up before", "if", "sequenceId", "in", "self", ".", "_sequences", ":", "raise", "Exception", "(", "'Broken sequence: %s, record: %s'", "%", "(", "sequenceId", ",", "r", ")", ")", "# add the finished sequence to the set of sequence", "self", ".", "_sequences", ".", "add", "(", "self", ".", "_currSequence", ")", "self", ".", "_currSequence", "=", "sequenceId", "# Verify that the reset is consistent (if there is one)", "if", "self", ".", "_resetIdx", ":", "assert", "r", "[", "self", ".", "_resetIdx", "]", "==", "1", "newSequence", "=", "True", "else", ":", "# Check the reset", "reset", "=", "False", "if", "self", ".", "_resetIdx", ":", "reset", "=", "r", "[", "self", ".", "_resetIdx", "]", "if", "reset", "==", "1", ":", "newSequence", "=", "True", "# If it's still the same old sequence make sure the time flows forward", "if", "not", "newSequence", ":", "if", "self", ".", "_timeStampIdx", "and", "self", ".", "_currTime", "is", "not", "None", ":", "t", "=", "r", "[", "self", ".", "_timeStampIdx", "]", "if", "t", "<", "self", ".", "_currTime", ":", "raise", "Exception", "(", "'No time travel. Early timestamp for record: %s'", "%", "r", ")", "if", "self", ".", "_timeStampIdx", ":", "self", ".", "_currTime", "=", "r", "[", "self", ".", "_timeStampIdx", "]" ]
Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file.
[ "Keep", "track", "of", "sequence", "and", "make", "sure", "time", "goes", "forward" ]
python
valid
36.137255
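The boundary rules from the docstring, distilled into a standalone sketch (the function name and arguments are made up):

def starts_new_sequence(prev_seq_id, seq_id, reset):
    # rule 1: the sequence id changed; rule 2: the reset field is 1
    return (seq_id is not None and seq_id != prev_seq_id) or reset == 1

print(starts_new_sequence('a', 'b', 0))  # True (id changed)
print(starts_new_sequence('a', 'a', 1))  # True (explicit reset)
print(starts_new_sequence('a', 'a', 0))  # False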
UCSBarchlab/PyRTL
pyrtl/rtllib/adders.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/adders.py#L258-L282
def fast_group_adder(wires_to_add, reducer=wallace_reducer, final_adder=kogge_stone): """ A generalization of the carry save adder, this is designed to add many numbers together in both an area and time efficient manner. Uses a tree reducer to achieve this performance :param [WireVector] wires_to_add: an array of wirevectors to add :param reducer: the tree reducer to use :param final_adder: The two value adder to use at the end :return: a wirevector with the result of the addition The length of the result is: max(len(w) for w in wires_to_add) + ceil(log2(len(wires_to_add))) """ import math longest_wire_len = max(len(w) for w in wires_to_add) result_bitwidth = longest_wire_len + int(math.ceil(math.log(len(wires_to_add), 2))) bits = [[] for i in range(longest_wire_len)] for wire in wires_to_add: for bit_loc, bit in enumerate(wire): bits[bit_loc].append(bit) return reducer(bits, result_bitwidth, final_adder)
[ "def", "fast_group_adder", "(", "wires_to_add", ",", "reducer", "=", "wallace_reducer", ",", "final_adder", "=", "kogge_stone", ")", ":", "import", "math", "longest_wire_len", "=", "max", "(", "len", "(", "w", ")", "for", "w", "in", "wires_to_add", ")", "result_bitwidth", "=", "longest_wire_len", "+", "int", "(", "math", ".", "ceil", "(", "math", ".", "log", "(", "len", "(", "wires_to_add", ")", ",", "2", ")", ")", ")", "bits", "=", "[", "[", "]", "for", "i", "in", "range", "(", "longest_wire_len", ")", "]", "for", "wire", "in", "wires_to_add", ":", "for", "bit_loc", ",", "bit", "in", "enumerate", "(", "wire", ")", ":", "bits", "[", "bit_loc", "]", ".", "append", "(", "bit", ")", "return", "reducer", "(", "bits", ",", "result_bitwidth", ",", "final_adder", ")" ]
A generalization of the carry save adder, this is designed to add many numbers together in both an area and time efficient manner. Uses a tree reducer to achieve this performance :param [WireVector] wires_to_add: an array of wirevectors to add :param reducer: the tree reducer to use :param final_adder: The two value adder to use at the end :return: a wirevector with the result of the addition The length of the result is: max(len(w) for w in wires_to_add) + ceil(log2(len(wires_to_add)))
[ "A", "generalization", "of", "the", "carry", "save", "adder", "this", "is", "designed", "to", "add", "many", "numbers", "together", "in", "a", "both", "area", "and", "time", "efficient", "manner", ".", "Uses", "a", "tree", "reducer", "to", "achieve", "this", "performance" ]
python
train
39.36
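A worked instance of the result-width formula, matching the computation in the function body:

import math
wire_lengths = [8, 8, 8, 8, 8]  # five 8-bit wires to add
result_bitwidth = max(wire_lengths) + int(math.ceil(math.log(len(wire_lengths), 2)))
print(result_bitwidth)  # 8 + ceil(log2(5)) = 8 + 3 = 11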
DataBiosphere/toil
src/toil/lib/ec2.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/ec2.py#L240-L255
def create_ondemand_instances(ec2, image_id, spec, num_instances=1): """ Requests the RunInstances EC2 API call but accounts for the race between recently created instance profiles, IAM roles and an instance creation that refers to them. :rtype: list[Instance] """ instance_type = spec['instance_type'] log.info('Creating %s instance(s) ... ', instance_type) for attempt in retry_ec2(retry_for=a_long_time, retry_while=inconsistencies_detected): with attempt: return ec2.run_instances(image_id, min_count=num_instances, max_count=num_instances, **spec).instances
[ "def", "create_ondemand_instances", "(", "ec2", ",", "image_id", ",", "spec", ",", "num_instances", "=", "1", ")", ":", "instance_type", "=", "spec", "[", "'instance_type'", "]", "log", ".", "info", "(", "'Creating %s instance(s) ... '", ",", "instance_type", ")", "for", "attempt", "in", "retry_ec2", "(", "retry_for", "=", "a_long_time", ",", "retry_while", "=", "inconsistencies_detected", ")", ":", "with", "attempt", ":", "return", "ec2", ".", "run_instances", "(", "image_id", ",", "min_count", "=", "num_instances", ",", "max_count", "=", "num_instances", ",", "*", "*", "spec", ")", ".", "instances" ]
Requests the RunInstances EC2 API call but accounts for the race between recently created instance profiles, IAM roles and an instance creation that refers to them. :rtype: list[Instance]
[ "Requests", "the", "RunInstances", "EC2", "API", "call", "but", "accounts", "for", "the", "race", "between", "recently", "created", "instance", "profiles", "IAM", "roles", "and", "an", "instance", "creation", "that", "refers", "to", "them", "." ]
python
train
46.25
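A hedged call sketch for create_ondemand_instances; it assumes a legacy boto 2 connection, and the region, AMI id, key pair, and instance profile ARN are placeholders (spec must at least carry 'instance_type'):

import boto.ec2

ec2 = boto.ec2.connect_to_region('us-west-2')
spec = {'instance_type': 't2.micro',
        'key_name': 'toil-worker-key',  # hypothetical key pair
        'instance_profile_arn': 'arn:aws:iam::123456789012:instance-profile/toil-worker'}
instances = create_ondemand_instances(ec2, 'ami-0abcdef1234567890', spec, num_instances=2)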
Netflix-Skunkworks/historical
historical/vpc/collector.py
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/vpc/collector.py#L44-L75
def describe_vpc(record): """Attempts to describe vpc ids.""" account_id = record['account'] vpc_name = cloudwatch.filter_request_parameters('vpcName', record) vpc_id = cloudwatch.filter_request_parameters('vpcId', record) try: if vpc_id and vpc_name: # pylint: disable=R1705 return describe_vpcs( account_number=account_id, assume_role=HISTORICAL_ROLE, region=CURRENT_REGION, Filters=[ { 'Name': 'vpc-id', 'Values': [vpc_id] } ] ) elif vpc_id: return describe_vpcs( account_number=account_id, assume_role=HISTORICAL_ROLE, region=CURRENT_REGION, VpcIds=[vpc_id] ) else: raise Exception('[X] Describe requires VpcId.') except ClientError as exc: if exc.response['Error']['Code'] == 'InvalidVpc.NotFound': return [] raise exc
[ "def", "describe_vpc", "(", "record", ")", ":", "account_id", "=", "record", "[", "'account'", "]", "vpc_name", "=", "cloudwatch", ".", "filter_request_parameters", "(", "'vpcName'", ",", "record", ")", "vpc_id", "=", "cloudwatch", ".", "filter_request_parameters", "(", "'vpcId'", ",", "record", ")", "try", ":", "if", "vpc_id", "and", "vpc_name", ":", "# pylint: disable=R1705", "return", "describe_vpcs", "(", "account_number", "=", "account_id", ",", "assume_role", "=", "HISTORICAL_ROLE", ",", "region", "=", "CURRENT_REGION", ",", "Filters", "=", "[", "{", "'Name'", ":", "'vpc-id'", ",", "'Values'", ":", "[", "vpc_id", "]", "}", "]", ")", "elif", "vpc_id", ":", "return", "describe_vpcs", "(", "account_number", "=", "account_id", ",", "assume_role", "=", "HISTORICAL_ROLE", ",", "region", "=", "CURRENT_REGION", ",", "VpcIds", "=", "[", "vpc_id", "]", ")", "else", ":", "raise", "Exception", "(", "'[X] Describe requires VpcId.'", ")", "except", "ClientError", "as", "exc", ":", "if", "exc", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "==", "'InvalidVpc.NotFound'", ":", "return", "[", "]", "raise", "exc" ]
Attempts to describe vpc ids.
[ "Attempts", "to", "describe", "vpc", "ids", "." ]
python
train
33.1875
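A hedged sketch of invoking describe_vpc; the record layout below is an assumption about the CloudWatch/CloudTrail event this collector consumes (the exact shape is defined by cloudwatch.filter_request_parameters):

record = {
    'account': '123456789012',  # assumed account id field
    'detail': {'requestParameters': {'vpcId': 'vpc-0abc123'}},  # assumed event layout
}
vpcs = describe_vpc(record)  # returns [] when the VPC no longer exists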
saltstack/salt
salt/states/proxy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/proxy.py#L40-L144
def managed(name, port, services=None, user=None, password=None, bypass_domains=None, network_service='Ethernet'): ''' Manages proxy settings for this minion name The proxy server to use port The port used by the proxy server services A list of the services that should use the given proxy settings; valid services include http, https and ftp. If no service is given all of the valid services will be used. user The username to use for the proxy server if required password The password to use for the proxy server if required bypass_domains An array of the domains that should bypass the proxy network_service The network service to apply the changes to, this is only necessary on macOS ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} valid_services = ['http', 'https', 'ftp'] if services is None: services = valid_services # Darwin if __grains__['os'] in ['MacOS', 'Darwin']: ret['changes'] = {'new': []} for service in services: current_settings = __salt__['proxy.get_{0}_proxy'.format(service)]() if current_settings.get('server') == name and current_settings.get('port') == six.text_type(port): ret['comment'] += '{0} proxy settings already set.\n'.format(service) elif __salt__['proxy.set_{0}_proxy'.format(service)](name, port, user, password, network_service): ret['comment'] += '{0} proxy settings updated correctly\n'.format(service) ret['changes']['new'].append({'service': service, 'server': name, 'port': port, 'user': user}) else: ret['result'] = False ret['comment'] += 'Failed to set {0} proxy settings.\n'.format(service) if bypass_domains is not None: current_domains = __salt__['proxy.get_proxy_bypass']() if len(set(current_domains).intersection(bypass_domains)) == len(bypass_domains): ret['comment'] += 'Proxy bypass domains are already set correctly.\n' elif __salt__['proxy.set_proxy_bypass'](bypass_domains, network_service): ret['comment'] += 'Proxy bypass domains updated correctly\n' ret['changes']['new'].append({'bypass_domains': list(set(bypass_domains).difference(current_domains))}) else: ret['result'] = False ret['comment'] += 'Failed to set bypass proxy domains.\n' if not ret['changes']['new']: del ret['changes']['new'] return ret # Windows - Needs its own branch as all settings need to be set at the same time if __grains__['os'] in ['Windows']: changes_needed = False current_settings = __salt__['proxy.get_proxy_win']() current_domains = __salt__['proxy.get_proxy_bypass']() if current_settings.get('enabled', False) is True: for service in services: # We need to update one of our proxy servers if service not in current_settings: changes_needed = True break if current_settings[service]['server'] != name or current_settings[service]['port'] != six.text_type(port): changes_needed = True break else: # Proxy settings aren't enabled changes_needed = True # We need to update our bypass domains if bypass_domains is not None and len(set(current_domains).intersection(bypass_domains)) != len(bypass_domains): changes_needed = True if changes_needed: if __salt__['proxy.set_proxy_win'](name, port, services, bypass_domains): ret['comment'] = 'Proxy settings updated correctly' else: ret['result'] = False ret['comment'] = 'Failed to set proxy settings.' else: ret['comment'] = 'Proxy settings already correct.' return ret
[ "def", "managed", "(", "name", ",", "port", ",", "services", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ",", "bypass_domains", "=", "None", ",", "network_service", "=", "'Ethernet'", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "valid_services", "=", "[", "'http'", ",", "'https'", ",", "'ftp'", "]", "if", "services", "is", "None", ":", "services", "=", "valid_services", "# Darwin", "if", "__grains__", "[", "'os'", "]", "in", "[", "'MacOS'", ",", "'Darwin'", "]", ":", "ret", "[", "'changes'", "]", "=", "{", "'new'", ":", "[", "]", "}", "for", "service", "in", "services", ":", "current_settings", "=", "__salt__", "[", "'proxy.get_{0}_proxy'", ".", "format", "(", "service", ")", "]", "(", ")", "if", "current_settings", ".", "get", "(", "'server'", ")", "==", "name", "and", "current_settings", ".", "get", "(", "'port'", ")", "==", "six", ".", "text_type", "(", "port", ")", ":", "ret", "[", "'comment'", "]", "+=", "'{0} proxy settings already set.\\n'", ".", "format", "(", "service", ")", "elif", "__salt__", "[", "'proxy.set_{0}_proxy'", ".", "format", "(", "service", ")", "]", "(", "name", ",", "port", ",", "user", ",", "password", ",", "network_service", ")", ":", "ret", "[", "'comment'", "]", "+=", "'{0} proxy settings updated correctly\\n'", ".", "format", "(", "service", ")", "ret", "[", "'changes'", "]", "[", "'new'", "]", ".", "append", "(", "{", "'service'", ":", "service", ",", "'server'", ":", "name", ",", "'port'", ":", "port", ",", "'user'", ":", "user", "}", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "+=", "'Failed to set {0} proxy settings.\\n'", "if", "bypass_domains", "is", "not", "None", ":", "current_domains", "=", "__salt__", "[", "'proxy.get_proxy_bypass'", "]", "(", ")", "if", "len", "(", "set", "(", "current_domains", ")", ".", "intersection", "(", "bypass_domains", ")", ")", "==", "len", "(", "bypass_domains", ")", ":", "ret", "[", "'comment'", "]", "+=", "'Proxy bypass domains are already set correctly.\\n'", "elif", "__salt__", "[", "'proxy.set_proxy_bypass'", "]", "(", "bypass_domains", ",", "network_service", ")", ":", "ret", "[", "'comment'", "]", "+=", "'Proxy bypass domains updated correctly\\n'", "ret", "[", "'changes'", "]", "[", "'new'", "]", ".", "append", "(", "{", "'bypass_domains'", ":", "list", "(", "set", "(", "bypass_domains", ")", ".", "difference", "(", "current_domains", ")", ")", "}", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "+=", "'Failed to set bypass proxy domains.\\n'", "if", "not", "ret", "[", "'changes'", "]", "[", "'new'", "]", ":", "del", "ret", "[", "'changes'", "]", "[", "'new'", "]", "return", "ret", "# Windows - Needs its own branch as all settings need to be set at the same time", "if", "__grains__", "[", "'os'", "]", "in", "[", "'Windows'", "]", ":", "changes_needed", "=", "False", "current_settings", "=", "__salt__", "[", "'proxy.get_proxy_win'", "]", "(", ")", "current_domains", "=", "__salt__", "[", "'proxy.get_proxy_bypass'", "]", "(", ")", "if", "current_settings", ".", "get", "(", "'enabled'", ",", "False", ")", "is", "True", ":", "for", "service", "in", "services", ":", "# We need to update one of our proxy servers", "if", "service", "not", "in", "current_settings", ":", "changes_needed", "=", "True", "break", "if", "current_settings", "[", "service", "]", "[", "'server'", "]", "!=", "name", "or", 
"current_settings", "[", "service", "]", "[", "'port'", "]", "!=", "six", ".", "text_type", "(", "port", ")", ":", "changes_needed", "=", "True", "break", "else", ":", "# Proxy settings aren't enabled", "changes_needed", "=", "True", "# We need to update our bypass domains", "if", "len", "(", "set", "(", "current_domains", ")", ".", "intersection", "(", "bypass_domains", ")", ")", "!=", "len", "(", "bypass_domains", ")", ":", "changes_needed", "=", "True", "if", "changes_needed", ":", "if", "__salt__", "[", "'proxy.set_proxy_win'", "]", "(", "name", ",", "port", ",", "services", ",", "bypass_domains", ")", ":", "ret", "[", "'comment'", "]", "=", "'Proxy settings updated correctly'", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set {0} proxy settings.'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Proxy settings already correct.'", "return", "ret" ]
Manages proxy settings for this minion name The proxy server to use port The port used by the proxy server services A list of the services that should use the given proxy settings; valid services include http, https and ftp. If no service is given all of the valid services will be used. user The username to use for the proxy server if required password The password to use for the proxy server if required bypass_domains An array of the domains that should bypass the proxy network_service The network service to apply the changes to, this is only necessary on macOS
[ "Manages", "proxy", "settings", "for", "this", "mininon" ]
python
train
37.933333
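A hedged invocation sketch for the managed state; in practice it is driven from an SLS file, and the __salt__ and __grains__ dunders are injected by the Salt loader, so this call is not runnable outside a minion context:

ret = managed('proxy.example.com', 3128,
              services=['http', 'https'],
              bypass_domains=['localhost', '.internal.example.com'])
# ret is the usual state return dict:
# {'name': ..., 'result': True/False, 'comment': ..., 'changes': {...}}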
NASA-AMMOS/AIT-Core
ait/core/cmd.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/cmd.py#L347-L375
def validate(self, cmd, messages=None): """Returns True if the given Command is valid, False otherwise. Validation error messages are appended to an optional messages array. """ valid = True args = [ arg for arg in cmd.args if arg is not None ] if self.nargs != len(args): valid = False if messages is not None: msg = 'Expected %d arguments, but received %d.' messages.append(msg % (self.nargs, len(args))) for defn, value in zip(self.args, cmd.args): if value is None: valid = False if messages is not None: messages.append('Argument "%s" is missing.' % defn.name) elif defn.validate(value, messages) is False: valid = False if len(cmd._unrecognized) > 0: valid = False if messages is not None: for name in cmd.unrecognized: messages.append('Argument "%s" is unrecognized.' % name) return valid
[ "def", "validate", "(", "self", ",", "cmd", ",", "messages", "=", "None", ")", ":", "valid", "=", "True", "args", "=", "[", "arg", "for", "arg", "in", "cmd", ".", "args", "if", "arg", "is", "not", "None", "]", "if", "self", ".", "nargs", "!=", "len", "(", "args", ")", ":", "valid", "=", "False", "if", "messages", "is", "not", "None", ":", "msg", "=", "'Expected %d arguments, but received %d.'", "messages", ".", "append", "(", "msg", "%", "(", "self", ".", "nargs", ",", "len", "(", "args", ")", ")", ")", "for", "defn", ",", "value", "in", "zip", "(", "self", ".", "args", ",", "cmd", ".", "args", ")", ":", "if", "value", "is", "None", ":", "valid", "=", "False", "if", "messages", "is", "not", "None", ":", "messages", ".", "append", "(", "'Argument \"%s\" is missing.'", "%", "defn", ".", "name", ")", "elif", "defn", ".", "validate", "(", "value", ",", "messages", ")", "is", "False", ":", "valid", "=", "False", "if", "len", "(", "cmd", ".", "_unrecognized", ")", ">", "0", ":", "valid", "=", "False", "if", "messages", "is", "not", "None", ":", "for", "name", "in", "cmd", ".", "unrecognized", ":", "messages", ".", "append", "(", "'Argument \"%s\" is unrecognized.'", "%", "name", ")", "return", "valid" ]
Returns True if the given Command is valid, False otherwise. Validation error messages are appended to an optional messages array.
[ "Returns", "True", "if", "the", "given", "Command", "is", "valid", "False", "otherwise", ".", "Validation", "error", "messages", "are", "appended", "to", "an", "optional", "messages", "array", "." ]
python
train
36.586207
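A hedged sketch of collecting the validation errors; cmd_defn and cmd stand for a command definition and a command instance obtained elsewhere from ait.core.cmd:

messages = []
if not cmd_defn.validate(cmd, messages):
    print('\n'.join(messages))  # e.g. 'Expected 2 arguments, but received 1.'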
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/gridfs/grid_file.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/gridfs/grid_file.py#L289-L297
def close(self): """Flush the file and close it. A closed file cannot be written any more. Calling :meth:`close` more than once is allowed. """ if not self._closed: self.__flush() object.__setattr__(self, "_closed", True)
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_closed", ":", "self", ".", "__flush", "(", ")", "object", ".", "__setattr__", "(", "self", ",", "\"_closed\"", ",", "True", ")" ]
Flush the file and close it. A closed file cannot be written any more. Calling :meth:`close` more than once is allowed.
[ "Flush", "the", "file", "and", "close", "it", "." ]
python
train
30.888889
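A hedged usage sketch with pymongo's gridfs; it assumes a local mongod, and the database and file names are illustrative:

import gridfs
from pymongo import MongoClient

fs = gridfs.GridFS(MongoClient().example_db)
f = fs.new_file(filename='example.bin')
f.write(b'payload')
f.close()
f.close()  # a second close() is explicitly allowed and is a no-op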
VIVelev/PyDojoML
dojo/cluster/mixture/gaussian_mixture_model.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/cluster/mixture/gaussian_mixture_model.py#L100-L107
def _converged(self, X): """Convergence if || likelihood - last_likelihood || < tolerance""" if len(self.responsibilities) < 2: return False diff = np.linalg.norm(self.responsibilities[-1] - self.responsibilities[-2]) return diff <= self.tolerance
[ "def", "_converged", "(", "self", ",", "X", ")", ":", "if", "len", "(", "self", ".", "responsibilities", ")", "<", "2", ":", "return", "False", "diff", "=", "np", ".", "linalg", ".", "norm", "(", "self", ".", "responsibilities", "[", "-", "1", "]", "-", "self", ".", "responsibilities", "[", "-", "2", "]", ")", "return", "diff", "<=", "self", ".", "tolerance" ]
Convergence if || likelihood - last_likelihood || < tolerance
[ "Covergence", "if", "||", "likehood", "-", "last_likelihood", "||", "<", "tolerance" ]
python
train
35.375
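The same stopping rule as a self-contained numpy sketch; the tolerance value and history below are illustrative:

import numpy as np

def converged(responsibilities, tolerance=1e-8):
    # Stop once the change between consecutive responsibility matrices is tiny
    if len(responsibilities) < 2:
        return False
    return np.linalg.norm(responsibilities[-1] - responsibilities[-2]) <= tolerance

history = [np.array([0.5, 0.5]), np.array([0.5 + 1e-10, 0.5 - 1e-10])]
assert converged(history)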
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L703-L707
def _getFromTime(self, atDate=None): """ Time that the event starts (in the local time zone). """ return getLocalTime(self.date, self.time_from, self.tz)
[ "def", "_getFromTime", "(", "self", ",", "atDate", "=", "None", ")", ":", "return", "getLocalTime", "(", "self", ".", "date", ",", "self", ".", "time_from", ",", "self", ".", "tz", ")" ]
Time that the event starts (in the local time zone).
[ "Time", "that", "the", "event", "starts", "(", "in", "the", "local", "time", "zone", ")", "." ]
python
train
36.2
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L723-L735
def insert_cols(self, col, no_cols=1): """Adds no_cols columns before col, appends if col > maxcols and marks grid as changed """ # Mark content as changed post_command_event(self.main_window, self.ContentChangedMsg) tab = self.grid.current_table self.code_array.insert(col, no_cols, axis=1, tab=tab)
[ "def", "insert_cols", "(", "self", ",", "col", ",", "no_cols", "=", "1", ")", ":", "# Mark content as changed", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "ContentChangedMsg", ")", "tab", "=", "self", ".", "grid", ".", "current_table", "self", ".", "code_array", ".", "insert", "(", "col", ",", "no_cols", ",", "axis", "=", "1", ",", "tab", "=", "tab", ")" ]
Adds no_cols columns before col, appends if col > maxcols and marks grid as changed
[ "Adds", "no_cols", "columns", "before", "col", "appends", "if", "col", ">", "maxcols" ]
python
train
26.846154
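A hedged call sketch; grid_actions stands for an initialized pyspread grid actions object, which only exists inside the running GUI:

grid_actions.insert_cols(3, no_cols=2)  # insert two columns before column 3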