Dataset columns (per-record fields and value ranges):

repo                stringlengths    7 .. 54
path                stringlengths    4 .. 192
url                 stringlengths    87 .. 284
code                stringlengths    78 .. 104k
code_tokens         list
docstring           stringlengths    1 .. 46.9k
docstring_tokens    list
language            stringclasses    1 value
partition           stringclasses    3 values
django-salesforce/django-salesforce
salesforce/dbapi/subselect.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/subselect.py#L231-L251
def find_closing_parenthesis(sql, startpos):
    """Find the pair of opening and closing parentheses.

    Starts search at the position startpos.
    Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
    """
    pattern = re.compile(r'[()]')
    level = 0
    opening = []
    for match in pattern.finditer(sql, startpos):
        par = match.group()
        if par == '(':
            if level == 0:
                opening = match.start()
            level += 1
        if par == ')':
            assert level > 0, "missing '(' before ')'"
            level -= 1
            if level == 0:
                closing = match.end()
                return opening, closing
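A quick usage sketch may help: the function returns the span of the outermost balanced pair found at or after startpos. The input strings below are hypothetical, not from the source repo, and the sketch assumes the function above is in scope with re imported at module level, as the function requires.

import re  # required by find_closing_parenthesis

# assuming find_closing_parenthesis as defined above is importable:
print(find_closing_parenthesis("a(b(c)d)e", 0))  # (1, 8) -- span of the outermost pair
print(find_closing_parenthesis("a(b(c)d)e", 2))  # (3, 6) -- search starts inside the outer pair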
[ "def", "find_closing_parenthesis", "(", "sql", ",", "startpos", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r'[()]'", ")", "level", "=", "0", "opening", "=", "[", "]", "for", "match", "in", "pattern", ".", "finditer", "(", "sql", ",", "startpos", ")", ":", "par", "=", "match", ".", "group", "(", ")", "if", "par", "==", "'('", ":", "if", "level", "==", "0", ":", "opening", "=", "match", ".", "start", "(", ")", "level", "+=", "1", "if", "par", "==", "')'", ":", "assert", "level", ">", "0", ",", "\"missing '(' before ')'\"", "level", "-=", "1", "if", "level", "==", "0", ":", "closing", "=", "match", ".", "end", "(", ")", "return", "opening", ",", "closing" ]
Find the pair of opening and closing parentheses. Starts search at the position startpos. Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
[ "Find", "the", "pair", "of", "opening", "and", "closing", "parentheses", "." ]
python
train
emencia/emencia-django-forum
forum/markup.py
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/markup.py#L20-L28
def clean_restructuredtext(form_instance, content):
    """ RST syntax validation """
    if content:
        errors = SourceReporter(content)
        if errors:
            raise ValidationError(map(map_parsing_errors, errors))
    return content
[ "def", "clean_restructuredtext", "(", "form_instance", ",", "content", ")", ":", "if", "content", ":", "errors", "=", "SourceReporter", "(", "content", ")", "if", "errors", ":", "raise", "ValidationError", "(", "map", "(", "map_parsing_errors", ",", "errors", ")", ")", "return", "content" ]
RST syntax validation
[ "RST", "syntax", "validation" ]
python
train
hydpy-dev/hydpy
hydpy/core/autodoctools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/autodoctools.py#L287-L336
def get_role(member, cython=False):
    """Return the reStructuredText role `func`, `class`, or `const` best
    describing the given member.

    Some examples based on the site-package |numpy|.  |numpy.clip| is a function:

    >>> from hydpy.core.autodoctools import Substituter
    >>> import numpy
    >>> Substituter.get_role(numpy.clip)
    'func'

    |numpy.ndarray| is a class:

    >>> Substituter.get_role(numpy.ndarray)
    'class'

    |numpy.ndarray.clip| is a method, for which also the `function` role is
    returned:

    >>> Substituter.get_role(numpy.ndarray.clip)
    'func'

    For everything else the `constant` role is returned:

    >>> Substituter.get_role(numpy.nan)
    'const'

    When analysing cython extension modules, set the option `cython` flag
    to |True|.  |Double| is correctly identified as a class:

    >>> from hydpy.cythons import pointerutils
    >>> Substituter.get_role(pointerutils.Double, cython=True)
    'class'

    Only with the `cython` flag being |True|, for everything else the
    `function` text role is returned (doesn't make sense here, but the
    |numpy| module is not something defined in module |pointerutils| anyway):

    >>> Substituter.get_role(pointerutils.numpy, cython=True)
    'func'
    """
    if inspect.isroutine(member) or isinstance(member, numpy.ufunc):
        return 'func'
    elif inspect.isclass(member):
        return 'class'
    elif cython:
        return 'func'
    return 'const'
[ "def", "get_role", "(", "member", ",", "cython", "=", "False", ")", ":", "if", "inspect", ".", "isroutine", "(", "member", ")", "or", "isinstance", "(", "member", ",", "numpy", ".", "ufunc", ")", ":", "return", "'func'", "elif", "inspect", ".", "isclass", "(", "member", ")", ":", "return", "'class'", "elif", "cython", ":", "return", "'func'", "return", "'const'" ]
Return the reStructuredText role `func`, `class`, or `const` best describing the given member.

Some examples based on the site-package |numpy|.  |numpy.clip| is a function:

>>> from hydpy.core.autodoctools import Substituter
>>> import numpy
>>> Substituter.get_role(numpy.clip)
'func'

|numpy.ndarray| is a class:

>>> Substituter.get_role(numpy.ndarray)
'class'

|numpy.ndarray.clip| is a method, for which also the `function` role is returned:

>>> Substituter.get_role(numpy.ndarray.clip)
'func'

For everything else the `constant` role is returned:

>>> Substituter.get_role(numpy.nan)
'const'

When analysing cython extension modules, set the option `cython` flag to |True|.
|Double| is correctly identified as a class:

>>> from hydpy.cythons import pointerutils
>>> Substituter.get_role(pointerutils.Double, cython=True)
'class'

Only with the `cython` flag being |True|, for everything else the `function`
text role is returned (doesn't make sense here, but the |numpy| module is not
something defined in module |pointerutils| anyway):

>>> Substituter.get_role(pointerutils.numpy, cython=True)
'func'
[ "Return", "the", "reStructuredText", "role", "func", "class", "or", "const", "best", "describing", "the", "given", "member", "." ]
python
train
ConsenSys/mythril-classic
mythril/laser/smt/bitvec.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/smt/bitvec.py#L408-L415
def URem(a: BitVec, b: BitVec) -> BitVec:
    """Create an unsigned remainder expression.

    :param a:
    :param b:
    :return:
    """
    return _arithmetic_helper(a, b, z3.URem)
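For context, a standalone z3 sketch (independent of mythril's BitVec wrapper) shows how the underlying z3.URem differs from a signed remainder on the same bit pattern:

import z3

a = z3.BitVecVal(-3, 8)            # stored as the bit pattern 0xFD, i.e. 253 unsigned
b = z3.BitVecVal(2, 8)
print(z3.simplify(z3.URem(a, b)))  # 1   (253 % 2, both operands treated as unsigned)
print(z3.simplify(z3.SRem(a, b)))  # 255 (the bit pattern of -1, the signed remainder)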
[ "def", "URem", "(", "a", ":", "BitVec", ",", "b", ":", "BitVec", ")", "->", "BitVec", ":", "return", "_arithmetic_helper", "(", "a", ",", "b", ",", "z3", ".", "URem", ")" ]
Create an unsigned remainder expression.

:param a:
:param b:
:return:
[ "Create", "an", "unsigned", "remainder", "expression", "." ]
python
train
UCSBarchlab/PyRTL
pyrtl/corecircuits.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/corecircuits.py#L184-L193
def signed_mult(a, b):
    """ Return a*b where a and b are treated as signed values. """
    a, b = as_wires(a), as_wires(b)
    final_len = len(a) + len(b)
    # sign extend both inputs to the final target length
    a, b = a.sign_extended(final_len), b.sign_extended(final_len)
    # the result is the multiplication of both, but truncated
    # TODO: this may make estimates based on the multiplication overly
    # pessimistic as half of the multiply result is thrown right away!
    return (a * b)[0:final_len]
[ "def", "signed_mult", "(", "a", ",", "b", ")", ":", "a", ",", "b", "=", "as_wires", "(", "a", ")", ",", "as_wires", "(", "b", ")", "final_len", "=", "len", "(", "a", ")", "+", "len", "(", "b", ")", "# sign extend both inputs to the final target length", "a", ",", "b", "=", "a", ".", "sign_extended", "(", "final_len", ")", ",", "b", ".", "sign_extended", "(", "final_len", ")", "# the result is the multiplication of both, but truncated", "# TODO: this may make estimates based on the multiplication overly", "# pessimistic as half of the multiply result is thrown right away!", "return", "(", "a", "*", "b", ")", "[", "0", ":", "final_len", "]" ]
Return a*b where a and b are treated as signed values.
[ "Return", "a", "*", "b", "where", "a", "and", "b", "are", "treated", "as", "signed", "values", "." ]
python
train
robmcmullen/atrcopy
atrcopy/segments.py
https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L725-L775
def get_entire_style_ranges(self, split_comments=None, **kwargs):
    """Find sections of the segment that have the same style value.

    The arguments to this function are used as a mask for the style to
    determine where to split the styles. Style bits that aren't included
    in the list will be ignored when splitting. The returned list covers
    the entire length of the segment.

    Returns a list of tuples, each tuple containing two items: a start,
    end tuple; and an integer with the style value.
    """
    style_bits = self.get_style_bits(**kwargs)
    matches = self.get_comment_locations(**kwargs)
    groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
    if split_comments is None:
        split_comments = []
    # print groups
    # split into groups with the same numbers
    ranges = []
    last_end = 0
    if len(groups) == 1 and len(groups[0]) == 0:
        # check for degenerate case
        return
    last_style = -1
    for group in groups:
        # each group is guaranteed to have the same style
        size = len(group)
        next_end = last_end + size
        style = matches[last_end]
        masked_style = style & style_bits
        # print last_end, next_end, style, masked_style, size, group
        if style & comment_bit_mask:
            if masked_style in split_comments:
                # print "interesting comment", last_end, next_end
                ranges.append(((last_end, next_end), masked_style))
            else:
                # print "non-interesting comment", last_end, next_end
                if last_style == masked_style:
                    ((prev_end, _), _) = ranges.pop()
                    ranges.append(((prev_end, next_end), masked_style))
                else:
                    ranges.append(((last_end, next_end), masked_style))
        else:
            if last_style == masked_style:
                ((prev_end, _), _) = ranges.pop()
                ranges.append(((prev_end, next_end), masked_style))
            else:
                ranges.append(((last_end, next_end), masked_style))
        last_style = masked_style
        last_end = next_end
    return ranges
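The np.split line is a stock NumPy idiom for grouping consecutive equal values; a standalone sketch:

import numpy as np

a = np.array([1, 1, 2, 2, 2, 3])
# np.diff(a) != 0 marks each index where the value changes; adding 1
# turns those indices into split points for np.split
groups = np.split(a, np.where(np.diff(a) != 0)[0] + 1)
print(groups)  # [array([1, 1]), array([2, 2, 2]), array([3])]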
[ "def", "get_entire_style_ranges", "(", "self", ",", "split_comments", "=", "None", ",", "*", "*", "kwargs", ")", ":", "style_bits", "=", "self", ".", "get_style_bits", "(", "*", "*", "kwargs", ")", "matches", "=", "self", ".", "get_comment_locations", "(", "*", "*", "kwargs", ")", "groups", "=", "np", ".", "split", "(", "matches", ",", "np", ".", "where", "(", "np", ".", "diff", "(", "matches", ")", "!=", "0", ")", "[", "0", "]", "+", "1", ")", "if", "split_comments", "is", "None", ":", "split_comments", "=", "[", "]", "# print groups", "# split into groups with the same numbers", "ranges", "=", "[", "]", "last_end", "=", "0", "if", "len", "(", "groups", ")", "==", "1", "and", "len", "(", "groups", "[", "0", "]", ")", "==", "0", ":", "# check for degenerate case", "return", "last_style", "=", "-", "1", "for", "group", "in", "groups", ":", "# each group is guaranteed to have the same style", "size", "=", "len", "(", "group", ")", "next_end", "=", "last_end", "+", "size", "style", "=", "matches", "[", "last_end", "]", "masked_style", "=", "style", "&", "style_bits", "# print last_end, next_end, style, masked_style, size, group", "if", "style", "&", "comment_bit_mask", ":", "if", "masked_style", "in", "split_comments", ":", "# print \"interesting comment\", last_end, next_end", "ranges", ".", "append", "(", "(", "(", "last_end", ",", "next_end", ")", ",", "masked_style", ")", ")", "else", ":", "# print \"non-interesting comment\", last_end, next_end", "if", "last_style", "==", "masked_style", ":", "(", "(", "prev_end", ",", "_", ")", ",", "_", ")", "=", "ranges", ".", "pop", "(", ")", "ranges", ".", "append", "(", "(", "(", "prev_end", ",", "next_end", ")", ",", "masked_style", ")", ")", "else", ":", "ranges", ".", "append", "(", "(", "(", "last_end", ",", "next_end", ")", ",", "masked_style", ")", ")", "else", ":", "if", "last_style", "==", "masked_style", ":", "(", "(", "prev_end", ",", "_", ")", ",", "_", ")", "=", "ranges", ".", "pop", "(", ")", "ranges", ".", "append", "(", "(", "(", "prev_end", ",", "next_end", ")", ",", "masked_style", ")", ")", "else", ":", "ranges", ".", "append", "(", "(", "(", "last_end", ",", "next_end", ")", ",", "masked_style", ")", ")", "last_style", "=", "masked_style", "last_end", "=", "next_end", "return", "ranges" ]
Find sections of the segment that have the same style value. The arguments to this function are used as a mask for the style to determine where to split the styles. Style bits that aren't included in the list will be ignored when splitting. The returned list covers the entire length of the segment. Returns a list of tuples, each tuple containing two items: a start, end tuple; and an integer with the style value.
[ "Find", "sections", "of", "the", "segment", "that", "have", "the", "same", "style", "value", "." ]
python
train
mirceaulinic/pypluribus
pyPluribus/config.py
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L63-L78
def _upload_config_content(self, configuration, rollbacked=False):
    """Will try to upload a specific configuration on the device."""
    try:
        for configuration_line in configuration.splitlines():
            self._device.cli(configuration_line)
        self._config_changed = True  # configuration was changed
        self._committed = False  # and not committed yet
    except (pyPluribus.exceptions.CommandExecutionError,
            pyPluribus.exceptions.TimeoutError) as clierr:
        if not rollbacked:  # rollback errors will just throw
            # to avoid loops
            self.discard()
        raise pyPluribus.exceptions.ConfigLoadError(
            "Unable to upload config on the device: {err}. "
            "Configuration will be discarded.".format(err=clierr.message))
    return True
[ "def", "_upload_config_content", "(", "self", ",", "configuration", ",", "rollbacked", "=", "False", ")", ":", "try", ":", "for", "configuration_line", "in", "configuration", ".", "splitlines", "(", ")", ":", "self", ".", "_device", ".", "cli", "(", "configuration_line", ")", "self", ".", "_config_changed", "=", "True", "# configuration was changed", "self", ".", "_committed", "=", "False", "# and not committed yet", "except", "(", "pyPluribus", ".", "exceptions", ".", "CommandExecutionError", ",", "pyPluribus", ".", "exceptions", ".", "TimeoutError", ")", "as", "clierr", ":", "if", "not", "rollbacked", ":", "# rollack errors will just trow", "# to avoid loops", "self", ".", "discard", "(", ")", "raise", "pyPluribus", ".", "exceptions", ".", "ConfigLoadError", "(", "\"Unable to upload config on the device: {err}.\\\n Configuration will be discarded.\"", ".", "format", "(", "err", "=", "clierr", ".", "message", ")", ")", "return", "True" ]
Will try to upload a specific configuration on the device.
[ "Will", "try", "to", "upload", "a", "specific", "configuration", "on", "the", "device", "." ]
python
train
razor-x/scipy-data_fitting
examples/example_helper.py
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/examples/example_helper.py#L10-L23
def save_example_fit(fit):
    """ Save fit result to a json file and a plot to an svg file. """
    json_directory = os.path.join('examples', 'json')
    plot_directory = os.path.join('examples', 'plots')

    if not os.path.isdir(json_directory):
        os.makedirs(json_directory)
    if not os.path.isdir(plot_directory):
        os.makedirs(plot_directory)

    fit.to_json(os.path.join(json_directory, fit.name + '.json'), meta=fit.metadata)

    plot = Plot(fit)
    plot.save(os.path.join(plot_directory, fit.name + '.svg'))
    plot.close()
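The isdir/makedirs pair has a small race window between the check and the creation; on Python 3.2+ the same intent is a one-liner. A sketch of that alternative, not what the source uses:

import os

# race-free equivalent of the check-then-create pattern above
os.makedirs(os.path.join('examples', 'json'), exist_ok=True)
os.makedirs(os.path.join('examples', 'plots'), exist_ok=True)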
[ "def", "save_example_fit", "(", "fit", ")", ":", "json_directory", "=", "os", ".", "path", ".", "join", "(", "'examples'", ",", "'json'", ")", "plot_directory", "=", "os", ".", "path", ".", "join", "(", "'examples'", ",", "'plots'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "json_directory", ")", ":", "os", ".", "makedirs", "(", "json_directory", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "plot_directory", ")", ":", "os", ".", "makedirs", "(", "plot_directory", ")", "fit", ".", "to_json", "(", "os", ".", "path", ".", "join", "(", "json_directory", ",", "fit", ".", "name", "+", "'.json'", ")", ",", "meta", "=", "fit", ".", "metadata", ")", "plot", "=", "Plot", "(", "fit", ")", "plot", ".", "save", "(", "os", ".", "path", ".", "join", "(", "plot_directory", ",", "fit", ".", "name", "+", "'.svg'", ")", ")", "plot", ".", "close", "(", ")" ]
Save fit result to a json file and a plot to an svg file.
[ "Save", "fit", "result", "to", "a", "json", "file", "and", "a", "plot", "to", "an", "svg", "file", "." ]
python
train
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/__init__.py
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/__init__.py#L18-L80
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash or zsh).
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword-1]
    except IndexError:
        current = ''
    load_all_commands()
    subcommands = [cmd for cmd, cls in command_dict.items() if not cls.hidden]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = command_dict.get(subcommand_name)
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show options of main parser only when necessary
        if current.startswith('-') or current.startswith('--'):
            subcommands += [opt.get_opt_string()
                            for opt in parser.option_list
                            if opt.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
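The function's input comes from two environment variables exported by the bash completion script; a minimal sketch of just that parsing step, with a hypothetical command line:

import os

# hypothetical values, as bash would export them for `pip install --no-de<TAB>`
os.environ['COMP_WORDS'] = 'pip install --no-de'
os.environ['COMP_CWORD'] = '2'

cwords = os.environ['COMP_WORDS'].split()[1:]  # ['install', '--no-de']
cword = int(os.environ['COMP_CWORD'])          # 2
current = cwords[cword - 1]                    # '--no-de', the word being completed
print(current)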
[ "def", "autocomplete", "(", ")", ":", "# Don't complete if user hasn't sourced bash_completion file.", "if", "'PIP_AUTO_COMPLETE'", "not", "in", "os", ".", "environ", ":", "return", "cwords", "=", "os", ".", "environ", "[", "'COMP_WORDS'", "]", ".", "split", "(", ")", "[", "1", ":", "]", "cword", "=", "int", "(", "os", ".", "environ", "[", "'COMP_CWORD'", "]", ")", "try", ":", "current", "=", "cwords", "[", "cword", "-", "1", "]", "except", "IndexError", ":", "current", "=", "''", "load_all_commands", "(", ")", "subcommands", "=", "[", "cmd", "for", "cmd", ",", "cls", "in", "command_dict", ".", "items", "(", ")", "if", "not", "cls", ".", "hidden", "]", "options", "=", "[", "]", "# subcommand", "try", ":", "subcommand_name", "=", "[", "w", "for", "w", "in", "cwords", "if", "w", "in", "subcommands", "]", "[", "0", "]", "except", "IndexError", ":", "subcommand_name", "=", "None", "# subcommand options", "if", "subcommand_name", ":", "# special case: 'help' subcommand has no options", "if", "subcommand_name", "==", "'help'", ":", "sys", ".", "exit", "(", "1", ")", "# special case: list locally installed dists for uninstall command", "if", "subcommand_name", "==", "'uninstall'", "and", "not", "current", ".", "startswith", "(", "'-'", ")", ":", "installed", "=", "[", "]", "lc", "=", "current", ".", "lower", "(", ")", "for", "dist", "in", "get_installed_distributions", "(", "local_only", "=", "True", ")", ":", "if", "dist", ".", "key", ".", "startswith", "(", "lc", ")", "and", "dist", ".", "key", "not", "in", "cwords", "[", "1", ":", "]", ":", "installed", ".", "append", "(", "dist", ".", "key", ")", "# if there are no dists installed, fall back to option completion", "if", "installed", ":", "for", "dist", "in", "installed", ":", "print", "(", "dist", ")", "sys", ".", "exit", "(", "1", ")", "subcommand", "=", "command_dict", ".", "get", "(", "subcommand_name", ")", "options", "+=", "[", "(", "opt", ".", "get_opt_string", "(", ")", ",", "opt", ".", "nargs", ")", "for", "opt", "in", "subcommand", ".", "parser", ".", "option_list", "if", "opt", ".", "help", "!=", "optparse", ".", "SUPPRESS_HELP", "]", "# filter out previously specified options from available options", "prev_opts", "=", "[", "x", ".", "split", "(", "'='", ")", "[", "0", "]", "for", "x", "in", "cwords", "[", "1", ":", "cword", "-", "1", "]", "]", "options", "=", "[", "(", "x", ",", "v", ")", "for", "(", "x", ",", "v", ")", "in", "options", "if", "x", "not", "in", "prev_opts", "]", "# filter options by current input", "options", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "options", "if", "k", ".", "startswith", "(", "current", ")", "]", "for", "option", "in", "options", ":", "opt_label", "=", "option", "[", "0", "]", "# append '=' to options which require args", "if", "option", "[", "1", "]", ":", "opt_label", "+=", "'='", "print", "(", "opt_label", ")", "else", ":", "# show options of main parser only when necessary", "if", "current", ".", "startswith", "(", "'-'", ")", "or", "current", ".", "startswith", "(", "'--'", ")", ":", "subcommands", "+=", "[", "opt", ".", "get_opt_string", "(", ")", "for", "opt", "in", "parser", ".", "option_list", "if", "opt", ".", "help", "!=", "optparse", ".", "SUPPRESS_HELP", "]", "print", "(", "' '", ".", "join", "(", "[", "x", "for", "x", "in", "subcommands", "if", "x", ".", "startswith", "(", "current", ")", "]", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Command and option completion for the main option parser (and options) and its subcommands (and options). Enable by sourcing one of the completion shell scripts (bash or zsh).
[ "Command", "and", "option", "completion", "for", "the", "main", "option", "parser", "(", "and", "options", ")", "and", "its", "subcommands", "(", "and", "options", ")", "." ]
python
train
euske/pdfminer
pdfminer/psparser.py
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L191-L206
def seek(self, pos):
    """Seeks the parser to the given position.
    """
    if self.debug:
        logging.debug('seek: %r' % pos)
    self.fp.seek(pos)
    # reset the status for nextline()
    self.bufpos = pos
    self.buf = b''
    self.charpos = 0
    # reset the status for nexttoken()
    self._parse1 = self._parse_main
    self._curtoken = b''
    self._curtokenpos = 0
    self._tokens = []
    return
[ "def", "seek", "(", "self", ",", "pos", ")", ":", "if", "self", ".", "debug", ":", "logging", ".", "debug", "(", "'seek: %r'", "%", "pos", ")", "self", ".", "fp", ".", "seek", "(", "pos", ")", "# reset the status for nextline()", "self", ".", "bufpos", "=", "pos", "self", ".", "buf", "=", "b''", "self", ".", "charpos", "=", "0", "# reset the status for nexttoken()", "self", ".", "_parse1", "=", "self", ".", "_parse_main", "self", ".", "_curtoken", "=", "b''", "self", ".", "_curtokenpos", "=", "0", "self", ".", "_tokens", "=", "[", "]", "return" ]
Seeks the parser to the given position.
[ "Seeks", "the", "parser", "to", "the", "given", "position", "." ]
python
train
BlackEarth/bl
bl/string.py
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/string.py#L68-L91
def titleify(self, lang='en', allwords=False, lastword=True):
    """takes a string and makes a title from it"""
    if lang in LOWERCASE_WORDS:
        lc_words = LOWERCASE_WORDS[lang]
    else:
        lc_words = []
    s = str(self).strip()
    l = re.split(r"([_\W]+)", s)
    for i in range(len(l)):
        l[i] = l[i].lower()
        if (
            allwords == True
            or i == 0
            or (lastword == True and i == len(l) - 1)
            or l[i].lower() not in lc_words
        ):
            w = l[i]
            if len(w) > 1:
                w = w[0].upper() + w[1:]
            else:
                w = w.upper()
            l[i] = w
    s = "".join(l)
    return String(s)
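Because the split pattern is wrapped in a capturing group, re.split keeps the separators in the result, so joining the pieces reproduces the original string; a standalone sketch:

import re

parts = re.split(r"([_\W]+)", "the quick-brown fox")
print(parts)           # ['the', ' ', 'quick', '-', 'brown', ' ', 'fox']
print("".join(parts))  # 'the quick-brown fox' -- separators survive the round trip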
[ "def", "titleify", "(", "self", ",", "lang", "=", "'en'", ",", "allwords", "=", "False", ",", "lastword", "=", "True", ")", ":", "if", "lang", "in", "LOWERCASE_WORDS", ":", "lc_words", "=", "LOWERCASE_WORDS", "[", "lang", "]", "else", ":", "lc_words", "=", "[", "]", "s", "=", "str", "(", "self", ")", ".", "strip", "(", ")", "l", "=", "re", ".", "split", "(", "r\"([_\\W]+)\"", ",", "s", ")", "for", "i", "in", "range", "(", "len", "(", "l", ")", ")", ":", "l", "[", "i", "]", "=", "l", "[", "i", "]", ".", "lower", "(", ")", "if", "(", "allwords", "==", "True", "or", "i", "==", "0", "or", "(", "lastword", "==", "True", "and", "i", "==", "len", "(", "l", ")", "-", "1", ")", "or", "l", "[", "i", "]", ".", "lower", "(", ")", "not", "in", "lc_words", ")", ":", "w", "=", "l", "[", "i", "]", "if", "len", "(", "w", ")", ">", "1", ":", "w", "=", "w", "[", "0", "]", ".", "upper", "(", ")", "+", "w", "[", "1", ":", "]", "else", ":", "w", "=", "w", ".", "upper", "(", ")", "l", "[", "i", "]", "=", "w", "s", "=", "\"\"", ".", "join", "(", "l", ")", "return", "String", "(", "s", ")" ]
takes a string and makes a title from it
[ "takes", "a", "string", "and", "makes", "a", "title", "from", "it" ]
python
train
aaugustin/websockets
src/websockets/server.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/server.py#L219-L242
async def read_http_request(self) -> Tuple[str, Headers]:
    """
    Read request line and headers from the HTTP request.

    Raise :exc:`~websockets.exceptions.InvalidMessage` if the HTTP message
    is malformed or isn't an HTTP/1.1 GET request.

    Don't attempt to read the request body because WebSocket handshake
    requests don't have one. If the request contains a body, it may be
    read from ``self.reader`` after this coroutine returns.
    """
    try:
        path, headers = await read_request(self.reader)
    except ValueError as exc:
        raise InvalidMessage("Malformed HTTP message") from exc

    logger.debug("%s < GET %s HTTP/1.1", self.side, path)
    logger.debug("%s < %r", self.side, headers)

    self.path = path
    self.request_headers = headers

    return path, headers
[ "async", "def", "read_http_request", "(", "self", ")", "->", "Tuple", "[", "str", ",", "Headers", "]", ":", "try", ":", "path", ",", "headers", "=", "await", "read_request", "(", "self", ".", "reader", ")", "except", "ValueError", "as", "exc", ":", "raise", "InvalidMessage", "(", "\"Malformed HTTP message\"", ")", "from", "exc", "logger", ".", "debug", "(", "\"%s < GET %s HTTP/1.1\"", ",", "self", ".", "side", ",", "path", ")", "logger", ".", "debug", "(", "\"%s < %r\"", ",", "self", ".", "side", ",", "headers", ")", "self", ".", "path", "=", "path", "self", ".", "request_headers", "=", "headers", "return", "path", ",", "headers" ]
Read request line and headers from the HTTP request.

Raise :exc:`~websockets.exceptions.InvalidMessage` if the HTTP message
is malformed or isn't an HTTP/1.1 GET request.

Don't attempt to read the request body because WebSocket handshake
requests don't have one. If the request contains a body, it may be
read from ``self.reader`` after this coroutine returns.
[ "Read", "request", "line", "and", "headers", "from", "the", "HTTP", "request", "." ]
python
train
yougov/vr.runners
vr/runners/image.py
https://github.com/yougov/vr.runners/blob/f43ba50a64b17ee4f07596fe225bcb38ca6652ad/vr/runners/image.py#L19-L26
def ensure_image(name, url, images_root, md5, untar_to=None):
    """Ensure OS image at url has been downloaded and (optionally) unpacked."""
    image_dir_path = os.path.join(images_root, name)
    mkdir(image_dir_path)
    image_file_path = os.path.join(image_dir_path, os.path.basename(url))
    ensure_file(url, image_file_path, md5)
    if untar_to:
        prepare_image(image_file_path, untar_to)
[ "def", "ensure_image", "(", "name", ",", "url", ",", "images_root", ",", "md5", ",", "untar_to", "=", "None", ")", ":", "image_dir_path", "=", "os", ".", "path", ".", "join", "(", "images_root", ",", "name", ")", "mkdir", "(", "image_dir_path", ")", "image_file_path", "=", "os", ".", "path", ".", "join", "(", "image_dir_path", ",", "os", ".", "path", ".", "basename", "(", "url", ")", ")", "ensure_file", "(", "url", ",", "image_file_path", ",", "md5", ")", "if", "untar_to", ":", "prepare_image", "(", "image_file_path", ",", "untar_to", ")" ]
Ensure OS image at url has been downloaded and (optionally) unpacked.
[ "Ensure", "OS", "image", "at", "url", "has", "been", "downloaded", "and", "(", "optionally", ")", "unpacked", "." ]
python
train
angr/angr
angr/state_plugins/abstract_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/abstract_memory.py#L331-L380
def normalize_address(self, addr, is_write=False, convert_to_valueset=False,
                      target_region=None, condition=None):  # pylint:disable=arguments-differ
    """
    Convert a ValueSet object into a list of addresses.

    :param addr: A ValueSet object (which describes an address)
    :param is_write: Is this address used in a write or not
    :param convert_to_valueset: True if you want to have a list of ValueSet instances
                                instead of AddressWrappers, False otherwise
    :param target_region: Which region to normalize the address to. To leave the
                          decision to SimuVEX, set it to None
    :return: A list of AddressWrapper or ValueSet objects
    """
    targets_limit = WRITE_TARGETS_LIMIT if is_write else READ_TARGETS_LIMIT

    if type(addr) is not int:
        for constraint in self.state.solver.constraints:
            if getattr(addr, 'variables', set()) & constraint.variables:
                addr = self._apply_condition_to_symbolic_addr(addr, constraint)

    # Apply the condition if necessary
    if condition is not None:
        addr = self._apply_condition_to_symbolic_addr(addr, condition)

    if type(addr) is int:
        addr = self.state.solver.BVV(addr, self.state.arch.bits)

    addr_with_regions = self._normalize_address_type(addr)

    address_wrappers = []

    for region, addr_si in addr_with_regions:
        concrete_addrs = addr_si.eval(targets_limit)
        if len(concrete_addrs) == targets_limit and HYBRID_SOLVER in self.state.options:
            exact = True if APPROXIMATE_FIRST not in self.state.options else None
            solutions = self.state.solver.eval_upto(addr, targets_limit, exact=exact)
            if len(solutions) < len(concrete_addrs):
                concrete_addrs = [addr_si.intersection(s).eval(1)[0] for s in solutions]

        if len(concrete_addrs) == targets_limit:
            self.state.history.add_event(
                'mem', message='concretized too many targets. address = %s' % addr_si)

        for c in concrete_addrs:
            aw = self._normalize_address(region, c, target_region=target_region)
            address_wrappers.append(aw)

    if convert_to_valueset:
        return [i.to_valueset(self.state) for i in address_wrappers]
    else:
        return address_wrappers
[ "def", "normalize_address", "(", "self", ",", "addr", ",", "is_write", "=", "False", ",", "convert_to_valueset", "=", "False", ",", "target_region", "=", "None", ",", "condition", "=", "None", ")", ":", "#pylint:disable=arguments-differ", "targets_limit", "=", "WRITE_TARGETS_LIMIT", "if", "is_write", "else", "READ_TARGETS_LIMIT", "if", "type", "(", "addr", ")", "is", "not", "int", ":", "for", "constraint", "in", "self", ".", "state", ".", "solver", ".", "constraints", ":", "if", "getattr", "(", "addr", ",", "'variables'", ",", "set", "(", ")", ")", "&", "constraint", ".", "variables", ":", "addr", "=", "self", ".", "_apply_condition_to_symbolic_addr", "(", "addr", ",", "constraint", ")", "# Apply the condition if necessary", "if", "condition", "is", "not", "None", ":", "addr", "=", "self", ".", "_apply_condition_to_symbolic_addr", "(", "addr", ",", "condition", ")", "if", "type", "(", "addr", ")", "is", "int", ":", "addr", "=", "self", ".", "state", ".", "solver", ".", "BVV", "(", "addr", ",", "self", ".", "state", ".", "arch", ".", "bits", ")", "addr_with_regions", "=", "self", ".", "_normalize_address_type", "(", "addr", ")", "address_wrappers", "=", "[", "]", "for", "region", ",", "addr_si", "in", "addr_with_regions", ":", "concrete_addrs", "=", "addr_si", ".", "eval", "(", "targets_limit", ")", "if", "len", "(", "concrete_addrs", ")", "==", "targets_limit", "and", "HYBRID_SOLVER", "in", "self", ".", "state", ".", "options", ":", "exact", "=", "True", "if", "APPROXIMATE_FIRST", "not", "in", "self", ".", "state", ".", "options", "else", "None", "solutions", "=", "self", ".", "state", ".", "solver", ".", "eval_upto", "(", "addr", ",", "targets_limit", ",", "exact", "=", "exact", ")", "if", "len", "(", "solutions", ")", "<", "len", "(", "concrete_addrs", ")", ":", "concrete_addrs", "=", "[", "addr_si", ".", "intersection", "(", "s", ")", ".", "eval", "(", "1", ")", "[", "0", "]", "for", "s", "in", "solutions", "]", "if", "len", "(", "concrete_addrs", ")", "==", "targets_limit", ":", "self", ".", "state", ".", "history", ".", "add_event", "(", "'mem'", ",", "message", "=", "'concretized too many targets. address = %s'", "%", "addr_si", ")", "for", "c", "in", "concrete_addrs", ":", "aw", "=", "self", ".", "_normalize_address", "(", "region", ",", "c", ",", "target_region", "=", "target_region", ")", "address_wrappers", ".", "append", "(", "aw", ")", "if", "convert_to_valueset", ":", "return", "[", "i", ".", "to_valueset", "(", "self", ".", "state", ")", "for", "i", "in", "address_wrappers", "]", "else", ":", "return", "address_wrappers" ]
Convert a ValueSet object into a list of addresses.

:param addr: A ValueSet object (which describes an address)
:param is_write: Is this address used in a write or not
:param convert_to_valueset: True if you want to have a list of ValueSet instances
                            instead of AddressWrappers, False otherwise
:param target_region: Which region to normalize the address to. To leave the
                      decision to SimuVEX, set it to None
:return: A list of AddressWrapper or ValueSet objects
[ "Convert", "a", "ValueSet", "object", "into", "a", "list", "of", "addresses", "." ]
python
train
calmjs/calmjs
src/calmjs/base.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/base.py#L520-L544
def find_node_modules_basedir(self):
    """
    Find all node_modules directories configured to be accessible
    through this driver instance.

    This is typically used for adding the direct instance, and does
    not traverse the parent directories like what Node.js does.

    Returns a list of directories that contain a 'node_modules'
    directory.
    """
    paths = []

    # First do the working dir.
    local_node_path = self.join_cwd(NODE_MODULES)
    if isdir(local_node_path):
        paths.append(local_node_path)

    # do the NODE_PATH environment variable last, as Node.js seems to
    # have these resolving just before the global.
    if self.node_path:
        paths.extend(self.node_path.split(pathsep))

    return paths
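NODE_PATH holds several directories joined with the platform's path separator (':' on POSIX, ';' on Windows), which is why the code splits on pathsep; a standalone sketch with hypothetical paths:

from os import pathsep

node_path = pathsep.join(['/opt/lib/node_modules', '/usr/lib/node_modules'])
print(node_path.split(pathsep))
# ['/opt/lib/node_modules', '/usr/lib/node_modules']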
[ "def", "find_node_modules_basedir", "(", "self", ")", ":", "paths", "=", "[", "]", "# First do the working dir.", "local_node_path", "=", "self", ".", "join_cwd", "(", "NODE_MODULES", ")", "if", "isdir", "(", "local_node_path", ")", ":", "paths", ".", "append", "(", "local_node_path", ")", "# do the NODE_PATH environment variable last, as Node.js seem to", "# have these resolving just before the global.", "if", "self", ".", "node_path", ":", "paths", ".", "extend", "(", "self", ".", "node_path", ".", "split", "(", "pathsep", ")", ")", "return", "paths" ]
Find all node_modules directories configured to be accessible
through this driver instance.

This is typically used for adding the direct instance, and does
not traverse the parent directories like what Node.js does.

Returns a list of directories that contain a 'node_modules'
directory.
[ "Find", "all", "node_modules", "directories", "configured", "to", "be", "accessible", "through", "this", "driver", "instance", "." ]
python
train
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L400-L409
def _read_index_file(self):
    """read the param_file, index_dir should already be set """
    param_file = os.path.join(self.index_dir, self.param_file)
    with open(param_file) as f:
        for line in f.readlines():
            (name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t")
            self.size[name] = int(total_size)
            self.fasta_file[name] = fasta_file
            self.index_file[name] = index_file
            self.line_size[name] = int(line_size)
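Each line of the param file is five tab-separated fields; a sketch of the per-line parsing with a hypothetical record:

# hypothetical record: name, fasta path, index path, line size, total size
line = "hg19\t/data/hg19.fa\t/data/hg19.index\t50\t3137161264\n"
(name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t")
print(name, int(line_size), int(total_size))  # hg19 50 3137161264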
[ "def", "_read_index_file", "(", "self", ")", ":", "param_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "index_dir", ",", "self", ".", "param_file", ")", "with", "open", "(", "param_file", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "(", "name", ",", "fasta_file", ",", "index_file", ",", "line_size", ",", "total_size", ")", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "self", ".", "size", "[", "name", "]", "=", "int", "(", "total_size", ")", "self", ".", "fasta_file", "[", "name", "]", "=", "fasta_file", "self", ".", "index_file", "[", "name", "]", "=", "index_file", "self", ".", "line_size", "[", "name", "]", "=", "int", "(", "line_size", ")" ]
read the param_file, index_dir should already be set
[ "read", "the", "param_file", "index_dir", "should", "already", "be", "set" ]
python
train
Nekroze/partpy
partpy/sourcestring.py
https://github.com/Nekroze/partpy/blob/dbb7d2fb285464fc43d85bc31f5af46192d301f6/partpy/sourcestring.py#L50-L55
def reset_position(self):
    """Reset all current positions."""
    self.pos = 0
    self.col = 0
    self.row = 1
    self.eos = 0
[ "def", "reset_position", "(", "self", ")", ":", "self", ".", "pos", "=", "0", "self", ".", "col", "=", "0", "self", ".", "row", "=", "1", "self", ".", "eos", "=", "0" ]
Reset all current positions.
[ "Reset", "all", "current", "positions", "." ]
python
train
bcbio/bcbio-nextgen
scripts/utils/hydra_to_vcf.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hydra_to_vcf.py#L300-L319
def _get_vcf_breakends(hydra_file, genome_2bit, options=None):
    """Parse BEDPE input, yielding VCF ready breakends.
    """
    if options is None:
        options = {}
    for features in group_hydra_breakends(hydra_parser(hydra_file, options)):
        if len(features) == 1 and is_deletion(features[0], options):
            yield build_vcf_deletion(features[0], genome_2bit)
        elif len(features) == 1 and is_tandem_dup(features[0], options):
            yield build_tandem_deletion(features[0], genome_2bit)
        elif len(features) == 2 and is_inversion(*features):
            yield build_vcf_inversion(features[0], features[1], genome_2bit)
        elif len(features) == 2 and is_translocation(*features):
            info = get_translocation_info(features[0], features[1])
            for feature in features:
                for brend in build_vcf_parts(feature, genome_2bit, info):
                    yield brend
        else:
            for feature in features:
                for brend in build_vcf_parts(feature, genome_2bit):
                    yield brend
[ "def", "_get_vcf_breakends", "(", "hydra_file", ",", "genome_2bit", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "for", "features", "in", "group_hydra_breakends", "(", "hydra_parser", "(", "hydra_file", ",", "options", ")", ")", ":", "if", "len", "(", "features", ")", "==", "1", "and", "is_deletion", "(", "features", "[", "0", "]", ",", "options", ")", ":", "yield", "build_vcf_deletion", "(", "features", "[", "0", "]", ",", "genome_2bit", ")", "elif", "len", "(", "features", ")", "==", "1", "and", "is_tandem_dup", "(", "features", "[", "0", "]", ",", "options", ")", ":", "yield", "build_tandem_deletion", "(", "features", "[", "0", "]", ",", "genome_2bit", ")", "elif", "len", "(", "features", ")", "==", "2", "and", "is_inversion", "(", "*", "features", ")", ":", "yield", "build_vcf_inversion", "(", "features", "[", "0", "]", ",", "features", "[", "1", "]", ",", "genome_2bit", ")", "elif", "len", "(", "features", ")", "==", "2", "and", "is_translocation", "(", "*", "features", ")", ":", "info", "=", "get_translocation_info", "(", "features", "[", "0", "]", ",", "features", "[", "1", "]", ")", "for", "feature", "in", "features", ":", "for", "brend", "in", "build_vcf_parts", "(", "feature", ",", "genome_2bit", ",", "info", ")", ":", "yield", "brend", "else", ":", "for", "feature", "in", "features", ":", "for", "brend", "in", "build_vcf_parts", "(", "feature", ",", "genome_2bit", ")", ":", "yield", "brend" ]
Parse BEDPE input, yielding VCF ready breakends.
[ "Parse", "BEDPE", "input", "yielding", "VCF", "ready", "breakends", "." ]
python
train
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L390-L410
def set_label(self, value, callb=None):
    """Convenience method to set the label of the device

    This method will send a SetLabel message to the device, and request
    callb be executed when an ACK is received. The default callback will
    simply cache the value.

    :param value: The new label
    :type value: str
    :param callb: Callable to be used when the response is received. If not set,
                  self.resp_set_label will be used.
    :type callb: callable
    :returns: None
    :rtype: None
    """
    if len(value) > 32:
        value = value[:32]
    mypartial = partial(self.resp_set_label, label=value)
    if callb:
        self.req_with_ack(SetLabel, {"label": value},
                          lambda x, y: (mypartial(y), callb(x, y)))
    else:
        self.req_with_ack(SetLabel, {"label": value},
                          lambda x, y: mypartial(y))
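The callback plumbing pre-binds the new label with functools.partial, so the response handler only needs the response itself when it eventually fires; a standalone sketch of the idiom with a stand-in handler:

from functools import partial

def resp_set_label(resp, label=None):
    # stand-in for the real response handler; just shows what gets cached
    print("caching label:", label)

mypartial = partial(resp_set_label, label="kitchen")
# later, when the ACK arrives, only the response is passed through:
mypartial("ACK")  # caching label: kitchen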
[ "def", "set_label", "(", "self", ",", "value", ",", "callb", "=", "None", ")", ":", "if", "len", "(", "value", ")", ">", "32", ":", "value", "=", "value", "[", ":", "32", "]", "mypartial", "=", "partial", "(", "self", ".", "resp_set_label", ",", "label", "=", "value", ")", "if", "callb", ":", "self", ".", "req_with_ack", "(", "SetLabel", ",", "{", "\"label\"", ":", "value", "}", ",", "lambda", "x", ",", "y", ":", "(", "mypartial", "(", "y", ")", ",", "callb", "(", "x", ",", "y", ")", ")", ")", "else", ":", "self", ".", "req_with_ack", "(", "SetLabel", ",", "{", "\"label\"", ":", "value", "}", ",", "lambda", "x", ",", "y", ":", "mypartial", "(", "y", ")", ")" ]
Convenience method to set the label of the device

This method will send a SetLabel message to the device, and request
callb be executed when an ACK is received. The default callback will
simply cache the value.

:param value: The new label
:type value: str
:param callb: Callable to be used when the response is received. If not set,
              self.resp_set_label will be used.
:type callb: callable
:returns: None
:rtype: None
[ "Convenience", "method", "to", "set", "the", "label", "of", "the", "device" ]
python
train
neon-jungle/wagtailnews
wagtailnews/models.py
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/models.py#L80-L86
def respond(self, request, view, newsitems, extra_context={}):
    """A helper that takes some news items and returns an HttpResponse"""
    context = self.get_context(request, view=view)
    context.update(self.paginate_newsitems(request, newsitems))
    context.update(extra_context)
    template = self.get_template(request, view=view)
    return TemplateResponse(request, template, context)
[ "def", "respond", "(", "self", ",", "request", ",", "view", ",", "newsitems", ",", "extra_context", "=", "{", "}", ")", ":", "context", "=", "self", ".", "get_context", "(", "request", ",", "view", "=", "view", ")", "context", ".", "update", "(", "self", ".", "paginate_newsitems", "(", "request", ",", "newsitems", ")", ")", "context", ".", "update", "(", "extra_context", ")", "template", "=", "self", ".", "get_template", "(", "request", ",", "view", "=", "view", ")", "return", "TemplateResponse", "(", "request", ",", "template", ",", "context", ")" ]
A helper that takes some news items and returns an HttpResponse
[ "A", "helper", "that", "takes", "some", "news", "items", "and", "returns", "an", "HttpResponse" ]
python
train
unbit/sftpclone
sftpclone/sftpclone.py
https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L53-L59
def path_join(*args):
    """
    Wrapper around `os.path.join`.
    Makes sure to join paths of the same type (unicode), since
    `paramiko.py3compat.u` coerces every component to unicode.
    """
    args = (paramiko.py3compat.u(arg) for arg in args)
    return os.path.join(*args)
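Normalizing every component to one type matters because os.path.join refuses to mix bytes and str; a standalone sketch:

import os

try:
    os.path.join(b"remote", "dir")  # mixing bytes and str components
except TypeError as exc:
    print("os.path.join rejects mixed types:", exc)

print(os.path.join("remote", "dir"))  # 'remote/dir' on POSIX once types agree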
[ "def", "path_join", "(", "*", "args", ")", ":", "args", "=", "(", "paramiko", ".", "py3compat", ".", "u", "(", "arg", ")", "for", "arg", "in", "args", ")", "return", "os", ".", "path", ".", "join", "(", "*", "args", ")" ]
Wrapper around `os.path.join`. Makes sure to join paths of the same type (unicode).
[ "Wrapper", "around", "os", ".", "path", ".", "join", ".", "Makes", "sure", "to", "join", "paths", "of", "the", "same", "type", "(", "bytes", ")", "." ]
python
train
fgmacedo/django-export-action
export_action/introspection.py
https://github.com/fgmacedo/django-export-action/blob/215fecb9044d22e3ae19d86c3b220041a11fad07/export_action/introspection.py#L50-L63
def get_relation_fields_from_model(model_class):
    """ Get related fields (m2m, FK, and reverse FK) """
    relation_fields = []
    all_fields_names = _get_all_field_names(model_class)
    for field_name in all_fields_names:
        field, model, direct, m2m = _get_field_by_name(model_class, field_name)
        # get_all_field_names will return the same field
        # both with and without _id. Ignore the duplicate.
        if field_name[-3:] == '_id' and field_name[:-3] in all_fields_names:
            continue
        if m2m or not direct or _get_remote_field(field):
            field.field_name_override = field_name
            relation_fields += [field]
    return relation_fields
[ "def", "get_relation_fields_from_model", "(", "model_class", ")", ":", "relation_fields", "=", "[", "]", "all_fields_names", "=", "_get_all_field_names", "(", "model_class", ")", "for", "field_name", "in", "all_fields_names", ":", "field", ",", "model", ",", "direct", ",", "m2m", "=", "_get_field_by_name", "(", "model_class", ",", "field_name", ")", "# get_all_field_names will return the same field", "# both with and without _id. Ignore the duplicate.", "if", "field_name", "[", "-", "3", ":", "]", "==", "'_id'", "and", "field_name", "[", ":", "-", "3", "]", "in", "all_fields_names", ":", "continue", "if", "m2m", "or", "not", "direct", "or", "_get_remote_field", "(", "field", ")", ":", "field", ".", "field_name_override", "=", "field_name", "relation_fields", "+=", "[", "field", "]", "return", "relation_fields" ]
Get related fields (m2m, FK, and reverse FK)
[ "Get", "related", "fields", "(", "m2m", "FK", "and", "reverse", "FK", ")" ]
python
train
fermiPy/fermipy
fermipy/diffuse/diffuse_src_manager.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/diffuse_src_manager.py#L377-L434
def make_diffuse_comp_info_dict(self, diffuse_sources, components):
    """ Make a dictionary mapping from diffuse component to information
    about that component

    Parameters
    ----------
    diffuse_sources : dict
        Dictionary with diffuse source definitions
    components : dict
        Dictionary with event selection definitions, needed for
        selection-dependent diffuse components

    Returns
    -------
    ret_dict : dict
        Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
    """
    ret_dict = {}
    for key, value in diffuse_sources.items():
        if value is None:
            continue
        model_type = value.get('model_type', 'MapCubeSource')
        if model_type in ['galprop_rings', 'catalog']:
            continue
        selection_dependent = value.get('selection_dependent', False)
        moving = value.get('moving', False)
        versions = value.get('versions', [])
        for version in versions:
            # sourcekey = self._name_factory.sourcekey(source_name=key,
            #                                          source_ver=version)
            comp_dict = None
            if selection_dependent:
                # For selection dependent diffuse sources we need to split
                # by binning component
                comp_dict = {}
                for comp in components:
                    comp_key = comp.make_key('{ebin_name}_{evtype_name}')
                    comp_dict[comp_key] = self.make_diffuse_comp_info(
                        key, version, value, None, comp_key)
            elif moving:
                # For moving diffuse sources we need to split by zmax cut
                comp_dict = {}
                zmax_dict = {}
                for comp in components:
                    zmax_dict[int(comp.zmax)] = True
                zmax_list = sorted(zmax_dict.keys())
                for zmax in zmax_list:
                    comp_key = "zmax%i" % (zmax)
                    comp_dict[comp_key] = self.make_diffuse_comp_info(
                        key, version, value, None, comp_key)
            comp_info = self.make_diffuse_comp_info(key, version, value, comp_dict)
            ret_dict[comp_info.sourcekey] = comp_info
    self._diffuse_comp_info_dict.update(ret_dict)
    return ret_dict
[ "def", "make_diffuse_comp_info_dict", "(", "self", ",", "diffuse_sources", ",", "components", ")", ":", "ret_dict", "=", "{", "}", "for", "key", ",", "value", "in", "diffuse_sources", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "continue", "model_type", "=", "value", ".", "get", "(", "'model_type'", ",", "'MapCubeSource'", ")", "if", "model_type", "in", "[", "'galprop_rings'", ",", "'catalog'", "]", ":", "continue", "selection_dependent", "=", "value", ".", "get", "(", "'selection_dependent'", ",", "False", ")", "moving", "=", "value", ".", "get", "(", "'moving'", ",", "False", ")", "versions", "=", "value", ".", "get", "(", "'versions'", ",", "[", "]", ")", "for", "version", "in", "versions", ":", "# sourcekey = self._name_factory.sourcekey(source_name=key,", "# source_ver=version)", "comp_dict", "=", "None", "if", "selection_dependent", ":", "# For selection dependent diffuse sources we need to split", "# by binning component", "comp_dict", "=", "{", "}", "for", "comp", "in", "components", ":", "comp_key", "=", "comp", ".", "make_key", "(", "'{ebin_name}_{evtype_name}'", ")", "comp_dict", "[", "comp_key", "]", "=", "self", ".", "make_diffuse_comp_info", "(", "key", ",", "version", ",", "value", ",", "None", ",", "comp_key", ")", "elif", "moving", ":", "# For moving diffuse sources we need to split by zmax cut", "comp_dict", "=", "{", "}", "zmax_dict", "=", "{", "}", "for", "comp", "in", "components", ":", "zmax_dict", "[", "int", "(", "comp", ".", "zmax", ")", "]", "=", "True", "zmax_list", "=", "sorted", "(", "zmax_dict", ".", "keys", "(", ")", ")", "for", "zmax", "in", "zmax_list", ":", "comp_key", "=", "\"zmax%i\"", "%", "(", "zmax", ")", "comp_dict", "[", "comp_key", "]", "=", "self", ".", "make_diffuse_comp_info", "(", "key", ",", "version", ",", "value", ",", "None", ",", "comp_key", ")", "comp_info", "=", "self", ".", "make_diffuse_comp_info", "(", "key", ",", "version", ",", "value", ",", "comp_dict", ")", "ret_dict", "[", "comp_info", ".", "sourcekey", "]", "=", "comp_info", "self", ".", "_diffuse_comp_info_dict", ".", "update", "(", "ret_dict", ")", "return", "ret_dict" ]
Make a dictionary mapping from diffuse component to information about that component

Parameters
----------
diffuse_sources : dict
    Dictionary with diffuse source definitions
components : dict
    Dictionary with event selection definitions, needed for
    selection-dependent diffuse components

Returns
-------
ret_dict : dict
    Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
[ "Make", "a", "dictionary", "maping", "from", "diffuse", "component", "to", "information", "about", "that", "component" ]
python
train
Parquery/icontract
icontract/_metaclass.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_metaclass.py#L172-L268
def _decorate_namespace_property(bases: List[type], namespace: MutableMapping[str, Any], key: str) -> None:
    """Collect contracts for all getters/setters/deleters corresponding to ``key`` and decorate them."""
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    value = namespace[key]
    assert isinstance(value, property)

    fget = value.fget  # type: Optional[Callable[..., Any]]
    fset = value.fset  # type: Optional[Callable[..., Any]]
    fdel = value.fdel  # type: Optional[Callable[..., Any]]

    for func in [value.fget, value.fset, value.fdel]:
        func = cast(Callable[..., Any], func)

        if func is None:
            continue

        # Collect the preconditions and postconditions from bases
        base_preconditions = []  # type: List[List[Contract]]
        base_snapshots = []  # type: List[Snapshot]
        base_postconditions = []  # type: List[Contract]

        bases_have_func = False

        for base in bases:
            if hasattr(base, key):
                base_property = getattr(base, key)
                assert isinstance(base_property, property), \
                    "Expected base {} to have {} as property, but got: {}".format(base, key, base_property)

                if func == value.fget:
                    base_func = getattr(base, key).fget
                elif func == value.fset:
                    base_func = getattr(base, key).fset
                elif func == value.fdel:
                    base_func = getattr(base, key).fdel
                else:
                    raise NotImplementedError(
                        "Unhandled case: func neither value.fget, value.fset nor value.fdel")

                if base_func is None:
                    continue

                bases_have_func = True

                # Check if there is a checker function in the base class
                base_contract_checker = icontract._checkers.find_checker(func=base_func)

                # Ignore functions which don't have preconditions or postconditions
                if base_contract_checker is not None:
                    base_preconditions.extend(base_contract_checker.__preconditions__)  # type: ignore
                    base_snapshots.extend(base_contract_checker.__postcondition_snapshots__)  # type: ignore
                    base_postconditions.extend(base_contract_checker.__postconditions__)  # type: ignore

        # Add preconditions and postconditions of the function
        preconditions = []  # type: List[List[Contract]]
        snapshots = []  # type: List[Snapshot]
        postconditions = []  # type: List[Contract]

        contract_checker = icontract._checkers.find_checker(func=func)
        if contract_checker is not None:
            preconditions = contract_checker.__preconditions__  # type: ignore
            snapshots = contract_checker.__postcondition_snapshots__
            postconditions = contract_checker.__postconditions__  # type: ignore

        preconditions = _collapse_preconditions(
            base_preconditions=base_preconditions,
            bases_have_func=bases_have_func,
            preconditions=preconditions,
            func=func)

        snapshots = _collapse_snapshots(base_snapshots=base_snapshots, snapshots=snapshots)

        postconditions = _collapse_postconditions(
            base_postconditions=base_postconditions, postconditions=postconditions)

        if preconditions or postconditions:
            if contract_checker is None:
                contract_checker = icontract._checkers.decorate_with_checker(func=func)

                # Replace the function with the function decorated with contract checks
                if func == value.fget:
                    fget = contract_checker
                elif func == value.fset:
                    fset = contract_checker
                elif func == value.fdel:
                    fdel = contract_checker
                else:
                    raise NotImplementedError("Unhandled case: func neither fget, fset nor fdel")

            # Override the preconditions and postconditions
            contract_checker.__preconditions__ = preconditions  # type: ignore
            contract_checker.__postcondition_snapshots__ = snapshots  # type: ignore
            contract_checker.__postconditions__ = postconditions  # type: ignore

    if fget != value.fget or fset != value.fset or fdel != value.fdel:
        namespace[key] = property(fget=fget, fset=fset, fdel=fdel)
[ "def", "_decorate_namespace_property", "(", "bases", ":", "List", "[", "type", "]", ",", "namespace", ":", "MutableMapping", "[", "str", ",", "Any", "]", ",", "key", ":", "str", ")", "->", "None", ":", "# pylint: disable=too-many-locals", "# pylint: disable=too-many-branches", "# pylint: disable=too-many-statements", "value", "=", "namespace", "[", "key", "]", "assert", "isinstance", "(", "value", ",", "property", ")", "fget", "=", "value", ".", "fget", "# type: Optional[Callable[..., Any]]", "fset", "=", "value", ".", "fset", "# type: Optional[Callable[..., Any]]", "fdel", "=", "value", ".", "fdel", "# type: Optional[Callable[..., Any]]", "for", "func", "in", "[", "value", ".", "fget", ",", "value", ".", "fset", ",", "value", ".", "fdel", "]", ":", "func", "=", "cast", "(", "Callable", "[", "...", ",", "Any", "]", ",", "func", ")", "if", "func", "is", "None", ":", "continue", "# Collect the preconditions and postconditions from bases", "base_preconditions", "=", "[", "]", "# type: List[List[Contract]]", "base_snapshots", "=", "[", "]", "# type: List[Snapshot]", "base_postconditions", "=", "[", "]", "# type: List[Contract]", "bases_have_func", "=", "False", "for", "base", "in", "bases", ":", "if", "hasattr", "(", "base", ",", "key", ")", ":", "base_property", "=", "getattr", "(", "base", ",", "key", ")", "assert", "isinstance", "(", "base_property", ",", "property", ")", ",", "\"Expected base {} to have {} as property, but got: {}\"", ".", "format", "(", "base", ",", "key", ",", "base_property", ")", "if", "func", "==", "value", ".", "fget", ":", "base_func", "=", "getattr", "(", "base", ",", "key", ")", ".", "fget", "elif", "func", "==", "value", ".", "fset", ":", "base_func", "=", "getattr", "(", "base", ",", "key", ")", ".", "fset", "elif", "func", "==", "value", ".", "fdel", ":", "base_func", "=", "getattr", "(", "base", ",", "key", ")", ".", "fdel", "else", ":", "raise", "NotImplementedError", "(", "\"Unhandled case: func neither value.fget, value.fset nor value.fdel\"", ")", "if", "base_func", "is", "None", ":", "continue", "bases_have_func", "=", "True", "# Check if there is a checker function in the base class", "base_contract_checker", "=", "icontract", ".", "_checkers", ".", "find_checker", "(", "func", "=", "base_func", ")", "# Ignore functions which don't have preconditions or postconditions", "if", "base_contract_checker", "is", "not", "None", ":", "base_preconditions", ".", "extend", "(", "base_contract_checker", ".", "__preconditions__", ")", "# type: ignore", "base_snapshots", ".", "extend", "(", "base_contract_checker", ".", "__postcondition_snapshots__", ")", "# type: ignore", "base_postconditions", ".", "extend", "(", "base_contract_checker", ".", "__postconditions__", ")", "# type: ignore", "# Add preconditions and postconditions of the function", "preconditions", "=", "[", "]", "# type: List[List[Contract]]", "snapshots", "=", "[", "]", "# type: List[Snapshot]", "postconditions", "=", "[", "]", "# type: List[Contract]", "contract_checker", "=", "icontract", ".", "_checkers", ".", "find_checker", "(", "func", "=", "func", ")", "if", "contract_checker", "is", "not", "None", ":", "preconditions", "=", "contract_checker", ".", "__preconditions__", "# type: ignore", "snapshots", "=", "contract_checker", ".", "__postcondition_snapshots__", "postconditions", "=", "contract_checker", ".", "__postconditions__", "# type: ignore", "preconditions", "=", "_collapse_preconditions", "(", "base_preconditions", "=", "base_preconditions", ",", "bases_have_func", "=", 
"bases_have_func", ",", "preconditions", "=", "preconditions", ",", "func", "=", "func", ")", "snapshots", "=", "_collapse_snapshots", "(", "base_snapshots", "=", "base_snapshots", ",", "snapshots", "=", "snapshots", ")", "postconditions", "=", "_collapse_postconditions", "(", "base_postconditions", "=", "base_postconditions", ",", "postconditions", "=", "postconditions", ")", "if", "preconditions", "or", "postconditions", ":", "if", "contract_checker", "is", "None", ":", "contract_checker", "=", "icontract", ".", "_checkers", ".", "decorate_with_checker", "(", "func", "=", "func", ")", "# Replace the function with the function decorated with contract checks", "if", "func", "==", "value", ".", "fget", ":", "fget", "=", "contract_checker", "elif", "func", "==", "value", ".", "fset", ":", "fset", "=", "contract_checker", "elif", "func", "==", "value", ".", "fdel", ":", "fdel", "=", "contract_checker", "else", ":", "raise", "NotImplementedError", "(", "\"Unhandled case: func neither fget, fset nor fdel\"", ")", "# Override the preconditions and postconditions", "contract_checker", ".", "__preconditions__", "=", "preconditions", "# type: ignore", "contract_checker", ".", "__postcondition_snapshots__", "=", "snapshots", "# type: ignore", "contract_checker", ".", "__postconditions__", "=", "postconditions", "# type: ignore", "if", "fget", "!=", "value", ".", "fget", "or", "fset", "!=", "value", ".", "fset", "or", "fdel", "!=", "value", ".", "fdel", ":", "namespace", "[", "key", "]", "=", "property", "(", "fget", "=", "fget", ",", "fset", "=", "fset", ",", "fdel", "=", "fdel", ")" ]
Collect contracts for all getters/setters/deleters corresponding to ``key`` and decorate them.
[ "Collect", "contracts", "for", "all", "getters", "/", "setters", "/", "deleters", "corresponding", "to", "key", "and", "decorate", "them", "." ]
python
train
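To make the property-decoration machinery above concrete, here is a minimal sketch, assuming only icontract's public API (DBC, ensure, ViolationError); the account classes are invented for illustration.

import icontract

class Account(icontract.DBC):
    def __init__(self) -> None:
        self._balance = 0

    @property
    @icontract.ensure(lambda result: result >= 0)
    def balance(self) -> int:
        return self._balance

class CheatingAccount(Account):
    @property
    def balance(self) -> int:
        # No contract is written here; the metaclass collects the getter
        # postcondition from Account.balance and re-decorates this property.
        return self._balance - 100

try:
    print(CheatingAccount().balance)
except icontract.ViolationError as err:
    print('inherited postcondition violated:', err)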
pyamg/pyamg
pyamg/util/utils.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L42-L89
def profile_solver(ml, accel=None, **kwargs): """Profile a particular multilevel object. Parameters ---------- ml : multilevel Fully constructed multilevel object accel : function pointer Pointer to a valid Krylov solver (e.g. gmres, cg) Returns ------- residuals : array Array of residuals for each iteration See Also -------- multilevel.psolve, multilevel.solve Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags, csr_matrix >>> from scipy.sparse.linalg import cg >>> from pyamg.classical import ruge_stuben_solver >>> from pyamg.util.utils import profile_solver >>> n=100 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = csr_matrix(spdiags(data,[-1,0,1],n,n)) >>> b = A*np.ones(A.shape[0]) >>> ml = ruge_stuben_solver(A, max_coarse=10) >>> res = profile_solver(ml,accel=cg) """ A = ml.levels[0].A b = A * sp.rand(A.shape[0], 1) residuals = [] if accel is None: ml.solve(b, residuals=residuals, **kwargs) else: def callback(x): residuals.append(norm(np.ravel(b) - np.ravel(A*x))) M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V')) accel(A, b, M=M, callback=callback, **kwargs) return np.asarray(residuals)
[ "def", "profile_solver", "(", "ml", ",", "accel", "=", "None", ",", "*", "*", "kwargs", ")", ":", "A", "=", "ml", ".", "levels", "[", "0", "]", ".", "A", "b", "=", "A", "*", "sp", ".", "rand", "(", "A", ".", "shape", "[", "0", "]", ",", "1", ")", "residuals", "=", "[", "]", "if", "accel", "is", "None", ":", "ml", ".", "solve", "(", "b", ",", "residuals", "=", "residuals", ",", "*", "*", "kwargs", ")", "else", ":", "def", "callback", "(", "x", ")", ":", "residuals", ".", "append", "(", "norm", "(", "np", ".", "ravel", "(", "b", ")", "-", "np", ".", "ravel", "(", "A", "*", "x", ")", ")", ")", "M", "=", "ml", ".", "aspreconditioner", "(", "cycle", "=", "kwargs", ".", "get", "(", "'cycle'", ",", "'V'", ")", ")", "accel", "(", "A", ",", "b", ",", "M", "=", "M", ",", "callback", "=", "callback", ",", "*", "*", "kwargs", ")", "return", "np", ".", "asarray", "(", "residuals", ")" ]
Profile a particular multilevel object. Parameters ---------- ml : multilevel Fully constructed multilevel object accel : function pointer Pointer to a valid Krylov solver (e.g. gmres, cg) Returns ------- residuals : array Array of residuals for each iteration See Also -------- multilevel.psolve, multilevel.solve Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags, csr_matrix >>> from scipy.sparse.linalg import cg >>> from pyamg.classical import ruge_stuben_solver >>> from pyamg.util.utils import profile_solver >>> n=100 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = csr_matrix(spdiags(data,[-1,0,1],n,n)) >>> b = A*np.ones(A.shape[0]) >>> ml = ruge_stuben_solver(A, max_coarse=10) >>> res = profile_solver(ml,accel=cg)
[ "Profile", "a", "particular", "multilevel", "object", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/transaction_create_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/transaction_create_codec.py#L20-L30
def encode_request(timeout, durability, transaction_type, thread_id): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(timeout, durability, transaction_type, thread_id)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_long(timeout) client_message.append_int(durability) client_message.append_int(transaction_type) client_message.append_long(thread_id) client_message.update_frame_length() return client_message
[ "def", "encode_request", "(", "timeout", ",", "durability", ",", "transaction_type", ",", "thread_id", ")", ":", "client_message", "=", "ClientMessage", "(", "payload_size", "=", "calculate_size", "(", "timeout", ",", "durability", ",", "transaction_type", ",", "thread_id", ")", ")", "client_message", ".", "set_message_type", "(", "REQUEST_TYPE", ")", "client_message", ".", "set_retryable", "(", "RETRYABLE", ")", "client_message", ".", "append_long", "(", "timeout", ")", "client_message", ".", "append_int", "(", "durability", ")", "client_message", ".", "append_int", "(", "transaction_type", ")", "client_message", ".", "append_long", "(", "thread_id", ")", "client_message", ".", "update_frame_length", "(", ")", "return", "client_message" ]
Encode request into client_message
[ "Encode", "request", "into", "client_message" ]
python
train
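A hedged sketch of calling this codec by hand; the field values are illustrative, not taken from a real cluster.

from hazelcast.protocol.codec import transaction_create_codec

message = transaction_create_codec.encode_request(
    timeout=120000,      # transaction timeout, assumed to be milliseconds
    durability=1,        # number of backups for the transaction log
    transaction_type=2,  # e.g. a two-phase transaction (assumed enum value)
    thread_id=1)

In normal use the client builds this message internally when a transaction begins; invoking the codec directly is mainly useful for protocol-level testing.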
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/eight_planets.py
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/eight_planets.py#L154-L186
def make_frame(fig, ax, plot_x: np.ndarray, plot_y: np.ndarray, frame_num: int, bodies: List[str], plot_colors: Dict[str, str], markersize_tbl: Dict[str, float], fname: str): """ Make a series of frames of the planetary orbits that can be assembled into a movie. plot_x and plot_y hold the x and y coordinates to draw, indexed by body; frame_num numbers the output PNG. """ # Clear the axis ax.clear() ax.set_title(f'Inner Planetary Orbits in 2018') ax.set_xlabel('x in J2000.0 Frame; Astronomical Units (au)') ax.set_ylabel('y in J2000.0 Frame; Astronomical Units (au)') # Scale and tick size a = 2.0 da = 1.0 ticks = np.arange(-a, a+da, da) # Set limits and ticks ax.set_xlim(-a, a) ax.set_ylim(-a, a) ax.set_xticks(ticks) ax.set_yticks(ticks) # Plot the orbit of each body for k, body in enumerate(bodies[0:5]): ax.plot(plot_x[k], plot_y[k], label=body, color=plot_colors[body], linewidth=0, markersize=markersize_tbl[body], marker='o') ax.grid() # Save this frame fig.savefig(f'{fname}_{frame_num:05d}.png')

[ "def", "make_frame", "(", "fig", ",", "ax", ",", "plot_x", ":", "np", ".", "ndarray", ",", "plot_y", ":", "np", ".", "ndarray", ",", "frame_num", ":", "int", ",", "bodies", ":", "List", "[", "str", "]", ",", "plot_colors", ":", "Dict", "[", "str", ",", "str", "]", ",", "markersize_tbl", ":", "Dict", "[", "str", ",", "float", "]", ",", "fname", ":", "str", ")", ":", "# Clear the axis", "ax", ".", "clear", "(", ")", "ax", ".", "set_title", "(", "f'Inner Planetary Orbits in 2018'", ")", "ax", ".", "set_xlabel", "(", "'x in J2000.0 Frame; Astronomical Units (au)'", ")", "ax", ".", "set_ylabel", "(", "'y in J2000.0 Frame; Astronomical Units (au)'", ")", "# Scale and tick size", "a", "=", "2.0", "da", "=", "1.0", "ticks", "=", "np", ".", "arange", "(", "-", "a", ",", "a", "+", "da", ",", "da", ")", "# Set limits and ticks", "ax", ".", "set_xlim", "(", "-", "a", ",", "a", ")", "ax", ".", "set_ylim", "(", "-", "a", ",", "a", ")", "ax", ".", "set_xticks", "(", "ticks", ")", "ax", ".", "set_yticks", "(", "ticks", ")", "# Plot the orbit of each body", "for", "k", ",", "body", "in", "enumerate", "(", "bodies", "[", "0", ":", "5", "]", ")", ":", "ax", ".", "plot", "(", "plot_x", "[", "k", "]", ",", "plot_y", "[", "k", "]", ",", "label", "=", "body", ",", "color", "=", "plot_colors", "[", "body", "]", ",", "linewidth", "=", "0", ",", "markersize", "=", "markersize_tbl", "[", "body", "]", ",", "marker", "=", "'o'", ")", "ax", ".", "grid", "(", ")", "# Save this frame", "fig", ".", "savefig", "(", "f'{fname}_{frame_num:05d}.png'", ")" ]
Make a series of frames of the planetary orbits that can be assembled into a movie. plot_x and plot_y hold the x and y coordinates to draw, indexed by body; frame_num numbers the output PNG.
[ "Make", "a", "series", "of", "frames", "of", "the", "planetary", "orbits", "that", "can", "be", "assembled", "into", "a", "movie", ".", "q", "is", "a", "Nx3B", "array", ".", "t", "indexes", "time", "points", ".", "3B", "columns", "are", "(", "x", "y", "z", ")", "for", "the", "bodies", "in", "order", "." ]
python
train
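A hedged driver for make_frame with stand-in coordinates (the real script feeds integrated orbit positions); the body names, colors, and marker sizes are made up, and the ffmpeg command at the end is one common way to stitch the frames.

import numpy as np
import matplotlib.pyplot as plt
from eight_planets import make_frame  # assumes the module above is importable

bodies = ['Sun', 'Mercury', 'Venus', 'Earth', 'Mars']
plot_colors = dict(zip(bodies, ['gold', 'gray', 'orange', 'blue', 'red']))
markersize_tbl = dict(zip(bodies, [16.0, 4.0, 8.0, 8.0, 6.0]))

fig, ax = plt.subplots(figsize=(8, 8))
rng = np.random.default_rng(0)
for frame_num in range(3):
    xy = rng.uniform(-1.5, 1.5, size=(2, len(bodies), 10))  # fake positions
    make_frame(fig, ax, xy[0], xy[1], frame_num, bodies,
               plot_colors, markersize_tbl, fname='orbits')
# then e.g.: ffmpeg -i orbits_%05d.png orbits.mp4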
libnano/primer3-py
primer3/wrappers.py
https://github.com/libnano/primer3-py/blob/0901c0ef3ac17afd69329d23db71136c00bcb635/primer3/wrappers.py#L70-L94
def calcTm(seq, mv_conc=50, dv_conc=0, dntp_conc=0.8, dna_conc=50, max_nn_length=60, tm_method='santalucia', salt_corrections_method='santalucia'): ''' Return the tm of `seq` as a float. ''' tm_meth = _tm_methods.get(tm_method) if tm_meth is None: raise ValueError('{} is not a valid tm calculation method'.format( tm_method)) salt_meth = _salt_corrections_methods.get(salt_corrections_method) if salt_meth is None: raise ValueError('{} is not a valid salt correction method'.format( salt_corrections_method)) # For whatever reason mv_conc and dna_conc have to be ints args = [pjoin(PRIMER3_HOME, 'oligotm'), '-mv', str(mv_conc), '-dv', str(dv_conc), '-n', str(dntp_conc), '-d', str(dna_conc), '-tp', str(tm_meth), '-sc', str(salt_meth), seq] tm = subprocess.check_output(args, stderr=DEV_NULL, env=os.environ) return float(tm)
[ "def", "calcTm", "(", "seq", ",", "mv_conc", "=", "50", ",", "dv_conc", "=", "0", ",", "dntp_conc", "=", "0.8", ",", "dna_conc", "=", "50", ",", "max_nn_length", "=", "60", ",", "tm_method", "=", "'santalucia'", ",", "salt_corrections_method", "=", "'santalucia'", ")", ":", "tm_meth", "=", "_tm_methods", ".", "get", "(", "tm_method", ")", "if", "tm_meth", "is", "None", ":", "raise", "ValueError", "(", "'{} is not a valid tm calculation method'", ".", "format", "(", "tm_method", ")", ")", "salt_meth", "=", "_salt_corrections_methods", ".", "get", "(", "salt_corrections_method", ")", "if", "salt_meth", "is", "None", ":", "raise", "ValueError", "(", "'{} is not a valid salt correction method'", ".", "format", "(", "salt_corrections_method", ")", ")", "# For whatever reason mv_conc and dna_conc have to be ints", "args", "=", "[", "pjoin", "(", "PRIMER3_HOME", ",", "'oligotm'", ")", ",", "'-mv'", ",", "str", "(", "mv_conc", ")", ",", "'-dv'", ",", "str", "(", "dv_conc", ")", ",", "'-n'", ",", "str", "(", "dntp_conc", ")", ",", "'-d'", ",", "str", "(", "dna_conc", ")", ",", "'-tp'", ",", "str", "(", "tm_meth", ")", ",", "'-sc'", ",", "str", "(", "salt_meth", ")", ",", "seq", "]", "tm", "=", "subprocess", ".", "check_output", "(", "args", ",", "stderr", "=", "DEV_NULL", ",", "env", "=", "os", ".", "environ", ")", "return", "float", "(", "tm", ")" ]
Return the tm of `seq` as a float.
[ "Return", "the", "tm", "of", "seq", "as", "a", "float", "." ]
python
train
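An illustrative call, assuming the Primer3 oligotm binary is installed under the PRIMER3_HOME this wrapper expects; the sequence is the M13 forward primer.

from primer3 import wrappers

tm = wrappers.calcTm('GTAAAACGACGGCCAGT', mv_conc=50, dv_conc=1.5,
                     dntp_conc=0.6, dna_conc=50)
print(round(tm, 2))

Note that the wrappers module shells out to the Primer3 binaries; the in-process bindings in primer3.bindings are the usual faster alternative.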
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/tfvc/tfvc_client.py#L610-L645
def get_labels(self, request_data, project=None, top=None, skip=None): """GetLabels. Get a collection of shallow label references. :param :class:`<TfvcLabelRequestData> <azure.devops.v5_0.tfvc.models.TfvcLabelRequestData>` request_data: labelScope, name, owner, and itemLabelFilter :param str project: Project ID or project name :param int top: Max number of labels to return :param int skip: Number of labels to skip :rtype: [TfvcLabelRef] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if request_data is not None: if request_data.label_scope is not None: query_parameters['requestData.labelScope'] = request_data.label_scope if request_data.name is not None: query_parameters['requestData.name'] = request_data.name if request_data.owner is not None: query_parameters['requestData.owner'] = request_data.owner if request_data.item_label_filter is not None: query_parameters['requestData.itemLabelFilter'] = request_data.item_label_filter if request_data.max_item_count is not None: query_parameters['requestData.maxItemCount'] = request_data.max_item_count if request_data.include_links is not None: query_parameters['requestData.includeLinks'] = request_data.include_links if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='a5d9bd7f-b661-4d0e-b9be-d9c16affae54', version='5.0', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcLabelRef]', self._unwrap_collection(response))
[ "def", "get_labels", "(", "self", ",", "request_data", ",", "project", "=", "None", ",", "top", "=", "None", ",", "skip", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "request_data", "is", "not", "None", ":", "if", "request_data", ".", "label_scope", "is", "not", "None", ":", "query_parameters", "[", "'requestData.labelScope'", "]", "=", "request_data", ".", "label_scope", "if", "request_data", ".", "name", "is", "not", "None", ":", "query_parameters", "[", "'requestData.name'", "]", "=", "request_data", ".", "name", "if", "request_data", ".", "owner", "is", "not", "None", ":", "query_parameters", "[", "'requestData.owner'", "]", "=", "request_data", ".", "owner", "if", "request_data", ".", "item_label_filter", "is", "not", "None", ":", "query_parameters", "[", "'requestData.itemLabelFilter'", "]", "=", "request_data", ".", "item_label_filter", "if", "request_data", ".", "max_item_count", "is", "not", "None", ":", "query_parameters", "[", "'requestData.maxItemCount'", "]", "=", "request_data", ".", "max_item_count", "if", "request_data", ".", "include_links", "is", "not", "None", ":", "query_parameters", "[", "'requestData.includeLinks'", "]", "=", "request_data", ".", "include_links", "if", "top", "is", "not", "None", ":", "query_parameters", "[", "'$top'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'top'", ",", "top", ",", "'int'", ")", "if", "skip", "is", "not", "None", ":", "query_parameters", "[", "'$skip'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'skip'", ",", "skip", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'a5d9bd7f-b661-4d0e-b9be-d9c16affae54'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[TfvcLabelRef]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetLabels. Get a collection of shallow label references. :param :class:`<TfvcLabelRequestData> <azure.devops.v5_0.tfvc.models.TfvcLabelRequestData>` request_data: labelScope, name, owner, and itemLabelFilter :param str project: Project ID or project name :param int top: Max number of labels to return :param int skip: Number of labels to skip :rtype: [TfvcLabelRef]
[ "GetLabels", ".", "Get", "a", "collection", "of", "shallow", "label", "references", ".", ":", "param", ":", "class", ":", "<TfvcLabelRequestData", ">", "<azure", ".", "devops", ".", "v5_0", ".", "tfvc", ".", "models", ".", "TfvcLabelRequestData", ">", "request_data", ":", "labelScope", "name", "owner", "and", "itemLabelFilter", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "top", ":", "Max", "number", "of", "labels", "to", "return", ":", "param", "int", "skip", ":", "Number", "of", "labels", "to", "skip", ":", "rtype", ":", "[", "TfvcLabelRef", "]" ]
python
train
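Hypothetical usage following the library README's connection pattern; the organization URL, token, and project name are placeholders.

from azure.devops.connection import Connection
from azure.devops.v5_0.tfvc.models import TfvcLabelRequestData
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'PERSONAL_ACCESS_TOKEN'))
tfvc_client = connection.clients.get_tfvc_client()
request_data = TfvcLabelRequestData(owner='jdoe', include_links=False)
for label in tfvc_client.get_labels(request_data, project='MyProject', top=10):
    print(label.name)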
MrYsLab/PyMata
PyMata/pymata.py
https://github.com/MrYsLab/PyMata/blob/7e0ec34670b5a0d3d6b74bcbe4f3808c845cc429/PyMata/pymata.py#L644-L654
def i2c_get_read_data(self, address): """ This method retrieves the i2c read data as the result of an i2c_read() command. :param address: i2c device address :return: raw data read from device """ if address in self._command_handler.i2c_map: map_entry = self._command_handler.i2c_map[address] return map_entry[1]
[ "def", "i2c_get_read_data", "(", "self", ",", "address", ")", ":", "if", "address", "in", "self", ".", "_command_handler", ".", "i2c_map", ":", "map_entry", "=", "self", ".", "_command_handler", ".", "i2c_map", "[", "address", "]", "return", "map_entry", "[", "1", "]" ]
This method retrieves the i2c read data as the result of an i2c_read() command. :param address: i2c device address :return: raw data read from device
[ "This", "method", "retrieves", "the", "i2c", "read", "data", "as", "the", "result", "of", "an", "i2c_read", "()", "command", "." ]
python
valid
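A sketch of the read-then-poll pattern this getter implies (Firmata issues the read asynchronously); the serial port, pin numbers, and device address 0x48 are placeholders.

import time
from PyMata.pymata import PyMata

board = PyMata('/dev/ttyACM0')
board.i2c_config(0, board.ANALOG, 4, 5)     # Uno-style I2C on A4/A5
board.i2c_read(0x48, 0, 2, board.I2C_READ)  # request two bytes
time.sleep(0.1)                             # give the reply time to arrive
print(board.i2c_get_read_data(0x48))        # raw data read from the device
board.close()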
moderngl/moderngl
moderngl/buffer.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/buffer.py#L171-L183
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None: ''' Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all. ''' self.mglo.bind_to_uniform_block(binding, offset, size)
[ "def", "bind_to_uniform_block", "(", "self", ",", "binding", "=", "0", ",", "*", ",", "offset", "=", "0", ",", "size", "=", "-", "1", ")", "->", "None", ":", "self", ".", "mglo", ".", "bind_to_uniform_block", "(", "binding", ",", "offset", ",", "size", ")" ]
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
[ "Bind", "the", "buffer", "to", "a", "uniform", "block", "." ]
python
train
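A minimal standalone sketch showing the buffer side together with the matching program side; the shader and the packed data are illustrative.

import struct
import moderngl

ctx = moderngl.create_standalone_context()
prog = ctx.program(
    vertex_shader='''
        #version 330
        uniform Block { vec4 color; };
        in vec2 in_vert;
        out vec4 v_color;
        void main() {
            v_color = color;
            gl_Position = vec4(in_vert, 0.0, 1.0);
        }
    ''',
    fragment_shader='''
        #version 330
        in vec4 v_color;
        out vec4 f_color;
        void main() { f_color = v_color; }
    ''',
)
ubo = ctx.buffer(struct.pack('4f', 1.0, 0.0, 0.0, 1.0))  # one RGBA color
prog['Block'].binding = 0       # program side: uniform block index
ubo.bind_to_uniform_block(0)    # buffer side: the method documented above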
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L3584-L3641
def logical_or(lhs, rhs): """Returns the result of element-wise **logical or** comparison operation with broadcasting. For each element in input arrays, return 1(true) if lhs elements or rhs elements are true, otherwise return 0(false). Equivalent to ``lhs or rhs`` and ``mx.nd.broadcast_logical_or(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First input of the function. rhs : scalar or mxnet.ndarray.array Second input of the function. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray Output array of boolean values. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.logical_or(x, 1).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.logical_or(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.logical_or(z, y).asnumpy() array([[ 0., 1.], [ 1., 1.]], dtype=float32) """ # pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_logical_or, lambda x, y: 1 if x or y else 0, _internal._logical_or_scalar, None)
[ "def", "logical_or", "(", "lhs", ",", "rhs", ")", ":", "# pylint: disable= no-member, protected-access", "return", "_ufunc_helper", "(", "lhs", ",", "rhs", ",", "op", ".", "broadcast_logical_or", ",", "lambda", "x", ",", "y", ":", "1", "if", "x", "or", "y", "else", "0", ",", "_internal", ".", "_logical_or_scalar", ",", "None", ")" ]
Returns the result of element-wise **logical or** comparison operation with broadcasting. For each element in input arrays, return 1(true) if lhs elements or rhs elements are true, otherwise return 0(false). Equivalent to ``lhs or rhs`` and ``mx.nd.broadcast_logical_or(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.array First input of the function. rhs : scalar or mxnet.ndarray.array Second input of the function. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray Output array of boolean values. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> mx.nd.logical_or(x, 1).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.logical_or(x, y).asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.logical_or(z, y).asnumpy() array([[ 0., 1.], [ 1., 1.]], dtype=float32)
[ "Returns", "the", "result", "of", "element", "-", "wise", "**", "logical", "or", "**", "comparison", "operation", "with", "broadcasting", "." ]
python
train
lsst-sqre/ltd-conveyor
ltdconveyor/fastly.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/fastly.py#L14-L52
def purge_key(surrogate_key, service_id, api_key): """Instant purge URLs with a given surrogate key from the Fastly caches. Parameters ---------- surrogate_key : `str` Surrogate key header (``x-amz-meta-surrogate-key``) value of objects to purge from the Fastly cache. service_id : `str` Fastly service ID. api_key : `str` Fastly API key. Raises ------ FastlyError Error with the Fastly API usage. Notes ----- This function uses Fastly's ``/service/{service}/purge/{key}`` endpoint. See the `Fastly Purge documentation <http://ls.st/jxg>`_ for more information. For other Fastly APIs, consider using `fastly-py <https://github.com/fastly/fastly-py>`_. """ logger = logging.getLogger(__name__) api_root = 'https://api.fastly.com' path = '/service/{service}/purge/{surrogate_key}'.format( service=service_id, surrogate_key=surrogate_key) logger.info('Fastly purge {0}'.format(path)) r = requests.post(api_root + path, headers={'Fastly-Key': api_key, 'Accept': 'application/json'}) if r.status_code != 200: raise FastlyError(r.json())
[ "def", "purge_key", "(", "surrogate_key", ",", "service_id", ",", "api_key", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "api_root", "=", "'https://api.fastly.com'", "path", "=", "'/service/{service}/purge/{surrogate_key}'", ".", "format", "(", "service", "=", "service_id", ",", "surrogate_key", "=", "surrogate_key", ")", "logger", ".", "info", "(", "'Fastly purge {0}'", ".", "format", "(", "path", ")", ")", "r", "=", "requests", ".", "post", "(", "api_root", "+", "path", ",", "headers", "=", "{", "'Fastly-Key'", ":", "api_key", ",", "'Accept'", ":", "'application/json'", "}", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "FastlyError", "(", "r", ".", "json", ")" ]
Instant purge URLs with a given surrogate key from the Fastly caches. Parameters ---------- surrogate_key : `str` Surrogate key header (``x-amz-meta-surrogate-key``) value of objects to purge from the Fastly cache. service_id : `str` Fastly service ID. api_key : `str` Fastly API key. Raises ------ FastlyError Error with the Fastly API usage. Notes ----- This function uses Fastly's ``/service/{service}/purge/{key}`` endpoint. See the `Fastly Purge documentation <http://ls.st/jxg>`_ for more information. For other Fastly APIs, consider using `fastly-py <https://github.com/fastly/fastly-py>`_.
[ "Instant", "purge", "URLs", "with", "a", "given", "surrogate", "key", "from", "the", "Fastly", "caches", "." ]
python
test
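A hypothetical invocation; the service ID and API key are placeholders for real Fastly credentials.

from ltdconveyor.fastly import purge_key

purge_key(surrogate_key='main',
          service_id='SU1Z0isxPaozGVKXdv0eY',
          api_key='FASTLY_API_KEY')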
wavefrontHQ/python-client
wavefront_api_client/api/search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/search_api.py#L1889-L1910
def search_external_links_for_facet(self, facet, **kwargs): # noqa: E501 """Lists the values of a specific facet over the customer's external links # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_external_links_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_external_links_for_facet_with_http_info(facet, **kwargs) # noqa: E501 else: (data) = self.search_external_links_for_facet_with_http_info(facet, **kwargs) # noqa: E501 return data
[ "def", "search_external_links_for_facet", "(", "self", ",", "facet", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_external_links_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_external_links_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Lists the values of a specific facet over the customer's external links # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_external_links_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread.
[ "Lists", "the", "values", "of", "a", "specific", "facet", "over", "the", "customer", "s", "external", "links", "#", "noqa", ":", "E501" ]
python
train
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L1778-L1830
def from_str(cls, input_string, fmt, primitive=False, sort=False, merge_tol=0.0): """ Reads a structure from a string. Args: input_string (str): String to parse. fmt (str): A format specification. primitive (bool): Whether to find a primitive cell. Defaults to False. sort (bool): Whether to sort the sites in accordance to the default ordering criteria, i.e., electronegativity. merge_tol (float): If this is some positive number, sites that are within merge_tol from each other will be merged. Usually 0.01 should be enough to deal with common numerical issues. Returns: IStructure / Structure """ from pymatgen.io.cif import CifParser from pymatgen.io.vasp import Poscar from pymatgen.io.cssr import Cssr from pymatgen.io.xcrysden import XSF from pymatgen.io.atat import Mcsqs fmt = fmt.lower() if fmt == "cif": parser = CifParser.from_string(input_string) s = parser.get_structures(primitive=primitive)[0] elif fmt == "poscar": s = Poscar.from_string(input_string, False, read_velocities=False).structure elif fmt == "cssr": cssr = Cssr.from_string(input_string) s = cssr.structure elif fmt == "json": d = json.loads(input_string) s = Structure.from_dict(d) elif fmt == "yaml": import ruamel.yaml as yaml d = yaml.safe_load(input_string) s = Structure.from_dict(d) elif fmt == "xsf": s = XSF.from_string(input_string).structure elif fmt == "mcsqs": s = Mcsqs.structure_from_string(input_string) else: raise ValueError("Unrecognized format `%s`!" % fmt) if sort: s = s.get_sorted_structure() if merge_tol: s.merge_sites(merge_tol) return cls.from_sites(s)
[ "def", "from_str", "(", "cls", ",", "input_string", ",", "fmt", ",", "primitive", "=", "False", ",", "sort", "=", "False", ",", "merge_tol", "=", "0.0", ")", ":", "from", "pymatgen", ".", "io", ".", "cif", "import", "CifParser", "from", "pymatgen", ".", "io", ".", "vasp", "import", "Poscar", "from", "pymatgen", ".", "io", ".", "cssr", "import", "Cssr", "from", "pymatgen", ".", "io", ".", "xcrysden", "import", "XSF", "from", "pymatgen", ".", "io", ".", "atat", "import", "Mcsqs", "fmt", "=", "fmt", ".", "lower", "(", ")", "if", "fmt", "==", "\"cif\"", ":", "parser", "=", "CifParser", ".", "from_string", "(", "input_string", ")", "s", "=", "parser", ".", "get_structures", "(", "primitive", "=", "primitive", ")", "[", "0", "]", "elif", "fmt", "==", "\"poscar\"", ":", "s", "=", "Poscar", ".", "from_string", "(", "input_string", ",", "False", ",", "read_velocities", "=", "False", ")", ".", "structure", "elif", "fmt", "==", "\"cssr\"", ":", "cssr", "=", "Cssr", ".", "from_string", "(", "input_string", ")", "s", "=", "cssr", ".", "structure", "elif", "fmt", "==", "\"json\"", ":", "d", "=", "json", ".", "loads", "(", "input_string", ")", "s", "=", "Structure", ".", "from_dict", "(", "d", ")", "elif", "fmt", "==", "\"yaml\"", ":", "import", "ruamel", ".", "yaml", "as", "yaml", "d", "=", "yaml", ".", "safe_load", "(", "input_string", ")", "s", "=", "Structure", ".", "from_dict", "(", "d", ")", "elif", "fmt", "==", "\"xsf\"", ":", "s", "=", "XSF", ".", "from_string", "(", "input_string", ")", ".", "structure", "elif", "fmt", "==", "\"mcsqs\"", ":", "s", "=", "Mcsqs", ".", "structure_from_string", "(", "input_string", ")", "else", ":", "raise", "ValueError", "(", "\"Unrecognized format `%s`!\"", "%", "fmt", ")", "if", "sort", ":", "s", "=", "s", ".", "get_sorted_structure", "(", ")", "if", "merge_tol", ":", "s", ".", "merge_sites", "(", "merge_tol", ")", "return", "cls", ".", "from_sites", "(", "s", ")" ]
Reads a structure from a string. Args: input_string (str): String to parse. fmt (str): A format specification. primitive (bool): Whether to find a primitive cell. Defaults to False. sort (bool): Whether to sort the sites in accordance to the default ordering criteria, i.e., electronegativity. merge_tol (float): If this is some positive number, sites that are within merge_tol from each other will be merged. Usually 0.01 should be enough to deal with common numerical issues. Returns: IStructure / Structure
[ "Reads", "a", "structure", "from", "a", "string", "." ]
python
train
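A quick illustration with an inline POSCAR string (silicon, two atoms); only pymatgen is required.

from pymatgen.core.structure import Structure

poscar = """Si2
1.0
3.84 0.00 0.00
1.92 3.32 0.00
0.00 0.00 6.27
Si
2
direct
0.00 0.00 0.00
0.33 0.67 0.50
"""
s = Structure.from_str(poscar, fmt="poscar")
print(s.composition)  # Si2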
timothydmorton/VESPA
vespa/stars/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/populations.py#L685-L693
def hidden_constraints(self): """ Constraints applied to the population, but temporarily removed. """ try: return self._hidden_constraints except AttributeError: self._hidden_constraints = ConstraintDict() return self._hidden_constraints
[ "def", "hidden_constraints", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_hidden_constraints", "except", "AttributeError", ":", "self", ".", "_hidden_constraints", "=", "ConstraintDict", "(", ")", "return", "self", ".", "_hidden_constraints" ]
Constraints applied to the population, but temporarily removed.
[ "Constraints", "applied", "to", "the", "population", "but", "temporarily", "removed", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L884-L893
def root_sa_root_enable(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa") root = ET.SubElement(root_sa, "root") enable = ET.SubElement(root, "enable") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "root_sa_root_enable", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "root_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"root-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "root", "=", "ET", ".", "SubElement", "(", "root_sa", ",", "\"root\"", ")", "enable", "=", "ET", ".", "SubElement", "(", "root", ",", "\"enable\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
azraq27/neural
neural/decon.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/decon.py#L459-L530
def smooth_decon_to_fwhm(decon,fwhm,cache=True): '''takes an input :class:`Decon` object and uses ``3dBlurToFWHM`` to make the output as close as possible to ``fwhm`` returns the final measured fwhm. If ``cache`` is ``True``, will save the blurred input file (and use it again in the future)''' if os.path.exists(decon.prefix): return blur_dset = lambda dset: nl.suffix(dset,'_smooth_to_%.2f' % fwhm) with nl.notify('Running smooth_decon_to_fwhm analysis (with %.2fmm blur)' % fwhm): tmpdir = tempfile.mkdtemp() try: cwd = os.getcwd() random_files = [re.sub(r'\[\d+\]$','',str(x)) for x in nl.flatten([x for x in decon.__dict__.values() if isinstance(x,basestring) or isinstance(x,list)]+[x.values() for x in decon.__dict__.values() if isinstance(x,dict)])] files_to_copy = [x for x in random_files if os.path.exists(x) and x[0]!='/'] files_to_copy += [blur_dset(dset) for dset in decon.input_dsets if os.path.exists(blur_dset(dset))] # copy crap for file in files_to_copy: try: shutil.copytree(file,tmpdir) except OSError as e: shutil.copy(file,tmpdir) shutil.copy(file,tmpdir) copyback_files = [decon.prefix,decon.errts] with nl.run_in(tmpdir): if os.path.exists(decon.prefix): os.remove(decon.prefix) # Create the blurred inputs (or load from cache) if cache and all([os.path.exists(os.path.join(cwd,blur_dset(dset))) for dset in decon.input_dsets]): # Everything is already cached... nl.notify('Using cache\'d blurred datasets') else: # Need to make them from scratch with nl.notify('Creating blurred datasets'): old_errts = decon.errts decon.errts = 'residual.nii.gz' decon.prefix = os.path.basename(decon.prefix) # Run once in place to get the residual dataset decon.run() running_reps = 0 for dset in decon.input_dsets: info = nl.dset_info(dset) residual_dset = nl.suffix(dset,'_residual') nl.run(['3dbucket','-prefix',residual_dset,'%s[%d..%d]'%(decon.errts,running_reps,running_reps+info.reps-1)],products=residual_dset) cmd = ['3dBlurToFWHM','-quiet','-input',dset,'-blurmaster',residual_dset,'-prefix',blur_dset(dset),'-FWHM',fwhm] if decon.mask: if decon.mask=='auto': cmd += ['-automask'] else: cmd += ['-mask',decon.mask] nl.run(cmd,products=blur_dset(dset)) running_reps += info.reps if cache: copyback_files.append(blur_dset(dset)) decon.errts = old_errts decon.input_dsets = [blur_dset(dset) for dset in decon.input_dsets] for d in [decon.prefix,decon.errts]: if os.path.exists(d): try: os.remove(d) except: pass decon.run() for copyfile in copyback_files: if os.path.exists(copyfile): shutil.copy(copyfile,cwd) else: nl.notify('Warning: deconvolve did not produce expected file %s' % decon.prefix,level=nl.level.warning) except: raise finally: shutil.rmtree(tmpdir,True)
[ "def", "smooth_decon_to_fwhm", "(", "decon", ",", "fwhm", ",", "cache", "=", "True", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "decon", ".", "prefix", ")", ":", "return", "blur_dset", "=", "lambda", "dset", ":", "nl", ".", "suffix", "(", "dset", ",", "'_smooth_to_%.2f'", "%", "fwhm", ")", "with", "nl", ".", "notify", "(", "'Running smooth_decon_to_fwhm analysis (with %.2fmm blur)'", "%", "fwhm", ")", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "random_files", "=", "[", "re", ".", "sub", "(", "r'\\[\\d+\\]$'", ",", "''", ",", "str", "(", "x", ")", ")", "for", "x", "in", "nl", ".", "flatten", "(", "[", "x", "for", "x", "in", "decon", ".", "__dict__", ".", "values", "(", ")", "if", "isinstance", "(", "x", ",", "basestring", ")", "or", "isinstance", "(", "x", ",", "list", ")", "]", "+", "[", "x", ".", "values", "(", ")", "for", "x", "in", "decon", ".", "__dict__", ".", "values", "(", ")", "if", "isinstance", "(", "x", ",", "dict", ")", "]", ")", "]", "files_to_copy", "=", "[", "x", "for", "x", "in", "random_files", "if", "os", ".", "path", ".", "exists", "(", "x", ")", "and", "x", "[", "0", "]", "!=", "'/'", "]", "files_to_copy", "+=", "[", "blur_dset", "(", "dset", ")", "for", "dset", "in", "decon", ".", "input_dsets", "if", "os", ".", "path", ".", "exists", "(", "blur_dset", "(", "dset", ")", ")", "]", "# copy crap", "for", "file", "in", "files_to_copy", ":", "try", ":", "shutil", ".", "copytree", "(", "file", ",", "tmpdir", ")", "except", "OSError", "as", "e", ":", "shutil", ".", "copy", "(", "file", ",", "tmpdir", ")", "shutil", ".", "copy", "(", "file", ",", "tmpdir", ")", "copyback_files", "=", "[", "decon", ".", "prefix", ",", "decon", ".", "errts", "]", "with", "nl", ".", "run_in", "(", "tmpdir", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "decon", ".", "prefix", ")", ":", "os", ".", "remove", "(", "decon", ".", "prefix", ")", "# Create the blurred inputs (or load from cache)", "if", "cache", "and", "all", "(", "[", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "blur_dset", "(", "dset", ")", ")", ")", "for", "dset", "in", "decon", ".", "input_dsets", "]", ")", ":", "# Everything is already cached...", "nl", ".", "notify", "(", "'Using cache\\'d blurred datasets'", ")", "else", ":", "# Need to make them from scratch", "with", "nl", ".", "notify", "(", "'Creating blurred datasets'", ")", ":", "old_errts", "=", "decon", ".", "errts", "decon", ".", "errts", "=", "'residual.nii.gz'", "decon", ".", "prefix", "=", "os", ".", "path", ".", "basename", "(", "decon", ".", "prefix", ")", "# Run once in place to get the residual dataset", "decon", ".", "run", "(", ")", "running_reps", "=", "0", "for", "dset", "in", "decon", ".", "input_dsets", ":", "info", "=", "nl", ".", "dset_info", "(", "dset", ")", "residual_dset", "=", "nl", ".", "suffix", "(", "dset", ",", "'_residual'", ")", "nl", ".", "run", "(", "[", "'3dbucket'", ",", "'-prefix'", ",", "residual_dset", ",", "'%s[%d..%d]'", "%", "(", "decon", ".", "errts", ",", "running_reps", ",", "running_reps", "+", "info", ".", "reps", "-", "1", ")", "]", ",", "products", "=", "residual_dset", ")", "cmd", "=", "[", "'3dBlurToFWHM'", ",", "'-quiet'", ",", "'-input'", ",", "dset", ",", "'-blurmaster'", ",", "residual_dset", ",", "'-prefix'", ",", "blur_dset", "(", "dset", ")", ",", "'-FWHM'", ",", "fwhm", "]", "if", "decon", ".", "mask", ":", "if", "decon", ".", "mask", "==", "'auto'", ":", "cmd", "+=", "[", 
"'-automask'", "]", "else", ":", "cmd", "+=", "[", "'-mask'", ",", "decon", ".", "mask", "]", "nl", ".", "run", "(", "cmd", ",", "products", "=", "blur_dset", "(", "dset", ")", ")", "running_reps", "+=", "info", ".", "reps", "if", "cache", ":", "copyback_files", ".", "append", "(", "blur_dset", "(", "dset", ")", ")", "decon", ".", "errts", "=", "old_errts", "decon", ".", "input_dsets", "=", "[", "blur_dset", "(", "dset", ")", "for", "dset", "in", "decon", ".", "input_dsets", "]", "for", "d", "in", "[", "decon", ".", "prefix", ",", "decon", ".", "errts", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "try", ":", "os", ".", "remove", "(", "d", ")", "except", ":", "pass", "decon", ".", "run", "(", ")", "for", "copyfile", "in", "copyback_files", ":", "if", "os", ".", "path", ".", "exists", "(", "copyfile", ")", ":", "shutil", ".", "copy", "(", "copyfile", ",", "cwd", ")", "else", ":", "nl", ".", "notify", "(", "'Warning: deconvolve did not produce expected file %s'", "%", "decon", ".", "prefix", ",", "level", "=", "nl", ".", "level", ".", "warning", ")", "except", ":", "raise", "finally", ":", "shutil", ".", "rmtree", "(", "tmpdir", ",", "True", ")" ]
Takes an input :class:`Decon` object and uses ``3dBlurToFWHM`` to make the output as close as possible to ``fwhm``. Returns the final measured fwhm. If ``cache`` is ``True``, will save the blurred input file (and use it again in the future)
[ "takes", "an", "input", ":", "class", ":", "Decon", "object", "and", "uses", "3dBlurToFWHM", "to", "make", "the", "output", "as", "close", "as", "possible", "to", "fwhm", "returns", "the", "final", "measured", "fwhm", ".", "If", "cache", "is", "True", "will", "save", "the", "blurred", "input", "file", "(", "and", "use", "it", "again", "in", "the", "future", ")" ]
python
train
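A heavily hedged usage sketch: it assumes AFNI's 3dBlurToFWHM is on PATH and that the package re-exports Decon and smooth_decon_to_fwhm the way the nl.-prefixed calls in the body suggest; all dataset names are placeholders.

import neural as nl

decon = nl.Decon()
decon.input_dsets = ['run1.nii.gz', 'run2.nii.gz']
decon.prefix = 'stats_smoothed.nii.gz'
decon.errts = 'errts_smoothed.nii.gz'
decon.mask = 'auto'
nl.smooth_decon_to_fwhm(decon, fwhm=6.0)  # blur inputs toward 6 mm, then fit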
BlueBrain/hpcbench
hpcbench/benchmark/hpl.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/hpl.py#L263-L268
def mpirun(self): """Additional options passed as a list to the ``mpirun`` command""" cmd = self.attributes['mpirun'] if cmd and cmd[0] != 'mpirun': cmd = ['mpirun'] + cmd return [str(e) for e in cmd]
[ "def", "mpirun", "(", "self", ")", ":", "cmd", "=", "self", ".", "attributes", "[", "'mpirun'", "]", "if", "cmd", "and", "cmd", "[", "0", "]", "!=", "'mpirun'", ":", "cmd", "=", "[", "'mpirun'", "]", "return", "[", "str", "(", "e", ")", "for", "e", "in", "cmd", "]" ]
Additional options passed as a list to the ``mpirun`` command
[ "Additional", "options", "passed", "as", "a", "list", "to", "the", "mpirun", "command" ]
python
train
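A standalone restatement of the normalization rule the property implements; normalize is a hypothetical helper mirroring its body.

def normalize(cmd):
    # Prepend the launcher only when the option list does not already
    # start with it, then stringify every element.
    if cmd and cmd[0] != 'mpirun':
        cmd = ['mpirun'] + cmd
    return [str(e) for e in cmd]

assert normalize(['-np', 16]) == ['mpirun', '-np', '16']
assert normalize(['mpirun', '-np', '4']) == ['mpirun', '-np', '4']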
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/partition_count_balancer.py#L204-L207
def rebalance_brokers(self): """Rebalance partition-count across brokers within each replication-group.""" for rg in six.itervalues(self.cluster_topology.rgs): rg.rebalance_brokers()
[ "def", "rebalance_brokers", "(", "self", ")", ":", "for", "rg", "in", "six", ".", "itervalues", "(", "self", ".", "cluster_topology", ".", "rgs", ")", ":", "rg", ".", "rebalance_brokers", "(", ")" ]
Rebalance partition-count across brokers within each replication-group.
[ "Rebalance", "partition", "-", "count", "across", "brokers", "within", "each", "replication", "-", "group", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/bijectors/affine.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/affine.py#L34-L37
def _as_tensor(x, name, dtype): """Convenience to convert to `Tensor` or leave as `None`.""" return None if x is None else tf.convert_to_tensor( value=x, name=name, dtype=dtype)
[ "def", "_as_tensor", "(", "x", ",", "name", ",", "dtype", ")", ":", "return", "None", "if", "x", "is", "None", "else", "tf", ".", "convert_to_tensor", "(", "value", "=", "x", ",", "name", "=", "name", ",", "dtype", "=", "dtype", ")" ]
Convenience to convert to `Tensor` or leave as `None`.
[ "Convenience", "to", "convert", "to", "Tensor", "or", "leave", "as", "None", "." ]
python
test
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/storage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/storage_api.py#L536-L568
def seek(self, offset, whence=os.SEEK_SET): """Set the file's current offset. Note if the new offset is out of bound, it is adjusted to either 0 or EOF. Args: offset: seek offset as number. whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (seek relative to the end, offset should be negative). Raises: IOError: When this buffer is closed. ValueError: When whence is invalid. """ self._check_open() self._buffer.reset() self._buffer_future = None if whence == os.SEEK_SET: self._offset = offset elif whence == os.SEEK_CUR: self._offset += offset elif whence == os.SEEK_END: self._offset = self._file_size + offset else: raise ValueError('Whence mode %s is invalid.' % str(whence)) self._offset = min(self._offset, self._file_size) self._offset = max(self._offset, 0) if self._remaining(): self._request_next_buffer()
[ "def", "seek", "(", "self", ",", "offset", ",", "whence", "=", "os", ".", "SEEK_SET", ")", ":", "self", ".", "_check_open", "(", ")", "self", ".", "_buffer", ".", "reset", "(", ")", "self", ".", "_buffer_future", "=", "None", "if", "whence", "==", "os", ".", "SEEK_SET", ":", "self", ".", "_offset", "=", "offset", "elif", "whence", "==", "os", ".", "SEEK_CUR", ":", "self", ".", "_offset", "+=", "offset", "elif", "whence", "==", "os", ".", "SEEK_END", ":", "self", ".", "_offset", "=", "self", ".", "_file_size", "+", "offset", "else", ":", "raise", "ValueError", "(", "'Whence mode %s is invalid.'", "%", "str", "(", "whence", ")", ")", "self", ".", "_offset", "=", "min", "(", "self", ".", "_offset", ",", "self", ".", "_file_size", ")", "self", ".", "_offset", "=", "max", "(", "self", ".", "_offset", ",", "0", ")", "if", "self", ".", "_remaining", "(", ")", ":", "self", ".", "_request_next_buffer", "(", ")" ]
Set the file's current offset. Note that if the new offset is out of bounds, it is adjusted to either 0 or EOF. Args: offset: seek offset as number. whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (seek relative to the end, offset should be negative). Raises: IOError: When this buffer is closed. ValueError: When whence is invalid.
[ "Set", "the", "file", "s", "current", "offset", "." ]
python
train
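A usage sketch inside a Google App Engine app; the bucket and object names are placeholders.

import os
import cloudstorage

gcs_file = cloudstorage.open('/my-bucket/data.bin')  # read mode by default
gcs_file.seek(-16, os.SEEK_END)   # position 16 bytes before EOF
tail = gcs_file.read()
gcs_file.close()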
lemieuxl/pyGenClean
pyGenClean/DupSNPs/duplicated_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSNPs/duplicated_snps.py#L231-L438
def createAndCleanTPED(tped, tfam, snps, prefix, chosenSNPs, completion, concordance, snpsToComplete, tfamFileName, completionT, concordanceT): """Complete a TPED for duplicated SNPs. :param tped: a representation of the ``tped`` of duplicated markers. :param tfam: a representation of the ``tfam``. :param snps: the position of duplicated markers in the ``tped``. :param prefix: the prefix of the output files. :param chosenSNPs: the markers that were chosen for completion (including problems). :param completion: the completion of each of the duplicated markers. :param concordance: the pairwise concordance of the duplicated markers. :param snpsToComplete: the markers that will be completed (excluding problems). :param tfamFileName: the name of the original ``tfam`` file. :param completionT: the completion threshold. :param concordanceT: the concordance threshold. :type tped: numpy.array :type tfam: list :type snps: dict :type prefix: str :type chosenSNPs: dict :type completion: numpy.array :type concordance: dict :type snpsToComplete: set :type tfamFileName: str :type completionT: float :type concordanceT: float :returns: a tuple containing the new ``tped`` after completion (:py:class:`numpy.array`) as the first element, and the indexes of the markers that will need to be removed (:py:class:`set`) as the last element. It creates three different files: * ``prefix.zeroed_out``: contains information about markers and samples where the genotype was zeroed out. * ``prefix.not_good_enough``: contains information about markers that were not good enough to help in completing the chosen markers (because of concordance or completion). * ``prefix.removed_duplicates``: the list of markers that were used for completing the chosen one, hence they will be removed from the final data set. Cycling through every genotype of every sample of every duplicated marker, the function checks if the genotypes are all the same. If the chosen one was not called, but the other ones were, then we complete the chosen one with the genotypes of the others (assuming that they are all the same). If there is a difference between the genotypes, it is zeroed out for the chosen marker. 
""" zeroedOutFile = None try: zeroedOutFile = open(prefix + ".zeroed_out", "w") except IOError: msg = "%(prefix).zeroed_out: can't write file" % locals() raise ProgramError(msg) print >>zeroedOutFile, "\t".join(["famID", "indID", "snpID"]) notGoodEnoughFile = None try: notGoodEnoughFile = open(prefix + ".not_good_enough", "w") except IOError: msg = "%(prefix)s.not_good_enough: can't write file" % locals() raise ProgramError(msg) print >>notGoodEnoughFile, "\t".join(["name", "reason"]) removedFile = None try: removedFile = open(prefix + ".removed_duplicates", "w") except IOError: msg = "%(prefix)s.removed_duplicates: can't write file" % locals() raise ProgramError(msg) notGoodEnoughSnps = set() # Split the tped in 'snpInfo' and 'genotypes' snpInfo = tped[:, :4] genotypes = tped[:, 4:] # The sed of index we want to get rid of at the end getRidOfIndex = set() for snpID, indexes in snps.iteritems(): if snpID not in snpsToComplete: # We don't want to complete this SNP, so we continue to next SNP continue # Getting the completion completionToRemove = set( np.where(completion[indexes] < completionT)[0] ) for k in completionToRemove: notGoodEnoughSnps.add((snpInfo[indexes][k, 1], "completion")) # Getting the concordance concordanceToRemove = set( np.where(concordance[snpID] < concordanceT)[0] ) for k in concordanceToRemove: notGoodEnoughSnps.add((snpInfo[indexes][k, 1], "concordance")) # These will be the indexes to remove indexesToRemove = set() for index in completionToRemove | concordanceToRemove: indexesToRemove.add(indexes[index]) # These are the indexes to keep indexesToKeep = [] for index in indexes: if index not in indexesToRemove: indexesToKeep.append(index) # Getting the chosen SNP chosenOne = chosenSNPs[snpID] if chosenOne not in set(indexesToKeep): # The chosen SNP is not a good SNP, so we go to next SNP logger.warning(" - {} chosen but not good enough".format( snpInfo[chosenOne, 1], )) continue # Now cycling through the genotypes nbSamples = genotypes.shape[1] for sampleIndex in xrange(nbSamples): # We need to remove the no call and keep the unique genotypes curGenotypes = genotypes[indexesToKeep, sampleIndex] cleanedCurGenotypes = curGenotypes[ np.where(curGenotypes != "0 0") ] uniqueCleanedCurGenotypes = np.unique(cleanedCurGenotypes) # Checking the number of unique genotypes toComplete = False if len(uniqueCleanedCurGenotypes) > 1: # There are more than one unique genotype (except 0 0) # len = 0 means all were 0 0 # len = 1 means they are all the same # len > 1 means discordance (might need to flip) # Just need to check the order of the alleles possibleAlleles = [ set() for k in xrange(len(uniqueCleanedCurGenotypes)) ] for k, geno in enumerate(uniqueCleanedCurGenotypes): possibleAlleles[k] |= set(geno.split(" ")) allEqual = True for k in xrange(len(possibleAlleles)): for l in xrange(k+1, len(possibleAlleles)): if possibleAlleles[k] != possibleAlleles[l]: allEqual = False if not allEqual: # The genotypes are not all equal, we set the chosen # genotype to null (0 0) tped[chosenOne, sampleIndex+4] = "0 0" print >>zeroedOutFile, "\t".join([tfam[sampleIndex, 0], tfam[sampleIndex, 1], snpInfo[chosenOne, 1]]) elif genotypes[chosenOne, sampleIndex] == "0 0": toComplete = True elif ((len(uniqueCleanedCurGenotypes) == 1) and (genotypes[chosenOne, sampleIndex] == "0 0")): toComplete = True if toComplete: # We complete the current individual tped[chosenOne, sampleIndex+4] = uniqueCleanedCurGenotypes[0] # We keep only the chose one for index in indexes: if index != chosenOne: 
getRidOfIndex.add(index) print >>removedFile, snpInfo[index, 1] # Writing the not good enough file for item in notGoodEnoughSnps: print >>notGoodEnoughFile, "\t".join(item) # Closing the output files zeroedOutFile.close() notGoodEnoughFile.close() # Printing the chosen file try: shutil.copy(tfamFileName, prefix + ".chosen_snps.tfam") except IOError: msg = "%(tfamFileName)s: can't copy file to " \ "%(prefix)s.chosen_snps.tfam" % locals() raise ProgramError(msg) chosenFile = None try: chosenFile = open(prefix + ".chosen_snps.tped", "w") except IOError: msg = "%(prefix)s.chosen_snps.tped: can't write file" % locals() raise ProgramError(msg) for chosenOne in chosenSNPs.itervalues(): snpID = (tped[chosenOne, 0], tped[chosenOne, 3]) if snpID in snpsToComplete: print >>chosenFile, "\t".join(tped[chosenOne]) chosenFile.close() return tped, getRidOfIndex
[ "def", "createAndCleanTPED", "(", "tped", ",", "tfam", ",", "snps", ",", "prefix", ",", "chosenSNPs", ",", "completion", ",", "concordance", ",", "snpsToComplete", ",", "tfamFileName", ",", "completionT", ",", "concordanceT", ")", ":", "zeroedOutFile", "=", "None", "try", ":", "zeroedOutFile", "=", "open", "(", "prefix", "+", "\".zeroed_out\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix).zeroed_out: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "print", ">>", "zeroedOutFile", ",", "\"\\t\"", ".", "join", "(", "[", "\"famID\"", ",", "\"indID\"", ",", "\"snpID\"", "]", ")", "notGoodEnoughFile", "=", "None", "try", ":", "notGoodEnoughFile", "=", "open", "(", "prefix", "+", "\".not_good_enough\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.not_good_enough: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "print", ">>", "notGoodEnoughFile", ",", "\"\\t\"", ".", "join", "(", "[", "\"name\"", ",", "\"reason\"", "]", ")", "removedFile", "=", "None", "try", ":", "removedFile", "=", "open", "(", "prefix", "+", "\".removed_duplicates\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.removed_duplicates: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "notGoodEnoughSnps", "=", "set", "(", ")", "# Split the tped in 'snpInfo' and 'genotypes'", "snpInfo", "=", "tped", "[", ":", ",", ":", "4", "]", "genotypes", "=", "tped", "[", ":", ",", "4", ":", "]", "# The sed of index we want to get rid of at the end", "getRidOfIndex", "=", "set", "(", ")", "for", "snpID", ",", "indexes", "in", "snps", ".", "iteritems", "(", ")", ":", "if", "snpID", "not", "in", "snpsToComplete", ":", "# We don't want to complete this SNP, so we continue to next SNP", "continue", "# Getting the completion", "completionToRemove", "=", "set", "(", "np", ".", "where", "(", "completion", "[", "indexes", "]", "<", "completionT", ")", "[", "0", "]", ")", "for", "k", "in", "completionToRemove", ":", "notGoodEnoughSnps", ".", "add", "(", "(", "snpInfo", "[", "indexes", "]", "[", "k", ",", "1", "]", ",", "\"completion\"", ")", ")", "# Getting the concordance", "concordanceToRemove", "=", "set", "(", "np", ".", "where", "(", "concordance", "[", "snpID", "]", "<", "concordanceT", ")", "[", "0", "]", ")", "for", "k", "in", "concordanceToRemove", ":", "notGoodEnoughSnps", ".", "add", "(", "(", "snpInfo", "[", "indexes", "]", "[", "k", ",", "1", "]", ",", "\"concordance\"", ")", ")", "# These will be the indexes to remove", "indexesToRemove", "=", "set", "(", ")", "for", "index", "in", "completionToRemove", "|", "concordanceToRemove", ":", "indexesToRemove", ".", "add", "(", "indexes", "[", "index", "]", ")", "# These are the indexes to keep", "indexesToKeep", "=", "[", "]", "for", "index", "in", "indexes", ":", "if", "index", "not", "in", "indexesToRemove", ":", "indexesToKeep", ".", "append", "(", "index", ")", "# Getting the chosen SNP", "chosenOne", "=", "chosenSNPs", "[", "snpID", "]", "if", "chosenOne", "not", "in", "set", "(", "indexesToKeep", ")", ":", "# The chosen SNP is not a good SNP, so we go to next SNP", "logger", ".", "warning", "(", "\" - {} chosen but not good enough\"", ".", "format", "(", "snpInfo", "[", "chosenOne", ",", "1", "]", ",", ")", ")", "continue", "# Now cycling through the genotypes", "nbSamples", "=", "genotypes", ".", "shape", "[", "1", "]", "for", "sampleIndex", "in", "xrange", "(", "nbSamples", ")", ":", "# We need 
to remove the no call and keep the unique genotypes", "curGenotypes", "=", "genotypes", "[", "indexesToKeep", ",", "sampleIndex", "]", "cleanedCurGenotypes", "=", "curGenotypes", "[", "np", ".", "where", "(", "curGenotypes", "!=", "\"0 0\"", ")", "]", "uniqueCleanedCurGenotypes", "=", "np", ".", "unique", "(", "cleanedCurGenotypes", ")", "# Checking the number of unique genotypes", "toComplete", "=", "False", "if", "len", "(", "uniqueCleanedCurGenotypes", ")", ">", "1", ":", "# There are more than one unique genotype (except 0 0)", "# len = 0 means all were 0 0", "# len = 1 means they are all the same", "# len > 1 means discordance (might need to flip)", "# Just need to check the order of the alleles", "possibleAlleles", "=", "[", "set", "(", ")", "for", "k", "in", "xrange", "(", "len", "(", "uniqueCleanedCurGenotypes", ")", ")", "]", "for", "k", ",", "geno", "in", "enumerate", "(", "uniqueCleanedCurGenotypes", ")", ":", "possibleAlleles", "[", "k", "]", "|=", "set", "(", "geno", ".", "split", "(", "\" \"", ")", ")", "allEqual", "=", "True", "for", "k", "in", "xrange", "(", "len", "(", "possibleAlleles", ")", ")", ":", "for", "l", "in", "xrange", "(", "k", "+", "1", ",", "len", "(", "possibleAlleles", ")", ")", ":", "if", "possibleAlleles", "[", "k", "]", "!=", "possibleAlleles", "[", "l", "]", ":", "allEqual", "=", "False", "if", "not", "allEqual", ":", "# The genotypes are not all equal, we set the chosen", "# genotype to null (0 0)", "tped", "[", "chosenOne", ",", "sampleIndex", "+", "4", "]", "=", "\"0 0\"", "print", ">>", "zeroedOutFile", ",", "\"\\t\"", ".", "join", "(", "[", "tfam", "[", "sampleIndex", ",", "0", "]", ",", "tfam", "[", "sampleIndex", ",", "1", "]", ",", "snpInfo", "[", "chosenOne", ",", "1", "]", "]", ")", "elif", "genotypes", "[", "chosenOne", ",", "sampleIndex", "]", "==", "\"0 0\"", ":", "toComplete", "=", "True", "elif", "(", "(", "len", "(", "uniqueCleanedCurGenotypes", ")", "==", "1", ")", "and", "(", "genotypes", "[", "chosenOne", ",", "sampleIndex", "]", "==", "\"0 0\"", ")", ")", ":", "toComplete", "=", "True", "if", "toComplete", ":", "# We complete the current individual", "tped", "[", "chosenOne", ",", "sampleIndex", "+", "4", "]", "=", "uniqueCleanedCurGenotypes", "[", "0", "]", "# We keep only the chose one", "for", "index", "in", "indexes", ":", "if", "index", "!=", "chosenOne", ":", "getRidOfIndex", ".", "add", "(", "index", ")", "print", ">>", "removedFile", ",", "snpInfo", "[", "index", ",", "1", "]", "# Writing the not good enough file", "for", "item", "in", "notGoodEnoughSnps", ":", "print", ">>", "notGoodEnoughFile", ",", "\"\\t\"", ".", "join", "(", "item", ")", "# Closing the output files", "zeroedOutFile", ".", "close", "(", ")", "notGoodEnoughFile", ".", "close", "(", ")", "# Printing the chosen file", "try", ":", "shutil", ".", "copy", "(", "tfamFileName", ",", "prefix", "+", "\".chosen_snps.tfam\"", ")", "except", "IOError", ":", "msg", "=", "\"%(tfamFileName)s: can't copy file to \"", "\"%(prefix)s.chosen_snps.tfam\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "chosenFile", "=", "None", "try", ":", "chosenFile", "=", "open", "(", "prefix", "+", "\".chosen_snps.tped\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.chosen_snps.tped: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "for", "chosenOne", "in", "chosenSNPs", ".", "itervalues", "(", ")", ":", "snpID", "=", "(", "tped", "[", "chosenOne", ",", "0", "]", ",", "tped", "[", "chosenOne", ",", "3", 
"]", ")", "if", "snpID", "in", "snpsToComplete", ":", "print", ">>", "chosenFile", ",", "\"\\t\"", ".", "join", "(", "tped", "[", "chosenOne", "]", ")", "chosenFile", ".", "close", "(", ")", "return", "tped", ",", "getRidOfIndex" ]
Complete a TPED for duplicated SNPs.

:param tped: a representation of the ``tped`` of duplicated markers.
:param tfam: a representation of the ``tfam``.
:param snps: the position of duplicated markers in the ``tped``.
:param prefix: the prefix of the output files.
:param chosenSNPs: the markers that were chosen for completion (including
                   problems).
:param completion: the completion of each of the duplicated markers.
:param concordance: the pairwise concordance of the duplicated markers.
:param snpsToComplete: the markers that will be completed (excluding
                       problems).
:param tfamFileName: the name of the original ``tfam`` file.
:param completionT: the completion threshold.
:param concordanceT: the concordance threshold.

:type tped: numpy.array
:type tfam: list
:type snps: dict
:type prefix: str
:type chosenSNPs: dict
:type completion: numpy.array
:type concordance: dict
:type snpsToComplete: set
:type tfamFileName: str
:type completionT: float
:type concordanceT: float

:returns: a tuple containing the new ``tped`` after completion
          (:py:class:`numpy.array`) as the first element, and the indexes of
          the markers that will need to be removed (:py:class:`set`) as the
          second element.

It creates three different files:

* ``prefix.zeroed_out``: contains information about markers and samples
  where the genotype was zeroed out.
* ``prefix.not_good_enough``: contains information about markers that were
  not good enough to help in completing the chosen markers (because of
  concordance or completion).
* ``prefix.removed_duplicates``: the list of markers that were used for
  completing the chosen one, hence they will be removed from the final data
  set.

Cycling through every genotype of every sample of every duplicated marker,
this function checks whether the genotypes are all the same. If the chosen
one was not called, but the other ones were, the chosen one is completed
with the genotype of the others (assuming that they are all the same). If
there is a difference between the genotypes, the genotype of the chosen
marker is zeroed out.
[ "Complete", "a", "TPED", "for", "duplicated", "SNPs", "." ]
python
train
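The per-sample decision documented above boils down to: gather the calls of all duplicated markers, ignore no-calls, complete the chosen marker when the remaining calls agree, and zero it out when they conflict. A minimal self-contained sketch of that rule (a hypothetical helper with plain strings standing in for the module's numpy arrays, not the module's own API):

def consolidate_call(chosen_call, duplicate_calls):
    """Return the genotype the chosen marker should carry for one sample."""
    observed = {g for g in duplicate_calls if g != "0 0"}
    # "A G" and "G A" are the same unordered genotype.
    unordered = {frozenset(g.split(" ")) for g in observed}
    if len(unordered) > 1:
        return "0 0"            # discordant duplicates: zero out
    if chosen_call == "0 0" and observed:
        return observed.pop()   # complete the missing call
    return chosen_call          # nothing to change

print(consolidate_call("0 0", ["A G", "0 0", "A G"]))  # A G
print(consolidate_call("A G", ["A G", "A A"]))         # 0 0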
quantopian/zipline
zipline/finance/ledger.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L181-L222
def pay_dividends(self, next_trading_day): """ Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends. """ net_cash_payment = 0.0 try: payments = self._unpaid_dividends[next_trading_day] # Mark these dividends as paid by dropping them from our unpaid del self._unpaid_dividends[next_trading_day] except KeyError: payments = [] # representing the fact that we're required to reimburse the owner of # the stock for any dividends paid while borrowing. for payment in payments: net_cash_payment += payment['amount'] # Add stock for any stock dividends paid. Again, the values here may # be negative in the case of short positions. try: stock_payments = self._unpaid_stock_dividends[next_trading_day] except KeyError: stock_payments = [] for stock_payment in stock_payments: payment_asset = stock_payment['payment_asset'] share_count = stock_payment['share_count'] # note we create a Position for stock dividend if we don't # already own the asset if payment_asset in self.positions: position = self.positions[payment_asset] else: position = self.positions[payment_asset] = Position( payment_asset, ) position.amount += share_count return net_cash_payment
[ "def", "pay_dividends", "(", "self", ",", "next_trading_day", ")", ":", "net_cash_payment", "=", "0.0", "try", ":", "payments", "=", "self", ".", "_unpaid_dividends", "[", "next_trading_day", "]", "# Mark these dividends as paid by dropping them from our unpaid", "del", "self", ".", "_unpaid_dividends", "[", "next_trading_day", "]", "except", "KeyError", ":", "payments", "=", "[", "]", "# representing the fact that we're required to reimburse the owner of", "# the stock for any dividends paid while borrowing.", "for", "payment", "in", "payments", ":", "net_cash_payment", "+=", "payment", "[", "'amount'", "]", "# Add stock for any stock dividends paid. Again, the values here may", "# be negative in the case of short positions.", "try", ":", "stock_payments", "=", "self", ".", "_unpaid_stock_dividends", "[", "next_trading_day", "]", "except", "KeyError", ":", "stock_payments", "=", "[", "]", "for", "stock_payment", "in", "stock_payments", ":", "payment_asset", "=", "stock_payment", "[", "'payment_asset'", "]", "share_count", "=", "stock_payment", "[", "'share_count'", "]", "# note we create a Position for stock dividend if we don't", "# already own the asset", "if", "payment_asset", "in", "self", ".", "positions", ":", "position", "=", "self", ".", "positions", "[", "payment_asset", "]", "else", ":", "position", "=", "self", ".", "positions", "[", "payment_asset", "]", "=", "Position", "(", "payment_asset", ",", ")", "position", ".", "amount", "+=", "share_count", "return", "net_cash_payment" ]
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
[ "Returns", "a", "cash", "payment", "based", "on", "the", "dividends", "that", "should", "be", "paid", "out", "according", "to", "the", "accumulated", "bookkeeping", "of", "earned", "unpaid", "and", "stock", "dividends", "." ]
python
train
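The bookkeeping in pay_dividends is a dictionary keyed by pay date whose entries are consumed exactly once. A stripped-down sketch of that idea (cash payments only; the real ledger also handles stock dividends and Position objects):

from collections import defaultdict

unpaid_dividends = defaultdict(list)
unpaid_dividends["2024-06-03"].append({"amount": 12.5})
unpaid_dividends["2024-06-03"].append({"amount": -3.0})  # short position owes

def pay_dividends(next_trading_day):
    # pop() both fetches the payments and marks them as paid
    payments = unpaid_dividends.pop(next_trading_day, [])
    return sum(payment["amount"] for payment in payments)

print(pay_dividends("2024-06-03"))  # 9.5
print(pay_dividends("2024-06-03"))  # 0 (already paid)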
log2timeline/dfdatetime
dfdatetime/time_elements.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/time_elements.py#L497-L526
def CopyFromStringTuple(self, time_elements_tuple): """Copies time elements from string-based time elements tuple. Args: time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]): time elements, contains year, month, day of month, hours, minutes, seconds and fraction of seconds. Raises: ValueError: if the time elements tuple is invalid. """ if len(time_elements_tuple) < 7: raise ValueError(( 'Invalid time elements tuple at least 7 elements required,' 'got: {0:d}').format(len(time_elements_tuple))) super(TimeElementsWithFractionOfSecond, self).CopyFromStringTuple( time_elements_tuple) try: fraction_of_second = decimal.Decimal(time_elements_tuple[6]) except (TypeError, ValueError): raise ValueError('Invalid fraction of second value: {0!s}'.format( time_elements_tuple[6])) if fraction_of_second < 0.0 or fraction_of_second >= 1.0: raise ValueError('Fraction of second value: {0:f} out of bounds.'.format( fraction_of_second)) self.fraction_of_second = fraction_of_second
[ "def", "CopyFromStringTuple", "(", "self", ",", "time_elements_tuple", ")", ":", "if", "len", "(", "time_elements_tuple", ")", "<", "7", ":", "raise", "ValueError", "(", "(", "'Invalid time elements tuple at least 7 elements required,'", "'got: {0:d}'", ")", ".", "format", "(", "len", "(", "time_elements_tuple", ")", ")", ")", "super", "(", "TimeElementsWithFractionOfSecond", ",", "self", ")", ".", "CopyFromStringTuple", "(", "time_elements_tuple", ")", "try", ":", "fraction_of_second", "=", "decimal", ".", "Decimal", "(", "time_elements_tuple", "[", "6", "]", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "'Invalid fraction of second value: {0!s}'", ".", "format", "(", "time_elements_tuple", "[", "6", "]", ")", ")", "if", "fraction_of_second", "<", "0.0", "or", "fraction_of_second", ">=", "1.0", ":", "raise", "ValueError", "(", "'Fraction of second value: {0:f} out of bounds.'", ".", "format", "(", "fraction_of_second", ")", ")", "self", ".", "fraction_of_second", "=", "fraction_of_second" ]
Copies time elements from string-based time elements tuple. Args: time_elements_tuple (Optional[tuple[str, str, str, str, str, str, str]]): time elements, contains year, month, day of month, hours, minutes, seconds and fraction of seconds. Raises: ValueError: if the time elements tuple is invalid.
[ "Copies", "time", "elements", "from", "string", "-", "based", "time", "elements", "tuple", "." ]
python
train
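A short usage sketch, assuming dfdatetime is installed and that the class can be constructed without arguments (both are assumptions, not stated in the record):

from dfdatetime import time_elements

date_time = time_elements.TimeElementsWithFractionOfSecond()
date_time.CopyFromStringTuple(('2024', '6', '15', '12', '30', '45', '0.5'))
print(date_time.fraction_of_second)  # Decimal('0.5')

try:
    # fewer than 7 elements is rejected
    date_time.CopyFromStringTuple(('2024', '6', '15'))
except ValueError as error:
    print(error)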
hyperledger/indy-plenum
plenum/server/primary_decider.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/primary_decider.py#L132-L139
def send(self, msg): """ Send a message to the node on which this replica resides. :param msg: the message to send """ logger.debug("{}'s elector sending {}".format(self.name, msg)) self.outBox.append(msg)
[ "def", "send", "(", "self", ",", "msg", ")", ":", "logger", ".", "debug", "(", "\"{}'s elector sending {}\"", ".", "format", "(", "self", ".", "name", ",", "msg", ")", ")", "self", ".", "outBox", ".", "append", "(", "msg", ")" ]
Send a message to the node on which this replica resides. :param msg: the message to send
[ "Send", "a", "message", "to", "the", "node", "on", "which", "this", "replica", "resides", "." ]
python
train
exosite-labs/pyonep
pyonep/onep.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/onep.py#L279-L287
def create(self, auth, type, desc, defer=False): """ Create something in Exosite. Args: auth: <cik> type: What thing to create. desc: Information about thing. """ return self._call('create', auth, [type, desc], defer)
[ "def", "create", "(", "self", ",", "auth", ",", "type", ",", "desc", ",", "defer", "=", "False", ")", ":", "return", "self", ".", "_call", "(", "'create'", ",", "auth", ",", "[", "type", ",", "desc", "]", ",", "defer", ")" ]
Create something in Exosite. Args: auth: <cik> type: What thing to create. desc: Information about thing.
[ "Create", "something", "in", "Exosite", "." ]
python
train
warrenspe/hconf
hconf/Config.py
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/Config.py#L75-L98
def addConfig(self, name, default=None, cast=None, required=False, description=None): """ Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag. """ # Validate the name if not self.configNameRE.match(name): raise InvalidConfigurationException("Invalid configuration name: %s" % name) self.configs[self._sanitizeName(name)] = { 'default': default, 'cast': cast, 'required': required, 'description': description }
[ "def", "addConfig", "(", "self", ",", "name", ",", "default", "=", "None", ",", "cast", "=", "None", ",", "required", "=", "False", ",", "description", "=", "None", ")", ":", "# Validate the name", "if", "not", "self", ".", "configNameRE", ".", "match", "(", "name", ")", ":", "raise", "InvalidConfigurationException", "(", "\"Invalid configuration name: %s\"", "%", "name", ")", "self", ".", "configs", "[", "self", ".", "_sanitizeName", "(", "name", ")", "]", "=", "{", "'default'", ":", "default", ",", "'cast'", ":", "cast", ",", "'required'", ":", "required", ",", "'description'", ":", "description", "}" ]
Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag.
[ "Adds", "the", "given", "configuration", "option", "to", "the", "ConfigManager", "." ]
python
train
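The registration pattern above (validate the name, then store a small option descriptor) is easy to reproduce standalone. A hypothetical mini-manager for illustration, not the hconf API itself:

import re

class MiniConfigManager:
    config_name_re = re.compile(r"^[A-Za-z_][A-Za-z0-9_-]*$")

    def __init__(self):
        self.configs = {}

    def add_config(self, name, default=None, cast=None, required=False,
                   description=None):
        if not self.config_name_re.match(name):
            raise ValueError("Invalid configuration name: %s" % name)
        self.configs[name.lower()] = {
            "default": default, "cast": cast,
            "required": required, "description": description,
        }

manager = MiniConfigManager()
manager.add_config("port", default=8080, cast=int, required=True,
                   description="TCP port to listen on")
print(manager.configs["port"]["cast"]("9090"))  # 9090, cast to int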
angr/angr
angr/simos/cgc.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/simos/cgc.py#L29-L68
def state_blank(self, flag_page=None, **kwargs): """ :param flag_page: Flag page content, either a string or a list of BV8s """ s = super(SimCGC, self).state_blank(**kwargs) # pylint:disable=invalid-name # Special stack base for CGC binaries to work with Shellphish CRS s.regs.sp = 0xbaaaaffc # Map the special cgc memory if o.ABSTRACT_MEMORY not in s.options: s.memory.mem._preapproved_stack = IRange(0xbaaab000 - 1024 * 1024 * 8, 0xbaaab000) s.memory.map_region(0x4347c000, 4096, 1) # Create the CGC plugin s.get_plugin('cgc') # Set up the flag page if flag_page is None: flag_page = [s.solver.BVS("cgc-flag-byte-%d" % i, 8, key=('flag', i), eternal=True) for i in range(0x1000)] elif type(flag_page) is bytes: flag_page = [s.solver.BVV(c, 8) for c in flag_page] elif type(flag_page) is list: pass else: raise ValueError("Bad flag page: expected None, bytestring, or list, but got %s" % type(flag_page)) s.cgc.flag_bytes = flag_page if s.mode != 'static': s.memory.store(0x4347c000, claripy.Concat(*s.cgc.flag_bytes), priv=True) # set up the address for concrete transmits s.unicorn.transmit_addr = self.syscall_from_number(2).addr s.libc.max_str_len = 1000000 s.libc.max_strtol_len = 10 s.libc.max_memcpy_size = 0x100000 s.libc.max_buffer_size = 0x100000 return s
[ "def", "state_blank", "(", "self", ",", "flag_page", "=", "None", ",", "*", "*", "kwargs", ")", ":", "s", "=", "super", "(", "SimCGC", ",", "self", ")", ".", "state_blank", "(", "*", "*", "kwargs", ")", "# pylint:disable=invalid-name", "# Special stack base for CGC binaries to work with Shellphish CRS", "s", ".", "regs", ".", "sp", "=", "0xbaaaaffc", "# Map the special cgc memory", "if", "o", ".", "ABSTRACT_MEMORY", "not", "in", "s", ".", "options", ":", "s", ".", "memory", ".", "mem", ".", "_preapproved_stack", "=", "IRange", "(", "0xbaaab000", "-", "1024", "*", "1024", "*", "8", ",", "0xbaaab000", ")", "s", ".", "memory", ".", "map_region", "(", "0x4347c000", ",", "4096", ",", "1", ")", "# Create the CGC plugin", "s", ".", "get_plugin", "(", "'cgc'", ")", "# Set up the flag page", "if", "flag_page", "is", "None", ":", "flag_page", "=", "[", "s", ".", "solver", ".", "BVS", "(", "\"cgc-flag-byte-%d\"", "%", "i", ",", "8", ",", "key", "=", "(", "'flag'", ",", "i", ")", ",", "eternal", "=", "True", ")", "for", "i", "in", "range", "(", "0x1000", ")", "]", "elif", "type", "(", "flag_page", ")", "is", "bytes", ":", "flag_page", "=", "[", "s", ".", "solver", ".", "BVV", "(", "c", ",", "8", ")", "for", "c", "in", "flag_page", "]", "elif", "type", "(", "flag_page", ")", "is", "list", ":", "pass", "else", ":", "raise", "ValueError", "(", "\"Bad flag page: expected None, bytestring, or list, but got %s\"", "%", "type", "(", "flag_page", ")", ")", "s", ".", "cgc", ".", "flag_bytes", "=", "flag_page", "if", "s", ".", "mode", "!=", "'static'", ":", "s", ".", "memory", ".", "store", "(", "0x4347c000", ",", "claripy", ".", "Concat", "(", "*", "s", ".", "cgc", ".", "flag_bytes", ")", ",", "priv", "=", "True", ")", "# set up the address for concrete transmits", "s", ".", "unicorn", ".", "transmit_addr", "=", "self", ".", "syscall_from_number", "(", "2", ")", ".", "addr", "s", ".", "libc", ".", "max_str_len", "=", "1000000", "s", ".", "libc", ".", "max_strtol_len", "=", "10", "s", ".", "libc", ".", "max_memcpy_size", "=", "0x100000", "s", ".", "libc", ".", "max_buffer_size", "=", "0x100000", "return", "s" ]
:param flag_page: Flag page content, either a string or a list of BV8s
[ ":", "param", "flag_page", ":", "Flag", "page", "content", "either", "a", "string", "or", "a", "list", "of", "BV8s" ]
python
train
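The flag-page handling above is a three-way normalization: None becomes 0x1000 fresh symbolic bytes, a bytestring becomes one value per byte, and a list passes through unchanged. A standalone sketch with plain bytes standing in for claripy bitvectors:

def normalize_flag_page(flag_page=None):
    if flag_page is None:
        return [b"?"] * 0x1000                  # fresh "symbolic" bytes
    if isinstance(flag_page, bytes):
        return [bytes([c]) for c in flag_page]  # one concrete value per byte
    if isinstance(flag_page, list):
        return flag_page
    raise ValueError("Bad flag page: expected None, bytestring, or list, "
                     "but got %s" % type(flag_page))

print(len(normalize_flag_page()))  # 4096
print(normalize_flag_page(b"AB"))  # [b'A', b'B']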
CivicSpleen/ambry
ambry/library/search_backends/postgres_backend.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/postgres_backend.py#L515-L523
def is_indexed(self, partition): """ Returns True if partition is already indexed. Otherwise returns False. """ query = text(""" SELECT vid FROM partition_index WHERE vid = :vid; """) result = self.execute(query, vid=partition.vid) return bool(result.fetchall())
[ "def", "is_indexed", "(", "self", ",", "partition", ")", ":", "query", "=", "text", "(", "\"\"\"\n SELECT vid\n FROM partition_index\n WHERE vid = :vid;\n \"\"\"", ")", "result", "=", "self", ".", "execute", "(", "query", ",", "vid", "=", "partition", ".", "vid", ")", "return", "bool", "(", "result", ".", "fetchall", "(", ")", ")" ]
Returns True if partition is already indexed. Otherwise returns False.
[ "Returns", "True", "if", "partition", "is", "already", "indexed", ".", "Otherwise", "returns", "False", "." ]
python
train
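The same parameterized exists-style lookup can be tried end to end against an in-memory SQLite table (SQLAlchemy 1.4+ assumed; the table name is reused purely for illustration):

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
with engine.connect() as connection:
    connection.execute(text("CREATE TABLE partition_index (vid TEXT)"))
    connection.execute(text("INSERT INTO partition_index VALUES ('p001')"))

    def is_indexed(vid):
        result = connection.execute(
            text("SELECT vid FROM partition_index WHERE vid = :vid"),
            {"vid": vid})
        return bool(result.fetchall())

    print(is_indexed("p001"))  # True
    print(is_indexed("p999"))  # False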
django-danceschool/django-danceschool
danceschool/private_lessons/forms.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/private_lessons/forms.py#L70-L94
def clean(self): ''' Only allow submission if there are not already slots in the submitted window, and only allow rooms associated with the chosen location. ''' super(SlotCreationForm,self).clean() startDate = self.cleaned_data.get('startDate') endDate = self.cleaned_data.get('endDate') startTime = self.cleaned_data.get('startTime') endTime = self.cleaned_data.get('endTime') instructor = self.cleaned_data.get('instructorId') existingSlots = InstructorAvailabilitySlot.objects.filter( instructor=instructor, startTime__gt=( ensure_localtime(datetime.combine(startDate,startTime)) - timedelta(minutes=getConstant('privateLessons__lessonLengthInterval')) ), startTime__lt=ensure_localtime(datetime.combine(endDate,endTime)), ) if existingSlots.exists(): raise ValidationError(_('Newly created slots cannot overlap existing slots for this instructor.'),code='invalid')
[ "def", "clean", "(", "self", ")", ":", "super", "(", "SlotCreationForm", ",", "self", ")", ".", "clean", "(", ")", "startDate", "=", "self", ".", "cleaned_data", ".", "get", "(", "'startDate'", ")", "endDate", "=", "self", ".", "cleaned_data", ".", "get", "(", "'endDate'", ")", "startTime", "=", "self", ".", "cleaned_data", ".", "get", "(", "'startTime'", ")", "endTime", "=", "self", ".", "cleaned_data", ".", "get", "(", "'endTime'", ")", "instructor", "=", "self", ".", "cleaned_data", ".", "get", "(", "'instructorId'", ")", "existingSlots", "=", "InstructorAvailabilitySlot", ".", "objects", ".", "filter", "(", "instructor", "=", "instructor", ",", "startTime__gt", "=", "(", "ensure_localtime", "(", "datetime", ".", "combine", "(", "startDate", ",", "startTime", ")", ")", "-", "timedelta", "(", "minutes", "=", "getConstant", "(", "'privateLessons__lessonLengthInterval'", ")", ")", ")", ",", "startTime__lt", "=", "ensure_localtime", "(", "datetime", ".", "combine", "(", "endDate", ",", "endTime", ")", ")", ",", ")", "if", "existingSlots", ".", "exists", "(", ")", ":", "raise", "ValidationError", "(", "_", "(", "'Newly created slots cannot overlap existing slots for this instructor.'", ")", ",", "code", "=", "'invalid'", ")" ]
Only allow submission if there are not already slots in the submitted window, and only allow rooms associated with the chosen location.
[ "Only", "allow", "submission", "if", "there", "are", "not", "already", "slots", "in", "the", "submitted", "window", "and", "only", "allow", "rooms", "associated", "with", "the", "chosen", "location", "." ]
python
train
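The validation reduces to an interval check: a new window conflicts with any existing slot whose start time falls after the window start minus the lesson-length interval and before the window end. A standalone sketch with hypothetical slot data:

from datetime import datetime, timedelta

lesson_interval = timedelta(minutes=30)
existing_slot_starts = [datetime(2024, 6, 1, 10, 0), datetime(2024, 6, 1, 14, 0)]

def window_is_free(start, end):
    return not any(start - lesson_interval < slot < end
                   for slot in existing_slot_starts)

print(window_is_free(datetime(2024, 6, 1, 11, 0), datetime(2024, 6, 1, 12, 0)))   # True
print(window_is_free(datetime(2024, 6, 1, 13, 45), datetime(2024, 6, 1, 15, 0)))  # False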
pypyr/pypyr-cli
pypyr/utils/filesystem.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L252-L303
def in_to_out(self, in_path, out_path=None): """Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ is_in_place_edit = False if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None is_in_place_edit = True logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: if out_path: logger.debug( f"opening destination file for writing: {out_path}") ensure_dir(out_path) with open(out_path, 'w') as outfile: outfile.writelines(self.formatter(infile)) return else: logger.debug("opening temp file for writing...") with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: outfile.writelines(self.formatter(infile)) is_in_place_edit = True # only replace infile AFTER it's closed, outside the with. # pragma exclude because func actually returns on 287 in if out_path, # and cov not smart enough to realize that !is_in_place_edit won't ever # happen here (the function will have exited already) if is_in_place_edit: # pragma: no branch logger.debug(f"moving temp file to: {in_path}") move_temp_file(outfile.name, infile.name)
[ "def", "in_to_out", "(", "self", ",", "in_path", ",", "out_path", "=", "None", ")", ":", "is_in_place_edit", "=", "False", "if", "is_same_file", "(", "in_path", ",", "out_path", ")", ":", "logger", ".", "debug", "(", "\"in path and out path are the same file. writing to temp \"", "\"file and then replacing in path with the temp file.\"", ")", "out_path", "=", "None", "is_in_place_edit", "=", "True", "logger", ".", "debug", "(", "f\"opening source file: {in_path}\"", ")", "with", "open", "(", "in_path", ")", "as", "infile", ":", "if", "out_path", ":", "logger", ".", "debug", "(", "f\"opening destination file for writing: {out_path}\"", ")", "ensure_dir", "(", "out_path", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "writelines", "(", "self", ".", "formatter", "(", "infile", ")", ")", "return", "else", ":", "logger", ".", "debug", "(", "\"opening temp file for writing...\"", ")", "with", "NamedTemporaryFile", "(", "mode", "=", "'w+t'", ",", "dir", "=", "os", ".", "path", ".", "dirname", "(", "in_path", ")", ",", "delete", "=", "False", ")", "as", "outfile", ":", "outfile", ".", "writelines", "(", "self", ".", "formatter", "(", "infile", ")", ")", "is_in_place_edit", "=", "True", "# only replace infile AFTER it's closed, outside the with.", "# pragma exclude because func actually returns on 287 in if out_path,", "# and cov not smart enough to realize that !is_in_place_edit won't ever", "# happen here (the function will have exited already)", "if", "is_in_place_edit", ":", "# pragma: no branch", "logger", ".", "debug", "(", "f\"moving temp file to: {in_path}\"", ")", "move_temp_file", "(", "outfile", ".", "name", ",", "infile", ".", "name", ")" ]
Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None.
[ "Write", "a", "single", "file", "in", "to", "out", "running", "self", ".", "formatter", "on", "each", "line", "." ]
python
train
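The core trick, writing through a temp file in the same directory and then swapping it over the original, works on its own. A minimal sketch (os.replace stands in for the module's move_temp_file helper):

import os
from tempfile import NamedTemporaryFile

def edit_in_place(path, formatter):
    with open(path) as infile, NamedTemporaryFile(
            mode="w+t", dir=os.path.dirname(os.path.abspath(path)),
            delete=False) as outfile:
        outfile.writelines(formatter(infile))
    os.replace(outfile.name, path)  # atomic on the same filesystem

with open("demo.txt", "w") as handle:
    handle.write("hello\nworld\n")
edit_in_place("demo.txt", lambda lines: (line.upper() for line in lines))
print(open("demo.txt").read())  # HELLO / WORLD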
tamasgal/km3pipe
km3pipe/shell.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L227-L229
def _add_two_argument_command(self, command, arg1, arg2): """Helper function for two-argument commands""" self.lines.append("{} {} {}".format(command, arg1, arg2))
[ "def", "_add_two_argument_command", "(", "self", ",", "command", ",", "arg1", ",", "arg2", ")", ":", "self", ".", "lines", ".", "append", "(", "\"{} {} {}\"", ".", "format", "(", "command", ",", "arg1", ",", "arg2", ")", ")" ]
Helper function for two-argument commands
[ "Helper", "function", "for", "two", "-", "argument", "commands" ]
python
train
materialsproject/pymatgen
pymatgen/core/surface.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/surface.py#L336-L349
def dipole(self): """ Calculates the dipole of the Slab in the direction of the surface normal. Note that the Slab must be oxidation state-decorated for this to work properly. Otherwise, the Slab will always have a dipole of 0. """ dipole = np.zeros(3) mid_pt = np.sum(self.cart_coords, axis=0) / len(self) normal = self.normal for site in self: charge = sum([getattr(sp, "oxi_state", 0) * amt for sp, amt in site.species.items()]) dipole += charge * np.dot(site.coords - mid_pt, normal) * normal return dipole
[ "def", "dipole", "(", "self", ")", ":", "dipole", "=", "np", ".", "zeros", "(", "3", ")", "mid_pt", "=", "np", ".", "sum", "(", "self", ".", "cart_coords", ",", "axis", "=", "0", ")", "/", "len", "(", "self", ")", "normal", "=", "self", ".", "normal", "for", "site", "in", "self", ":", "charge", "=", "sum", "(", "[", "getattr", "(", "sp", ",", "\"oxi_state\"", ",", "0", ")", "*", "amt", "for", "sp", ",", "amt", "in", "site", ".", "species", ".", "items", "(", ")", "]", ")", "dipole", "+=", "charge", "*", "np", ".", "dot", "(", "site", ".", "coords", "-", "mid_pt", ",", "normal", ")", "*", "normal", "return", "dipole" ]
Calculates the dipole of the Slab in the direction of the surface normal. Note that the Slab must be oxidation state-decorated for this to work properly. Otherwise, the Slab will always have a dipole of 0.
[ "Calculates", "the", "dipole", "of", "the", "Slab", "in", "the", "direction", "of", "the", "surface", "normal", ".", "Note", "that", "the", "Slab", "must", "be", "oxidation", "state", "-", "decorated", "for", "this", "to", "work", "properly", ".", "Otherwise", "the", "Slab", "will", "always", "have", "a", "dipole", "of", "0", "." ]
python
train
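The dipole sum is a charge-weighted projection of each site's offset from the geometric midpoint onto the surface normal. A self-contained numeric check with made-up charges and coordinates:

import numpy as np

coords = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]])
charges = np.array([+1.0, -1.0])      # stand-ins for oxidation states
normal = np.array([0.0, 0.0, 1.0])

mid_pt = np.sum(coords, axis=0) / len(coords)
dipole = sum(q * np.dot(r - mid_pt, normal) * normal
             for q, r in zip(charges, coords))
print(dipole)  # [ 0.  0. -2.]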
AgeOfLearning/coeus-unity-python-framework
coeus_unity/assertions.py
https://github.com/AgeOfLearning/coeus-unity-python-framework/blob/cf8ca6800ace1425d917ea2628dbd05ed959fdd7/coeus_unity/assertions.py#L93-L105
def assert_await_scene_loaded(cli, scene_name, is_loaded=DEFAULT_SCENE_LOADED, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
    """
    Asserts that we successfully awaited the scene to be loaded based on is_loaded.
    If the timeout passes or the expression is_loaded != actual state, then it will fail.
    :param cli:
    :param scene_name:
    :param is_loaded: (True | False) the state change we are waiting for.
    :param timeout_seconds: The amount of time to wait for a change before failing.
    :return:
    """
    result = commands.await_scene_loaded(cli, scene_name, is_loaded, timeout_seconds)
    assert result is True
    return result
[ "def", "assert_await_scene_loaded", "(", "cli", ",", "scene_name", ",", "is_loaded", "=", "DEFAULT_SCENE_LOADED", ",", "timeout_seconds", "=", "DEFAULT_TIMEOUT_SECONDS", ")", ":", "result", "=", "commands", ".", "await_scene_loaded", "(", "cli", ",", "scene_name", ",", "is_loaded", ",", "timeout_seconds", ")", "assert", "result", "is", "True", "return", "result" ]
Asserts that we successfully awaited the scene to be loaded based on is_loaded.
If the timeout passes or the expression is_loaded != actual state, then it will fail.
:param cli:
:param scene_name:
:param is_loaded: (True | False) the state change we are waiting for.
:param timeout_seconds: The amount of time to wait for a change before failing.
:return:
[ "Asserts", "that", "we", "successfully", "awaited", "for", "the", "scene", "to", "be", "loaded", "based", "on", "is_loaded", ".", "If", "the", "timeout", "passes", "or", "the", "expression", "is_registered", "!", "=", "actual", "state", "then", "it", "will", "fail", ".", ":", "param", "cli", ":", ":", "param", "scene_name", ":", ":", "param", "is_loaded", ":", "(", "True", "|", "False", ")", "the", "state", "change", "we", "are", "waiting", "for", ".", ":", "param", "timeout_seconds", ":", "The", "amount", "of", "time", "to", "wait", "for", "a", "change", "before", "fail", ".", ":", "return", ":" ]
python
train
KelSolaar/Umbra
umbra/ui/widgets/search_QLineEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/search_QLineEdit.py#L451-L460
def __set_style_sheet(self):
    """
    Sets the Widget stylesheet.
    """

    frame_width = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
    self.setStyleSheet(QString("QLineEdit {{ padding-left: {0}px; padding-right: {1}px; }}".format(
        (self.__search_active_label.sizeHint().width()
         if self.__search_active_label.isVisible() else 0) + frame_width,
        self.__clear_button.sizeHint().width() + frame_width)))
[ "def", "__set_style_sheet", "(", "self", ")", ":", "frame_width", "=", "self", ".", "style", "(", ")", ".", "pixelMetric", "(", "QStyle", ".", "PM_DefaultFrameWidth", ")", "self", ".", "setStyleSheet", "(", "QString", "(", "\"QLineEdit {{ padding-left: {0}px; padding-right: {1}px; }}\"", ".", "format", "(", "self", ".", "__search_active_label", ".", "sizeHint", "(", ")", ".", "width", "(", ")", "if", "self", ".", "__search_active_label", ".", "isVisible", "(", ")", "else", "0", "+", "frame_width", ",", "self", ".", "__clear_button", ".", "sizeHint", "(", ")", ".", "width", "(", ")", "+", "frame_width", ")", ")", ")" ]
Sets the Widget stylesheet.
[ "Sets", "the", "Widget", "stylesheet", "." ]
python
train
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L310-L326
def get_condition_value(self, operator, value):
    """
    Gets the condition value based on the operator and value

    :param operator: the condition operator name
    :type operator: str
    :param value: the value to be formatted based on the condition operator
    :type value: object

    :return: the condition value, formatted according to the condition operator
    :rtype: str
    """
    if operator in ('contains', 'icontains'):
        value = '%{0}%'.format(value)
    elif operator == 'startswith':
        value = '{0}%'.format(value)
    return value
[ "def", "get_condition_value", "(", "self", ",", "operator", ",", "value", ")", ":", "if", "operator", "in", "(", "'contains'", ",", "'icontains'", ")", ":", "value", "=", "'%{0}%'", ".", "format", "(", "value", ")", "elif", "operator", "==", "'startswith'", ":", "value", "=", "'{0}%'", ".", "format", "(", "value", ")", "return", "value" ]
Gets the condition value based on the operator and value

:param operator: the condition operator name
:type operator: str
:param value: the value to be formatted based on the condition operator
:type value: object

:return: the condition value, formatted according to the condition operator
:rtype: str
[ "Gets", "the", "condition", "value", "based", "on", "the", "operator", "and", "value" ]
python
train
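The operator-to-wildcard mapping can be exercised directly; the returned strings are what end up bound to a SQL LIKE clause:

def like_value(operator, value):
    if operator in ("contains", "icontains"):
        return "%{0}%".format(value)
    if operator == "startswith":
        return "{0}%".format(value)
    return value

print(like_value("icontains", "smith"))   # %smith%
print(like_value("startswith", "smith"))  # smith%
print(like_value("exact", "smith"))       # smith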
DataBiosphere/dsub
dsub/providers/local.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/local.py#L704-L745
def _delocalize_logging_command(self, logging_path, user_project): """Returns a command to delocalize logs. Args: logging_path: location of log files. user_project: name of the project to be billed for the request. Returns: eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12' """ # Get the logging prefix (everything up to ".log") logging_prefix = os.path.splitext(logging_path.uri)[0] # Set the provider-specific mkdir and file copy commands if logging_path.file_provider == job_model.P_LOCAL: mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(logging_prefix) cp_cmd = 'cp' elif logging_path.file_provider == job_model.P_GCS: mkdir_cmd = '' if user_project: cp_cmd = 'gsutil -u {} -mq cp'.format(user_project) else: cp_cmd = 'gsutil -mq cp' else: assert False # Construct the copy command copy_logs_cmd = textwrap.dedent("""\ local cp_cmd="{cp_cmd}" local prefix="{prefix}" """).format( cp_cmd=cp_cmd, prefix=logging_prefix) # Build up the command body = textwrap.dedent("""\ {mkdir_cmd} {copy_logs_cmd} """).format( mkdir_cmd=mkdir_cmd, copy_logs_cmd=copy_logs_cmd) return body
[ "def", "_delocalize_logging_command", "(", "self", ",", "logging_path", ",", "user_project", ")", ":", "# Get the logging prefix (everything up to \".log\")", "logging_prefix", "=", "os", ".", "path", ".", "splitext", "(", "logging_path", ".", "uri", ")", "[", "0", "]", "# Set the provider-specific mkdir and file copy commands", "if", "logging_path", ".", "file_provider", "==", "job_model", ".", "P_LOCAL", ":", "mkdir_cmd", "=", "'mkdir -p \"%s\"\\n'", "%", "os", ".", "path", ".", "dirname", "(", "logging_prefix", ")", "cp_cmd", "=", "'cp'", "elif", "logging_path", ".", "file_provider", "==", "job_model", ".", "P_GCS", ":", "mkdir_cmd", "=", "''", "if", "user_project", ":", "cp_cmd", "=", "'gsutil -u {} -mq cp'", ".", "format", "(", "user_project", ")", "else", ":", "cp_cmd", "=", "'gsutil -mq cp'", "else", ":", "assert", "False", "# Construct the copy command", "copy_logs_cmd", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n local cp_cmd=\"{cp_cmd}\"\n local prefix=\"{prefix}\"\n \"\"\"", ")", ".", "format", "(", "cp_cmd", "=", "cp_cmd", ",", "prefix", "=", "logging_prefix", ")", "# Build up the command", "body", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n {mkdir_cmd}\n {copy_logs_cmd}\n \"\"\"", ")", ".", "format", "(", "mkdir_cmd", "=", "mkdir_cmd", ",", "copy_logs_cmd", "=", "copy_logs_cmd", ")", "return", "body" ]
Returns a command to delocalize logs. Args: logging_path: location of log files. user_project: name of the project to be billed for the request. Returns: eg. 'gs://bucket/path/myfile' or 'gs://bucket/script-foobar-12'
[ "Returns", "a", "command", "to", "delocalize", "logs", "." ]
python
valid
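The provider switch above picks between a plain cp and a gsutil cp (optionally billed to a user project) before templating the shell snippet. A condensed standalone sketch with simplified provider names:

import os
import textwrap

def logging_copy_snippet(logging_uri, provider, user_project=None):
    prefix = os.path.splitext(logging_uri)[0]
    if provider == "local":
        mkdir_cmd = 'mkdir -p "%s"\n' % os.path.dirname(prefix)
        cp_cmd = "cp"
    elif provider == "gcs":
        mkdir_cmd = ""
        cp_cmd = ("gsutil -u %s -mq cp" % user_project
                  if user_project else "gsutil -mq cp")
    else:
        raise ValueError("unknown provider: %s" % provider)
    return mkdir_cmd + textwrap.dedent("""\
        local cp_cmd="{cp_cmd}"
        local prefix="{prefix}"
        """).format(cp_cmd=cp_cmd, prefix=prefix)

print(logging_copy_snippet("gs://bucket/job.log", "gcs", "my-project"))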
MacHu-GWU/crawl_zillow-project
crawl_zillow/scheduler.py
https://github.com/MacHu-GWU/crawl_zillow-project/blob/c6d7ca8e4c80e7e7e963496433ef73df1413c16e/crawl_zillow/scheduler.py#L78-L140
def user_process(self, input_data): """ :param input_data: :return: output_data, list of next model instance. For example, if model is :class:`~crawl_zillow.model.State`, then next model is :class:`~crawl_zillow.model.County`. """ url = input_data.doc.url self.logger.info("Crawl %s ." % url, 1) output_data = OutputData(data=list()) try: html = get_html( url, wait_time=Config.Crawler.wait_time, driver=self._selenium_driver, **input_data.get_html_kwargs) # some this model's attributes will also available in next model d = input_data.doc.to_dict() del d[primary_key] del d[status_key] del d[edit_at_key] del d[n_children_key] try: for href, name in htmlparser.get_items(html): data = { primary_key: href, self.next_model_col_name: name, } data.update(d) next_model_instance = self.next_model(**data) output_data.data.append(next_model_instance) self.logger.info(Status.S50_Finished.description, 1) output_data.status = Status.S50_Finished.id except Exception as e: raise exc.ParseError except exc.CaptchaError as e: time.sleep(10.0) # Wait for 10 seconds to solve Captcha self.logger.info(Status.S20_WrongPage.description, 1) output_data.status = Status.S20_WrongPage.id output_data.errors = e except exc.WrongHtmlError as e: self.logger.info(Status.S20_WrongPage.description, 1) output_data.status = Status.S20_WrongPage.id output_data.errors = e except exc.ParseError as e: self.logger.info(Status.S30_ParseError.description, 1) output_data.status = Status.S30_ParseError.id output_data.errors = e except exc.ServerSideError as e: self.logger.info(Status.S60_ServerSideError.description, 1) output_data.status = Status.S60_ServerSideError.id output_data.errors = e except Exception as e: self.logger.info(Status.S10_HttpError.description, 1) output_data.status = Status.S10_HttpError.id output_data.errors = e # output_data.data = output_data.data[:2] # COMMENT OUT IN PROD return output_data
[ "def", "user_process", "(", "self", ",", "input_data", ")", ":", "url", "=", "input_data", ".", "doc", ".", "url", "self", ".", "logger", ".", "info", "(", "\"Crawl %s .\"", "%", "url", ",", "1", ")", "output_data", "=", "OutputData", "(", "data", "=", "list", "(", ")", ")", "try", ":", "html", "=", "get_html", "(", "url", ",", "wait_time", "=", "Config", ".", "Crawler", ".", "wait_time", ",", "driver", "=", "self", ".", "_selenium_driver", ",", "*", "*", "input_data", ".", "get_html_kwargs", ")", "# some this model's attributes will also available in next model", "d", "=", "input_data", ".", "doc", ".", "to_dict", "(", ")", "del", "d", "[", "primary_key", "]", "del", "d", "[", "status_key", "]", "del", "d", "[", "edit_at_key", "]", "del", "d", "[", "n_children_key", "]", "try", ":", "for", "href", ",", "name", "in", "htmlparser", ".", "get_items", "(", "html", ")", ":", "data", "=", "{", "primary_key", ":", "href", ",", "self", ".", "next_model_col_name", ":", "name", ",", "}", "data", ".", "update", "(", "d", ")", "next_model_instance", "=", "self", ".", "next_model", "(", "*", "*", "data", ")", "output_data", ".", "data", ".", "append", "(", "next_model_instance", ")", "self", ".", "logger", ".", "info", "(", "Status", ".", "S50_Finished", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S50_Finished", ".", "id", "except", "Exception", "as", "e", ":", "raise", "exc", ".", "ParseError", "except", "exc", ".", "CaptchaError", "as", "e", ":", "time", ".", "sleep", "(", "10.0", ")", "# Wait for 10 seconds to solve Captcha", "self", ".", "logger", ".", "info", "(", "Status", ".", "S20_WrongPage", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S20_WrongPage", ".", "id", "output_data", ".", "errors", "=", "e", "except", "exc", ".", "WrongHtmlError", "as", "e", ":", "self", ".", "logger", ".", "info", "(", "Status", ".", "S20_WrongPage", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S20_WrongPage", ".", "id", "output_data", ".", "errors", "=", "e", "except", "exc", ".", "ParseError", "as", "e", ":", "self", ".", "logger", ".", "info", "(", "Status", ".", "S30_ParseError", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S30_ParseError", ".", "id", "output_data", ".", "errors", "=", "e", "except", "exc", ".", "ServerSideError", "as", "e", ":", "self", ".", "logger", ".", "info", "(", "Status", ".", "S60_ServerSideError", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S60_ServerSideError", ".", "id", "output_data", ".", "errors", "=", "e", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "info", "(", "Status", ".", "S10_HttpError", ".", "description", ",", "1", ")", "output_data", ".", "status", "=", "Status", ".", "S10_HttpError", ".", "id", "output_data", ".", "errors", "=", "e", "# output_data.data = output_data.data[:2] # COMMENT OUT IN PROD", "return", "output_data" ]
:param input_data: :return: output_data, list of next model instance. For example, if model is :class:`~crawl_zillow.model.State`, then next model is :class:`~crawl_zillow.model.County`.
[ ":", "param", "input_data", ":", ":", "return", ":", "output_data", "list", "of", "next", "model", "instance", ".", "For", "example", "if", "model", "is", ":", "class", ":", "~crawl_zillow", ".", "model", ".", "State", "then", "next", "model", "is", ":", "class", ":", "~crawl_zillow", ".", "model", ".", "County", "." ]
python
train
nabla-c0d3/sslyze
sslyze/plugins/utils/trust_store/trust_store_repository.py
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/plugins/utils/trust_store/trust_store_repository.py#L123-L146
def update_default(cls) -> 'TrustStoresRepository': """Update the default trust stores used by SSLyze. The latest stores will be downloaded from https://github.com/nabla-c0d3/trust_stores_observatory. """ temp_path = mkdtemp() try: # Download the latest trust stores archive_path = join(temp_path, 'trust_stores_as_pem.tar.gz') urlretrieve(cls._UPDATE_URL, archive_path) # Extract the archive extract_path = join(temp_path, 'extracted') tarfile.open(archive_path).extractall(extract_path) # Copy the files to SSLyze and overwrite the existing stores shutil.rmtree(cls._DEFAULT_TRUST_STORES_PATH) shutil.copytree(extract_path, cls._DEFAULT_TRUST_STORES_PATH) finally: shutil.rmtree(temp_path) # Re-generate the default repo - not thread-safe cls._DEFAULT_REPOSITORY = cls(cls._DEFAULT_TRUST_STORES_PATH) return cls._DEFAULT_REPOSITORY
[ "def", "update_default", "(", "cls", ")", "->", "'TrustStoresRepository'", ":", "temp_path", "=", "mkdtemp", "(", ")", "try", ":", "# Download the latest trust stores", "archive_path", "=", "join", "(", "temp_path", ",", "'trust_stores_as_pem.tar.gz'", ")", "urlretrieve", "(", "cls", ".", "_UPDATE_URL", ",", "archive_path", ")", "# Extract the archive", "extract_path", "=", "join", "(", "temp_path", ",", "'extracted'", ")", "tarfile", ".", "open", "(", "archive_path", ")", ".", "extractall", "(", "extract_path", ")", "# Copy the files to SSLyze and overwrite the existing stores", "shutil", ".", "rmtree", "(", "cls", ".", "_DEFAULT_TRUST_STORES_PATH", ")", "shutil", ".", "copytree", "(", "extract_path", ",", "cls", ".", "_DEFAULT_TRUST_STORES_PATH", ")", "finally", ":", "shutil", ".", "rmtree", "(", "temp_path", ")", "# Re-generate the default repo - not thread-safe", "cls", ".", "_DEFAULT_REPOSITORY", "=", "cls", "(", "cls", ".", "_DEFAULT_TRUST_STORES_PATH", ")", "return", "cls", ".", "_DEFAULT_REPOSITORY" ]
Update the default trust stores used by SSLyze. The latest stores will be downloaded from https://github.com/nabla-c0d3/trust_stores_observatory.
[ "Update", "the", "default", "trust", "stores", "used", "by", "SSLyze", "." ]
python
train
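The fetch-extract-swap pattern generalizes beyond trust stores. A generic sketch (the URL and target directory are placeholders; production code should also validate the archive before calling extractall):

import shutil
import tarfile
from os.path import join
from tempfile import mkdtemp
from urllib.request import urlretrieve

def refresh_directory(archive_url, target_dir):
    temp_path = mkdtemp()
    try:
        archive_path = join(temp_path, "payload.tar.gz")
        urlretrieve(archive_url, archive_path)
        extract_path = join(temp_path, "extracted")
        with tarfile.open(archive_path) as archive:
            archive.extractall(extract_path)
        # replace the existing directory wholesale
        shutil.rmtree(target_dir, ignore_errors=True)
        shutil.copytree(extract_path, target_dir)
    finally:
        shutil.rmtree(temp_path)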
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomDateTime.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomDateTime.py#L82-L98
def update_datetime(value, range = None):
    """
    Updates (drifts) a Date value within a specified range.

    :param value: a Date value to drift.

    :param range: (optional) a range in days. Default: 10 days

    :return: an updated DateTime value.
    """
    range = range if range != None else 10
    if range < 0:
        return value

    days = RandomFloat.next_float(-range, range)
    return value + datetime.timedelta(days)
[ "def", "update_datetime", "(", "value", ",", "range", "=", "None", ")", ":", "range", "=", "range", "if", "range", "!=", "None", "else", "10", "if", "range", "<", "0", ":", "return", "value", "days", "=", "RandomFloat", ".", "next_float", "(", "-", "range", ",", "range", ")", "return", "value", "+", "datetime", ".", "timedelta", "(", "days", ")" ]
Updates (drifts) a Date value within a specified range.

:param value: a Date value to drift.

:param range: (optional) a range in days. Default: 10 days

:return: an updated DateTime value.
[ "Updates", "(", "drifts", ")", "a", "Date", "value", "within", "specified", "range", "defined" ]
python
train
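The drift helper in plain standard-library terms (random.uniform replaces the library's RandomFloat; note the offset is in days):

import datetime
import random

def drift_datetime(value, day_range=10):
    if day_range < 0:
        return value
    return value + datetime.timedelta(days=random.uniform(-day_range, day_range))

anchor = datetime.datetime(2024, 6, 15)
print(drift_datetime(anchor))  # within +/- 10 days of 2024-06-15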
gwpy/gwpy
gwpy/signal/spectral/_utils.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_utils.py#L27-L54
def scale_timeseries_unit(tsunit, scaling='density'): """Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`. """ # set units if scaling == 'density': baseunit = units.Hertz elif scaling == 'spectrum': baseunit = units.dimensionless_unscaled else: raise ValueError("Unknown scaling: %r" % scaling) if tsunit: specunit = tsunit ** 2 / baseunit else: specunit = baseunit ** -1 return specunit
[ "def", "scale_timeseries_unit", "(", "tsunit", ",", "scaling", "=", "'density'", ")", ":", "# set units", "if", "scaling", "==", "'density'", ":", "baseunit", "=", "units", ".", "Hertz", "elif", "scaling", "==", "'spectrum'", ":", "baseunit", "=", "units", ".", "dimensionless_unscaled", "else", ":", "raise", "ValueError", "(", "\"Unknown scaling: %r\"", "%", "scaling", ")", "if", "tsunit", ":", "specunit", "=", "tsunit", "**", "2", "/", "baseunit", "else", ":", "specunit", "=", "baseunit", "**", "-", "1", "return", "specunit" ]
Scale the unit of a `TimeSeries` to match that of a `FrequencySeries` Parameters ---------- tsunit : `~astropy.units.UnitBase` input unit from `TimeSeries` scaling : `str` type of frequency series, either 'density' for a PSD, or 'spectrum' for a power spectrum. Returns ------- unit : `~astropy.units.Unit` unit to be applied to the resulting `FrequencySeries`.
[ "Scale", "the", "unit", "of", "a", "TimeSeries", "to", "match", "that", "of", "a", "FrequencySeries" ]
python
train
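With astropy installed, the scaling rule can be checked directly (same logic, condensed; the ValueError branch for unknown scalings is dropped here):

from astropy import units

def scale_timeseries_unit(tsunit, scaling="density"):
    base = units.Hertz if scaling == "density" else units.dimensionless_unscaled
    return tsunit ** 2 / base if tsunit else base ** -1

print(scale_timeseries_unit(units.m))              # m2 / Hz
print(scale_timeseries_unit(None))                 # 1 / Hz
print(scale_timeseries_unit(units.m, "spectrum"))  # m2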
etcher-be/emiz
emiz/avwx/speech.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/speech.py#L72-L83
def altimeter(alt: Number, unit: str = 'inHg') -> str: """ Format altimeter details into a spoken word string """ ret = 'Altimeter ' if not alt: ret += 'unknown' elif unit == 'inHg': ret += core.spoken_number(alt.repr[:2]) + ' point ' + core.spoken_number(alt.repr[2:]) elif unit == 'hPa': ret += core.spoken_number(alt.repr) return ret
[ "def", "altimeter", "(", "alt", ":", "Number", ",", "unit", ":", "str", "=", "'inHg'", ")", "->", "str", ":", "ret", "=", "'Altimeter '", "if", "not", "alt", ":", "ret", "+=", "'unknown'", "elif", "unit", "==", "'inHg'", ":", "ret", "+=", "core", ".", "spoken_number", "(", "alt", ".", "repr", "[", ":", "2", "]", ")", "+", "' point '", "+", "core", ".", "spoken_number", "(", "alt", ".", "repr", "[", "2", ":", "]", ")", "elif", "unit", "==", "'hPa'", ":", "ret", "+=", "core", ".", "spoken_number", "(", "alt", ".", "repr", ")", "return", "ret" ]
Format altimeter details into a spoken word string
[ "Format", "altimeter", "details", "into", "a", "spoken", "word", "string" ]
python
train
saltstack/salt
salt/modules/freebsdports.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/freebsdports.py#L392-L453
def update(extract=False): ''' Update the ports tree extract : False If ``True``, runs a ``portsnap extract`` after fetching, should be used for first-time installation of the ports tree. CLI Example: .. code-block:: bash salt '*' ports.update ''' result = __salt__['cmd.run_all']( _portsnap() + ['fetch'], python_shell=False ) if not result['retcode'] == 0: raise CommandExecutionError( 'Unable to fetch ports snapshot: {0}'.format(result['stderr']) ) ret = [] try: patch_count = re.search( r'Fetching (\d+) patches', result['stdout'] ).group(1) except AttributeError: patch_count = 0 try: new_port_count = re.search( r'Fetching (\d+) new ports or files', result['stdout'] ).group(1) except AttributeError: new_port_count = 0 ret.append('Applied {0} new patches'.format(patch_count)) ret.append('Fetched {0} new ports or files'.format(new_port_count)) if extract: result = __salt__['cmd.run_all']( _portsnap() + ['extract'], python_shell=False ) if not result['retcode'] == 0: raise CommandExecutionError( 'Unable to extract ports snapshot {0}'.format(result['stderr']) ) result = __salt__['cmd.run_all']( _portsnap() + ['update'], python_shell=False ) if not result['retcode'] == 0: raise CommandExecutionError( 'Unable to apply ports snapshot: {0}'.format(result['stderr']) ) __context__.pop('ports.list_all', None) return '\n'.join(ret)
[ "def", "update", "(", "extract", "=", "False", ")", ":", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "_portsnap", "(", ")", "+", "[", "'fetch'", "]", ",", "python_shell", "=", "False", ")", "if", "not", "result", "[", "'retcode'", "]", "==", "0", ":", "raise", "CommandExecutionError", "(", "'Unable to fetch ports snapshot: {0}'", ".", "format", "(", "result", "[", "'stderr'", "]", ")", ")", "ret", "=", "[", "]", "try", ":", "patch_count", "=", "re", ".", "search", "(", "r'Fetching (\\d+) patches'", ",", "result", "[", "'stdout'", "]", ")", ".", "group", "(", "1", ")", "except", "AttributeError", ":", "patch_count", "=", "0", "try", ":", "new_port_count", "=", "re", ".", "search", "(", "r'Fetching (\\d+) new ports or files'", ",", "result", "[", "'stdout'", "]", ")", ".", "group", "(", "1", ")", "except", "AttributeError", ":", "new_port_count", "=", "0", "ret", ".", "append", "(", "'Applied {0} new patches'", ".", "format", "(", "patch_count", ")", ")", "ret", ".", "append", "(", "'Fetched {0} new ports or files'", ".", "format", "(", "new_port_count", ")", ")", "if", "extract", ":", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "_portsnap", "(", ")", "+", "[", "'extract'", "]", ",", "python_shell", "=", "False", ")", "if", "not", "result", "[", "'retcode'", "]", "==", "0", ":", "raise", "CommandExecutionError", "(", "'Unable to extract ports snapshot {0}'", ".", "format", "(", "result", "[", "'stderr'", "]", ")", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "_portsnap", "(", ")", "+", "[", "'update'", "]", ",", "python_shell", "=", "False", ")", "if", "not", "result", "[", "'retcode'", "]", "==", "0", ":", "raise", "CommandExecutionError", "(", "'Unable to apply ports snapshot: {0}'", ".", "format", "(", "result", "[", "'stderr'", "]", ")", ")", "__context__", ".", "pop", "(", "'ports.list_all'", ",", "None", ")", "return", "'\\n'", ".", "join", "(", "ret", ")" ]
Update the ports tree extract : False If ``True``, runs a ``portsnap extract`` after fetching, should be used for first-time installation of the ports tree. CLI Example: .. code-block:: bash salt '*' ports.update
[ "Update", "the", "ports", "tree" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/ticket.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/ticket.py#L25-L43
def list_tickets(self, open_status=True, closed_status=True): """List all tickets. :param boolean open_status: include open tickets :param boolean closed_status: include closed tickets """ mask = """mask[id, title, assignedUser[firstName, lastName], priority, createDate, lastEditDate, accountId, status, updateCount]""" call = 'getTickets' if not all([open_status, closed_status]): if open_status: call = 'getOpenTickets' elif closed_status: call = 'getClosedTickets' else: raise ValueError("open_status and closed_status cannot both be False") return self.client.call('Account', call, mask=mask, iter=True)
[ "def", "list_tickets", "(", "self", ",", "open_status", "=", "True", ",", "closed_status", "=", "True", ")", ":", "mask", "=", "\"\"\"mask[id, title, assignedUser[firstName, lastName], priority,\n createDate, lastEditDate, accountId, status, updateCount]\"\"\"", "call", "=", "'getTickets'", "if", "not", "all", "(", "[", "open_status", ",", "closed_status", "]", ")", ":", "if", "open_status", ":", "call", "=", "'getOpenTickets'", "elif", "closed_status", ":", "call", "=", "'getClosedTickets'", "else", ":", "raise", "ValueError", "(", "\"open_status and closed_status cannot both be False\"", ")", "return", "self", ".", "client", ".", "call", "(", "'Account'", ",", "call", ",", "mask", "=", "mask", ",", "iter", "=", "True", ")" ]
List all tickets. :param boolean open_status: include open tickets :param boolean closed_status: include closed tickets
[ "List", "all", "tickets", "." ]
python
train
cgrok/clashroyale
clashroyale/official_api/client.py
https://github.com/cgrok/clashroyale/blob/2618f4da22a84ad3e36d2446e23436d87c423163/clashroyale/official_api/client.py#L309-L321
def get_clan(self, tag: crtag, timeout: int=None):
    """Get information about a clan

    Parameters
    ----------
    tag: str
        A valid clan tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.CLAN + '/' + tag
    return self._get_model(url, FullClan, timeout=timeout)
[ "def", "get_clan", "(", "self", ",", "tag", ":", "crtag", ",", "timeout", ":", "int", "=", "None", ")", ":", "url", "=", "self", ".", "api", ".", "CLAN", "+", "'/'", "+", "tag", "return", "self", ".", "_get_model", "(", "url", ",", "FullClan", ",", "timeout", "=", "timeout", ")" ]
Get information about a clan

Parameters
----------
tag: str
    A valid clan tag. Minimum length: 3
    Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
    Custom timeout that overwrites Client.timeout
[ "Get", "inforamtion", "about", "a", "clan" ]
python
valid
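A hedged usage sketch: the token and tag below are placeholders, and the blocking OfficialAPI client is assumed to be the entry point this library exposes:

import clashroyale

client = clashroyale.OfficialAPI("your-api-token")  # placeholder token
clan = client.get_clan("2CCCP")                     # placeholder tag
print(clan.name)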
rocky/python3-trepan
trepan/processor/cmdfns.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/cmdfns.py#L175-L181
def run_show_bool(obj, what=None):
    """Generic subcommand showing a boolean-valued debugger setting.
    'obj' is generally a subcommand that has 'name' and
    'debugger.settings' attributes."""
    val = show_onoff(obj.debugger.settings[obj.name])
    if not what:
        what = obj.name
    return obj.msg("%s is %s." % (what, val))
[ "def", "run_show_bool", "(", "obj", ",", "what", "=", "None", ")", ":", "val", "=", "show_onoff", "(", "obj", ".", "debugger", ".", "settings", "[", "obj", ".", "name", "]", ")", "if", "not", "what", ":", "what", "=", "obj", ".", "name", "return", "obj", ".", "msg", "(", "\"%s is %s.\"", "%", "(", "what", ",", "val", ")", ")" ]
Generic subcommand showing a boolean-valued debugger setting. 'obj' is generally a subcommand that has 'name' and 'debugger.settings' attributes.
[ "Generic", "subcommand", "showing", "a", "boolean", "-", "valued", "debugger", "setting", ".", "obj", "is", "generally", "a", "subcommand", "that", "has", "name", "and", "debugger", ".", "setting", "attributes", "." ]
python
test
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L1007-L1013
def must_be_same(self, klass): """Called to make sure a Node is a Dir. Since we're an Entry, we can morph into one.""" if self.__class__ is not klass: self.__class__ = klass self._morph() self.clear()
[ "def", "must_be_same", "(", "self", ",", "klass", ")", ":", "if", "self", ".", "__class__", "is", "not", "klass", ":", "self", ".", "__class__", "=", "klass", "self", ".", "_morph", "(", ")", "self", ".", "clear", "(", ")" ]
Called to make sure a Node is a Dir. Since we're an Entry, we can morph into one.
[ "Called", "to", "make", "sure", "a", "Node", "is", "a", "Dir", ".", "Since", "we", "re", "an", "Entry", "we", "can", "morph", "into", "one", "." ]
python
train
xhtml2pdf/xhtml2pdf
xhtml2pdf/xhtml2pdf_reportlab.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/xhtml2pdf_reportlab.py#L788-L839
def wrap(self, availWidth, availHeight): """ All table properties should be known by now. """ widths = (availWidth - self.rightColumnWidth, self.rightColumnWidth) # makes an internal table which does all the work. # we draw the LAST RUN's entries! If there are # none, we make some dummy data to keep the table # from complaining if len(self._lastEntries) == 0: _tempEntries = [(0, 'Placeholder for table of contents', 0)] else: _tempEntries = self._lastEntries lastMargin = 0 tableData = [] tableStyle = [ ('VALIGN', (0, 0), (- 1, - 1), 'TOP'), ('LEFTPADDING', (0, 0), (- 1, - 1), 0), ('RIGHTPADDING', (0, 0), (- 1, - 1), 0), ('TOPPADDING', (0, 0), (- 1, - 1), 0), ('BOTTOMPADDING', (0, 0), (- 1, - 1), 0), ] for i, entry in enumerate(_tempEntries): level, text, pageNum = entry[:3] leftColStyle = self.levelStyles[level] if i: # Not for first element tableStyle.append(( 'TOPPADDING', (0, i), (- 1, i), max(lastMargin, leftColStyle.spaceBefore))) # print leftColStyle.leftIndent lastMargin = leftColStyle.spaceAfter #right col style is right aligned rightColStyle = ParagraphStyle(name='leftColLevel%d' % level, parent=leftColStyle, leftIndent=0, alignment=TA_RIGHT) leftPara = Paragraph(text, leftColStyle) rightPara = Paragraph(str(pageNum), rightColStyle) tableData.append([leftPara, rightPara]) self._table = Table( tableData, colWidths=widths, style=TableStyle(tableStyle)) self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight) return self.width, self.height
[ "def", "wrap", "(", "self", ",", "availWidth", ",", "availHeight", ")", ":", "widths", "=", "(", "availWidth", "-", "self", ".", "rightColumnWidth", ",", "self", ".", "rightColumnWidth", ")", "# makes an internal table which does all the work.", "# we draw the LAST RUN's entries! If there are", "# none, we make some dummy data to keep the table", "# from complaining", "if", "len", "(", "self", ".", "_lastEntries", ")", "==", "0", ":", "_tempEntries", "=", "[", "(", "0", ",", "'Placeholder for table of contents'", ",", "0", ")", "]", "else", ":", "_tempEntries", "=", "self", ".", "_lastEntries", "lastMargin", "=", "0", "tableData", "=", "[", "]", "tableStyle", "=", "[", "(", "'VALIGN'", ",", "(", "0", ",", "0", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "'TOP'", ")", ",", "(", "'LEFTPADDING'", ",", "(", "0", ",", "0", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "0", ")", ",", "(", "'RIGHTPADDING'", ",", "(", "0", ",", "0", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "0", ")", ",", "(", "'TOPPADDING'", ",", "(", "0", ",", "0", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "0", ")", ",", "(", "'BOTTOMPADDING'", ",", "(", "0", ",", "0", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "0", ")", ",", "]", "for", "i", ",", "entry", "in", "enumerate", "(", "_tempEntries", ")", ":", "level", ",", "text", ",", "pageNum", "=", "entry", "[", ":", "3", "]", "leftColStyle", "=", "self", ".", "levelStyles", "[", "level", "]", "if", "i", ":", "# Not for first element", "tableStyle", ".", "append", "(", "(", "'TOPPADDING'", ",", "(", "0", ",", "i", ")", ",", "(", "-", "1", ",", "i", ")", ",", "max", "(", "lastMargin", ",", "leftColStyle", ".", "spaceBefore", ")", ")", ")", "# print leftColStyle.leftIndent", "lastMargin", "=", "leftColStyle", ".", "spaceAfter", "#right col style is right aligned", "rightColStyle", "=", "ParagraphStyle", "(", "name", "=", "'leftColLevel%d'", "%", "level", ",", "parent", "=", "leftColStyle", ",", "leftIndent", "=", "0", ",", "alignment", "=", "TA_RIGHT", ")", "leftPara", "=", "Paragraph", "(", "text", ",", "leftColStyle", ")", "rightPara", "=", "Paragraph", "(", "str", "(", "pageNum", ")", ",", "rightColStyle", ")", "tableData", ".", "append", "(", "[", "leftPara", ",", "rightPara", "]", ")", "self", ".", "_table", "=", "Table", "(", "tableData", ",", "colWidths", "=", "widths", ",", "style", "=", "TableStyle", "(", "tableStyle", ")", ")", "self", ".", "width", ",", "self", ".", "height", "=", "self", ".", "_table", ".", "wrapOn", "(", "self", ".", "canv", ",", "availWidth", ",", "availHeight", ")", "return", "self", ".", "width", ",", "self", ".", "height" ]
All table properties should be known by now.
[ "All", "table", "properties", "should", "be", "known", "by", "now", "." ]
python
train
seequent/properties
properties/base/containers.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/containers.py#L261-L281
def validate(self, instance, value): """Check the class of the container and validate each element This returns a copy of the container to prevent unwanted sharing of pointers. """ if not self.coerce and not isinstance(value, self._class_container): self.error(instance, value) if self.coerce and not isinstance(value, CONTAINERS): value = [value] if not isinstance(value, self._class_container): out_class = self._class_container else: out_class = value.__class__ out = [] for val in value: try: out += [self.prop.validate(instance, val)] except ValueError: self.error(instance, val, extra='This item is invalid.') return out_class(out)
[ "def", "validate", "(", "self", ",", "instance", ",", "value", ")", ":", "if", "not", "self", ".", "coerce", "and", "not", "isinstance", "(", "value", ",", "self", ".", "_class_container", ")", ":", "self", ".", "error", "(", "instance", ",", "value", ")", "if", "self", ".", "coerce", "and", "not", "isinstance", "(", "value", ",", "CONTAINERS", ")", ":", "value", "=", "[", "value", "]", "if", "not", "isinstance", "(", "value", ",", "self", ".", "_class_container", ")", ":", "out_class", "=", "self", ".", "_class_container", "else", ":", "out_class", "=", "value", ".", "__class__", "out", "=", "[", "]", "for", "val", "in", "value", ":", "try", ":", "out", "+=", "[", "self", ".", "prop", ".", "validate", "(", "instance", ",", "val", ")", "]", "except", "ValueError", ":", "self", ".", "error", "(", "instance", ",", "val", ",", "extra", "=", "'This item is invalid.'", ")", "return", "out_class", "(", "out", ")" ]
Check the class of the container and validate each element This returns a copy of the container to prevent unwanted sharing of pointers.
[ "Check", "the", "class", "of", "the", "container", "and", "validate", "each", "element" ]
python
train
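The copy-on-validate behaviour described in the docstring can be seen with a small `HasProperties` model (the class and field names below are illustrative, not from the record):

.. code-block:: python

    import properties

    class Bag(properties.HasProperties):
        values = properties.List('integer items', properties.Integer(''))

    bag = Bag()
    src = [1, 2]
    bag.values = src             # each element is validated by the Integer prop
    src.append(3)
    assert bag.values == [1, 2]  # validate() stored a copy, not the original list

    # bag.values = [1, 'x']      # would raise ValueError: 'This item is invalid.'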
MagicStack/asyncpg
asyncpg/connection.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L1071-L1088
async def close(self, *, timeout=None): """Close the connection gracefully. :param float timeout: Optional timeout value in seconds. .. versionchanged:: 0.14.0 Added the *timeout* parameter. """ try: if not self.is_closed(): await self._protocol.close(timeout) except Exception: # If we fail to close gracefully, abort the connection. self._abort() raise finally: self._cleanup()
[ "async", "def", "close", "(", "self", ",", "*", ",", "timeout", "=", "None", ")", ":", "try", ":", "if", "not", "self", ".", "is_closed", "(", ")", ":", "await", "self", ".", "_protocol", ".", "close", "(", "timeout", ")", "except", "Exception", ":", "# If we fail to close gracefully, abort the connection.", "self", ".", "_abort", "(", ")", "raise", "finally", ":", "self", ".", "_cleanup", "(", ")" ]
Close the connection gracefully. :param float timeout: Optional timeout value in seconds. .. versionchanged:: 0.14.0 Added the *timeout* parameter.
[ "Close", "the", "connection", "gracefully", "." ]
python
train
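A usage sketch for the graceful close above (the DSN is a placeholder):

.. code-block:: python

    import asyncio
    import asyncpg

    async def main():
        conn = await asyncpg.connect('postgresql://user:pass@localhost/db')
        try:
            print(await conn.fetchval('SELECT 1'))
        finally:
            # Wait up to 5 seconds for a graceful shutdown; on any failure
            # the connection is aborted and the exception re-raised.
            await conn.close(timeout=5)

    asyncio.get_event_loop().run_until_complete(main())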
cloudant/python-cloudant
src/cloudant/document.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L264-L299
def update_field(self, action, field, value, max_tries=10): """ Updates a field in the remote document. If a conflict exists, the document is re-fetched from the remote database and the update is retried. This is performed up to ``max_tries`` number of times. Use this method when you want to update a single field in a document, and don't want to risk clobbering other people's changes to the document in other fields, but also don't want the caller to implement logic to deal with conflicts. For example: .. code-block:: python # Append the string 'foo' to the 'words' list of Document doc. doc.update_field( action=doc.list_field_append, field='words', value='foo' ) :param callable action: A routine that takes a Document object, a field name, and a value. The routine should attempt to update a field in the locally cached Document object with the given value, using whatever logic is appropriate. Valid actions are :func:`~cloudant.document.Document.list_field_append`, :func:`~cloudant.document.Document.list_field_remove`, :func:`~cloudant.document.Document.field_set` :param str field: Name of the field to update :param value: Value to update the field with :param int max_tries: In the case of a conflict, the number of retries to attempt """ self._update_field(action, field, value, max_tries)
[ "def", "update_field", "(", "self", ",", "action", ",", "field", ",", "value", ",", "max_tries", "=", "10", ")", ":", "self", ".", "_update_field", "(", "action", ",", "field", ",", "value", ",", "max_tries", ")" ]
Updates a field in the remote document. If a conflict exists, the document is re-fetched from the remote database and the update is retried. This is performed up to ``max_tries`` number of times. Use this method when you want to update a single field in a document, and don't want to risk clobbering other people's changes to the document in other fields, but also don't want the caller to implement logic to deal with conflicts. For example: .. code-block:: python # Append the string 'foo' to the 'words' list of Document doc. doc.update_field( action=doc.list_field_append, field='words', value='foo' ) :param callable action: A routine that takes a Document object, a field name, and a value. The routine should attempt to update a field in the locally cached Document object with the given value, using whatever logic is appropriate. Valid actions are :func:`~cloudant.document.Document.list_field_append`, :func:`~cloudant.document.Document.list_field_remove`, :func:`~cloudant.document.Document.field_set` :param str field: Name of the field to update :param value: Value to update the field with :param int max_tries: In the case of a conflict, the number of retries to attempt
[ "Updates", "a", "field", "in", "the", "remote", "document", ".", "If", "a", "conflict", "exists", "the", "document", "is", "re", "-", "fetched", "from", "the", "remote", "database", "and", "the", "update", "is", "retried", ".", "This", "is", "performed", "up", "to", "max_tries", "number", "of", "times", "." ]
python
train
isislovecruft/python-gnupg
pretty_bad_protocol/_parsers.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_parsers.py#L127-L141
def _fix_unsafe(shell_input): """Find characters used to escape from a string into a shell, and wrap them in quotes if they exist. Regex pilfered from Python3 :mod:`shlex` module. :param str shell_input: The input intended for the GnuPG process. """ _unsafe = re.compile(r'[^\w@%+=:,./-]', 256) try: if len(_unsafe.findall(shell_input)) == 0: return shell_input.strip() else: clean = "'" + shell_input.replace("'", "'\"'\"'") + "'" return clean except TypeError: return None
[ "def", "_fix_unsafe", "(", "shell_input", ")", ":", "_unsafe", "=", "re", ".", "compile", "(", "r'[^\\w@%+=:,./-]'", ",", "256", ")", "try", ":", "if", "len", "(", "_unsafe", ".", "findall", "(", "shell_input", ")", ")", "==", "0", ":", "return", "shell_input", ".", "strip", "(", ")", "else", ":", "clean", "=", "\"'\"", "+", "shell_input", ".", "replace", "(", "\"'\"", ",", "\"'\\\"'\\\"'\"", ")", "+", "\"'\"", "return", "clean", "except", "TypeError", ":", "return", "None" ]
Find characters used to escape from a string into a shell, and wrap them in quotes if they exist. Regex pilfered from Python3 :mod:`shlex` module. :param str shell_input: The input intended for the GnuPG process.
[ "Find", "characters", "used", "to", "escape", "from", "a", "string", "into", "a", "shell", "and", "wrap", "them", "in", "quotes", "if", "they", "exist", ".", "Regex", "pilfered", "from", "Python3", ":", "mod", ":", "shlex", "module", "." ]
python
train
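Since `_fix_unsafe` is module-private, its quoting rule is easiest to see reproduced standalone (the magic flag value 256 in the original equals `re.ASCII` on Python 3, as in the stdlib `shlex` module):

.. code-block:: python

    import re

    _unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII)  # 256 == re.ASCII

    def fix_unsafe(shell_input):
        # Same rule as above: wrap in single quotes, escaping any embedded
        # single quote with the standard POSIX '"'"' idiom.
        if not _unsafe.findall(shell_input):
            return shell_input.strip()
        return "'" + shell_input.replace("'", "'\"'\"'") + "'"

    print(fix_unsafe('[email protected]'))    # unchanged: no unsafe characters
    print(fix_unsafe('key; rm -rf /'))  # quoted: 'key; rm -rf /'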
miquelo/resort
packages/resort/__init__.py
https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/__init__.py#L397-L451
def command_update(prog_name, prof_mgr, prof_name, prog_args): """ Update components. """ # Retrieve arguments parser = argparse.ArgumentParser( prog=prog_name ) parser.add_argument( "components", metavar="comps", nargs=argparse.REMAINDER, help="system components" ) args = parser.parse_args(prog_args) # Profile load prof_stub = prof_mgr.load(prof_name) # Collect component stubs comp_stubs = [] if len(args.components) == 0: raise Exception("Empty component list") for comp_name in args.components: comp_stub = prof_stub.component(comp_name) component_exists(prof_stub, comp_stub) comp_stubs.append(comp_stub) context = prof_stub.context() # Create delete plan plan = [] for comp_stub in comp_stubs: comp_stub.delete(context, plan) # Execute delete plan for op in plan: operation_execute(op, context) # Update component stub list for op in plan: comp_stub = prof_stub.component(op.name()) if comp_stub not in comp_stubs: comp_stubs.append(comp_stub) # Create insert plan plan = [] for comp_stub in comp_stubs: comp_stub.insert(context, plan) # Execute insert plan for op in plan: operation_execute(op, context)
[ "def", "command_update", "(", "prog_name", ",", "prof_mgr", ",", "prof_name", ",", "prog_args", ")", ":", "# Retrieve arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "prog_name", ")", "parser", ".", "add_argument", "(", "\"components\"", ",", "metavar", "=", "\"comps\"", ",", "nargs", "=", "argparse", ".", "REMAINDER", ",", "help", "=", "\"system components\"", ")", "args", "=", "parser", ".", "parse_args", "(", "prog_args", ")", "# Profile load", "prof_stub", "=", "prof_mgr", ".", "load", "(", "prof_name", ")", "# Collect component stubs", "comp_stubs", "=", "[", "]", "if", "len", "(", "args", ".", "components", ")", "==", "0", ":", "raise", "Exception", "(", "\"Empty component list\"", ")", "for", "comp_name", "in", "args", ".", "components", ":", "comp_stub", "=", "prof_stub", ".", "component", "(", "comp_name", ")", "component_exists", "(", "prof_stub", ",", "comp_stub", ")", "comp_stubs", ".", "append", "(", "comp_stub", ")", "context", "=", "prof_stub", ".", "context", "(", ")", "# Create delete plan", "plan", "=", "[", "]", "for", "comp_stub", "in", "comp_stubs", ":", "comp_stub", ".", "delete", "(", "context", ",", "plan", ")", "# Execute delete plan", "for", "op", "in", "plan", ":", "operation_execute", "(", "op", ",", "context", ")", "# Update component stub list", "for", "op", "in", "plan", ":", "comp_stub", "=", "prof_stub", ".", "component", "(", "op", ".", "name", "(", ")", ")", "if", "comp_stub", "not", "in", "comp_stubs", ":", "comp_stubs", ".", "append", "(", "comp_stub", ")", "# Create insert plan", "plan", "=", "[", "]", "for", "comp_stub", "in", "comp_stubs", ":", "comp_stub", ".", "insert", "(", "context", ",", "plan", ")", "# Execute insert plan", "for", "op", "in", "plan", ":", "operation_execute", "(", "op", ",", "context", ")" ]
Update components.
[ "Update", "components", "." ]
python
train
AustralianSynchrotron/lightflow
lightflow/models/task_data.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/task_data.py#L261-L273
def get_by_index(self, index): """ Return a dataset by its index. Args: index (int): The index of the dataset that should be returned. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """ if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) return self._datasets[index]
[ "def", "get_by_index", "(", "self", ",", "index", ")", ":", "if", "index", ">=", "len", "(", "self", ".", "_datasets", ")", ":", "raise", "DataInvalidIndex", "(", "'A dataset with index {} does not exist'", ".", "format", "(", "index", ")", ")", "return", "self", ".", "_datasets", "[", "index", "]" ]
Return a dataset by its index. Args: index (int): The index of the dataset that should be returned. Raises: DataInvalidIndex: If the index does not represent a valid dataset.
[ "Return", "a", "dataset", "by", "its", "index", "." ]
python
train
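Note that the guard only checks the upper bound, so negative indices fall through to Python's from-the-end indexing. A standalone sketch of the same pattern, using a plain `IndexError` in place of the package's `DataInvalidIndex`:

.. code-block:: python

    datasets = ['a', 'b', 'c']

    def get_by_index(index):
        if index >= len(datasets):
            raise IndexError('A dataset with index {} does not exist'.format(index))
        return datasets[index]

    get_by_index(1)    # 'b'
    # get_by_index(3)  # would raise, as documented
    get_by_index(-1)   # 'c': negative indices pass the guard unchecked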
phaethon/kamene
kamene/contrib/igmpv3.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/igmpv3.py#L144-L158
def post_build(self, p, pay):
        """Called implicitly before a packet is sent to compute and place IGMPv3 checksum.

        Parameters:
          self    The instantiation of an IGMPv3 class
          p       The IGMPv3 message in hex in network byte order
          pay     Additional payload for the IGMPv3 message
        """
        p += pay
        if self.type in [0, 0x31, 0x32, 0x22]:   # for these, field is reserved (0)
            p = p[:1]+b'\x00'+p[2:]
        if self.chksum is None:
            ck = checksum(p[:2]+p[4:])
            p = p[:2]+ck.to_bytes(2, 'big')+p[4:]
        return p
[ "def", "post_build", "(", "self", ",", "p", ",", "pay", ")", ":", "p", "+=", "pay", "if", "self", ".", "type", "in", "[", "0", ",", "0x31", ",", "0x32", ",", "0x22", "]", ":", "# for these, field is reserved (0)", "p", "=", "p", "[", ":", "1", "]", "+", "chr", "(", "0", ")", "+", "p", "[", "2", ":", "]", "if", "self", ".", "chksum", "is", "None", ":", "ck", "=", "checksum", "(", "p", "[", ":", "2", "]", "+", "p", "[", "4", ":", "]", ")", "p", "=", "p", "[", ":", "2", "]", "+", "ck", ".", "to_bytes", "(", "2", ",", "'big'", ")", "+", "p", "[", "4", ":", "]", "return", "p" ]
Called implicitly before a packet is sent to compute and place IGMPv3 checksum. Parameters: self The instantiation of an IGMPv3 class p The IGMPv3 message in hex in network byte order pay Additional payload for the IGMPv3 message
[ "Called", "implicitly", "before", "a", "packet", "is", "sent", "to", "compute", "and", "place", "IGMPv3", "checksum", "." ]
python
train
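The `checksum` helper used above is the library's RFC 1071 ones'-complement sum; a standalone sketch of that computation (byte-order handling in the real helper may differ in detail):

.. code-block:: python

    def inet_checksum(data: bytes) -> int:
        if len(data) % 2:
            data += b'\x00'                       # pad odd-length input
        total = sum(int.from_bytes(data[i:i + 2], 'big')
                    for i in range(0, len(data), 2))
        total = (total & 0xFFFF) + (total >> 16)  # fold the carries back in
        total = (total & 0xFFFF) + (total >> 16)
        return ~total & 0xFFFF

    assert inet_checksum(b'\x45\x00\x00\x3c') == 0xBAC3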
spyder-ide/spyder
spyder/widgets/mixins.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L524-L535
def get_character(self, position, offset=0): """Return character at *position* with the given offset.""" position = self.get_position(position) + offset cursor = self.textCursor() cursor.movePosition(QTextCursor.End) if position < cursor.position(): cursor.setPosition(position) cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor) return to_text_string(cursor.selectedText()) else: return ''
[ "def", "get_character", "(", "self", ",", "position", ",", "offset", "=", "0", ")", ":", "position", "=", "self", ".", "get_position", "(", "position", ")", "+", "offset", "cursor", "=", "self", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "End", ")", "if", "position", "<", "cursor", ".", "position", "(", ")", ":", "cursor", ".", "setPosition", "(", "position", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "Right", ",", "QTextCursor", ".", "KeepAnchor", ")", "return", "to_text_string", "(", "cursor", ".", "selectedText", "(", ")", ")", "else", ":", "return", "''" ]
Return character at *position* with the given offset.
[ "Return", "character", "at", "*", "position", "*", "with", "the", "given", "offset", "." ]
python
train
acsone/git-aggregator
git_aggregator/repo.py
https://github.com/acsone/git-aggregator/blob/8631b0e64f9e8ce1857b21adeddb890ebd8469a6/git_aggregator/repo.py#L313-L357
def collect_prs_info(self):
        """Collect all pending merge PRs info.

        :returns: mapping of PRs by state
        """
        REPO_RE = re.compile(
            '^(https://github.com/|[email protected]:)'
            '(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
        PULL_RE = re.compile(
            '^(refs/)?pull/(?P<pr>[0-9]+)/head$')
        remotes = {r['name']: r['url'] for r in self.remotes}
        all_prs = {}
        for merge in self.merges:
            remote = merge['remote']
            ref = merge['ref']
            repo_url = remotes[remote]
            repo_mo = REPO_RE.match(repo_url)
            if not repo_mo:
                logger.debug('%s is not a github repo', repo_url)
                continue
            pull_mo = PULL_RE.match(ref)
            if not pull_mo:
                logger.debug('%s is not a github pull request', ref)
                continue
            pr_info = {
                'owner': repo_mo.group('owner'),
                'repo': repo_mo.group('repo'),
                'pr': pull_mo.group('pr'),
            }
            pr_info['path'] = '{owner}/{repo}/pulls/{pr}'.format(**pr_info)
            pr_info['url'] = 'https://github.com/{path}'.format(**pr_info)
            pr_info['shortcut'] = '{owner}/{repo}#{pr}'.format(**pr_info)
            r = self._github_api_get('/repos/{path}'.format(**pr_info))
            if r.status_code != 200:
                logger.warning(
                    'Could not get status of {path}. '
                    'Reason: {r.status_code} {r.reason}'.format(r=r, **pr_info)
                )
                continue
            pr_info['state'] = r.json().get('state')
            pr_info['merged'] = (
                not r.json().get('merged') and 'not ' or ''
            ) + 'merged'
            all_prs.setdefault(pr_info['state'], []).append(pr_info)
        return all_prs
[ "def", "collect_prs_info", "(", "self", ")", ":", "REPO_RE", "=", "re", ".", "compile", "(", "'^(https://github.com/|[email protected]:)'", "'(?P<owner>.*?)/(?P<repo>.*?)(.git)?$'", ")", "PULL_RE", "=", "re", ".", "compile", "(", "'^(refs/)?pull/(?P<pr>[0-9]+)/head$'", ")", "remotes", "=", "{", "r", "[", "'name'", "]", ":", "r", "[", "'url'", "]", "for", "r", "in", "self", ".", "remotes", "}", "all_prs", "=", "{", "}", "for", "merge", "in", "self", ".", "merges", ":", "remote", "=", "merge", "[", "'remote'", "]", "ref", "=", "merge", "[", "'ref'", "]", "repo_url", "=", "remotes", "[", "remote", "]", "repo_mo", "=", "REPO_RE", ".", "match", "(", "repo_url", ")", "if", "not", "repo_mo", ":", "logger", ".", "debug", "(", "'%s is not a github repo'", ",", "repo_url", ")", "continue", "pull_mo", "=", "PULL_RE", ".", "match", "(", "ref", ")", "if", "not", "pull_mo", ":", "logger", ".", "debug", "(", "'%s is not a github pull reqeust'", ",", "ref", ")", "continue", "pr_info", "=", "{", "'owner'", ":", "repo_mo", ".", "group", "(", "'owner'", ")", ",", "'repo'", ":", "repo_mo", ".", "group", "(", "'repo'", ")", ",", "'pr'", ":", "pull_mo", ".", "group", "(", "'pr'", ")", ",", "}", "pr_info", "[", "'path'", "]", "=", "'{owner}/{repo}/pulls/{pr}'", ".", "format", "(", "*", "*", "pr_info", ")", "pr_info", "[", "'url'", "]", "=", "'https://github.com/{path}'", ".", "format", "(", "*", "*", "pr_info", ")", "pr_info", "[", "'shortcut'", "]", "=", "'{owner}/{repo}#{pr}'", ".", "format", "(", "*", "*", "pr_info", ")", "r", "=", "self", ".", "_github_api_get", "(", "'/repos/{path}'", ".", "format", "(", "*", "*", "pr_info", ")", ")", "if", "r", ".", "status_code", "!=", "200", ":", "logger", ".", "warning", "(", "'Could not get status of {path}. '", "'Reason: {r.status_code} {r.reason}'", ".", "format", "(", "r", "=", "r", ",", "*", "*", "pr_info", ")", ")", "continue", "pr_info", "[", "'state'", "]", "=", "r", ".", "json", "(", ")", ".", "get", "(", "'state'", ")", "pr_info", "[", "'merged'", "]", "=", "(", "not", "r", ".", "json", "(", ")", ".", "get", "(", "'merged'", ")", "and", "'not '", "or", "''", ")", "+", "'merged'", "all_prs", ".", "setdefault", "(", "pr_info", "[", "'state'", "]", ",", "[", "]", ")", ".", "append", "(", "pr_info", ")", "return", "all_prs" ]
Collect all pending merge PRs info. :returns: mapping of PRs by state
[ "Collect", "all", "pending", "merge", "PRs", "info", "." ]
python
train
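The two regular expressions carry most of the logic here; a quick standalone check of what they extract:

.. code-block:: python

    import re

    REPO_RE = re.compile('^(https://github.com/|[email protected]:)'
                         '(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
    PULL_RE = re.compile('^(refs/)?pull/(?P<pr>[0-9]+)/head$')

    mo = REPO_RE.match('[email protected]:acsone/git-aggregator.git')
    print(mo.group('owner'), mo.group('repo'))             # acsone git-aggregator
    print(PULL_RE.match('refs/pull/42/head').group('pr'))  # 42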
MisterWil/abodepy
abodepy/socketio.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L110-L118
def start(self):
        """Start a thread to handle SocketIO notifications."""
        if not self._thread:
            _LOGGER.info("Starting SocketIO thread...")
            self._thread = threading.Thread(target=self._run_socketio_thread,
                                            name='SocketIOThread')
            self._thread.daemon = True
            self._thread.start()
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "_thread", ":", "_LOGGER", ".", "info", "(", "\"Starting SocketIO thread...\"", ")", "self", ".", "_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_socketio_thread", ",", "name", "=", "'SocketIOThread'", ")", "self", ".", "_thread", ".", "deamon", "=", "True", "self", ".", "_thread", ".", "start", "(", ")" ]
Start a thread to handle SocketIO notifications.
[ "Start", "a", "thread", "to", "handle", "SocketIO", "notifications", "." ]
python
train
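The daemon-thread idiom used here, in isolation. Note the attribute must be spelled `daemon` (the original `deamon`, fixed above, would create a silently ignored attribute and the thread would keep the interpreter alive on exit):

.. code-block:: python

    import threading
    import time

    def run():
        while True:          # stand-in for the SocketIO notification loop
            time.sleep(1)

    thread = threading.Thread(target=run, name='SocketIOThread')
    thread.daemon = True     # must be set before start(); 'deamon' does nothing
    thread.start()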
walter426/Python_GoogleMapsApi
GoogleMapsApi/geocode.py
https://github.com/walter426/Python_GoogleMapsApi/blob/4832b293a0027446941a5f00ecc66256f92ddbce/GoogleMapsApi/geocode.py#L34-L63
def geocode(self, string, bounds=None, region=None, language=None, sensor=False):
        '''Geocode an address. Please refer to the Google Maps Web API for the details of the parameters
        '''

        if isinstance(string, unicode):
            string = string.encode('utf-8')

        params = {
            'address': self.format_string % string,
            'sensor': str(sensor).lower()
        }

        if bounds:
            params['bounds'] = bounds
        if region:
            params['region'] = region
        if language:
            params['language'] = language

        if not self.premier:
            url = self.get_url(params)
        else:
            url = self.get_signed_url(params)

        return self.GetService_url(url)
[ "def", "geocode", "(", "self", ",", "string", ",", "bounds", "=", "None", ",", "region", "=", "None", ",", "language", "=", "None", ",", "sensor", "=", "False", ")", ":", "if", "isinstance", "(", "string", ",", "unicode", ")", ":", "string", "=", "string", ".", "encode", "(", "'utf-8'", ")", "params", "=", "{", "'address'", ":", "self", ".", "format_string", "%", "string", ",", "'sensor'", ":", "str", "(", "sensor", ")", ".", "lower", "(", ")", "}", "if", "bounds", ":", "params", "[", "'bounds'", "]", "=", "bounds", "if", "region", ":", "params", "[", "'region'", "]", "=", "region", "if", "language", ":", "params", "[", "'language'", "]", "=", "language", "if", "not", "self", ".", "premier", ":", "url", "=", "self", ".", "get_url", "(", "params", ")", "else", ":", "url", "=", "self", ".", "get_signed_url", "(", "params", ")", "return", "self", ".", "GetService_url", "(", "url", ")" ]
Geocode an address. Please refer to the Google Maps Web API for the details of the parameters
[ "Geocode", "an", "address", ".", "Pls", "refer", "to", "the", "Google", "Maps", "Web", "API", "for", "the", "details", "of", "the", "parameters" ]
python
train
pymoca/pymoca
src/pymoca/backends/casadi/api.py
https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/casadi/api.py#L246-L416
def load_model(model_folder: str, model_name: str, compiler_options: Dict[str, str]) -> CachedModel: """ Loads a precompiled CasADi model into a CachedModel instance. :param model_folder: Folder where the precompiled CasADi model is located. :param model_name: Name of the model. :param compiler_options: Dictionary of compiler options. :returns: CachedModel instance. """ db_file = os.path.join(model_folder, model_name + ".pymoca_cache") if compiler_options.get('mtime_check', True): # Mtime check cache_mtime = os.path.getmtime(db_file) for folder in [model_folder] + compiler_options.get('library_folders', []): for root, dir, files in os.walk(folder, followlinks=True): for item in fnmatch.filter(files, "*.mo"): filename = os.path.join(root, item) if os.path.getmtime(filename) > cache_mtime: raise InvalidCacheError("Cache out of date") # Create empty model object model = CachedModel() # Load metadata with open(db_file, 'rb') as f: db = pickle.load(f) if db['version'] != __version__: raise InvalidCacheError('Cache generated for a different version of pymoca') # Check compiler options. We ignore the library folders, as they have # already been checked, and checking them will impede platform # portability of the cache. exclude_options = ['library_folders'] old_opts = {k: v for k, v in db['options'].items() if k not in exclude_options} new_opts = {k: v for k, v in compiler_options.items() if k not in exclude_options} if old_opts != new_opts: raise InvalidCacheError('Cache generated for different compiler options') # Pickles are platform independent, but dynamic libraries are not if compiler_options.get('codegen', False): if db['library_os'] != os.name: raise InvalidCacheError('Cache generated for incompatible OS') # Include references to the shared libraries for o in ['dae_residual', 'initial_residual', 'variable_metadata', 'delay_arguments']: if isinstance(db[o], str): # Path to codegen'd library f = ca.external(o, db[o]) else: # Pickled CasADi Function; use as is assert isinstance(db[o], ca.Function) f = db[o] setattr(model, '_' + o + '_function', f) # Load variables per category variables_with_metadata = ['states', 'alg_states', 'inputs', 'parameters', 'constants'] variable_dict = {} for key in variables_with_metadata: variables = getattr(model, key) for i, d in enumerate(db[key]): variable = Variable.from_dict(d) variables.append(variable) variable_dict[variable.symbol.name()] = variable model.der_states = [Variable.from_dict(d) for d in db['der_states']] model.outputs = db['outputs'] model.delay_states = db['delay_states'] model.alias_relation = db['alias_relation'] # Evaluate variable metadata: parameter_vector = ca.veccat(*[v.symbol for v in model.parameters]) metadata = dict(zip(variables_with_metadata, model.variable_metadata_function(parameter_vector))) independent_metadata = dict(zip( variables_with_metadata, (np.array(x) for x in model.variable_metadata_function(ca.veccat(*[np.nan for v in model.parameters]))))) for k, key in enumerate(variables_with_metadata): m = db[key + "__metadata_dependent"] for i, d in enumerate(db[key]): variable = variable_dict[d['name']] for j, tmp in enumerate(CASADI_ATTRIBUTES): if m[i, j]: setattr(variable, tmp, metadata[key][i, j]) else: setattr(variable, tmp, independent_metadata[key][i, j]) # Evaluate delay arguments: if model.delay_states: args = [model.time, ca.veccat(*model._symbols(model.states)), ca.veccat(*model._symbols(model.der_states)), ca.veccat(*model._symbols(model.alg_states)), ca.veccat(*model._symbols(model.inputs)), 
ca.veccat(*model._symbols(model.constants)), ca.veccat(*model._symbols(model.parameters))] delay_arguments_raw = model.delay_arguments_function(*args) nan_args = [ca.repmat(np.nan, *arg.size()) for arg in args] independent_delay_arguments_raw = model.delay_arguments_function(*nan_args) delay_expressions_raw = delay_arguments_raw[::2] delay_durations_raw = delay_arguments_raw[1::2] independent_delay_durations_raw = independent_delay_arguments_raw[1::2] assert 1 == len({len(delay_expressions_raw), len(delay_durations_raw), len(independent_delay_durations_raw)}) all_symbols = [model.time, *model._symbols(model.states), *model._symbols(model.der_states), *model._symbols(model.alg_states), *model._symbols(model.inputs), *model._symbols(model.constants), *model._symbols(model.parameters)] duration_dependencies = db['__delay_duration_dependent'] # Get rid of false dependency symbols not used in any delay # durations. This significantly reduces the work the (slow) # substitute() calls have to do later on. actual_deps = sorted(set(np.array(duration_dependencies).ravel())) actual_dep_symbols = [np.nan] * len(all_symbols) for i in actual_deps: actual_dep_symbols[i] = all_symbols[i] delay_durations_simplified = ca.Function( 'replace_false_deps', all_symbols, delay_durations_raw).call( actual_dep_symbols) # Get rid of remaining hidden dependencies in the delay durations for i, expr in enumerate(delay_expressions_raw): if duration_dependencies[i]: dur = delay_durations_simplified[i] if len(duration_dependencies[i]) < len(actual_deps): deps = set(ca.symvar(dur)) actual_deps = {all_symbols[j] for j in duration_dependencies[i]} false_deps = deps - actual_deps if false_deps: [dur] = ca.substitute( [dur], list(false_deps), [np.nan] * len(false_deps)) else: # Already removed all false dependencies pass else: dur = independent_delay_durations_raw[i] model.delay_arguments.append(DelayArgument(expr, dur)) # Try to coerce parameters into their Python types for p in model.parameters: for attr in CASADI_ATTRIBUTES: v = getattr(p, attr) v_mx = ca.MX(v) if v_mx.is_constant() and v_mx.is_regular(): setattr(p, attr, p.python_type(v)) # Done return model
[ "def", "load_model", "(", "model_folder", ":", "str", ",", "model_name", ":", "str", ",", "compiler_options", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "CachedModel", ":", "db_file", "=", "os", ".", "path", ".", "join", "(", "model_folder", ",", "model_name", "+", "\".pymoca_cache\"", ")", "if", "compiler_options", ".", "get", "(", "'mtime_check'", ",", "True", ")", ":", "# Mtime check", "cache_mtime", "=", "os", ".", "path", ".", "getmtime", "(", "db_file", ")", "for", "folder", "in", "[", "model_folder", "]", "+", "compiler_options", ".", "get", "(", "'library_folders'", ",", "[", "]", ")", ":", "for", "root", ",", "dir", ",", "files", "in", "os", ".", "walk", "(", "folder", ",", "followlinks", "=", "True", ")", ":", "for", "item", "in", "fnmatch", ".", "filter", "(", "files", ",", "\"*.mo\"", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "root", ",", "item", ")", "if", "os", ".", "path", ".", "getmtime", "(", "filename", ")", ">", "cache_mtime", ":", "raise", "InvalidCacheError", "(", "\"Cache out of date\"", ")", "# Create empty model object", "model", "=", "CachedModel", "(", ")", "# Load metadata", "with", "open", "(", "db_file", ",", "'rb'", ")", "as", "f", ":", "db", "=", "pickle", ".", "load", "(", "f", ")", "if", "db", "[", "'version'", "]", "!=", "__version__", ":", "raise", "InvalidCacheError", "(", "'Cache generated for a different version of pymoca'", ")", "# Check compiler options. We ignore the library folders, as they have", "# already been checked, and checking them will impede platform", "# portability of the cache.", "exclude_options", "=", "[", "'library_folders'", "]", "old_opts", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "db", "[", "'options'", "]", ".", "items", "(", ")", "if", "k", "not", "in", "exclude_options", "}", "new_opts", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "compiler_options", ".", "items", "(", ")", "if", "k", "not", "in", "exclude_options", "}", "if", "old_opts", "!=", "new_opts", ":", "raise", "InvalidCacheError", "(", "'Cache generated for different compiler options'", ")", "# Pickles are platform independent, but dynamic libraries are not", "if", "compiler_options", ".", "get", "(", "'codegen'", ",", "False", ")", ":", "if", "db", "[", "'library_os'", "]", "!=", "os", ".", "name", ":", "raise", "InvalidCacheError", "(", "'Cache generated for incompatible OS'", ")", "# Include references to the shared libraries", "for", "o", "in", "[", "'dae_residual'", ",", "'initial_residual'", ",", "'variable_metadata'", ",", "'delay_arguments'", "]", ":", "if", "isinstance", "(", "db", "[", "o", "]", ",", "str", ")", ":", "# Path to codegen'd library", "f", "=", "ca", ".", "external", "(", "o", ",", "db", "[", "o", "]", ")", "else", ":", "# Pickled CasADi Function; use as is", "assert", "isinstance", "(", "db", "[", "o", "]", ",", "ca", ".", "Function", ")", "f", "=", "db", "[", "o", "]", "setattr", "(", "model", ",", "'_'", "+", "o", "+", "'_function'", ",", "f", ")", "# Load variables per category", "variables_with_metadata", "=", "[", "'states'", ",", "'alg_states'", ",", "'inputs'", ",", "'parameters'", ",", "'constants'", "]", "variable_dict", "=", "{", "}", "for", "key", "in", "variables_with_metadata", ":", "variables", "=", "getattr", "(", "model", ",", "key", ")", "for", "i", ",", "d", "in", "enumerate", "(", "db", "[", "key", "]", ")", ":", "variable", "=", "Variable", ".", "from_dict", "(", "d", ")", "variables", ".", "append", "(", "variable", ")", "variable_dict", "[", "variable", ".", "symbol", 
".", "name", "(", ")", "]", "=", "variable", "model", ".", "der_states", "=", "[", "Variable", ".", "from_dict", "(", "d", ")", "for", "d", "in", "db", "[", "'der_states'", "]", "]", "model", ".", "outputs", "=", "db", "[", "'outputs'", "]", "model", ".", "delay_states", "=", "db", "[", "'delay_states'", "]", "model", ".", "alias_relation", "=", "db", "[", "'alias_relation'", "]", "# Evaluate variable metadata:", "parameter_vector", "=", "ca", ".", "veccat", "(", "*", "[", "v", ".", "symbol", "for", "v", "in", "model", ".", "parameters", "]", ")", "metadata", "=", "dict", "(", "zip", "(", "variables_with_metadata", ",", "model", ".", "variable_metadata_function", "(", "parameter_vector", ")", ")", ")", "independent_metadata", "=", "dict", "(", "zip", "(", "variables_with_metadata", ",", "(", "np", ".", "array", "(", "x", ")", "for", "x", "in", "model", ".", "variable_metadata_function", "(", "ca", ".", "veccat", "(", "*", "[", "np", ".", "nan", "for", "v", "in", "model", ".", "parameters", "]", ")", ")", ")", ")", ")", "for", "k", ",", "key", "in", "enumerate", "(", "variables_with_metadata", ")", ":", "m", "=", "db", "[", "key", "+", "\"__metadata_dependent\"", "]", "for", "i", ",", "d", "in", "enumerate", "(", "db", "[", "key", "]", ")", ":", "variable", "=", "variable_dict", "[", "d", "[", "'name'", "]", "]", "for", "j", ",", "tmp", "in", "enumerate", "(", "CASADI_ATTRIBUTES", ")", ":", "if", "m", "[", "i", ",", "j", "]", ":", "setattr", "(", "variable", ",", "tmp", ",", "metadata", "[", "key", "]", "[", "i", ",", "j", "]", ")", "else", ":", "setattr", "(", "variable", ",", "tmp", ",", "independent_metadata", "[", "key", "]", "[", "i", ",", "j", "]", ")", "# Evaluate delay arguments:", "if", "model", ".", "delay_states", ":", "args", "=", "[", "model", ".", "time", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "states", ")", ")", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "der_states", ")", ")", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "alg_states", ")", ")", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "inputs", ")", ")", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "constants", ")", ")", ",", "ca", ".", "veccat", "(", "*", "model", ".", "_symbols", "(", "model", ".", "parameters", ")", ")", "]", "delay_arguments_raw", "=", "model", ".", "delay_arguments_function", "(", "*", "args", ")", "nan_args", "=", "[", "ca", ".", "repmat", "(", "np", ".", "nan", ",", "*", "arg", ".", "size", "(", ")", ")", "for", "arg", "in", "args", "]", "independent_delay_arguments_raw", "=", "model", ".", "delay_arguments_function", "(", "*", "nan_args", ")", "delay_expressions_raw", "=", "delay_arguments_raw", "[", ":", ":", "2", "]", "delay_durations_raw", "=", "delay_arguments_raw", "[", "1", ":", ":", "2", "]", "independent_delay_durations_raw", "=", "independent_delay_arguments_raw", "[", "1", ":", ":", "2", "]", "assert", "1", "==", "len", "(", "{", "len", "(", "delay_expressions_raw", ")", ",", "len", "(", "delay_durations_raw", ")", ",", "len", "(", "independent_delay_durations_raw", ")", "}", ")", "all_symbols", "=", "[", "model", ".", "time", ",", "*", "model", ".", "_symbols", "(", "model", ".", "states", ")", ",", "*", "model", ".", "_symbols", "(", "model", ".", "der_states", ")", ",", "*", "model", ".", "_symbols", "(", "model", ".", "alg_states", ")", ",", "*", "model", ".", "_symbols", "(", "model", ".", 
"inputs", ")", ",", "*", "model", ".", "_symbols", "(", "model", ".", "constants", ")", ",", "*", "model", ".", "_symbols", "(", "model", ".", "parameters", ")", "]", "duration_dependencies", "=", "db", "[", "'__delay_duration_dependent'", "]", "# Get rid of false dependency symbols not used in any delay", "# durations. This significantly reduces the work the (slow)", "# substitute() calls have to do later on.", "actual_deps", "=", "sorted", "(", "set", "(", "np", ".", "array", "(", "duration_dependencies", ")", ".", "ravel", "(", ")", ")", ")", "actual_dep_symbols", "=", "[", "np", ".", "nan", "]", "*", "len", "(", "all_symbols", ")", "for", "i", "in", "actual_deps", ":", "actual_dep_symbols", "[", "i", "]", "=", "all_symbols", "[", "i", "]", "delay_durations_simplified", "=", "ca", ".", "Function", "(", "'replace_false_deps'", ",", "all_symbols", ",", "delay_durations_raw", ")", ".", "call", "(", "actual_dep_symbols", ")", "# Get rid of remaining hidden dependencies in the delay durations", "for", "i", ",", "expr", "in", "enumerate", "(", "delay_expressions_raw", ")", ":", "if", "duration_dependencies", "[", "i", "]", ":", "dur", "=", "delay_durations_simplified", "[", "i", "]", "if", "len", "(", "duration_dependencies", "[", "i", "]", ")", "<", "len", "(", "actual_deps", ")", ":", "deps", "=", "set", "(", "ca", ".", "symvar", "(", "dur", ")", ")", "actual_deps", "=", "{", "all_symbols", "[", "j", "]", "for", "j", "in", "duration_dependencies", "[", "i", "]", "}", "false_deps", "=", "deps", "-", "actual_deps", "if", "false_deps", ":", "[", "dur", "]", "=", "ca", ".", "substitute", "(", "[", "dur", "]", ",", "list", "(", "false_deps", ")", ",", "[", "np", ".", "nan", "]", "*", "len", "(", "false_deps", ")", ")", "else", ":", "# Already removed all false dependencies", "pass", "else", ":", "dur", "=", "independent_delay_durations_raw", "[", "i", "]", "model", ".", "delay_arguments", ".", "append", "(", "DelayArgument", "(", "expr", ",", "dur", ")", ")", "# Try to coerce parameters into their Python types", "for", "p", "in", "model", ".", "parameters", ":", "for", "attr", "in", "CASADI_ATTRIBUTES", ":", "v", "=", "getattr", "(", "p", ",", "attr", ")", "v_mx", "=", "ca", ".", "MX", "(", "v", ")", "if", "v_mx", ".", "is_constant", "(", ")", "and", "v_mx", ".", "is_regular", "(", ")", ":", "setattr", "(", "p", ",", "attr", ",", "p", ".", "python_type", "(", "v", ")", ")", "# Done", "return", "model" ]
Loads a precompiled CasADi model into a CachedModel instance. :param model_folder: Folder where the precompiled CasADi model is located. :param model_name: Name of the model. :param compiler_options: Dictionary of compiler options. :returns: CachedModel instance.
[ "Loads", "a", "precompiled", "CasADi", "model", "into", "a", "CachedModel", "instance", "." ]
python
train
twisted/mantissa
xmantissa/_webutil.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/_webutil.py#L65-L82
def wrapModel(self, model): """ Converts application-provided model objects to L{IResource} providers. """ res = IResource(model, None) if res is None: frag = INavigableFragment(model) fragmentName = getattr(frag, 'fragmentName', None) if fragmentName is not None: fragDocFactory = self._getDocFactory(fragmentName) if fragDocFactory is not None: frag.docFactory = fragDocFactory if frag.docFactory is None: raise CouldNotLoadFromThemes(frag, self._preferredThemes()) useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement)) return self._wrapNavFrag(frag, useAthena) else: return res
[ "def", "wrapModel", "(", "self", ",", "model", ")", ":", "res", "=", "IResource", "(", "model", ",", "None", ")", "if", "res", "is", "None", ":", "frag", "=", "INavigableFragment", "(", "model", ")", "fragmentName", "=", "getattr", "(", "frag", ",", "'fragmentName'", ",", "None", ")", "if", "fragmentName", "is", "not", "None", ":", "fragDocFactory", "=", "self", ".", "_getDocFactory", "(", "fragmentName", ")", "if", "fragDocFactory", "is", "not", "None", ":", "frag", ".", "docFactory", "=", "fragDocFactory", "if", "frag", ".", "docFactory", "is", "None", ":", "raise", "CouldNotLoadFromThemes", "(", "frag", ",", "self", ".", "_preferredThemes", "(", ")", ")", "useAthena", "=", "isinstance", "(", "frag", ",", "(", "athena", ".", "LiveFragment", ",", "athena", ".", "LiveElement", ")", ")", "return", "self", ".", "_wrapNavFrag", "(", "frag", ",", "useAthena", ")", "else", ":", "return", "res" ]
Converts application-provided model objects to L{IResource} providers.
[ "Converts", "application", "-", "provided", "model", "objects", "to", "L", "{", "IResource", "}", "providers", "." ]
python
train
atmos-python/atmos
atmos/util.py
https://github.com/atmos-python/atmos/blob/f4af8eaca23cce881bde979599d15d322fc1935e/atmos/util.py#L143-L158
def parse_derivative_string(string, quantity_dict): ''' Assuming the string is of the form d(var1)d(var2), returns var1, var2. Raises ValueError if the string is not of this form, or if the vars are not keys in the quantity_dict, or if var2 is not a coordinate-like variable. ''' match = derivative_prog.match(string) if match is None: raise ValueError('string is not in the form of a derivative') varname = match.group(1) coordname = match.group(2) if (varname not in quantity_dict.keys() or coordname not in quantity_dict.keys()): raise ValueError('variable in string not a valid quantity') return varname, coordname
[ "def", "parse_derivative_string", "(", "string", ",", "quantity_dict", ")", ":", "match", "=", "derivative_prog", ".", "match", "(", "string", ")", "if", "match", "is", "None", ":", "raise", "ValueError", "(", "'string is not in the form of a derivative'", ")", "varname", "=", "match", ".", "group", "(", "1", ")", "coordname", "=", "match", ".", "group", "(", "2", ")", "if", "(", "varname", "not", "in", "quantity_dict", ".", "keys", "(", ")", "or", "coordname", "not", "in", "quantity_dict", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "'variable in string not a valid quantity'", ")", "return", "varname", ",", "coordname" ]
Assuming the string is of the form d(var1)d(var2), returns var1, var2. Raises ValueError if the string is not of this form, or if the vars are not keys in the quantity_dict, or if var2 is not a coordinate-like variable.
[ "Assuming", "the", "string", "is", "of", "the", "form", "d", "(", "var1", ")", "d", "(", "var2", ")", "returns", "var1", "var2", ".", "Raises", "ValueError", "if", "the", "string", "is", "not", "of", "this", "form", "or", "if", "the", "vars", "are", "not", "keys", "in", "the", "quantity_dict", "or", "if", "var2", "is", "not", "a", "coordinate", "-", "like", "variable", "." ]
python
train
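`derivative_prog` is defined elsewhere in util.py; a pattern of roughly the following shape (an assumption, not the module's actual regex) reproduces the documented d(var1)d(var2) behaviour:

.. code-block:: python

    import re

    derivative_prog = re.compile(r'^d\((.+?)\)d\((.+?)\)$')  # assumed shape

    match = derivative_prog.match('d(T)d(p)')
    varname, coordname = match.group(1), match.group(2)
    assert (varname, coordname) == ('T', 'p')
    assert derivative_prog.match('dTdp') is None  # not in derivative form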
devassistant/devassistant
devassistant/dapi/dapicli.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L95-L99
def data(link):
    '''Returns a dictionary from the requested link'''
    link = _remove_api_url_from_link(link)
    req = _get_from_dapi_or_mirror(link)
    return _process_req(req)
[ "def", "data", "(", "link", ")", ":", "link", "=", "_remove_api_url_from_link", "(", "link", ")", "req", "=", "_get_from_dapi_or_mirror", "(", "link", ")", "return", "_process_req", "(", "req", ")" ]
Returns a dictionary from the requested link
[ "Returns", "a", "dictionary", "from", "requested", "link" ]
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L1365-L1369
def load_texture(self, file_path): """Generate our sprite's surface by loading the specified image from disk. Note that this automatically centers the origin.""" self.image = pygame.image.load(file_path) self.apply_texture(self.image)
[ "def", "load_texture", "(", "self", ",", "file_path", ")", ":", "self", ".", "image", "=", "pygame", ".", "image", ".", "load", "(", "file_path", ")", "self", ".", "apply_texture", "(", "self", ".", "image", ")" ]
Generate our sprite's surface by loading the specified image from disk. Note that this automatically centers the origin.
[ "Generate", "our", "sprite", "s", "surface", "by", "loading", "the", "specified", "image", "from", "disk", ".", "Note", "that", "this", "automatically", "centers", "the", "origin", "." ]
python
train
facelessuser/pyspelling
pyspelling/filters/xml.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/xml.py#L250-L253
def sfilter(self, source): """Filter.""" return self._filter(source.text, source.context, source.encoding)
[ "def", "sfilter", "(", "self", ",", "source", ")", ":", "return", "self", ".", "_filter", "(", "source", ".", "text", ",", "source", ".", "context", ",", "source", ".", "encoding", ")" ]
Filter.
[ "Filter", "." ]
python
train
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1439-L1445
def update_payload(self, fields=None): """Wrap submitted data within an extra dict.""" payload = super(ConfigTemplate, self).update_payload(fields) if 'template_combinations' in payload: payload['template_combinations_attributes'] = payload.pop( 'template_combinations') return {u'config_template': payload}
[ "def", "update_payload", "(", "self", ",", "fields", "=", "None", ")", ":", "payload", "=", "super", "(", "ConfigTemplate", ",", "self", ")", ".", "update_payload", "(", "fields", ")", "if", "'template_combinations'", "in", "payload", ":", "payload", "[", "'template_combinations_attributes'", "]", "=", "payload", ".", "pop", "(", "'template_combinations'", ")", "return", "{", "u'config_template'", ":", "payload", "}" ]
Wrap submitted data within an extra dict.
[ "Wrap", "submitted", "data", "within", "an", "extra", "dict", "." ]
python
train
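The rename-and-wrap transformation, shown on a plain dict (the field values are placeholders):

.. code-block:: python

    payload = {'name': 'PXE default',
               'template_combinations': [{'hostgroup_id': 1}]}

    # Same steps as update_payload: pop, rename, then wrap in an outer dict.
    if 'template_combinations' in payload:
        payload['template_combinations_attributes'] = payload.pop(
            'template_combinations')
    wrapped = {'config_template': payload}
    # {'config_template': {'name': 'PXE default',
    #                      'template_combinations_attributes': [{'hostgroup_id': 1}]}}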
Yubico/python-yubico
yubico/yubikey_neo_usb_hid.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_neo_usb_hid.py#L269-L277
def _encode_ndef_text_params(self, data):
        """
        Prepend language and encoding information to data, according to
        nfcforum-ts-rtd-text-1-0.pdf
        """
        status = len(self.ndef_text_lang)
        if self.ndef_text_enc == 'UTF16':
            status = status | 0b10000000
        return yubico_util.chr_byte(status) + self.ndef_text_lang + data
[ "def", "_encode_ndef_text_params", "(", "self", ",", "data", ")", ":", "status", "=", "len", "(", "self", ".", "ndef_text_lang", ")", "if", "self", ".", "ndef_text_enc", "==", "'UTF16'", ":", "status", "=", "status", "&", "0b10000000", "return", "yubico_util", ".", "chr_byte", "(", "status", ")", "+", "self", ".", "ndef_text_lang", "+", "data" ]
Prepend language and encoding information to data, according to
nfcforum-ts-rtd-text-1-0.pdf
[ "Prepend", "language", "and", "enconding", "information", "to", "data", "according", "to", "nfcforum", "-", "ts", "-", "rtd", "-", "text", "-", "1", "-", "0", ".", "pdf" ]
python
train
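Per nfcforum-ts-rtd-text-1-0, bit 7 of the RTD Text status byte selects UTF-16 and the low bits carry the language-code length, which is why the flag must be set with a bitwise OR (the original AND, fixed above, would always clear it, since the length never has bit 7 set). Building the byte by hand:

.. code-block:: python

    lang = b'en'
    status_utf8 = len(lang)                 # 0b00000010
    status_utf16 = len(lang) | 0b10000000   # 0b10000010: bit 7 flags UTF-16
    assert len(lang) & 0b10000000 == 0      # the old AND would zero the status

    record = bytes([status_utf16]) + lang + 'hi'.encode('utf-16')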
keon/algorithms
algorithms/strings/text_justification.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/strings/text_justification.py#L34-L89
def text_justification(words, max_width): ''' :type words: list :type max_width: int :rtype: list ''' ret = [] # return value row_len = 0 # current length of strs in a row row_words = [] # current words in a row index = 0 # the index of current word in words is_first_word = True # is current word the first in a row while index < len(words): while row_len <= max_width and index < len(words): if len(words[index]) > max_width: raise ValueError("there exists word whose length is larger than max_width") tmp = row_len row_words.append(words[index]) tmp += len(words[index]) if not is_first_word: tmp += 1 # except for the first word, each word should have at least a ' ' before it. if tmp > max_width: row_words.pop() break row_len = tmp index += 1 is_first_word = False # here we have already got a row of str , then we should supplement enough ' ' to make sure the length is max_width. row = "" # if the row is the last if index == len(words): for word in row_words: row += (word + ' ') row = row[:-1] row += ' ' * (max_width - len(row)) # not the last row and more than one word elif len(row_words) != 1: space_num = max_width - row_len space_num_of_each_interval = space_num // (len(row_words) - 1) space_num_rest = space_num - space_num_of_each_interval * (len(row_words) - 1) for j in range(len(row_words)): row += row_words[j] if j != len(row_words) - 1: row += ' ' * (1 + space_num_of_each_interval) if space_num_rest > 0: row += ' ' space_num_rest -= 1 # row with only one word else: row += row_words[0] row += ' ' * (max_width - len(row)) ret.append(row) # after a row , reset those value row_len = 0 row_words = [] is_first_word = True return ret
[ "def", "text_justification", "(", "words", ",", "max_width", ")", ":", "ret", "=", "[", "]", "# return value", "row_len", "=", "0", "# current length of strs in a row", "row_words", "=", "[", "]", "# current words in a row", "index", "=", "0", "# the index of current word in words", "is_first_word", "=", "True", "# is current word the first in a row", "while", "index", "<", "len", "(", "words", ")", ":", "while", "row_len", "<=", "max_width", "and", "index", "<", "len", "(", "words", ")", ":", "if", "len", "(", "words", "[", "index", "]", ")", ">", "max_width", ":", "raise", "ValueError", "(", "\"there exists word whose length is larger than max_width\"", ")", "tmp", "=", "row_len", "row_words", ".", "append", "(", "words", "[", "index", "]", ")", "tmp", "+=", "len", "(", "words", "[", "index", "]", ")", "if", "not", "is_first_word", ":", "tmp", "+=", "1", "# except for the first word, each word should have at least a ' ' before it.", "if", "tmp", ">", "max_width", ":", "row_words", ".", "pop", "(", ")", "break", "row_len", "=", "tmp", "index", "+=", "1", "is_first_word", "=", "False", "# here we have already got a row of str , then we should supplement enough ' ' to make sure the length is max_width.", "row", "=", "\"\"", "# if the row is the last", "if", "index", "==", "len", "(", "words", ")", ":", "for", "word", "in", "row_words", ":", "row", "+=", "(", "word", "+", "' '", ")", "row", "=", "row", "[", ":", "-", "1", "]", "row", "+=", "' '", "*", "(", "max_width", "-", "len", "(", "row", ")", ")", "# not the last row and more than one word", "elif", "len", "(", "row_words", ")", "!=", "1", ":", "space_num", "=", "max_width", "-", "row_len", "space_num_of_each_interval", "=", "space_num", "//", "(", "len", "(", "row_words", ")", "-", "1", ")", "space_num_rest", "=", "space_num", "-", "space_num_of_each_interval", "*", "(", "len", "(", "row_words", ")", "-", "1", ")", "for", "j", "in", "range", "(", "len", "(", "row_words", ")", ")", ":", "row", "+=", "row_words", "[", "j", "]", "if", "j", "!=", "len", "(", "row_words", ")", "-", "1", ":", "row", "+=", "' '", "*", "(", "1", "+", "space_num_of_each_interval", ")", "if", "space_num_rest", ">", "0", ":", "row", "+=", "' '", "space_num_rest", "-=", "1", "# row with only one word", "else", ":", "row", "+=", "row_words", "[", "0", "]", "row", "+=", "' '", "*", "(", "max_width", "-", "len", "(", "row", ")", ")", "ret", ".", "append", "(", "row", ")", "# after a row , reset those value", "row_len", "=", "0", "row_words", "=", "[", "]", "is_first_word", "=", "True", "return", "ret" ]
:type words: list :type max_width: int :rtype: list
[ ":", "type", "words", ":", "list", ":", "type", "max_width", ":", "int", ":", "rtype", ":", "list" ]
python
train
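With the function above in scope, the classic example yields fully justified rows of exactly `max_width` characters:

.. code-block:: python

    words = ["This", "is", "an", "example", "of", "text", "justification."]
    for row in text_justification(words, 16):
        print(repr(row))
    # 'This    is    an'
    # 'example  of text'
    # 'justification.  '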
inveniosoftware/invenio-theme
invenio_theme/bundles.py
https://github.com/inveniosoftware/invenio-theme/blob/4e07607b1a40805df1d8e4ab9cc2afd728579ca9/invenio_theme/bundles.py#L33-L38
def _get_contents(self): """Create strings from lazy strings.""" return [ str(value) if is_lazy_string(value) else value for value in super(LazyNpmBundle, self)._get_contents() ]
[ "def", "_get_contents", "(", "self", ")", ":", "return", "[", "str", "(", "value", ")", "if", "is_lazy_string", "(", "value", ")", "else", "value", "for", "value", "in", "super", "(", "LazyNpmBundle", ",", "self", ")", ".", "_get_contents", "(", ")", "]" ]
Create strings from lazy strings.
[ "Create", "strings", "from", "lazy", "strings", "." ]
python
train
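The method above realizes lazy strings into plain `str` before the parent bundle processes them. A hedged standalone sketch of that conversion pattern, assuming the `speaklater` package (which provides `make_lazy_string` and `is_lazy_string`) is installed; the bundle contents here are made up.

from speaklater import is_lazy_string, make_lazy_string

# A lazy string resolves its value only when rendered.
lazy = make_lazy_string(lambda: "bootstrap-sass")
contents = [lazy, "jquery"]

# Same conversion as _get_contents: realize lazy values, pass others through.
resolved = [str(v) if is_lazy_string(v) else v for v in contents]
print(resolved)  # ['bootstrap-sass', 'jquery']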
nabetama/slacky
slacky/rest/rest.py
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L504-L512
def kick(self, group_name, user): """ https://api.slack.com/methods/groups.kick """ group_id = self.get_group_id(group_name) self.params.update({ 'channel': group_id, 'user': user, }) return FromUrl('https://slack.com/api/groups.kick', self._requests)(data=self.params).post()
[ "def", "kick", "(", "self", ",", "group_name", ",", "user", ")", ":", "group_id", "=", "self", ".", "get_group_id", "(", "group_name", ")", "self", ".", "params", ".", "update", "(", "{", "'channel'", ":", "group_id", ",", "'user'", ":", "user", ",", "}", ")", "return", "FromUrl", "(", "'https://slack.com/api/groups.kick'", ",", "self", ".", "_requests", ")", "(", "data", "=", "self", ".", "params", ")", ".", "post", "(", ")" ]
https://api.slack.com/methods/groups.kick
[ "https", ":", "//", "api", ".", "slack", ".", "com", "/", "methods", "/", "groups", ".", "kick" ]
python
train
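For context, a hedged sketch of the equivalent raw HTTP call the wrapper above ultimately issues; the token and IDs are placeholders, and `groups.kick` is a legacy Slack Web API method.

import requests

resp = requests.post(
    "https://slack.com/api/groups.kick",
    data={
        "token": "xoxp-placeholder-token",  # placeholder credential
        "channel": "G012ABCDE",             # placeholder private group ID
        "user": "U023BECGF",                # placeholder user ID
    },
)
print(resp.json())  # {'ok': True} on success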
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L2192-L2223
def _register_update(self, replot=False, fmt={}, force=False,
                     todefault=False):
    """
    Register new formatoptions for updating

    Parameters
    ----------
    replot: bool
        Boolean that determines whether the data specific formatoptions
        shall be updated in any case or not. Note, if `dims` is not empty
        or any coordinate keyword is in ``**kwargs``, this will be set to
        True automatically
    fmt: dict
        Keys may be any valid formatoption of the formatoptions in the
        :attr:`plotter`
    force: str, list of str or bool
        If formatoption key (i.e. string) or list of formatoption keys,
        they are definitely updated whether they changed or not.
        If True, all the given formatoptions in this call of the
        :meth:`update` method are updated
    todefault: bool
        If True, all changed formatoptions (except the registered ones)
        are updated to their default value as stored in the
        :attr:`~psyplot.plotter.Plotter.rc` attribute

    See Also
    --------
    start_update"""
    self.replot = self.replot or replot
    if self.plotter is not None:
        self.plotter._register_update(replot=self.replot, fmt=fmt,
                                      force=force, todefault=todefault)
[ "def", "_register_update", "(", "self", ",", "replot", "=", "False", ",", "fmt", "=", "{", "}", ",", "force", "=", "False", ",", "todefault", "=", "False", ")", ":", "self", ".", "replot", "=", "self", ".", "replot", "or", "replot", "if", "self", ".", "plotter", "is", "not", "None", ":", "self", ".", "plotter", ".", "_register_update", "(", "replot", "=", "self", ".", "replot", ",", "fmt", "=", "fmt", ",", "force", "=", "force", ",", "todefault", "=", "todefault", ")" ]
Register new formatoptions for updating

Parameters
----------
replot: bool
    Boolean that determines whether the data specific formatoptions
    shall be updated in any case or not. Note, if `dims` is not empty
    or any coordinate keyword is in ``**kwargs``, this will be set to
    True automatically
fmt: dict
    Keys may be any valid formatoption of the formatoptions in the
    :attr:`plotter`
force: str, list of str or bool
    If formatoption key (i.e. string) or list of formatoption keys,
    they are definitely updated whether they changed or not.
    If True, all the given formatoptions in this call of the
    :meth:`update` method are updated
todefault: bool
    If True, all changed formatoptions (except the registered ones)
    are updated to their default value as stored in the
    :attr:`~psyplot.plotter.Plotter.rc` attribute

See Also
--------
start_update
[ "Register", "new", "formatoptions", "for", "updating" ]
python
train
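In practice this private method is reached through psyplot's public update machinery. A hypothetical usage sketch: the file name, variable name, and formatoption values are illustrative, and `plot2d` requires the psy-simple plugin.

import psyplot.project as psy

maps = psy.plot.plot2d("my_data.nc", name="t2m")  # hypothetical dataset/variable
# update() registers the new formatoptions (via _register_update) and applies them
maps.update(cmap="viridis", force=["cmap"])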
pandas-dev/pandas
pandas/core/computation/align.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L114-L132
def _align(terms): """Align a set of terms""" try: # flatten the parse tree (a nested list, really) terms = list(com.flatten(terms)) except TypeError: # can't iterate so it must just be a constant or single variable if isinstance(terms.value, pd.core.generic.NDFrame): typ = type(terms.value) return typ, _zip_axes_from_type(typ, terms.value.axes) return np.result_type(terms.type), None # if all resolved variables are numeric scalars if all(term.is_scalar for term in terms): return _result_type_many(*(term.value for term in terms)).type, None # perform the main alignment typ, axes = _align_core(terms) return typ, axes
[ "def", "_align", "(", "terms", ")", ":", "try", ":", "# flatten the parse tree (a nested list, really)", "terms", "=", "list", "(", "com", ".", "flatten", "(", "terms", ")", ")", "except", "TypeError", ":", "# can't iterate so it must just be a constant or single variable", "if", "isinstance", "(", "terms", ".", "value", ",", "pd", ".", "core", ".", "generic", ".", "NDFrame", ")", ":", "typ", "=", "type", "(", "terms", ".", "value", ")", "return", "typ", ",", "_zip_axes_from_type", "(", "typ", ",", "terms", ".", "value", ".", "axes", ")", "return", "np", ".", "result_type", "(", "terms", ".", "type", ")", ",", "None", "# if all resolved variables are numeric scalars", "if", "all", "(", "term", ".", "is_scalar", "for", "term", "in", "terms", ")", ":", "return", "_result_type_many", "(", "*", "(", "term", ".", "value", "for", "term", "in", "terms", ")", ")", ".", "type", ",", "None", "# perform the main alignment", "typ", ",", "axes", "=", "_align_core", "(", "terms", ")", "return", "typ", ",", "axes" ]
Align a set of terms
[ "Align", "a", "set", "of", "terms" ]
python
train
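A small illustration of what the alignment step enables: `pd.eval` union-aligns operands with different indexes before evaluating. The values here are made up.

import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
s2 = pd.Series([10.0, 20.0, 30.0], index=["b", "c", "d"])

# Operands are aligned to the union index a, b, c, d; labels present in
# only one operand evaluate to NaN.
print(pd.eval("s1 + s2"))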
awslabs/sockeye
sockeye/encoder.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L747-L753
def get_max_seq_len(self) -> Optional[int]: """ :return: The maximum length supported by the encoder if such a restriction exists. """ max_seq_len = min((encoder.get_max_seq_len() for encoder in self.encoders if encoder.get_max_seq_len() is not None), default=None) return max_seq_len
[ "def", "get_max_seq_len", "(", "self", ")", "->", "Optional", "[", "int", "]", ":", "max_seq_len", "=", "min", "(", "(", "encoder", ".", "get_max_seq_len", "(", ")", "for", "encoder", "in", "self", ".", "encoders", "if", "encoder", ".", "get_max_seq_len", "(", ")", "is", "not", "None", ")", ",", "default", "=", "None", ")", "return", "max_seq_len" ]
:return: The maximum length supported by the encoder if such a restriction exists.
[ ":", "return", ":", "The", "maximum", "length", "supported", "by", "the", "encoder", "if", "such", "a", "restriction", "exists", "." ]
python
train
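The method reduces per-encoder limits with `min(..., default=None)` so that encoders imposing no restriction are ignored. A standalone sketch of the same pattern with illustrative values:

from typing import List, Optional

limits: List[Optional[int]] = [None, 512, None, 256]  # one entry per encoder
max_seq_len: Optional[int] = min(
    (lim for lim in limits if lim is not None), default=None
)
print(max_seq_len)  # 256; None if no encoder imposes a limit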
autokey/autokey
lib/autokey/scripting.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L384-L400
def open_file(self, title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs): """ Show an Open File dialog Usage: C{dialog.open_file(title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs)} @param title: window title for the dialog @param initialDir: starting directory for the file dialog @param fileTypes: file type filter expression @param rememberAs: gives an ID to this file dialog, allowing it to open at the last used path next time @return: a tuple containing the exit code and file path @rtype: C{DialogData(int, str)} """ if rememberAs is not None: return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes, ":" + rememberAs], kwargs) else: return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes], kwargs)
[ "def", "open_file", "(", "self", ",", "title", "=", "\"Open File\"", ",", "initialDir", "=", "\"~\"", ",", "fileTypes", "=", "\"*|All Files\"", ",", "rememberAs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "rememberAs", "is", "not", "None", ":", "return", "self", ".", "_run_kdialog", "(", "title", ",", "[", "\"--getopenfilename\"", ",", "initialDir", ",", "fileTypes", ",", "\":\"", "+", "rememberAs", "]", ",", "kwargs", ")", "else", ":", "return", "self", ".", "_run_kdialog", "(", "title", ",", "[", "\"--getopenfilename\"", ",", "initialDir", ",", "fileTypes", "]", ",", "kwargs", ")" ]
Show an Open File dialog Usage: C{dialog.open_file(title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs)} @param title: window title for the dialog @param initialDir: starting directory for the file dialog @param fileTypes: file type filter expression @param rememberAs: gives an ID to this file dialog, allowing it to open at the last used path next time @return: a tuple containing the exit code and file path @rtype: C{DialogData(int, str)}
[ "Show", "an", "Open", "File", "dialog", "Usage", ":", "C", "{", "dialog", ".", "open_file", "(", "title", "=", "Open", "File", "initialDir", "=", "~", "fileTypes", "=", "*", "|All", "Files", "rememberAs", "=", "None", "**", "kwargs", ")", "}" ]
python
train
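A hedged usage sketch as it would appear inside an AutoKey script, where a `dialog` object is injected into the script namespace; kdialog must be available, and the title, paths, and filter are illustrative.

exit_code, path = dialog.open_file(
    title="Choose a config",
    initialDir="~/projects",
    fileTypes="*.ini|INI Files",
    rememberAs="config-picker",  # kdialog reopens at the last used path
)
if exit_code == 0:  # kdialog exits with 0 when the user confirms
    print("Selected:", path)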
ClericPy/torequests
torequests/main.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/main.py#L357-L364
def callback_result(self):
    """Block the main thread until the future finishes, return the future.callback_result."""
    if self._state in [PENDING, RUNNING]:
        self.x
    if self._user_callbacks:
        return self._callback_result
    else:
        return self.x
[ "def", "callback_result", "(", "self", ")", ":", "if", "self", ".", "_state", "in", "[", "PENDING", ",", "RUNNING", "]", ":", "self", ".", "x", "if", "self", ".", "_user_callbacks", ":", "return", "self", ".", "_callback_result", "else", ":", "return", "self", ".", "x" ]
Block the main thread until the future finishes, return the future.callback_result.
[ "Block", "the", "main", "thead", "until", "future", "finish", "return", "the", "future", ".", "callback_result", "." ]
python
train
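A hedged usage sketch following the torequests README pattern; the URL is illustrative and the call needs network access.

from torequests import tPool

req = tPool()
task = req.get("https://httpbin.org/get", callback=lambda r: r.status_code)
# Blocks until the request and its callback finish, then returns the
# callback's return value rather than the raw response.
print(task.callback_result())  # e.g. 200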
MacHu-GWU/pathlib_mate-project
pathlib_mate/mate_path_filters.py
https://github.com/MacHu-GWU/pathlib_mate-project/blob/f9fb99dd7cc9ea05d1bec8b9ce8f659e8d97b0f1/pathlib_mate/mate_path_filters.py#L132-L141
def n_subfile(self): """ Count how many files in this directory (doesn't include files in sub folders). """ self.assert_is_dir_and_exists() n = 0 for _ in self.select_file(recursive=False): n += 1 return n
[ "def", "n_subfile", "(", "self", ")", ":", "self", ".", "assert_is_dir_and_exists", "(", ")", "n", "=", "0", "for", "_", "in", "self", ".", "select_file", "(", "recursive", "=", "False", ")", ":", "n", "+=", "1", "return", "n" ]
Count how many files in this directory (doesn't include files in sub folders).
[ "Count", "how", "many", "files", "in", "this", "directory", "(", "doesn", "t", "include", "files", "in", "sub", "folders", ")", "." ]
python
valid
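A minimal usage sketch assuming `pathlib_mate` is installed; the directory is illustrative, and `n_subfile` is called as a plain method to match the definition shown above (some releases may expose it as a property instead).

from pathlib_mate import Path

p = Path("/tmp")
# Counts files directly inside the directory, ignoring subfolders.
print(p.n_subfile())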
materialsproject/pymatgen-db
matgendb/util.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/util.py#L72-L86
def collection_keys(coll, sep='.'): """Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str """ def _keys(x, pre=''): for k in x: yield (pre + k) if isinstance(x[k], dict): for nested in _keys(x[k], pre + k + sep): yield nested return list(_keys(coll.find_one()))
[ "def", "collection_keys", "(", "coll", ",", "sep", "=", "'.'", ")", ":", "def", "_keys", "(", "x", ",", "pre", "=", "''", ")", ":", "for", "k", "in", "x", ":", "yield", "(", "pre", "+", "k", ")", "if", "isinstance", "(", "x", "[", "k", "]", ",", "dict", ")", ":", "for", "nested", "in", "_keys", "(", "x", "[", "k", "]", ",", "pre", "+", "k", "+", "sep", ")", ":", "yield", "nested", "return", "list", "(", "_keys", "(", "coll", ".", "find_one", "(", ")", ")", ")" ]
Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str
[ "Get", "a", "list", "of", "all", "(", "including", "nested", ")", "keys", "in", "a", "collection", ".", "Examines", "the", "first", "document", "in", "the", "collection", "." ]
python
train
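A standalone sketch of the nested-key walk above, applied to a plain dict instead of a live MongoDB document; the sample document is made up.

def dict_keys(doc, sep=".", pre=""):
    # Same recursion as the _keys helper in collection_keys, over an in-memory dict.
    for k in doc:
        yield pre + k
        if isinstance(doc[k], dict):
            yield from dict_keys(doc[k], sep, pre + k + sep)

doc = {"task_id": 1, "output": {"energy": -3.2, "bandgap": {"value": 1.1}}}
print(list(dict_keys(doc)))
# ['task_id', 'output', 'output.energy', 'output.bandgap', 'output.bandgap.value']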
awickert/gFlex
gflex/f2d.py
https://github.com/awickert/gFlex/blob/3ac32249375b0f8d342a142585d86ea4d905a5a0/gflex/f2d.py#L1503-L1544
def fd_solve(self):
    """
    w = fd_solve()
    Sparse flexural response calculation.
    Can be performed by direct factorization with UMFpack (default)
    or by an iterative minimum residual technique
    These are both the fastest of the standard Scipy builtin techniques in
    their respective classes
    Requires the coefficient matrix from "2D.coeff_matrix"
    """

    if self.Debug:
        try:
            # Will fail if scalar
            print("self.Te", self.Te.shape)
        except:
            pass
        print("self.qs", self.qs.shape)
        self.calc_max_flexural_wavelength()
        print("maxFlexuralWavelength_ncells: (x, y):", self.maxFlexuralWavelength_ncells_x, self.maxFlexuralWavelength_ncells_y)

    q0vector = self.qs.reshape(-1, order='C')
    if self.Solver == "iterative" or self.Solver == "Iterative":
        if self.Debug:
            print("Using generalized minimal residual method for iterative solution")
        if self.Verbose:
            print("Converging to a tolerance of", self.iterative_ConvergenceTolerance, "m between iterations")
        wvector = scipy.sparse.linalg.isolve.lgmres(self.coeff_matrix, q0vector)#, tol=1E-10)#,x0=woldvector)#,x0=wvector,tol=1E-15)
        wvector = wvector[0]  # Reach into tuple to get my array back
    else:
        if self.Solver == "direct" or self.Solver == "Direct":
            if self.Debug:
                print("Using direct solution with UMFpack")
        else:
            if self.Quiet == False:
                print("Solution type not understood:")
                print("Defaulting to direct solution with UMFpack")
        wvector = scipy.sparse.linalg.spsolve(self.coeff_matrix, q0vector, use_umfpack=True)

    # Reshape into grid
    self.w = -wvector.reshape(self.qs.shape)
    self.w_padded = self.w.copy()
[ "def", "fd_solve", "(", "self", ")", ":", "if", "self", ".", "Debug", ":", "try", ":", "# Will fail if scalar", "print", "(", "\"self.Te\"", ",", "self", ".", "Te", ".", "shape", ")", "except", ":", "pass", "print", "(", "\"self.qs\"", ",", "self", ".", "qs", ".", "shape", ")", "self", ".", "calc_max_flexural_wavelength", "(", ")", "print", "(", "\"maxFlexuralWavelength_ncells: (x, y):\"", ",", "self", ".", "maxFlexuralWavelength_ncells_x", ",", "self", ".", "maxFlexuralWavelength_ncells_y", ")", "q0vector", "=", "self", ".", "qs", ".", "reshape", "(", "-", "1", ",", "order", "=", "'C'", ")", "if", "self", ".", "Solver", "==", "\"iterative\"", "or", "self", ".", "Solver", "==", "\"Iterative\"", ":", "if", "self", ".", "Debug", ":", "print", "(", "\"Using generalized minimal residual method for iterative solution\"", ")", "if", "self", ".", "Verbose", ":", "print", "(", "\"Converging to a tolerance of\"", ",", "self", ".", "iterative_ConvergenceTolerance", ",", "\"m between iterations\"", ")", "wvector", "=", "scipy", ".", "sparse", ".", "linalg", ".", "isolve", ".", "lgmres", "(", "self", ".", "coeff_matrix", ",", "q0vector", ")", "#, tol=1E-10)#,x0=woldvector)#,x0=wvector,tol=1E-15) ", "wvector", "=", "wvector", "[", "0", "]", "# Reach into tuple to get my array back", "else", ":", "if", "self", ".", "Solver", "==", "\"direct\"", "or", "self", ".", "Solver", "==", "\"Direct\"", ":", "if", "self", ".", "Debug", ":", "print", "(", "\"Using direct solution with UMFpack\"", ")", "else", ":", "if", "self", ".", "Quiet", "==", "False", ":", "print", "(", "\"Solution type not understood:\"", ")", "print", "(", "\"Defaulting to direct solution with UMFpack\"", ")", "wvector", "=", "scipy", ".", "sparse", ".", "linalg", ".", "spsolve", "(", "self", ".", "coeff_matrix", ",", "q0vector", ",", "use_umfpack", "=", "True", ")", "# Reshape into grid", "self", ".", "w", "=", "-", "wvector", ".", "reshape", "(", "self", ".", "qs", ".", "shape", ")", "self", ".", "w_padded", "=", "self", ".", "w", ".", "copy", "(", ")" ]
w = fd_solve()
Sparse flexural response calculation.
Can be performed by direct factorization with UMFpack (default)
or by an iterative minimum residual technique
These are both the fastest of the standard Scipy builtin techniques in
their respective classes
Requires the coefficient matrix from "2D.coeff_matrix"
[ "w", "=", "fd_solve", "()", "Sparse", "flexural", "response", "calculation", ".", "Can", "be", "performed", "by", "direct", "factorization", "with", "UMFpack", "(", "defuault", ")", "or", "by", "an", "iterative", "minimum", "residual", "technique", "These", "are", "both", "the", "fastest", "of", "the", "standard", "Scipy", "builtin", "techniques", "in", "their", "respective", "classes", "Requires", "the", "coefficient", "matrix", "from", "2D", ".", "coeff_matrix" ]
python
train
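A self-contained sketch of the two solver paths used above, run on a tiny stand-in system rather than a flexural coefficient matrix.

import numpy as np
import scipy.sparse
import scipy.sparse.linalg

A = scipy.sparse.csr_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b = np.array([1.0, 2.0])

w_direct = scipy.sparse.linalg.spsolve(A, b)     # direct factorization path
w_iter, info = scipy.sparse.linalg.lgmres(A, b)  # iterative path; info == 0 on success
print(w_direct, w_iter, info)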
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L1001-L1014
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_app(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status") config = logical_chassis_fwdl_status output = ET.SubElement(logical_chassis_fwdl_status, "output") cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries") fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries") blade_app = ET.SubElement(fwdl_entries, "blade-app") blade_app.text = kwargs.pop('blade_app') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_app", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "logical_chassis_fwdl_status", "=", "ET", ".", "Element", "(", "\"logical_chassis_fwdl_status\"", ")", "config", "=", "logical_chassis_fwdl_status", "output", "=", "ET", ".", "SubElement", "(", "logical_chassis_fwdl_status", ",", "\"output\"", ")", "cluster_fwdl_entries", "=", "ET", ".", "SubElement", "(", "output", ",", "\"cluster-fwdl-entries\"", ")", "fwdl_entries", "=", "ET", ".", "SubElement", "(", "cluster_fwdl_entries", ",", "\"fwdl-entries\"", ")", "blade_app", "=", "ET", ".", "SubElement", "(", "fwdl_entries", ",", "\"blade-app\"", ")", "blade_app", ".", "text", "=", "kwargs", ".", "pop", "(", "'blade_app'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
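The auto-generated method builds a nested XML request element by element. A minimal standalone sketch of the same ElementTree pattern; the element names mirror the snippet, and the blade-app value is illustrative.

import xml.etree.ElementTree as ET

status = ET.Element("logical_chassis_fwdl_status")
output = ET.SubElement(status, "output")
cluster = ET.SubElement(output, "cluster-fwdl-entries")
entries = ET.SubElement(cluster, "fwdl-entries")
blade_app = ET.SubElement(entries, "blade-app")
blade_app.text = "my-blade-app"  # illustrative value

# Serialize the request body for inspection.
print(ET.tostring(status, encoding="unicode"))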