repo: stringlengths (7 to 55)
path: stringlengths (4 to 223)
url: stringlengths (87 to 315)
code: stringlengths (75 to 104k)
code_tokens: list
docstring: stringlengths (1 to 46.9k)
docstring_tokens: list
language: stringclasses (1 value)
partition: stringclasses (3 values)
avg_line_len: float64 (7.91 to 980)
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6035-L6039
def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
    """Dump an HTML document. """
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format)
[ "def", "htmlDocContentDumpFormatOutput", "(", "self", ",", "cur", ",", "encoding", ",", "format", ")", ":", "if", "cur", "is", "None", ":", "cur__o", "=", "None", "else", ":", "cur__o", "=", "cur", ".", "_o", "libxml2mod", ".", "htmlDocContentDumpFormatOutput", "(", "self", ".", "_o", ",", "cur__o", ",", "encoding", ",", "format", ")" ]
Dump an HTML document.
[ "Dump", "an", "HTML", "document", "." ]
python
train
50.2
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L671-L696
def _su_scripts_regex(self):
    """
    :return: [compiled regex, function]
    """
    sups = re.escape(''.join([k for k in self.superscripts.keys()]))
    subs = re.escape(''.join([k for k in self.subscripts.keys()]))
    # language=PythonRegExp
    su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
                r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(
        su_=subs + sups, sub=subs, sup=sups)
    su_regex = re.compile(su_regex)

    def su_replace(m):
        esc, sub, root_sup, sup = m.groups()
        if esc is not None:
            return esc
        elif sub is not None:
            return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺'])
                                   else self.subscripts[c] for c in sub]) + '}'
        elif root_sup is not None:
            return ''.join([self.superscripts[c] for c in root_sup])
        elif sup is not None:
            return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺'])
                                   else self.superscripts[c] for c in sup]) + '}'
        else:
            raise TypeError("Regex bug: this should never be reached")

    return [su_regex, su_replace]
[ "def", "_su_scripts_regex", "(", "self", ")", ":", "sups", "=", "re", ".", "escape", "(", "''", ".", "join", "(", "[", "k", "for", "k", "in", "self", ".", "superscripts", ".", "keys", "(", ")", "]", ")", ")", "subs", "=", "re", ".", "escape", "(", "''", ".", "join", "(", "[", "k", "for", "k", "in", "self", ".", "subscripts", ".", "keys", "(", ")", "]", ")", ")", "# language=PythonRegExp", "su_regex", "=", "(", "r'\\\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +", "", "r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(", "", "", "", "", "su_", "=", "subs", "+", "sups", ",", "sub", "=", "subs", ",", "sup", "=", "sups", ")", "su_regex", "=", "re", ".", "compile", "(", "su_regex", ")", "def", "su_replace", "(", "m", ")", ":", "esc", ",", "sub", ",", "root_sup", ",", "sup", "=", "m", ".", "groups", "(", ")", "if", "esc", "is", "not", "None", ":", "return", "esc", "elif", "sub", "is", "not", "None", ":", "return", "'_{'", "+", "''", ".", "join", "(", "[", "c", "if", "(", "c", "in", "[", "'‹', ", "'", "', '˹", "'", " '˺'", "]", " els", "e", " ", "elf.", "ubsc", "r", "ipts[c] fo", "r", " ", "c", "in ", "u", "])", "+ '", "}", "'", "", "", "elif", "root_sup", "is", "not", "None", ":", "return", "''", ".", "join", "(", "[", "self", ".", "superscripts", "[", "c", "]", "for", "c", "in", "root_sup", "]", ")", "elif", "sup", "is", "not", "None", ":", "return", "'^{'", "+", "''", ".", "join", "(", "[", "c", "if", "(", "c", "in", "[", "'‹', ", "'", "', '˹", "'", " '˺'", "]", " els", "e", " ", "elf.", "uper", "s", "cripts[c] fo", "r", " ", "c", "in ", "u", "])", "+ '", "}", "'", "", "", "else", ":", "raise", "TypeError", "(", "\"Regex bug: this should never be reached\"", ")", "return", "[", "su_regex", ",", "su_replace", "]" ]
:return: [compiled regex, function]
[ ":", "return", ":", "[", "compiled", "regex", "function", "]" ]
python
train
45.461538
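The four-way group dispatch inside `su_replace` above is a generally useful `re.sub` idiom: one compiled alternation, one callback that checks which group matched. A reduced, self-contained sketch with toy character maps (sugartex's real subscript/superscript tables are much larger):

import re

subs = {'₁': '1', '₂': '2'}
sups = {'²': '2', '³': '3'}
rx = re.compile('([{sub}]+)|([{sup}]+)'.format(sub=''.join(subs), sup=''.join(sups)))

def repl(m):
    sub, sup = m.groups()
    if sub is not None:
        return '_{' + ''.join(subs[c] for c in sub) + '}'
    return '^{' + ''.join(sups[c] for c in sup) + '}'

print(rx.sub(repl, 'x₁² + y₂³'))   # x_{1}^{2} + y_{2}^{3}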
ANTsX/ANTsPy
ants/utils/mni2tal.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/mni2tal.py#L4-L53
def mni2tal(xin):
    """
    mni2tal for converting from ch2/mni space to tal - very approximate.

    This is a standard approach but it's not very accurate.

    ANTsR function: `mni2tal`

    Arguments
    ---------
    xin : tuple
        point in mni152 space.

    Returns
    -------
    tuple

    Example
    -------
    >>> import ants
    >>> ants.mni2tal( (10,12,14) )

    References
    ----------
    http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf
    http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
    """
    if (not isinstance(xin, (tuple, list))) or (len(xin) != 3):
        raise ValueError('xin must be tuple/list with 3 coordinates')
    x = list(xin)
    # The input image is in RAS coordinates but we use ITK which returns LPS
    # coordinates. So we need to flip the coordinates such that L => R and P => A to
    # get RAS (MNI) coordinates
    x[0] = x[0] * (-1)  # flip X
    x[1] = x[1] * (-1)  # flip Y
    xout = list(x)  # copy, so the transform below reads the original flipped values
    if (x[2] >= 0):
        xout[0] = x[0] * 0.99
        xout[1] = x[1] * 0.9688 + 0.046 * x[2]
        xout[2] = x[1] * (-0.0485) + 0.9189 * x[2]
    if (x[2] < 0):
        xout[0] = x[0] * 0.99
        xout[1] = x[1] * 0.9688 + 0.042 * x[2]
        xout[2] = x[1] * (-0.0485) + 0.839 * x[2]
    return xout
[ "def", "mni2tal", "(", "xin", ")", ":", "if", "(", "not", "isinstance", "(", "xin", ",", "(", "tuple", ",", "list", ")", ")", ")", "or", "(", "len", "(", "xin", ")", "!=", "3", ")", ":", "raise", "ValueError", "(", "'xin must be tuple/list with 3 coordinates'", ")", "x", "=", "list", "(", "xin", ")", "# The input image is in RAS coordinates but we use ITK which returns LPS", "# coordinates. So we need to flip the coordinates such that L => R and P => A to", "# get RAS (MNI) coordinates", "x", "[", "0", "]", "=", "x", "[", "0", "]", "*", "(", "-", "1", ")", "# flip X", "x", "[", "1", "]", "=", "x", "[", "1", "]", "*", "(", "-", "1", ")", "# flip Y", "xout", "=", "x", "if", "(", "x", "[", "2", "]", ">=", "0", ")", ":", "xout", "[", "0", "]", "=", "x", "[", "0", "]", "*", "0.99", "xout", "[", "1", "]", "=", "x", "[", "1", "]", "*", "0.9688", "+", "0.046", "*", "x", "[", "2", "]", "xout", "[", "2", "]", "=", "x", "[", "1", "]", "*", "(", "-", "0.0485", ")", "+", "0.9189", "*", "x", "[", "2", "]", "if", "(", "x", "[", "2", "]", "<", "0", ")", ":", "xout", "[", "0", "]", "=", "x", "[", "0", "]", "*", "0.99", "xout", "[", "1", "]", "=", "x", "[", "1", "]", "*", "0.9688", "+", "0.042", "*", "x", "[", "2", "]", "xout", "[", "2", "]", "=", "x", "[", "1", "]", "*", "(", "-", "0.0485", ")", "+", "0.839", "*", "x", "[", "2", "]", "return", "(", "xout", ")" ]
mni2tal for converting from ch2/mni space to tal - very approximate. This is a standard approach but it's not very accurate. ANTsR function: `mni2tal` Arguments --------- xin : tuple point in mni152 space. Returns ------- tuple Example ------- >>> import ants >>> ants.mni2tal( (10,12,14) ) References ---------- http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
[ "mni2tal", "for", "converting", "from", "ch2", "/", "mni", "space", "to", "tal", "-", "very", "approximate", "." ]
python
train
25.94
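Working the docstring's example by hand through the z >= 0 branch (reading the transform as using the original flipped coordinates, per the copy noted in the code above): xin = (10, 12, 14) flips to x = (-10, -12, 14), so xout = [-10 * 0.99, -12 * 0.9688 + 0.046 * 14, -12 * (-0.0485) + 0.9189 * 14] = [-9.9, -10.9816, 13.4466].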
JNRowe/pyisbn
pyisbn/__init__.py
https://github.com/JNRowe/pyisbn/blob/653cb1798d4f231d552991a1011d6aba1c4de396/pyisbn/__init__.py#L447-L475
def calculate_checksum(isbn):
    """Calculate ISBN checksum.

    Args:
        isbn (str): SBN, ISBN-10 or ISBN-13

    Returns:
        ``str``: Checksum for given ISBN or SBN
    """
    isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)]
    if len(isbn) == 9:
        products = [x * y for x, y in zip(isbn, range(1, 10))]
        check = sum(products) % 11
        if check == 10:
            check = 'X'
    else:
        # As soon as Python 2.4 support is dumped
        # [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)]
        products = []
        for i in range(12):
            if i % 2 == 0:
                products.append(isbn[i])
            else:
                products.append(isbn[i] * 3)
        check = 10 - sum(products) % 10
        if check == 10:
            check = 0
    return str(check)
[ "def", "calculate_checksum", "(", "isbn", ")", ":", "isbn", "=", "[", "int", "(", "i", ")", "for", "i", "in", "_isbn_cleanse", "(", "isbn", ",", "checksum", "=", "False", ")", "]", "if", "len", "(", "isbn", ")", "==", "9", ":", "products", "=", "[", "x", "*", "y", "for", "x", ",", "y", "in", "zip", "(", "isbn", ",", "range", "(", "1", ",", "10", ")", ")", "]", "check", "=", "sum", "(", "products", ")", "%", "11", "if", "check", "==", "10", ":", "check", "=", "'X'", "else", ":", "# As soon as Python 2.4 support is dumped", "# [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)]", "products", "=", "[", "]", "for", "i", "in", "range", "(", "12", ")", ":", "if", "i", "%", "2", "==", "0", ":", "products", ".", "append", "(", "isbn", "[", "i", "]", ")", "else", ":", "products", ".", "append", "(", "isbn", "[", "i", "]", "*", "3", ")", "check", "=", "10", "-", "sum", "(", "products", ")", "%", "10", "if", "check", "==", "10", ":", "check", "=", "0", "return", "str", "(", "check", ")" ]
Calculate ISBN checksum. Args: isbn (str): SBN, ISBN-10 or ISBN-13 Returns: ``str``: Checksum for given ISBN or SBN
[ "Calculate", "ISBN", "checksum", "." ]
python
train
28.068966
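The row above encodes both ISBN weighting schemes; here is the same arithmetic worked on two published ISBNs as a self-contained sketch (it bypasses pyisbn's private `_isbn_cleanse` helper, which the row does not show):

# ISBN-10 (SBN): weights 1..9, check = sum mod 11, with 'X' standing for 10.
digits10 = [int(c) for c in '020153082']      # first nine digits of 0-201-53082-1
check10 = sum(d * w for d, w in zip(digits10, range(1, 10))) % 11
assert check10 == 1                           # published check digit

# ISBN-13: weights alternate 1 and 3, check = (10 - sum mod 10) mod 10.
digits13 = [int(c) for c in '978030640615']   # first twelve digits of 978-0-306-40615-7
check13 = (10 - sum(d * (3 if i % 2 else 1) for i, d in enumerate(digits13)) % 10) % 10
assert check13 == 7                           # published check digit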
elastic/apm-agent-python
elasticapm/base.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/base.py#L228-L231
def begin_transaction(self, transaction_type, trace_parent=None):
    """Register the start of a transaction on the client
    """
    return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent)
[ "def", "begin_transaction", "(", "self", ",", "transaction_type", ",", "trace_parent", "=", "None", ")", ":", "return", "self", ".", "tracer", ".", "begin_transaction", "(", "transaction_type", ",", "trace_parent", "=", "trace_parent", ")" ]
Register the start of a transaction on the client
[ "Register", "the", "start", "of", "a", "transaction", "on", "the", "client" ]
python
train
56.25
zyga/python-glibc
pyglibc/select.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/select.py#L234-L254
def modify(self, fd, eventmask):
    """
    Change the bit-mask of events associated with a previously-registered
    descriptor.

    :param fd:
        The descriptor to modify.
    :param eventmask:
        New bit-mask of events that will be monitored.
    :raises ValueError:
        If :meth:`closed()` is True
    :raises OSError:
        If the underlying ``epoll_ctl(2)`` fails. The error message
        matches those found in the manual page.
    """
    if self._epfd < 0:
        _err_closed()
    ev = epoll_event()
    ev.events = eventmask
    ev.data.fd = fd
    epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev))
[ "def", "modify", "(", "self", ",", "fd", ",", "eventmask", ")", ":", "if", "self", ".", "_epfd", "<", "0", ":", "_err_closed", "(", ")", "ev", "=", "epoll_event", "(", ")", "ev", ".", "events", "=", "eventmask", "ev", ".", "data", ".", "fd", "=", "fd", "epoll_ctl", "(", "self", ".", "_epfd", ",", "EPOLL_CTL_MOD", ",", "fd", ",", "byref", "(", "ev", ")", ")" ]
Change the bit-mask of events associated with a previously-registered descriptor. :param fd: The descriptor to modify. :param eventmask: New bit-mask of events that will be monitored. :raises ValueError: If :meth:`closed()` is True :raises OSError: If the underlying ``epoll_ctl(2)`` fails. The error message matches those found in the manual page.
[ "Change", "the", "bit", "-", "mask", "of", "events", "associated", "with", "a", "previously", "-", "registered", "descriptor", "." ]
python
train
32.761905
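The binding above mirrors the standard library's `select.epoll.modify`, so the call pattern can be tried against the stdlib on any Linux box (a minimal sketch; `select.epoll` does not exist on macOS or Windows):

import select
import socket

a, b = socket.socketpair()
ep = select.epoll()
ep.register(a.fileno(), select.EPOLLIN)                   # watch readability first
ep.modify(a.fileno(), select.EPOLLIN | select.EPOLLOUT)   # then widen the event mask
b.send(b'x')
print(ep.poll(timeout=1))    # e.g. [(a.fileno(), EPOLLIN | EPOLLOUT)]
ep.close()
a.close()
b.close()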
istresearch/scrapy-cluster
crawler/crawling/pipelines.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/pipelines.py#L165-L173
def _kafka_failure(self, item, spider, response):
    '''
    Callback for failed send
    '''
    item['success'] = False
    item['exception'] = traceback.format_exc()
    item['spiderid'] = spider.name
    item = self._clean_item(item)
    self.logger.error("Failed to send page to Kafka", item)
[ "def", "_kafka_failure", "(", "self", ",", "item", ",", "spider", ",", "response", ")", ":", "item", "[", "'success'", "]", "=", "False", "item", "[", "'exception'", "]", "=", "traceback", ".", "format_exc", "(", ")", "item", "[", "'spiderid'", "]", "=", "spider", ".", "name", "item", "=", "self", ".", "_clean_item", "(", "item", ")", "self", ".", "logger", ".", "error", "(", "\"Failed to send page to Kafka\"", ",", "item", ")" ]
Callback for failed send
[ "Callback", "for", "failed", "send" ]
python
train
35.777778
HDI-Project/ballet
ballet/validation/common.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/validation/common.py#L152-L191
def _categorize_file_diffs(self, file_diffs):
    """Partition file changes into admissible and inadmissible changes"""
    # TODO move this into a new validator
    candidate_feature_diffs = []
    valid_init_diffs = []
    inadmissible_files = []

    for diff in file_diffs:
        valid, failures = check_from_class(
            ProjectStructureCheck, diff, self.project)
        if valid:
            if pathlib.Path(diff.b_path).parts[-1] != '__init__.py':
                candidate_feature_diffs.append(diff)
                logger.debug(
                    'Categorized {file} as CANDIDATE FEATURE MODULE'
                    .format(file=diff.b_path))
            else:
                valid_init_diffs.append(diff)
                logger.debug(
                    'Categorized {file} as VALID INIT MODULE'
                    .format(file=diff.b_path))
        else:
            inadmissible_files.append(diff)
            logger.debug(
                'Categorized {file} as INADMISSIBLE; '
                'failures were {failures}'
                .format(file=diff.b_path, failures=failures))

    logger.info(
        'Admitted {} candidate feature{} '
        'and {} __init__ module{} '
        'and rejected {} file{}'
        .format(len(candidate_feature_diffs),
                make_plural_suffix(candidate_feature_diffs),
                len(valid_init_diffs),
                make_plural_suffix(valid_init_diffs),
                len(inadmissible_files),
                make_plural_suffix(inadmissible_files)))

    return candidate_feature_diffs, valid_init_diffs, inadmissible_files
[ "def", "_categorize_file_diffs", "(", "self", ",", "file_diffs", ")", ":", "# TODO move this into a new validator", "candidate_feature_diffs", "=", "[", "]", "valid_init_diffs", "=", "[", "]", "inadmissible_files", "=", "[", "]", "for", "diff", "in", "file_diffs", ":", "valid", ",", "failures", "=", "check_from_class", "(", "ProjectStructureCheck", ",", "diff", ",", "self", ".", "project", ")", "if", "valid", ":", "if", "pathlib", ".", "Path", "(", "diff", ".", "b_path", ")", ".", "parts", "[", "-", "1", "]", "!=", "'__init__.py'", ":", "candidate_feature_diffs", ".", "append", "(", "diff", ")", "logger", ".", "debug", "(", "'Categorized {file} as CANDIDATE FEATURE MODULE'", ".", "format", "(", "file", "=", "diff", ".", "b_path", ")", ")", "else", ":", "valid_init_diffs", ".", "append", "(", "diff", ")", "logger", ".", "debug", "(", "'Categorized {file} as VALID INIT MODULE'", ".", "format", "(", "file", "=", "diff", ".", "b_path", ")", ")", "else", ":", "inadmissible_files", ".", "append", "(", "diff", ")", "logger", ".", "debug", "(", "'Categorized {file} as INADMISSIBLE; '", "'failures were {failures}'", ".", "format", "(", "file", "=", "diff", ".", "b_path", ",", "failures", "=", "failures", ")", ")", "logger", ".", "info", "(", "'Admitted {} candidate feature{} '", "'and {} __init__ module{} '", "'and rejected {} file{}'", ".", "format", "(", "len", "(", "candidate_feature_diffs", ")", ",", "make_plural_suffix", "(", "candidate_feature_diffs", ")", ",", "len", "(", "valid_init_diffs", ")", ",", "make_plural_suffix", "(", "valid_init_diffs", ")", ",", "len", "(", "inadmissible_files", ")", ",", "make_plural_suffix", "(", "inadmissible_files", ")", ")", ")", "return", "candidate_feature_diffs", ",", "valid_init_diffs", ",", "inadmissible_files" ]
Partition file changes into admissible and inadmissible changes
[ "Partition", "file", "changes", "into", "admissible", "and", "inadmissible", "changes" ]
python
train
42.9
xflr6/fileconfig
fileconfig/tools.py
https://github.com/xflr6/fileconfig/blob/473d65f6442eb1ac49ada0b6e56cab45f8018c15/fileconfig/tools.py#L23-L35
def caller_path(steps=1):
    """Return the path to the source file of the current frames' caller."""
    frame = sys._getframe(steps + 1)
    try:
        path = os.path.dirname(frame.f_code.co_filename)
    finally:
        del frame
    if not path:
        path = os.getcwd()
    return os.path.realpath(path)
[ "def", "caller_path", "(", "steps", "=", "1", ")", ":", "frame", "=", "sys", ".", "_getframe", "(", "steps", "+", "1", ")", "try", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "frame", ".", "f_code", ".", "co_filename", ")", "finally", ":", "del", "frame", "if", "not", "path", ":", "path", "=", "os", ".", "getcwd", "(", ")", "return", "os", ".", "path", ".", "realpath", "(", "path", ")" ]
Return the path to the source file of the current frames' caller.
[ "Return", "the", "path", "to", "the", "source", "file", "of", "the", "current", "frames", "caller", "." ]
python
train
23.384615
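The try/finally around the frame object is the part worth copying: deleting the local reference promptly avoids keeping the caller's frame (and everything it references) alive. A standalone sketch of the same walk:

import os
import sys

def caller_dir():
    # frame 0 is this function, frame 1 is whoever called it
    frame = sys._getframe(1)
    try:
        return os.path.dirname(frame.f_code.co_filename) or os.getcwd()
    finally:
        del frame    # drop the frame reference to avoid a lingering cycle

print(caller_dir())  # directory of the calling file, or cwd as the fallback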
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L643-L653
def delete(self, count=1):
    """
    Delete specified number of characters and Return the deleted text.
    """
    if self.cursor_position < len(self.text):
        deleted = self.document.text_after_cursor[:count]
        self.text = self.text[:self.cursor_position] + \
            self.text[self.cursor_position + len(deleted):]
        return deleted
    else:
        return ''
[ "def", "delete", "(", "self", ",", "count", "=", "1", ")", ":", "if", "self", ".", "cursor_position", "<", "len", "(", "self", ".", "text", ")", ":", "deleted", "=", "self", ".", "document", ".", "text_after_cursor", "[", ":", "count", "]", "self", ".", "text", "=", "self", ".", "text", "[", ":", "self", ".", "cursor_position", "]", "+", "self", ".", "text", "[", "self", ".", "cursor_position", "+", "len", "(", "deleted", ")", ":", "]", "return", "deleted", "else", ":", "return", "''" ]
Delete specified number of characters and Return the deleted text.
[ "Delete", "specified", "number", "of", "characters", "and", "Return", "the", "deleted", "text", "." ]
python
train
37.727273
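The slice arithmetic in the row, worked on a bare string (hedged: the real Buffer routes `text_after_cursor` through a Document object, omitted here):

text, cursor, count = 'hello world', 4, 3
deleted = text[cursor:cursor + count]                 # 'o w'
text = text[:cursor] + text[cursor + len(deleted):]
print(repr(deleted), repr(text))                      # 'o w' 'hellorld'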
SolutionsCloud/apidoc
apidoc/factory/template.py
https://github.com/SolutionsCloud/apidoc/blob/1ee25d886a5bea11dc744c2f3d0abb0b55d942e1/apidoc/factory/template.py#L12-L25
def create_from_config(self, config):
    """Create a template object file defined in the config object
    """
    configService = ConfigService()
    template = TemplateService()
    template.output = config["output"]["location"]
    template_file = configService.get_template_from_config(config)
    template.input = os.path.basename(template_file)
    template.env = Environment(loader=FileSystemLoader(os.path.dirname(template_file)))
    return template
[ "def", "create_from_config", "(", "self", ",", "config", ")", ":", "configService", "=", "ConfigService", "(", ")", "template", "=", "TemplateService", "(", ")", "template", ".", "output", "=", "config", "[", "\"output\"", "]", "[", "\"location\"", "]", "template_file", "=", "configService", ".", "get_template_from_config", "(", "config", ")", "template", ".", "input", "=", "os", ".", "path", ".", "basename", "(", "template_file", ")", "template", ".", "env", "=", "Environment", "(", "loader", "=", "FileSystemLoader", "(", "os", ".", "path", ".", "dirname", "(", "template_file", ")", ")", ")", "return", "template" ]
Create a template object file defined in the config object
[ "Create", "a", "template", "object", "file", "defined", "in", "the", "config", "object" ]
python
train
34.714286
JonathanRaiman/pytreebank
pytreebank/labeled_trees.py
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L60-L70
def copy(self):
    """
    Deep Copy of a LabeledTree
    """
    return LabeledTree(
        udepth = self.udepth,
        depth = self.depth,
        text = self.text,
        label = self.label,
        children = self.children.copy() if self.children != None else [],
        parent = self.parent)
[ "def", "copy", "(", "self", ")", ":", "return", "LabeledTree", "(", "udepth", "=", "self", ".", "udepth", ",", "depth", "=", "self", ".", "depth", ",", "text", "=", "self", ".", "text", ",", "label", "=", "self", ".", "label", ",", "children", "=", "self", ".", "children", ".", "copy", "(", ")", "if", "self", ".", "children", "!=", "None", "else", "[", "]", ",", "parent", "=", "self", ".", "parent", ")" ]
Deep Copy of a LabeledTree
[ "Deep", "Copy", "of", "a", "LabeledTree" ]
python
train
30.181818
nschloe/colorio
experiments/pade2d.py
https://github.com/nschloe/colorio/blob/357d6001b3cf30f752e23726bf429dc1d1c60b3a/experiments/pade2d.py#L145-L178
def jac(self, xy=None):
    """Get the Jacobian at (x, y).
    """
    if xy is not None:
        self.set_xy(xy)

    ux = numpy.dot(self.ax, self.xy_list[: len(self.ax)])
    vx = numpy.dot(self.bx, self.xy_list[: len(self.bx)])
    uy = numpy.dot(self.ay, self.xy_list[: len(self.ay)])
    vy = numpy.dot(self.by, self.xy_list[: len(self.by)])

    ux_dx = numpy.dot(self.ax, self.dx_list[: len(self.ax)])
    vx_dx = numpy.dot(self.bx, self.dx_list[: len(self.bx)])
    uy_dx = numpy.dot(self.ay, self.dx_list[: len(self.ay)])
    vy_dx = numpy.dot(self.by, self.dx_list[: len(self.by)])

    ux_dy = numpy.dot(self.ax, self.dy_list[: len(self.ax)])
    vx_dy = numpy.dot(self.bx, self.dy_list[: len(self.bx)])
    uy_dy = numpy.dot(self.ay, self.dy_list[: len(self.ay)])
    vy_dy = numpy.dot(self.by, self.dy_list[: len(self.by)])

    # each entry is the quotient rule: d(u/v) = (u' v - v' u) / v**2
    jac = numpy.array(
        [
            [
                (ux_dx * vx - vx_dx * ux) / vx ** 2,
                (ux_dy * vx - vx_dy * ux) / vx ** 2,
            ],
            [
                (uy_dx * vy - vy_dx * uy) / vy ** 2,
                (uy_dy * vy - vy_dy * uy) / vy ** 2,
            ],
        ]
    )
    return jac
[ "def", "jac", "(", "self", ",", "xy", "=", "None", ")", ":", "if", "xy", "is", "not", "None", ":", "self", ".", "set_xy", "(", "xy", ")", "ux", "=", "numpy", ".", "dot", "(", "self", ".", "ax", ",", "self", ".", "xy_list", "[", ":", "len", "(", "self", ".", "ax", ")", "]", ")", "vx", "=", "numpy", ".", "dot", "(", "self", ".", "bx", ",", "self", ".", "xy_list", "[", ":", "len", "(", "self", ".", "bx", ")", "]", ")", "uy", "=", "numpy", ".", "dot", "(", "self", ".", "ay", ",", "self", ".", "xy_list", "[", ":", "len", "(", "self", ".", "ay", ")", "]", ")", "vy", "=", "numpy", ".", "dot", "(", "self", ".", "by", ",", "self", ".", "xy_list", "[", ":", "len", "(", "self", ".", "by", ")", "]", ")", "ux_dx", "=", "numpy", ".", "dot", "(", "self", ".", "ax", ",", "self", ".", "dx_list", "[", ":", "len", "(", "self", ".", "ax", ")", "]", ")", "vx_dx", "=", "numpy", ".", "dot", "(", "self", ".", "bx", ",", "self", ".", "dx_list", "[", ":", "len", "(", "self", ".", "bx", ")", "]", ")", "uy_dx", "=", "numpy", ".", "dot", "(", "self", ".", "ay", ",", "self", ".", "dx_list", "[", ":", "len", "(", "self", ".", "ay", ")", "]", ")", "vy_dx", "=", "numpy", ".", "dot", "(", "self", ".", "by", ",", "self", ".", "dx_list", "[", ":", "len", "(", "self", ".", "by", ")", "]", ")", "ux_dy", "=", "numpy", ".", "dot", "(", "self", ".", "ax", ",", "self", ".", "dy_list", "[", ":", "len", "(", "self", ".", "ax", ")", "]", ")", "vx_dy", "=", "numpy", ".", "dot", "(", "self", ".", "bx", ",", "self", ".", "dy_list", "[", ":", "len", "(", "self", ".", "bx", ")", "]", ")", "uy_dy", "=", "numpy", ".", "dot", "(", "self", ".", "ay", ",", "self", ".", "dy_list", "[", ":", "len", "(", "self", ".", "ay", ")", "]", ")", "vy_dy", "=", "numpy", ".", "dot", "(", "self", ".", "by", ",", "self", ".", "dy_list", "[", ":", "len", "(", "self", ".", "by", ")", "]", ")", "jac", "=", "numpy", ".", "array", "(", "[", "[", "(", "ux_dx", "*", "vx", "-", "vx_dx", "*", "ux", ")", "/", "vx", "**", "2", ",", "(", "ux_dy", "*", "vx", "-", "vx_dy", "*", "ux", ")", "/", "vx", "**", "2", ",", "]", ",", "[", "(", "uy_dx", "*", "vy", "-", "vy_dx", "*", "uy", ")", "/", "vy", "**", "2", ",", "(", "uy_dy", "*", "vy", "-", "vy_dy", "*", "uy", ")", "/", "vy", "**", "2", ",", "]", ",", "]", ")", "return", "jac" ]
Get the Jacobian at (x, y).
[ "Get", "the", "Jacobian", "at", "(", "x", "y", ")", "." ]
python
train
36.882353
Cadasta/django-tutelary
tutelary/engine.py
https://github.com/Cadasta/django-tutelary/blob/66bb05de7098777c0a383410c287bf48433cde87/tutelary/engine.py#L270-L284
def add(self, effect=None, act=None, obj=None, policy=None, policies=None):
    """Insert an individual (effect, action, object) triple or all
    triples for a policy or list of policies.
    """
    if policies is not None:
        for p in policies:
            self.add(policy=p)
    elif policy is not None:
        for e, a, o in policy:
            self.add(e, a, o)
    else:
        objc = obj.components if obj is not None else []
        self.tree[act.components + objc] = effect
[ "def", "add", "(", "self", ",", "effect", "=", "None", ",", "act", "=", "None", ",", "obj", "=", "None", ",", "policy", "=", "None", ",", "policies", "=", "None", ")", ":", "if", "policies", "is", "not", "None", ":", "for", "p", "in", "policies", ":", "self", ".", "add", "(", "policy", "=", "p", ")", "elif", "policy", "is", "not", "None", ":", "for", "e", ",", "a", ",", "o", "in", "policy", ":", "self", ".", "add", "(", "e", ",", "a", ",", "o", ")", "else", ":", "objc", "=", "obj", ".", "components", "if", "obj", "is", "not", "None", "else", "[", "]", "self", ".", "tree", "[", "act", ".", "components", "+", "objc", "]", "=", "effect" ]
Insert an individual (effect, action, object) triple or all triples for a policy or list of policies.
[ "Insert", "an", "individual", "(", "effect", "action", "object", ")", "triple", "or", "all", "triples", "for", "a", "policy", "or", "list", "of", "policies", "." ]
python
train
35.8
materialsproject/pymatgen
pymatgen/io/abinit/launcher.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/launcher.py#L890-L938
def sendmail(subject, text, mailto, sender=None):
    """
    Sends an e-mail with unix sendmail.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of string with the recipients.
        sender: string with the sender address.
            If sender is None, username@hostname is used.

    Returns:
        Exit status
    """
    def user_at_host():
        from socket import gethostname
        return os.getlogin() + "@" + gethostname()

    # Body of the message.
    try:
        sender = user_at_host() if sender is None else sender
    except OSError:
        sender = 'abipyscheduler@youknowwhere'

    if is_string(mailto):
        mailto = [mailto]

    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)
    msg = mail.as_string()

    # sendmail works much better than the python interface.
    # Note that sendmail is available only on Unix-like OS.
    from subprocess import Popen, PIPE
    import sys

    sendmail = which("sendmail")
    if sendmail is None:
        return -1
    if sys.version_info[0] < 3:
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
    else:
        # msg is string not bytes so must use universal_newlines
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)

    outdata, errdata = p.communicate(msg)
    return len(errdata)
[ "def", "sendmail", "(", "subject", ",", "text", ",", "mailto", ",", "sender", "=", "None", ")", ":", "def", "user_at_host", "(", ")", ":", "from", "socket", "import", "gethostname", "return", "os", ".", "getlogin", "(", ")", "+", "\"@\"", "+", "gethostname", "(", ")", "# Body of the message.", "try", ":", "sender", "=", "user_at_host", "(", ")", "if", "sender", "is", "None", "else", "sender", "except", "OSError", ":", "sender", "=", "'abipyscheduler@youknowwhere'", "if", "is_string", "(", "mailto", ")", ":", "mailto", "=", "[", "mailto", "]", "from", "email", ".", "mime", ".", "text", "import", "MIMEText", "mail", "=", "MIMEText", "(", "text", ")", "mail", "[", "\"Subject\"", "]", "=", "subject", "mail", "[", "\"From\"", "]", "=", "sender", "mail", "[", "\"To\"", "]", "=", "\", \"", ".", "join", "(", "mailto", ")", "msg", "=", "mail", ".", "as_string", "(", ")", "# sendmail works much better than the python interface.", "# Note that sendmail is available only on Unix-like OS.", "from", "subprocess", "import", "Popen", ",", "PIPE", "import", "sys", "sendmail", "=", "which", "(", "\"sendmail\"", ")", "if", "sendmail", "is", "None", ":", "return", "-", "1", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "p", "=", "Popen", "(", "[", "sendmail", ",", "\"-t\"", "]", ",", "stdin", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "else", ":", "# msg is string not bytes so must use universal_newlines", "p", "=", "Popen", "(", "[", "sendmail", ",", "\"-t\"", "]", ",", "stdin", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "universal_newlines", "=", "True", ")", "outdata", ",", "errdata", "=", "p", ".", "communicate", "(", "msg", ")", "return", "len", "(", "errdata", ")" ]
Sends an e-mail with unix sendmail. Args: subject: String with the subject of the mail. text: String with the body of the mail. mailto: String or list of string with the recipients. sender: string with the sender address. If sender is None, username@hostname is used. Returns: Exit status
[ "Sends", "an", "e", "-", "mail", "with", "unix", "sendmail", "." ]
python
train
29.571429
fulfilio/fulfil-python-api
fulfil_client/model.py
https://github.com/fulfilio/fulfil-python-api/blob/180ac969c427b1292439a0371866aa5f169ffa6b/fulfil_client/model.py#L445-L451
def offset(self, offset):
    """
    Apply an OFFSET to the query and return the newly resulting Query.
    """
    query = self._copy()
    query._offset = offset
    return query
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "query", "=", "self", ".", "_copy", "(", ")", "query", ".", "_offset", "=", "offset", "return", "query" ]
Apply an OFFSET to the query and return the newly resulting Query.
[ "Apply", "an", "OFFSET", "to", "the", "query", "and", "return", "the", "newly", "resulting", "Query", "." ]
python
train
28.428571
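The copy-then-mutate pattern above is what keeps such query builders side-effect free; a reduced sketch with a hypothetical Query class (fulfil_client's `_copy` internals are not shown in the row):

import copy

class Query:
    def __init__(self):
        self._offset = None

    def offset(self, offset):
        q = copy.copy(self)   # shallow copy, mutate only the clone
        q._offset = offset
        return q

base = Query()
paged = base.offset(50)
print(base._offset, paged._offset)   # None 50: the original query is untouched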
sony/nnabla
python/src/nnabla/parametric_functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1122-L1180
def inq_convolution(inp, outmaps, kernel,
                    pad=None, stride=None, dilation=None, group=1,
                    num_bits=4, inq_iterations=(), selection_algorithm='random',
                    seed=-1, w_init=None, i_init=None, b_init=None,
                    base_axis=1, fix_parameters=False, rng=None,
                    with_bias=True):
    """Incremental Network Quantization Convolution Layer

    During training, the weights are sequentially quantized to power-of-two
    values, which allows the training of a multiplierless network.

    Using `inq_iterations`, one can specify after how many forward passes
    half of the learnable weights are fixed and quantized to powers-of-two.
    After reaching the last value in `inq_iterations`, all weights are fixed.

    For more details, please refer to the reference.

    Reference:
    Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
    Towards lossless CNNs with low-precision weights.
    <https://arxiv.org/abs/1702.03044>

    Args:
        inp (~nnabla.Variable): Input N-D array with shape
            (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`).
            Dimensions before and after base_axis are flattened as if it was a matrix.
        n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        num_bits (int): Number of bits per weight. Value has to be larger
            than 1 as one bit is already used to code the value "0"
        inq_iterations (tuple of int): Tuple of iteration numbers at which we
            fix half of the weights.
        selection_algorithm (str): Chooses algorithm that is used to decide
            which weights are fixed. ("largest_abs" ... fix weights with
            largest absolute value, "random" ... fix weights randomly)
        seed (int): Random seed for INQ algorithm
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
            Initializer for the weight. By default, it is initialized with
            :obj:`nnabla.initializer.UniformInitializer` within the range
            determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
            Initializer for the indicators (0 ... learnable, 1 ... fixed).
            By default, it is initialized with zeros.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
            Initializer for the bias. By default, it is initialized with zeros
            if `with_bias` is `True`.
        fix_parameters (bool): When set to `True`, the weight and bias will
            not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`
    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)),
            rng=rng)
    if i_init is None:
        i_init = ConstantInitializer()
    if b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create(
        "W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        w_init, True, not fix_parameters)
    i = get_parameter_or_create(
        "I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        i_init, False)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, True, not fix_parameters)
    return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation,
                             group, num_bits, inq_iterations,
                             selection_algorithm, seed)
[ "def", "inq_convolution", "(", "inp", ",", "outmaps", ",", "kernel", ",", "pad", "=", "None", ",", "stride", "=", "None", ",", "dilation", "=", "None", ",", "group", "=", "1", ",", "num_bits", "=", "4", ",", "inq_iterations", "=", "(", ")", ",", "selection_algorithm", "=", "'random'", ",", "seed", "=", "-", "1", ",", "w_init", "=", "None", ",", "i_init", "=", "None", ",", "b_init", "=", "None", ",", "base_axis", "=", "1", ",", "fix_parameters", "=", "False", ",", "rng", "=", "None", ",", "with_bias", "=", "True", ")", ":", "if", "w_init", "is", "None", ":", "w_init", "=", "UniformInitializer", "(", "calc_uniform_lim_glorot", "(", "inp", ".", "shape", "[", "base_axis", "]", ",", "outmaps", ",", "tuple", "(", "kernel", ")", ")", ",", "rng", "=", "rng", ")", "if", "i_init", "is", "None", ":", "i_init", "=", "ConstantInitializer", "(", ")", "if", "b_init", "is", "None", ":", "b_init", "=", "ConstantInitializer", "(", ")", "w", "=", "get_parameter_or_create", "(", "\"W\"", ",", "(", "outmaps", ",", "inp", ".", "shape", "[", "base_axis", "]", ")", "+", "tuple", "(", "kernel", ")", ",", "w_init", ",", "True", ",", "not", "fix_parameters", ")", "i", "=", "get_parameter_or_create", "(", "\"I\"", ",", "(", "outmaps", ",", "inp", ".", "shape", "[", "base_axis", "]", ")", "+", "tuple", "(", "kernel", ")", ",", "i_init", ",", "False", ")", "b", "=", "None", "if", "with_bias", ":", "b", "=", "get_parameter_or_create", "(", "\"b\"", ",", "(", "outmaps", ",", ")", ",", "b_init", ",", "True", ",", "not", "fix_parameters", ")", "return", "F", ".", "inq_convolution", "(", "inp", ",", "w", ",", "i", ",", "b", ",", "base_axis", ",", "pad", ",", "stride", ",", "dilation", ",", "group", ",", "num_bits", ",", "inq_iterations", ",", "selection_algorithm", ",", "seed", ")" ]
Incremental Network Quantization Convolution Layer During training, the weights are sequentially quantized to power-of-two values, which allows the training of a multiplierless network. Using `inq_iterations`, one can specify after how many forward passes half of the learnable weights are fixed and quantized to powers-of-two. After reaching the last value in `inq_iterations`, all weights are fixed. For more details, please refer to the reference. Reference: Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization: Towards lossless CNNs with low-precision weights. <https://arxiv.org/abs/1702.03044> Args: inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix. n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0" inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights. selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly) seed (int): Random seed for INQ algorithm w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weight and bias will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. Returns: :class:`~nnabla.Variable`
[ "Incremental", "Network", "Quantization", "Convolution", "Layer" ]
python
train
60.864407
475Cumulus/TBone
tbone/resources/resources.py
https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/resources/resources.py#L201-L213
def wrap_handler(cls, handler, protocol, **kwargs):
    ''' Wrap a request handler with the matching protocol handler '''
    def _wrapper(request, *args, **kwargs):
        instance = cls(request=request, **kwargs)
        if protocol == Resource.Protocol.http:
            return instance._wrap_http(handler, request=request, **kwargs)
        elif protocol == Resource.Protocol.websocket:
            return instance._wrap_ws(handler, request=request, **kwargs)
        elif protocol == Resource.Protocol.amqp:
            # mirror the http/ws branches; the name passed here must be the
            # wrapped handler, a bare view_type would be undefined in this scope
            return instance._wrap_amqp(handler, *args, **kwargs)
        else:
            raise Exception('Communication protocol not specified')
    return _wrapper
[ "def", "wrap_handler", "(", "cls", ",", "handler", ",", "protocol", ",", "*", "*", "kwargs", ")", ":", "def", "_wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", "(", "request", "=", "request", ",", "*", "*", "kwargs", ")", "if", "protocol", "==", "Resource", ".", "Protocol", ".", "http", ":", "return", "instance", ".", "_wrap_http", "(", "handler", ",", "request", "=", "request", ",", "*", "*", "kwargs", ")", "elif", "protocol", "==", "Resource", ".", "Protocol", ".", "websocket", ":", "return", "instance", ".", "_wrap_ws", "(", "handler", ",", "request", "=", "request", ",", "*", "*", "kwargs", ")", "elif", "protocol", "==", "Resource", ".", "Protocol", ".", "amqp", ":", "return", "instance", ".", "_wrap_amqp", "(", "view_type", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "Exception", "(", "'Communication protocol not specified'", ")", "return", "_wrapper" ]
Wrap a request handler with the matching protocol handler
[ "Wrap", "a", "request", "handler", "with", "the", "matching", "protocol", "handler" ]
python
train
55.230769
linkedin/luminol
src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py#L42-L51
def _compute_anom_score(self, lag_window_points, point):
    """
    Compute anomaly score for a single data point.
    Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).
    :param list lag_window_points: values in the lagging window.
    :param float point: data point value.
    :return float: the anomaly score.
    """
    ema = utils.compute_ema(self.smoothing_factor, lag_window_points)[-1]
    return abs(point - ema)
[ "def", "_compute_anom_score", "(", "self", ",", "lag_window_points", ",", "point", ")", ":", "ema", "=", "utils", ".", "compute_ema", "(", "self", ".", "smoothing_factor", ",", "lag_window_points", ")", "[", "-", "1", "]", "return", "abs", "(", "point", "-", "ema", ")" ]
Compute anomaly score for a single data point. Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)). :param list lag_window_points: values in the lagging window. :param float point: data point value. :return float: the anomaly score.
[ "Compute", "anomaly", "score", "for", "a", "single", "data", "point", ".", "Anomaly", "score", "for", "a", "single", "data", "point", "(", "t", "v", ")", "equals", ":", "abs", "(", "v", "-", "ema", "(", "lagging", "window", "))", ".", ":", "param", "list", "lag_window_points", ":", "values", "in", "the", "lagging", "window", ".", ":", "param", "float", "point", ":", "data", "point", "value", ".", ":", "return", "float", ":", "the", "anomaly", "score", "." ]
python
train
48.2
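A worked instance of the score, with a stand-in EMA helper (assumption: `utils.compute_ema`, which the row does not show, returns a smoothed series so that `[-1]` picks the latest value; the classic recursive form is used here):

def compute_ema(alpha, points):
    # s_t = alpha * v_t + (1 - alpha) * s_{t-1}, seeded with the first point
    series = [points[0]]
    for v in points[1:]:
        series.append(alpha * v + (1 - alpha) * series[-1])
    return series

lag_window = [10.0, 10.0, 12.0, 11.0]
ema = compute_ema(0.5, lag_window)[-1]   # 11.0
print(abs(20.0 - ema))                   # anomaly score: 9.0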
poulp/zenipy
zenipy/zenipy.py
https://github.com/poulp/zenipy/blob/fd1de3c268bb1cffcb35b4f8186893c492dd6eaf/zenipy/zenipy.py#L480-L502
def question(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None): """ Display a question, possible answer are yes/no. :param text: text inside the window :type text: str :param title: title of the window :type title: str :param width: window width :type width: int :param height: window height :type height: int :param timeout: close the window after n seconds :type timeout: int :return: The answer as a boolean :rtype: bool """ response = _simple_dialog(Gtk.MessageType.QUESTION, text, title, width, height, timeout) if response == Gtk.ResponseType.YES: return True elif response == Gtk.ResponseType.NO: return False return None
[ "def", "question", "(", "title", "=", "\"\"", ",", "text", "=", "\"\"", ",", "width", "=", "DEFAULT_WIDTH", ",", "height", "=", "DEFAULT_HEIGHT", ",", "timeout", "=", "None", ")", ":", "response", "=", "_simple_dialog", "(", "Gtk", ".", "MessageType", ".", "QUESTION", ",", "text", ",", "title", ",", "width", ",", "height", ",", "timeout", ")", "if", "response", "==", "Gtk", ".", "ResponseType", ".", "YES", ":", "return", "True", "elif", "response", "==", "Gtk", ".", "ResponseType", ".", "NO", ":", "return", "False", "return", "None" ]
Display a question, possible answer are yes/no. :param text: text inside the window :type text: str :param title: title of the window :type title: str :param width: window width :type width: int :param height: window height :type height: int :param timeout: close the window after n seconds :type timeout: int :return: The answer as a boolean :rtype: bool
[ "Display", "a", "question", "possible", "answer", "are", "yes", "/", "no", "." ]
python
train
31.565217
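A hedged usage sketch (requires zenipy and a running display; the import path simply follows the row's module path zenipy/zenipy.py):

from zenipy.zenipy import question

ans = question(title='Confirm', text='Proceed?', timeout=30)
print({True: 'yes', False: 'no', None: 'dismissed or timed out'}[ans])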
assamite/creamas
creamas/grid.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/grid.py#L527-L540
async def populate(self, agent_cls, *args, **kwargs):
    '''Populate all the slave grid environments with agents.

    Assumes that no agents have been spawned yet to the slave
    environment grids. This excludes the slave environment managers as
    they are not in the grids.)
    '''
    n = self.gs[0] * self.gs[1]
    tasks = []
    for addr in self.addrs:
        task = asyncio.ensure_future(self._populate_slave(addr, agent_cls,
                                                          n, *args, **kwargs))
        tasks.append(task)
    rets = await asyncio.gather(*tasks)
    return rets
[ "async", "def", "populate", "(", "self", ",", "agent_cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "n", "=", "self", ".", "gs", "[", "0", "]", "*", "self", ".", "gs", "[", "1", "]", "tasks", "=", "[", "]", "for", "addr", "in", "self", ".", "addrs", ":", "task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "_populate_slave", "(", "addr", ",", "agent_cls", ",", "n", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "tasks", ".", "append", "(", "task", ")", "rets", "=", "await", "asyncio", ".", "gather", "(", "*", "tasks", ")", "return", "rets" ]
Populate all the slave grid environments with agents. Assumes that no agents have been spawned yet to the slave environment grids. This excludes the slave environment managers as they are not in the grids.)
[ "Populate", "all", "the", "slave", "grid", "environments", "with", "agents", ".", "Assumes", "that", "no", "agents", "have", "been", "spawned", "yet", "to", "the", "slave", "environment", "grids", ".", "This", "excludes", "the", "slave", "environment", "managers", "as", "they", "are", "not", "in", "the", "grids", ".", ")" ]
python
train
49.428571
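The fan-out idiom in the row (schedule one task per slave address, then gather), reduced to a runnable standalone sketch with a dummy coroutine in place of the real spawn RPC:

import asyncio

async def populate_one(addr, n):
    await asyncio.sleep(0)            # stand-in for self._populate_slave(...)
    return '{}: spawned {}'.format(addr, n)

async def populate_all(addrs, n):
    tasks = [asyncio.ensure_future(populate_one(a, n)) for a in addrs]
    return await asyncio.gather(*tasks)

print(asyncio.run(populate_all(['tcp://a', 'tcp://b'], 4)))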
micolous/python-slackrealtime
src/slackrealtime/session.py
https://github.com/micolous/python-slackrealtime/blob/e9c94416f979a6582110ebba09c147de2bfe20a1/src/slackrealtime/session.py#L127-L205
def _update_deferred(self, event):
    """
    This does the actual work of updating channel metadata.

    This is called by the update(), and runs this method in another
    thread.
    """
    if isinstance(event, ChannelCreated):
        i = event.channel[u'id']
        event.channel[u'is_archived'] = event.channel[u'is_member'] = False
        self.channels[i] = event.channel
    elif isinstance(event, ChannelArchive):
        self.channels[event.channel][u'is_archived'] = True
    elif isinstance(event, GroupArchive):
        self.groups[event.channel][u'is_archived'] = True
    elif isinstance(event, ChannelDeleted):
        # FIXME: Handle delete events properly.
        # Channels don't really get deleted, they're more just archived.
        self.channels[event.channel][u'is_archived'] = True
        self.channels[event.channel][u'is_open'] = False
    elif isinstance(event, GroupClose):
        # When you close a group, it isn't open to you anymore, but it might
        # still exist. Treat it like ChannelDeleted
        self.groups[event.channel][u'is_archived'] = True
        self.groups[event.channel][u'is_open'] = False
    elif isinstance(event, ChannelJoined):
        cid = event.channel[u'id']
        self.channels[cid] = event.channel
    elif isinstance(event, GroupJoined):
        gid = event.channel[u'id']
        self.groups[gid] = event.channel
    elif isinstance(event, ChannelLeft):
        self.channels[event.channel][u'is_member'] = False
    elif isinstance(event, GroupLeft):
        self.groups[event.channel][u'is_member'] = False
    elif isinstance(event, ChannelMarked):
        # TODO: implement datetime handler properly
        self.channels[event.channel][u'last_read'] = event._b[u'ts']
    elif isinstance(event, GroupMarked):
        self.groups[event.channel][u'last_read'] = event._b[u'ts']
    elif isinstance(event, ChannelRename):
        self.channels[event.channel[u'id']][u'name'] = event.channel[u'name']
    elif isinstance(event, GroupRename):
        self.groups[event.channel[u'id']][u'name'] = event.channel[u'name']
    elif isinstance(event, ChannelUnarchive):
        self.channels[event.channel][u'is_archived'] = False
    elif isinstance(event, GroupUnarchive):
        self.groups[event.channel][u'is_archived'] = False
    elif isinstance(event, ImClose):
        self.ims[event.channel][u'is_open'] = False
    elif isinstance(event, ImCreated):
        i = event.channel[u'id']
        event.channel[u'user'] = event.user
        self.ims[i] = event.channel
    elif isinstance(event, ImMarked):
        # TODO: implement datetime handler properly
        self.ims[event.channel][u'last_read'] = event._b[u'ts']
    elif isinstance(event, ImOpen):
        self.ims[event.channel][u'is_open'] = True
    elif isinstance(event, PresenceChange):
        self.users[event.user][u'presence'] = event.presence
    elif isinstance(event, UserChange):
        # Everything but the status is provided
        # Copy this out of the existing object
        uid = event.user[u'id']
        if event.user.get(u'status') is None and u'presence' in self.users[uid]:
            event.user[u'status'] = self.users[uid][u'presence']
        self.users[uid] = event.user
    elif isinstance(event, TeamPrefChange):
        self.team[u'prefs'][event.name] = event.value
    elif isinstance(event, TeamJoin):
        uid = event.user[u'id']
        self.users[uid] = event.user
    elif isinstance(event, BotAdded) or isinstance(event, BotChanged):
        bid = event.bot[u'id']
        self.bots[bid] = event.bot
[ "def", "_update_deferred", "(", "self", ",", "event", ")", ":", "if", "isinstance", "(", "event", ",", "ChannelCreated", ")", ":", "i", "=", "event", ".", "channel", "[", "u'id'", "]", "event", ".", "channel", "[", "u'is_archived'", "]", "=", "event", ".", "channel", "[", "u'is_member'", "]", "=", "False", "self", ".", "channels", "[", "i", "]", "=", "event", ".", "channel", "elif", "isinstance", "(", "event", ",", "ChannelArchive", ")", ":", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "True", "elif", "isinstance", "(", "event", ",", "GroupArchive", ")", ":", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "True", "elif", "isinstance", "(", "event", ",", "ChannelDeleted", ")", ":", "# FIXME: Handle delete events properly.", "# Channels don't really get deleted, they're more just archived.", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "True", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'is_open'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "GroupClose", ")", ":", "# When you close a group, it isn't open to you anymore, but it might", "# still exist. Treat it like ChannelDeleted", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "True", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'is_open'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "ChannelJoined", ")", ":", "cid", "=", "event", ".", "channel", "[", "u'id'", "]", "self", ".", "channels", "[", "cid", "]", "=", "event", ".", "channel", "elif", "isinstance", "(", "event", ",", "GroupJoined", ")", ":", "gid", "=", "event", ".", "channel", "[", "u'id'", "]", "self", ".", "groups", "[", "gid", "]", "=", "event", ".", "channel", "elif", "isinstance", "(", "event", ",", "ChannelLeft", ")", ":", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'is_member'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "GroupLeft", ")", ":", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'is_member'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "ChannelMarked", ")", ":", "# TODO: implement datetime handler properly", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'last_read'", "]", "=", "event", ".", "_b", "[", "u'ts'", "]", "elif", "isinstance", "(", "event", ",", "GroupMarked", ")", ":", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'last_read'", "]", "=", "event", ".", "_b", "[", "u'ts'", "]", "elif", "isinstance", "(", "event", ",", "ChannelRename", ")", ":", "self", ".", "channels", "[", "event", ".", "channel", "[", "u'id'", "]", "]", "[", "u'name'", "]", "=", "event", ".", "channel", "[", "u'name'", "]", "elif", "isinstance", "(", "event", ",", "GroupRename", ")", ":", "self", ".", "groups", "[", "event", ".", "channel", "[", "u'id'", "]", "]", "[", "u'name'", "]", "=", "event", ".", "channel", "[", "u'name'", "]", "elif", "isinstance", "(", "event", ",", "ChannelUnarchive", ")", ":", "self", ".", "channels", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "GroupUnarchive", ")", ":", "self", ".", "groups", "[", "event", ".", "channel", "]", "[", "u'is_archived'", "]", "=", "False", "elif", "isinstance", "(", "event", ",", "ImClose", ")", ":", "self", ".", "ims", "[", "event", ".", "channel", "]", "[", "u'is_open'", "]", "=", 
"False", "elif", "isinstance", "(", "event", ",", "ImCreated", ")", ":", "i", "=", "event", ".", "channel", "[", "u'id'", "]", "event", ".", "channel", "[", "u'user'", "]", "=", "event", ".", "user", "self", ".", "ims", "[", "i", "]", "=", "event", ".", "channel", "elif", "isinstance", "(", "event", ",", "ImMarked", ")", ":", "# TODO: implement datetime handler properly", "self", ".", "ims", "[", "event", ".", "channel", "]", "[", "u'last_read'", "]", "=", "event", ".", "_b", "[", "u'ts'", "]", "elif", "isinstance", "(", "event", ",", "ImOpen", ")", ":", "self", ".", "ims", "[", "event", ".", "channel", "]", "[", "u'is_open'", "]", "=", "True", "elif", "isinstance", "(", "event", ",", "PresenceChange", ")", ":", "self", ".", "users", "[", "event", ".", "user", "]", "[", "u'presence'", "]", "=", "event", ".", "presence", "elif", "isinstance", "(", "event", ",", "UserChange", ")", ":", "# Everything but the status is provided", "# Copy this out of the existing object", "uid", "=", "event", ".", "user", "[", "u'id'", "]", "if", "event", ".", "user", ".", "get", "(", "u'status'", ")", "is", "None", "and", "u'presence'", "in", "self", ".", "users", "[", "uid", "]", ":", "event", ".", "user", "[", "u'status'", "]", "=", "self", ".", "users", "[", "uid", "]", "[", "u'presence'", "]", "self", ".", "users", "[", "uid", "]", "=", "event", ".", "user", "elif", "isinstance", "(", "event", ",", "TeamPrefChange", ")", ":", "self", ".", "team", "[", "u'prefs'", "]", "[", "event", ".", "name", "]", "=", "event", ".", "value", "elif", "isinstance", "(", "event", ",", "TeamJoin", ")", ":", "uid", "=", "event", ".", "user", "[", "u'id'", "]", "self", ".", "users", "[", "uid", "]", "=", "event", ".", "user", "elif", "isinstance", "(", "event", ",", "BotAdded", ")", "or", "isinstance", "(", "event", ",", "BotChanged", ")", ":", "bid", "=", "event", ".", "bot", "[", "u'id'", "]", "self", ".", "bots", "[", "bid", "]", "=", "event", ".", "bot" ]
This does the actual work of updating channel metadata. This is called by the update(), and runs this method in another thread.
[ "This", "does", "the", "actual", "work", "of", "updating", "channel", "metadata", ".", "This", "is", "called", "by", "the", "update", "()", "and", "runs", "this", "method", "in", "another", "thread", "." ]
python
train
40.797468
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L193-L227
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
    """List projects for the project associated with this client.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list

    :type max_results: int
    :param max_results: (Optional) maximum number of projects to return,
                        If not passed, defaults to a value set by the API.

    :type page_token: str
    :param page_token:
        (Optional) Token representing a cursor into the projects. If
        not passed, the API will return the first page of projects.
        The token marks the beginning of the iterator to be returned
        and the value of the ``page_token`` can be accessed at
        ``next_page_token`` of the
        :class:`~google.api_core.page_iterator.HTTPIterator`.

    :type retry: :class:`google.api_core.retry.Retry`
    :param retry: (Optional) How to retry the RPC.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
              accessible to the current client.
    """
    return page_iterator.HTTPIterator(
        client=self,
        api_request=functools.partial(self._call_api, retry),
        path="/projects",
        item_to_value=_item_to_project,
        items_key="projects",
        page_token=page_token,
        max_results=max_results,
    )
[ "def", "list_projects", "(", "self", ",", "max_results", "=", "None", ",", "page_token", "=", "None", ",", "retry", "=", "DEFAULT_RETRY", ")", ":", "return", "page_iterator", ".", "HTTPIterator", "(", "client", "=", "self", ",", "api_request", "=", "functools", ".", "partial", "(", "self", ".", "_call_api", ",", "retry", ")", ",", "path", "=", "\"/projects\"", ",", "item_to_value", "=", "_item_to_project", ",", "items_key", "=", "\"projects\"", ",", "page_token", "=", "page_token", ",", "max_results", "=", "max_results", ",", ")" ]
List projects for the project associated with this client. See https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list :type max_results: int :param max_results: (Optional) maximum number of projects to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: (Optional) Token representing a cursor into the projects. If not passed, the API will return the first page of projects. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.bigquery.client.Project` accessible to the current client.
[ "List", "projects", "for", "the", "project", "associated", "with", "this", "client", "." ]
python
train
42.542857
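Minimal client-side usage (assumes google-cloud-bigquery is installed and application default credentials are configured; `Project.project_id` is part of the library's public surface):

from google.cloud import bigquery

client = bigquery.Client()
for project in client.list_projects(max_results=10):
    print(project.project_id)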
psss/did
did/stats.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/stats.py#L54-L57
def add_option(self, group):
    """ Add option for self to the parser group object. """
    group.add_argument(
        "--{0}".format(self.option),
        action="store_true", help=self.name)
[ "def", "add_option", "(", "self", ",", "group", ")", ":", "group", ".", "add_argument", "(", "\"--{0}\"", ".", "format", "(", "self", ".", "option", ")", ",", "action", "=", "\"store_true\"", ",", "help", "=", "self", ".", "name", ")" ]
Add option for self to the parser group object.
[ "Add", "option", "for", "self", "to", "the", "parser", "group", "object", "." ]
python
train
48.75
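A standalone sketch of the pattern in the add_option record above; FakeStats is a hypothetical stand-in carrying the option and name attributes that the real did.stats classes provide.

import argparse

class FakeStats(object):
    option = "tests"        # becomes the --tests flag
    name = "Tests created"  # becomes the --help text
    def add_option(self, group):
        group.add_argument(
            "--{0}".format(self.option), action="store_true", help=self.name)

parser = argparse.ArgumentParser()
group = parser.add_argument_group("stats")
FakeStats().add_option(group)
print(parser.parse_args(["--tests"]))  # Namespace(tests=True)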
ajenhl/tacl
tacl/text.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/text.py#L50-L72
def get_ngrams(self, minimum, maximum, skip_sizes=None): """Returns a generator supplying the n-grams (`minimum` <= n <= `maximum`) for this text. Each iteration of the generator supplies a tuple consisting of the size of the n-grams and a `collections.Counter` of the n-grams. :param minimum: minimum n-gram size :type minimum: `int` :param maximum: maximum n-gram size :type maximum: `int` :param skip_sizes: sizes to not generate n-grams for :type skip_sizes: `list` of `int` :rtype: `generator` """ skip_sizes = skip_sizes or [] tokens = self.get_tokens() for size in range(minimum, maximum + 1): if size not in skip_sizes: ngrams = collections.Counter(self._ngrams(tokens, size)) yield (size, ngrams)
[ "def", "get_ngrams", "(", "self", ",", "minimum", ",", "maximum", ",", "skip_sizes", "=", "None", ")", ":", "skip_sizes", "=", "skip_sizes", "or", "[", "]", "tokens", "=", "self", ".", "get_tokens", "(", ")", "for", "size", "in", "range", "(", "minimum", ",", "maximum", "+", "1", ")", ":", "if", "size", "not", "in", "skip_sizes", ":", "ngrams", "=", "collections", ".", "Counter", "(", "self", ".", "_ngrams", "(", "tokens", ",", "size", ")", ")", "yield", "(", "size", ",", "ngrams", ")" ]
Returns a generator supplying the n-grams (`minimum` <= n <= `maximum`) for this text. Each iteration of the generator supplies a tuple consisting of the size of the n-grams and a `collections.Counter` of the n-grams. :param minimum: minimum n-gram size :type minimum: `int` :param maximum: maximum n-gram size :type maximum: `int` :param skip_sizes: sizes to not generate n-grams for :type skip_sizes: `list` of `int` :rtype: `generator`
[ "Returns", "a", "generator", "supplying", "the", "n", "-", "grams", "(", "minimum", "<", "=", "n", "<", "=", "maximum", ")", "for", "this", "text", "." ]
python
train
37.304348
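The _ngrams helper used by get_ngrams is not part of this record; the sketch below assumes it yields sliding windows over the token list, and reproduces what one iteration of the generator counts.

import collections

def ngrams(tokens, size):
    # Sliding windows of the given size, as tuples so Counter can hash them.
    return (tuple(tokens[i:i + size]) for i in range(len(tokens) - size + 1))

tokens = ["a", "b", "a", "b", "c"]
for size in range(2, 4):  # minimum=2, maximum=3, no skip_sizes
    print(size, collections.Counter(ngrams(tokens, size)))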
cloudera/cm_api
python/src/cm_api/endpoints/services.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L423-L450
def get_yarn_applications(self, start_time, end_time, filter_str="", limit=100, offset=0): """ Returns a list of YARN applications that satisfy the filter @type start_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param start_time: Applications must have ended after this time @type end_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param filter_str: A filter to apply to the applications. For example: 'user = root and applicationDuration > 5s' @param limit: The maximum number of results to return @param offset: The offset into the return list @since: API v6 """ params = { 'from': start_time.isoformat(), 'to': end_time.isoformat(), 'filter': filter_str, 'limit': limit, 'offset': offset } return self._get("yarnApplications", ApiYarnApplicationResponse, params=params, api_version=6)
[ "def", "get_yarn_applications", "(", "self", ",", "start_time", ",", "end_time", ",", "filter_str", "=", "\"\"", ",", "limit", "=", "100", ",", "offset", "=", "0", ")", ":", "params", "=", "{", "'from'", ":", "start_time", ".", "isoformat", "(", ")", ",", "'to'", ":", "end_time", ".", "isoformat", "(", ")", ",", "'filter'", ":", "filter_str", ",", "'limit'", ":", "limit", ",", "'offset'", ":", "offset", "}", "return", "self", ".", "_get", "(", "\"yarnApplications\"", ",", "ApiYarnApplicationResponse", ",", "params", "=", "params", ",", "api_version", "=", "6", ")" ]
Returns a list of YARN applications that satisfy the filter @type start_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param start_time: Applications must have ended after this time @type end_time: datetime.datetime. Note that the datetime must either be time zone aware or specified in the server time zone. See the python datetime documentation for more details about python's time zone handling. @param filter_str: A filter to apply to the applications. For example: 'user = root and applicationDuration > 5s' @param limit: The maximum number of results to return @param offset: The offset into the return list @since: API v6
[ "Returns", "a", "list", "of", "YARN", "applications", "that", "satisfy", "the", "filter" ]
python
train
47.714286
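A hypothetical call sketch for the record above; the host, credentials, cluster and service names are placeholders, and the ApiResource/get_cluster/get_service wiring follows the usual cm-api pattern rather than anything shown here.

import datetime
from cm_api.api_client import ApiResource

api = ApiResource("cm-host.example.com", username="admin", password="admin")
yarn = api.get_cluster("Cluster 1").get_service("yarn")
end = datetime.datetime.now()            # server-local time, per the docstring
start = end - datetime.timedelta(hours=1)
response = yarn.get_yarn_applications(
    start, end, filter_str="user = root", limit=10)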
gem/oq-engine
openquake/hmtk/sources/source_model.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/sources/source_model.py#L95-L110
def serialise_to_nrml(self, filename, use_defaults=False): ''' Writes the source model to a nrml source model file given by the filename :param str filename: Path to output file :param bool use_defaults: Boolean to indicate whether to use default values (True) or not. If set to False, ValueErrors will be raised when an essential attribute is missing. ''' source_model = self.convert_to_oqhazardlib( PoissonTOM(1.0), 2.0, 2.0, 10.0, use_defaults=use_defaults) write_source_model(filename, source_model, name=self.name)
[ "def", "serialise_to_nrml", "(", "self", ",", "filename", ",", "use_defaults", "=", "False", ")", ":", "source_model", "=", "self", ".", "convert_to_oqhazardlib", "(", "PoissonTOM", "(", "1.0", ")", ",", "2.0", ",", "2.0", ",", "10.0", ",", "use_defaults", "=", "use_defaults", ")", "write_source_model", "(", "filename", ",", "source_model", ",", "name", "=", "self", ".", "name", ")" ]
Writes the source model to a nrml source model file given by the filename :param str filename: Path to output file :param bool use_defaults: Boolean to indicate whether to use default values (True) or not. If set to False, ValueErrors will be raised when an essential attribute is missing.
[ "Writes", "the", "source", "model", "to", "a", "nrml", "source", "model", "file", "given", "by", "the", "filename" ]
python
train
39.375
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/freqt.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/freqt.py#L39-L55
def sentence2freqt(docgraph, root, successors=None, include_pos=False, escape_func=FREQT_ESCAPE_FUNC): """convert a sentence subgraph into a FREQT string.""" if successors is None: successors = sorted_bfs_successors(docgraph, root) if root in successors: # root node has children / subgraphs embed_str = u"".join(sentence2freqt(docgraph, child, successors, include_pos=include_pos, escape_func=escape_func) for child in successors[root]) return node2freqt( docgraph, root, embed_str, include_pos=include_pos, escape_func=escape_func) else: # root node has no children / subgraphs return node2freqt(docgraph, root, include_pos=include_pos, escape_func=escape_func)
[ "def", "sentence2freqt", "(", "docgraph", ",", "root", ",", "successors", "=", "None", ",", "include_pos", "=", "False", ",", "escape_func", "=", "FREQT_ESCAPE_FUNC", ")", ":", "if", "successors", "is", "None", ":", "successors", "=", "sorted_bfs_successors", "(", "docgraph", ",", "root", ")", "if", "root", "in", "successors", ":", "# root node has children / subgraphs", "embed_str", "=", "u\"\"", ".", "join", "(", "sentence2freqt", "(", "docgraph", ",", "child", ",", "successors", ",", "include_pos", "=", "include_pos", ",", "escape_func", "=", "escape_func", ")", "for", "child", "in", "successors", "[", "root", "]", ")", "return", "node2freqt", "(", "docgraph", ",", "root", ",", "embed_str", ",", "include_pos", "=", "include_pos", ",", "escape_func", "=", "escape_func", ")", "else", ":", "# root node has no children / subgraphs", "return", "node2freqt", "(", "docgraph", ",", "root", ",", "include_pos", "=", "include_pos", ",", "escape_func", "=", "escape_func", ")" ]
convert a sentence subgraph into a FREQT string.
[ "convert", "a", "sentence", "subgraph", "into", "a", "FREQT", "string", "." ]
python
train
52
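The recursion in sentence2freqt is easier to see on a plain successors dict; this sketch swaps in a stand-in for node2freqt that just brackets node labels (the real helper, not shown in this record, also handles POS tags and escaping).

def to_freqt(successors, node):
    # Embed the FREQT strings of all children inside the parent's brackets.
    children = successors.get(node, [])
    embedded = "".join(to_freqt(successors, child) for child in children)
    return "({0}{1})".format(node, embedded)

print(to_freqt({"S": ["NP", "VP"], "VP": ["V", "NP2"]}, "S"))
# -> (S(NP)(VP(V)(NP2)))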
pytroll/pyspectral
rsr_convert_scripts/abi_rsr.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/rsr_convert_scripts/abi_rsr.py#L71-L87
def _load(self, scale=1.0): """Load the ABI relative spectral responses """ LOG.debug("File: %s", str(self.requested_band_filename)) data = np.genfromtxt(self.requested_band_filename, unpack=True, names=['wavelength', 'wavenumber', 'response'], skip_header=2) wvl = data['wavelength'] * scale resp = data['response'] self.rsr = {'wavelength': wvl, 'response': resp}
[ "def", "_load", "(", "self", ",", "scale", "=", "1.0", ")", ":", "LOG", ".", "debug", "(", "\"File: %s\"", ",", "str", "(", "self", ".", "requested_band_filename", ")", ")", "data", "=", "np", ".", "genfromtxt", "(", "self", ".", "requested_band_filename", ",", "unpack", "=", "True", ",", "names", "=", "[", "'wavelength'", ",", "'wavenumber'", ",", "'response'", "]", ",", "skip_header", "=", "2", ")", "wvl", "=", "data", "[", "'wavelength'", "]", "*", "scale", "resp", "=", "data", "[", "'response'", "]", "self", ".", "rsr", "=", "{", "'wavelength'", ":", "wvl", ",", "'response'", ":", "resp", "}" ]
Load the ABI relative spectral responses
[ "Load", "the", "ABI", "relative", "spectral", "responses" ]
python
train
33.411765
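A self-contained sketch of the parsing step in _load, feeding np.genfromtxt an in-memory three-column table shaped like the ABI RSR files; unpack=True is dropped here because indexing the structured result by field name is all the loader needs.

import io
import numpy as np

rsr_file = io.StringIO(
    "header line 1\n"
    "header line 2\n"
    "0.47 21276.6 0.95\n"
    "0.48 20833.3 0.99\n")
data = np.genfromtxt(rsr_file,
                     names=["wavelength", "wavenumber", "response"],
                     skip_header=2)
print(data["wavelength"], data["response"])  # scale would multiply wavelength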
onnx/onnxmltools
onnxmltools/convert/coreml/shape_calculators/neural_network/Permute.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/neural_network/Permute.py#L13-L29
def calculate_permute_output_shapes(operator): ''' Allowed input/output patterns are 1. [N, C, H, W] ---> [N', C', H', W'] Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W] ''' check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, StringTensorType], good_output_types=[FloatTensorType, Int64TensorType, StringTensorType]) input = operator.inputs[0] output = operator.outputs[0] axes = [int(i) for i in operator.raw_operator.permute.axis] input_shape = copy.deepcopy(input.type.shape) output.type.shape = [input_shape[a] for a in axes]
[ "def", "calculate_permute_output_shapes", "(", "operator", ")", ":", "check_input_and_output_numbers", "(", "operator", ",", "input_count_range", "=", "1", ",", "output_count_range", "=", "1", ")", "check_input_and_output_types", "(", "operator", ",", "good_input_types", "=", "[", "FloatTensorType", ",", "Int64TensorType", ",", "StringTensorType", "]", ",", "good_output_types", "=", "[", "FloatTensorType", ",", "Int64TensorType", ",", "StringTensorType", "]", ")", "input", "=", "operator", ".", "inputs", "[", "0", "]", "output", "=", "operator", ".", "outputs", "[", "0", "]", "axes", "=", "[", "int", "(", "i", ")", "for", "i", "in", "operator", ".", "raw_operator", ".", "permute", ".", "axis", "]", "input_shape", "=", "copy", ".", "deepcopy", "(", "input", ".", "type", ".", "shape", ")", "output", ".", "type", ".", "shape", "=", "[", "input_shape", "[", "a", "]", "for", "a", "in", "axes", "]" ]
Allowed input/output patterns are 1. [N, C, H, W] ---> [N', C', H', W'] Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W]
[ "Allowed", "input", "/", "output", "patterns", "are", "1", ".", "[", "N", "C", "H", "W", "]", "---", ">", "[", "N", "C", "H", "W", "]" ]
python
train
44.529412
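The core of the shape calculation above is a gather over the input shape; for a CoreML permute with axis order (0, 2, 3, 1), an NCHW input becomes NHWC.

input_shape = ["N", 3, 32, 64]   # symbolic batch dim, then C, H, W
axes = [0, 2, 3, 1]
output_shape = [input_shape[a] for a in axes]
print(output_shape)              # ['N', 32, 64, 3]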
payu-org/payu
payu/manifest.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/manifest.py#L172-L181
def copy_file(self, filepath): """ Returns flag which says to copy rather than link a file. """ copy_file = False try: copy_file = self.data[filepath]['copy'] except KeyError: return False return copy_file
[ "def", "copy_file", "(", "self", ",", "filepath", ")", ":", "copy_file", "=", "False", "try", ":", "copy_file", "=", "self", ".", "data", "[", "filepath", "]", "[", "'copy'", "]", "except", "KeyError", ":", "return", "False", "return", "copy_file" ]
Returns flag which says to copy rather than link a file.
[ "Returns", "flag", "which", "says", "to", "copy", "rather", "than", "link", "a", "file", "." ]
python
train
27.6
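The same lookup as a standalone expression over a plain dict; a missing file entry and a missing 'copy' key both fall back to False, matching the try/except above when the manifest maps paths to dicts.

data = {"work/input.nc": {"copy": True}}
print(data.get("work/input.nc", {}).get("copy", False))  # True
print(data.get("work/other.nc", {}).get("copy", False))  # False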
pyca/pynacl
src/nacl/bindings/crypto_sign.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_sign.py#L33-L50
def crypto_sign_keypair(): """ Returns a randomly generated public key and secret key. :rtype: (bytes(public_key), bytes(secret_key)) """ pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES) sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES) rc = lib.crypto_sign_keypair(pk, sk) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ( ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:], ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:], )
[ "def", "crypto_sign_keypair", "(", ")", ":", "pk", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "crypto_sign_PUBLICKEYBYTES", ")", "sk", "=", "ffi", ".", "new", "(", "\"unsigned char[]\"", ",", "crypto_sign_SECRETKEYBYTES", ")", "rc", "=", "lib", ".", "crypto_sign_keypair", "(", "pk", ",", "sk", ")", "ensure", "(", "rc", "==", "0", ",", "'Unexpected library error'", ",", "raising", "=", "exc", ".", "RuntimeError", ")", "return", "(", "ffi", ".", "buffer", "(", "pk", ",", "crypto_sign_PUBLICKEYBYTES", ")", "[", ":", "]", ",", "ffi", ".", "buffer", "(", "sk", ",", "crypto_sign_SECRETKEYBYTES", ")", "[", ":", "]", ",", ")" ]
Returns a randomly generated public key and secret key. :rtype: (bytes(public_key), bytes(secret_key))
[ "Returns", "a", "randomly", "generated", "public", "key", "and", "secret", "key", "." ]
python
train
29.611111
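A round-trip sketch built on the same low-level module; crypto_sign and crypto_sign_open are the matching Ed25519 sign and verify calls exposed by nacl.bindings.

import nacl.bindings

pk, sk = nacl.bindings.crypto_sign_keypair()
signed = nacl.bindings.crypto_sign(b"hello", sk)   # signature + message
assert nacl.bindings.crypto_sign_open(signed, pk) == b"hello"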
soldag/python-pwmled
pwmled/led/__init__.py
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/__init__.py#L184-L198
def _transition_stage(self, step, total_steps, brightness=None): """ Get a transition stage at a specific step. :param step: The current step. :param total_steps: The total number of steps. :param brightness: The brightness to transition to (0.0-1.0). :return: The stage at the specific step. """ if brightness is not None: self._assert_is_brightness(brightness) brightness = self._interpolate(self.brightness, brightness, step, total_steps) return {'brightness': brightness}
[ "def", "_transition_stage", "(", "self", ",", "step", ",", "total_steps", ",", "brightness", "=", "None", ")", ":", "if", "brightness", "is", "not", "None", ":", "self", ".", "_assert_is_brightness", "(", "brightness", ")", "brightness", "=", "self", ".", "_interpolate", "(", "self", ".", "brightness", ",", "brightness", ",", "step", ",", "total_steps", ")", "return", "{", "'brightness'", ":", "brightness", "}" ]
Get a transition stage at a specific step. :param step: The current step. :param total_steps: The total number of steps. :param brightness: The brightness to transition to (0.0-1.0). :return: The stage at the specific step.
[ "Get", "a", "transition", "stage", "at", "a", "specific", "step", "." ]
python
train
40.133333
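The _interpolate helper is not included in this record; a linear version with the same call shape (start, end, step, total_steps) is assumed below to show what the transition stages look like.

def interpolate(start, end, step, total_steps):
    # Linear blend from start to end as step runs from 0 to total_steps.
    return start + (end - start) * step / total_steps

for step in range(5):
    print({"brightness": interpolate(0.0, 1.0, step, 4)})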
cloudnull/turbolift
turbolift/methods/__init__.py
https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/methods/__init__.py#L497-L523
def _index_fs(self): """Returns a deque object full of local file system items. :returns: ``deque`` """ indexed_objects = self._return_deque() directory = self.job_args.get('directory') if directory: indexed_objects = self._return_deque( deque=indexed_objects, item=self._drectory_local_files( directory=directory ) ) object_names = self.job_args.get('object') if object_names: indexed_objects = self._return_deque( deque=indexed_objects, item=self._named_local_files( object_names=object_names ) ) return indexed_objects
[ "def", "_index_fs", "(", "self", ")", ":", "indexed_objects", "=", "self", ".", "_return_deque", "(", ")", "directory", "=", "self", ".", "job_args", ".", "get", "(", "'directory'", ")", "if", "directory", ":", "indexed_objects", "=", "self", ".", "_return_deque", "(", "deque", "=", "indexed_objects", ",", "item", "=", "self", ".", "_drectory_local_files", "(", "directory", "=", "directory", ")", ")", "object_names", "=", "self", ".", "job_args", ".", "get", "(", "'object'", ")", "if", "object_names", ":", "indexed_objects", "=", "self", ".", "_return_deque", "(", "deque", "=", "indexed_objects", ",", "item", "=", "self", ".", "_named_local_files", "(", "object_names", "=", "object_names", ")", ")", "return", "indexed_objects" ]
Returns a deque object full of local file system items. :returns: ``deque``
[ "Returns", "a", "deque", "object", "full", "of", "local", "file", "system", "items", "." ]
python
train
28
VingtCinq/python-mailchimp
mailchimp3/entities/listwebhooks.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/listwebhooks.py#L97-L108
def delete(self, list_id, webhook_id): """ Delete a specific webhook in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param webhook_id: The unique id for the webhook. :type webhook_id: :py:class:`str` """ self.list_id = list_id self.webhook_id = webhook_id return self._mc_client._delete(url=self._build_path(list_id, 'webhooks', webhook_id))
[ "def", "delete", "(", "self", ",", "list_id", ",", "webhook_id", ")", ":", "self", ".", "list_id", "=", "list_id", "self", ".", "webhook_id", "=", "webhook_id", "return", "self", ".", "_mc_client", ".", "_delete", "(", "url", "=", "self", ".", "_build_path", "(", "list_id", ",", "'webhooks'", ",", "webhook_id", ")", ")" ]
Delete a specific webhook in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param webhook_id: The unique id for the webhook. :type webhook_id: :py:class:`str`
[ "Delete", "a", "specific", "webhook", "in", "a", "list", "." ]
python
valid
37.5
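A call sketch with placeholder IDs; the client wiring follows the mailchimp3 README, where entity endpoints hang off the MailChimp client as attributes.

from mailchimp3 import MailChimp

client = MailChimp(mc_api="your-api-key-us1")
client.lists.webhooks.delete(list_id="list123", webhook_id="hook456")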
davidfokkema/artist
demo/demo_event_display.py
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/demo/demo_event_display.py#L4-L83
def main(): """Event display for an event of station 503 Date Time Timestamp Nanoseconds 2012-03-29 10:51:36 1333018296 870008589 Number of MIPs 35.0 51.9 35.8 78.9 Arrival time 15.0 17.5 20.0 27.5 """ # Detector positions in ENU relative to the station GPS x = [-6.34, -2.23, -3.6, 3.46] y = [6.34, 2.23, -3.6, 3.46] # Scale mips to fit the graph n = [35.0, 51.9, 35.8, 78.9] # Make times relative to first detection t = [15., 17.5, 20., 27.5] dt = [ti - min(t) for ti in t] plot = Plot() plot.scatter([0], [0], mark='triangle') plot.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') plot.scatter_table(x, y, dt, n) plot.set_scalebar(location="lower right") plot.set_colorbar('$\Delta$t [ns]') plot.set_axis_equal() plot.set_mlimits(max=16.) plot.set_slimits(min=10., max=100.) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('event_display') # Add event by Station 508 # Detector positions in ENU relative to the station GPS x508 = [6.12, 0.00, -3.54, 3.54] y508 = [-6.12, -13.23, -3.54, 3.54] # Event GPS timestamp: 1371498167.016412100 # MIPS n508 = [5.6, 16.7, 36.6, 9.0] # Arrival Times t508 = [15., 22.5, 22.5, 30.] dt508 = [ti - min(t508) for ti in t508] plot = MultiPlot(1, 2, width=r'.33\linewidth') plot.set_xlimits_for_all(min=-10, max=15) plot.set_ylimits_for_all(min=-15, max=10) plot.set_mlimits_for_all(min=0., max=16.) plot.set_colorbar('$\Delta$t [ns]', False) plot.set_colormap('blackwhite') plot.set_scalebar_for_all(location="upper right") p = plot.get_subplot_at(0, 0) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') p.scatter_table(x, y, dt, n) p.set_axis_equal() p = plot.get_subplot_at(0, 1) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 508', use_arrow=False, location='below') p.scatter_table(x508, y508, dt508, n508) p.set_axis_equal() plot.show_yticklabels_for_all([(0, 0)]) plot.show_xticklabels_for_all([(0, 0), (0, 1)]) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('multi_event_display')
[ "def", "main", "(", ")", ":", "# Detector positions in ENU relative to the station GPS", "x", "=", "[", "-", "6.34", ",", "-", "2.23", ",", "-", "3.6", ",", "3.46", "]", "y", "=", "[", "6.34", ",", "2.23", ",", "-", "3.6", ",", "3.46", "]", "# Scale mips to fit the graph", "n", "=", "[", "35.0", ",", "51.9", ",", "35.8", ",", "78.9", "]", "# Make times relative to first detection", "t", "=", "[", "15.", ",", "17.5", ",", "20.", ",", "27.5", "]", "dt", "=", "[", "ti", "-", "min", "(", "t", ")", "for", "ti", "in", "t", "]", "plot", "=", "Plot", "(", ")", "plot", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "plot", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 503'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "plot", ".", "scatter_table", "(", "x", ",", "y", ",", "dt", ",", "n", ")", "plot", ".", "set_scalebar", "(", "location", "=", "\"lower right\"", ")", "plot", ".", "set_colorbar", "(", "'$\\Delta$t [ns]'", ")", "plot", ".", "set_axis_equal", "(", ")", "plot", ".", "set_mlimits", "(", "max", "=", "16.", ")", "plot", ".", "set_slimits", "(", "min", "=", "10.", ",", "max", "=", "100.", ")", "plot", ".", "set_xlabel", "(", "'x [m]'", ")", "plot", ".", "set_ylabel", "(", "'y [m]'", ")", "plot", ".", "save", "(", "'event_display'", ")", "# Add event by Station 508", "# Detector positions in ENU relative to the station GPS", "x508", "=", "[", "6.12", ",", "0.00", ",", "-", "3.54", ",", "3.54", "]", "y508", "=", "[", "-", "6.12", ",", "-", "13.23", ",", "-", "3.54", ",", "3.54", "]", "# Event GPS timestamp: 1371498167.016412100", "# MIPS", "n508", "=", "[", "5.6", ",", "16.7", ",", "36.6", ",", "9.0", "]", "# Arrival Times", "t508", "=", "[", "15.", ",", "22.5", ",", "22.5", ",", "30.", "]", "dt508", "=", "[", "ti", "-", "min", "(", "t508", ")", "for", "ti", "in", "t508", "]", "plot", "=", "MultiPlot", "(", "1", ",", "2", ",", "width", "=", "r'.33\\linewidth'", ")", "plot", ".", "set_xlimits_for_all", "(", "min", "=", "-", "10", ",", "max", "=", "15", ")", "plot", ".", "set_ylimits_for_all", "(", "min", "=", "-", "15", ",", "max", "=", "10", ")", "plot", ".", "set_mlimits_for_all", "(", "min", "=", "0.", ",", "max", "=", "16.", ")", "plot", ".", "set_colorbar", "(", "'$\\Delta$t [ns]'", ",", "False", ")", "plot", ".", "set_colormap", "(", "'blackwhite'", ")", "plot", ".", "set_scalebar_for_all", "(", "location", "=", "\"upper right\"", ")", "p", "=", "plot", ".", "get_subplot_at", "(", "0", ",", "0", ")", "p", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "p", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 503'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "p", ".", "scatter_table", "(", "x", ",", "y", ",", "dt", ",", "n", ")", "p", ".", "set_axis_equal", "(", ")", "p", "=", "plot", ".", "get_subplot_at", "(", "0", ",", "1", ")", "p", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "p", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 508'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "p", ".", "scatter_table", "(", "x508", ",", "y508", ",", "dt508", ",", "n508", ")", "p", ".", "set_axis_equal", "(", ")", "plot", ".", "show_yticklabels_for_all", "(", "[", "(", "0", ",", "0", ")", "]", ")", "plot", ".", "show_xticklabels_for_all", "(", "[", "(", "0", ",", "0", ")", ",", "(", "0", ",", "1", ")", "]", ")", "plot", ".", "set_xlabel", "(", "'x [m]'", ")", 
"plot", ".", "set_ylabel", "(", "'y [m]'", ")", "plot", ".", "save", "(", "'multi_event_display'", ")" ]
Event display for an event of station 503 Date Time Timestamp Nanoseconds 2012-03-29 10:51:36 1333018296 870008589 Number of MIPs 35.0 51.9 35.8 78.9 Arrival time 15.0 17.5 20.0 27.5
[ "Event", "display", "for", "an", "event", "of", "station", "503" ]
python
train
28.15
saltstack/salt
salt/modules/win_dacl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dacl.py#L748-L810
def check_ace(path, objectType, user, permission=None, acetype=None, propagation=None, exactPermissionMatch=False): ''' Checks a path to verify the ACE (access control entry) specified exists Args: path: path to the file/reg key objectType: The type of object (FILE, DIRECTORY, REGISTRY) user: user that the ACL is for permission: permission to test for (READ, FULLCONTROL, etc) acetype: the type of ACE (ALLOW or DENY) propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc) exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously) Returns (dict): 'Exists' true if the ACE exists, false if it does not CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol ''' ret = {'result': False, 'Exists': False, 'comment': ''} dc = daclConstants() objectTypeBit = dc.getObjectTypeBit(objectType) path = dc.processPath(path, objectTypeBit) permission = permission.upper() if permission else None acetype = acetype.upper() if acetype else None propagation = propagation.upper() if propagation else None permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None acetypebit = dc.getAceTypeBit(acetype) if acetype else None propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None sidRet = _getUserSid(user) if not sidRet['result']: return sidRet dacls = _get_dacl(path, objectTypeBit) ret['result'] = True if dacls: for counter in range(0, dacls.GetAceCount()): ace = dacls.GetAce(counter) if ace[2] == sidRet['sid']: if not acetypebit or ace[0][0] == acetypebit: if not propagationbit or (ace[0][1] & propagationbit) == propagationbit: if not permissionbit: ret['Exists'] = True return ret if exactPermissionMatch: if ace[1] == permissionbit: ret['Exists'] = True return ret else: if (ace[1] & permissionbit) == permissionbit: ret['Exists'] = True return ret else: ret['comment'] = 'No DACL found for object.' return ret
[ "def", "check_ace", "(", "path", ",", "objectType", ",", "user", ",", "permission", "=", "None", ",", "acetype", "=", "None", ",", "propagation", "=", "None", ",", "exactPermissionMatch", "=", "False", ")", ":", "ret", "=", "{", "'result'", ":", "False", ",", "'Exists'", ":", "False", ",", "'comment'", ":", "''", "}", "dc", "=", "daclConstants", "(", ")", "objectTypeBit", "=", "dc", ".", "getObjectTypeBit", "(", "objectType", ")", "path", "=", "dc", ".", "processPath", "(", "path", ",", "objectTypeBit", ")", "permission", "=", "permission", ".", "upper", "(", ")", "if", "permission", "else", "None", "acetype", "=", "acetype", ".", "upper", "(", ")", "if", "permission", "else", "None", "propagation", "=", "propagation", ".", "upper", "(", ")", "if", "propagation", "else", "None", "permissionbit", "=", "dc", ".", "getPermissionBit", "(", "objectTypeBit", ",", "permission", ")", "if", "permission", "else", "None", "acetypebit", "=", "dc", ".", "getAceTypeBit", "(", "acetype", ")", "if", "acetype", "else", "None", "propagationbit", "=", "dc", ".", "getPropagationBit", "(", "objectTypeBit", ",", "propagation", ")", "if", "propagation", "else", "None", "sidRet", "=", "_getUserSid", "(", "user", ")", "if", "not", "sidRet", "[", "'result'", "]", ":", "return", "sidRet", "dacls", "=", "_get_dacl", "(", "path", ",", "objectTypeBit", ")", "ret", "[", "'result'", "]", "=", "True", "if", "dacls", ":", "for", "counter", "in", "range", "(", "0", ",", "dacls", ".", "GetAceCount", "(", ")", ")", ":", "ace", "=", "dacls", ".", "GetAce", "(", "counter", ")", "if", "ace", "[", "2", "]", "==", "sidRet", "[", "'sid'", "]", ":", "if", "not", "acetypebit", "or", "ace", "[", "0", "]", "[", "0", "]", "==", "acetypebit", ":", "if", "not", "propagationbit", "or", "(", "ace", "[", "0", "]", "[", "1", "]", "&", "propagationbit", ")", "==", "propagationbit", ":", "if", "not", "permissionbit", ":", "ret", "[", "'Exists'", "]", "=", "True", "return", "ret", "if", "exactPermissionMatch", ":", "if", "ace", "[", "1", "]", "==", "permissionbit", ":", "ret", "[", "'Exists'", "]", "=", "True", "return", "ret", "else", ":", "if", "(", "ace", "[", "1", "]", "&", "permissionbit", ")", "==", "permissionbit", ":", "ret", "[", "'Exists'", "]", "=", "True", "return", "ret", "else", ":", "ret", "[", "'comment'", "]", "=", "'No DACL found for object.'", "return", "ret" ]
Checks a path to verify the ACE (access control entry) specified exists Args: path: path to the file/reg key objectType: The type of object (FILE, DIRECTORY, REGISTRY) user: user that the ACL is for permission: permission to test for (READ, FULLCONTROL, etc) acetype: the type of ACE (ALLOW or DENY) propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc) exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously) Returns (dict): 'Exists' true if the ACE exists, false if it does not CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol
[ "Checks", "a", "path", "to", "verify", "the", "ACE", "(", "access", "control", "entry", ")", "specified", "exists" ]
python
train
41.539683
robotools/fontParts
Lib/fontParts/base/font.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/font.py#L1453-L1499
def _isCompatible(self, other, reporter): """ This is the environment implementation of :meth:`BaseFont.isCompatible`. Subclasses may override this method. """ font1 = self font2 = other # incompatible guidelines guidelines1 = set(font1.guidelines) guidelines2 = set(font2.guidelines) if len(guidelines1) != len(guidelines2): reporter.warning = True reporter.guidelineCountDifference = True if len(guidelines1.difference(guidelines2)) != 0: reporter.warning = True reporter.guidelinesMissingFromFont2 = list( guidelines1.difference(guidelines2)) if len(guidelines2.difference(guidelines1)) != 0: reporter.warning = True reporter.guidelinesMissingInFont1 = list( guidelines2.difference(guidelines1)) # incompatible layers layers1 = set(font1.layerOrder) layers2 = set(font2.layerOrder) if len(layers1) != len(layers2): reporter.warning = True reporter.layerCountDifference = True if len(layers1.difference(layers2)) != 0: reporter.warning = True reporter.layersMissingFromFont2 = list(layers1.difference(layers2)) if len(layers2.difference(layers1)) != 0: reporter.warning = True reporter.layersMissingInFont1 = list(layers2.difference(layers1)) # test layers for layerName in sorted(layers1.intersection(layers2)): layer1 = font1.getLayer(layerName) layer2 = font2.getLayer(layerName) layerCompatibility = layer1.isCompatible(layer2)[1] if layerCompatibility.fatal or layerCompatibility.warning: if layerCompatibility.fatal: reporter.fatal = True if layerCompatibility.warning: reporter.warning = True reporter.layers.append(layerCompatibility)
[ "def", "_isCompatible", "(", "self", ",", "other", ",", "reporter", ")", ":", "font1", "=", "self", "font2", "=", "other", "# incompatible guidelines", "guidelines1", "=", "set", "(", "font1", ".", "guidelines", ")", "guidelines2", "=", "set", "(", "font2", ".", "guidelines", ")", "if", "len", "(", "guidelines1", ")", "!=", "len", "(", "guidelines2", ")", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "guidelineCountDifference", "=", "True", "if", "len", "(", "guidelines1", ".", "difference", "(", "guidelines2", ")", ")", "!=", "0", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "guidelinesMissingFromFont2", "=", "list", "(", "guidelines1", ".", "difference", "(", "guidelines2", ")", ")", "if", "len", "(", "guidelines2", ".", "difference", "(", "guidelines1", ")", ")", "!=", "0", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "guidelinesMissingInFont1", "=", "list", "(", "guidelines2", ".", "difference", "(", "guidelines1", ")", ")", "# incompatible layers", "layers1", "=", "set", "(", "font1", ".", "layerOrder", ")", "layers2", "=", "set", "(", "font2", ".", "layerOrder", ")", "if", "len", "(", "layers1", ")", "!=", "len", "(", "layers2", ")", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "layerCountDifference", "=", "True", "if", "len", "(", "layers1", ".", "difference", "(", "layers2", ")", ")", "!=", "0", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "layersMissingFromFont2", "=", "list", "(", "layers1", ".", "difference", "(", "layers2", ")", ")", "if", "len", "(", "layers2", ".", "difference", "(", "layers1", ")", ")", "!=", "0", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "layersMissingInFont1", "=", "list", "(", "layers2", ".", "difference", "(", "layers1", ")", ")", "# test layers", "for", "layerName", "in", "sorted", "(", "layers1", ".", "intersection", "(", "layers2", ")", ")", ":", "layer1", "=", "font1", ".", "getLayer", "(", "layerName", ")", "layer2", "=", "font2", ".", "getLayer", "(", "layerName", ")", "layerCompatibility", "=", "layer1", ".", "isCompatible", "(", "layer2", ")", "[", "1", "]", "if", "layerCompatibility", ".", "fatal", "or", "layerCompatibility", ".", "warning", ":", "if", "layerCompatibility", ".", "fatal", ":", "reporter", ".", "fatal", "=", "True", "if", "layerCompatibility", ".", "warning", ":", "reporter", ".", "warning", "=", "True", "reporter", ".", "layers", ".", "append", "(", "layerCompatibility", ")" ]
This is the environment implementation of :meth:`BaseFont.isCompatible`. Subclasses may override this method.
[ "This", "is", "the", "environment", "implementation", "of", ":", "meth", ":", "BaseFont", ".", "isCompatible", "." ]
python
train
42.12766
bids-standard/pybids
bids/reports/utils.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/reports/utils.py#L22-L29
def remove_duplicates(seq): """ Return unique elements from list while preserving order. From https://stackoverflow.com/a/480227/2589328 """ seen = set() seen_add = seen.add return [x for x in seq if not (x in seen or seen_add(x))]
[ "def", "remove_duplicates", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "return", "[", "x", "for", "x", "in", "seq", "if", "not", "(", "x", "in", "seen", "or", "seen_add", "(", "x", ")", ")", "]" ]
Return unique elements from list while preserving order. From https://stackoverflow.com/a/480227/2589328
[ "Return", "unique", "elements", "from", "list", "while", "preserving", "order", ".", "From", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "480227", "/", "2589328" ]
python
train
31.5
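A behaviour check reusing the function above, plus a common alternative: on Python 3.7+ dict.fromkeys preserves insertion order and gives the same result for hashable items.

seq = [3, 1, 3, 2, 1]
print(remove_duplicates(seq))     # [3, 1, 2]
print(list(dict.fromkeys(seq)))   # [3, 1, 2]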
wesm/feather
cpp/build-support/cpplint.py
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L2573-L2734
def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++. Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of %qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and <? operators, and their >?= and <?= cousins. Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if Search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if Search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. line = clean_lines.elided[linenum] if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.') if Match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.') if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): # TODO(unknown): Could it be expanded safely to arbitrary references, # without triggering too many false positives? The first # attempt triggered 5 warnings for mostly benign code in the regtest, hence # the restriction. # Here's the original regexp, for the reference: # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet. classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. Also look for # non-single-argument constructors which are also technically valid, but # strongly suggest something is wrong. explicit_constructor_match = Match( r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line) if explicit_constructor_match: is_marked_explicit = explicit_constructor_match.group(1) if not explicit_constructor_match.group(2): constructor_args = [] else: constructor_args = explicit_constructor_match.group(2).split(',') # collapse arguments so that commas in template parameter lists and function # argument parameter lists don't split arguments in two i = 0 while i < len(constructor_args): constructor_arg = constructor_args[i] while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')): constructor_arg += ',' + constructor_args[i + 1] del constructor_args[i + 1] constructor_args[i] = constructor_arg i += 1 defaulted_args = [arg for arg in constructor_args if '=' in arg] noarg_constructor = (not constructor_args or # empty arg list # 'void' arg specifier (len(constructor_args) == 1 and constructor_args[0].strip() == 'void')) onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg not noarg_constructor) or # all but at most one arg defaulted (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1)) initializer_list_constructor = bool( onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip())) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): if defaulted_args: error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.') elif is_marked_explicit and not onearg_constructor: if noarg_constructor: error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter constructors should not be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 0, 'Constructors that require multiple arguments ' 'should not be marked explicit.')
[ "def", "CheckForNonStandardConstructs", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", ":", "# Remove comments from the line, but leave in strings for now.", "line", "=", "clean_lines", ".", "lines", "[", "linenum", "]", "if", "Search", "(", "r'printf\\s*\\(.*\".*%[-+ ]?\\d*q'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf_format'", ",", "3", ",", "'%q in format strings is deprecated. Use %ll instead.'", ")", "if", "Search", "(", "r'printf\\s*\\(.*\".*%\\d+\\$'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf_format'", ",", "2", ",", "'%N$ formats are unconventional. Try rewriting to avoid them.'", ")", "# Remove escaped backslashes before looking for undefined escapes.", "line", "=", "line", ".", "replace", "(", "'\\\\\\\\'", ",", "''", ")", "if", "Search", "(", "r'(\"|\\').*\\\\(%|\\[|\\(|{)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/printf_format'", ",", "3", ",", "'%, [, (, and { are undefined character escapes. Unescape them.'", ")", "# For the rest, work with both comments and strings removed.", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "Search", "(", "r'\\b(const|volatile|void|char|short|int|long'", "r'|float|double|signed|unsigned'", "r'|schar|u?int8|u?int16|u?int32|u?int64)'", "r'\\s+(register|static|extern|typedef)\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/storage_class'", ",", "5", ",", "'Storage class (static, extern, typedef, etc) should be first.'", ")", "if", "Match", "(", "r'\\s*#\\s*endif\\s*[^/\\s]+'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/endif_comment'", ",", "5", ",", "'Uncommented text after #endif is non-standard. Use a comment.'", ")", "if", "Match", "(", "r'\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/forward_decl'", ",", "5", ",", "'Inner-style forward declarations are invalid. Remove this line.'", ")", "if", "Search", "(", "r'(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/deprecated'", ",", "3", ",", "'>? and <? (max and min) operators are non-standard and deprecated.'", ")", "if", "Search", "(", "r'^\\s*const\\s*string\\s*&\\s*\\w+\\s*;'", ",", "line", ")", ":", "# TODO(unknown): Could it be expanded safely to arbitrary references,", "# without triggering too many false positives? The first", "# attempt triggered 5 warnings for mostly benign code in the regtest, hence", "# the restriction.", "# Here's the original regexp, for the reference:", "# type_name = r'\\w+((\\s*::\\s*\\w+)|(\\s*<\\s*\\w+?\\s*>))?'", "# r'\\s*const\\s*' + type_name + '\\s*&\\s*\\w+\\s*;'", "error", "(", "filename", ",", "linenum", ",", "'runtime/member_string_references'", ",", "2", ",", "'const string& members are dangerous. 
It is much better to use '", "'alternatives, such as pointers or simple constants.'", ")", "# Everything else in this function operates on class declarations.", "# Return early if the top of the nesting stack is not a class, or if", "# the class head is not completed yet.", "classinfo", "=", "nesting_state", ".", "InnermostClass", "(", ")", "if", "not", "classinfo", "or", "not", "classinfo", ".", "seen_open_brace", ":", "return", "# The class may have been declared with namespace or classname qualifiers.", "# The constructor and destructor will not have those qualifiers.", "base_classname", "=", "classinfo", ".", "name", ".", "split", "(", "'::'", ")", "[", "-", "1", "]", "# Look for single-argument constructors that aren't marked explicit.", "# Technically a valid construct, but against style. Also look for", "# non-single-argument constructors which are also technically valid, but", "# strongly suggest something is wrong.", "explicit_constructor_match", "=", "Match", "(", "r'\\s+(?:inline\\s+)?(explicit\\s+)?(?:inline\\s+)?%s\\s*'", "r'\\(((?:[^()]|\\([^()]*\\))*)\\)'", "%", "re", ".", "escape", "(", "base_classname", ")", ",", "line", ")", "if", "explicit_constructor_match", ":", "is_marked_explicit", "=", "explicit_constructor_match", ".", "group", "(", "1", ")", "if", "not", "explicit_constructor_match", ".", "group", "(", "2", ")", ":", "constructor_args", "=", "[", "]", "else", ":", "constructor_args", "=", "explicit_constructor_match", ".", "group", "(", "2", ")", ".", "split", "(", "','", ")", "# collapse arguments so that commas in template parameter lists and function", "# argument parameter lists don't split arguments in two", "i", "=", "0", "while", "i", "<", "len", "(", "constructor_args", ")", ":", "constructor_arg", "=", "constructor_args", "[", "i", "]", "while", "(", "constructor_arg", ".", "count", "(", "'<'", ")", ">", "constructor_arg", ".", "count", "(", "'>'", ")", "or", "constructor_arg", ".", "count", "(", "'('", ")", ">", "constructor_arg", ".", "count", "(", "')'", ")", ")", ":", "constructor_arg", "+=", "','", "+", "constructor_args", "[", "i", "+", "1", "]", "del", "constructor_args", "[", "i", "+", "1", "]", "constructor_args", "[", "i", "]", "=", "constructor_arg", "i", "+=", "1", "defaulted_args", "=", "[", "arg", "for", "arg", "in", "constructor_args", "if", "'='", "in", "arg", "]", "noarg_constructor", "=", "(", "not", "constructor_args", "or", "# empty arg list", "# 'void' arg specifier", "(", "len", "(", "constructor_args", ")", "==", "1", "and", "constructor_args", "[", "0", "]", ".", "strip", "(", ")", "==", "'void'", ")", ")", "onearg_constructor", "=", "(", "(", "len", "(", "constructor_args", ")", "==", "1", "and", "# exactly one arg", "not", "noarg_constructor", ")", "or", "# all but at most one arg defaulted", "(", "len", "(", "constructor_args", ")", ">=", "1", "and", "not", "noarg_constructor", "and", "len", "(", "defaulted_args", ")", ">=", "len", "(", "constructor_args", ")", "-", "1", ")", ")", "initializer_list_constructor", "=", "bool", "(", "onearg_constructor", "and", "Search", "(", "r'\\bstd\\s*::\\s*initializer_list\\b'", ",", "constructor_args", "[", "0", "]", ")", ")", "copy_constructor", "=", "bool", "(", "onearg_constructor", "and", "Match", "(", "r'(const\\s+)?%s(\\s*<[^>]*>)?(\\s+const)?\\s*(?:<\\w+>\\s*)?&'", "%", "re", ".", "escape", "(", "base_classname", ")", ",", "constructor_args", "[", "0", "]", ".", "strip", "(", ")", ")", ")", "if", "(", "not", "is_marked_explicit", "and", "onearg_constructor", "and", "not", 
"initializer_list_constructor", "and", "not", "copy_constructor", ")", ":", "if", "defaulted_args", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/explicit'", ",", "5", ",", "'Constructors callable with one argument '", "'should be marked explicit.'", ")", "else", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/explicit'", ",", "5", ",", "'Single-parameter constructors should be marked explicit.'", ")", "elif", "is_marked_explicit", "and", "not", "onearg_constructor", ":", "if", "noarg_constructor", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/explicit'", ",", "5", ",", "'Zero-parameter constructors should not be marked explicit.'", ")", "else", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/explicit'", ",", "0", ",", "'Constructors that require multiple arguments '", "'should not be marked explicit.'", ")" ]
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++. Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of %qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and <? operators, and their >?= and <?= cousins. Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message
[ "r", "Logs", "an", "error", "if", "we", "see", "certain", "non", "-", "ANSI", "constructs", "ignored", "by", "gcc", "-", "2", "." ]
python
train
44.845679
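One of the checks above in isolation: the storage-class regex flags declarations where static, extern or typedef follow the type word instead of leading it.

import re

pattern = (r'\b(const|volatile|void|char|short|int|long'
           r'|float|double|signed|unsigned'
           r'|schar|u?int8|u?int16|u?int32|u?int64)'
           r'\s+(register|static|extern|typedef)\b')
print(bool(re.search(pattern, "const static int kMax = 10;")))  # True: flagged
print(bool(re.search(pattern, "static const int kMax = 10;")))  # False: fine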
BerkeleyAutomation/perception
perception/cnn.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/cnn.py#L131-L139
def _initialize(self): """ Open from caffe weights """ self._graph = tf.Graph() with self._graph.as_default(): self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels)) weights = self.build_alexnet_weights() self._output_tensor = self.build_alexnet(weights) self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer) self._initialized = True
[ "def", "_initialize", "(", "self", ")", ":", "self", ".", "_graph", "=", "tf", ".", "Graph", "(", ")", "with", "self", ".", "_graph", ".", "as_default", "(", ")", ":", "self", ".", "_input_node", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "(", "self", ".", "_batch_size", ",", "self", ".", "_im_height", ",", "self", ".", "_im_width", ",", "self", ".", "_num_channels", ")", ")", "weights", "=", "self", ".", "build_alexnet_weights", "(", ")", "self", ".", "_output_tensor", "=", "self", ".", "build_alexnet", "(", "weights", ")", "self", ".", "_feature_tensor", "=", "self", ".", "build_alexnet", "(", "weights", ",", "output_layer", "=", "self", ".", "_feature_layer", ")", "self", ".", "_initialized", "=", "True" ]
Open from caffe weights
[ "Open", "from", "caffe", "weights" ]
python
train
56
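The graph-and-placeholder pattern in _initialize is TF1-style; under TensorFlow 2 the same shape is reproduced through tf.compat.v1, as sketched below (the 227x227x3 AlexNet input size is an assumption, not taken from this record).

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    # Batch of one image, height x width x channels as in the record above.
    input_node = tf.placeholder(tf.float32, (1, 227, 227, 3))
print(input_node.shape)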
libyal/dtfabric
dtfabric/reader.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/reader.py#L1227-L1267
def ReadFileObject(self, definitions_registry, file_object): """Reads data type definitions from a file-like object into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. file_object (file): file-like object to read from. Raises: FormatError: if the definitions values are missing or if the format is incorrect. """ last_definition_object = None error_location = None error_message = None try: yaml_generator = yaml.safe_load_all(file_object) for yaml_definition in yaml_generator: definition_object = self._ReadDefinition( definitions_registry, yaml_definition) if not definition_object: error_location = self._GetFormatErrorLocation( yaml_definition, last_definition_object) error_message = '{0:s} Missing definition object.'.format( error_location) raise errors.FormatError(error_message) definitions_registry.RegisterDefinition(definition_object) last_definition_object = definition_object except errors.DefinitionReaderError as exception: error_message = 'in: {0:s} {1:s}'.format( exception.name or '<NAMELESS>', exception.message) raise errors.FormatError(error_message) except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception: error_location = self._GetFormatErrorLocation({}, last_definition_object) error_message = '{0:s} {1!s}'.format(error_location, exception) raise errors.FormatError(error_message)
[ "def", "ReadFileObject", "(", "self", ",", "definitions_registry", ",", "file_object", ")", ":", "last_definition_object", "=", "None", "error_location", "=", "None", "error_message", "=", "None", "try", ":", "yaml_generator", "=", "yaml", ".", "safe_load_all", "(", "file_object", ")", "for", "yaml_definition", "in", "yaml_generator", ":", "definition_object", "=", "self", ".", "_ReadDefinition", "(", "definitions_registry", ",", "yaml_definition", ")", "if", "not", "definition_object", ":", "error_location", "=", "self", ".", "_GetFormatErrorLocation", "(", "yaml_definition", ",", "last_definition_object", ")", "error_message", "=", "'{0:s} Missing definition object.'", ".", "format", "(", "error_location", ")", "raise", "errors", ".", "FormatError", "(", "error_message", ")", "definitions_registry", ".", "RegisterDefinition", "(", "definition_object", ")", "last_definition_object", "=", "definition_object", "except", "errors", ".", "DefinitionReaderError", "as", "exception", ":", "error_message", "=", "'in: {0:s} {1:s}'", ".", "format", "(", "exception", ".", "name", "or", "'<NAMELESS>'", ",", "exception", ".", "message", ")", "raise", "errors", ".", "FormatError", "(", "error_message", ")", "except", "(", "yaml", ".", "reader", ".", "ReaderError", ",", "yaml", ".", "scanner", ".", "ScannerError", ")", "as", "exception", ":", "error_location", "=", "self", ".", "_GetFormatErrorLocation", "(", "{", "}", ",", "last_definition_object", ")", "error_message", "=", "'{0:s} {1!s}'", ".", "format", "(", "error_location", ",", "exception", ")", "raise", "errors", ".", "FormatError", "(", "error_message", ")" ]
Reads data type definitions from a file-like object into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. file_object (file): file-like object to read from. Raises: FormatError: if the definitions values are missing or if the format is incorrect.
[ "Reads", "data", "type", "definitions", "from", "a", "file", "-", "like", "object", "into", "the", "registry", "." ]
python
train
38.560976
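The multi-document iteration at the heart of ReadFileObject, shown on an in-memory stream: yaml.safe_load_all yields one parsed object per ---separated document, and a falsy result would take the FormatError path above.

import io
import yaml

stream = io.StringIO(
    "name: int32\ntype: integer\n"
    "---\n"
    "name: int64\ntype: integer\n")
for definition in yaml.safe_load_all(stream):
    print(definition["name"], definition["type"])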
contentful/contentful.py
contentful/client.py
https://github.com/contentful/contentful.py/blob/73fe01d6ae5a1f8818880da65199107b584681dd/contentful/client.py#L227-L257
def entries(self, query=None): """Fetches all Entries from the Space (up to the set limit, can be modified in `query`). API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space :param query: (optional) Dict with API options. :return: List of :class:`Entry <contentful.entry.Entry>` objects. :rtype: List of contentful.entry.Entry Usage: >>> entries = client.entries() [<Entry[cat] id='happycat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>, <Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>, <Entry[cat] id='garfield'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>, <Entry[cat] id='nyancat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>, <Entry[human] id='finn'>, <Entry[dog] id='jake'>] """ if query is None: query = {} self._normalize_select(query) return self._get( self.environment_url('/entries'), query )
[ "def", "entries", "(", "self", ",", "query", "=", "None", ")", ":", "if", "query", "is", "None", ":", "query", "=", "{", "}", "self", ".", "_normalize_select", "(", "query", ")", "return", "self", ".", "_get", "(", "self", ".", "environment_url", "(", "'/entries'", ")", ",", "query", ")" ]
Fetches all Entries from the Space (up to the set limit, can be modified in `query`). API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space :param query: (optional) Dict with API options. :return: List of :class:`Entry <contentful.entry.Entry>` objects. :rtype: List of contentful.entry.Entry Usage: >>> entries = client.entries() [<Entry[cat] id='happycat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>, <Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>, <Entry[cat] id='garfield'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>, <Entry[cat] id='nyancat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>, <Entry[human] id='finn'>, <Entry[dog] id='jake'>]
[ "Fetches", "all", "Entries", "from", "the", "Space", "(", "up", "to", "the", "set", "limit", "can", "be", "modified", "in", "query", ")", "." ]
python
train
40.258065
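The query dict in the entries record above is passed straight through to the Delivery API. A hedged sketch: 'content_type' and 'limit' follow Contentful's documented query keys, and the credentials are placeholders.

import contentful

client = contentful.Client('space_id', 'delivery_access_token')  # placeholders

all_entries = client.entries()                               # everything, up to the API limit
cats = client.entries({'content_type': 'cat', 'limit': 2})   # filtered and capped
for entry in cats:
    print(entry.id)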
crytic/slither
slither/detectors/shadowing/local.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/detectors/shadowing/local.py#L92-L131
def _detect(self):
        """ Detect shadowing local variables

        Recursively visit the calls
        Returns:
            list: {'vuln', 'filename', 'contract', 'func', 'shadow'}

        """

        results = []
        for contract in self.contracts:
            shadows = self.detect_shadowing_definitions(contract)
            if shadows:
                for shadow in shadows:
                    local_parent_name = shadow[1]
                    local_variable = shadow[2]
                    overshadowed = shadow[3]
                    info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(contract.name,
                                                                              local_parent_name,
                                                                              local_variable.name,
                                                                              local_variable.source_mapping_str)
                    for overshadowed_entry in overshadowed:
                        info += "\t- {}.{} ({} @ {})\n".format(overshadowed_entry[1],
                                                               overshadowed_entry[2],
                                                               overshadowed_entry[0],
                                                               overshadowed_entry[2].source_mapping_str)

                    # Generate relevant JSON data for this shadowing definition.
                    json = self.generate_json_result(info)
                    self.add_variable_to_json(local_variable, json)
                    for overshadowed_entry in overshadowed:
                        if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION, self.OVERSHADOWED_MODIFIER,
                                                     self.OVERSHADOWED_EVENT]:
                            self.add_function_to_json(overshadowed_entry[2], json)
                        elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:
                            self.add_variable_to_json(overshadowed_entry[2], json)
                    results.append(json)

        return results
[ "def", "_detect", "(", "self", ")", ":", "results", "=", "[", "]", "for", "contract", "in", "self", ".", "contracts", ":", "shadows", "=", "self", ".", "detect_shadowing_definitions", "(", "contract", ")", "if", "shadows", ":", "for", "shadow", "in", "shadows", ":", "local_parent_name", "=", "shadow", "[", "1", "]", "local_variable", "=", "shadow", "[", "2", "]", "overshadowed", "=", "shadow", "[", "3", "]", "info", "=", "'{}.{}.{} (local variable @ {}) shadows:\\n'", ".", "format", "(", "contract", ".", "name", ",", "local_parent_name", ",", "local_variable", ".", "name", ",", "local_variable", ".", "source_mapping_str", ")", "for", "overshadowed_entry", "in", "overshadowed", ":", "info", "+=", "\"\\t- {}.{} ({} @ {})\\n\"", ".", "format", "(", "overshadowed_entry", "[", "1", "]", ",", "overshadowed_entry", "[", "2", "]", ",", "overshadowed_entry", "[", "0", "]", ",", "overshadowed_entry", "[", "2", "]", ".", "source_mapping_str", ")", "# Generate relevant JSON data for this shadowing definition.", "json", "=", "self", ".", "generate_json_result", "(", "info", ")", "self", ".", "add_variable_to_json", "(", "local_variable", ",", "json", ")", "for", "overshadowed_entry", "in", "overshadowed", ":", "if", "overshadowed_entry", "[", "0", "]", "in", "[", "self", ".", "OVERSHADOWED_FUNCTION", ",", "self", ".", "OVERSHADOWED_MODIFIER", ",", "self", ".", "OVERSHADOWED_EVENT", "]", ":", "self", ".", "add_function_to_json", "(", "overshadowed_entry", "[", "2", "]", ",", "json", ")", "elif", "overshadowed_entry", "[", "0", "]", "==", "self", ".", "OVERSHADOWED_STATE_VARIABLE", ":", "self", ".", "add_variable_to_json", "(", "overshadowed_entry", "[", "2", "]", ",", "json", ")", "results", ".", "append", "(", "json", ")", "return", "results" ]
Detect shadowing local variables

Recursively visit the calls
Returns:
    list: {'vuln', 'filename', 'contract', 'func', 'shadow'}
[ "Detect", "shadowing", "local", "variables" ]
python
train
52.3
leancloud/python-sdk
leancloud/query.py
https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/query.py#L216-L230
def find(self):
        """
        Fetch all objects that satisfy the query conditions.

        :rtype: list
        """
        content = self._do_request(self.dump())
        objs = []
        for result in content['results']:
            obj = self._new_object()
            obj._update_data(self._process_result(result))
            objs.append(obj)
        return objs
[ "def", "find", "(", "self", ")", ":", "content", "=", "self", ".", "_do_request", "(", "self", ".", "dump", "(", ")", ")", "objs", "=", "[", "]", "for", "result", "in", "content", "[", "'results'", "]", ":", "obj", "=", "self", ".", "_new_object", "(", ")", "obj", ".", "_update_data", "(", "self", ".", "_process_result", "(", "result", ")", ")", "objs", ".", "append", "(", "obj", ")", "return", "objs" ]
Fetch all objects that satisfy the query conditions.

:rtype: list
[ "根据查询条件,获取包含所有满足条件的对象。" ]
python
train
22.133333
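A usage sketch for the find record above, following the SDK's documented Query pattern; the app credentials and the 'Todo' class name are placeholders.

import leancloud

leancloud.init('app_id', 'app_key')      # placeholder credentials
query = leancloud.Query('Todo')          # query objects of class 'Todo'
query.equal_to('done', False)
for todo in query.find():                # list of matching objects
    print(todo.id)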
FPGAwars/apio
apio/commands/init.py
https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/init.py#L27-L35
def cli(ctx, board, scons, project_dir, sayyes): """Manage apio projects.""" if scons: Project().create_sconstruct(project_dir, sayyes) elif board: Project().create_ini(board, project_dir, sayyes) else: click.secho(ctx.get_help())
[ "def", "cli", "(", "ctx", ",", "board", ",", "scons", ",", "project_dir", ",", "sayyes", ")", ":", "if", "scons", ":", "Project", "(", ")", ".", "create_sconstruct", "(", "project_dir", ",", "sayyes", ")", "elif", "board", ":", "Project", "(", ")", ".", "create_ini", "(", "board", ",", "project_dir", ",", "sayyes", ")", "else", ":", "click", ".", "secho", "(", "ctx", ".", "get_help", "(", ")", ")" ]
Manage apio projects.
[ "Manage", "apio", "projects", "." ]
python
train
29.222222
biolink/ontobio
ontobio/io/entityparser.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/entityparser.py#L62-L81
def load_gpi(self, gpi_path): """ Loads a GPI as a file from the `config.gpi_authority_path` """ if self.config.gpi_authority_path is not None: gpis = dict() parser = entityparser.GpiParser() with open(self.config.gpi_authority_path) as gpi_f: entities = parser.parse(file=gpi_f) for entity in entities: gpis[entity["id"]] = { "symbol": entity["label"], "name": entity["full_name"], "synonyms": entitywriter.stringify(entity["synonyms"]), "type": entity["type"] } return gpis # If there is no config file path, return None return None
[ "def", "load_gpi", "(", "self", ",", "gpi_path", ")", ":", "if", "self", ".", "config", ".", "gpi_authority_path", "is", "not", "None", ":", "gpis", "=", "dict", "(", ")", "parser", "=", "entityparser", ".", "GpiParser", "(", ")", "with", "open", "(", "self", ".", "config", ".", "gpi_authority_path", ")", "as", "gpi_f", ":", "entities", "=", "parser", ".", "parse", "(", "file", "=", "gpi_f", ")", "for", "entity", "in", "entities", ":", "gpis", "[", "entity", "[", "\"id\"", "]", "]", "=", "{", "\"symbol\"", ":", "entity", "[", "\"label\"", "]", ",", "\"name\"", ":", "entity", "[", "\"full_name\"", "]", ",", "\"synonyms\"", ":", "entitywriter", ".", "stringify", "(", "entity", "[", "\"synonyms\"", "]", ")", ",", "\"type\"", ":", "entity", "[", "\"type\"", "]", "}", "return", "gpis", "# If there is no config file path, return None", "return", "None" ]
Loads a GPI as a file from the `config.gpi_authority_path`
[ "Loads", "a", "GPI", "as", "a", "file", "from", "the", "config", ".", "gpi_authority_path" ]
python
train
35.4
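The shape of the mapping returned by load_gpi follows directly from the loop in the record above. A consumption sketch: `writer` and the entity id are hypothetical, and note that the `gpi_path` argument is unused by the implementation, which reads `config.gpi_authority_path` instead.

# `writer` is assumed to be an instance of the class defining load_gpi,
# configured with config.gpi_authority_path pointing at a GPI file.
gpis = writer.load_gpi(None)    # argument ignored; the config path is used
if gpis is not None:
    entity = gpis['MGI:97490']  # hypothetical entity id
    print(entity['symbol'], entity['name'], entity['type'])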
twilio/twilio-python
twilio/rest/serverless/v1/service/environment/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/environment/__init__.py#L194-L203
def get_instance(self, payload): """ Build an instance of EnvironmentInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance """ return EnvironmentInstance(self._version, payload, service_sid=self._solution['service_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "EnvironmentInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", ")" ]
Build an instance of EnvironmentInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
[ "Build", "an", "instance", "of", "EnvironmentInstance" ]
python
train
42.7
squaresLab/BugZoo
bugzoo/mgr/tool.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/tool.py#L93-L105
def download(self, tool: Tool, force=False) -> bool: """ Attempts to download the Docker image for a given tool from `DockerHub <https://hub.docker.com>`_. If the force parameter is set to True, any existing image will be overwritten. Returns: `True` if successfully downloaded, else `False`. """ return self.__installation.build.download(tool.image, force=force)
[ "def", "download", "(", "self", ",", "tool", ":", "Tool", ",", "force", "=", "False", ")", "->", "bool", ":", "return", "self", ".", "__installation", ".", "build", ".", "download", "(", "tool", ".", "image", ",", "force", "=", "force", ")" ]
Attempts to download the Docker image for a given tool from `DockerHub <https://hub.docker.com>`_. If the force parameter is set to True, any existing image will be overwritten. Returns: `True` if successfully downloaded, else `False`.
[ "Attempts", "to", "download", "the", "Docker", "image", "for", "a", "given", "tool", "from", "DockerHub", "<https", ":", "//", "hub", ".", "docker", ".", "com", ">", "_", ".", "If", "the", "force", "parameter", "is", "set", "to", "True", "any", "existing", "image", "will", "be", "overwritten", "." ]
python
train
38.769231
synw/dataswim
dataswim/data/select.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/select.py#L169-L176
def daterange_(self, datecol, date_start, op, **args): """ Returns a DataSwim instance with rows in a date range """ df = self._daterange(datecol, date_start, op, **args) if df is None: self.err("Can not select date range data") return self._duplicate_(df)
[ "def", "daterange_", "(", "self", ",", "datecol", ",", "date_start", ",", "op", ",", "*", "*", "args", ")", ":", "df", "=", "self", ".", "_daterange", "(", "datecol", ",", "date_start", ",", "op", ",", "*", "*", "args", ")", "if", "df", "is", "None", ":", "self", ".", "err", "(", "\"Can not select date range data\"", ")", "return", "self", ".", "_duplicate_", "(", "df", ")" ]
Returns a DataSwim instance with rows in a date range
[ "Returns", "a", "DataSwim", "instance", "with", "rows", "in", "a", "date", "range" ]
python
train
38.625
apache/incubator-mxnet
example/ctc/lstm.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ctc/lstm.py#L158-L174
def init_states(batch_size, num_lstm_layer, num_hidden):
    """
    Returns name and shape of init states of LSTM network

    Parameters
    ----------
    batch_size: int
    num_lstm_layer: int
    num_hidden: int

    Returns
    -------
    list of tuple of str and tuple of int and int
    """
    init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
    return init_c + init_h
[ "def", "init_states", "(", "batch_size", ",", "num_lstm_layer", ",", "num_hidden", ")", ":", "init_c", "=", "[", "(", "'l%d_init_c'", "%", "l", ",", "(", "batch_size", ",", "num_hidden", ")", ")", "for", "l", "in", "range", "(", "num_lstm_layer", ")", "]", "init_h", "=", "[", "(", "'l%d_init_h'", "%", "l", ",", "(", "batch_size", ",", "num_hidden", ")", ")", "for", "l", "in", "range", "(", "num_lstm_layer", ")", "]", "return", "init_c", "+", "init_h" ]
Returns name and shape of init states of LSTM network

Parameters
----------
batch_size: int
num_lstm_layer: int
num_hidden: int

Returns
-------
list of tuple of str and tuple of int and int
[ "Returns", "name", "and", "shape", "of", "init", "states", "of", "LSTM", "network" ]
python
train
31.470588
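The init_states record above is pure shape bookkeeping, so its output can be stated exactly:

states = init_states(batch_size=32, num_lstm_layer=2, num_hidden=100)
# [('l0_init_c', (32, 100)), ('l1_init_c', (32, 100)),
#  ('l0_init_h', (32, 100)), ('l1_init_h', (32, 100))]
# One cell state and one hidden state per layer, cell states first.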
piglei/uwsgi-sloth
uwsgi_sloth/commands/start.py
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/commands/start.py#L37-L50
def update_html_symlink(html_dir):
    """Maintain symlinks: "today.html", "yesterday.html" """
    today = datetime.date.today()
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    for from_date, alias_name in (
            (today, 'today.html'), (yesterday, 'yesterday.html')):
        from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
        symlink_path = os.path.join(html_dir, alias_name)
        try:
            os.unlink(symlink_path)
        except OSError:
            pass
        os.symlink(from_date_file_path, symlink_path)
[ "def", "update_html_symlink", "(", "html_dir", ")", ":", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "yesterday", "=", "datetime", ".", "date", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "for", "from_date", ",", "alias_name", "in", "(", "(", "today", ",", "'today.html'", ")", ",", "(", "yesterday", ",", "'yesterday.html'", ")", ")", ":", "from_date_file_path", "=", "os", ".", "path", ".", "join", "(", "html_dir", ",", "'day_%s.html'", "%", "from_date", ")", "symlink_path", "=", "os", ".", "path", ".", "join", "(", "html_dir", ",", "alias_name", ")", "try", ":", "os", ".", "unlink", "(", "symlink_path", ")", "except", "OSError", ":", "pass", "os", ".", "symlink", "(", "from_date_file_path", ",", "symlink_path", ")" ]
Maintain symlinks: "today.html", "yesterday.html"
[ "Maintail", "symlink", ":", "today", ".", "html", "yesterday", ".", "html" ]
python
train
63.285714
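A behavior sketch for the update_html_symlink record above, on a POSIX system; the symlinks are created even when the daily report files do not exist yet, and the function itself is assumed in scope from the record.

import datetime
import os
import tempfile

html_dir = tempfile.mkdtemp()
update_html_symlink(html_dir)

today = datetime.date.today()
# 'today.html' now points at the dated report for the current day.
assert os.readlink(os.path.join(html_dir, 'today.html')) == \
    os.path.join(html_dir, 'day_%s.html' % today)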
python/core-workflow
cherry_picker/cherry_picker/cherry_picker.py
https://github.com/python/core-workflow/blob/b93c76195f6db382cfcefee334380fb4c68d4e21/cherry_picker/cherry_picker/cherry_picker.py#L203-L214
def get_commit_message(self, commit_sha): """ Return the commit message for the current commit hash, replace #<PRID> with GH-<PRID> """ cmd = ["git", "show", "-s", "--format=%B", commit_sha] output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) message = output.strip().decode("utf-8") if self.config["fix_commit_msg"]: return message.replace("#", "GH-") else: return message
[ "def", "get_commit_message", "(", "self", ",", "commit_sha", ")", ":", "cmd", "=", "[", "\"git\"", ",", "\"show\"", ",", "\"-s\"", ",", "\"--format=%B\"", ",", "commit_sha", "]", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "message", "=", "output", ".", "strip", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "if", "self", ".", "config", "[", "\"fix_commit_msg\"", "]", ":", "return", "message", ".", "replace", "(", "\"#\"", ",", "\"GH-\"", ")", "else", ":", "return", "message" ]
Return the commit message for the current commit hash, replace #<PRID> with GH-<PRID>
[ "Return", "the", "commit", "message", "for", "the", "current", "commit", "hash", "replace", "#<PRID", ">", "with", "GH", "-", "<PRID", ">" ]
python
train
39.166667
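The fix_commit_msg branch in the record above is a plain textual replace, which can be shown in isolation; note it rewrites every '#', not only PR references.

message = 'bpo-12345: fix crash on startup (#6789)'
print(message.replace('#', 'GH-'))
# bpo-12345: fix crash on startup (GH-6789)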
timkpaine/pyEX
pyEX/marketdata/http.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/marketdata/http.py#L329-L350
def securityEventDF(symbol=None, token='', version=''):
    '''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs

    https://iexcloud.io/docs/api/#deep-security-event

    Args:
        symbol (string): Ticker to request
        token (string): Access token
        version (string): API version

    Returns:
        DataFrame: result
    '''
    x = securityEvent(symbol, token, version)
    data = []
    for key in x:
        d = x[key]
        d['symbol'] = key
        data.append(d)
    df = pd.DataFrame(data)
    _toDatetime(df)
    return df
[ "def", "securityEventDF", "(", "symbol", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "x", "=", "securityEvent", "(", "symbol", ",", "token", ",", "version", ")", "data", "=", "[", "]", "for", "key", "in", "x", ":", "d", "=", "x", "[", "key", "]", "d", "[", "'symbol'", "]", "=", "key", "data", ".", "append", "(", "d", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", ")", "_toDatetime", "(", "df", ")", "return", "df" ]
The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs

https://iexcloud.io/docs/api/#deep-security-event

Args:
    symbol (string): Ticker to request
    token (string): Access token
    version (string): API version

Returns:
    DataFrame: result
[ "The", "Security", "event", "message", "is", "used", "to", "indicate", "events", "that", "apply", "to", "a", "security", ".", "A", "Security", "event", "message", "will", "be", "sent", "whenever", "such", "event", "occurs" ]
python
valid
28.454545
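A call sketch for the securityEventDF record above; the top-level import path and the token are assumptions to verify against the pyEX package layout.

import pyEX

# The token is a placeholder; IEX Cloud issues real ones.
df = pyEX.securityEventDF('AAPL', token='YOUR_TOKEN')
print(df.head())   # one row per symbol key, timestamps parsed by _toDatetime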
monarch-initiative/dipper
dipper/models/Model.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Model.py#L152-L167
def addDeprecatedIndividual(self, old_id, new_ids=None):
        """
        Will mark the old_id as a deprecated individual.
        If one new id is supplied, it will mark it as replaced by.
        If >1 new id is supplied, it will mark it with consider properties.
        :param old_id: the individual id to deprecate
        :param new_ids: the list of individual ids that replace
        the old individual.  Not required.
        :return:

        """
        self.graph.addTriple(
            old_id, self.globaltt['type'], self.globaltt['named_individual'])

        self._addReplacementIds(old_id, new_ids)
[ "def", "addDeprecatedIndividual", "(", "self", ",", "old_id", ",", "new_ids", "=", "None", ")", ":", "self", ".", "graph", ".", "addTriple", "(", "old_id", ",", "self", ".", "globaltt", "[", "'type'", "]", ",", "self", ".", "globaltt", "[", "'named_individual'", "]", ")", "self", ".", "_addReplacementIds", "(", "old_id", ",", "new_ids", ")" ]
Will mark the old_id as a deprecated individual.
If one new id is supplied, it will mark it as replaced by.
If >1 new id is supplied, it will mark it with consider properties.
:param old_id: the individual id to deprecate
:param new_ids: the list of individual ids that replace
the old individual.  Not required.
:return:
[ "Will", "mark", "the", "oldid", "as", "a", "deprecated", "individual", ".", "if", "one", "newid", "is", "supplied", "it", "will", "mark", "it", "as", "replaced", "by", ".", "if", ">", "1", "newid", "is", "supplied", "it", "will", "mark", "it", "with", "consider", "properties", ":", "param", "g", ":", ":", "param", "oldid", ":", "the", "individual", "id", "to", "deprecate", ":", "param", "newids", ":", "the", "individual", "idlist", "that", "is", "the", "replacement", "(", "s", ")", "of", "the", "old", "individual", ".", "Not", "required", ".", ":", "return", ":" ]
python
train
40.0625
horazont/aioxmpp
aioxmpp/tracking.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/tracking.py#L453-L497
def attach_tracker(self, stanza, tracker=None, token=None): """ Configure tracking for a stanza without sending it. :param stanza: Message stanza to send. :type stanza: :class:`aioxmpp.Message` :param tracker: Message tracker to use. :type tracker: :class:`~.MessageTracker` or :data:`None` :param token: Optional stanza token for more fine-grained tracking. :type token: :class:`~.StanzaToken` :rtype: :class:`~.MessageTracker` :return: The message tracker. If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is created. If `token` is not :data:`None`, updates to the stanza `token` are reflected in the `tracker`. If an error reply is received, the tracker will enter :class:`~.MessageState.ERROR` and the error will be set as :attr:`~.MessageTracker.response`. You should use :meth:`send_tracked` if possible. This method however is very useful if you need to track carbon copies of sent messages, as a stanza token is not available here and re-sending the message to obtain one is generally not desirable ☺. """ if tracker is None: tracker = MessageTracker() stanza.autoset_id() key = stanza.to.bare(), stanza.id_ self._trackers[key] = tracker tracker.on_closed.connect( functools.partial(self._tracker_closed, key) ) if token is not None: token.future.add_done_callback( functools.partial( self._stanza_sent, tracker, token, ) ) return tracker
[ "def", "attach_tracker", "(", "self", ",", "stanza", ",", "tracker", "=", "None", ",", "token", "=", "None", ")", ":", "if", "tracker", "is", "None", ":", "tracker", "=", "MessageTracker", "(", ")", "stanza", ".", "autoset_id", "(", ")", "key", "=", "stanza", ".", "to", ".", "bare", "(", ")", ",", "stanza", ".", "id_", "self", ".", "_trackers", "[", "key", "]", "=", "tracker", "tracker", ".", "on_closed", ".", "connect", "(", "functools", ".", "partial", "(", "self", ".", "_tracker_closed", ",", "key", ")", ")", "if", "token", "is", "not", "None", ":", "token", ".", "future", ".", "add_done_callback", "(", "functools", ".", "partial", "(", "self", ".", "_stanza_sent", ",", "tracker", ",", "token", ",", ")", ")", "return", "tracker" ]
Configure tracking for a stanza without sending it. :param stanza: Message stanza to send. :type stanza: :class:`aioxmpp.Message` :param tracker: Message tracker to use. :type tracker: :class:`~.MessageTracker` or :data:`None` :param token: Optional stanza token for more fine-grained tracking. :type token: :class:`~.StanzaToken` :rtype: :class:`~.MessageTracker` :return: The message tracker. If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is created. If `token` is not :data:`None`, updates to the stanza `token` are reflected in the `tracker`. If an error reply is received, the tracker will enter :class:`~.MessageState.ERROR` and the error will be set as :attr:`~.MessageTracker.response`. You should use :meth:`send_tracked` if possible. This method however is very useful if you need to track carbon copies of sent messages, as a stanza token is not available here and re-sending the message to obtain one is generally not desirable ☺.
[ "Configure", "tracking", "for", "a", "stanza", "without", "sending", "it", "." ]
python
train
37.777778
globocom/GloboNetworkAPI-client-python
networkapiclient/Roteiro.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Roteiro.py#L87-L117
def alterar(self, id_script, id_script_type, script, description, model=None):
        """Change Script by the identifier.

        :param id_script: Identifier of the Script. Integer value and greater than zero.
        :param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
        :param script: Script name. String with a minimum of 3 and a maximum of 40 characters
        :param description: Script description. String with a minimum of 3 and a maximum of 100 characters

        :return: None

        :raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
        :raise RoteiroNaoExisteError: Script not registered.
        :raise TipoRoteiroNaoExisteError: Script Type not registered.
        :raise NomeRoteiroDuplicadoError: Script already registered with informed.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """

        if not is_valid_int_param(id_script):
            raise InvalidParameterError(u'The identifier of Script is invalid or was not informed.')

        script_map = dict()
        script_map['id_script_type'] = id_script_type
        script_map['script'] = script
        script_map['model'] = model
        script_map['description'] = description

        url = 'script/edit/' + str(id_script) + '/'

        code, xml = self.submit({'script': script_map}, 'PUT', url)

        return self.response(code, xml)
[ "def", "alterar", "(", "self", ",", "id_script", ",", "id_script_type", ",", "script", ",", "description", ",", "model", "=", "None", ")", ":", "if", "not", "is_valid_int_param", "(", "id_script", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Script is invalid or was not informed.'", ")", "script_map", "=", "dict", "(", ")", "script_map", "[", "'id_script_type'", "]", "=", "id_script_type", "script_map", "[", "'script'", "]", "=", "script", "script_map", "[", "'model'", "]", "=", "model", "script_map", "[", "'description'", "]", "=", "description", "url", "=", "'script/edit/'", "+", "str", "(", "id_script", ")", "+", "'/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'script'", ":", "script_map", "}", ",", "'PUT'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Change Script by the identifier.

:param id_script: Identifier of the Script. Integer value and greater than zero.
:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param script: Script name. String with a minimum of 3 and a maximum of 40 characters
:param description: Script description. String with a minimum of 3 and a maximum of 100 characters

:return: None

:raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
:raise RoteiroNaoExisteError: Script not registered.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeRoteiroDuplicadoError: Script already registered with informed.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
[ "Change", "Script", "from", "by", "the", "identifier", "." ]
python
train
48.612903
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/corebluetooth/gatt.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/gatt.py#L101-L111
def start_notify(self, on_change): """Enable notification of changes for this characteristic on the specified on_change callback. on_change should be a function that takes one parameter which is the value (as a string of bytes) of the changed characteristic value. """ # Tell the device what callback to use for changes to this characteristic. self._device._notify_characteristic(self._characteristic, on_change) # Turn on notifications of characteristic changes. self._device._peripheral.setNotifyValue_forCharacteristic_(True, self._characteristic)
[ "def", "start_notify", "(", "self", ",", "on_change", ")", ":", "# Tell the device what callback to use for changes to this characteristic.", "self", ".", "_device", ".", "_notify_characteristic", "(", "self", ".", "_characteristic", ",", "on_change", ")", "# Turn on notifications of characteristic changes.", "self", ".", "_device", ".", "_peripheral", ".", "setNotifyValue_forCharacteristic_", "(", "True", ",", "self", ".", "_characteristic", ")" ]
Enable notification of changes for this characteristic on the specified on_change callback. on_change should be a function that takes one parameter which is the value (as a string of bytes) of the changed characteristic value.
[ "Enable", "notification", "of", "changes", "for", "this", "characteristic", "on", "the", "specified", "on_change", "callback", ".", "on_change", "should", "be", "a", "function", "that", "takes", "one", "parameter", "which", "is", "the", "value", "(", "as", "a", "string", "of", "bytes", ")", "of", "the", "changed", "characteristic", "value", "." ]
python
valid
56.818182
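The callback contract in the start_notify record above is one positional argument carrying the new value as a string of bytes. A sketch; `characteristic` is assumed to come from a connected BluefruitLE device obtained elsewhere.

def on_change(value):
    # value is the raw characteristic value, a string of bytes
    print('characteristic changed: %r' % value)

characteristic.start_notify(on_change)   # `characteristic` is an assumption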
twilio/twilio-python
twilio/rest/preview/wireless/sim/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/wireless/sim/__init__.py#L38-L73
def stream(self, status=values.unset, iccid=values.unset, rate_plan=values.unset, e_id=values.unset, sim_registration_code=values.unset, limit=None, page_size=None): """ Streams SimInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode status: The status :param unicode iccid: The iccid :param unicode rate_plan: The rate_plan :param unicode e_id: The e_id :param unicode sim_registration_code: The sim_registration_code :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.sim.SimInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( status=status, iccid=iccid, rate_plan=rate_plan, e_id=e_id, sim_registration_code=sim_registration_code, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
[ "def", "stream", "(", "self", ",", "status", "=", "values", ".", "unset", ",", "iccid", "=", "values", ".", "unset", ",", "rate_plan", "=", "values", ".", "unset", ",", "e_id", "=", "values", ".", "unset", ",", "sim_registration_code", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "limits", "=", "self", ".", "_version", ".", "read_limits", "(", "limit", ",", "page_size", ")", "page", "=", "self", ".", "page", "(", "status", "=", "status", ",", "iccid", "=", "iccid", ",", "rate_plan", "=", "rate_plan", ",", "e_id", "=", "e_id", ",", "sim_registration_code", "=", "sim_registration_code", ",", "page_size", "=", "limits", "[", "'page_size'", "]", ",", ")", "return", "self", ".", "_version", ".", "stream", "(", "page", ",", "limits", "[", "'limit'", "]", ",", "limits", "[", "'page_limit'", "]", ")" ]
Streams SimInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode status: The status :param unicode iccid: The iccid :param unicode rate_plan: The rate_plan :param unicode e_id: The e_id :param unicode sim_registration_code: The sim_registration_code :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
[ "Streams", "SimInstance", "records", "from", "the", "API", "as", "a", "generator", "stream", ".", "This", "operation", "lazily", "loads", "records", "as", "efficiently", "as", "possible", "until", "the", "limit", "is", "reached", ".", "The", "results", "are", "returned", "as", "a", "generator", "so", "this", "operation", "is", "memory", "efficient", "." ]
python
train
48.638889
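A consumption sketch for the stream record above; streaming is lazy, so records are fetched page by page as the loop advances. The credentials are placeholders, and the `client.preview.wireless.sims` accessor path is an assumption based on Twilio's preview API naming.

from twilio.rest import Client

client = Client('ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'auth_token')  # placeholders

# At most 20 SIM records, fetched 10 per request.
for sim in client.preview.wireless.sims.stream(limit=20, page_size=10):
    print(sim.sid)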
opencobra/cobrapy
cobra/medium/minimal_medium.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/medium/minimal_medium.py#L17-L38
def add_linear_obj(model): """Add a linear version of a minimal medium to the model solver. Changes the optimization objective to finding the growth medium requiring the smallest total import flux:: minimize sum |r_i| for r_i in import_reactions Arguments --------- model : cobra.Model The model to modify. """ coefs = {} for rxn in find_boundary_types(model, "exchange"): export = len(rxn.reactants) == 1 if export: coefs[rxn.reverse_variable] = 1 else: coefs[rxn.forward_variable] = 1 model.objective.set_linear_coefficients(coefs) model.objective.direction = "min"
[ "def", "add_linear_obj", "(", "model", ")", ":", "coefs", "=", "{", "}", "for", "rxn", "in", "find_boundary_types", "(", "model", ",", "\"exchange\"", ")", ":", "export", "=", "len", "(", "rxn", ".", "reactants", ")", "==", "1", "if", "export", ":", "coefs", "[", "rxn", ".", "reverse_variable", "]", "=", "1", "else", ":", "coefs", "[", "rxn", ".", "forward_variable", "]", "=", "1", "model", ".", "objective", ".", "set_linear_coefficients", "(", "coefs", ")", "model", ".", "objective", ".", "direction", "=", "\"min\"" ]
Add a linear version of a minimal medium to the model solver. Changes the optimization objective to finding the growth medium requiring the smallest total import flux:: minimize sum |r_i| for r_i in import_reactions Arguments --------- model : cobra.Model The model to modify.
[ "Add", "a", "linear", "version", "of", "a", "minimal", "medium", "to", "the", "model", "solver", "." ]
python
valid
29.863636
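A sketch of applying the objective swap in the add_linear_obj record above. The model name is an assumption, and in the full minimal-medium routine a growth constraint is fixed before minimizing, which this sketch omits.

from cobra.io import load_model
from cobra.medium.minimal_medium import add_linear_obj

model = load_model('textbook')   # bundled E. coli test model (name assumed)
with model:                      # objective changes are reverted on exit
    add_linear_obj(model)
    solution = model.optimize()
    # objective value = minimized total import flux over exchange reactions
    print(solution.objective_value)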
marrow/mongo
marrow/mongo/query/query.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L201-L213
def S(self): """Allow for the projection (and update) of nested values contained within the single match of an array. Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_ Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/ """ if self._combining: raise TypeError("Unable to dereference after combining fields.") instance = self.__class__(self._document, self._field) instance._name = self._name + '.' + '$' # pylint:disable=protected-access return instance
[ "def", "S", "(", "self", ")", ":", "if", "self", ".", "_combining", ":", "raise", "TypeError", "(", "\"Unable to dereference after combining fields.\"", ")", "instance", "=", "self", ".", "__class__", "(", "self", ".", "_document", ",", "self", ".", "_field", ")", "instance", ".", "_name", "=", "self", ".", "_name", "+", "'.'", "+", "'$'", "# pylint:disable=protected-access", "return", "instance" ]
Allow for the projection (and update) of nested values contained within the single match of an array. Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_ Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/
[ "Allow", "for", "the", "projection", "(", "and", "update", ")", "of", "nested", "values", "contained", "within", "the", "single", "match", "of", "an", "array", ".", "Projection", "operator", ":", "https", ":", "//", "docs", ".", "mongodb", ".", "com", "/", "manual", "/", "reference", "/", "operator", "/", "projection", "/", "positional", "/", "#proj", ".", "_S_", "Array", "update", "operator", ":", "https", ":", "//", "docs", ".", "mongodb", ".", "com", "/", "manual", "/", "reference", "/", "operator", "/", "update", "/", "positional", "/" ]
python
train
43.461538
Dallinger/Dallinger
dallinger/data.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/data.py#L139-L145
def register(id, url=None): """Register a UUID key in the global S3 bucket.""" bucket = registration_s3_bucket() key = registration_key(id) obj = bucket.Object(key) obj.put(Body=url or "missing") return _generate_s3_url(bucket, key)
[ "def", "register", "(", "id", ",", "url", "=", "None", ")", ":", "bucket", "=", "registration_s3_bucket", "(", ")", "key", "=", "registration_key", "(", "id", ")", "obj", "=", "bucket", ".", "Object", "(", "key", ")", "obj", ".", "put", "(", "Body", "=", "url", "or", "\"missing\"", ")", "return", "_generate_s3_url", "(", "bucket", ",", "key", ")" ]
Register a UUID key in the global S3 bucket.
[ "Register", "a", "UUID", "key", "in", "the", "global", "S3", "bucket", "." ]
python
train
35.714286
twisted/mantissa
xmantissa/publicweb.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/publicweb.py#L558-L570
def render_head(self, ctx, data): """ This renderer calculates content for the <head> tag by concatenating the values from L{getHeadContent} and the overridden L{head} method. """ req = inevow.IRequest(ctx) more = getattr(self.fragment, 'head', None) if more is not None: fragmentHead = more() else: fragmentHead = None return ctx.tag[filter(None, list(self.getHeadContent(req)) + [fragmentHead])]
[ "def", "render_head", "(", "self", ",", "ctx", ",", "data", ")", ":", "req", "=", "inevow", ".", "IRequest", "(", "ctx", ")", "more", "=", "getattr", "(", "self", ".", "fragment", ",", "'head'", ",", "None", ")", "if", "more", "is", "not", "None", ":", "fragmentHead", "=", "more", "(", ")", "else", ":", "fragmentHead", "=", "None", "return", "ctx", ".", "tag", "[", "filter", "(", "None", ",", "list", "(", "self", ".", "getHeadContent", "(", "req", ")", ")", "+", "[", "fragmentHead", "]", ")", "]" ]
This renderer calculates content for the <head> tag by concatenating the values from L{getHeadContent} and the overridden L{head} method.
[ "This", "renderer", "calculates", "content", "for", "the", "<head", ">", "tag", "by", "concatenating", "the", "values", "from", "L", "{", "getHeadContent", "}", "and", "the", "overridden", "L", "{", "head", "}", "method", "." ]
python
train
39.307692
Parquery/icontract
icontract/_represent.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_represent.py#L354-L405
def generate_message(contract: Contract, condition_kwargs: Mapping[str, Any]) -> str: """Generate the message upon contract violation.""" # pylint: disable=protected-access parts = [] # type: List[str] if contract.location is not None: parts.append("{}:\n".format(contract.location)) if contract.description is not None: parts.append("{}: ".format(contract.description)) lambda_inspection = None # type: Optional[ConditionLambdaInspection] if not _is_lambda(a_function=contract.condition): condition_text = contract.condition.__name__ else: # We need to extract the source code corresponding to the decorator since inspect.getsource() is broken with # lambdas. # Find the line corresponding to the condition lambda lines, condition_lineno = inspect.findsource(contract.condition) filename = inspect.getsourcefile(contract.condition) decorator_inspection = inspect_decorator(lines=lines, lineno=condition_lineno, filename=filename) lambda_inspection = find_lambda_condition(decorator_inspection=decorator_inspection) assert lambda_inspection is not None, \ "Expected lambda_inspection to be non-None if _is_lambda is True on: {}".format(contract.condition) condition_text = lambda_inspection.text parts.append(condition_text) repr_vals = repr_values( condition=contract.condition, lambda_inspection=lambda_inspection, condition_kwargs=condition_kwargs, a_repr=contract._a_repr) if len(repr_vals) == 0: # Do not append anything since no value could be represented as a string. # This could appear in case we have, for example, a generator expression as the return value of a lambda. pass elif len(repr_vals) == 1: parts.append(': ') parts.append(repr_vals[0]) else: parts.append(':\n') parts.append('\n'.join(repr_vals)) msg = "".join(parts) return msg
[ "def", "generate_message", "(", "contract", ":", "Contract", ",", "condition_kwargs", ":", "Mapping", "[", "str", ",", "Any", "]", ")", "->", "str", ":", "# pylint: disable=protected-access", "parts", "=", "[", "]", "# type: List[str]", "if", "contract", ".", "location", "is", "not", "None", ":", "parts", ".", "append", "(", "\"{}:\\n\"", ".", "format", "(", "contract", ".", "location", ")", ")", "if", "contract", ".", "description", "is", "not", "None", ":", "parts", ".", "append", "(", "\"{}: \"", ".", "format", "(", "contract", ".", "description", ")", ")", "lambda_inspection", "=", "None", "# type: Optional[ConditionLambdaInspection]", "if", "not", "_is_lambda", "(", "a_function", "=", "contract", ".", "condition", ")", ":", "condition_text", "=", "contract", ".", "condition", ".", "__name__", "else", ":", "# We need to extract the source code corresponding to the decorator since inspect.getsource() is broken with", "# lambdas.", "# Find the line corresponding to the condition lambda", "lines", ",", "condition_lineno", "=", "inspect", ".", "findsource", "(", "contract", ".", "condition", ")", "filename", "=", "inspect", ".", "getsourcefile", "(", "contract", ".", "condition", ")", "decorator_inspection", "=", "inspect_decorator", "(", "lines", "=", "lines", ",", "lineno", "=", "condition_lineno", ",", "filename", "=", "filename", ")", "lambda_inspection", "=", "find_lambda_condition", "(", "decorator_inspection", "=", "decorator_inspection", ")", "assert", "lambda_inspection", "is", "not", "None", ",", "\"Expected lambda_inspection to be non-None if _is_lambda is True on: {}\"", ".", "format", "(", "contract", ".", "condition", ")", "condition_text", "=", "lambda_inspection", ".", "text", "parts", ".", "append", "(", "condition_text", ")", "repr_vals", "=", "repr_values", "(", "condition", "=", "contract", ".", "condition", ",", "lambda_inspection", "=", "lambda_inspection", ",", "condition_kwargs", "=", "condition_kwargs", ",", "a_repr", "=", "contract", ".", "_a_repr", ")", "if", "len", "(", "repr_vals", ")", "==", "0", ":", "# Do not append anything since no value could be represented as a string.", "# This could appear in case we have, for example, a generator expression as the return value of a lambda.", "pass", "elif", "len", "(", "repr_vals", ")", "==", "1", ":", "parts", ".", "append", "(", "': '", ")", "parts", ".", "append", "(", "repr_vals", "[", "0", "]", ")", "else", ":", "parts", ".", "append", "(", "':\\n'", ")", "parts", ".", "append", "(", "'\\n'", ".", "join", "(", "repr_vals", ")", ")", "msg", "=", "\"\"", ".", "join", "(", "parts", ")", "return", "msg" ]
Generate the message upon contract violation.
[ "Generate", "the", "message", "upon", "contract", "violation", "." ]
python
train
37.865385
open-mmlab/mmcv
mmcv/utils/misc.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/utils/misc.py#L21-L43
def iter_cast(inputs, dst_type, return_type=None): """Cast elements of an iterable object into some type. Args: inputs (Iterable): The input object. dst_type (type): Destination type. return_type (type, optional): If specified, the output object will be converted to this type, otherwise an iterator. Returns: iterator or specified type: The converted object. """ if not isinstance(inputs, collections_abc.Iterable): raise TypeError('inputs must be an iterable object') if not isinstance(dst_type, type): raise TypeError('"dst_type" must be a valid type') out_iterable = six.moves.map(dst_type, inputs) if return_type is None: return out_iterable else: return return_type(out_iterable)
[ "def", "iter_cast", "(", "inputs", ",", "dst_type", ",", "return_type", "=", "None", ")", ":", "if", "not", "isinstance", "(", "inputs", ",", "collections_abc", ".", "Iterable", ")", ":", "raise", "TypeError", "(", "'inputs must be an iterable object'", ")", "if", "not", "isinstance", "(", "dst_type", ",", "type", ")", ":", "raise", "TypeError", "(", "'\"dst_type\" must be a valid type'", ")", "out_iterable", "=", "six", ".", "moves", ".", "map", "(", "dst_type", ",", "inputs", ")", "if", "return_type", "is", "None", ":", "return", "out_iterable", "else", ":", "return", "return_type", "(", "out_iterable", ")" ]
Cast elements of an iterable object into some type. Args: inputs (Iterable): The input object. dst_type (type): Destination type. return_type (type, optional): If specified, the output object will be converted to this type, otherwise an iterator. Returns: iterator or specified type: The converted object.
[ "Cast", "elements", "of", "an", "iterable", "object", "into", "some", "type", "." ]
python
test
33.869565
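The two return modes of iter_cast in the record above, directly from the code (the function is assumed in scope from the record):

lazy = iter_cast(['1', '2', '3'], int)                     # an iterator
eager = iter_cast(['1', '2', '3'], int, return_type=list)  # [1, 2, 3]
print(list(lazy), eager)

# iter_cast(5, int) raises TypeError: inputs must be an iterable object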
ejeschke/ginga
ginga/rv/plugins/SaveImage.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/SaveImage.py#L95-L174
def build_gui(self, container): """Build GUI such that image list area is maximized.""" vbox, sw, orientation = Widgets.get_oriented_box(container) captions = (('Channel:', 'label', 'Channel Name', 'combobox', 'Modified only', 'checkbutton'), ) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.channel_name.set_tooltip('Channel for locating images to save') b.channel_name.add_callback('activated', self.select_channel_cb) mod_only = self.settings.get('modified_only', True) b.modified_only.set_state(mod_only) b.modified_only.add_callback('activated', lambda *args: self.redo()) b.modified_only.set_tooltip("Show only locally modified images") container.add_widget(w, stretch=0) captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'), ('Suffix:', 'llabel', 'Suffix', 'entry')) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.outdir.set_text(self.outdir) b.outdir.set_tooltip('Output directory') b.outdir.add_callback('activated', lambda w: self.set_outdir()) b.browse.set_tooltip('Browse for output directory') b.browse.add_callback('activated', lambda w: self.browse_outdir()) b.suffix.set_text(self.suffix) b.suffix.set_tooltip('Suffix to append to filename') b.suffix.add_callback('activated', lambda w: self.set_suffix()) container.add_widget(w, stretch=0) self.treeview = Widgets.TreeView(auto_expand=True, sortable=True, selection='multiple', use_alt_row_color=True) self.treeview.setup_table(self.columns, 1, 'IMAGE') self.treeview.add_callback('selected', self.toggle_save_cb) container.add_widget(self.treeview, stretch=1) captions = (('Status', 'llabel'), ) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.status.set_text('') b.status.set_tooltip('Status message') container.add_widget(w, stretch=0) btns = Widgets.HBox() btns.set_border_width(4) btns.set_spacing(3) btn = Widgets.Button('Save') btn.set_tooltip('Save selected image(s)') btn.add_callback('activated', lambda w: self.save_images()) btn.set_enabled(False) btns.add_widget(btn, stretch=0) self.w.save = btn btn = Widgets.Button('Close') btn.add_callback('activated', lambda w: self.close()) btns.add_widget(btn, stretch=0) btn = Widgets.Button("Help") btn.add_callback('activated', lambda w: self.help()) btns.add_widget(btn, stretch=0) btns.add_widget(Widgets.Label(''), stretch=1) container.add_widget(btns, stretch=0) self.gui_up = True # Initialize directory selection dialog self.dirsel = DirectorySelection(self.fv.w.root.get_widget()) # Generate initial listing self.update_channels()
[ "def", "build_gui", "(", "self", ",", "container", ")", ":", "vbox", ",", "sw", ",", "orientation", "=", "Widgets", ".", "get_oriented_box", "(", "container", ")", "captions", "=", "(", "(", "'Channel:'", ",", "'label'", ",", "'Channel Name'", ",", "'combobox'", ",", "'Modified only'", ",", "'checkbutton'", ")", ",", ")", "w", ",", "b", "=", "Widgets", ".", "build_info", "(", "captions", ",", "orientation", "=", "orientation", ")", "self", ".", "w", ".", "update", "(", "b", ")", "b", ".", "channel_name", ".", "set_tooltip", "(", "'Channel for locating images to save'", ")", "b", ".", "channel_name", ".", "add_callback", "(", "'activated'", ",", "self", ".", "select_channel_cb", ")", "mod_only", "=", "self", ".", "settings", ".", "get", "(", "'modified_only'", ",", "True", ")", "b", ".", "modified_only", ".", "set_state", "(", "mod_only", ")", "b", ".", "modified_only", ".", "add_callback", "(", "'activated'", ",", "lambda", "*", "args", ":", "self", ".", "redo", "(", ")", ")", "b", ".", "modified_only", ".", "set_tooltip", "(", "\"Show only locally modified images\"", ")", "container", ".", "add_widget", "(", "w", ",", "stretch", "=", "0", ")", "captions", "=", "(", "(", "'Path:'", ",", "'llabel'", ",", "'OutDir'", ",", "'entry'", ",", "'Browse'", ",", "'button'", ")", ",", "(", "'Suffix:'", ",", "'llabel'", ",", "'Suffix'", ",", "'entry'", ")", ")", "w", ",", "b", "=", "Widgets", ".", "build_info", "(", "captions", ",", "orientation", "=", "orientation", ")", "self", ".", "w", ".", "update", "(", "b", ")", "b", ".", "outdir", ".", "set_text", "(", "self", ".", "outdir", ")", "b", ".", "outdir", ".", "set_tooltip", "(", "'Output directory'", ")", "b", ".", "outdir", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "set_outdir", "(", ")", ")", "b", ".", "browse", ".", "set_tooltip", "(", "'Browse for output directory'", ")", "b", ".", "browse", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "browse_outdir", "(", ")", ")", "b", ".", "suffix", ".", "set_text", "(", "self", ".", "suffix", ")", "b", ".", "suffix", ".", "set_tooltip", "(", "'Suffix to append to filename'", ")", "b", ".", "suffix", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "set_suffix", "(", ")", ")", "container", ".", "add_widget", "(", "w", ",", "stretch", "=", "0", ")", "self", ".", "treeview", "=", "Widgets", ".", "TreeView", "(", "auto_expand", "=", "True", ",", "sortable", "=", "True", ",", "selection", "=", "'multiple'", ",", "use_alt_row_color", "=", "True", ")", "self", ".", "treeview", ".", "setup_table", "(", "self", ".", "columns", ",", "1", ",", "'IMAGE'", ")", "self", ".", "treeview", ".", "add_callback", "(", "'selected'", ",", "self", ".", "toggle_save_cb", ")", "container", ".", "add_widget", "(", "self", ".", "treeview", ",", "stretch", "=", "1", ")", "captions", "=", "(", "(", "'Status'", ",", "'llabel'", ")", ",", ")", "w", ",", "b", "=", "Widgets", ".", "build_info", "(", "captions", ",", "orientation", "=", "orientation", ")", "self", ".", "w", ".", "update", "(", "b", ")", "b", ".", "status", ".", "set_text", "(", "''", ")", "b", ".", "status", ".", "set_tooltip", "(", "'Status message'", ")", "container", ".", "add_widget", "(", "w", ",", "stretch", "=", "0", ")", "btns", "=", "Widgets", ".", "HBox", "(", ")", "btns", ".", "set_border_width", "(", "4", ")", "btns", ".", "set_spacing", "(", "3", ")", "btn", "=", "Widgets", ".", "Button", "(", "'Save'", ")", "btn", ".", "set_tooltip", "(", "'Save selected 
image(s)'", ")", "btn", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "save_images", "(", ")", ")", "btn", ".", "set_enabled", "(", "False", ")", "btns", ".", "add_widget", "(", "btn", ",", "stretch", "=", "0", ")", "self", ".", "w", ".", "save", "=", "btn", "btn", "=", "Widgets", ".", "Button", "(", "'Close'", ")", "btn", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "close", "(", ")", ")", "btns", ".", "add_widget", "(", "btn", ",", "stretch", "=", "0", ")", "btn", "=", "Widgets", ".", "Button", "(", "\"Help\"", ")", "btn", ".", "add_callback", "(", "'activated'", ",", "lambda", "w", ":", "self", ".", "help", "(", ")", ")", "btns", ".", "add_widget", "(", "btn", ",", "stretch", "=", "0", ")", "btns", ".", "add_widget", "(", "Widgets", ".", "Label", "(", "''", ")", ",", "stretch", "=", "1", ")", "container", ".", "add_widget", "(", "btns", ",", "stretch", "=", "0", ")", "self", ".", "gui_up", "=", "True", "# Initialize directory selection dialog", "self", ".", "dirsel", "=", "DirectorySelection", "(", "self", ".", "fv", ".", "w", ".", "root", ".", "get_widget", "(", ")", ")", "# Generate initial listing", "self", ".", "update_channels", "(", ")" ]
Build GUI such that image list area is maximized.
[ "Build", "GUI", "such", "that", "image", "list", "area", "is", "maximized", "." ]
python
train
39.1
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L1819-L1831
def add_spot(self, component=None, feature=None, **kwargs): """ Shortcut to :meth:`add_feature` but with kind='spot' """ if component is None: if len(self.hierarchy.get_stars())==1: component = self.hierarchy.get_stars()[0] else: raise ValueError("must provide component for spot") kwargs.setdefault('component', component) kwargs.setdefault('feature', feature) return self.add_feature('spot', **kwargs)
[ "def", "add_spot", "(", "self", ",", "component", "=", "None", ",", "feature", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "component", "is", "None", ":", "if", "len", "(", "self", ".", "hierarchy", ".", "get_stars", "(", ")", ")", "==", "1", ":", "component", "=", "self", ".", "hierarchy", ".", "get_stars", "(", ")", "[", "0", "]", "else", ":", "raise", "ValueError", "(", "\"must provide component for spot\"", ")", "kwargs", ".", "setdefault", "(", "'component'", ",", "component", ")", "kwargs", ".", "setdefault", "(", "'feature'", ",", "feature", ")", "return", "self", ".", "add_feature", "(", "'spot'", ",", "*", "*", "kwargs", ")" ]
Shortcut to :meth:`add_feature` but with kind='spot'
[ "Shortcut", "to", ":", "meth", ":", "add_feature", "but", "with", "kind", "=", "spot" ]
python
train
38.769231
solocompt/plugs-core
plugs_core/clients.py
https://github.com/solocompt/plugs-core/blob/19fd23101fcfdabe657485f0a22e6b63e2b44f9d/plugs_core/clients.py#L14-L25
def parse_params(self, params):
        """
        Parse params. Params is a dict whose values
        may be strings or iterables (e.g. lists);
        iterable values are converted to strings.
        """
        for (key, value) in params.items():
            if not isinstance(value, str):
                string_params = self.to_string(value)
                params[key] = string_params
        return params
[ "def", "parse_params", "(", "self", ",", "params", ")", ":", "for", "(", "key", ",", "value", ")", "in", "params", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "string_params", "=", "self", ".", "to_string", "(", "value", ")", "params", "[", "key", "]", "=", "string_params", "return", "params" ]
Parse params. Params is a dict whose values
may be strings or iterables (e.g. lists);
iterable values are converted to strings.
[ "Parsing", "params", "params", "is", "a", "dict", "and", "the", "dict", "value", "can", "be", "a", "string", "or", "an", "iterable", "namely", "a", "list", "we", "need", "to", "process", "those", "iterables" ]
python
train
34.75
liftoff/pyminifier
pyminifier/obfuscate.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/obfuscate.py#L101-L148
def find_obfuscatables(tokens, obfunc, ignore_length=False): """ Iterates over *tokens*, which must be an equivalent output to what tokenize.generate_tokens() produces, calling *obfunc* on each with the following parameters: - **tokens:** The current list of tokens. - **index:** The current position in the list. *obfunc* is expected to return the token string if that token can be safely obfuscated **or** one of the following optional values which will instruct find_obfuscatables() how to proceed: - **'__skipline__'** Keep skipping tokens until a newline is reached. - **'__skipnext__'** Skip the next token in the sequence. If *ignore_length* is ``True`` then single-character obfuscatables will be obfuscated anyway (even though it wouldn't save any space). """ global keyword_args keyword_args = analyze.enumerate_keyword_args(tokens) global imported_modules imported_modules = analyze.enumerate_imports(tokens) #print("imported_modules: %s" % imported_modules) skip_line = False skip_next = False obfuscatables = [] for index, tok in enumerate(tokens): token_type = tok[0] if token_type == tokenize.NEWLINE: skip_line = False if skip_line: continue result = obfunc(tokens, index, ignore_length=ignore_length) if result: if skip_next: skip_next = False elif result == '__skipline__': skip_line = True elif result == '__skipnext__': skip_next = True elif result in obfuscatables: pass else: obfuscatables.append(result) else: # If result is empty we need to reset skip_next so we don't skip_next = False # accidentally skip the next identifier return obfuscatables
[ "def", "find_obfuscatables", "(", "tokens", ",", "obfunc", ",", "ignore_length", "=", "False", ")", ":", "global", "keyword_args", "keyword_args", "=", "analyze", ".", "enumerate_keyword_args", "(", "tokens", ")", "global", "imported_modules", "imported_modules", "=", "analyze", ".", "enumerate_imports", "(", "tokens", ")", "#print(\"imported_modules: %s\" % imported_modules)", "skip_line", "=", "False", "skip_next", "=", "False", "obfuscatables", "=", "[", "]", "for", "index", ",", "tok", "in", "enumerate", "(", "tokens", ")", ":", "token_type", "=", "tok", "[", "0", "]", "if", "token_type", "==", "tokenize", ".", "NEWLINE", ":", "skip_line", "=", "False", "if", "skip_line", ":", "continue", "result", "=", "obfunc", "(", "tokens", ",", "index", ",", "ignore_length", "=", "ignore_length", ")", "if", "result", ":", "if", "skip_next", ":", "skip_next", "=", "False", "elif", "result", "==", "'__skipline__'", ":", "skip_line", "=", "True", "elif", "result", "==", "'__skipnext__'", ":", "skip_next", "=", "True", "elif", "result", "in", "obfuscatables", ":", "pass", "else", ":", "obfuscatables", ".", "append", "(", "result", ")", "else", ":", "# If result is empty we need to reset skip_next so we don't", "skip_next", "=", "False", "# accidentally skip the next identifier", "return", "obfuscatables" ]
Iterates over *tokens*, which must be an equivalent output to what tokenize.generate_tokens() produces, calling *obfunc* on each with the following parameters: - **tokens:** The current list of tokens. - **index:** The current position in the list. *obfunc* is expected to return the token string if that token can be safely obfuscated **or** one of the following optional values which will instruct find_obfuscatables() how to proceed: - **'__skipline__'** Keep skipping tokens until a newline is reached. - **'__skipnext__'** Skip the next token in the sequence. If *ignore_length* is ``True`` then single-character obfuscatables will be obfuscated anyway (even though it wouldn't save any space).
[ "Iterates", "over", "*", "tokens", "*", "which", "must", "be", "an", "equivalent", "output", "to", "what", "tokenize", ".", "generate_tokens", "()", "produces", "calling", "*", "obfunc", "*", "on", "each", "with", "the", "following", "parameters", ":" ]
python
train
39.104167
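A sketch of how a token list and obfunc reach find_obfuscatables above; name_obfunc is a toy stand-in for the library's real obfuscatable_* helpers, which are not shown in this record:

import io
import tokenize

src = "def spam(eggs):\n    return eggs\n"
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))

def name_obfunc(tokens, index, ignore_length=False):
    # Toy obfunc: treat every NAME token as obfuscatable.
    tok = tokens[index]
    if tok[0] == tokenize.NAME:
        return tok[1]
    return None

# find_obfuscatables(tokens, name_obfunc) would then collect the distinct
# NAME strings in order: ['def', 'spam', 'eggs', 'return'].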
tensorflow/mesh
mesh_tensorflow/simd_mesh_impl.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/simd_mesh_impl.py#L568-L592
def _ring_2d(m, n):
  """Ring-order of an mxn mesh.

  Args:
    m: an integer
    n: an integer
  Returns:
    a list of mxn pairs
  """
  if m == 1:
    return [(0, i) for i in range(n)]
  if n == 1:
    return [(i, 0) for i in range(m)]
  if m % 2 != 0:
    tf.logging.warning("Odd dimension")
    return [(i % m, i // m) for i in range(n * m)]
  ret = [(0, 0)]
  for i in range(m // 2):
    for j in range(1, n):
      ret.append((2 * i, j))
    for j in range(n-1, 0, -1):
      ret.append((2 * i + 1, j))
  for i in range(m-1, 0, -1):
    ret.append((i, 0))
  return ret
[ "def", "_ring_2d", "(", "m", ",", "n", ")", ":", "if", "m", "==", "1", ":", "return", "[", "(", "0", ",", "i", ")", "for", "i", "in", "range", "(", "n", ")", "]", "if", "n", "==", "1", ":", "return", "[", "(", "i", ",", "0", ")", "for", "i", "in", "range", "(", "m", ")", "]", "if", "m", "%", "2", "!=", "0", ":", "tf", ".", "logging", ".", "warning", "(", "\"Odd dimension\"", ")", "return", "[", "(", "i", "%", "m", ",", "i", "//", "m", ")", "for", "i", "in", "range", "(", "n", "*", "m", ")", "]", "ret", "=", "[", "(", "0", ",", "0", ")", "]", "for", "i", "in", "range", "(", "m", "//", "2", ")", ":", "for", "j", "in", "range", "(", "1", ",", "n", ")", ":", "ret", ".", "append", "(", "(", "2", "*", "i", ",", "j", ")", ")", "for", "j", "in", "range", "(", "n", "-", "1", ",", "0", ",", "-", "1", ")", ":", "ret", ".", "append", "(", "(", "2", "*", "i", "+", "1", ",", "j", ")", ")", "for", "i", "in", "range", "(", "m", "-", "1", ",", "0", ",", "-", "1", ")", ":", "ret", ".", "append", "(", "(", "i", ",", "0", ")", ")", "return", "ret" ]
Ring-order of an mxn mesh. Args: m: an integer n: an integer Returns: a list of mxn pairs
[ "Ring", "-", "order", "of", "an", "mxn", "mesh", "." ]
python
train
22.04
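_ring_2d is module-private, but its ordering is easy to check by hand; a sketch of the expected output for two small meshes, assuming the function is importable from the module above:

# A 2x3 mesh is walked as a closed ring: left-to-right along the
# top row, then right-to-left along the bottom row, back to (0, 0).
assert _ring_2d(2, 3) == [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)]
# Degenerate single-row mesh: a plain left-to-right walk.
assert _ring_2d(1, 4) == [(0, 0), (0, 1), (0, 2), (0, 3)]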
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1227-L1254
def add_transition(self, from_state_id, from_outcome, to_state_id, to_outcome, transition_id=None): """Adds a transition to the container state Note: Either the toState or the toOutcome needs to be "None" :param from_state_id: The source state of the transition :param from_outcome: The outcome id of the source state to connect the transition to :param to_state_id: The target state of the transition :param to_outcome: The target outcome id of a container state :param transition_id: An optional transition id for the new transition """ transition_id = self.check_transition_id(transition_id) # Set from_state_id to None for start transitions, as from_state_id and from_outcome should both be None for # these transitions if from_state_id == self.state_id and from_outcome is None: from_state_id = None new_transition = Transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self) self.transitions[transition_id] = new_transition # notify all states waiting for transition to be connected self._transitions_cv.acquire() self._transitions_cv.notify_all() self._transitions_cv.release() # self.create_transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id) return transition_id
[ "def", "add_transition", "(", "self", ",", "from_state_id", ",", "from_outcome", ",", "to_state_id", ",", "to_outcome", ",", "transition_id", "=", "None", ")", ":", "transition_id", "=", "self", ".", "check_transition_id", "(", "transition_id", ")", "# Set from_state_id to None for start transitions, as from_state_id and from_outcome should both be None for", "# these transitions", "if", "from_state_id", "==", "self", ".", "state_id", "and", "from_outcome", "is", "None", ":", "from_state_id", "=", "None", "new_transition", "=", "Transition", "(", "from_state_id", ",", "from_outcome", ",", "to_state_id", ",", "to_outcome", ",", "transition_id", ",", "self", ")", "self", ".", "transitions", "[", "transition_id", "]", "=", "new_transition", "# notify all states waiting for transition to be connected", "self", ".", "_transitions_cv", ".", "acquire", "(", ")", "self", ".", "_transitions_cv", ".", "notify_all", "(", ")", "self", ".", "_transitions_cv", ".", "release", "(", ")", "# self.create_transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id)", "return", "transition_id" ]
Adds a transition to the container state Note: Either the toState or the toOutcome needs to be "None" :param from_state_id: The source state of the transition :param from_outcome: The outcome id of the source state to connect the transition to :param to_state_id: The target state of the transition :param to_outcome: The target outcome id of a container state :param transition_id: An optional transition id for the new transition
[ "Adds", "a", "transition", "to", "the", "container", "state" ]
python
train
49.178571
instaloader/instaloader
instaloader/instaloadercontext.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloadercontext.py#L131-L149
def _default_http_header(self, empty_session_only: bool = False) -> Dict[str, str]: """Returns default HTTP header we use for requests.""" header = {'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': 'www.instagram.com', 'Origin': 'https://www.instagram.com', 'Referer': 'https://www.instagram.com/', 'User-Agent': self.user_agent, 'X-Instagram-AJAX': '1', 'X-Requested-With': 'XMLHttpRequest'} if empty_session_only: del header['Host'] del header['Origin'] del header['Referer'] del header['X-Instagram-AJAX'] del header['X-Requested-With'] return header
[ "def", "_default_http_header", "(", "self", ",", "empty_session_only", ":", "bool", "=", "False", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "header", "=", "{", "'Accept-Encoding'", ":", "'gzip, deflate'", ",", "'Accept-Language'", ":", "'en-US,en;q=0.8'", ",", "'Connection'", ":", "'keep-alive'", ",", "'Content-Length'", ":", "'0'", ",", "'Host'", ":", "'www.instagram.com'", ",", "'Origin'", ":", "'https://www.instagram.com'", ",", "'Referer'", ":", "'https://www.instagram.com/'", ",", "'User-Agent'", ":", "self", ".", "user_agent", ",", "'X-Instagram-AJAX'", ":", "'1'", ",", "'X-Requested-With'", ":", "'XMLHttpRequest'", "}", "if", "empty_session_only", ":", "del", "header", "[", "'Host'", "]", "del", "header", "[", "'Origin'", "]", "del", "header", "[", "'Referer'", "]", "del", "header", "[", "'X-Instagram-AJAX'", "]", "del", "header", "[", "'X-Requested-With'", "]", "return", "header" ]
Returns default HTTP header we use for requests.
[ "Returns", "default", "HTTP", "header", "we", "use", "for", "requests", "." ]
python
train
45.894737
threeML/astromodels
astromodels/functions/priors.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/priors.py#L594-L612
def from_unit_cube(self, x): """ Used by multinest :param x: 0 < x < 1 :param lower_bound: :param upper_bound: :return: """ lower_bound = self.lower_bound.value upper_bound = self.upper_bound.value low = lower_bound spread = float(upper_bound - lower_bound) par = x * spread + low return par
[ "def", "from_unit_cube", "(", "self", ",", "x", ")", ":", "lower_bound", "=", "self", ".", "lower_bound", ".", "value", "upper_bound", "=", "self", ".", "upper_bound", ".", "value", "low", "=", "lower_bound", "spread", "=", "float", "(", "upper_bound", "-", "lower_bound", ")", "par", "=", "x", "*", "spread", "+", "low", "return", "par" ]
Used by multinest :param x: 0 < x < 1 :param lower_bound: :param upper_bound: :return:
[ "Used", "by", "multinest" ]
python
train
20.105263
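The body reduces to an affine map from the unit interval onto [lower_bound, upper_bound]; a standalone sketch of the same arithmetic:

lower, upper = -5.0, 5.0
for x in (0.0, 0.5, 1.0):
    par = x * (upper - lower) + lower
    print(x, "->", par)   # 0.0 -> -5.0, 0.5 -> 0.0, 1.0 -> 5.0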
cslarsen/lyn
lyn/lyn.py
https://github.com/cslarsen/lyn/blob/d615a6a9473083ffc7318a98fcc697cbc28ba5da/lyn/lyn.py#L83-L86
def load(self, name): """Loads and returns foreign library.""" name = ctypes.util.find_library(name) return ctypes.cdll.LoadLibrary(name)
[ "def", "load", "(", "self", ",", "name", ")", ":", "name", "=", "ctypes", ".", "util", ".", "find_library", "(", "name", ")", "return", "ctypes", ".", "cdll", ".", "LoadLibrary", "(", "name", ")" ]
Loads and returns foreign library.
[ "Loads", "and", "returns", "foreign", "library", "." ]
python
train
39.5
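The same two-step ctypes pattern, sketched standalone for a POSIX system (find_library can return None when the lookup fails, which the method above does not guard against):

import ctypes
import ctypes.util

name = ctypes.util.find_library("m")      # e.g. 'libm.so.6' on Linux
libm = ctypes.cdll.LoadLibrary(name)
libm.cos.restype = ctypes.c_double
libm.cos.argtypes = [ctypes.c_double]
print(libm.cos(0.0))                      # 1.0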
mrstephenneal/pdfconduit
pdf/convert/pdf2img.py
https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/convert/pdf2img.py#L51-L79
def _get_page_data(self, pno, zoom=0):
    """
    Return a PNG image for a document page number. If zoom is other than 0, one of the 4 page quadrants are zoomed-in
    instead and the corresponding clip returned.
    """
    dlist = self.dlist_tab[pno]  # get display list
    if not dlist:  # create if not yet there
        self.dlist_tab[pno] = self.doc[pno].getDisplayList()
        dlist = self.dlist_tab[pno]
    r = dlist.rect  # page rectangle
    mp = r.tl + (r.br - r.tl) * 0.5  # rect middle point
    mt = r.tl + (r.tr - r.tl) * 0.5  # middle of top edge
    ml = r.tl + (r.bl - r.tl) * 0.5  # middle of left edge
    mr = r.tr + (r.br - r.tr) * 0.5  # middle of right edge
    mb = r.bl + (r.br - r.bl) * 0.5  # middle of bottom edge
    mat = fitz.Matrix(2, 2)  # zoom matrix
    if zoom == 1:  # top-left quadrant
        clip = fitz.Rect(r.tl, mp)
    elif zoom == 4:  # bot-right quadrant
        clip = fitz.Rect(mp, r.br)
    elif zoom == 2:  # top-right
        clip = fitz.Rect(mt, mr)
    elif zoom == 3:  # bot-left
        clip = fitz.Rect(ml, mb)
    if zoom == 0:  # total page
        pix = dlist.getPixmap(alpha=False)
    else:
        pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
    return pix.getPNGData()
[ "def", "_get_page_data", "(", "self", ",", "pno", ",", "zoom", "=", "0", ")", ":", "dlist", "=", "self", ".", "dlist_tab", "[", "pno", "]", "# get display list", "if", "not", "dlist", ":", "# create if not yet there", "self", ".", "dlist_tab", "[", "pno", "]", "=", "self", ".", "doc", "[", "pno", "]", ".", "getDisplayList", "(", ")", "dlist", "=", "self", ".", "dlist_tab", "[", "pno", "]", "r", "=", "dlist", ".", "rect", "# page rectangle", "mp", "=", "r", ".", "tl", "+", "(", "r", ".", "br", "-", "r", ".", "tl", ")", "*", "0.5", "# rect middle point", "mt", "=", "r", ".", "tl", "+", "(", "r", ".", "tr", "-", "r", ".", "tl", ")", "*", "0.5", "# middle of top edge", "ml", "=", "r", ".", "tl", "+", "(", "r", ".", "bl", "-", "r", ".", "tl", ")", "*", "0.5", "# middle of left edge", "mr", "=", "r", ".", "tr", "+", "(", "r", ".", "br", "-", "r", ".", "tr", ")", "*", "0.5", "# middle of right edge", "mb", "=", "r", ".", "bl", "+", "(", "r", ".", "br", "-", "r", ".", "bl", ")", "*", "0.5", "# middle of bottom edge", "mat", "=", "fitz", ".", "Matrix", "(", "2", ",", "2", ")", "# zoom matrix", "if", "zoom", "==", "1", ":", "# top-left quadrant", "clip", "=", "fitz", ".", "Rect", "(", "r", ".", "tl", ",", "mp", ")", "elif", "zoom", "==", "4", ":", "# bot-right quadrant", "clip", "=", "fitz", ".", "Rect", "(", "mp", ",", "r", ".", "br", ")", "elif", "zoom", "==", "2", ":", "# top-right", "clip", "=", "fitz", ".", "Rect", "(", "mt", ",", "mr", ")", "elif", "zoom", "==", "3", ":", "# bot-left", "clip", "=", "fitz", ".", "Rect", "(", "ml", ",", "mb", ")", "if", "zoom", "==", "0", ":", "# total page", "pix", "=", "dlist", ".", "getPixmap", "(", "alpha", "=", "False", ")", "else", ":", "pix", "=", "dlist", ".", "getPixmap", "(", "alpha", "=", "False", ",", "matrix", "=", "mat", ",", "clip", "=", "clip", ")", "return", "pix", ".", "getPNGData", "(", ")" ]
Return a PNG image for a document page number. If zoom is other than 0, one of the 4 page quadrants are zoomed-in instead and the corresponding clip returned.
[ "Return", "a", "PNG", "image", "for", "a", "document", "page", "number", ".", "If", "zoom", "is", "other", "than", "0", "one", "of", "the", "4", "page", "quadrants", "are", "zoomed", "-", "in", "instead", "and", "the", "corresponding", "clip", "returned", "." ]
python
train
46.034483
stoneworksolutions/stoneredis
stoneredis/client.py
https://github.com/stoneworksolutions/stoneredis/blob/46018b81317c152bd47c0764c3f1293379618af4/stoneredis/client.py#L228-L246
def get_lock(self, lockname, locktime=60, auto_renewal=False):
    ''' Gets a lock and returns it if it can be established. Returns False otherwise '''
    pid = os.getpid()
    caller = inspect.stack()[0][3]
    try:
        # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
        rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
    except:
        if self.logger:
            self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
        return False
    try:
        lock = rl.acquire(blocking=False)
    except RedisError:
        return False
    if not lock:
        return False
    else:
        return rl
[ "def", "get_lock", "(", "self", ",", "lockname", ",", "locktime", "=", "60", ",", "auto_renewal", "=", "False", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "caller", "=", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", "try", ":", "# rl = redlock.Redlock([{\"host\": settings.REDIS_SERVERS['std_redis']['host'], \"port\": settings.REDIS_SERVERS['std_redis']['port'], \"db\": settings.REDIS_SERVERS['std_redis']['db']}, ])\r", "rl", "=", "redis_lock", ".", "Lock", "(", "self", ",", "lockname", ",", "expire", "=", "locktime", ",", "auto_renewal", "=", "auto_renewal", ")", "except", ":", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "error", "(", "'Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'", ".", "format", "(", "pid", ",", "caller", ",", "lockname", ",", "traceback", ".", "format_exc", "(", ")", ")", ")", "return", "False", "try", ":", "lock", "=", "rl", ".", "acquire", "(", "blocking", "=", "False", ")", "except", "RedisError", ":", "return", "False", "if", "not", "lock", ":", "return", "False", "else", ":", "return", "rl" ]
Gets a lock and returns it if it can be established. Returns False otherwise
[ "Gets", "a", "lock", "and", "returns", "it", "if", "it", "can", "be", "established", ".", "Returns", "False", "otherwise" ]
python
train
49.894737
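A minimal sketch of the python-redis-lock calls underneath, assuming a reachable local Redis server; the method above passes self as the client, which suggests the class wraps or subclasses a Redis connection:

import redis
import redis_lock

conn = redis.StrictRedis()
lock = redis_lock.Lock(conn, "report-job", expire=60, auto_renewal=False)
if lock.acquire(blocking=False):
    try:
        pass  # critical section
    finally:
        lock.release()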
frc1418/tbapy
tbapy/main.py
https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L209-L216
def team_districts(self, team): """ Get districts a team has competed in. :param team: Team to get data on. :return: List of District objects. """ return [District(raw) for raw in self._get('team/%s/districts' % self.team_key(team))]
[ "def", "team_districts", "(", "self", ",", "team", ")", ":", "return", "[", "District", "(", "raw", ")", "for", "raw", "in", "self", ".", "_get", "(", "'team/%s/districts'", "%", "self", ".", "team_key", "(", "team", ")", ")", "]" ]
Get districts a team has competed in. :param team: Team to get data on. :return: List of District objects.
[ "Get", "districts", "a", "team", "has", "competed", "in", "." ]
python
train
34.375
datastax/python-driver
cassandra/cqlengine/query.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L831-L846
def count(self): """ Returns the number of rows matched by this query. *Note: This function executes a SELECT COUNT() and has a performance cost on large datasets* """ if self._batch: raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode") if self._count is None: query = self._select_query() query.count = True result = self._execute(query) count_row = result.one().popitem() self._count = count_row[1] return self._count
[ "def", "count", "(", "self", ")", ":", "if", "self", ".", "_batch", ":", "raise", "CQLEngineException", "(", "\"Only inserts, updates, and deletes are available in batch mode\"", ")", "if", "self", ".", "_count", "is", "None", ":", "query", "=", "self", ".", "_select_query", "(", ")", "query", ".", "count", "=", "True", "result", "=", "self", ".", "_execute", "(", "query", ")", "count_row", "=", "result", ".", "one", "(", ")", ".", "popitem", "(", ")", "self", ".", "_count", "=", "count_row", "[", "1", "]", "return", "self", ".", "_count" ]
Returns the number of rows matched by this query. *Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*
[ "Returns", "the", "number", "of", "rows", "matched", "by", "this", "query", "." ]
python
train
35.75
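A usage sketch for the queryset method above, assuming a configured cqlengine connection and a hypothetical Event model; the result is computed server-side and cached on the queryset:

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Event(Model):
    id = columns.UUID(primary_key=True)
    kind = columns.Text(index=True)

# Issues SELECT COUNT(*) against the matching rows (can be slow on
# large tables, per the warning in the docstring).
n = Event.objects.filter(kind='click').count()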
EricDalrymple91/strawpy
strawpy/asyncstrawpy.py
https://github.com/EricDalrymple91/strawpy/blob/0c4294fc2dca250a5c13a97e825ae21587278a91/strawpy/asyncstrawpy.py#L11-L22
async def get_poll(poll_id): """ Get a strawpoll. Example: poll = strawpy.get_poll('11682852') :param poll_id: :return: strawpy.Strawpoll object """ async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)) as r: return await StrawPoll(r)
[ "async", "def", "get_poll", "(", "poll_id", ")", ":", "async", "with", "aiohttp", ".", "get", "(", "'{api_url}/{poll_id}'", ".", "format", "(", "api_url", "=", "api_url", ",", "poll_id", "=", "poll_id", ")", ")", "as", "r", ":", "return", "await", "StrawPoll", "(", "r", ")" ]
Get a strawpoll. Example: poll = strawpy.get_poll('11682852') :param poll_id: :return: strawpy.Strawpoll object
[ "Get", "a", "strawpoll", "." ]
python
train
24.916667
SeabornGames/Table
seaborn_table/table.py
https://github.com/SeabornGames/Table/blob/0c474ef2fb00db0e7cf47e8af91e3556c2e7485a/seaborn_table/table.py#L1063-L1096
def pertibate(self, pertibate_columns=None, filter_func=None,
              max_size=1000):
    """
    :param pertibate_columns: list of str of columns to pertibate see DOE
    :param filter_func: func that takes a SeabornRow and return True
            if this row should exist
    :param max_size: int of the max number of rows to try
            but some may be filtered out
    :return:  None
    """
    pertibate_columns = pertibate_columns or self.columns
    for c in pertibate_columns:
        assert c in self.columns, 'Column %s was not part of this self' % c

    # noinspection PyTypeChecker
    column_size = [c in pertibate_columns and len(self._parameters[c]) or 1
                   for c in self.columns]

    max_size = min(max_size, reduce(lambda x, y: x * y, column_size))

    for indexes in self._index_iterator(column_size, max_size):
        row = SeabornRow(self._column_index,
                         [self._pertibate_value(indexes.pop(0), c) for c in
                          self.columns])
        kwargs = row.obj_to_dict()
        if filter_func is None or filter_func(_row_index=len(self.table),
                                              **kwargs):
            self.table.append(row)

    for c in self.columns:  # if the parameter is a dynamic function
        if hasattr(self._parameters.get(c, ''), '__call__'):
            # noinspection PyTypeChecker
            self.set_column(c, self._parameters[c])
[ "def", "pertibate", "(", "self", ",", "pertibate_columns", "=", "None", ",", "filter_func", "=", "None", ",", "max_size", "=", "1000", ")", ":", "pertibate_columns", "=", "pertibate_columns", "or", "self", ".", "columns", "for", "c", "in", "pertibate_columns", ":", "assert", "c", "in", "self", ".", "columns", ",", "'Column %s was not part of this self'", "%", "c", "# noinspection PyTypeChecker", "column_size", "=", "[", "c", "in", "pertibate_columns", "and", "len", "(", "self", ".", "_parameters", "[", "c", "]", ")", "or", "1", "for", "c", "in", "self", ".", "columns", "]", "max_size", "=", "min", "(", "max_size", ",", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "column_size", ")", ")", "for", "indexes", "in", "self", ".", "_index_iterator", "(", "column_size", ",", "max_size", ")", ":", "row", "=", "SeabornRow", "(", "self", ".", "_column_index", ",", "[", "self", ".", "_pertibate_value", "(", "indexes", ".", "pop", "(", "0", ")", ",", "c", ")", "for", "c", "in", "self", ".", "columns", "]", ")", "kwargs", "=", "row", ".", "obj_to_dict", "(", ")", "if", "filter_func", "is", "None", "or", "filter_func", "(", "_row_index", "=", "len", "(", "self", ".", "table", ")", ",", "*", "*", "kwargs", ")", ":", "self", ".", "table", ".", "append", "(", "row", ")", "for", "c", "in", "self", ".", "columns", ":", "# if the parameter is a dynamic function", "if", "hasattr", "(", "self", ".", "_parameters", ".", "get", "(", "c", ",", "''", ")", ",", "'__call__'", ")", ":", "# noinspection PyTypeChecker", "self", ".", "set_column", "(", "c", ",", "self", ".", "_parameters", "[", "c", "]", ")" ]
:param pertibate_columns: list of str of columns to pertibate see DOE :param filter_func: func that takes a SeabornRow and return True if this row should exist :param max_size: int of the max number of rows to try but some may be filtered out :return: None
[ ":", "param", "pertibate_columns", ":", "list", "of", "str", "of", "columns", "to", "pertibate", "see", "DOE", ":", "param", "filter_func", ":", "func", "that", "takes", "a", "SeabornRow", "and", "return", "True", "if", "this", "row", "should", "exist", ":", "param", "max_size", ":", "int", "of", "the", "max", "number", "of", "rows", "to", "try", "but", "some", "may", "be", "filtered", "out", ":", "return", ":", "None" ]
python
train
45.676471
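The core of pertibate is a size-capped cartesian product over the per-column parameter lists; a standalone sketch of that capping logic with itertools (the parameters dict is hypothetical):

import itertools
from functools import reduce

parameters = {'a': [1, 2], 'b': ['x', 'y', 'z']}
column_size = [len(v) for v in parameters.values()]
max_size = min(1000, reduce(lambda x, y: x * y, column_size))

# Walk at most max_size combinations, as _index_iterator does above.
rows = list(itertools.islice(itertools.product(*parameters.values()),
                             max_size))
assert len(rows) == 6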
cvxopt/chompack
src/python/symbolic.py
https://github.com/cvxopt/chompack/blob/e07106b58b8055c34f6201e8c954482f86987833/src/python/symbolic.py#L1176-L1210
def add_projection(self, A, alpha = 1.0, beta = 1.0, reordered=False): """ Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`. X := alpha*proj(A) + beta*X """ assert self.is_factor is False, "cannot project matrix onto a cspmatrix factor" assert isinstance(A, matrix), "argument A must be a dense matrix" symb = self.symb blkval = self.blkval n = symb.n snptr = symb.snptr snode = symb.snode relptr = symb.relptr snrowidx = symb.snrowidx sncolptr = symb.sncolptr blkptr = symb.blkptr if self.symb.p is not None and reordered is False: A = tril(A) A = A+A.T A[::A.size[0]+1] *= 0.5 A = A[self.symb.p,self.symb.p] # for each block ... for k in range(self.symb.Nsn): nn = snptr[k+1]-snptr[k] na = relptr[k+1]-relptr[k] nj = nn + na blkval[blkptr[k]:blkptr[k+1]] = beta*blkval[blkptr[k]:blkptr[k+1]] + alpha*(A[snrowidx[sncolptr[k]:sncolptr[k+1]],snode[snptr[k]:snptr[k+1]]][:]) return
[ "def", "add_projection", "(", "self", ",", "A", ",", "alpha", "=", "1.0", ",", "beta", "=", "1.0", ",", "reordered", "=", "False", ")", ":", "assert", "self", ".", "is_factor", "is", "False", ",", "\"cannot project matrix onto a cspmatrix factor\"", "assert", "isinstance", "(", "A", ",", "matrix", ")", ",", "\"argument A must be a dense matrix\"", "symb", "=", "self", ".", "symb", "blkval", "=", "self", ".", "blkval", "n", "=", "symb", ".", "n", "snptr", "=", "symb", ".", "snptr", "snode", "=", "symb", ".", "snode", "relptr", "=", "symb", ".", "relptr", "snrowidx", "=", "symb", ".", "snrowidx", "sncolptr", "=", "symb", ".", "sncolptr", "blkptr", "=", "symb", ".", "blkptr", "if", "self", ".", "symb", ".", "p", "is", "not", "None", "and", "reordered", "is", "False", ":", "A", "=", "tril", "(", "A", ")", "A", "=", "A", "+", "A", ".", "T", "A", "[", ":", ":", "A", ".", "size", "[", "0", "]", "+", "1", "]", "*=", "0.5", "A", "=", "A", "[", "self", ".", "symb", ".", "p", ",", "self", ".", "symb", ".", "p", "]", "# for each block ...", "for", "k", "in", "range", "(", "self", ".", "symb", ".", "Nsn", ")", ":", "nn", "=", "snptr", "[", "k", "+", "1", "]", "-", "snptr", "[", "k", "]", "na", "=", "relptr", "[", "k", "+", "1", "]", "-", "relptr", "[", "k", "]", "nj", "=", "nn", "+", "na", "blkval", "[", "blkptr", "[", "k", "]", ":", "blkptr", "[", "k", "+", "1", "]", "]", "=", "beta", "*", "blkval", "[", "blkptr", "[", "k", "]", ":", "blkptr", "[", "k", "+", "1", "]", "]", "+", "alpha", "*", "(", "A", "[", "snrowidx", "[", "sncolptr", "[", "k", "]", ":", "sncolptr", "[", "k", "+", "1", "]", "]", ",", "snode", "[", "snptr", "[", "k", "]", ":", "snptr", "[", "k", "+", "1", "]", "]", "]", "[", ":", "]", ")", "return" ]
Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`. X := alpha*proj(A) + beta*X
[ "Add", "projection", "of", "a", "dense", "matrix", ":", "math", ":", "A", "to", ":", "py", ":", "class", ":", "cspmatrix", "." ]
python
train
32.914286
spulec/freezegun
freezegun/api.py
https://github.com/spulec/freezegun/blob/9347d133f33f675c87bb0569d70d9d95abef737f/freezegun/api.py#L448-L452
def move_to(self, target_datetime): """Moves frozen date to the given ``target_datetime``""" target_datetime = _parse_time_to_freeze(target_datetime) delta = target_datetime - self.time_to_freeze self.tick(delta=delta)
[ "def", "move_to", "(", "self", ",", "target_datetime", ")", ":", "target_datetime", "=", "_parse_time_to_freeze", "(", "target_datetime", ")", "delta", "=", "target_datetime", "-", "self", ".", "time_to_freeze", "self", ".", "tick", "(", "delta", "=", "delta", ")" ]
Moves frozen date to the given ``target_datetime``
[ "Moves", "frozen", "date", "to", "the", "given", "target_datetime" ]
python
train
49.2
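In practice move_to is reached through freezegun's public freeze_time factory; a short sketch:

import datetime
from freezegun import freeze_time

with freeze_time("2020-01-01") as frozen:
    frozen.move_to("2020-06-15")
    print(datetime.datetime.now())   # 2020-06-15 00:00:00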
slackapi/python-slackclient
slack/web/client.py
https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/slack/web/client.py#L1272-L1280
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse: """Set the user profile photo Args: image (str): Supply the path of the image you'd like to upload. e.g. 'myimage.png' """ self._validate_xoxp_token() return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
[ "def", "users_setPhoto", "(", "self", ",", "*", ",", "image", ":", "Union", "[", "str", ",", "IOBase", "]", ",", "*", "*", "kwargs", ")", "->", "SlackResponse", ":", "self", ".", "_validate_xoxp_token", "(", ")", "return", "self", ".", "api_call", "(", "\"users.setPhoto\"", ",", "files", "=", "{", "\"image\"", ":", "image", "}", ",", "data", "=", "kwargs", ")" ]
Set the user profile photo Args: image (str): Supply the path of the image you'd like to upload. e.g. 'myimage.png'
[ "Set", "the", "user", "profile", "photo" ]
python
train
41.111111
prompt-toolkit/pymux
pymux/arrangement.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/arrangement.py#L353-L359
def focus_next(self, count=1): " Focus the next pane. " panes = self.panes if panes: self.active_pane = panes[(panes.index(self.active_pane) + count) % len(panes)] else: self.active_pane = None
[ "def", "focus_next", "(", "self", ",", "count", "=", "1", ")", ":", "panes", "=", "self", ".", "panes", "if", "panes", ":", "self", ".", "active_pane", "=", "panes", "[", "(", "panes", ".", "index", "(", "self", ".", "active_pane", ")", "+", "count", ")", "%", "len", "(", "panes", ")", "]", "else", ":", "self", ".", "active_pane", "=", "None" ]
Focus the next pane.
[ "Focus", "the", "next", "pane", "." ]
python
train
34.714286
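The wrap-around in focus_next is plain modular indexing; a standalone sketch:

panes = ['p0', 'p1', 'p2']
active = 'p2'
# Advancing by one past the last pane wraps back to the first.
active = panes[(panes.index(active) + 1) % len(panes)]
assert active == 'p0'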
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py#L341-L391
def selectImports(pth, xtrapath=None): """ Return the dependencies of a binary that should be included. Return a list of pairs (name, fullpath) """ rv = [] if xtrapath is None: xtrapath = [os.path.dirname(pth)] else: assert isinstance(xtrapath, list) xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy dlls = getImports(pth) for lib in dlls: if seen.get(lib.upper(), 0): continue if not is_win and not is_cygwin: # all other platforms npth = lib dir, lib = os.path.split(lib) else: # plain win case npth = getfullnameof(lib, xtrapath) # now npth is a candidate lib if found # check again for excludes but with regex FIXME: split the list if npth: candidatelib = npth else: candidatelib = lib if not dylib.include_library(candidatelib): if (candidatelib.find('libpython') < 0 and candidatelib.find('Python.framework') < 0): # skip libs not containing (libpython or Python.framework) if not seen.get(npth.upper(), 0): logger.debug("Skipping %s dependency of %s", lib, os.path.basename(pth)) continue else: pass if npth: if not seen.get(npth.upper(), 0): logger.debug("Adding %s dependency of %s", lib, os.path.basename(pth)) rv.append((lib, npth)) else: logger.error("lib not found: %s dependency of %s", lib, pth) return rv
[ "def", "selectImports", "(", "pth", ",", "xtrapath", "=", "None", ")", ":", "rv", "=", "[", "]", "if", "xtrapath", "is", "None", ":", "xtrapath", "=", "[", "os", ".", "path", ".", "dirname", "(", "pth", ")", "]", "else", ":", "assert", "isinstance", "(", "xtrapath", ",", "list", ")", "xtrapath", "=", "[", "os", ".", "path", ".", "dirname", "(", "pth", ")", "]", "+", "xtrapath", "# make a copy", "dlls", "=", "getImports", "(", "pth", ")", "for", "lib", "in", "dlls", ":", "if", "seen", ".", "get", "(", "lib", ".", "upper", "(", ")", ",", "0", ")", ":", "continue", "if", "not", "is_win", "and", "not", "is_cygwin", ":", "# all other platforms", "npth", "=", "lib", "dir", ",", "lib", "=", "os", ".", "path", ".", "split", "(", "lib", ")", "else", ":", "# plain win case", "npth", "=", "getfullnameof", "(", "lib", ",", "xtrapath", ")", "# now npth is a candidate lib if found", "# check again for excludes but with regex FIXME: split the list", "if", "npth", ":", "candidatelib", "=", "npth", "else", ":", "candidatelib", "=", "lib", "if", "not", "dylib", ".", "include_library", "(", "candidatelib", ")", ":", "if", "(", "candidatelib", ".", "find", "(", "'libpython'", ")", "<", "0", "and", "candidatelib", ".", "find", "(", "'Python.framework'", ")", "<", "0", ")", ":", "# skip libs not containing (libpython or Python.framework)", "if", "not", "seen", ".", "get", "(", "npth", ".", "upper", "(", ")", ",", "0", ")", ":", "logger", ".", "debug", "(", "\"Skipping %s dependency of %s\"", ",", "lib", ",", "os", ".", "path", ".", "basename", "(", "pth", ")", ")", "continue", "else", ":", "pass", "if", "npth", ":", "if", "not", "seen", ".", "get", "(", "npth", ".", "upper", "(", ")", ",", "0", ")", ":", "logger", ".", "debug", "(", "\"Adding %s dependency of %s\"", ",", "lib", ",", "os", ".", "path", ".", "basename", "(", "pth", ")", ")", "rv", ".", "append", "(", "(", "lib", ",", "npth", ")", ")", "else", ":", "logger", ".", "error", "(", "\"lib not found: %s dependency of %s\"", ",", "lib", ",", "pth", ")", "return", "rv" ]
Return the dependencies of a binary that should be included. Return a list of pairs (name, fullpath)
[ "Return", "the", "dependencies", "of", "a", "binary", "that", "should", "be", "included", "." ]
python
train
32.686275
apache/incubator-superset
superset/tasks/schedules.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L419-L440
def schedule_window(report_type, start_at, stop_at, resolution): """ Find all active schedules and schedule celery tasks for each of them with a specific ETA (determined by parsing the cron schedule for the schedule) """ model_cls = get_scheduler_model(report_type) dbsession = db.create_scoped_session() schedules = dbsession.query(model_cls).filter(model_cls.active.is_(True)) for schedule in schedules: args = ( report_type, schedule.id, ) # Schedule the job for the specified time window for eta in next_schedules(schedule.crontab, start_at, stop_at, resolution=resolution): schedule_email_report.apply_async(args, eta=eta)
[ "def", "schedule_window", "(", "report_type", ",", "start_at", ",", "stop_at", ",", "resolution", ")", ":", "model_cls", "=", "get_scheduler_model", "(", "report_type", ")", "dbsession", "=", "db", ".", "create_scoped_session", "(", ")", "schedules", "=", "dbsession", ".", "query", "(", "model_cls", ")", ".", "filter", "(", "model_cls", ".", "active", ".", "is_", "(", "True", ")", ")", "for", "schedule", "in", "schedules", ":", "args", "=", "(", "report_type", ",", "schedule", ".", "id", ",", ")", "# Schedule the job for the specified time window", "for", "eta", "in", "next_schedules", "(", "schedule", ".", "crontab", ",", "start_at", ",", "stop_at", ",", "resolution", "=", "resolution", ")", ":", "schedule_email_report", ".", "apply_async", "(", "args", ",", "eta", "=", "eta", ")" ]
Find all active schedules and schedule celery tasks for each of them with a specific ETA (determined by parsing the cron schedule for the schedule)
[ "Find", "all", "active", "schedules", "and", "schedule", "celery", "tasks", "for", "each", "of", "them", "with", "a", "specific", "ETA", "(", "determined", "by", "parsing", "the", "cron", "schedule", "for", "the", "schedule", ")" ]
python
train
37
sanoma/django-arctic
arctic/mixins.py
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/mixins.py#L623-L631
def _get_ordering_field_lookup(self, field_name): """ get real model field to order by """ field = field_name get_field = getattr(self, "get_%s_ordering_field" % field_name, None) if get_field: field = get_field() return field
[ "def", "_get_ordering_field_lookup", "(", "self", ",", "field_name", ")", ":", "field", "=", "field_name", "get_field", "=", "getattr", "(", "self", ",", "\"get_%s_ordering_field\"", "%", "field_name", ",", "None", ")", "if", "get_field", ":", "field", "=", "get_field", "(", ")", "return", "field" ]
get real model field to order by
[ "get", "real", "model", "field", "to", "order", "by" ]
python
train
31.777778
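The getattr hook lets a view remap a display column onto a real model field; a minimal sketch with a hypothetical get_name_ordering_field:

class UserListView:
    def get_name_ordering_field(self):
        # Sort the "name" column by the underlying model field.
        return 'last_name'

view = UserListView()
hook = getattr(view, 'get_%s_ordering_field' % 'name', None)
print(hook() if hook else 'name')   # 'last_name'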
EnigmaBridge/client.py
ebclient/crypto_util.py
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/crypto_util.py#L208-L226
def get_random_integer(N, randfunc=None): """getRandomInteger(N:int, randfunc:callable):long Return a random number with at most N bits. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future. """ if randfunc is None: randfunc = Random.new().read S = randfunc(N>>3) odd_bits = N % 8 if odd_bits != 0: char = ord(randfunc(1)) >> (8-odd_bits) S = bchr(char) + S value = bytes_to_long(S) return value
[ "def", "get_random_integer", "(", "N", ",", "randfunc", "=", "None", ")", ":", "if", "randfunc", "is", "None", ":", "randfunc", "=", "Random", ".", "new", "(", ")", ".", "read", "S", "=", "randfunc", "(", "N", ">>", "3", ")", "odd_bits", "=", "N", "%", "8", "if", "odd_bits", "!=", "0", ":", "char", "=", "ord", "(", "randfunc", "(", "1", ")", ")", ">>", "(", "8", "-", "odd_bits", ")", "S", "=", "bchr", "(", "char", ")", "+", "S", "value", "=", "bytes_to_long", "(", "S", ")", "return", "value" ]
getRandomInteger(N:int, randfunc:callable):long Return a random number with at most N bits. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future.
[ "getRandomInteger", "(", "N", ":", "int", "randfunc", ":", "callable", ")", ":", "long", "Return", "a", "random", "number", "with", "at", "most", "N", "bits", "." ]
python
train
28.315789
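The same "round up to whole bytes, then strip the excess high bits" trick, sketched on top of os.urandom instead of PyCrypto's Random:

import os

def random_bits(n):
    # ceil(n / 8) random bytes, then drop the (8 - n % 8) surplus bits.
    value = int.from_bytes(os.urandom((n + 7) // 8), 'big')
    return value >> ((8 - n % 8) % 8)

assert random_bits(10).bit_length() <= 10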
maxpumperla/elephas
elephas/java/adapter.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/java/adapter.py#L16-L32
def to_numpy(nd4j_array): """ Convert an ND4J array to a numpy array :param nd4j_array: :return: """ buff = nd4j_array.data() address = buff.pointer().address() type_name = java_classes.DataTypeUtil.getDtypeFromContext() data_type = java_classes.DataTypeUtil.getDTypeForName(type_name) mapping = { 'double': ctypes.c_double, 'float': ctypes.c_float } Pointer = ctypes.POINTER(mapping[data_type]) pointer = ctypes.cast(address, Pointer) np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape())) return np_array
[ "def", "to_numpy", "(", "nd4j_array", ")", ":", "buff", "=", "nd4j_array", ".", "data", "(", ")", "address", "=", "buff", ".", "pointer", "(", ")", ".", "address", "(", ")", "type_name", "=", "java_classes", ".", "DataTypeUtil", ".", "getDtypeFromContext", "(", ")", "data_type", "=", "java_classes", ".", "DataTypeUtil", ".", "getDTypeForName", "(", "type_name", ")", "mapping", "=", "{", "'double'", ":", "ctypes", ".", "c_double", ",", "'float'", ":", "ctypes", ".", "c_float", "}", "Pointer", "=", "ctypes", ".", "POINTER", "(", "mapping", "[", "data_type", "]", ")", "pointer", "=", "ctypes", ".", "cast", "(", "address", ",", "Pointer", ")", "np_array", "=", "np", ".", "ctypeslib", ".", "as_array", "(", "pointer", ",", "tuple", "(", "nd4j_array", ".", "shape", "(", ")", ")", ")", "return", "np_array" ]
Convert an ND4J array to a numpy array :param nd4j_array: :return:
[ "Convert", "an", "ND4J", "array", "to", "a", "numpy", "array", ":", "param", "nd4j_array", ":", ":", "return", ":" ]
python
train
33.882353
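The ctypes-to-numpy bridge in miniature: np.ctypeslib.as_array wraps the foreign buffer without copying (with a raw pointer, as in the record above, the shape must be passed explicitly):

import ctypes
import numpy as np

buf = (ctypes.c_double * 6)(*range(6))
arr = np.ctypeslib.as_array(buf).reshape(2, 3)
arr[0, 0] = 42.0          # writes through to buf: shared memory, no copy
assert buf[0] == 42.0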
Koed00/django-q
django_q/core_signing.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/core_signing.py#L63-L79
def unsign(self, value, max_age=None): """ Retrieve original value and check it wasn't signed more than max_age seconds ago. """ result = super(TimestampSigner, self).unsign(value) value, timestamp = result.rsplit(self.sep, 1) timestamp = baseconv.base62.decode(timestamp) if max_age is not None: if isinstance(max_age, datetime.timedelta): max_age = max_age.total_seconds() # Check timestamp is not older than max_age age = time.time() - timestamp if age > max_age: raise SignatureExpired( 'Signature age %s > %s seconds' % (age, max_age)) return value
[ "def", "unsign", "(", "self", ",", "value", ",", "max_age", "=", "None", ")", ":", "result", "=", "super", "(", "TimestampSigner", ",", "self", ")", ".", "unsign", "(", "value", ")", "value", ",", "timestamp", "=", "result", ".", "rsplit", "(", "self", ".", "sep", ",", "1", ")", "timestamp", "=", "baseconv", ".", "base62", ".", "decode", "(", "timestamp", ")", "if", "max_age", "is", "not", "None", ":", "if", "isinstance", "(", "max_age", ",", "datetime", ".", "timedelta", ")", ":", "max_age", "=", "max_age", ".", "total_seconds", "(", ")", "# Check timestamp is not older than max_age", "age", "=", "time", ".", "time", "(", ")", "-", "timestamp", "if", "age", ">", "max_age", ":", "raise", "SignatureExpired", "(", "'Signature age %s > %s seconds'", "%", "(", "age", ",", "max_age", ")", ")", "return", "value" ]
Retrieve original value and check it wasn't signed more than max_age seconds ago.
[ "Retrieve", "original", "value", "and", "check", "it", "wasn", "t", "signed", "more", "than", "max_age", "seconds", "ago", "." ]
python
train
41.705882
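The stock Django API this subclass extends, as a sketch (assumes a configured Django settings module with a SECRET_KEY):

from django.core.signing import SignatureExpired, TimestampSigner

signer = TimestampSigner()
token = signer.sign('user-42')        # 'user-42:<base62 timestamp>:<sig>'
try:
    value = signer.unsign(token, max_age=3600)   # raises if older than 1h
except SignatureExpired:
    value = None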
dmwm/DBS
Server/Python/src/dbs/business/DBSMigrate.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSMigrate.py#L492-L526
def updateMigrationBlockStatus(self, migration_status=0, migration_block=None, migration_request=None): """ migration_status: 0=PENDING 1=IN PROGRESS 2=COMPLETED 3=FAILED (will be retried) 9=Terminally FAILED status change: 0 -> 1 1 -> 2 1 -> 3 1 -> 9 are only allowed changes for working through migration. 3 -> 1 allowed for retrying. """ conn = self.dbi.connection() tran = conn.begin() try: if migration_block: upst = dict(migration_status=migration_status, migration_block_id=migration_block, last_modification_date=dbsUtils().getTime()) elif migration_request: upst = dict(migration_status=migration_status, migration_request_id=migration_request, last_modification_date=dbsUtils().getTime()) self.mgrup.execute(conn, upst) except: if tran:tran.rollback() raise else: if tran:tran.commit() finally: if conn:conn.close()
[ "def", "updateMigrationBlockStatus", "(", "self", ",", "migration_status", "=", "0", ",", "migration_block", "=", "None", ",", "migration_request", "=", "None", ")", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "tran", "=", "conn", ".", "begin", "(", ")", "try", ":", "if", "migration_block", ":", "upst", "=", "dict", "(", "migration_status", "=", "migration_status", ",", "migration_block_id", "=", "migration_block", ",", "last_modification_date", "=", "dbsUtils", "(", ")", ".", "getTime", "(", ")", ")", "elif", "migration_request", ":", "upst", "=", "dict", "(", "migration_status", "=", "migration_status", ",", "migration_request_id", "=", "migration_request", ",", "last_modification_date", "=", "dbsUtils", "(", ")", ".", "getTime", "(", ")", ")", "self", ".", "mgrup", ".", "execute", "(", "conn", ",", "upst", ")", "except", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "raise", "else", ":", "if", "tran", ":", "tran", ".", "commit", "(", ")", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
migration_status: 0=PENDING 1=IN PROGRESS 2=COMPLETED 3=FAILED (will be retried) 9=Terminally FAILED status change: 0 -> 1 1 -> 2 1 -> 3 1 -> 9 are only allowed changes for working through migration. 3 -> 1 allowed for retrying.
[ "migration_status", ":", "0", "=", "PENDING", "1", "=", "IN", "PROGRESS", "2", "=", "COMPLETED", "3", "=", "FAILED", "(", "will", "be", "retried", ")", "9", "=", "Terminally", "FAILED", "status", "change", ":", "0", "-", ">", "1", "1", "-", ">", "2", "1", "-", ">", "3", "1", "-", ">", "9", "are", "only", "allowed", "changes", "for", "working", "through", "migration", ".", "3", "-", ">", "1", "allowed", "for", "retrying", "." ]
python
train
32.257143
secdev/scapy
scapy/sendrecv.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/sendrecv.py#L720-L733
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501 """Flood and receive packets at layer 2 and return only the first answer prn: function applied to packets received verbose: set verbosity level nofilter: put 1 to avoid use of BPF filters filter: provide a BPF filter iface: listen answers only on the given interface""" s = conf.L2socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501 ans, _ = sndrcvflood(s, x, *args, **kargs) s.close() if len(ans) > 0: return ans[0][1] else: return None
[ "def", "srp1flood", "(", "x", ",", "promisc", "=", "None", ",", "filter", "=", "None", ",", "iface", "=", "None", ",", "nofilter", "=", "0", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "# noqa: E501", "s", "=", "conf", ".", "L2socket", "(", "promisc", "=", "promisc", ",", "filter", "=", "filter", ",", "nofilter", "=", "nofilter", ",", "iface", "=", "iface", ")", "# noqa: E501", "ans", ",", "_", "=", "sndrcvflood", "(", "s", ",", "x", ",", "*", "args", ",", "*", "*", "kargs", ")", "s", ".", "close", "(", ")", "if", "len", "(", "ans", ")", ">", "0", ":", "return", "ans", "[", "0", "]", "[", "1", "]", "else", ":", "return", "None" ]
Flood and receive packets at layer 2 and return only the first answer prn: function applied to packets received verbose: set verbosity level nofilter: put 1 to avoid use of BPF filters filter: provide a BPF filter iface: listen answers only on the given interface
[ "Flood", "and", "receive", "packets", "at", "layer", "2", "and", "return", "only", "the", "first", "answer", "prn", ":", "function", "applied", "to", "packets", "received", "verbose", ":", "set", "verbosity", "level", "nofilter", ":", "put", "1", "to", "avoid", "use", "of", "BPF", "filters", "filter", ":", "provide", "a", "BPF", "filter", "iface", ":", "listen", "answers", "only", "on", "the", "given", "interface" ]
python
train
43.357143
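A typical call, sketched for an ARP probe (requires root privileges; the interface name is an assumption):

from scapy.all import ARP, Ether, srp1flood

ans = srp1flood(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst="192.168.0.1"),
                iface="eth0")
if ans is not None:
    ans.show()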
pyblish/pyblish-qml
pyblish_qml/vendor/Qt.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/vendor/Qt.py#L1262-L1357
def _loadUi(uifile, baseinstance=None): """Dynamically load a user interface from the given `uifile` This function calls `uic.loadUi` if using PyQt bindings, else it implements a comparable binding for PySide. Documentation: http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi Arguments: uifile (str): Absolute path to Qt Designer file. baseinstance (QWidget): Instantiated QWidget or subclass thereof Return: baseinstance if `baseinstance` is not `None`. Otherwise return the newly created instance of the user interface. """ if hasattr(Qt, "_uic"): return Qt._uic.loadUi(uifile, baseinstance) elif hasattr(Qt, "_QtUiTools"): # Implement `PyQt5.uic.loadUi` for PySide(2) class _UiLoader(Qt._QtUiTools.QUiLoader): """Create the user interface in a base instance. Unlike `Qt._QtUiTools.QUiLoader` itself this class does not create a new instance of the top-level widget, but creates the user interface in an existing instance of the top-level class if needed. This mimics the behaviour of `PyQt5.uic.loadUi`. """ def __init__(self, baseinstance): super(_UiLoader, self).__init__(baseinstance) self.baseinstance = baseinstance def load(self, uifile, *args, **kwargs): from xml.etree.ElementTree import ElementTree # For whatever reason, if this doesn't happen then # reading an invalid or non-existing .ui file throws # a RuntimeError. etree = ElementTree() etree.parse(uifile) widget = Qt._QtUiTools.QUiLoader.load( self, uifile, *args, **kwargs) # Workaround for PySide 1.0.9, see issue #208 widget.parentWidget() return widget def createWidget(self, class_name, parent=None, name=""): """Called for each widget defined in ui file Overridden here to populate `baseinstance` instead. """ if parent is None and self.baseinstance: # Supposed to create the top-level widget, # return the base instance instead return self.baseinstance # For some reason, Line is not in the list of available # widgets, but works fine, so we have to special case it here. if class_name in self.availableWidgets() + ["Line"]: # Create a new widget for child widgets widget = Qt._QtUiTools.QUiLoader.createWidget(self, class_name, parent, name) else: raise Exception("Custom widget '%s' not supported" % class_name) if self.baseinstance: # Set an attribute for the new child widget on the base # instance, just like PyQt5.uic.loadUi does. setattr(self.baseinstance, name, widget) return widget widget = _UiLoader(baseinstance).load(uifile) Qt.QtCore.QMetaObject.connectSlotsByName(widget) return widget else: raise NotImplementedError("No implementation available for loadUi")
[ "def", "_loadUi", "(", "uifile", ",", "baseinstance", "=", "None", ")", ":", "if", "hasattr", "(", "Qt", ",", "\"_uic\"", ")", ":", "return", "Qt", ".", "_uic", ".", "loadUi", "(", "uifile", ",", "baseinstance", ")", "elif", "hasattr", "(", "Qt", ",", "\"_QtUiTools\"", ")", ":", "# Implement `PyQt5.uic.loadUi` for PySide(2)", "class", "_UiLoader", "(", "Qt", ".", "_QtUiTools", ".", "QUiLoader", ")", ":", "\"\"\"Create the user interface in a base instance.\n\n Unlike `Qt._QtUiTools.QUiLoader` itself this class does not\n create a new instance of the top-level widget, but creates the user\n interface in an existing instance of the top-level class if needed.\n\n This mimics the behaviour of `PyQt5.uic.loadUi`.\n\n \"\"\"", "def", "__init__", "(", "self", ",", "baseinstance", ")", ":", "super", "(", "_UiLoader", ",", "self", ")", ".", "__init__", "(", "baseinstance", ")", "self", ".", "baseinstance", "=", "baseinstance", "def", "load", "(", "self", ",", "uifile", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "xml", ".", "etree", ".", "ElementTree", "import", "ElementTree", "# For whatever reason, if this doesn't happen then", "# reading an invalid or non-existing .ui file throws", "# a RuntimeError.", "etree", "=", "ElementTree", "(", ")", "etree", ".", "parse", "(", "uifile", ")", "widget", "=", "Qt", ".", "_QtUiTools", ".", "QUiLoader", ".", "load", "(", "self", ",", "uifile", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Workaround for PySide 1.0.9, see issue #208", "widget", ".", "parentWidget", "(", ")", "return", "widget", "def", "createWidget", "(", "self", ",", "class_name", ",", "parent", "=", "None", ",", "name", "=", "\"\"", ")", ":", "\"\"\"Called for each widget defined in ui file\n\n Overridden here to populate `baseinstance` instead.\n\n \"\"\"", "if", "parent", "is", "None", "and", "self", ".", "baseinstance", ":", "# Supposed to create the top-level widget,", "# return the base instance instead", "return", "self", ".", "baseinstance", "# For some reason, Line is not in the list of available", "# widgets, but works fine, so we have to special case it here.", "if", "class_name", "in", "self", ".", "availableWidgets", "(", ")", "+", "[", "\"Line\"", "]", ":", "# Create a new widget for child widgets", "widget", "=", "Qt", ".", "_QtUiTools", ".", "QUiLoader", ".", "createWidget", "(", "self", ",", "class_name", ",", "parent", ",", "name", ")", "else", ":", "raise", "Exception", "(", "\"Custom widget '%s' not supported\"", "%", "class_name", ")", "if", "self", ".", "baseinstance", ":", "# Set an attribute for the new child widget on the base", "# instance, just like PyQt5.uic.loadUi does.", "setattr", "(", "self", ".", "baseinstance", ",", "name", ",", "widget", ")", "return", "widget", "widget", "=", "_UiLoader", "(", "baseinstance", ")", ".", "load", "(", "uifile", ")", "Qt", ".", "QtCore", ".", "QMetaObject", ".", "connectSlotsByName", "(", "widget", ")", "return", "widget", "else", ":", "raise", "NotImplementedError", "(", "\"No implementation available for loadUi\"", ")" ]
Dynamically load a user interface from the given `uifile` This function calls `uic.loadUi` if using PyQt bindings, else it implements a comparable binding for PySide. Documentation: http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi Arguments: uifile (str): Absolute path to Qt Designer file. baseinstance (QWidget): Instantiated QWidget or subclass thereof Return: baseinstance if `baseinstance` is not `None`. Otherwise return the newly created instance of the user interface.
[ "Dynamically", "load", "a", "user", "interface", "from", "the", "given", "uifile" ]
python
train
36.802083
gwpy/gwpy
gwpy/io/hdf5.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/hdf5.py#L104-L129
def with_write_hdf5(func): """Decorate an HDF5-writing function to open a filepath if needed ``func`` should be written to take the object to be written as the first argument, and then presume an `h5py.Group` as the second. This method uses keywords ``append`` and ``overwrite`` as follows if the output file already exists: - ``append=False, overwrite=False``: raise `~exceptions.IOError` - ``append=True``: open in mode ``a`` - ``append=False, overwrite=True``: open in mode ``w`` """ @wraps(func) def decorated_func(obj, fobj, *args, **kwargs): # pylint: disable=missing-docstring if not isinstance(fobj, h5py.HLObject): append = kwargs.get('append', False) overwrite = kwargs.get('overwrite', False) if os.path.exists(fobj) and not (overwrite or append): raise IOError("File exists: %s" % fobj) with h5py.File(fobj, 'a' if append else 'w') as h5f: return func(obj, h5f, *args, **kwargs) return func(obj, fobj, *args, **kwargs) return decorated_func
[ "def", "with_write_hdf5", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "decorated_func", "(", "obj", ",", "fobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "if", "not", "isinstance", "(", "fobj", ",", "h5py", ".", "HLObject", ")", ":", "append", "=", "kwargs", ".", "get", "(", "'append'", ",", "False", ")", "overwrite", "=", "kwargs", ".", "get", "(", "'overwrite'", ",", "False", ")", "if", "os", ".", "path", ".", "exists", "(", "fobj", ")", "and", "not", "(", "overwrite", "or", "append", ")", ":", "raise", "IOError", "(", "\"File exists: %s\"", "%", "fobj", ")", "with", "h5py", ".", "File", "(", "fobj", ",", "'a'", "if", "append", "else", "'w'", ")", "as", "h5f", ":", "return", "func", "(", "obj", ",", "h5f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func", "(", "obj", ",", "fobj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorated_func" ]
Decorate an HDF5-writing function to open a filepath if needed ``func`` should be written to take the object to be written as the first argument, and then presume an `h5py.Group` as the second. This method uses keywords ``append`` and ``overwrite`` as follows if the output file already exists: - ``append=False, overwrite=False``: raise `~exceptions.IOError` - ``append=True``: open in mode ``a`` - ``append=False, overwrite=True``: open in mode ``w``
[ "Decorate", "an", "HDF5", "-", "writing", "function", "to", "open", "a", "filepath", "if", "needed" ]
python
train
41.653846
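How a decorated writer behaves, as a sketch; write_points, the file name, and the dataset names are illustrative:

@with_write_hdf5
def write_points(obj, h5group, name='points', **kwargs):
    h5group.create_dataset(name, data=obj)

write_points([1, 2, 3], 'out.h5')                             # new file, mode 'w'
write_points([4, 5, 6], 'out.h5', name='more', append=True)   # reopens in 'a'
# A third call without append/overwrite would raise IOError: file exists.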
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/kraus.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/kraus.py#L166-L177
def is_cptp(self, atol=None, rtol=None): """Return True if completely-positive trace-preserving.""" if self._data[1] is not None: return False if atol is None: atol = self._atol if rtol is None: rtol = self._rtol accum = 0j for op in self._data[0]: accum += np.dot(np.transpose(np.conj(op)), op) return is_identity_matrix(accum, rtol=rtol, atol=atol)
[ "def", "is_cptp", "(", "self", ",", "atol", "=", "None", ",", "rtol", "=", "None", ")", ":", "if", "self", ".", "_data", "[", "1", "]", "is", "not", "None", ":", "return", "False", "if", "atol", "is", "None", ":", "atol", "=", "self", ".", "_atol", "if", "rtol", "is", "None", ":", "rtol", "=", "self", ".", "_rtol", "accum", "=", "0j", "for", "op", "in", "self", ".", "_data", "[", "0", "]", ":", "accum", "+=", "np", ".", "dot", "(", "np", ".", "transpose", "(", "np", ".", "conj", "(", "op", ")", ")", ",", "op", ")", "return", "is_identity_matrix", "(", "accum", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")" ]
Return True if completely-positive trace-preserving.
[ "Return", "True", "if", "completely", "-", "positive", "trace", "-", "preserving", "." ]
python
test
36.916667
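The trace-preserving test is the Kraus completeness relation, sum_i K_i^dagger K_i = I; a numpy sketch for a bit-flip channel:

import numpy as np

p = 0.3
K0 = np.sqrt(1 - p) * np.eye(2)                 # identity branch
K1 = np.sqrt(p) * np.array([[0, 1], [1, 0]])    # bit-flip branch

accum = sum(K.conj().T @ K for K in (K0, K1))
assert np.allclose(accum, np.eye(2))            # CPTP: sums to identity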
clusterpoint/python-client-api
pycps/converters.py
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/converters.py#L85-L138
def dict_to_etree(source, root_tag=None):
    """ Recursively load dict/list representation of an XML tree into an etree representation.

        Args:
            source -- A dictionary representing an XML document where identical children tags are
                    contained in a list.

        Keyword args:
            root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
                    contains multiple root items, a list of etree's Elements will be returned.

        Returns:
            An ET.Element which is the root of an XML tree or a list of these.

    >>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
    <Element foo at 0x...>

    >>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
    [<Element foo at 0x...>, <Element bar at 0x...>]

    >>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
    '<document><item2>bar</item2><item1>foo</item1></document>'

    >>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
    '<document><foo>baz</foo></document>'

    >>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
    '<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
    """
    def dict_to_etree_recursive(source, parent):
        if hasattr(source, 'keys'):
            for key, value in source.iteritems():
                sub = ET.SubElement(parent, key)
                dict_to_etree_recursive(value, sub)
        elif isinstance(source, list):
            for element in source:
                dict_to_etree_recursive(element, parent)
        else:   # TODO: Add feature to include xml literals as special objects or a etree subtree
            parent.text = source

    if root_tag is None:
        if len(source) == 1:
            root_tag = source.keys()[0]
            source = source[root_tag]
        else:
            roots = []
            for tag, content in source.iteritems():
                root = ET.Element(tag)
                dict_to_etree_recursive(content, root)
                roots.append(root)
            return roots
    root = ET.Element(root_tag)
    dict_to_etree_recursive(source, root)
    return root
[ "def", "dict_to_etree", "(", "source", ",", "root_tag", "=", "None", ")", ":", "def", "dict_to_etree_recursive", "(", "source", ",", "parent", ")", ":", "if", "hasattr", "(", "source", ",", "'keys'", ")", ":", "for", "key", ",", "value", "in", "source", ".", "iteritems", "(", ")", ":", "sub", "=", "ET", ".", "SubElement", "(", "parent", ",", "key", ")", "dict_to_etree_recursive", "(", "value", ",", "sub", ")", "elif", "isinstance", "(", "source", ",", "list", ")", ":", "for", "element", "in", "source", ":", "dict_to_etree_recursive", "(", "element", ",", "parent", ")", "else", ":", "# TODO: Add feature to include xml literals as special objects or a etree subtree", "parent", ".", "text", "=", "source", "if", "root_tag", "is", "None", ":", "if", "len", "(", "source", ")", "==", "1", ":", "root_tag", "=", "source", ".", "keys", "(", ")", "[", "0", "]", "source", "=", "source", "[", "root_tag", "]", "else", ":", "roots", "=", "[", "]", "for", "tag", ",", "content", "in", "source", ".", "iteritems", "(", ")", ":", "root", "=", "ET", ".", "Element", "(", "tag", ")", "dict_to_etree_recursive", "(", "content", ",", "root", ")", "roots", ".", "append", "(", "root", ")", "return", "roots", "root", "=", "ET", ".", "Element", "(", "root_tag", ")", "dict_to_etree_recursive", "(", "source", ",", "root", ")", "return", "root" ]
Recursively load dict/list representation of an XML tree into an etree representation.

Args:
    source -- A dictionary representing an XML document where identical children tags are
            contained in a list.

Keyword args:
    root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
            contains multiple root items, a list of etree's Elements will be returned.

Returns:
    An ET.Element which is the root of an XML tree or a list of these.

>>> dict_to_etree({'foo': 'lorem'})     #doctest: +ELLIPSIS
<Element foo at 0x...>

>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'})     #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]

>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'

>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'

>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
[ "Recursively", "load", "dict", "/", "list", "representation", "of", "an", "XML", "tree", "into", "an", "etree", "representation", "." ]
python
train
40.5
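A short usage sketch for dict_to_etree above. Note the record's code is Python 2 (it relies on dict.iteritems() and on indexing dict.keys()); the sketch assumes the function is importable from pycps.converters, per the record's path, and runs under Python 2:

from pycps.converters import dict_to_etree
import xml.etree.ElementTree as ET

# Lists become repeated sibling tags; scalar leaves become element text.
tree = dict_to_etree({'person': {'name': 'Ada', 'langs': [{'lang': 'en'}, {'lang': 'fr'}]}})
print(ET.tostring(tree))
# e.g. '<person><langs><lang>en</lang><lang>fr</lang></langs><name>Ada</name></person>'
# (sibling order may vary: plain dicts were unordered in Python 2)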
refenv/cijoe
deprecated/modules/cij/nvme.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/deprecated/modules/cij/nvme.py#L30-L37
def cat_file(path):
    """Cat file and return content"""

    cmd = ["cat", path]
    status, stdout, _ = cij.ssh.command(cmd, shell=True, echo=True)
    if status:
        raise RuntimeError("cij.nvme.env: cat %s failed" % path)

    return stdout.strip()
[ "def", "cat_file", "(", "path", ")", ":", "cmd", "=", "[", "\"cat\"", ",", "path", "]", "status", ",", "stdout", ",", "_", "=", "cij", ".", "ssh", ".", "command", "(", "cmd", ",", "shell", "=", "True", ",", "echo", "=", "True", ")", "if", "status", ":", "raise", "RuntimeError", "(", "\"cij.nvme.env: cat %s failed\"", "%", "path", ")", "return", "stdout", ".", "strip", "(", ")" ]
Cat file and return content
[ "Cat", "file", "and", "return", "content" ]
python
valid
31.125
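cat_file above shells out through the project's cij.ssh wrapper. A rough local-only equivalent using the standard library, with subprocess standing in for cij.ssh.command (the (status, stdout, _) tuple is approximated; this stand-in is an assumption, not the project's API):

import subprocess

def cat_file_local(path):
    """Cat a local file and return its stripped content, raising on failure."""
    proc = subprocess.run(["cat", path], capture_output=True, text=True)  # Python 3.7+
    if proc.returncode:
        raise RuntimeError("cat %s failed" % path)
    return proc.stdout.strip()

print(cat_file_local("/etc/hostname"))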
googleapis/google-cloud-python
api_core/google/api_core/page_iterator.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/page_iterator.py#L385-L397
def _get_query_params(self):
    """Getter for query parameters for the next request.

    Returns:
        dict: A dictionary of query parameters.
    """
    result = {}
    if self.next_page_token is not None:
        result[self._PAGE_TOKEN] = self.next_page_token
    if self.max_results is not None:
        result[self._MAX_RESULTS] = self.max_results - self.num_results
    result.update(self.extra_params)
    return result
[ "def", "_get_query_params", "(", "self", ")", ":", "result", "=", "{", "}", "if", "self", ".", "next_page_token", "is", "not", "None", ":", "result", "[", "self", ".", "_PAGE_TOKEN", "]", "=", "self", ".", "next_page_token", "if", "self", ".", "max_results", "is", "not", "None", ":", "result", "[", "self", ".", "_MAX_RESULTS", "]", "=", "self", ".", "max_results", "-", "self", ".", "num_results", "result", ".", "update", "(", "self", ".", "extra_params", ")", "return", "result" ]
Getter for query parameters for the next request.

Returns:
    dict: A dictionary of query parameters.
[ "Getter", "for", "query", "parameters", "for", "the", "next", "request", "." ]
python
train
35.692308
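_get_query_params above merges a continuation token, a remaining-results cap, and any extra parameters into one query dict. A self-contained sketch of the same pattern with the iterator state passed in explicitly (the key names mirror common Google API pagination conventions but are assumptions here):

def get_query_params(next_page_token, max_results, num_results, extra_params):
    result = {}
    if next_page_token is not None:
        result["pageToken"] = next_page_token
    if max_results is not None:
        # Request only as many results as are still needed.
        result["maxResults"] = max_results - num_results
    result.update(extra_params)
    return result

print(get_query_params("abc123", 100, 40, {"prefix": "logs/"}))
# {'pageToken': 'abc123', 'maxResults': 60, 'prefix': 'logs/'}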
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L234-L245
def _encode_observations(self, observations):
    """Encodes observations as PNG."""
    return [
        Observation(
            self._session.obj.run(
                self._encoded_image_t.obj,
                feed_dict={self._decoded_image_p.obj: observation}
            ),
            self._decode_png
        )
        for observation in observations
    ]
[ "def", "_encode_observations", "(", "self", ",", "observations", ")", ":", "return", "[", "Observation", "(", "self", ".", "_session", ".", "obj", ".", "run", "(", "self", ".", "_encoded_image_t", ".", "obj", ",", "feed_dict", "=", "{", "self", ".", "_decoded_image_p", ".", "obj", ":", "observation", "}", ")", ",", "self", ".", "_decode_png", ")", "for", "observation", "in", "observations", "]" ]
Encodes observations as PNG.
[ "Encodes", "observations", "as", "PNG", "." ]
python
train
29.333333
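_encode_observations above feeds each frame through a pre-built encode_png op on a shared session. A minimal standalone version of that idea, assuming the TensorFlow 1.x graph-mode API (the record's Observation wrapper and its object handles are omitted):

import numpy as np
import tensorflow as tf  # assumes TF 1.x graph mode

# Build the graph once: a placeholder for a decoded frame and its PNG encoding.
decoded_image_p = tf.placeholder(tf.uint8, shape=[None, None, 3])
encoded_image_t = tf.image.encode_png(decoded_image_p)

observations = [np.zeros((64, 64, 3), dtype=np.uint8)]
with tf.Session() as sess:
    pngs = [sess.run(encoded_image_t, feed_dict={decoded_image_p: obs})
            for obs in observations]
print(len(pngs[0]))  # byte length of the PNG-encoded frame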
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1270-L1302
def get_name_value_hash_txid( self, name, value_hash ):
    """
    Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash.
    Return None if the name doesn't exist, or is revoked, or did not receive a NAME_UPDATE since
    it was last preordered.
    """
    rec = self.get_name( name )
    if rec is None:
        return None

    if rec['revoked']:
        return None

    # find the txid of the given value hash
    if rec['value_hash'] == value_hash:
        return rec['txid']
    else:
        # search backwards for it
        hist = rec['history']
        flat_hist = namedb_flatten_history( hist )
        for i in xrange(len(flat_hist)-1, 0, -1):
            delta = flat_hist[i]
            if delta['op'] == NAME_PREORDER:
                # this name was re-registered. skip
                return None

            if delta['value_hash'] == value_hash:
                # this is the txid that affected it
                return delta['txid']

        # not found
        return None
[ "def", "get_name_value_hash_txid", "(", "self", ",", "name", ",", "value_hash", ")", ":", "rec", "=", "self", ".", "get_name", "(", "name", ")", "if", "rec", "is", "None", ":", "return", "None", "if", "rec", "[", "'revoked'", "]", ":", "return", "None", "# find the txid of the given value hash", "if", "rec", "[", "'value_hash'", "]", "==", "value_hash", ":", "return", "rec", "[", "'txid'", "]", "else", ":", "# search backwards for it ", "hist", "=", "rec", "[", "'history'", "]", "flat_hist", "=", "namedb_flatten_history", "(", "hist", ")", "for", "i", "in", "xrange", "(", "len", "(", "flat_hist", ")", "-", "1", ",", "0", ",", "-", "1", ")", ":", "delta", "=", "flat_hist", "[", "i", "]", "if", "delta", "[", "'op'", "]", "==", "NAME_PREORDER", ":", "# this name was re-registered. skip", "return", "None", "if", "delta", "[", "'value_hash'", "]", "==", "value_hash", ":", "# this is the txid that affected it ", "return", "delta", "[", "'txid'", "]", "# not found", "return", "None" ]
Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash. Return None if the name doesn't exist, or is revoked, or did not receive a NAME_UPDATE since it was last preordered.
[ "Given", "a", "name", "and", "a", "value", "hash", "(", "i", ".", "e", ".", "the", "zone", "file", "hash", ")", "return", "the", "txid", "for", "the", "value", "hash", ".", "Return", "None", "if", "the", "name", "doesn", "t", "exist", "or", "is", "revoked", "or", "did", "not", "receive", "a", "NAME_UPDATE", "since", "it", "was", "last", "preordered", "." ]
python
train
34.060606
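get_name_value_hash_txid above resolves a zone file hash to its transaction: check the current record first, then walk the flattened history newest-to-oldest, giving up at a NAME_PREORDER (a re-registration). A simplified sketch over plain dicts (the opcode constant and the record/history layout are stand-ins for the real namedb structures):

NAME_PREORDER = "preorder"  # placeholder for the real opcode constant

def find_value_hash_txid(rec, flat_hist, value_hash):
    # Current zone file hash wins outright.
    if rec["value_hash"] == value_hash:
        return rec["txid"]
    # Walk history from the newest delta back to (but not including) index 0,
    # mirroring xrange(len(flat_hist) - 1, 0, -1) in the original.
    for delta in reversed(flat_hist[1:]):
        if delta["op"] == NAME_PREORDER:
            return None  # the name was re-registered; stop searching
        if delta["value_hash"] == value_hash:
            return delta["txid"]
    return None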