Dataset columns:

    text           string   (lengths 89 to 104k)
    code_tokens    list
    avg_line_len   float64  (7.91 to 980)
    score          float64  (0 to 630)
async def start_polling(self, timeout=20, relax=0.1, limit=None, reset_webhook=None,
                        fast: typing.Optional[bool] = True, error_sleep: int = 5):
    """
    Start long-polling

    :param timeout:
    :param relax:
    :param limit:
    :param reset_webhook:
    :param fast:
    :return:
    """
    if self._polling:
        raise RuntimeError('Polling already started')

    log.info('Start polling.')

    # context.set_value(MODE, LONG_POLLING)
    Dispatcher.set_current(self)
    Bot.set_current(self.bot)

    if reset_webhook is None:
        await self.reset_webhook(check=False)
    if reset_webhook:
        await self.reset_webhook(check=True)

    self._polling = True
    offset = None
    try:
        current_request_timeout = self.bot.timeout
        if current_request_timeout is not sentinel and timeout is not None:
            request_timeout = aiohttp.ClientTimeout(total=current_request_timeout.total + timeout or 1)
        else:
            request_timeout = None

        while self._polling:
            try:
                with self.bot.request_timeout(request_timeout):
                    updates = await self.bot.get_updates(limit=limit, offset=offset, timeout=timeout)
            except:
                log.exception('Cause exception while getting updates.')
                await asyncio.sleep(error_sleep)
                continue

            if updates:
                log.debug(f"Received {len(updates)} updates.")
                offset = updates[-1].update_id + 1
                self.loop.create_task(self._process_polling_updates(updates, fast))

            if relax:
                await asyncio.sleep(relax)
    finally:
        self._close_waiter._set_result(None)
        log.warning('Polling is stopped.')
[ "async", "def", "start_polling", "(", "self", ",", "timeout", "=", "20", ",", "relax", "=", "0.1", ",", "limit", "=", "None", ",", "reset_webhook", "=", "None", ",", "fast", ":", "typing", ".", "Optional", "[", "bool", "]", "=", "True", ",", "error_sleep", ":", "int", "=", "5", ")", ":", "if", "self", ".", "_polling", ":", "raise", "RuntimeError", "(", "'Polling already started'", ")", "log", ".", "info", "(", "'Start polling.'", ")", "# context.set_value(MODE, LONG_POLLING)", "Dispatcher", ".", "set_current", "(", "self", ")", "Bot", ".", "set_current", "(", "self", ".", "bot", ")", "if", "reset_webhook", "is", "None", ":", "await", "self", ".", "reset_webhook", "(", "check", "=", "False", ")", "if", "reset_webhook", ":", "await", "self", ".", "reset_webhook", "(", "check", "=", "True", ")", "self", ".", "_polling", "=", "True", "offset", "=", "None", "try", ":", "current_request_timeout", "=", "self", ".", "bot", ".", "timeout", "if", "current_request_timeout", "is", "not", "sentinel", "and", "timeout", "is", "not", "None", ":", "request_timeout", "=", "aiohttp", ".", "ClientTimeout", "(", "total", "=", "current_request_timeout", ".", "total", "+", "timeout", "or", "1", ")", "else", ":", "request_timeout", "=", "None", "while", "self", ".", "_polling", ":", "try", ":", "with", "self", ".", "bot", ".", "request_timeout", "(", "request_timeout", ")", ":", "updates", "=", "await", "self", ".", "bot", ".", "get_updates", "(", "limit", "=", "limit", ",", "offset", "=", "offset", ",", "timeout", "=", "timeout", ")", "except", ":", "log", ".", "exception", "(", "'Cause exception while getting updates.'", ")", "await", "asyncio", ".", "sleep", "(", "error_sleep", ")", "continue", "if", "updates", ":", "log", ".", "debug", "(", "f\"Received {len(updates)} updates.\"", ")", "offset", "=", "updates", "[", "-", "1", "]", ".", "update_id", "+", "1", "self", ".", "loop", ".", "create_task", "(", "self", ".", "_process_polling_updates", "(", "updates", ",", "fast", ")", ")", "if", "relax", ":", "await", "asyncio", ".", "sleep", "(", "relax", ")", "finally", ":", "self", ".", "_close_waiter", ".", "_set_result", "(", "None", ")", "log", ".", "warning", "(", "'Polling is stopped.'", ")" ]
33.491803
20.213115
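A side note on the timeout arithmetic in the sample above: because `+` binds tighter than `or`, the expression `current_request_timeout.total + timeout or 1` evaluates as `(total + timeout) or 1`, so the fallback only fires when the whole sum is falsy. A quick standalone demonstration:

total, timeout = 0, 0
print(total + timeout or 1)   # 1  -- evaluates as (0 + 0) or 1
total, timeout = 5, 20
print(total + timeout or 1)   # 25 -- evaluates as (5 + 20) or 1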
def _qname(self) -> Optional[QualName]:
    """Parse XML QName."""
    if self.test_string("*"):
        self.skip_ws()
        return False
    ident = self.yang_identifier()
    ws = self.skip_ws()
    try:
        next = self.peek()
    except EndOfInput:
        return ident, None
    if next == "(":
        return self._node_type(ident)
    if not ws and self.test_string(":"):
        res = (
            self.yang_identifier(),
            self.sctx.schema_data.prefix2ns(ident, self.sctx.text_mid))
    else:
        res = (ident, None)
    self.skip_ws()
    return res
[ "def", "_qname", "(", "self", ")", "->", "Optional", "[", "QualName", "]", ":", "if", "self", ".", "test_string", "(", "\"*\"", ")", ":", "self", ".", "skip_ws", "(", ")", "return", "False", "ident", "=", "self", ".", "yang_identifier", "(", ")", "ws", "=", "self", ".", "skip_ws", "(", ")", "try", ":", "next", "=", "self", ".", "peek", "(", ")", "except", "EndOfInput", ":", "return", "ident", ",", "None", "if", "next", "==", "\"(\"", ":", "return", "self", ".", "_node_type", "(", "ident", ")", "if", "not", "ws", "and", "self", ".", "test_string", "(", "\":\"", ")", ":", "res", "=", "(", "self", ".", "yang_identifier", "(", ")", ",", "self", ".", "sctx", ".", "schema_data", ".", "prefix2ns", "(", "ident", ",", "self", ".", "sctx", ".", "text_mid", ")", ")", "else", ":", "res", "=", "(", "ident", ",", "None", ")", "self", ".", "skip_ws", "(", ")", "return", "res" ]
30.47619
12.857143
def attach_virtual_server(self, ticket_id=None, virtual_id=None):
    """Attach a virtual server to a ticket.

    :param integer ticket_id: the id of the ticket to attach to
    :param integer virtual_id: the id of the virtual server to attach
    :returns: dict -- The new ticket attachment
    """
    return self.ticket.addAttachedVirtualGuest(virtual_id, id=ticket_id)
[ "def", "attach_virtual_server", "(", "self", ",", "ticket_id", "=", "None", ",", "virtual_id", "=", "None", ")", ":", "return", "self", ".", "ticket", ".", "addAttachedVirtualGuest", "(", "virtual_id", ",", "id", "=", "ticket_id", ")" ]
43.333333
23.555556
def make_line_segments(x, y, ispath=True):
    """
    Return an (n x 2 x 2) array of n line segments

    Parameters
    ----------
    x : array-like
        x points
    y : array-like
        y points
    ispath : bool
        If True, the points represent a path from one point to the next
        until the last. If False, then each pair of successive (even-odd
        pair) points yields a line.
    """
    if ispath:
        x = interleave(x[:-1], x[1:])
        y = interleave(y[:-1], y[1:])
    elif len(x) % 2:
        raise PlotnineError("Expects an even number of points")

    n = len(x) // 2
    segments = np.reshape(list(zip(x, y)), [n, 2, 2])
    return segments
[ "def", "make_line_segments", "(", "x", ",", "y", ",", "ispath", "=", "True", ")", ":", "if", "ispath", ":", "x", "=", "interleave", "(", "x", "[", ":", "-", "1", "]", ",", "x", "[", "1", ":", "]", ")", "y", "=", "interleave", "(", "y", "[", ":", "-", "1", "]", ",", "y", "[", "1", ":", "]", ")", "elif", "len", "(", "x", ")", "%", "2", ":", "raise", "PlotnineError", "(", "\"Expects an even number of points\"", ")", "n", "=", "len", "(", "x", ")", "//", "2", "segments", "=", "np", ".", "reshape", "(", "list", "(", "zip", "(", "x", ",", "y", ")", ")", ",", "[", "n", ",", "2", ",", "2", "]", ")", "return", "segments" ]
27.416667
18.583333
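To make the path mode above concrete, here is a minimal self-contained sketch. The `interleave` below is a hypothetical stand-in for plotnine's helper of the same name (assumed to merge two arrays element by element); three points become two segments.

import numpy as np

def interleave(a, b):
    # hypothetical stand-in: [1, 2], [3, 4] -> [1, 3, 2, 4]
    out = np.empty(len(a) + len(b), dtype=np.result_type(a, b))
    out[0::2], out[1::2] = a, b
    return out

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 0.0])
xs = interleave(x[:-1], x[1:])          # [0, 1, 1, 2]
ys = interleave(y[:-1], y[1:])          # [0, 1, 1, 0]
segments = np.reshape(list(zip(xs, ys)), [len(xs) // 2, 2, 2])
print(segments.shape)  # (2, 2, 2): segments (0,0)-(1,1) and (1,1)-(2,0)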
def run_netsh_command(netsh_args):
    """Execute a netsh command and return its exit code."""
    devnull = open(os.devnull, 'w')
    command_raw = 'netsh interface ipv4 ' + netsh_args
    return int(subprocess.call(command_raw, stdout=devnull))
[ "def", "run_netsh_command", "(", "netsh_args", ")", ":", "devnull", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "command_raw", "=", "'netsh interface ipv4 '", "+", "netsh_args", "return", "int", "(", "subprocess", ".", "call", "(", "command_raw", ",", "stdout", "=", "devnull", ")", ")" ]
47.8
9
def find_if_multibase(column, quality_cutoff, base_cutoff, base_fraction_cutoff):
    """
    Finds if a position in a pileup has more than one base present.
    :param column: A pileupColumn generated by pysam
    :param quality_cutoff: Desired minimum phred quality for a base in order to be counted towards a multi-allelic column
    :param base_cutoff: Minimum number of bases needed to support presence of a base.
    :param base_fraction_cutoff: Minimum fraction of bases needed to support presence of a base. If specified,
        both the base_cutoff and base_fraction_cutoff will have to be met
    :return: If position has more than one base, a dictionary with counts for the bases. Otherwise, returns empty dictionary
    """
    # Sometimes the qualities come out to ridiculously high (>70) values. Looks to be because sometimes reads
    # are overlapping and the qualities get summed for overlapping bases. Issue opened on pysam.
    unfiltered_base_qualities = dict()
    for read in column.pileups:
        if read.query_position is not None:  # Not entirely sure why this is sometimes None, but it causes bad stuff
            reference_sequence = read.alignment.get_reference_sequence()
            previous_position = read.query_position - 1 if read.query_position > 1 else 0
            next_position = read.query_position + 1  # This causes index errors. Fix at some point soon.
            # Another stringency check - to make sure that we're actually looking at a point mutation, check that the
            # base before and after the one we're looking at match the reference. With Nanopore data, lots of indels and
            # the like cause false positives, so this filters those out.
            try:  # Need to actually handle this at some point. For now, be lazy
                previous_reference_base = reference_sequence[previous_position]
                next_reference_base = reference_sequence[next_position]
                previous_base = read.alignment.query_sequence[previous_position]
                next_base = read.alignment.query_sequence[next_position]
                base = read.alignment.query_sequence[read.query_position]
                quality = read.alignment.query_qualities[read.query_position]
                if previous_reference_base == previous_base and next_reference_base == next_base:
                    if base not in unfiltered_base_qualities:
                        unfiltered_base_qualities[base] = [quality]
                    else:
                        unfiltered_base_qualities[base].append(quality)
            except IndexError:
                pass
    # Now check that at least two of the bases present have high quality support.
    # First remove all low quality bases.
    # Use dictionary comprehension to make a new dictionary where only scores above threshold are kept.
    # Internally list comprehension is used to filter the list.
    filtered_base_qualities = {base: [score for score in scores if score >= quality_cutoff]
                               for (base, scores) in unfiltered_base_qualities.items()}
    # Now remove bases that have no high quality scores.
    # Use dictionary comprehension to make a new dictionary where bases that have a non-empty scores list are kept.
    filtered_base_qualities = {base: scores for (base, scores) in filtered_base_qualities.items() if scores}
    # If we have fewer than two bases with high quality scores, ignore things.
    if len(filtered_base_qualities) < 2:
        return dict()
    # Now that filtered_base_qualities only contains bases with high quality support, make a dict with base counts.
    high_quality_base_count = {base: len(scores) for (base, scores) in filtered_base_qualities.items()}
    if number_of_bases_above_threshold(high_quality_base_count,
                                       base_count_cutoff=base_cutoff,
                                       base_fraction_cutoff=base_fraction_cutoff) > 1:
        logging.debug('base qualities before filtering: {0}'.format(unfiltered_base_qualities))
        logging.debug('base qualities after filtering: {0}'.format(filtered_base_qualities))
        logging.debug('SNVs found at position {0}: {1}\n'.format(column.pos, high_quality_base_count))
        return high_quality_base_count
    else:
        # logging.debug('No SNVs\n')
        return dict()
[ "def", "find_if_multibase", "(", "column", ",", "quality_cutoff", ",", "base_cutoff", ",", "base_fraction_cutoff", ")", ":", "# Sometimes the qualities come out to ridiculously high (>70) values. Looks to be because sometimes reads", "# are overlapping and the qualities get summed for overlapping bases. Issue opened on pysam.", "unfiltered_base_qualities", "=", "dict", "(", ")", "for", "read", "in", "column", ".", "pileups", ":", "if", "read", ".", "query_position", "is", "not", "None", ":", "# Not entirely sure why this is sometimes None, but it causes bad stuff", "reference_sequence", "=", "read", ".", "alignment", ".", "get_reference_sequence", "(", ")", "previous_position", "=", "read", ".", "query_position", "-", "1", "if", "read", ".", "query_position", ">", "1", "else", "0", "next_position", "=", "read", ".", "query_position", "+", "1", "# This causes index errors. Fix at some point soon.", "# Another stringency check - to make sure that we're actually looking at a point mutation, check that the", "# base before and after the one we're looking at match the reference. With Nanopore data, lots of indels and", "# the like cause false positives, so this filters those out.", "try", ":", "# Need to actually handle this at some point. For now, be lazy", "previous_reference_base", "=", "reference_sequence", "[", "previous_position", "]", "next_reference_base", "=", "reference_sequence", "[", "next_position", "]", "previous_base", "=", "read", ".", "alignment", ".", "query_sequence", "[", "previous_position", "]", "next_base", "=", "read", ".", "alignment", ".", "query_sequence", "[", "next_position", "]", "base", "=", "read", ".", "alignment", ".", "query_sequence", "[", "read", ".", "query_position", "]", "quality", "=", "read", ".", "alignment", ".", "query_qualities", "[", "read", ".", "query_position", "]", "if", "previous_reference_base", "==", "previous_base", "and", "next_reference_base", "==", "next_base", ":", "if", "base", "not", "in", "unfiltered_base_qualities", ":", "unfiltered_base_qualities", "[", "base", "]", "=", "[", "quality", "]", "else", ":", "unfiltered_base_qualities", "[", "base", "]", ".", "append", "(", "quality", ")", "except", "IndexError", ":", "pass", "# Now check that at least two bases for each of the bases present high quality.", "# first remove all low quality bases", "# Use dictionary comprehension to make a new dictionary where only scores above threshold are kept.", "# Internally list comprehension is used to filter the list", "filtered_base_qualities", "=", "{", "base", ":", "[", "score", "for", "score", "in", "scores", "if", "score", ">=", "quality_cutoff", "]", "for", "(", "base", ",", "scores", ")", "in", "unfiltered_base_qualities", ".", "items", "(", ")", "}", "# Now remove bases that have no high quality scores", "# Use dictionary comprehension to make a new dictionary where bases that have a non-empty scores list are kept", "filtered_base_qualities", "=", "{", "base", ":", "scores", "for", "(", "base", ",", "scores", ")", "in", "filtered_base_qualities", ".", "items", "(", ")", "if", "scores", "}", "# If we less than two bases with high quality scores, ignore things.", "if", "len", "(", "filtered_base_qualities", ")", "<", "2", ":", "return", "dict", "(", ")", "# Now that filtered_base_qualities only contains bases with more than one HQ base, make just a dict with base counts with dict comprehension", "high_quality_base_count", "=", "{", "base", ":", "len", "(", "scores", ")", "for", "(", "base", ",", "scores", ")", "in", 
"filtered_base_qualities", ".", "items", "(", ")", "}", "if", "number_of_bases_above_threshold", "(", "high_quality_base_count", ",", "base_count_cutoff", "=", "base_cutoff", ",", "base_fraction_cutoff", "=", "base_fraction_cutoff", ")", ">", "1", ":", "logging", ".", "debug", "(", "'base qualities before filtering: {0}'", ".", "format", "(", "unfiltered_base_qualities", ")", ")", "logging", ".", "debug", "(", "'base qualities after filtering: {0}'", ".", "format", "(", "filtered_base_qualities", ")", ")", "logging", ".", "debug", "(", "'SNVs found at position {0}: {1}\\n'", ".", "format", "(", "column", ".", "pos", ",", "high_quality_base_count", ")", ")", "return", "high_quality_base_count", "else", ":", "# logging.debug('No SNVs\\n')", "return", "dict", "(", ")" ]
68.096774
40.322581
def _grow_trees(self):
    """
    Adds new trees to the forest according to the specified growth method.
    """
    if self.grow_method == GROW_AUTO_INCREMENTAL:
        self.tree_kwargs['auto_grow'] = True

    while len(self.trees) < self.size:
        self.trees.append(Tree(data=self.data, **self.tree_kwargs))
[ "def", "_grow_trees", "(", "self", ")", ":", "if", "self", ".", "grow_method", "==", "GROW_AUTO_INCREMENTAL", ":", "self", ".", "tree_kwargs", "[", "'auto_grow'", "]", "=", "True", "while", "len", "(", "self", ".", "trees", ")", "<", "self", ".", "size", ":", "self", ".", "trees", ".", "append", "(", "Tree", "(", "data", "=", "self", ".", "data", ",", "*", "*", "self", ".", "tree_kwargs", ")", ")" ]
38.222222
15.777778
def thumbnail(self, path, height, width, quality=100, **kwargs):
    """Get a thumbnail of a file.

    :param path: remote file path
    :param height: thumbnail height
    :param width: thumbnail width
    :param quality: thumbnail quality, defaults to 100
    :return: requests.Response

    .. note::
        An HTTP 404 response means no thumbnail form exists for the file.
    """
    params = {'ec': 1, 'path': path, 'quality': quality,
              'width': width, 'height': height}
    url = 'http://{0}/rest/2.0/pcs/thumbnail'.format(BAIDUPCS_SERVER)
    return self._request('thumbnail', 'generate', url=url,
                         extra_params=params, **kwargs)
[ "def", "thumbnail", "(", "self", ",", "path", ",", "height", ",", "width", ",", "quality", "=", "100", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'ec'", ":", "1", ",", "'path'", ":", "path", ",", "'quality'", ":", "quality", ",", "'width'", ":", "width", ",", "'height'", ":", "height", "}", "url", "=", "'http://{0}/rest/2.0/pcs/thumbnail'", ".", "format", "(", "BAIDUPCS_SERVER", ")", "return", "self", ".", "_request", "(", "'thumbnail'", ",", "'generate'", ",", "url", "=", "url", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
30.190476
18.142857
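A hypothetical call against the method above, assuming `pcs` is an authenticated client instance (the path is a placeholder):

resp = pcs.thumbnail('/apps/album/photo.jpg', height=128, width=128, quality=80)
if resp.status_code == 404:
    print('No thumbnail form exists for this file.')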
def _intersection_with_si(self, si):
    """
    Intersection with another :class:`StridedInterval`.

    :param si: The other operand
    :return:
    """
    new_si_set = set()
    for si_ in self._si_set:
        r = si_.intersection(si)
        new_si_set.add(r)

    if len(new_si_set):
        ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
        if ret.should_collapse():
            return ret.collapse()
        else:
            return ret
    else:
        # There is no intersection between two operands
        return StridedInterval.empty(self.bits)
[ "def", "_intersection_with_si", "(", "self", ",", "si", ")", ":", "new_si_set", "=", "set", "(", ")", "for", "si_", "in", "self", ".", "_si_set", ":", "r", "=", "si_", ".", "intersection", "(", "si", ")", "new_si_set", ".", "add", "(", "r", ")", "if", "len", "(", "new_si_set", ")", ":", "ret", "=", "DiscreteStridedIntervalSet", "(", "bits", "=", "self", ".", "bits", ",", "si_set", "=", "new_si_set", ")", "if", "ret", ".", "should_collapse", "(", ")", ":", "return", "ret", ".", "collapse", "(", ")", "else", ":", "return", "ret", "else", ":", "# There is no intersection between two operands", "return", "StridedInterval", ".", "empty", "(", "self", ".", "bits", ")" ]
27.636364
19.454545
def find_behind_subscriptions(self, request):
    """
    Starts a celery task that looks through active subscriptions to find
    any subscriptions that are behind where they should be, and adds a
    BehindSubscription for them.
    """
    task_id = find_behind_subscriptions.delay()
    return Response(
        {"accepted": True, "task_id": str(task_id)}, status=status.HTTP_202_ACCEPTED
    )
[ "def", "find_behind_subscriptions", "(", "self", ",", "request", ")", ":", "task_id", "=", "find_behind_subscriptions", ".", "delay", "(", ")", "return", "Response", "(", "{", "\"accepted\"", ":", "True", ",", "\"task_id\"", ":", "str", "(", "task_id", ")", "}", ",", "status", "=", "status", ".", "HTTP_202_ACCEPTED", ")" ]
38.636364
20.454545
def _restore_backup(self):
    """Restore the specified database."""
    input_filename, input_file = self._get_backup_file(database=self.database_name,
                                                       servername=self.servername)
    self.logger.info("Restoring backup for database '%s' and server '%s'",
                     self.database_name, self.servername)
    self.logger.info("Restoring: %s" % input_filename)
    if self.decrypt:
        unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
                                                                self.passphrase)
        input_file.close()
        input_file = unencrypted_file
    if self.uncompress:
        uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
        input_file.close()
        input_file = uncompressed_file
    self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
    if self.interactive:
        self._ask_confirmation()
    input_file.seek(0)
    self.connector = get_connector(self.database_name)
    self.connector.restore_dump(input_file)
[ "def", "_restore_backup", "(", "self", ")", ":", "input_filename", ",", "input_file", "=", "self", ".", "_get_backup_file", "(", "database", "=", "self", ".", "database_name", ",", "servername", "=", "self", ".", "servername", ")", "self", ".", "logger", ".", "info", "(", "\"Restoring backup for database '%s' and server '%s'\"", ",", "self", ".", "database_name", ",", "self", ".", "servername", ")", "self", ".", "logger", ".", "info", "(", "\"Restoring: %s\"", "%", "input_filename", ")", "if", "self", ".", "decrypt", ":", "unencrypted_file", ",", "input_filename", "=", "utils", ".", "unencrypt_file", "(", "input_file", ",", "input_filename", ",", "self", ".", "passphrase", ")", "input_file", ".", "close", "(", ")", "input_file", "=", "unencrypted_file", "if", "self", ".", "uncompress", ":", "uncompressed_file", ",", "input_filename", "=", "utils", ".", "uncompress_file", "(", "input_file", ",", "input_filename", ")", "input_file", ".", "close", "(", ")", "input_file", "=", "uncompressed_file", "self", ".", "logger", ".", "info", "(", "\"Restore tempfile created: %s\"", ",", "utils", ".", "handle_size", "(", "input_file", ")", ")", "if", "self", ".", "interactive", ":", "self", ".", "_ask_confirmation", "(", ")", "input_file", ".", "seek", "(", "0", ")", "self", ".", "connector", "=", "get_connector", "(", "self", ".", "database_name", ")", "self", ".", "connector", ".", "restore_dump", "(", "input_file", ")" ]
47.72
24.56
def parseParams(string):
    """
    Parse parameters
    """
    all = params_re.findall(string)
    allParameters = []
    for tup in all:
        paramList = [tup[0]]  # tup looks like (name, valuesString)
        for pair in param_values_re.findall(tup[1]):
            # pair looks like ('', value) or (value, '')
            if pair[0] != '':
                paramList.append(pair[0])
            else:
                paramList.append(pair[1])
        allParameters.append(paramList)
    return allParameters
[ "def", "parseParams", "(", "string", ")", ":", "all", "=", "params_re", ".", "findall", "(", "string", ")", "allParameters", "=", "[", "]", "for", "tup", "in", "all", ":", "paramList", "=", "[", "tup", "[", "0", "]", "]", "# tup looks like (name, valuesString)", "for", "pair", "in", "param_values_re", ".", "findall", "(", "tup", "[", "1", "]", ")", ":", "# pair looks like ('', value) or (value, '')", "if", "pair", "[", "0", "]", "!=", "''", ":", "paramList", ".", "append", "(", "pair", "[", "0", "]", ")", "else", ":", "paramList", ".", "append", "(", "pair", "[", "1", "]", ")", "allParameters", ".", "append", "(", "paramList", ")", "return", "allParameters" ]
31.25
11.75
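The patterns `params_re` and `param_values_re` are not part of the sample above; the sketch below supplies hypothetical stand-ins with compatible group layouts, purely so the function can be exercised end to end.

import re

# Hypothetical stand-ins: the real patterns live in the source module.
params_re = re.compile(r'(\w+)=((?:"[^"]*"|[^";])+)')   # -> (name, valuesString)
param_values_re = re.compile(r'"([^"]*)"|([^",]+)')     # -> (quoted, unquoted)

print(parseParams('size=10,20;name="a,b"'))
# [['size', '10', '20'], ['name', 'a,b']]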
def pop_to(source, dest, key, name=None):
    """
    A convenience function which pops a key k from source to dest.
    None values are not passed on.  If k already exists in dest an
    error is raised.
    """
    value = source.pop(key, None)
    if value is not None:
        safe_setitem(dest, key, value, name=name)
    return value
[ "def", "pop_to", "(", "source", ",", "dest", ",", "key", ",", "name", "=", "None", ")", ":", "value", "=", "source", ".", "pop", "(", "key", ",", "None", ")", "if", "value", "is", "not", "None", ":", "safe_setitem", "(", "dest", ",", "key", ",", "value", ",", "name", "=", "name", ")", "return", "value" ]
33
12.8
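A short usage sketch; `safe_setitem` is not shown in the sample, so a minimal hypothetical version is defined here that raises on key collisions, matching the docstring's description.

def safe_setitem(dest, key, value, name=None):
    # hypothetical: mirrors the documented "error if key exists" behavior
    if key in dest:
        raise ValueError('key {0!r} already exists'.format(name or key))
    dest[key] = value

source, dest = {'units': 'm', 'empty': None}, {}
pop_to(source, dest, 'units')   # value moves to dest
pop_to(source, dest, 'empty')   # None is popped but not passed on
print(source, dest)             # {} {'units': 'm'}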
def toMBI(self, getMemoryDump=False):
    """
    Returns a L{win32.MemoryBasicInformation} object using the data
    retrieved from the database.

    @type  getMemoryDump: bool
    @param getMemoryDump: (Optional) If C{True} retrieve the memory dump.
        Defaults to C{False} since this may be a costly operation.

    @rtype:  L{win32.MemoryBasicInformation}
    @return: Memory block information.
    """
    mbi = win32.MemoryBasicInformation()
    mbi.BaseAddress = self.address
    mbi.RegionSize = self.size
    mbi.State = self._parse_state(self.state)
    mbi.Protect = self._parse_access(self.access)
    mbi.Type = self._parse_type(self.type)
    if self.alloc_base is not None:
        mbi.AllocationBase = self.alloc_base
    else:
        mbi.AllocationBase = mbi.BaseAddress
    if self.alloc_access is not None:
        mbi.AllocationProtect = self._parse_access(self.alloc_access)
    else:
        mbi.AllocationProtect = mbi.Protect
    if self.filename is not None:
        mbi.filename = self.filename
    if getMemoryDump and self.content is not None:
        mbi.content = self.content
    return mbi
[ "def", "toMBI", "(", "self", ",", "getMemoryDump", "=", "False", ")", ":", "mbi", "=", "win32", ".", "MemoryBasicInformation", "(", ")", "mbi", ".", "BaseAddress", "=", "self", ".", "address", "mbi", ".", "RegionSize", "=", "self", ".", "size", "mbi", ".", "State", "=", "self", ".", "_parse_state", "(", "self", ".", "state", ")", "mbi", ".", "Protect", "=", "self", ".", "_parse_access", "(", "self", ".", "access", ")", "mbi", ".", "Type", "=", "self", ".", "_parse_type", "(", "self", ".", "type", ")", "if", "self", ".", "alloc_base", "is", "not", "None", ":", "mbi", ".", "AllocationBase", "=", "self", ".", "alloc_base", "else", ":", "mbi", ".", "AllocationBase", "=", "mbi", ".", "BaseAddress", "if", "self", ".", "alloc_access", "is", "not", "None", ":", "mbi", ".", "AllocationProtect", "=", "self", ".", "_parse_access", "(", "self", ".", "alloc_access", ")", "else", ":", "mbi", ".", "AllocationProtect", "=", "mbi", ".", "Protect", "if", "self", ".", "filename", "is", "not", "None", ":", "mbi", ".", "filename", "=", "self", ".", "filename", "if", "getMemoryDump", "and", "self", ".", "content", "is", "not", "None", ":", "mbi", ".", "content", "=", "self", ".", "content", "return", "mbi" ]
39.709677
13.129032
def get_stdev(self, asset_type):
    """
    Returns the standard deviation for a set of a certain asset type.

    :param asset_type: ``str`` of the asset type to calculate standard
        deviation for.
    :returns: An ``int`` or ``float`` of standard deviation, depending on
        the self.decimal_precision
    """
    load_times = []
    # Handle edge cases like TTFB
    if asset_type == 'ttfb':
        for page in self.pages:
            if page.time_to_first_byte is not None:
                load_times.append(page.time_to_first_byte)
    elif asset_type not in self.asset_types and asset_type != 'page':
        raise ValueError('asset_type must be one of:\nttfb\n{0}'.format(
            '\n'.join(self.asset_types)))
    else:
        load_times = self.get_load_times(asset_type)

    if not load_times or not sum(load_times):
        return 0
    return round(stdev(load_times), self.decimal_precision)
[ "def", "get_stdev", "(", "self", ",", "asset_type", ")", ":", "load_times", "=", "[", "]", "# Handle edge cases like TTFB", "if", "asset_type", "==", "'ttfb'", ":", "for", "page", "in", "self", ".", "pages", ":", "if", "page", ".", "time_to_first_byte", "is", "not", "None", ":", "load_times", ".", "append", "(", "page", ".", "time_to_first_byte", ")", "elif", "asset_type", "not", "in", "self", ".", "asset_types", "and", "asset_type", "!=", "'page'", ":", "raise", "ValueError", "(", "'asset_type must be one of:\\nttfb\\n{0}'", ".", "format", "(", "'\\n'", ".", "join", "(", "self", ".", "asset_types", ")", ")", ")", "else", ":", "load_times", "=", "self", ".", "get_load_times", "(", "asset_type", ")", "if", "not", "load_times", "or", "not", "sum", "(", "load_times", ")", ":", "return", "0", "return", "round", "(", "stdev", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
39.84
17.6
def verify_x509_cert_chain(cert_chain, ca_pem_file=None, ca_path=None):
    """
    Look at certs in the cert chain and add them to the store one by one.
    Return the cert at the end of the chain. That is the cert to be used
    by the caller for verifying.

    From https://www.w3.org/TR/xmldsig-core2/#sec-X509Data:
    "All certificates appearing in an X509Data element must relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.
    No ordering is implied by the above constraints"
    """
    from OpenSSL import SSL
    context = SSL.Context(SSL.TLSv1_METHOD)
    if ca_pem_file is None and ca_path is None:
        import certifi
        ca_pem_file = certifi.where()
    context.load_verify_locations(ensure_bytes(ca_pem_file, none_ok=True), capath=ca_path)
    store = context.get_cert_store()
    certs = list(reversed(cert_chain))
    end_of_chain, last_error = None, None
    while len(certs) > 0:
        for cert in certs:
            try:
                end_of_chain = _add_cert_to_store(store, cert)
                certs.remove(cert)
                break
            except RedundantCert:
                certs.remove(cert)
                if end_of_chain is None:
                    end_of_chain = cert
                break
            except Exception as e:
                last_error = e
        else:
            raise last_error
    return end_of_chain
[ "def", "verify_x509_cert_chain", "(", "cert_chain", ",", "ca_pem_file", "=", "None", ",", "ca_path", "=", "None", ")", ":", "from", "OpenSSL", "import", "SSL", "context", "=", "SSL", ".", "Context", "(", "SSL", ".", "TLSv1_METHOD", ")", "if", "ca_pem_file", "is", "None", "and", "ca_path", "is", "None", ":", "import", "certifi", "ca_pem_file", "=", "certifi", ".", "where", "(", ")", "context", ".", "load_verify_locations", "(", "ensure_bytes", "(", "ca_pem_file", ",", "none_ok", "=", "True", ")", ",", "capath", "=", "ca_path", ")", "store", "=", "context", ".", "get_cert_store", "(", ")", "certs", "=", "list", "(", "reversed", "(", "cert_chain", ")", ")", "end_of_chain", ",", "last_error", "=", "None", ",", "None", "while", "len", "(", "certs", ")", ">", "0", ":", "for", "cert", "in", "certs", ":", "try", ":", "end_of_chain", "=", "_add_cert_to_store", "(", "store", ",", "cert", ")", "certs", ".", "remove", "(", "cert", ")", "break", "except", "RedundantCert", ":", "certs", ".", "remove", "(", "cert", ")", "if", "end_of_chain", "is", "None", ":", "end_of_chain", "=", "cert", "break", "except", "Exception", "as", "e", ":", "last_error", "=", "e", "else", ":", "raise", "last_error", "return", "end_of_chain" ]
42.588235
17.647059
def on_connected(self, headers, body):
    """
    Once the connection is established, and 'heart-beat' is found in the
    headers, we calculate the real heartbeat numbers (based on what the
    server sent and what was specified by the client) - if the heartbeats
    are not 0, we start up the heartbeat loop accordingly.

    :param dict headers: headers in the connection message
    :param body: the message body
    """
    if 'heart-beat' in headers:
        self.heartbeats = utils.calculate_heartbeats(
            headers['heart-beat'].replace(' ', '').split(','), self.heartbeats)
        if self.heartbeats != (0, 0):
            self.send_sleep = self.heartbeats[0] / 1000

            # by default, receive gets an additional grace of 50%
            # set a different heart-beat-receive-scale when creating the connection to override that
            self.receive_sleep = (self.heartbeats[1] / 1000) * self.heart_beat_receive_scale
            log.debug("Setting receive_sleep to %s", self.receive_sleep)

            # Give grace of receiving the first heartbeat
            self.received_heartbeat = monotonic() + self.receive_sleep

            self.running = True
            if self.heartbeat_thread is None:
                self.heartbeat_thread = utils.default_create_thread(
                    self.__heartbeat_loop)
                self.heartbeat_thread.name = "StompHeartbeat%s" % \
                    getattr(self.heartbeat_thread, "name", "Thread")
[ "def", "on_connected", "(", "self", ",", "headers", ",", "body", ")", ":", "if", "'heart-beat'", "in", "headers", ":", "self", ".", "heartbeats", "=", "utils", ".", "calculate_heartbeats", "(", "headers", "[", "'heart-beat'", "]", ".", "replace", "(", "' '", ",", "''", ")", ".", "split", "(", "','", ")", ",", "self", ".", "heartbeats", ")", "if", "self", ".", "heartbeats", "!=", "(", "0", ",", "0", ")", ":", "self", ".", "send_sleep", "=", "self", ".", "heartbeats", "[", "0", "]", "/", "1000", "# by default, receive gets an additional grace of 50%", "# set a different heart-beat-receive-scale when creating the connection to override that", "self", ".", "receive_sleep", "=", "(", "self", ".", "heartbeats", "[", "1", "]", "/", "1000", ")", "*", "self", ".", "heart_beat_receive_scale", "log", ".", "debug", "(", "\"Setting receive_sleep to %s\"", ",", "self", ".", "receive_sleep", ")", "# Give grace of receiving the first heartbeat", "self", ".", "received_heartbeat", "=", "monotonic", "(", ")", "+", "self", ".", "receive_sleep", "self", ".", "running", "=", "True", "if", "self", ".", "heartbeat_thread", "is", "None", ":", "self", ".", "heartbeat_thread", "=", "utils", ".", "default_create_thread", "(", "self", ".", "__heartbeat_loop", ")", "self", ".", "heartbeat_thread", ".", "name", "=", "\"StompHeartbeat%s\"", "%", "getattr", "(", "self", ".", "heartbeat_thread", ",", "\"name\"", ",", "\"Thread\"", ")" ]
53.172414
27.241379
def account_info(remote, resp):
    """Retrieve remote account information used to find local user.

    It returns a dictionary with the following structure:

    .. code-block:: python

        {
            'user': {
                'email': '...',
                'profile': {
                    'username': '...',
                    'full_name': '...',
                }
            },
            'external_id': 'github-unique-identifier',
            'external_method': 'github',
        }

    Information inside the user dictionary is available to other modules.
    For example, it is used by the invenio-userprofiles module to fill the
    user profile.

    :param remote: The remote application.
    :param resp: The response.
    :returns: A dictionary with the user information.
    """
    gh = github3.login(token=resp['access_token'])
    me = gh.me()
    return dict(
        user=dict(
            email=_extract_email(gh),
            profile=dict(
                username=me.login,
                full_name=me.name,
            ),
        ),
        external_id=str(me.id),
        external_method='github'
    )
[ "def", "account_info", "(", "remote", ",", "resp", ")", ":", "gh", "=", "github3", ".", "login", "(", "token", "=", "resp", "[", "'access_token'", "]", ")", "me", "=", "gh", ".", "me", "(", ")", "return", "dict", "(", "user", "=", "dict", "(", "email", "=", "_extract_email", "(", "gh", ")", ",", "profile", "=", "dict", "(", "username", "=", "me", ".", "login", ",", "full_name", "=", "me", ".", "name", ",", ")", ",", ")", ",", "external_id", "=", "str", "(", "me", ".", "id", ")", ",", "external_method", "=", "'github'", ")" ]
27.65
18.5
def variables(self, value):
    """
    Setter for **self.__variables** attribute.

    :param value: Attribute value.
    :type value: dict
    """
    if value is not None:
        assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("variables", value)
        for key, element in value.iteritems():
            assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
                "variables", key)
            assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
                "variables", element)
    self.__variables = value
[ "def", "variables", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "dict", ",", "\"'{0}' attribute: '{1}' type is not 'dict'!\"", ".", "format", "(", "\"variables\"", ",", "value", ")", "for", "key", ",", "element", "in", "value", ".", "iteritems", "(", ")", ":", "assert", "type", "(", "key", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"variables\"", ",", "key", ")", "assert", "type", "(", "element", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"variables\"", ",", "element", ")", "self", ".", "__variables", "=", "value" ]
41.625
21.75
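The setter above is Python 2 code (`iteritems`, `unicode`). For reference, a sketch of the equivalent checks in Python 3, where `dict.items()` and `str` take their place (not the library's own code):

@variables.setter
def variables(self, value):
    if value is not None:
        assert isinstance(value, dict), "'variables' attribute: value is not a 'dict'!"
        for key, element in value.items():
            assert isinstance(key, str) and isinstance(element, str), \
                "'variables' attribute: keys and values must be 'str'!"
    self.__variables = value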
def _make_request(self, url, **kwargs):
    """
    Make a request to an OAuth2 endpoint
    """
    response = requests.post(url, **kwargs)
    try:
        return response.json()
    except ValueError:
        pass
    return parse_qs(response.content)
[ "def", "_make_request", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "*", "*", "kwargs", ")", "try", ":", "return", "response", ".", "json", "(", ")", "except", "ValueError", ":", "pass", "return", "parse_qs", "(", "response", ".", "content", ")" ]
28.1
8.5
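The `parse_qs` fallback exists because some OAuth2 providers return form-encoded bodies instead of JSON. For example:

from urllib.parse import parse_qs

print(parse_qs('access_token=abc123&token_type=bearer'))
# {'access_token': ['abc123'], 'token_type': ['bearer']}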
def relative_path(path):
    """
    Return the given path relative to this file.
    """
    return os.path.join(os.path.dirname(__file__), path)
[ "def", "relative_path", "(", "path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "path", ")" ]
28.4
8
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
    """
    Return the request params we would send to the api.
    """
    url, params = self._prepare_request(command, query)
    return {
        "url": url, "params": params, "files": files, "stream": use_long_polling,
        "verify": True,  # No self signed certificates. Telegram should be trustworthy anyway...
        "timeout": request_timeout
    }
[ "def", "do", "(", "self", ",", "command", ",", "files", "=", "None", ",", "use_long_polling", "=", "False", ",", "request_timeout", "=", "None", ",", "*", "*", "query", ")", ":", "url", ",", "params", "=", "self", ".", "_prepare_request", "(", "command", ",", "query", ")", "return", "{", "\"url\"", ":", "url", ",", "\"params\"", ":", "params", ",", "\"files\"", ":", "files", ",", "\"stream\"", ":", "use_long_polling", ",", "\"verify\"", ":", "True", ",", "# No self signed certificates. Telegram should be trustworthy anyway...", "\"timeout\"", ":", "request_timeout", "}" ]
47.7
24.9
def statement(self, days=60):
    """Download the :py:class:`ofxparse.Statement` given the time range

    :param days: Number of days to look back at
    :type days: integer
    :rtype: :py:class:`ofxparse.Statement`
    """
    parsed = self.download_parsed(days=days)
    return parsed.account.statement
[ "def", "statement", "(", "self", ",", "days", "=", "60", ")", ":", "parsed", "=", "self", ".", "download_parsed", "(", "days", "=", "days", ")", "return", "parsed", ".", "account", ".", "statement" ]
36.333333
10.111111
def main():
    """
    The single entry point to the glim command line interface. Main method
    is called from the pypi console_scripts key or by glim.py on root. This
    function initializes a new app given the glim commands and app commands
    if an app exists.

    Usage
    -----
    $ python glim/cli.py start
    $ python glim.py start (on root folder)
    """
    # register the global parser
    preparser = argparse.ArgumentParser(description=description, add_help=False)
    preparser.add_argument('--env', '-e', dest='env',
                           default='development',
                           help='choose application environment')

    # parse existing options
    namespace, extra = preparser.parse_known_args()
    env = namespace.env

    # register the subparsers
    parser = argparse.ArgumentParser(parents=[preparser],
                                     description=description, add_help=True)
    subparsers = parser.add_subparsers(title='commands', help='commands')

    # initialize a command adapter with subparsers
    commandadapter = CommandAdapter(subparsers)

    # register glim commands
    commandadapter.register(glim.commands)

    # register app commands
    appcommands = import_module('app.commands', pass_errors=True)
    commandadapter.register(appcommands)

    app = None
    if paths.app_exists() is False:
        # check if a new app is being created
        new = True if 'new' in extra else False
        if ('help' in extra) or ('--help' in extra) or ('-h' in extra):
            help = True
        else:
            help = False
        if help:
            parser.print_help()
            exit()
    else:
        app = make_app(env, commandadapter)

    args = parser.parse_args()
    command = commandadapter.match(args)
    commandadapter.dispatch(command, app)
[ "def", "main", "(", ")", ":", "# register the global parser", "preparser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ",", "add_help", "=", "False", ")", "preparser", ".", "add_argument", "(", "'--env'", ",", "'-e'", ",", "dest", "=", "'env'", ",", "default", "=", "'development'", ",", "help", "=", "'choose application environment'", ")", "# parse existing options", "namespace", ",", "extra", "=", "preparser", ".", "parse_known_args", "(", ")", "env", "=", "namespace", ".", "env", "# register the subparsers", "parser", "=", "argparse", ".", "ArgumentParser", "(", "parents", "=", "[", "preparser", "]", ",", "description", "=", "description", ",", "add_help", "=", "True", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "title", "=", "'commands'", ",", "help", "=", "'commands'", ")", "# initialize a command adapter with subparsers", "commandadapter", "=", "CommandAdapter", "(", "subparsers", ")", "# register glim commands", "commandadapter", ".", "register", "(", "glim", ".", "commands", ")", "# register app commands", "appcommands", "=", "import_module", "(", "'app.commands'", ",", "pass_errors", "=", "True", ")", "commandadapter", ".", "register", "(", "appcommands", ")", "app", "=", "None", "if", "paths", ".", "app_exists", "(", ")", "is", "False", ":", "# check if a new app is being created", "new", "=", "True", "if", "'new'", "in", "extra", "else", "False", "if", "(", "'help'", "in", "extra", ")", "or", "(", "'--help'", "in", "extra", ")", "or", "(", "'-h'", "in", "extra", ")", ":", "help", "=", "True", "else", ":", "help", "=", "False", "if", "help", ":", "parser", ".", "print_help", "(", ")", "exit", "(", ")", "else", ":", "app", "=", "make_app", "(", "env", ",", "commandadapter", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "command", "=", "commandadapter", ".", "match", "(", "args", ")", "commandadapter", ".", "dispatch", "(", "command", ",", "app", ")" ]
29.709677
21.354839
def service_checks():
    """Validate all `service_checks.json` files."""
    root = get_root()
    echo_info("Validating all service_checks.json files...")
    failed_checks = 0
    ok_checks = 0

    for check_name in sorted(os.listdir(root)):
        service_checks_file = os.path.join(root, check_name, 'service_checks.json')
        if file_exists(service_checks_file):
            file_failed = False
            display_queue = []

            try:
                decoded = json.loads(read_file(service_checks_file).strip(), object_pairs_hook=OrderedDict)
            except JSONDecodeError as e:
                failed_checks += 1
                echo_info("{}/service_checks.json... ".format(check_name), nl=False)
                echo_failure("FAILED")
                echo_failure('  invalid json: {}'.format(e))
                continue

            unique_names = set()
            unique_checks = set()
            for service_check in decoded:
                # attributes are valid
                attrs = set(service_check)
                for attr in sorted(attrs - REQUIRED_ATTRIBUTES):
                    file_failed = True
                    display_queue.append((echo_failure, '  Attribute `{}` is invalid'.format(attr)))
                for attr in sorted(REQUIRED_ATTRIBUTES - attrs):
                    file_failed = True
                    display_queue.append((echo_failure, '  Attribute `{}` is required'.format(attr)))

                # agent_version
                agent_version = service_check.get('agent_version')
                version_parts = parse_version_parts(agent_version)
                if len(version_parts) != 3:
                    file_failed = True
                    if not agent_version:
                        output = '  required non-null string: agent_version'
                    else:
                        output = '  invalid `agent_version`: {}'.format(agent_version)
                    display_queue.append((echo_failure, output))

                # check
                check = service_check.get('check')
                if not check or not isinstance(check, string_types):
                    file_failed = True
                    display_queue.append((echo_failure, '  required non-null string: check'))
                else:
                    if check in unique_checks:
                        file_failed = True
                        display_queue.append((echo_failure, '  {} is not a unique check'.format(check)))
                    else:
                        unique_checks.add(check)

                # description
                description = service_check.get('description')
                if not description or not isinstance(description, string_types):
                    file_failed = True
                    display_queue.append((echo_failure, '  required non-null string: description'))

                # groups
                groups = service_check.get('groups')
                if groups is None or not isinstance(groups, list):
                    file_failed = True
                    display_queue.append((echo_failure, '  required list: groups'))

                # integration
                integration = service_check.get('integration')
                if integration is None or not isinstance(integration, string_types):
                    file_failed = True
                    display_queue.append((echo_failure, '  required non-null string: integration'))

                # name
                name = service_check.get('name')
                if not name or not isinstance(name, string_types):
                    file_failed = True
                    display_queue.append((echo_failure, '  required non-null string: name'))
                else:
                    if name in unique_names:
                        file_failed = True
                        display_queue.append((echo_failure, '  {} is not a unique name'.format(name)))
                    else:
                        unique_names.add(name)

                # statuses
                statuses = service_check.get('statuses')
                if not statuses or not isinstance(statuses, list):
                    file_failed = True
                    display_queue.append((echo_failure, '  required non empty list: statuses'))

            if file_failed:
                failed_checks += 1
                # Display detailed info if file invalid
                echo_info("{}/service_checks.json... ".format(check_name), nl=False)
                echo_failure("FAILED")
                for display_func, message in display_queue:
                    display_func(message)
            else:
                ok_checks += 1

    if ok_checks:
        echo_success("{} valid files".format(ok_checks))
    if failed_checks:
        echo_failure("{} invalid files".format(failed_checks))
        abort()
[ "def", "service_checks", "(", ")", ":", "root", "=", "get_root", "(", ")", "echo_info", "(", "\"Validating all service_checks.json files...\"", ")", "failed_checks", "=", "0", "ok_checks", "=", "0", "for", "check_name", "in", "sorted", "(", "os", ".", "listdir", "(", "root", ")", ")", ":", "service_checks_file", "=", "os", ".", "path", ".", "join", "(", "root", ",", "check_name", ",", "'service_checks.json'", ")", "if", "file_exists", "(", "service_checks_file", ")", ":", "file_failed", "=", "False", "display_queue", "=", "[", "]", "try", ":", "decoded", "=", "json", ".", "loads", "(", "read_file", "(", "service_checks_file", ")", ".", "strip", "(", ")", ",", "object_pairs_hook", "=", "OrderedDict", ")", "except", "JSONDecodeError", "as", "e", ":", "failed_checks", "+=", "1", "echo_info", "(", "\"{}/service_checks.json... \"", ".", "format", "(", "check_name", ")", ",", "nl", "=", "False", ")", "echo_failure", "(", "\"FAILED\"", ")", "echo_failure", "(", "' invalid json: {}'", ".", "format", "(", "e", ")", ")", "continue", "unique_names", "=", "set", "(", ")", "unique_checks", "=", "set", "(", ")", "for", "service_check", "in", "decoded", ":", "# attributes are valid", "attrs", "=", "set", "(", "service_check", ")", "for", "attr", "in", "sorted", "(", "attrs", "-", "REQUIRED_ATTRIBUTES", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' Attribute `{}` is invalid'", ".", "format", "(", "attr", ")", ")", ")", "for", "attr", "in", "sorted", "(", "REQUIRED_ATTRIBUTES", "-", "attrs", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' Attribute `{}` is required'", ".", "format", "(", "attr", ")", ")", ")", "# agent_version", "agent_version", "=", "service_check", ".", "get", "(", "'agent_version'", ")", "version_parts", "=", "parse_version_parts", "(", "agent_version", ")", "if", "len", "(", "version_parts", ")", "!=", "3", ":", "file_failed", "=", "True", "if", "not", "agent_version", ":", "output", "=", "' required non-null string: agent_version'", "else", ":", "output", "=", "' invalid `agent_version`: {}'", ".", "format", "(", "agent_version", ")", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "output", ")", ")", "# check", "check", "=", "service_check", ".", "get", "(", "'check'", ")", "if", "not", "check", "or", "not", "isinstance", "(", "check", ",", "string_types", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required non-null string: check'", ")", ")", "else", ":", "if", "check", "in", "unique_checks", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' {} is not a unique check'", ".", "format", "(", "check", ")", ")", ")", "else", ":", "unique_checks", ".", "add", "(", "check", ")", "# description", "description", "=", "service_check", ".", "get", "(", "'description'", ")", "if", "not", "description", "or", "not", "isinstance", "(", "description", ",", "string_types", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required non-null string: description'", ")", ")", "# groups", "groups", "=", "service_check", ".", "get", "(", "'groups'", ")", "if", "groups", "is", "None", "or", "not", "isinstance", "(", "groups", ",", "list", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required list: groups'", ")", ")", "# integration", "integration", "=", 
"service_check", ".", "get", "(", "'integration'", ")", "if", "integration", "is", "None", "or", "not", "isinstance", "(", "integration", ",", "string_types", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required non-null string: integration'", ")", ")", "# name", "name", "=", "service_check", ".", "get", "(", "'name'", ")", "if", "not", "name", "or", "not", "isinstance", "(", "name", ",", "string_types", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required non-null string: name'", ")", ")", "else", ":", "if", "name", "in", "unique_names", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' {} is not a unique name'", ".", "format", "(", "name", ")", ")", ")", "else", ":", "unique_names", ".", "add", "(", "name", ")", "# statuses", "statuses", "=", "service_check", ".", "get", "(", "'statuses'", ")", "if", "not", "statuses", "or", "not", "isinstance", "(", "statuses", ",", "list", ")", ":", "file_failed", "=", "True", "display_queue", ".", "append", "(", "(", "echo_failure", ",", "' required non empty list: statuses'", ")", ")", "if", "file_failed", ":", "failed_checks", "+=", "1", "# Display detailed info if file invalid", "echo_info", "(", "\"{}/service_checks.json... \"", ".", "format", "(", "check_name", ")", ",", "nl", "=", "False", ")", "echo_failure", "(", "\"FAILED\"", ")", "for", "display_func", ",", "message", "in", "display_queue", ":", "display_func", "(", "message", ")", "else", ":", "ok_checks", "+=", "1", "if", "ok_checks", ":", "echo_success", "(", "\"{} valid files\"", ".", "format", "(", "ok_checks", ")", ")", "if", "failed_checks", ":", "echo_failure", "(", "\"{} invalid files\"", ".", "format", "(", "failed_checks", ")", ")", "abort", "(", ")" ]
43.454545
22.3
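For reference, a minimal entry that would pass every check enforced by the validator above. The attribute names and type constraints come straight from the code; the values are illustrative:

example_service_check = [{
    "agent_version": "6.0.0",               # must parse into three version parts
    "integration": "my_integration",        # non-null string
    "check": "my_integration.can_connect",  # unique non-null string
    "name": "My Integration Connectivity",  # unique non-null string
    "description": "Returns CRITICAL if the Agent cannot connect.",
    "groups": ["host"],                     # list
    "statuses": ["ok", "critical"],         # non-empty list
}]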
def Parse(self, stat, file_object, knowledge_base):
    """Parse the History file."""
    _ = knowledge_base
    # TODO(user): Convert this to use the far more intelligent plaso parser.
    chrome = ChromeParser(file_object)
    for timestamp, entry_type, url, data1, _, _ in chrome.Parse():
        if entry_type == "CHROME_DOWNLOAD":
            yield rdf_webhistory.BrowserHistoryItem(
                url=url,
                domain=urlparse.urlparse(url).netloc,
                access_time=timestamp,
                program_name="Chrome",
                source_path=file_object.Path(),
                download_path=data1)
        elif entry_type == "CHROME_VISIT":
            yield rdf_webhistory.BrowserHistoryItem(
                url=url,
                domain=urlparse.urlparse(url).netloc,
                access_time=timestamp,
                program_name="Chrome",
                source_path=file_object.Path(),
                title=data1)
[ "def", "Parse", "(", "self", ",", "stat", ",", "file_object", ",", "knowledge_base", ")", ":", "_", "=", "knowledge_base", "# TODO(user): Convert this to use the far more intelligent plaso parser.", "chrome", "=", "ChromeParser", "(", "file_object", ")", "for", "timestamp", ",", "entry_type", ",", "url", ",", "data1", ",", "_", ",", "_", "in", "chrome", ".", "Parse", "(", ")", ":", "if", "entry_type", "==", "\"CHROME_DOWNLOAD\"", ":", "yield", "rdf_webhistory", ".", "BrowserHistoryItem", "(", "url", "=", "url", ",", "domain", "=", "urlparse", ".", "urlparse", "(", "url", ")", ".", "netloc", ",", "access_time", "=", "timestamp", ",", "program_name", "=", "\"Chrome\"", ",", "source_path", "=", "file_object", ".", "Path", "(", ")", ",", "download_path", "=", "data1", ")", "elif", "entry_type", "==", "\"CHROME_VISIT\"", ":", "yield", "rdf_webhistory", ".", "BrowserHistoryItem", "(", "url", "=", "url", ",", "domain", "=", "urlparse", ".", "urlparse", "(", "url", ")", ".", "netloc", ",", "access_time", "=", "timestamp", ",", "program_name", "=", "\"Chrome\"", ",", "source_path", "=", "file_object", ".", "Path", "(", ")", ",", "title", "=", "data1", ")" ]
39.954545
10.090909
def format_image(path, options):
    '''Formats an image.

    Args:
        path (str): Path to the image file.
        options (dict): Options to apply to the image.

    Returns:
        (list) A list of PIL images. The list will always be of length 1
        unless resolutions for resizing are provided in the options.
    '''
    image = Image.open(path)
    image_pipeline_results = __pipeline_image(image, options)
    return image_pipeline_results
[ "def", "format_image", "(", "path", ",", "options", ")", ":", "image", "=", "Image", ".", "open", "(", "path", ")", "image_pipeline_results", "=", "__pipeline_image", "(", "image", ",", "options", ")", "return", "image_pipeline_results" ]
31.714286
22.428571
def list_milestones(self, **queryparams):
    """
    Get the list of :class:`Milestone` resources for the project.
    """
    return Milestones(self.requester).list(project=self.id, **queryparams)
[ "def", "list_milestones", "(", "self", ",", "*", "*", "queryparams", ")", ":", "return", "Milestones", "(", "self", ".", "requester", ")", ".", "list", "(", "project", "=", "self", ".", "id", ",", "*", "*", "queryparams", ")" ]
42
13.6
def unwrap(self, value):
    """Unpack a Value into an augmented python type (selected from the 'value' field)
    """
    if value.changed('value.choices'):
        self._choices = value['value.choices']
    idx = value['value.index']
    ret = ntenum(idx)._store(value)
    try:
        ret.choice = self._choices[idx]
    except IndexError:
        pass  # leave it as None
    return ret
[ "def", "unwrap", "(", "self", ",", "value", ")", ":", "if", "value", ".", "changed", "(", "'value.choices'", ")", ":", "self", ".", "_choices", "=", "value", "[", "'value.choices'", "]", "idx", "=", "value", "[", "'value.index'", "]", "ret", "=", "ntenum", "(", "idx", ")", ".", "_store", "(", "value", ")", "try", ":", "ret", ".", "choice", "=", "self", ".", "_choices", "[", "idx", "]", "except", "IndexError", ":", "pass", "# leave it as None", "return", "ret" ]
32.538462
11.307692
def file_upload_dialog_timeout(self, value):
    """
    Sets the options File Upload Dialog Timeout value

    :Args:
     - value: Timeout in milliseconds
    """
    if not isinstance(value, int):
        raise ValueError('File Upload Dialog Timeout must be an integer.')
    self._options[self.FILE_UPLOAD_DIALOG_TIMEOUT] = value
[ "def", "file_upload_dialog_timeout", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "int", ")", ":", "raise", "ValueError", "(", "'File Upload Dialog Timeout must be an integer.'", ")", "self", ".", "_options", "[", "self", ".", "FILE_UPLOAD_DIALOG_TIMEOUT", "]", "=", "value" ]
32.363636
17.272727
def _is_base_matcher_class_definition(meta, classname, dict_):
    """Checks whether given class name and dictionary
    define the :class:`BaseMatcher`.
    """
    if classname != 'BaseMatcher':
        return False
    methods = list(filter(inspect.isfunction, dict_.values()))
    return methods and all(m.__module__ == __name__ for m in methods)
[ "def", "_is_base_matcher_class_definition", "(", "meta", ",", "classname", ",", "dict_", ")", ":", "if", "classname", "!=", "'BaseMatcher'", ":", "return", "False", "methods", "=", "list", "(", "filter", "(", "inspect", ".", "isfunction", ",", "dict_", ".", "values", "(", ")", ")", ")", "return", "methods", "and", "all", "(", "m", ".", "__module__", "==", "__name__", "for", "m", "in", "methods", ")" ]
46.375
12.375
def to_json_data(self, model_name=None):
    """
    Parameters
    ----------
    model_name: str, default None
        if given, will be used as external file directory base name

    Returns
    -------
    A dictionary of serialized data.
    """
    return collections.OrderedDict([
        (k, self.get_serialized_value(k, model_name=model_name))
        for k in self._data])
[ "def", "to_json_data", "(", "self", ",", "model_name", "=", "None", ")", ":", "return", "collections", ".", "OrderedDict", "(", "[", "(", "k", ",", "self", ".", "get_serialized_value", "(", "k", ",", "model_name", "=", "model_name", ")", ")", "for", "k", "in", "self", ".", "_data", "]", ")" ]
32.916667
20.583333
def domains(self):
    """
    This method returns all of your current domains.
    """
    json = self.request('/domains', method='GET')
    status = json.get('status')
    if status == 'OK':
        domains_json = json.get('domains', [])
        domains = [Domain.from_json(domain) for domain in domains_json]
        return domains
    else:
        message = json.get('message')
        raise DOPException('[%s]: %s' % (status, message))
[ "def", "domains", "(", "self", ")", ":", "json", "=", "self", ".", "request", "(", "'/domains'", ",", "method", "=", "'GET'", ")", "status", "=", "json", ".", "get", "(", "'status'", ")", "if", "status", "==", "'OK'", ":", "domains_json", "=", "json", ".", "get", "(", "'domains'", ",", "[", "]", ")", "domains", "=", "[", "Domain", ".", "from_json", "(", "domain", ")", "for", "domain", "in", "domains_json", "]", "return", "domains", "else", ":", "message", "=", "json", ".", "get", "(", "'message'", ")", "raise", "DOPException", "(", "'[%s]: %s'", "%", "(", "status", ",", "message", ")", ")" ]
36.692308
13.769231
def attach_photo(self, photo: String, caption: String = None):
    """
    Attach photo

    :param photo:
    :param caption:
    :return: self
    """
    self.media.attach_photo(photo, caption)
    return self
[ "def", "attach_photo", "(", "self", ",", "photo", ":", "String", ",", "caption", ":", "String", "=", "None", ")", ":", "self", ".", "media", ".", "attach_photo", "(", "photo", ",", "caption", ")", "return", "self" ]
23.5
16.5
def sample(self, fraction, seed=None, exact=False):
    """
    Sample a fraction of the current SFrame's rows.

    Parameters
    ----------
    fraction : float
        Fraction of the rows to fetch. Must be between 0 and 1. If exact
        is False (default), the number of rows returned is approximately
        the fraction times the number of rows.

    seed : int, optional
        Seed for the random number generator used to sample.

    exact: bool, optional
        Defaults to False. If exact=True, an exact fraction is returned,
        but at a performance penalty.

    Returns
    -------
    out : SFrame
        A new SFrame containing sampled rows of the current SFrame.

    Examples
    --------
    Suppose we have an SFrame with 6,145 rows.

    >>> import random
    >>> sf = SFrame({'id': range(0, 6145)})

    Retrieve about 30% of the SFrame rows with repeatable results by
    setting the random seed.

    >>> len(sf.sample(.3, seed=5))
    1783
    """
    if seed is None:
        seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)

    if (fraction > 1 or fraction < 0):
        raise ValueError('Invalid sampling rate: ' + str(fraction))

    if (self.num_rows() == 0 or self.num_columns() == 0):
        return self
    else:
        with cython_context():
            return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact))
[ "def", "sample", "(", "self", ",", "fraction", ",", "seed", "=", "None", ",", "exact", "=", "False", ")", ":", "if", "seed", "is", "None", ":", "seed", "=", "abs", "(", "hash", "(", "\"%0.20f\"", "%", "time", ".", "time", "(", ")", ")", ")", "%", "(", "2", "**", "31", ")", "if", "(", "fraction", ">", "1", "or", "fraction", "<", "0", ")", ":", "raise", "ValueError", "(", "'Invalid sampling rate: '", "+", "str", "(", "fraction", ")", ")", "if", "(", "self", ".", "num_rows", "(", ")", "==", "0", "or", "self", ".", "num_columns", "(", ")", "==", "0", ")", ":", "return", "self", "else", ":", "with", "cython_context", "(", ")", ":", "return", "SFrame", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "sample", "(", "fraction", ",", "seed", ",", "exact", ")", ")" ]
30.666667
23.541667
def by_key(self, style_key, style_value):
    """Return a processor for a "simple" style value.

    Parameters
    ----------
    style_key : str
        A style key.
    style_value : bool or str
        A "simple" style value: either a style attribute (str) or a
        boolean flag indicating to use the style attribute named by
        `style_key`.

    Returns
    -------
    A function.
    """
    if self.style_types[style_key] is bool:
        style_attr = style_key
    else:
        style_attr = style_value

    def proc(_, result):
        return self.render(style_attr, result)
    return proc
[ "def", "by_key", "(", "self", ",", "style_key", ",", "style_value", ")", ":", "if", "self", ".", "style_types", "[", "style_key", "]", "is", "bool", ":", "style_attr", "=", "style_key", "else", ":", "style_attr", "=", "style_value", "def", "proc", "(", "_", ",", "result", ")", ":", "return", "self", ".", "render", "(", "style_attr", ",", "result", ")", "return", "proc" ]
28.125
18.708333
def _get_correct_module(mod):
    """Return the imported module.

    Checks whether ``leonardo_module_conf`` (or ``LEONARDO_MODULE_CONF``)
    is specified and, if so, imports and returns that module instead.
    """
    module_location = getattr(
        mod, 'leonardo_module_conf',
        getattr(mod, "LEONARDO_MODULE_CONF", None))
    if module_location:
        mod = import_module(module_location)
    elif hasattr(mod, 'default_app_config'):
        # use django behavior
        mod_path, _, cls_name = mod.default_app_config.rpartition('.')
        _mod = import_module(mod_path)
        config_class = getattr(_mod, cls_name)
        # check if is leonardo config compliant
        if _is_leonardo_module(config_class):
            mod = config_class
    return mod
[ "def", "_get_correct_module", "(", "mod", ")", ":", "module_location", "=", "getattr", "(", "mod", ",", "'leonardo_module_conf'", ",", "getattr", "(", "mod", ",", "\"LEONARDO_MODULE_CONF\"", ",", "None", ")", ")", "if", "module_location", ":", "mod", "=", "import_module", "(", "module_location", ")", "elif", "hasattr", "(", "mod", ",", "'default_app_config'", ")", ":", "# use django behavior", "mod_path", ",", "_", ",", "cls_name", "=", "mod", ".", "default_app_config", ".", "rpartition", "(", "'.'", ")", "_mod", "=", "import_module", "(", "mod_path", ")", "config_class", "=", "getattr", "(", "_mod", ",", "cls_name", ")", "# check if is leonardo config compliant", "if", "_is_leonardo_module", "(", "config_class", ")", ":", "mod", "=", "config_class", "return", "mod" ]
32.571429
14.714286
def connect(self, url: str):
    """Connect to the database and set it as the main database

    :param url: path to the database, uses the Sqlalchemy format
    :type url: str

    :example: ``ds.connect("sqlite:///mydb.sqlite")``
    """
    try:
        self.db = dataset.connect(url, row_type=stuf)
    except Exception as e:
        self.err(e, "Can not connect to database")
        return
    if self.db is None:
        self.err("Database " + url + " not found")
        return
    self.ok("Db", self.db.url, "connected")
[ "def", "connect", "(", "self", ",", "url", ":", "str", ")", ":", "try", ":", "self", ".", "db", "=", "dataset", ".", "connect", "(", "url", ",", "row_type", "=", "stuf", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not connect to database\"", ")", "return", "if", "self", ".", "db", "is", "None", ":", "self", ".", "err", "(", "\"Database \"", "+", "url", "+", "\" not found\"", ")", "return", "self", ".", "ok", "(", "\"Db\"", ",", "self", ".", "db", ".", "url", ",", "\"connected\"", ")" ]
33.235294
17.764706
def In(self, *values):
    """Sets the type of the WHERE clause as "in".

    Args:
      *values: The values to be used in the WHERE condition.

    Returns:
      The query builder that this WHERE builder links to.
    """
    self._awql = self._CreateMultipleValuesCondition(values, 'IN')
    return self._query_builder
[ "def", "In", "(", "self", ",", "*", "values", ")", ":", "self", ".", "_awql", "=", "self", ".", "_CreateMultipleValuesCondition", "(", "values", ",", "'IN'", ")", "return", "self", ".", "_query_builder" ]
28.363636
20.909091
def get_energy(self):
    """
    Returns the consumed energy since the start of the statistics in Wh.

    Attention: Returns None if the value can't be queried or is unknown.
    """
    value = self.box.homeautoswitch("getswitchenergy", self.actor_id)
    return int(value) if value.isdigit() else None
[ "def", "get_energy", "(", "self", ")", ":", "value", "=", "self", ".", "box", ".", "homeautoswitch", "(", "\"getswitchenergy\"", ",", "self", ".", "actor_id", ")", "return", "int", "(", "value", ")", "if", "value", ".", "isdigit", "(", ")", "else", "None" ]
46
19.714286
def should_break_here(self, frame):
    """Check whether there is a breakpoint at this frame."""
    # Next line commented out for performance
    #_logger.b_debug("should_break_here(filename=%s, lineno=%s) with breaks=%s",
    #                frame.f_code.co_filename,
    #                frame.f_lineno,
    #                IKBreakpoint.breakpoints_by_number)
    c_file_name = self.canonic(frame.f_code.co_filename)
    if not c_file_name in IKBreakpoint.breakpoints_files:
        return False
    bp = IKBreakpoint.lookup_effective_breakpoint(c_file_name,
                                                  frame.f_lineno,
                                                  frame)
    return True if bp else False
[ "def", "should_break_here", "(", "self", ",", "frame", ")", ":", "# Next line commented out for performance", "#_logger.b_debug(\"should_break_here(filename=%s, lineno=%s) with breaks=%s\",", "# frame.f_code.co_filename,", "# frame.f_lineno,", "# IKBreakpoint.breakpoints_by_number)", "c_file_name", "=", "self", ".", "canonic", "(", "frame", ".", "f_code", ".", "co_filename", ")", "if", "not", "c_file_name", "in", "IKBreakpoint", ".", "breakpoints_files", ":", "return", "False", "bp", "=", "IKBreakpoint", ".", "lookup_effective_breakpoint", "(", "c_file_name", ",", "frame", ".", "f_lineno", ",", "frame", ")", "return", "True", "if", "bp", "else", "False" ]
51.133333
17.2
def given_i_am_logged_in(context, username):
    """
    :type username: str
    :type context: behave.runner.Context
    """
    user = get_user_model().objects.get(username=username)
    context.apiClient.force_authenticate(user=user)
[ "def", "given_i_am_logged_in", "(", "context", ",", "username", ")", ":", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "context", ".", "apiClient", ".", "force_authenticate", "(", "user", "=", "user", ")" ]
33.714286
7.428571
def ASR(self, a):
    """
    ASR (Arithmetic Shift Right) alias LSR (Logical Shift Right)

    Shifts all bits of the register one place to the right. Bit seven is
    held constant. Bit zero is shifted into the C (carry) bit.

    source code forms: ASR Q; ASRA; ASRB

    CC bits "HNZVC": uaa-s
    """
    r = (a >> 1) | (a & 0x80)
    self.clear_NZC()
    self.C = get_bit(a, bit=0)  # same as: self.C |= (a & 1)
    self.update_NZ_8(r)
    return r
[ "def", "ASR", "(", "self", ",", "a", ")", ":", "r", "=", "(", "a", ">>", "1", ")", "|", "(", "a", "&", "0x80", ")", "self", ".", "clear_NZC", "(", ")", "self", ".", "C", "=", "get_bit", "(", "a", ",", "bit", "=", "0", ")", "# same as: self.C |= (a & 1)", "self", ".", "update_NZ_8", "(", "r", ")", "return", "r" ]
30.375
20.625
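The `(a >> 1) | (a & 0x80)` expression in the record above implements an 8-bit arithmetic shift right by re-inserting the sign bit. A minimal, self-contained check of that bit trick in plain Python (independent of the emulator's CPU class):

def asr8(a):
    """Arithmetic shift right of an 8-bit value: bit 7 is held constant."""
    return (a >> 1) | (a & 0x80)

assert asr8(0b10000010) == 0b11000001  # negative value keeps its sign bit
assert asr8(0b00000110) == 0b00000011  # positive value: plain logical shift
carry = 0b10000011 & 1                 # bit zero goes into the carry flag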
def create_role_config_group(resource_root, service_name, name, display_name,
                             role_type, cluster_name="default"):
    """
    Create a role config group.

    @param resource_root: The root Resource object.
    @param service_name: Service name.
    @param name: The name of the new group.
    @param display_name: The display name of the new group.
    @param role_type: The role type of the new group.
    @param cluster_name: Cluster name.
    @return: The created role config group.
    """
    apigroup = ApiRoleConfigGroup(resource_root, name, display_name, role_type)
    return create_role_config_groups(resource_root, service_name, [apigroup],
                                     cluster_name)[0]
[ "def", "create_role_config_group", "(", "resource_root", ",", "service_name", ",", "name", ",", "display_name", ",", "role_type", ",", "cluster_name", "=", "\"default\"", ")", ":", "apigroup", "=", "ApiRoleConfigGroup", "(", "resource_root", ",", "name", ",", "display_name", ",", "role_type", ")", "return", "create_role_config_groups", "(", "resource_root", ",", "service_name", ",", "[", "apigroup", "]", ",", "cluster_name", ")", "[", "0", "]" ]
43
12.733333
def printFrequencyStatistics(counts, frequencies, numWords, size):
    """ Print interesting statistics regarding the counts and frequency matrices
    """
    avgBits = float(counts.sum()) / numWords
    print "Retina width=128, height=128"
    print "Total number of words processed=", numWords
    print "Average number of bits per word=", avgBits,
    print "avg sparsity=", avgBits / size
    print "counts matrix sum=", counts.sum(),
    print "max=", counts.max(), "min=", counts.min(),
    print "mean=", counts.sum() / float(size)
    print "frequency matrix sum=", frequencies.sum(),
    print "max=", frequencies.max(), "min=", frequencies.min(),
    print "mean=", frequencies.sum() / float(size)
    print "Number of bits with zero entries", frequencies.nZeroCols()
[ "def", "printFrequencyStatistics", "(", "counts", ",", "frequencies", ",", "numWords", ",", "size", ")", ":", "avgBits", "=", "float", "(", "counts", ".", "sum", "(", ")", ")", "/", "numWords", "print", "\"Retina width=128, height=128\"", "print", "\"Total number of words processed=\"", ",", "numWords", "print", "\"Average number of bits per word=\"", ",", "avgBits", ",", "print", "\"avg sparsity=\"", ",", "avgBits", "/", "size", "print", "\"counts matrix sum=\"", ",", "counts", ".", "sum", "(", ")", ",", "print", "\"max=\"", ",", "counts", ".", "max", "(", ")", ",", "\"min=\"", ",", "counts", ".", "min", "(", ")", ",", "print", "\"mean=\"", ",", "counts", ".", "sum", "(", ")", "/", "float", "(", "size", ")", "print", "\"frequency matrix sum=\"", ",", "frequencies", ".", "sum", "(", ")", ",", "print", "\"max=\"", ",", "frequencies", ".", "max", "(", ")", ",", "\"min=\"", ",", "frequencies", ".", "min", "(", ")", ",", "print", "\"mean=\"", ",", "frequencies", ".", "sum", "(", ")", "/", "float", "(", "size", ")", "print", "\"Number of bits with zero entries\"", ",", "frequencies", ".", "nZeroCols", "(", ")" ]
44.8125
9.9375
def get_policy(self, name):
    """Get a single Policy by name.

    Args:
        name (str): The name of the Policy.

    Returns:
        (:obj:`Policy`) The Policy that matches the name.
    """
    address = _create_policy_address(name)
    policy_list_bytes = None
    try:
        policy_list_bytes = self._state_view.get(address=address)
    except KeyError:
        return None
    if policy_list_bytes is not None:
        policy_list = _create_from_bytes(policy_list_bytes,
                                         identity_pb2.PolicyList)
        for policy in policy_list.policies:
            if policy.name == name:
                return policy
    return None
[ "def", "get_policy", "(", "self", ",", "name", ")", ":", "address", "=", "_create_policy_address", "(", "name", ")", "policy_list_bytes", "=", "None", "try", ":", "policy_list_bytes", "=", "self", ".", "_state_view", ".", "get", "(", "address", "=", "address", ")", "except", "KeyError", ":", "return", "None", "if", "policy_list_bytes", "is", "not", "None", ":", "policy_list", "=", "_create_from_bytes", "(", "policy_list_bytes", ",", "identity_pb2", ".", "PolicyList", ")", "for", "policy", "in", "policy_list", ".", "policies", ":", "if", "policy", ".", "name", "==", "name", ":", "return", "policy", "return", "None" ]
29.24
19.4
def findall(self, string, *args, **kwargs):
    """Apply `findall`."""
    return self._pattern.findall(string, *args, **kwargs)
[ "def", "findall", "(", "self", ",", "string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_pattern", ".", "findall", "(", "string", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
33.5
16
def evaluate_formula(formula, variables):
    """Very simple formula evaluator. Beware of the security implications:
    the formula is executed with ``eval``.

    :param formula: A simple formula.
    :type formula: str
    :param variables: A collection of variables (key and value).
    :type variables: dict
    :returns: The result of the formula execution.
    :rtype: float, int
    """
    for key, value in list(variables.items()):
        if value is None or (hasattr(value, 'isNull') and value.isNull()):
            # If one value is null, we return null.
            return value
        formula = formula.replace(key, str(value))
    result = eval(formula)
    return result
[ "def", "evaluate_formula", "(", "formula", ",", "variables", ")", ":", "for", "key", ",", "value", "in", "list", "(", "variables", ".", "items", "(", ")", ")", ":", "if", "value", "is", "None", "or", "(", "hasattr", "(", "value", ",", "'isNull'", ")", "and", "value", ".", "isNull", "(", ")", ")", ":", "# If one value is null, we return null.", "return", "value", "formula", "=", "formula", ".", "replace", "(", "key", ",", "str", "(", "value", ")", ")", "result", "=", "eval", "(", "formula", ")", "return", "result" ]
32.263158
16.947368
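Since the evaluator works by plain string substitution before `eval`, a short usage sketch (assuming `evaluate_formula` from the record above is in scope) makes the mechanics, and the substring pitfall, concrete:

print(evaluate_formula('pop * rate', {'pop': 1000, 'rate': 0.05}))  # 50.0

# Pitfall: keys are replaced as raw substrings, so a key that is a
# substring of another key (e.g. 'rate' inside 'rate2') can corrupt
# the formula before it is evaluated.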
def parse_path(root_dir):
    """Split path into head and last component for the completer.

    Also return position where last component starts.

    :param root_dir: str path
    :return: tuple of (string, string, int)
    """
    base_dir, last_dir, position = '', '', 0
    if root_dir:
        base_dir, last_dir = os.path.split(root_dir)
        position = -len(last_dir) if last_dir else 0
    return base_dir, last_dir, position
[ "def", "parse_path", "(", "root_dir", ")", ":", "base_dir", ",", "last_dir", ",", "position", "=", "''", ",", "''", ",", "0", "if", "root_dir", ":", "base_dir", ",", "last_dir", "=", "os", ".", "path", ".", "split", "(", "root_dir", ")", "position", "=", "-", "len", "(", "last_dir", ")", "if", "last_dir", "else", "0", "return", "base_dir", ",", "last_dir", ",", "position" ]
38.636364
8.636364
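A quick illustration of the three return values (assuming `parse_path` above and its `os` import are in scope):

assert parse_path('/home/us') == ('/home', 'us', -2)  # cursor 2 chars back
assert parse_path('/home/') == ('/home', '', 0)       # no last component
assert parse_path('') == ('', '', 0)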
def H(g, i):
    """recursively constructs H line for g; i = len(g)-1"""
    g1 = g & (2**i)
    if i:
        n = Hwidth(i)
        i = i - 1
        Hn = H(g, i)
        if g1:
            return Hn << (2*n) | Hn << n | Hn
        else:
            return int('1'*n, 2) << (2*n) | L(g, i) << n | Hn
    else:
        if g1:
            return int('111', 2)
        else:
            return int('101', 2)
[ "def", "H", "(", "g", ",", "i", ")", ":", "g1", "=", "g", "&", "(", "2", "**", "i", ")", "if", "i", ":", "n", "=", "Hwidth", "(", "i", ")", "i", "=", "i", "-", "1", "Hn", "=", "H", "(", "g", ",", "i", ")", "if", "g1", ":", "return", "Hn", "<<", "(", "2", "*", "n", ")", "|", "Hn", "<<", "n", "|", "Hn", "else", ":", "return", "int", "(", "'1'", "*", "n", ",", "2", ")", "<<", "(", "2", "*", "n", ")", "|", "L", "(", "g", ",", "i", ")", "<<", "n", "|", "Hn", "else", ":", "if", "g1", ":", "return", "int", "(", "'111'", ",", "2", ")", "else", ":", "return", "int", "(", "'101'", ",", "2", ")" ]
24
20.9375
def validate_project(project_name):
    """
    Check the defined project name against keywords, builtins and
    existing modules to avoid name clashing
    """
    if '-' in project_name:
        return None
    if keyword.iskeyword(project_name):
        return None
    if project_name in dir(__builtins__):
        return None
    try:
        __import__(project_name)
        return None
    except ImportError:
        return project_name
[ "def", "validate_project", "(", "project_name", ")", ":", "if", "'-'", "in", "project_name", ":", "return", "None", "if", "keyword", ".", "iskeyword", "(", "project_name", ")", ":", "return", "None", "if", "project_name", "in", "dir", "(", "__builtins__", ")", ":", "return", "None", "try", ":", "__import__", "(", "project_name", ")", "return", "None", "except", "ImportError", ":", "return", "project_name" ]
26.875
13.375
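The contract is "return the name if it is safe, else None". A few illustrative calls (assuming `validate_project` and its `import keyword` are in scope, and that no module named `shiny_new_app` is installed):

assert validate_project('my-app') is None         # hyphen is not importable
assert validate_project('class') is None          # Python keyword
assert validate_project('os') is None             # clashes with stdlib module
assert validate_project('shiny_new_app') == 'shiny_new_app'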
def _parse_port_ranges(pool_str):
    """Given a 'N-P,X-Y' description of port ranges, return a set of ints."""
    ports = set()
    for range_str in pool_str.split(','):
        try:
            a, b = range_str.split('-', 1)
            start, end = int(a), int(b)
        except ValueError:
            log.error('Ignoring unparsable port range %r.', range_str)
            continue
        if start < 1 or end > 65535:
            log.error('Ignoring out of bounds port range %r.', range_str)
            continue
        ports.update(set(range(start, end + 1)))
    return ports
[ "def", "_parse_port_ranges", "(", "pool_str", ")", ":", "ports", "=", "set", "(", ")", "for", "range_str", "in", "pool_str", ".", "split", "(", "','", ")", ":", "try", ":", "a", ",", "b", "=", "range_str", ".", "split", "(", "'-'", ",", "1", ")", "start", ",", "end", "=", "int", "(", "a", ")", ",", "int", "(", "b", ")", "except", "ValueError", ":", "log", ".", "error", "(", "'Ignoring unparsable port range %r.'", ",", "range_str", ")", "continue", "if", "start", "<", "1", "or", "end", ">", "65535", ":", "log", ".", "error", "(", "'Ignoring out of bounds port range %r.'", ",", "range_str", ")", "continue", "ports", ".", "update", "(", "set", "(", "range", "(", "start", ",", "end", "+", "1", ")", ")", ")", "return", "ports" ]
38
14.333333
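For example (assuming the function and its module-level `log` are in scope), well-formed ranges are expanded inclusively and malformed chunks are skipped with an error logged:

assert _parse_port_ranges('8000-8002,9000-9001') == {8000, 8001, 8002,
                                                     9000, 9001}
assert _parse_port_ranges('nonsense,1-3') == {1, 2, 3}  # bad chunk ignored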
def nodes():
    '''
    List running nodes on all enabled cloud providers. Automatically
    flushes caches
    '''
    for name, provider in env.providers.items():
        print name
        provider.nodes()
        print
[ "def", "nodes", "(", ")", ":", "for", "name", ",", "provider", "in", "env", ".", "providers", ".", "items", "(", ")", ":", "print", "name", "provider", ".", "nodes", "(", ")", "print" ]
26.5
26.25
def _create_output_directories(self, analysis):
    """
    Create the necessary output and resource directories for the specified analysis

    :param analysis: analysis associated with a given test_id
    """
    try:
        os.makedirs(analysis.output_directory)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    try:
        resource_directory = os.path.join(analysis.output_directory,
                                          analysis.resource_path)
        os.makedirs(resource_directory)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
[ "def", "_create_output_directories", "(", "self", ",", "analysis", ")", ":", "try", ":", "os", ".", "makedirs", "(", "analysis", ".", "output_directory", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "try", ":", "resource_directory", "=", "os", ".", "path", ".", "join", "(", "analysis", ".", "output_directory", ",", "analysis", ".", "resource_path", ")", "os", ".", "makedirs", "(", "resource_directory", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise" ]
35.3125
16.5625
def handle_image_posts(function=None):
    """
    Decorator for views that handles ajax image posts in base64 encoding,
    saving the image and returning the url
    """
    @wraps(function, assigned=available_attrs(function))
    def _wrapped_view(request, *args, **kwargs):
        if 'image' in request.META['CONTENT_TYPE']:
            name = default_storage.save(
                os.path.join('images', 'aloha-uploads',
                             request.META['HTTP_X_FILE_NAME']),
                ContentFile(base64.b64decode(request.body.split(",", 1)[1])))
            return HttpResponse(posixpath.join(settings.MEDIA_URL, name),
                                content_type="text/plain")
        else:
            return function(request, *args, **kwargs)
    return _wrapped_view
[ "def", "handle_image_posts", "(", "function", "=", "None", ")", ":", "@", "wraps", "(", "function", ",", "assigned", "=", "available_attrs", "(", "function", ")", ")", "def", "_wrapped_view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'image'", "in", "request", ".", "META", "[", "'CONTENT_TYPE'", "]", ":", "name", "=", "default_storage", ".", "save", "(", "os", ".", "path", ".", "join", "(", "'images'", ",", "'aloha-uploads'", ",", "request", ".", "META", "[", "'HTTP_X_FILE_NAME'", "]", ")", ",", "ContentFile", "(", "base64", ".", "b64decode", "(", "request", ".", "body", ".", "split", "(", "\",\"", ",", "1", ")", "[", "1", "]", ")", ")", ")", "return", "HttpResponse", "(", "posixpath", ".", "join", "(", "settings", ".", "MEDIA_URL", ",", "name", ")", ",", "content_type", "=", "\"text/plain\"", ")", "else", ":", "return", "function", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapped_view" ]
51.928571
23.785714
def copy(value, **kwargs):
    """Return a copy of a **HasProperties** instance

    A copy is produced by serializing the HasProperties instance then
    deserializing it to a new instance. Therefore, if any properties
    cannot be serialized/deserialized, :code:`copy` will fail. Any
    keyword arguments will be passed through to both :code:`serialize`
    and :code:`deserialize`.
    """
    if not isinstance(value, HasProperties):
        raise ValueError('properties.copy may only be used to copy '
                         'HasProperties instances')
    kwargs.update({'include_class': kwargs.get('include_class', True)})
    kwargs.update({'trusted': kwargs.get('trusted', True)})
    return value.__class__.deserialize(value.serialize(**kwargs), **kwargs)
[ "def", "copy", "(", "value", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "value", ",", "HasProperties", ")", ":", "raise", "ValueError", "(", "'properties.copy may only be used to copy'", "'HasProperties instances'", ")", "kwargs", ".", "update", "(", "{", "'include_class'", ":", "kwargs", ".", "get", "(", "'include_class'", ",", "True", ")", "}", ")", "kwargs", ".", "update", "(", "{", "'trusted'", ":", "kwargs", ".", "get", "(", "'trusted'", ",", "True", ")", "}", ")", "return", "value", ".", "__class__", ".", "deserialize", "(", "value", ".", "serialize", "(", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
47.0625
21.625
def save_to_name_list(dest, name_parts, value):
    """
    Util to save some name sequence to a dict. For instance,
    `("location", "query")` would save to dest["location"]["query"].
    """
    if len(name_parts) > 1:
        for part in name_parts[:-1]:
            if part not in dest:
                dest[part] = {}
            dest = dest[part]
    dest[name_parts[-1]] = value
[ "def", "save_to_name_list", "(", "dest", ",", "name_parts", ",", "value", ")", ":", "if", "len", "(", "name_parts", ")", ">", "1", ":", "for", "part", "in", "name_parts", "[", ":", "-", "1", "]", ":", "if", "part", "not", "in", "dest", ":", "dest", "[", "part", "]", "=", "{", "}", "dest", "=", "dest", "[", "part", "]", "dest", "[", "name_parts", "[", "-", "1", "]", "]", "=", "value" ]
34.090909
11
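A quick demonstration of the nested write (assuming `save_to_name_list` above is in scope): intermediate dicts are created on demand and existing branches are reused.

dest = {}
save_to_name_list(dest, ('location', 'query'), 'paris')
save_to_name_list(dest, ('location', 'lang'), 'fr')
assert dest == {'location': {'query': 'paris', 'lang': 'fr'}}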
def prepare(self, **kwargs):
    """ Prepare for rendering """
    for k, v in kwargs.items():
        setattr(self, k, v)
    if not self.is_initialized:
        self.initialize()
    if not self.proxy_is_active:
        self.activate_proxy()
[ "def", "prepare", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")", "if", "not", "self", ".", "is_initialized", ":", "self", ".", "initialize", "(", ")", "if", "not", "self", ".", "proxy_is_active", ":", "self", ".", "activate_proxy", "(", ")" ]
33
6.625
def infer_bool(node, context=None):
    """Understand bool calls."""
    if len(node.args) > 1:
        # Invalid bool call.
        raise UseInferenceDefault

    if not node.args:
        return nodes.Const(False)

    argument = node.args[0]
    try:
        inferred = next(argument.infer(context=context))
    except InferenceError:
        return util.Uninferable
    if inferred is util.Uninferable:
        return util.Uninferable

    bool_value = inferred.bool_value()
    if bool_value is util.Uninferable:
        return util.Uninferable
    return nodes.Const(bool_value)
[ "def", "infer_bool", "(", "node", ",", "context", "=", "None", ")", ":", "if", "len", "(", "node", ".", "args", ")", ">", "1", ":", "# Invalid bool call.", "raise", "UseInferenceDefault", "if", "not", "node", ".", "args", ":", "return", "nodes", ".", "Const", "(", "False", ")", "argument", "=", "node", ".", "args", "[", "0", "]", "try", ":", "inferred", "=", "next", "(", "argument", ".", "infer", "(", "context", "=", "context", ")", ")", "except", "InferenceError", ":", "return", "util", ".", "Uninferable", "if", "inferred", "is", "util", ".", "Uninferable", ":", "return", "util", ".", "Uninferable", "bool_value", "=", "inferred", ".", "bool_value", "(", ")", "if", "bool_value", "is", "util", ".", "Uninferable", ":", "return", "util", ".", "Uninferable", "return", "nodes", ".", "Const", "(", "bool_value", ")" ]
26.857143
14.285714
def DiffPrimitiveArrays(self, oldObj, newObj):
    """Diff two primitive arrays"""
    if len(oldObj) != len(newObj):
        __Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
                      % (len(oldObj), len(newObj)))
        return False
    match = True
    if self._ignoreArrayOrder:
        oldSet = oldObj and frozenset(oldObj) or frozenset()
        newSet = newObj and frozenset(newObj) or frozenset()
        match = (oldSet == newSet)
    else:
        for i, j in zip(oldObj, newObj):
            if i != j:
                match = False
                break
    if not match:
        __Log__.debug(
            'DiffPrimitiveArrays: One of the elements do not match.')
        return False
    return True
[ "def", "DiffPrimitiveArrays", "(", "self", ",", "oldObj", ",", "newObj", ")", ":", "if", "len", "(", "oldObj", ")", "!=", "len", "(", "newObj", ")", ":", "__Log__", ".", "debug", "(", "'DiffDoArrays: Array lengths do not match %d != %d'", "%", "(", "len", "(", "oldObj", ")", ",", "len", "(", "newObj", ")", ")", ")", "return", "False", "match", "=", "True", "if", "self", ".", "_ignoreArrayOrder", ":", "oldSet", "=", "oldObj", "and", "frozenset", "(", "oldObj", ")", "or", "frozenset", "(", ")", "newSet", "=", "newObj", "and", "frozenset", "(", "newObj", ")", "or", "frozenset", "(", ")", "match", "=", "(", "oldSet", "==", "newSet", ")", "else", ":", "for", "i", ",", "j", "in", "zip", "(", "oldObj", ",", "newObj", ")", ":", "if", "i", "!=", "j", ":", "match", "=", "False", "break", "if", "not", "match", ":", "__Log__", ".", "debug", "(", "'DiffPrimitiveArrays: One of the elements do not match.'", ")", "return", "False", "return", "True" ]
34.904762
15.714286
def set_salt_view():
    '''
    Helper function that sets the salt design document. Uses
    get_valid_salt_views and some hardcoded values.
    '''
    options = _get_options(ret=None)

    # Create the new object that we will shove in as the design doc.
    new_doc = {}
    new_doc['views'] = get_valid_salt_views()
    new_doc['language'] = "javascript"

    # Make the request to update the design doc.
    _response = _request("PUT",
                         options['url'] + options['db'] + "/_design/salt",
                         "application/json",
                         salt.utils.json.dumps(new_doc))
    if 'error' in _response:
        log.warning('Unable to set the salt design document: %s',
                    _response['error'])
        return False
    return True
[ "def", "set_salt_view", "(", ")", ":", "options", "=", "_get_options", "(", "ret", "=", "None", ")", "# Create the new object that we will shove in as the design doc.", "new_doc", "=", "{", "}", "new_doc", "[", "'views'", "]", "=", "get_valid_salt_views", "(", ")", "new_doc", "[", "'language'", "]", "=", "\"javascript\"", "# Make the request to update the design doc.", "_response", "=", "_request", "(", "\"PUT\"", ",", "options", "[", "'url'", "]", "+", "options", "[", "'db'", "]", "+", "\"/_design/salt\"", ",", "\"application/json\"", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "new_doc", ")", ")", "if", "'error'", "in", "_response", ":", "log", ".", "warning", "(", "'Unable to set the salt design document: %s'", ",", "_response", "[", "'error'", "]", ")", "return", "False", "return", "True" ]
34.52381
23.285714
def str_index(x, sub, start=0, end=None):
    """Returns the lowest indices in each string in a column, where the
    provided substring is fully contained within a sample. If the substring
    is not found, -1 is returned. It is the same as `str.find`.

    :param str sub: A substring to be found in the samples
    :param int start:
    :param int end:
    :returns: an expression containing the lowest indices specifying the
        start of the substring.

    Example:

    >>> import vaex
    >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
    >>> df = vaex.from_arrays(text=text)
    >>> df
      #  text
      0  Something
      1  very pretty
      2  is coming
      3  our
      4  way.

    >>> df.text.str.index(sub="et")
    Expression = str_find(text, sub='et')
    Length: 5 dtype: int64 (expression)
    -----------------------------------
    0   3
    1   7
    2  -1
    3  -1
    4  -1
    """
    return str_find(x, sub, start, end)
[ "def", "str_index", "(", "x", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "str_find", "(", "x", ",", "sub", ",", "start", ",", "end", ")" ]
28.484848
22.515152
def removeChild(self, child):
    """
    Remove a child from this element.

    The child element is returned, and its parentNode element is reset.
    If the child will not be used any more, you should call its unlink()
    method to promote garbage collection.
    """
    for i, childNode in enumerate(self.childNodes):
        if childNode is child:
            del self.childNodes[i]
            child.parentNode = None
            return child
    raise ValueError(child)
[ "def", "removeChild", "(", "self", ",", "child", ")", ":", "for", "i", ",", "childNode", "in", "enumerate", "(", "self", ".", "childNodes", ")", ":", "if", "childNode", "is", "child", ":", "del", "self", ".", "childNodes", "[", "i", "]", "child", ".", "parentNode", "=", "None", "return", "child", "raise", "ValueError", "(", "child", ")" ]
32.538462
12.076923
def report(times=None, include_itrs=True, include_stats=True,
           delim_mode=False, format_options=None):
    """
    Produce a formatted report of the current timing data.

    Notes:
        When reporting a collection of parallel subdivisions, only the one
        with the greatest total time is reported on, and the rest are
        ignored (no branching). To compare parallel subdivisions use
        compare().

    Args:
        times (Times, optional): Times object to report on. If not
            provided, uses current root timer.
        include_itrs (bool, optional): Display individual iteration times.
        include_stats (bool, optional): Display iteration statistics.
        delim_mode (bool, optional): If True, format for spreadsheet.
        format_options (dict, optional): Formatting options, see below.

    Formatting Keywords & Defaults:
        Human-Readable Mode
            - 'stamp_name_width': 20
            - 'itr_tab_width': 2
            - 'itr_num_width': 6
            - 'itr_name_width': 12
            - 'indent_symbol': '  ' (two spaces)
            - 'parallel_symbol': '(par)'
        Delimited Mode
            - 'delimiter': '\t' (tab)
            - 'ident_symbol': '+'
            - 'parallel_symbol': '(par)'

    Returns:
        str: Timing data report as formatted string.

    Raises:
        TypeError: If 'times' param is used and value is not a Times object.
    """
    if times is None:
        if f.root.stopped:
            return report_loc.report(f.root.times,
                                     include_itrs,
                                     include_stats,
                                     delim_mode,
                                     format_options)
        else:
            t = timer()
            rep = report_loc.report(collapse.collapse_times(),
                                    include_itrs,
                                    include_stats,
                                    delim_mode,
                                    format_options,
                                    timer_state='running')
            f.root.self_cut += timer() - t
            return rep
    else:
        if not isinstance(times, Times):
            raise TypeError("Expected Times instance for param 'times' (default is root).")
        return report_loc.report(times, include_itrs, include_stats,
                                 delim_mode, format_options)
[ "def", "report", "(", "times", "=", "None", ",", "include_itrs", "=", "True", ",", "include_stats", "=", "True", ",", "delim_mode", "=", "False", ",", "format_options", "=", "None", ")", ":", "if", "times", "is", "None", ":", "if", "f", ".", "root", ".", "stopped", ":", "return", "report_loc", ".", "report", "(", "f", ".", "root", ".", "times", ",", "include_itrs", ",", "include_stats", ",", "delim_mode", ",", "format_options", ")", "else", ":", "t", "=", "timer", "(", ")", "rep", "=", "report_loc", ".", "report", "(", "collapse", ".", "collapse_times", "(", ")", ",", "include_itrs", ",", "include_stats", ",", "delim_mode", ",", "format_options", ",", "timer_state", "=", "'running'", ")", "f", ".", "root", ".", "self_cut", "+=", "timer", "(", ")", "-", "t", "return", "rep", "else", ":", "if", "not", "isinstance", "(", "times", ",", "Times", ")", ":", "raise", "TypeError", "(", "\"Expected Times instance for param 'times' (default is root).\"", ")", "return", "report_loc", ".", "report", "(", "times", ",", "include_itrs", ",", "include_stats", ",", "delim_mode", ",", "format_options", ")" ]
38.384615
17.246154
def start_head_processes(self):
    """Start head processes on the node."""
    logger.info(
        "Process STDOUT and STDERR is being redirected to {}.".format(
            self._logs_dir))
    assert self._redis_address is None
    # If this is the head node, start the relevant head node processes.
    self.start_redis()
    self.start_monitor()
    self.start_raylet_monitor()
    # The dashboard is Python3.x only.
    if PY3 and self._ray_params.include_webui:
        self.start_dashboard()
[ "def", "start_head_processes", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Process STDOUT and STDERR is being redirected to {}.\"", ".", "format", "(", "self", ".", "_logs_dir", ")", ")", "assert", "self", ".", "_redis_address", "is", "None", "# If this is the head node, start the relevant head node processes.", "self", ".", "start_redis", "(", ")", "self", ".", "start_monitor", "(", ")", "self", ".", "start_raylet_monitor", "(", ")", "# The dashboard is Python3.x only.", "if", "PY3", "and", "self", ".", "_ray_params", ".", "include_webui", ":", "self", ".", "start_dashboard", "(", ")" ]
41.230769
12.076923
def filter_exclude_dicts(filter_dict=None, exclude_dict=None, name='acctno',
                         values=[], swap=False):
    """Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values

    The last, critical step in generating Django ORM kwargs dicts from a
    natural language query. Properly parses "NOT" unary operators on each
    field value in the list. Assumes the lists have been pre-processed to
    consolidate NOTs and normalize values and syntax.

    Examples:
        >>> filter_exclude_dicts(name='num', values=['NOT 1', '2', '3', 'NOT 4']
        ...     ) == ({'num__in': ['2', '3']}, {'num__in': ['1', '4']})
        True
    """
    filter_dict = filter_dict or {}
    exclude_dict = exclude_dict or {}
    if not name.endswith('__in'):
        name += '__in'
    filter_dict[name], exclude_dict[name] = [], []
    for v in values:
        # "NOT " means switch from include (filter) to exclude for that one account number
        if v.startswith('NOT '):
            exclude_dict[name] += [v[4:]]
        else:
            filter_dict[name] += [v]
    if swap:
        return exclude_dict, filter_dict
    return filter_dict, exclude_dict
[ "def", "filter_exclude_dicts", "(", "filter_dict", "=", "None", ",", "exclude_dict", "=", "None", ",", "name", "=", "'acctno'", ",", "values", "=", "[", "]", ",", "swap", "=", "False", ")", ":", "filter_dict", "=", "filter_dict", "or", "{", "}", "exclude_dict", "=", "exclude_dict", "or", "{", "}", "if", "not", "name", ".", "endswith", "(", "'__in'", ")", ":", "name", "+=", "'__in'", "filter_dict", "[", "name", "]", ",", "exclude_dict", "[", "name", "]", "=", "[", "]", ",", "[", "]", "for", "v", "in", "values", ":", "# \"NOT \" means switch from include (filter) to exclude for that one account number", "if", "v", ".", "startswith", "(", "'NOT '", ")", ":", "exclude_dict", "[", "name", "]", "+=", "[", "v", "[", "4", ":", "]", "]", "else", ":", "filter_dict", "[", "name", "]", "+=", "[", "v", "]", "if", "swap", ":", "return", "exclude_dict", ",", "filter_dict", "return", "filter_dict", ",", "exclude_dict" ]
40.392857
24.035714
def add_leverage(self):
    """ Adds leverage term to the model

    Returns
    ----------
    None (changes instance attributes)
    """
    if self.leverage is True:
        pass
    else:
        self.leverage = True
        self.z_no += 1
        self.latent_variables.z_list.pop()
        self.latent_variables.z_list.pop()
        self.latent_variables.z_list.pop()
        self.latent_variables.z_list.pop()
        self.latent_variables.add_z('Leverage Term',
                                    fam.Flat(transform=None),
                                    fam.Normal(0, 3))
        self.latent_variables.add_z('Skewness',
                                    fam.Flat(transform='exp'),
                                    fam.Normal(0, 3))
        self.latent_variables.add_z('v',
                                    fam.Flat(transform='exp'),
                                    fam.Normal(0, 3))
        self.latent_variables.add_z('Returns Constant',
                                    fam.Normal(0, 3, transform=None),
                                    fam.Normal(0, 3))
        self.latent_variables.add_z('GARCH-M',
                                    fam.Normal(0, 3, transform=None),
                                    fam.Normal(0, 3))
        self.latent_variables.z_list[-3].start = 2.0
[ "def", "add_leverage", "(", "self", ")", ":", "if", "self", ".", "leverage", "is", "True", ":", "pass", "else", ":", "self", ".", "leverage", "=", "True", "self", ".", "z_no", "+=", "1", "self", ".", "latent_variables", ".", "z_list", ".", "pop", "(", ")", "self", ".", "latent_variables", ".", "z_list", ".", "pop", "(", ")", "self", ".", "latent_variables", ".", "z_list", ".", "pop", "(", ")", "self", ".", "latent_variables", ".", "z_list", ".", "pop", "(", ")", "self", ".", "latent_variables", ".", "add_z", "(", "'Leverage Term'", ",", "fam", ".", "Flat", "(", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "self", ".", "latent_variables", ".", "add_z", "(", "'Skewness'", ",", "fam", ".", "Flat", "(", "transform", "=", "'exp'", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "self", ".", "latent_variables", ".", "add_z", "(", "'v'", ",", "fam", ".", "Flat", "(", "transform", "=", "'exp'", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "self", ".", "latent_variables", ".", "add_z", "(", "'Returns Constant'", ",", "fam", ".", "Normal", "(", "0", ",", "3", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "self", ".", "latent_variables", ".", "add_z", "(", "'GARCH-M'", ",", "fam", ".", "Normal", "(", "0", ",", "3", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "self", ".", "latent_variables", ".", "z_list", "[", "-", "3", "]", ".", "start", "=", "2.0" ]
44.304348
24.347826
def _dynamic_operation(self, map_obj):
    """
    Generate function to dynamically apply the operation.
    Wraps an existing HoloMap or DynamicMap.
    """
    if not isinstance(map_obj, DynamicMap):
        def dynamic_operation(*key, **kwargs):
            kwargs = dict(self._eval_kwargs(), **kwargs)
            obj = map_obj[key] if isinstance(map_obj, HoloMap) else map_obj
            return self._process(obj, key, kwargs)
    else:
        def dynamic_operation(*key, **kwargs):
            kwargs = dict(self._eval_kwargs(), **kwargs)
            return self._process(map_obj[key], key, kwargs)
    if isinstance(self.p.operation, Operation):
        return OperationCallable(dynamic_operation, inputs=[map_obj],
                                 link_inputs=self.p.link_inputs,
                                 operation=self.p.operation)
    else:
        return Callable(dynamic_operation, inputs=[map_obj],
                        link_inputs=self.p.link_inputs)
[ "def", "_dynamic_operation", "(", "self", ",", "map_obj", ")", ":", "if", "not", "isinstance", "(", "map_obj", ",", "DynamicMap", ")", ":", "def", "dynamic_operation", "(", "*", "key", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "dict", "(", "self", ".", "_eval_kwargs", "(", ")", ",", "*", "*", "kwargs", ")", "obj", "=", "map_obj", "[", "key", "]", "if", "isinstance", "(", "map_obj", ",", "HoloMap", ")", "else", "map_obj", "return", "self", ".", "_process", "(", "obj", ",", "key", ",", "kwargs", ")", "else", ":", "def", "dynamic_operation", "(", "*", "key", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "dict", "(", "self", ".", "_eval_kwargs", "(", ")", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_process", "(", "map_obj", "[", "key", "]", ",", "key", ",", "kwargs", ")", "if", "isinstance", "(", "self", ".", "p", ".", "operation", ",", "Operation", ")", ":", "return", "OperationCallable", "(", "dynamic_operation", ",", "inputs", "=", "[", "map_obj", "]", ",", "link_inputs", "=", "self", ".", "p", ".", "link_inputs", ",", "operation", "=", "self", ".", "p", ".", "operation", ")", "else", ":", "return", "Callable", "(", "dynamic_operation", ",", "inputs", "=", "[", "map_obj", "]", ",", "link_inputs", "=", "self", ".", "p", ".", "link_inputs", ")" ]
49.380952
17.47619
def connect(
        creator, maxusage=None, setsession=None,
        failures=None, ping=1, closeable=True, *args, **kwargs):
    """A tough version of the connection constructor of a DB-API 2 module.

    creator: either an arbitrary function returning new DB-API 2
        compliant connection objects or a DB-API 2 compliant database module
    maxusage: maximum usage limit for the underlying DB-API 2 connection
        (number of database operations, 0 or None means unlimited usage)
        callproc(), execute() and executemany() count as one operation.
        When the limit is reached, the connection is automatically reset.
    setsession: an optional list of SQL commands that may serve to prepare
        the session, e.g. ["set datestyle to german", "set time zone mez"]
    failures: an optional exception class or a tuple of exception classes
        for which the failover mechanism shall be applied,
        if the default (OperationalError, InternalError) is not adequate
    ping: determines when the connection should be checked with ping()
        (0 = None = never, 1 = default = when _ping_check() is called,
        2 = whenever a cursor is created, 4 = when a query is executed,
        7 = always, and all other bit combinations of these values)
    closeable: if this is set to false, then closing the connection will
        be silently ignored, but by default the connection can be closed
    args, kwargs: the parameters that shall be passed to the creator
        function or the connection constructor of the DB-API 2 module
    """
    return SteadyDBConnection(
        creator, maxusage, setsession, failures, ping, closeable,
        *args, **kwargs)
[ "def", "connect", "(", "creator", ",", "maxusage", "=", "None", ",", "setsession", "=", "None", ",", "failures", "=", "None", ",", "ping", "=", "1", ",", "closeable", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "SteadyDBConnection", "(", "creator", ",", "maxusage", ",", "setsession", ",", "failures", ",", "ping", ",", "closeable", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
57.310345
25.551724
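A minimal usage sketch, assuming the `connect` above (DBUtils-style SteadyDB) is importable and using the stdlib `sqlite3` module as the DB-API 2 creator; the keyword argument `database` is simply forwarded to `sqlite3.connect`:

import sqlite3

db = connect(sqlite3, maxusage=1000, database=':memory:')  # hardened connection
cur = db.cursor()
cur.execute('select 1')
print(cur.fetchone())  # (1,)
db.close()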
def _get_predictions(self, data, break_ties="random", return_probs=False,
                     **kwargs):
    """Computes predictions in batch, given a labeled dataset

    Args:
        data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):
            X: The input for the predict method
            Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels
                in {1,...,k}
        break_ties: How to break ties when making predictions
        return_probs: Return the predicted probabilities as well

    Returns:
        Y_p: A Tensor of predictions
        Y: A Tensor of labels
        [Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
    """
    data_loader = self._create_data_loader(data)
    Y_p = []
    Y = []
    Y_s = []

    # Do batch evaluation by default, getting the predictions and labels
    for batch_num, data in enumerate(data_loader):
        Xb, Yb = data
        Y.append(self._to_numpy(Yb))

        # Optionally move to device
        if self.config["device"] != "cpu":
            Xb = place_on_gpu(Xb)

        # Append predictions and labels from DataLoader
        Y_pb, Y_sb = self.predict(
            Xb, break_ties=break_ties, return_probs=True, **kwargs
        )
        Y_p.append(self._to_numpy(Y_pb))
        Y_s.append(self._to_numpy(Y_sb))

    Y_p, Y, Y_s = map(self._stack_batches, [Y_p, Y, Y_s])
    if return_probs:
        return Y_p, Y, Y_s
    else:
        return Y_p, Y
[ "def", "_get_predictions", "(", "self", ",", "data", ",", "break_ties", "=", "\"random\"", ",", "return_probs", "=", "False", ",", "*", "*", "kwargs", ")", ":", "data_loader", "=", "self", ".", "_create_data_loader", "(", "data", ")", "Y_p", "=", "[", "]", "Y", "=", "[", "]", "Y_s", "=", "[", "]", "# Do batch evaluation by default, getting the predictions and labels", "for", "batch_num", ",", "data", "in", "enumerate", "(", "data_loader", ")", ":", "Xb", ",", "Yb", "=", "data", "Y", ".", "append", "(", "self", ".", "_to_numpy", "(", "Yb", ")", ")", "# Optionally move to device", "if", "self", ".", "config", "[", "\"device\"", "]", "!=", "\"cpu\"", ":", "Xb", "=", "place_on_gpu", "(", "Xb", ")", "# Append predictions and labels from DataLoader", "Y_pb", ",", "Y_sb", "=", "self", ".", "predict", "(", "Xb", ",", "break_ties", "=", "break_ties", ",", "return_probs", "=", "True", ",", "*", "*", "kwargs", ")", "Y_p", ".", "append", "(", "self", ".", "_to_numpy", "(", "Y_pb", ")", ")", "Y_s", ".", "append", "(", "self", ".", "_to_numpy", "(", "Y_sb", ")", ")", "Y_p", ",", "Y", ",", "Y_s", "=", "map", "(", "self", ".", "_stack_batches", ",", "[", "Y_p", ",", "Y", ",", "Y_s", "]", ")", "if", "return_probs", ":", "return", "Y_p", ",", "Y", ",", "Y_s", "else", ":", "return", "Y_p", ",", "Y" ]
37.756098
20.097561
def IMUL(cpu, *operands):
    """
    Signed multiply.

    Performs a signed multiplication of two operands. This instruction has
    three forms, depending on the number of operands.

        - One-operand form. This form is identical to that used by the MUL
          instruction. Here, the source operand (in a general-purpose
          register or memory location) is multiplied by the value in the
          AL, AX, or EAX register (depending on the operand size) and the
          product is stored in the AX, DX:AX, or EDX:EAX registers,
          respectively.
        - Two-operand form. With this form the destination operand (the
          first operand) is multiplied by the source operand (second
          operand). The destination operand is a general-purpose register
          and the source operand is an immediate value, a general-purpose
          register, or a memory location. The product is then stored in the
          destination operand location.
        - Three-operand form. This form requires a destination operand (the
          first operand) and two source operands (the second and the third
          operands). Here, the first source operand (which can be a
          general-purpose register or a memory location) is multiplied by
          the second source operand (an immediate value). The product is
          then stored in the destination operand (a general-purpose
          register).

    When an immediate value is used as an operand, it is sign-extended to
    the length of the destination operand format. The CF and OF flags are
    set when significant bits are carried into the upper half of the
    result. The CF and OF flags are cleared when the result fits exactly
    in the lower half of the result. The three forms of the IMUL
    instruction are similar in that the length of the product is
    calculated to twice the length of the operands. With the one-operand
    form, the product is stored exactly in the destination. With the two-
    and three-operand forms, however, the result is truncated to the
    length of the destination before it is stored in the destination
    register. Because of this truncation, the CF or OF flag should be
    tested to ensure that no significant bits are lost. The two- and
    three-operand forms may also be used with unsigned operands because
    the lower half of the product is the same regardless if the operands
    are signed or unsigned. The CF and OF flags, however, cannot be used
    to determine if the upper half of the result is non-zero::

            IF (NumberOfOperands == 1)
            THEN
                IF (OperandSize == 8)
                THEN
                    AX = AL * SRC (* Signed multiplication *)
                    IF AL == AX
                    THEN CF = 0; OF = 0;
                    ELSE CF = 1; OF = 1;
                    FI;
                ELSE
                    IF OperandSize == 16
                    THEN
                        DX:AX = AX * SRC (* Signed multiplication *)
                        IF sign_extend_to_32 (AX) == DX:AX
                        THEN CF = 0; OF = 0;
                        ELSE CF = 1; OF = 1;
                        FI;
                    ELSE
                        IF OperandSize == 32
                        THEN
                            EDX:EAX = EAX * SRC (* Signed multiplication *)
                            IF EAX == EDX:EAX
                            THEN CF = 0; OF = 0;
                            ELSE CF = 1; OF = 1;
                            FI;
                        ELSE (* OperandSize = 64 *)
                            RDX:RAX = RAX * SRC (* Signed multiplication *)
                            IF RAX == RDX:RAX
                            THEN CF = 0; OF = 0;
                            ELSE CF = 1; OF = 1;
                            FI;
                        FI;
                    FI;
                FI;
            ELSE
                IF (NumberOfOperands = 2)
                THEN
                    temp = DEST * SRC (* Signed multiplication; temp is double DEST size *)
                    DEST = DEST * SRC (* Signed multiplication *)
                    IF temp != DEST
                    THEN CF = 1; OF = 1;
                    ELSE CF = 0; OF = 0;
                    FI;
                ELSE (* NumberOfOperands = 3 *)
                    DEST = SRC1 * SRC2 (* Signed multiplication *)
                    temp = SRC1 * SRC2 (* Signed multiplication; temp is double SRC1 size *)
                    IF temp != DEST
                    THEN CF = 1; OF = 1;
                    ELSE CF = 0; OF = 0;
                    FI;
                FI;
            FI;

    :param cpu: current CPU.
    :param operands: variable list of operands.
    """
    dest = operands[0]
    OperandSize = dest.size
    reg_name_h = {8: 'AH', 16: 'DX', 32: 'EDX', 64: 'RDX'}[OperandSize]
    reg_name_l = {8: 'AL', 16: 'AX', 32: 'EAX', 64: 'RAX'}[OperandSize]

    arg0 = dest.read()
    arg1 = None
    arg2 = None
    res = None
    if len(operands) == 1:
        arg1 = cpu.read_register(reg_name_l)
        temp = (Operators.SEXTEND(arg0, OperandSize, OperandSize * 2) *
                Operators.SEXTEND(arg1, OperandSize, OperandSize * 2))
        temp = temp & ((1 << (OperandSize * 2)) - 1)
        cpu.write_register(reg_name_l,
                           Operators.EXTRACT(temp, 0, OperandSize))
        cpu.write_register(reg_name_h,
                           Operators.EXTRACT(temp, OperandSize, OperandSize))
        res = Operators.EXTRACT(temp, 0, OperandSize)
    elif len(operands) == 2:
        arg1 = operands[1].read()
        arg1 = Operators.SEXTEND(arg1, OperandSize, OperandSize * 2)
        temp = Operators.SEXTEND(arg0, OperandSize, OperandSize * 2) * arg1
        temp = temp & ((1 << (OperandSize * 2)) - 1)
        res = dest.write(Operators.EXTRACT(temp, 0, OperandSize))
    else:
        arg1 = operands[1].read()
        arg2 = operands[2].read()
        temp = (Operators.SEXTEND(arg1, OperandSize, OperandSize * 2) *
                Operators.SEXTEND(arg2, operands[2].size, OperandSize * 2))
        temp = temp & ((1 << (OperandSize * 2)) - 1)
        res = dest.write(Operators.EXTRACT(temp, 0, OperandSize))
    cpu.CF = (Operators.SEXTEND(res, OperandSize, OperandSize * 2) != temp)
    cpu.OF = cpu.CF
[ "def", "IMUL", "(", "cpu", ",", "*", "operands", ")", ":", "dest", "=", "operands", "[", "0", "]", "OperandSize", "=", "dest", ".", "size", "reg_name_h", "=", "{", "8", ":", "'AH'", ",", "16", ":", "'DX'", ",", "32", ":", "'EDX'", ",", "64", ":", "'RDX'", "}", "[", "OperandSize", "]", "reg_name_l", "=", "{", "8", ":", "'AL'", ",", "16", ":", "'AX'", ",", "32", ":", "'EAX'", ",", "64", ":", "'RAX'", "}", "[", "OperandSize", "]", "arg0", "=", "dest", ".", "read", "(", ")", "arg1", "=", "None", "arg2", "=", "None", "res", "=", "None", "if", "len", "(", "operands", ")", "==", "1", ":", "arg1", "=", "cpu", ".", "read_register", "(", "reg_name_l", ")", "temp", "=", "(", "Operators", ".", "SEXTEND", "(", "arg0", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", "*", "Operators", ".", "SEXTEND", "(", "arg1", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", ")", "temp", "=", "temp", "&", "(", "(", "1", "<<", "(", "OperandSize", "*", "2", ")", ")", "-", "1", ")", "cpu", ".", "write_register", "(", "reg_name_l", ",", "Operators", ".", "EXTRACT", "(", "temp", ",", "0", ",", "OperandSize", ")", ")", "cpu", ".", "write_register", "(", "reg_name_h", ",", "Operators", ".", "EXTRACT", "(", "temp", ",", "OperandSize", ",", "OperandSize", ")", ")", "res", "=", "Operators", ".", "EXTRACT", "(", "temp", ",", "0", ",", "OperandSize", ")", "elif", "len", "(", "operands", ")", "==", "2", ":", "arg1", "=", "operands", "[", "1", "]", ".", "read", "(", ")", "arg1", "=", "Operators", ".", "SEXTEND", "(", "arg1", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", "temp", "=", "Operators", ".", "SEXTEND", "(", "arg0", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", "*", "arg1", "temp", "=", "temp", "&", "(", "(", "1", "<<", "(", "OperandSize", "*", "2", ")", ")", "-", "1", ")", "res", "=", "dest", ".", "write", "(", "Operators", ".", "EXTRACT", "(", "temp", ",", "0", ",", "OperandSize", ")", ")", "else", ":", "arg1", "=", "operands", "[", "1", "]", ".", "read", "(", ")", "arg2", "=", "operands", "[", "2", "]", ".", "read", "(", ")", "temp", "=", "(", "Operators", ".", "SEXTEND", "(", "arg1", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", "*", "Operators", ".", "SEXTEND", "(", "arg2", ",", "operands", "[", "2", "]", ".", "size", ",", "OperandSize", "*", "2", ")", ")", "temp", "=", "temp", "&", "(", "(", "1", "<<", "(", "OperandSize", "*", "2", ")", ")", "-", "1", ")", "res", "=", "dest", ".", "write", "(", "Operators", ".", "EXTRACT", "(", "temp", ",", "0", ",", "OperandSize", ")", ")", "cpu", ".", "CF", "=", "(", "Operators", ".", "SEXTEND", "(", "res", ",", "OperandSize", ",", "OperandSize", "*", "2", ")", "!=", "temp", ")", "cpu", ".", "OF", "=", "cpu", ".", "CF" ]
45.573427
21.755245
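The CF/OF rule above boils down to: compute the double-width product, truncate it, and set the flags when the sign-extended truncation no longer equals the full product. A self-contained sketch of the one-operand, 8-bit case; `sextend` here is a hypothetical stand-in for `Operators.SEXTEND` (sign-extend a `size`-bit value to `new_size` bits):

def sextend(value, size, new_size):
    """Sign-extend a size-bit value to new_size bits."""
    if value & (1 << (size - 1)):
        value |= ((1 << new_size) - 1) ^ ((1 << size) - 1)
    return value

def imul8(al, src):
    """8-bit IMUL: return the 16-bit product and the CF/OF flag."""
    temp = (sextend(al, 8, 16) * sextend(src, 8, 16)) & 0xFFFF
    res = temp & 0xFF
    cf = sextend(res, 8, 16) != temp  # product no longer fits in AL
    return temp, cf

print(imul8(0x02, 0x03))  # (6, False): fits in 8 bits
print(imul8(0x40, 0x40))  # (4096, True): upper half is significant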
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None,
         numPartitions=None, predicates=None, properties=None):
    """
    Construct a :class:`DataFrame` representing the database table named
    ``table`` accessible via JDBC URL ``url`` and connection ``properties``.

    Partitions of the table will be retrieved in parallel if either
    ``column`` or ``predicates`` is specified. ``lowerBound``,
    ``upperBound`` and ``numPartitions`` are needed when ``column`` is
    specified.

    If both ``column`` and ``predicates`` are specified, ``column`` will
    be used.

    .. note:: Don't create too many partitions in parallel on a large
        cluster; otherwise Spark might crash your external database systems.

    :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
    :param table: the name of the table
    :param column: the name of an integer column that will be used for
        partitioning; if this parameter is specified, then
        ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound``
        (exclusive) will form partition strides for generated WHERE clause
        expressions used to split the column ``column`` evenly
    :param lowerBound: the minimum value of ``column`` used to decide
        partition stride
    :param upperBound: the maximum value of ``column`` used to decide
        partition stride
    :param numPartitions: the number of partitions
    :param predicates: a list of expressions suitable for inclusion in
        WHERE clauses; each one defines one partition of the
        :class:`DataFrame`
    :param properties: a dictionary of JDBC database connection arguments.
        Normally at least properties "user" and "password" with their
        corresponding values. For example
        { 'user' : 'SYSTEM', 'password' : 'mypassword' }
    :return: a DataFrame
    """
    if properties is None:
        properties = dict()
    jprop = JavaClass("java.util.Properties",
                      self._spark._sc._gateway._gateway_client)()
    for k in properties:
        jprop.setProperty(k, properties[k])
    if column is not None:
        assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
        assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
        assert numPartitions is not None, \
            "numPartitions can not be None when ``column`` is specified"
        return self._df(self._jreader.jdbc(url, table, column,
                                           int(lowerBound), int(upperBound),
                                           int(numPartitions), jprop))
    if predicates is not None:
        gateway = self._spark._sc._gateway
        jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String,
                                     predicates)
        return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
    return self._df(self._jreader.jdbc(url, table, jprop))
[ "def", "jdbc", "(", "self", ",", "url", ",", "table", ",", "column", "=", "None", ",", "lowerBound", "=", "None", ",", "upperBound", "=", "None", ",", "numPartitions", "=", "None", ",", "predicates", "=", "None", ",", "properties", "=", "None", ")", ":", "if", "properties", "is", "None", ":", "properties", "=", "dict", "(", ")", "jprop", "=", "JavaClass", "(", "\"java.util.Properties\"", ",", "self", ".", "_spark", ".", "_sc", ".", "_gateway", ".", "_gateway_client", ")", "(", ")", "for", "k", "in", "properties", ":", "jprop", ".", "setProperty", "(", "k", ",", "properties", "[", "k", "]", ")", "if", "column", "is", "not", "None", ":", "assert", "lowerBound", "is", "not", "None", ",", "\"lowerBound can not be None when ``column`` is specified\"", "assert", "upperBound", "is", "not", "None", ",", "\"upperBound can not be None when ``column`` is specified\"", "assert", "numPartitions", "is", "not", "None", ",", "\"numPartitions can not be None when ``column`` is specified\"", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "column", ",", "int", "(", "lowerBound", ")", ",", "int", "(", "upperBound", ")", ",", "int", "(", "numPartitions", ")", ",", "jprop", ")", ")", "if", "predicates", "is", "not", "None", ":", "gateway", "=", "self", ".", "_spark", ".", "_sc", ".", "_gateway", "jpredicates", "=", "utils", ".", "toJArray", "(", "gateway", ",", "gateway", ".", "jvm", ".", "java", ".", "lang", ".", "String", ",", "predicates", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jpredicates", ",", "jprop", ")", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jprop", ")", ")" ]
62.612245
32.734694
def stitch(images):
    """Stitch regular spaced images.

    Parameters
    ----------
    images : ImageCollection or list of tuple(path, row, column)
        Each image-tuple should contain path, row and column. Row 0,
        column 0 is top left image.

        Example:

        >>> images = [('1.png', 0, 0), ('2.png', 0, 1)]

    Returns
    -------
    tuple (stitched, offset)
        Stitched image and registered offset (y, x).
    """
    if type(images) != ImageCollection:
        images = ImageCollection(images)
    calc_translations_parallel(images)
    _translation_warn(images)

    yoffset, xoffset = images.median_translation()
    if xoffset != yoffset:
        warn('yoffset != xoffset: %s != %s' % (yoffset, xoffset))

    # assume all images have the same shape
    y, x = imread(images[0].path).shape

    height = y*len(images.rows) + yoffset*(len(images.rows)-1)
    width = x*len(images.cols) + xoffset*(len(images.cols)-1)

    # last dimension is number of images on top of each other
    merged = np.zeros((height, width, 2), dtype=np.int)

    for image in images:
        r, c = image.row, image.col
        mask = _merge_slice(r, c, y, x, yoffset, xoffset)
        # last dim is used for averaging the seam
        img = _add_ones_dim(imread(image.path))
        merged[mask] += img

    # average seam, possible improvement: use gradient
    merged[..., 0] /= merged[..., 1]

    return merged[..., 0].astype(np.uint8), (yoffset, xoffset)
[ "def", "stitch", "(", "images", ")", ":", "if", "type", "(", "images", ")", "!=", "ImageCollection", ":", "images", "=", "ImageCollection", "(", "images", ")", "calc_translations_parallel", "(", "images", ")", "_translation_warn", "(", "images", ")", "yoffset", ",", "xoffset", "=", "images", ".", "median_translation", "(", ")", "if", "xoffset", "!=", "yoffset", ":", "warn", "(", "'yoffset != xoffset: %s != %s'", "%", "(", "yoffset", ",", "xoffset", ")", ")", "# assume all images have the same shape", "y", ",", "x", "=", "imread", "(", "images", "[", "0", "]", ".", "path", ")", ".", "shape", "height", "=", "y", "*", "len", "(", "images", ".", "rows", ")", "+", "yoffset", "*", "(", "len", "(", "images", ".", "rows", ")", "-", "1", ")", "width", "=", "x", "*", "len", "(", "images", ".", "cols", ")", "+", "xoffset", "*", "(", "len", "(", "images", ".", "cols", ")", "-", "1", ")", "# last dimension is number of images on top of each other", "merged", "=", "np", ".", "zeros", "(", "(", "height", ",", "width", ",", "2", ")", ",", "dtype", "=", "np", ".", "int", ")", "for", "image", "in", "images", ":", "r", ",", "c", "=", "image", ".", "row", ",", "image", ".", "col", "mask", "=", "_merge_slice", "(", "r", ",", "c", ",", "y", ",", "x", ",", "yoffset", ",", "xoffset", ")", "# last dim is used for averaging the seam", "img", "=", "_add_ones_dim", "(", "imread", "(", "image", ".", "path", ")", ")", "merged", "[", "mask", "]", "+=", "img", "# average seam, possible improvement: use gradient", "merged", "[", "...", ",", "0", "]", "/=", "merged", "[", "...", ",", "1", "]", "return", "merged", "[", "...", ",", "0", "]", ".", "astype", "(", "np", ".", "uint8", ")", ",", "(", "yoffset", ",", "xoffset", ")" ]
31.755556
19.2
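A usage sketch for `stitch` following the docstring's tuple format; the file names are hypothetical, and `ImageCollection` and the translation helpers are assumed to be importable from the same module:

# 2x2 grid of tiles; row 0, column 0 is the top-left image.
images = [('r0c0.png', 0, 0), ('r0c1.png', 0, 1),   # hypothetical paths
          ('r1c0.png', 1, 0), ('r1c1.png', 1, 1)]
stitched, (yoffset, xoffset) = stitch(images)
print(stitched.shape, yoffset, xoffset)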
def list_containers(active=True, defined=True, as_object=False, config_path=None): """ List the containers on the system. """ if config_path: if not os.path.exists(config_path): return tuple() try: entries = _lxc.list_containers(active=active, defined=defined, config_path=config_path) except ValueError: return tuple() else: try: entries = _lxc.list_containers(active=active, defined=defined) except ValueError: return tuple() if as_object: return tuple([Container(name, config_path) for name in entries]) else: return entries
[ "def", "list_containers", "(", "active", "=", "True", ",", "defined", "=", "True", ",", "as_object", "=", "False", ",", "config_path", "=", "None", ")", ":", "if", "config_path", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "return", "tuple", "(", ")", "try", ":", "entries", "=", "_lxc", ".", "list_containers", "(", "active", "=", "active", ",", "defined", "=", "defined", ",", "config_path", "=", "config_path", ")", "except", "ValueError", ":", "return", "tuple", "(", ")", "else", ":", "try", ":", "entries", "=", "_lxc", ".", "list_containers", "(", "active", "=", "active", ",", "defined", "=", "defined", ")", "except", "ValueError", ":", "return", "tuple", "(", ")", "if", "as_object", ":", "return", "tuple", "(", "[", "Container", "(", "name", ",", "config_path", ")", "for", "name", "in", "entries", "]", ")", "else", ":", "return", "entries" ]
29.875
20.125
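`list_containers` is a module-level helper from the python-lxc bindings. A hedged usage sketch, assuming the `lxc` module is installed and the default config path is readable:

import lxc

# Plain names of all containers (active and defined).
names = lxc.list_containers()

# Container objects for the running containers only.
for container in lxc.list_containers(defined=False, as_object=True):
    print(container.name, container.state)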
def list_users(self, **kwargs): """List all users in organisation. :param int limit: The number of users to retrieve :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get users after/starting at given user ID :param dict filters: Dictionary of filters to apply: str status (eq) :returns: a list of :py:class:`User` objects :rtype: PaginatedResponse """ kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, User) api = self._get_api(iam.AccountAdminApi) return PaginatedResponse(api.get_all_users, lwrap_type=User, **kwargs)
[ "def", "list_users", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_verify_sort_options", "(", "kwargs", ")", "kwargs", "=", "self", ".", "_verify_filters", "(", "kwargs", ",", "User", ")", "api", "=", "self", ".", "_get_api", "(", "iam", ".", "AccountAdminApi", ")", "return", "PaginatedResponse", "(", "api", ".", "get_all_users", ",", "lwrap_type", "=", "User", ",", "*", "*", "kwargs", ")" ]
48.714286
18.642857
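A sketch of calling `list_users` through an account-management client; the client class name is an assumption, and the filter key follows the docstring's note that only `status` is filterable:

api = AccountManagementAPI()          # hypothetical client exposing list_users
for user in api.list_users(limit=10, order='asc', filters={'status': 'ACTIVE'}):
    print(user)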
def check_if_alive(self):
    """Check if the content is available on the host server.
    Returns `True` if available, else `False`.

    This method is `lazy`-evaluated or only executes when called.

    :rtype: bool
    """
    try:
        from urllib2 import urlopen, URLError, HTTPError
    except ImportError:
        from urllib.request import urlopen, URLError, HTTPError

    if len(self.instance.STATUS_LINK):
        check_url = self.instance.STATUS_LINK % ({'content_uid': self.get_content_uid()})
    else:
        # fallback
        check_url = self.instance.url

    try:
        response = urlopen(check_url)
    except (HTTPError, URLError):
        return False
    except ValueError:
        # str.format() uses {} placeholders; the original mixed %s with .format()
        raise URLError('Invalid URL: {}'.format(check_url))
    else:
        return response.code == 200
[ "def", "check_if_alive", "(", "self", ")", ":", "try", ":", "from", "urllib2", "import", "urlopen", ",", "URLError", ",", "HTTPError", "except", "ImportError", ":", "from", "urllib", ".", "request", "import", "urlopen", ",", "URLError", ",", "HTTPError", "if", "len", "(", "self", ".", "instance", ".", "STATUS_LINK", ")", ":", "check_url", "=", "self", ".", "instance", ".", "STATUS_LINK", "%", "(", "{", "'content_uid'", ":", "self", ".", "get_content_uid", "(", ")", "}", ")", "else", ":", "# fallback", "check_url", "=", "self", ".", "instance", ".", "url", "try", ":", "response", "=", "urlopen", "(", "check_url", ")", "except", "(", "HTTPError", ",", "URLError", ")", ":", "return", "False", "except", "ValueError", ":", "raise", "URLError", "(", "'Invalid URL: %s'", ".", "format", "(", "check_url", ")", ")", "else", ":", "return", "True", "if", "response", ".", "code", "==", "200", "else", "False" ]
35.32
20.12
def as_proj4(self): """ Return the PROJ.4 string which corresponds to the CRS. For example:: >>> print(get(21781).as_proj4()) +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \ +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel \ +towgs84=674.4,15.1,405.3,0,0,0,0 +units=m +no_defs """ url = '{prefix}{code}.proj4?download'.format(prefix=EPSG_IO_URL, code=self.id) return requests.get(url).text.strip()
[ "def", "as_proj4", "(", "self", ")", ":", "url", "=", "'{prefix}{code}.proj4?download'", ".", "format", "(", "prefix", "=", "EPSG_IO_URL", ",", "code", "=", "self", ".", "id", ")", "return", "requests", ".", "get", "(", "url", ")", ".", "text", ".", "strip", "(", ")" ]
34.933333
20.133333
def do_clearqueue(self, line): """clearqueue Remove the operations in the queue of write operations without performing them.""" self._split_args(line, 0, 0) self._command_processor.get_operation_queue().clear() self._print_info_if_verbose("All operations in the write queue were cleared")
[ "def", "do_clearqueue", "(", "self", ",", "line", ")", ":", "self", ".", "_split_args", "(", "line", ",", "0", ",", "0", ")", "self", ".", "_command_processor", ".", "get_operation_queue", "(", ")", ".", "clear", "(", ")", "self", ".", "_print_info_if_verbose", "(", "\"All operations in the write queue were cleared\"", ")" ]
53.833333
13.333333
def connect(self, timeout_sec=TIMEOUT_SEC): """Connect to the device. If not connected within the specified timeout then an exception is thrown. """ self._central_manager.connectPeripheral_options_(self._peripheral, None) if not self._connected.wait(timeout_sec): raise RuntimeError('Failed to connect to device within timeout period!')
[ "def", "connect", "(", "self", ",", "timeout_sec", "=", "TIMEOUT_SEC", ")", ":", "self", ".", "_central_manager", ".", "connectPeripheral_options_", "(", "self", ".", "_peripheral", ",", "None", ")", "if", "not", "self", ".", "_connected", ".", "wait", "(", "timeout_sec", ")", ":", "raise", "RuntimeError", "(", "'Failed to connect to device within timeout period!'", ")" ]
54.714286
14.285714
def _publish_status(self, status, parent=None): """send status (busy/idle) on IOPub""" self.session.send(self.iopub_socket, u'status', {u'execution_state': status}, parent=parent, ident=self._topic('status'), )
[ "def", "_publish_status", "(", "self", ",", "status", ",", "parent", "=", "None", ")", ":", "self", ".", "session", ".", "send", "(", "self", ".", "iopub_socket", ",", "u'status'", ",", "{", "u'execution_state'", ":", "status", "}", ",", "parent", "=", "parent", ",", "ident", "=", "self", ".", "_topic", "(", "'status'", ")", ",", ")" ]
43.625
7.125
def _get(self, field): """ Return the value for the queried field. Get the value of a given field. The list of all queryable fields is documented in the beginning of the model class. >>> out = m._get('graph') Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out : value The current value of the requested field. """ if field in self._list_fields(): return self.__proxy__.get(field) else: raise KeyError('Key \"%s\" not in model. Available fields are %s.' % (field, ', '.join(self._list_fields())))
[ "def", "_get", "(", "self", ",", "field", ")", ":", "if", "field", "in", "self", ".", "_list_fields", "(", ")", ":", "return", "self", ".", "__proxy__", ".", "get", "(", "field", ")", "else", ":", "raise", "KeyError", "(", "'Key \\\"%s\\\" not in model. Available fields are %s.'", "%", "(", "field", ",", "', '", ".", "join", "(", "self", ".", "_list_fields", "(", ")", ")", ")", ")" ]
29.478261
22
def set_environment_variable(self, name, value): """ Set the value of an environment variable. .. warning:: The server may reject this request depending on its ``AcceptEnv`` setting; such rejections will fail silently (which is common client practice for this particular request type). Make sure you understand your server's configuration before using! :param str name: name of the environment variable :param str value: value of the environment variable :raises: `.SSHException` -- if the request was rejected or the channel was closed """ m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) m.add_int(self.remote_chanid) m.add_string("env") m.add_boolean(False) m.add_string(name) m.add_string(value) self.transport._send_user_message(m)
[ "def", "set_environment_variable", "(", "self", ",", "name", ",", "value", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "cMSG_CHANNEL_REQUEST", ")", "m", ".", "add_int", "(", "self", ".", "remote_chanid", ")", "m", ".", "add_string", "(", "\"env\"", ")", "m", ".", "add_boolean", "(", "False", ")", "m", ".", "add_string", "(", "name", ")", "m", ".", "add_string", "(", "value", ")", "self", ".", "transport", ".", "_send_user_message", "(", "m", ")" ]
36.2
19.32
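This is the standard SSH `env` channel request. A usage sketch with Paramiko-style objects; the host and credentials are placeholders, and, as the warning above notes, a server whose `AcceptEnv` does not allow the variable will drop the request silently:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('host.example.com', username='user')        # placeholder host

channel = client.get_transport().open_session()
# Silently ignored unless the server's AcceptEnv permits this name.
channel.set_environment_variable('GIT_DIR', '/srv/repo.git')
channel.exec_command('env')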
def dual_csiszar_function(logu, csiszar_function, name=None): """Calculates the dual Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Csiszar-dual is defined as: ```none f^*(u) = u f(1 / u) ``` where `f` is some other Csiszar-function. For example, the dual of `kl_reverse` is `kl_forward`, i.e., ```none f(u) = -log(u) f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u) ``` The dual of the dual is the original function: ```none f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u) ``` Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. csiszar_function: Python `callable` representing a Csiszar-function over log-domain. name: Python `str` name prefixed to Ops created by this function. Returns: dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of `f` at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "dual_csiszar_function", [logu]): return tf.exp(logu) * csiszar_function(-logu)
[ "def", "dual_csiszar_function", "(", "logu", ",", "csiszar_function", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"dual_csiszar_function\"", ",", "[", "logu", "]", ")", ":", "return", "tf", ".", "exp", "(", "logu", ")", "*", "csiszar_function", "(", "-", "logu", ")" ]
25.23913
26.673913
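The duality identity can be checked numerically without TensorFlow. A small NumPy sketch for the `kl_reverse`/`kl_forward` pair mentioned in the docstring:

import numpy as np

u = np.linspace(0.1, 5.0, 50)
logu = np.log(u)

kl_reverse = lambda logu: -logu                    # f(u)  = -log(u)
dual = lambda f, logu: np.exp(logu) * f(-logu)     # f*(u) = u * f(1/u)

# Dual of kl_reverse is kl_forward: u * log(u).
assert np.allclose(dual(kl_reverse, logu), u * np.log(u))
# Dual of the dual recovers the original function.
assert np.allclose(dual(lambda lg: dual(kl_reverse, lg), logu), kl_reverse(logu))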
def program_unitary(program, n_qubits):
    """
    Return the unitary of a pyQuil program.

    :param program: A program consisting only of :py:class:`Gate`.
    :param n_qubits: The number of qubits the unitary should act on.
    :return: a unitary corresponding to the composition of the program's gates.
    """
    umat = np.eye(2 ** n_qubits)
    for instruction in program:
        if isinstance(instruction, Gate):
            unitary = lifted_gate(gate=instruction, n_qubits=n_qubits)
            umat = unitary.dot(umat)
        else:
            raise ValueError("Can only compute program unitary for programs composed of `Gate`s")
    return umat
[ "def", "program_unitary", "(", "program", ",", "n_qubits", ")", ":", "umat", "=", "np", ".", "eye", "(", "2", "**", "n_qubits", ")", "for", "instruction", "in", "program", ":", "if", "isinstance", "(", "instruction", ",", "Gate", ")", ":", "unitary", "=", "lifted_gate", "(", "gate", "=", "instruction", ",", "n_qubits", "=", "n_qubits", ")", "umat", "=", "unitary", ".", "dot", "(", "umat", ")", "else", ":", "raise", "ValueError", "(", "\"Can only compute program unitary for programs composed of `Gate`s\"", ")", "return", "umat" ]
38.466667
18.066667
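A sketch of using `program_unitary` on a two-qubit Bell-pair circuit, assuming pyQuil's `Program` and gate constructors are importable alongside it:

import numpy as np
from pyquil import Program
from pyquil.gates import H, CNOT

p = Program(H(0), CNOT(0, 1))
U = program_unitary(p, n_qubits=2)

# Applying U to |00> yields the Bell state (|00> + |11>) / sqrt(2):
# amplitude 1/sqrt(2) at indices 0 and 3 regardless of qubit ordering.
state = U @ np.array([1, 0, 0, 0])
print(np.round(state, 3))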
def status_mute(self, id): """ Mute notifications for a status. Returns a `toot dict`_ with the now muted status """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}/mute'.format(str(id)) return self.__api_request('POST', url)
[ "def", "status_mute", "(", "self", ",", "id", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "url", "=", "'/api/v1/statuses/{0}/mute'", ".", "format", "(", "str", "(", "id", ")", ")", "return", "self", ".", "__api_request", "(", "'POST'", ",", "url", ")" ]
31.111111
11.111111
def save(self, to, driver=None): """Save this instance to the path and format provided. Arguments: to -- output path as str, file, or MemFileIO instance Keyword args: driver -- GDAL driver name as string or ImageDriver """ path = getattr(to, 'name', to) if not driver and hasattr(path, 'encode'): driver = driver_for_path(path, self.driver.filter_copyable()) elif hasattr(driver, 'encode'): driver = ImageDriver(driver) if driver is None or not driver.copyable: raise ValueError('Copy supporting driver not found for %s' % path) driver.copy(self, path).close()
[ "def", "save", "(", "self", ",", "to", ",", "driver", "=", "None", ")", ":", "path", "=", "getattr", "(", "to", ",", "'name'", ",", "to", ")", "if", "not", "driver", "and", "hasattr", "(", "path", ",", "'encode'", ")", ":", "driver", "=", "driver_for_path", "(", "path", ",", "self", ".", "driver", ".", "filter_copyable", "(", ")", ")", "elif", "hasattr", "(", "driver", ",", "'encode'", ")", ":", "driver", "=", "ImageDriver", "(", "driver", ")", "if", "driver", "is", "None", "or", "not", "driver", ".", "copyable", ":", "raise", "ValueError", "(", "'Copy supporting driver not found for %s'", "%", "path", ")", "driver", ".", "copy", "(", "self", ",", "path", ")", ".", "close", "(", ")" ]
41.875
13.9375
def get_connection(self, command, args=()):
    """Get free connection from pool.

    Returns connection.
    """
    # TODO: find a better way to determine if connection is free
    #       and not heavily used.
    command = command.upper().strip()
    is_pubsub = command in _PUBSUB_COMMANDS
    if is_pubsub and self._pubsub_conn:
        if not self._pubsub_conn.closed:
            return self._pubsub_conn, self._pubsub_conn.address
        self._pubsub_conn = None
    for i in range(self.freesize):
        conn = self._pool[0]
        self._pool.rotate(1)
        if conn.closed:  # or conn._waiters: (eg: busy connection)
            continue
        if conn.in_pubsub:
            continue
        if is_pubsub:
            self._pubsub_conn = conn
            self._pool.remove(conn)
            self._used.add(conn)
        return conn, conn.address
    return None, self._address
[ "def", "get_connection", "(", "self", ",", "command", ",", "args", "=", "(", ")", ")", ":", "# TODO: find a better way to determine if connection is free", "# and not havily used.", "command", "=", "command", ".", "upper", "(", ")", ".", "strip", "(", ")", "is_pubsub", "=", "command", "in", "_PUBSUB_COMMANDS", "if", "is_pubsub", "and", "self", ".", "_pubsub_conn", ":", "if", "not", "self", ".", "_pubsub_conn", ".", "closed", ":", "return", "self", ".", "_pubsub_conn", ",", "self", ".", "_pubsub_conn", ".", "address", "self", ".", "_pubsub_conn", "=", "None", "for", "i", "in", "range", "(", "self", ".", "freesize", ")", ":", "conn", "=", "self", ".", "_pool", "[", "0", "]", "self", ".", "_pool", ".", "rotate", "(", "1", ")", "if", "conn", ".", "closed", ":", "# or conn._waiters: (eg: busy connection)", "continue", "if", "conn", ".", "in_pubsub", ":", "continue", "if", "is_pubsub", ":", "self", ".", "_pubsub_conn", "=", "conn", "self", ".", "_pool", ".", "remove", "(", "conn", ")", "self", ".", "_used", ".", "add", "(", "conn", ")", "return", "conn", ",", "conn", ".", "address", "return", "None", ",", "self", ".", "_address" ]
37.115385
9.730769
def random_uniform(attrs, inputs, proto_obj): """Draw random samples from a uniform distribtuion.""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError: raise ImportError("Onnx and protobuf need to be installed. " "Instructions to install - https://github.com/onnx/onnx") new_attrs = translation_utils._remove_attributes(attrs, ['seed']) new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))] return 'random_uniform', new_attrs, inputs
[ "def", "random_uniform", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "try", ":", "from", "onnx", ".", "mapping", "import", "TENSOR_TYPE_TO_NP_TYPE", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Onnx and protobuf need to be installed. \"", "\"Instructions to install - https://github.com/onnx/onnx\"", ")", "new_attrs", "=", "translation_utils", ".", "_remove_attributes", "(", "attrs", ",", "[", "'seed'", "]", ")", "new_attrs", "[", "'dtype'", "]", "=", "TENSOR_TYPE_TO_NP_TYPE", "[", "int", "(", "new_attrs", ".", "get", "(", "'dtype'", ",", "1", ")", ")", "]", "return", "'random_uniform'", ",", "new_attrs", ",", "inputs" ]
53.4
21.4
def get_model(model_fn, train_data, param): """Feed model_fn with train_data and param """ model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param)
[ "def", "get_model", "(", "model_fn", ",", "train_data", ",", "param", ")", ":", "model_param", "=", "merge_dicts", "(", "{", "\"train_data\"", ":", "train_data", "}", ",", "param", "[", "\"model\"", "]", ",", "param", ".", "get", "(", "\"shared\"", ",", "{", "}", ")", ")", "return", "model_fn", "(", "*", "*", "model_param", ")" ]
45.6
13.4
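`get_model` merges `param['model']` and `param['shared']` into the keyword arguments and injects `train_data`. A self-contained sketch with a hypothetical `model_fn`; the parameter names are illustrative only:

def linear_model(train_data, learning_rate=0.01, l2=0.0):   # hypothetical model_fn
    return {'data': train_data, 'lr': learning_rate, 'l2': l2}

param = {'model': {'learning_rate': 0.1}, 'shared': {'l2': 1e-4}}
model = get_model(linear_model, train_data=[1, 2, 3], param=param)
# -> {'data': [1, 2, 3], 'lr': 0.1, 'l2': 0.0001}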
def ellipse(self, x, y, width, height, color): """ See the Processing function ellipse(): https://processing.org/reference/ellipse_.html """ self.context.set_source_rgb(*color) self.context.save() self.context.translate(self.tx(x + (width / 2.0)), self.ty(y + (height / 2.0))) self.context.scale(self.tx(width / 2.0), self.ty(height / 2.0)) self.context.arc(0.0, 0.0, 1.0, 0.0, 2 * math.pi) self.context.fill() self.context.restore()
[ "def", "ellipse", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "color", ")", ":", "self", ".", "context", ".", "set_source_rgb", "(", "*", "color", ")", "self", ".", "context", ".", "save", "(", ")", "self", ".", "context", ".", "translate", "(", "self", ".", "tx", "(", "x", "+", "(", "width", "/", "2.0", ")", ")", ",", "self", ".", "ty", "(", "y", "+", "(", "height", "/", "2.0", ")", ")", ")", "self", ".", "context", ".", "scale", "(", "self", ".", "tx", "(", "width", "/", "2.0", ")", ",", "self", ".", "ty", "(", "height", "/", "2.0", ")", ")", "self", ".", "context", ".", "arc", "(", "0.0", ",", "0.0", ",", "1.0", ",", "0.0", ",", "2", "*", "math", ".", "pi", ")", "self", ".", "context", ".", "fill", "(", ")", "self", ".", "context", ".", "restore", "(", ")" ]
42.5
13.333333
def to_text(self):
    """Render a Text MessageElement as plain text

    Args:
        None

    Returns:
        Str the plain text representation of the Text MessageElement

    Raises:
        Errors are propagated
    """
    if self.items is None:
        return ''  # keep the declared str return type even when there are no items
    else:
        text = ''
        for i, item in enumerate(self.items):
            text += ' %s. %s\n' % (i + 1, item.to_text())
        return text
[ "def", "to_text", "(", "self", ")", ":", "if", "self", ".", "items", "is", "None", ":", "return", "else", ":", "text", "=", "''", "for", "i", ",", "item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "text", "+=", "' %s. %s\\n'", "%", "(", "i", "+", "1", ",", "item", ".", "to_text", "(", ")", ")", "return", "text" ]
23.1
22.3
def decode(cls, root_element):
    """
    Decode the parsed XML element into an instance of this class.

    :param root_element: the parsed xml Element
    :type root_element: xml.etree.ElementTree.Element
    :return: the decoded Element as object
    :rtype: object
    """
    new_object = cls()
    field_names_to_attributes = new_object._get_field_names_to_attributes()
    for child_element in root_element:
        new_object._set_field(new_object, field_names_to_attributes, child_element)
    return new_object
[ "def", "decode", "(", "cls", ",", "root_element", ")", ":", "new_object", "=", "cls", "(", ")", "field_names_to_attributes", "=", "new_object", ".", "_get_field_names_to_attributes", "(", ")", "for", "child_element", "in", "root_element", ":", "new_object", ".", "_set_field", "(", "new_object", ",", "field_names_to_attributes", ",", "child_element", ")", "return", "new_object" ]
37.571429
15.714286
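A sketch of the intended call pattern, assuming a subclass that defines the field mapping; the `Person` class and its fields are hypothetical:

import xml.etree.ElementTree as ET

root = ET.fromstring('<person><name>Ada</name><age>36</age></person>')
person = Person.decode(root)   # Person: hypothetical subclass of the decoder base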
def get_cxflow_arg_parser(add_common_arguments: bool=False) -> ArgumentParser:
    """
    Create the **cxflow** argument parser.

    :return: an instance of the parser
    """
    # create parser
    main_parser = ArgumentParser('cxflow',
                                 description='cxflow: lightweight framework for machine learning with '
                                             'focus on modularization, re-usability and rapid experimenting.',
                                 epilog='For more info see <https://cognexa.github.io/cxflow>')
    main_parser.add_argument('--version', action='version', help='Print cxflow version and quit.',
                             version='cxflow {}'.format(pkg_resources.get_distribution('cxflow').version))
    subparsers = main_parser.add_subparsers(help='cxflow commands')

    # create train sub-parser
    train_parser = subparsers.add_parser('train', description='Start cxflow training from the ``config_file``.')
    train_parser.set_defaults(subcommand='train')
    train_parser.add_argument('config_file', help='path to the config file')

    # create resume sub-parser
    resume_parser = subparsers.add_parser('resume', description='Resume cxflow training from the ``config_path``.')
    resume_parser.set_defaults(subcommand='resume')
    resume_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')
    resume_parser.add_argument('restore_from', nargs='?', default=None,
                               help='information passed to the model constructor (backend-specific); '
                                    'usually a directory in which the trained model is stored')

    # create predict sub-parser (deprecated)
    predict_parser = subparsers.add_parser('predict', description='Run prediction with the given ``config_path``.')
    predict_parser.set_defaults(subcommand='predict')
    predict_parser.add_argument('config_path', help='path to the config file or the directory in which it is stored')
    predict_parser.add_argument('restore_from', nargs='?', default=None,
                                help='information passed to the model constructor (backend-specific); usually a '
                                     'directory in which the trained model is stored')

    # create eval sub-parser
    eval_parser = subparsers.add_parser('eval', description='Evaluate the given model on the specified data stream.')
    eval_parser.set_defaults(subcommand='eval')
    eval_parser.add_argument('stream_name', help='stream name to be evaluated')
    eval_parser.add_argument('model_path', help='model path to be evaluated')
    eval_parser.add_argument('--config', '-c', nargs='?', default=None, help='optional config path to be used')

    # create dataset sub-parser
    dataset_parser = subparsers.add_parser('dataset', description='Invoke arbitrary dataset method.')
    dataset_parser.set_defaults(subcommand='dataset')
    dataset_parser.add_argument('method', help='name of the method to be invoked')
    dataset_parser.add_argument('config_file', help='path to the config file')

    # create grid-search sub-parser
    gridsearch_parser = subparsers.add_parser('gridsearch', description='Do parameter grid search (experimental).')
    gridsearch_parser.set_defaults(subcommand='gridsearch')
    gridsearch_parser.add_argument('script', help='Script to be grid-searched')
    gridsearch_parser.add_argument('params', nargs='*', help='Params to be tested. Format: name:type=[value1,value2]. '
                                                             'Type is optional')
    gridsearch_parser.add_argument('--dry-run', action='store_true', help='Only print command output instead '
                                                                          'of executing it right away')

    # create ls sub-parser
    ls_parser = subparsers.add_parser('ls', description='List training log dirs in the given path.')
    ls_parser.set_defaults(subcommand='ls')
    ls_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,
                           help='path to the log directory to be listed')
    ls_parser.add_argument('-l', '--long', action='store_true', help='use long listing format')
    ls_parser.add_argument('-a', '--all', action='store_true', help='include trainings with no epochs done')
    ls_parser.add_argument('-r', '--recursive', action='store_true',
                           help='list all the dirs recursively, stop at training dirs')
    ls_parser.add_argument('-v', '--verbose', action='store_true',
                           help='print more verbose output, applicable only when a single train dir is listed')

    # create prune sub-parser
    prune_parser = subparsers.add_parser('prune',
                                         description='Prune training log dirs in the given path without finished epochs.')
    prune_parser.set_defaults(subcommand='prune')
    prune_parser.add_argument('dir', nargs='?', default=CXF_DEFAULT_LOG_DIR,
                              help='path to the log directory to be pruned')
    prune_parser.add_argument('-e', '--epochs', default=1, type=int,
                              help='keep only training log dirs having at least this many completed epochs, default 1')
    prune_parser.add_argument('-s', '--subdirs', action='store_true',
                              help='delete all subdirectories in training directories')

    # add common arguments
    if add_common_arguments:
        for parser in [main_parser, train_parser, resume_parser, predict_parser, dataset_parser, eval_parser]:
            parser.add_argument('--output_root', '-o', default='./log', help='output directory')
            parser.add_argument('--verbose', '-v', action='store_true', help='increase verbosity to level DEBUG')

    return main_parser
[ "def", "get_cxflow_arg_parser", "(", "add_common_arguments", ":", "bool", "=", "False", ")", "->", "ArgumentParser", ":", "# create parser", "main_parser", "=", "ArgumentParser", "(", "'cxflow'", ",", "description", "=", "'cxflow: lightweight framework for machine learning with '", "'focus on modularization, re-usability and rapid experimenting.'", ",", "epilog", "=", "'For more info see <https://cognexa.github.io/cxflow>'", ")", "main_parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "help", "=", "'Print cxflow version and quit.'", ",", "version", "=", "'cxflow {}'", ".", "format", "(", "pkg_resources", ".", "get_distribution", "(", "'cxflow'", ")", ".", "version", ")", ")", "subparsers", "=", "main_parser", ".", "add_subparsers", "(", "help", "=", "'cxflow commands'", ")", "# create train sub-parser", "train_parser", "=", "subparsers", ".", "add_parser", "(", "'train'", ",", "description", "=", "'Start cxflow training from the ``config_file``.'", ")", "train_parser", ".", "set_defaults", "(", "subcommand", "=", "'train'", ")", "train_parser", ".", "add_argument", "(", "'config_file'", ",", "help", "=", "'path to the config file'", ")", "# create resume sub-parser", "resume_parser", "=", "subparsers", ".", "add_parser", "(", "'resume'", ",", "description", "=", "'Resume cxflow training from the ``config_path``.'", ")", "resume_parser", ".", "set_defaults", "(", "subcommand", "=", "'resume'", ")", "resume_parser", ".", "add_argument", "(", "'config_path'", ",", "help", "=", "'path to the config file or the directory in which it is stored'", ")", "resume_parser", ".", "add_argument", "(", "'restore_from'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'information passed to the model constructor (backend-specific); '", "'usually a directory in which the trained model is stored'", ")", "# create predict sub-parser (deprecated)", "predict_parser", "=", "subparsers", ".", "add_parser", "(", "'predict'", ",", "description", "=", "'Run prediction with the given ``config_path``.'", ")", "predict_parser", ".", "set_defaults", "(", "subcommand", "=", "'predict'", ")", "predict_parser", ".", "add_argument", "(", "'config_path'", ",", "help", "=", "'path to the config file or the directory in which it is stored'", ")", "predict_parser", ".", "add_argument", "(", "'restore_from'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'information passed to the model constructor (backend-specific); usually a '", "'directory in which the trained model is stored'", ")", "# create eval sub-parser", "eval_parser", "=", "subparsers", ".", "add_parser", "(", "'eval'", ",", "description", "=", "'Evaluate the given model on the specified data stream.'", ")", "eval_parser", ".", "set_defaults", "(", "subcommand", "=", "'eval'", ")", "eval_parser", ".", "add_argument", "(", "'stream_name'", ",", "help", "=", "'stream name to be evaluated'", ")", "eval_parser", ".", "add_argument", "(", "'model_path'", ",", "help", "=", "'model path to be evaluated'", ")", "eval_parser", ".", "add_argument", "(", "'--config'", ",", "'-c'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "'optional config path to be used'", ")", "# create dataset sub-parser", "dataset_parser", "=", "subparsers", ".", "add_parser", "(", "'dataset'", ",", "description", "=", "'Invoke arbitrary dataset method.'", ")", "dataset_parser", ".", "set_defaults", "(", "subcommand", "=", "'dataset'", ")", "dataset_parser", ".", "add_argument", 
"(", "'method'", ",", "help", "=", "'name of the method to be invoked'", ")", "dataset_parser", ".", "add_argument", "(", "'config_file'", ",", "help", "=", "'path to the config file'", ")", "# create grid-search sub-parser", "gridsearch_parser", "=", "subparsers", ".", "add_parser", "(", "'gridsearch'", ",", "description", "=", "'Do parameter grid search (experimental).'", ")", "gridsearch_parser", ".", "set_defaults", "(", "subcommand", "=", "'gridsearch'", ")", "gridsearch_parser", ".", "add_argument", "(", "'script'", ",", "help", "=", "'Script to be grid-searched'", ")", "gridsearch_parser", ".", "add_argument", "(", "'params'", ",", "nargs", "=", "'*'", ",", "help", "=", "'Params to be tested. Format: name:type=[value1,value2]. '", "'Type is optional'", ")", "gridsearch_parser", ".", "add_argument", "(", "'--dry-run'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Only print command output instead '", "'of executing it right away'", ")", "# create ls sub-parser", "ls_parser", "=", "subparsers", ".", "add_parser", "(", "'ls'", ",", "description", "=", "'List training log dirs in the given path.'", ")", "ls_parser", ".", "set_defaults", "(", "subcommand", "=", "'ls'", ")", "ls_parser", ".", "add_argument", "(", "'dir'", ",", "nargs", "=", "'?'", ",", "default", "=", "CXF_DEFAULT_LOG_DIR", ",", "help", "=", "'path to the log directory to be listed'", ")", "ls_parser", ".", "add_argument", "(", "'-l'", ",", "'--long'", ",", "action", "=", "'store_true'", ",", "help", "=", "'use long listing format'", ")", "ls_parser", ".", "add_argument", "(", "'-a'", ",", "'--all'", ",", "action", "=", "'store_true'", ",", "help", "=", "'include trainings with no epochs done'", ")", "ls_parser", ".", "add_argument", "(", "'-r'", ",", "'--recursive'", ",", "action", "=", "'store_true'", ",", "help", "=", "'list all the dirs recursively, stop at training dirs'", ")", "ls_parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", "'store_true'", ",", "help", "=", "'print more verbose output, applicable only when a single train dir is listed'", ")", "# create prune sub-parser", "prune_parser", "=", "subparsers", ".", "add_parser", "(", "'prune'", ",", "description", "=", "'Prune training log dirs in the given path without finished epochs.'", ")", "prune_parser", ".", "set_defaults", "(", "subcommand", "=", "'prune'", ")", "prune_parser", ".", "add_argument", "(", "'dir'", ",", "nargs", "=", "'?'", ",", "default", "=", "CXF_DEFAULT_LOG_DIR", ",", "help", "=", "'path to the log directory to be pruned'", ")", "prune_parser", ".", "add_argument", "(", "'-e'", ",", "'--epochs'", ",", "default", "=", "1", ",", "type", "=", "int", ",", "help", "=", "'keep only training log dirs having at least this many completed epochs, default 1'", ")", "prune_parser", ".", "add_argument", "(", "'-s'", ",", "'--subdirs'", ",", "action", "=", "'store_true'", ",", "help", "=", "'delete all subdirectories in training directories'", ")", "# add common arguments", "if", "add_common_arguments", ":", "for", "parser", "in", "[", "main_parser", ",", "train_parser", ",", "resume_parser", ",", "predict_parser", ",", "dataset_parser", ",", "eval_parser", "]", ":", "parser", ".", "add_argument", "(", "'--output_root'", ",", "'-o'", ",", "default", "=", "'./log'", ",", "help", "=", "'output directory'", ")", "parser", ".", "add_argument", "(", "'--verbose'", ",", "'-v'", ",", "action", "=", "'store_true'", ",", "help", "=", "'increase verbosity to level DEBUG'", ")", "return", "main_parser" ]
64.863636
39.681818
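A sketch of exercising the parser; note that `--output_root`/`--verbose` only exist on the sub-parsers when `add_common_arguments=True`:

parser = get_cxflow_arg_parser(add_common_arguments=True)

args = parser.parse_args(['train', 'config.yaml', '-v', '-o', './my-logs'])
assert args.subcommand == 'train'
assert args.config_file == 'config.yaml'
assert args.verbose and args.output_root == './my-logs'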
def workflow_add_stage(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /workflow-xxxx/addStage API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FaddStage """ return DXHTTPRequest('/%s/addStage' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "workflow_add_stage", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/addStage'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
55.285714
36.142857
def input(self, _in, out, **kw): """Input filtering.""" args = [self.binary or 'cleancss'] + self.rebase_opt if self.extra_args: args.extend(self.extra_args) self.subprocess(args, out, _in)
[ "def", "input", "(", "self", ",", "_in", ",", "out", ",", "*", "*", "kw", ")", ":", "args", "=", "[", "self", ".", "binary", "or", "'cleancss'", "]", "+", "self", ".", "rebase_opt", "if", "self", ".", "extra_args", ":", "args", ".", "extend", "(", "self", ".", "extra_args", ")", "self", ".", "subprocess", "(", "args", ",", "out", ",", "_in", ")" ]
38
7
def gen_client_id(): """ Generates random client ID :return: """ import random gen_id = 'hbmqtt/' for i in range(7, 23): gen_id += chr(random.randint(0, 74) + 48) return gen_id
[ "def", "gen_client_id", "(", ")", ":", "import", "random", "gen_id", "=", "'hbmqtt/'", "for", "i", "in", "range", "(", "7", ",", "23", ")", ":", "gen_id", "+=", "chr", "(", "random", ".", "randint", "(", "0", ",", "74", ")", "+", "48", ")", "return", "gen_id" ]
18.818182
16.818182
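Worth noting: `chr(random.randint(0, 74) + 48)` draws each of the 16 suffix characters from ASCII 48-122, i.e. '0' through 'z', which includes punctuation such as ':' and '@'. A quick sketch verifying the shape of the output:

import random

random.seed(0)  # gen_client_id uses the shared module-level RNG
cid = gen_client_id()
assert cid.startswith('hbmqtt/') and len(cid) == len('hbmqtt/') + 16
assert all(48 <= ord(ch) <= 122 for ch in cid[len('hbmqtt/'):])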
def modify_postquery_parts(self, postquery_parts):
    """
    Make the comparison recipe a subquery that is left joined to the
    base recipe using dimensions that are shared between the recipes.

    Hoist the metric from the comparison recipe up to the base query
    while adding the suffix.
    """
    if not self.blend_recipes:
        return postquery_parts

    for blend_recipe, blend_type, blend_criteria in \
            zip(self.blend_recipes, self.blend_types, self.blend_criteria):
        join_base, join_blend = blend_criteria

        blend_subq = blend_recipe.subquery()

        # For all metrics in the blend recipe
        # Use the metric in the base recipe and
        # Add the metric columns to the base recipe
        for m in blend_recipe.metric_ids:
            met = blend_recipe._cauldron[m]
            self.recipe._cauldron.use(met)
            for suffix in met.make_column_suffixes():
                col = getattr(blend_subq.c, met.id, None)
                if col is not None:
                    postquery_parts['query'] = postquery_parts[
                        'query'
                    ].add_columns(col.label(met.id + suffix))
                else:
                    # met.id, not the builtin id, belongs in the message
                    raise BadRecipe(
                        '{} could not be found in .blend() '
                        'recipe subquery'.format(met.id + suffix)
                    )

        # For all dimensions in the blend recipe
        # Use the dimension in the base recipe and
        # Add the dimension columns and group_by to the base recipe
        # Ignore the join_blend dimension
        for d in blend_recipe.dimension_ids:
            if d == join_blend:
                continue
            dim = blend_recipe._cauldron[d]
            self.recipe._cauldron.use(dim)
            for suffix in dim.make_column_suffixes():
                col = getattr(blend_subq.c, dim.id, None)
                if col is not None:
                    postquery_parts['query'] = postquery_parts[
                        'query'
                    ].add_columns(col.label(dim.id + suffix))
                    postquery_parts['query'] = postquery_parts[
                        'query'
                    ].group_by(col)
                else:
                    # dim.id, not the builtin id, belongs in the message
                    raise BadRecipe(
                        '{} could not be found in .blend() '
                        'recipe subquery'.format(dim.id + suffix)
                    )

        base_dim = self.recipe._cauldron[join_base]
        blend_dim = blend_recipe._cauldron[join_blend]
        base_col = base_dim.columns[0]
        blend_col = getattr(blend_subq.c, blend_dim.id_prop, None)
        if blend_col is None:
            raise BadRecipe(
                'Can\'t find join property for {} dimension in '
                'blend recipe'.format(blend_dim.id_prop)
            )

        if blend_type == 'outer':
            postquery_parts['query'] = postquery_parts['query'] \
                .outerjoin(blend_subq, base_col == blend_col)
        else:
            postquery_parts['query'] = postquery_parts['query'] \
                .join(blend_subq, base_col == blend_col)

    return postquery_parts
[ "def", "modify_postquery_parts", "(", "self", ",", "postquery_parts", ")", ":", "if", "not", "self", ".", "blend_recipes", ":", "return", "postquery_parts", "for", "blend_recipe", ",", "blend_type", ",", "blend_criteria", "in", "zip", "(", "self", ".", "blend_recipes", ",", "self", ".", "blend_types", ",", "self", ".", "blend_criteria", ")", ":", "join_base", ",", "join_blend", "=", "blend_criteria", "blend_subq", "=", "blend_recipe", ".", "subquery", "(", ")", "# For all metrics in the blend recipe", "# Use the metric in the base recipe and", "# Add the metric columns to the base recipe", "for", "m", "in", "blend_recipe", ".", "metric_ids", ":", "met", "=", "blend_recipe", ".", "_cauldron", "[", "m", "]", "self", ".", "recipe", ".", "_cauldron", ".", "use", "(", "met", ")", "for", "suffix", "in", "met", ".", "make_column_suffixes", "(", ")", ":", "col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "met", ".", "id", ",", "None", ")", "if", "col", "is", "not", "None", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "add_columns", "(", "col", ".", "label", "(", "met", ".", "id", "+", "suffix", ")", ")", "else", ":", "raise", "BadRecipe", "(", "'{} could not be found in .blend() '", "'recipe subquery'", ".", "format", "(", "id", "+", "suffix", ")", ")", "# For all dimensions in the blend recipe", "# Use the dimension in the base recipe and", "# Add the dimension columns and group_by to the base recipe", "# Ignore the join_blend dimension", "for", "d", "in", "blend_recipe", ".", "dimension_ids", ":", "if", "d", "==", "join_blend", ":", "continue", "dim", "=", "blend_recipe", ".", "_cauldron", "[", "d", "]", "self", ".", "recipe", ".", "_cauldron", ".", "use", "(", "dim", ")", "for", "suffix", "in", "dim", ".", "make_column_suffixes", "(", ")", ":", "col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "dim", ".", "id", ",", "None", ")", "if", "col", "is", "not", "None", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "add_columns", "(", "col", ".", "label", "(", "dim", ".", "id", "+", "suffix", ")", ")", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "group_by", "(", "col", ")", "else", ":", "raise", "BadRecipe", "(", "'{} could not be found in .blend() '", "'recipe subquery'", ".", "format", "(", "id", "+", "suffix", ")", ")", "base_dim", "=", "self", ".", "recipe", ".", "_cauldron", "[", "join_base", "]", "blend_dim", "=", "blend_recipe", ".", "_cauldron", "[", "join_blend", "]", "base_col", "=", "base_dim", ".", "columns", "[", "0", "]", "blend_col", "=", "getattr", "(", "blend_subq", ".", "c", ",", "blend_dim", ".", "id_prop", ",", "None", ")", "if", "blend_col", "is", "None", ":", "raise", "BadRecipe", "(", "'Can\\'t find join property for {} dimension in \\\n blend recipe'", ".", "format", "(", "blend_dim", ".", "id_prop", ")", ")", "if", "blend_type", "==", "'outer'", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "outerjoin", "(", "blend_subq", ",", "base_col", "==", "blend_col", ")", "else", ":", "postquery_parts", "[", "'query'", "]", "=", "postquery_parts", "[", "'query'", "]", ".", "join", "(", "blend_subq", ",", "base_col", "==", "blend_col", ")", "return", "postquery_parts" ]
41.728395
17.358025
def _add_outcome_provenance(self, association, outcome): """ :param association: str association curie :param outcome: dict (json) :return: None """ provenance = Provenance(self.graph) base = self.curie_map.get_base() provenance.add_agent_to_graph(base, 'Monarch Initiative') self.graph.addTriple(association, self.globaltt['asserted_by'], base)
[ "def", "_add_outcome_provenance", "(", "self", ",", "association", ",", "outcome", ")", ":", "provenance", "=", "Provenance", "(", "self", ".", "graph", ")", "base", "=", "self", ".", "curie_map", ".", "get_base", "(", ")", "provenance", ".", "add_agent_to_graph", "(", "base", ",", "'Monarch Initiative'", ")", "self", ".", "graph", ".", "addTriple", "(", "association", ",", "self", ".", "globaltt", "[", "'asserted_by'", "]", ",", "base", ")" ]
37.090909
14
def _copy_delpoy_scripts(self, scripts):
    """
    Copy the given deploy scripts to the scripts dir in the prefix

    Args:
        scripts(list of str): list of paths of the scripts to copy to the
            prefix

    Returns:
        list of str: list with the paths to the copied scripts, prefixed
            with $LAGO_PREFIX_PATH so the full path is not hardcoded
    """
    if not os.path.exists(self.paths.scripts()):
        os.makedirs(self.paths.scripts())

    new_scripts = []
    for script in scripts:
        script = os.path.expandvars(script)
        if not os.path.exists(script):
            raise RuntimeError('Script %s does not exist' % script)

        sanitized_name = script.replace('/', '_')
        new_script_cur_path = os.path.expandvars(
            self.paths.scripts(sanitized_name)
        )
        shutil.copy(script, new_script_cur_path)

        new_script_init_path = os.path.join(
            '$LAGO_PREFIX_PATH',
            os.path.basename(self.paths.scripts()),
            sanitized_name,
        )
        new_scripts.append(new_script_init_path)

    return new_scripts
[ "def", "_copy_delpoy_scripts", "(", "self", ",", "scripts", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", ".", "scripts", "(", ")", ")", ":", "os", ".", "makedirs", "(", "self", ".", "paths", ".", "scripts", "(", ")", ")", "new_scripts", "=", "[", "]", "for", "script", "in", "scripts", ":", "script", "=", "os", ".", "path", ".", "expandvars", "(", "script", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "script", ")", ":", "raise", "RuntimeError", "(", "'Script %s does not exist'", "%", "script", ")", "sanitized_name", "=", "script", ".", "replace", "(", "'/'", ",", "'_'", ")", "new_script_cur_path", "=", "os", ".", "path", ".", "expandvars", "(", "self", ".", "paths", ".", "scripts", "(", "sanitized_name", ")", ")", "shutil", ".", "copy", "(", "script", ",", "new_script_cur_path", ")", "new_script_init_path", "=", "os", ".", "path", ".", "join", "(", "'$LAGO_PREFIX_PATH'", ",", "os", ".", "path", ".", "basename", "(", "self", ".", "paths", ".", "scripts", "(", ")", ")", ",", "sanitized_name", ",", ")", "new_scripts", ".", "append", "(", "new_script_init_path", ")", "return", "new_scripts" ]
33.972222
19.527778
def Match(self, registry_key): """Determines if a Windows Registry key matches the filter. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Returns: bool: True if the keys match. """ value_names = frozenset([ registry_value.name for registry_value in registry_key.GetValues()]) return self._value_names.issubset(value_names)
[ "def", "Match", "(", "self", ",", "registry_key", ")", ":", "value_names", "=", "frozenset", "(", "[", "registry_value", ".", "name", "for", "registry_value", "in", "registry_key", ".", "GetValues", "(", ")", "]", ")", "return", "self", ".", "_value_names", ".", "issubset", "(", "value_names", ")" ]
29.076923
21.384615