Schema: text (string, lengths 89 to 104k); code_tokens (list of string tokens); avg_line_len (float64, 7.91 to 980); score (float64, 0 to 630)

def schedule_forced_host_check(self, host, check_time):
    """Schedule a forced check on a host

    Format of the line that triggers function call::

        SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>

    :param host: host to check
    :type host: alignak.object.host.Host
    :param check_time: time to check
    :type check_time: int
    :return: None
    """
    host.schedule(self.daemon.hosts, self.daemon.services,
                  self.daemon.timeperiods, self.daemon.macromodulations,
                  self.daemon.checkmodulations, self.daemon.checks,
                  force=True, force_time=check_time)
    self.send_an_element(host.get_update_status_brok())

def toggle_tbstyle(self, button):
    """Toggle the ToolButtonStyle of the given button between
    :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`

    :param button: a tool button
    :type button: :class:`QtGui.QToolButton`
    :returns: None
    :rtype: None
    :raises: None
    """
    old = button.toolButtonStyle()
    if old == QtCore.Qt.ToolButtonIconOnly:
        new = QtCore.Qt.ToolButtonTextBesideIcon
    else:
        new = QtCore.Qt.ToolButtonIconOnly
    button.setToolButtonStyle(new)

def _get_prioritized_parameters(plugins_dict, is_using_default_value_map, prefer_default=True):
    """
    :type plugins_dict: dict(plugin_name => plugin_params)
    :param plugins_dict: mapping of plugin name to all plugin params

    :type is_using_default_value_map: dict(str => bool)
    :param is_using_default_value_map: mapping of parameter name to whether
        its value is derived from a default value.

    :param prefer_default: if True, will yield plugin parameters that come
        from default values. Otherwise, will yield plugin parameters that
        are *not* from default values.
    """
    for plugin_name, plugin_params in plugins_dict.items():
        for param_name, param_value in plugin_params.items():
            is_using_default = is_using_default_value_map.get(param_name, False)
            if is_using_default == prefer_default:
                yield plugin_name, param_name, param_value
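
A minimal usage sketch for the generator above; the plugin names and parameters are invented for illustration:

plugins = {
    'AWSKeyDetector': {'keyword': 'AKIA'},
    'Base64Detector': {'base64_limit': 4.5},
}
is_default = {'keyword': False, 'base64_limit': True}

# With prefer_default=True (the default), only parameters whose values
# came from defaults are yielded.
for plugin, param, value in _get_prioritized_parameters(plugins, is_default):
    print(plugin, param, value)   # -> Base64Detector base64_limit 4.5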

def quit(self):
    """Run the "are you sure" dialog for quitting Guake
    """
    # Stop an open "close tab" dialog from obstructing a quit
    response = self.run() == Gtk.ResponseType.YES
    self.destroy()
    # Keep Guake focussed after dismissing tab-close prompt
    # if tab == -1:
    #     self.window.present()
    return response

def delete_external_feed_courses(self, course_id, external_feed_id):
    """
    Delete an external feed.

    Deletes the external feed.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - PATH - external_feed_id
    """ID"""
    path["external_feed_id"] = external_feed_id

    self.logger.debug("DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)

def info(self, name, args):
    """Interfaces with the info dumpers (DBGFInfo).
    This feature is not implemented in the 4.0.0 release but it may
    show up in a dot release.

    in name of type str
        The name of the info item.

    in args of type str
        Arguments to the info dumper.

    return info of type str
        The info string.
    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(args, basestring):
        raise TypeError("args can only be an instance of type basestring")
    info = self._call("info", in_p=[name, args])
    return info

def remove_tags(self, tags):
    """
    Remove tags from a server. Accepts tags as strings or Tag objects.
    """
    if self.cloud_manager.remove_tags(self, tags):
        new_tags = [tag for tag in self.tags if tag not in tags]
        object.__setattr__(self, 'tags', new_tags)

def p_partselect_plus(self, p):
    'partselect : identifier LBRACKET expression PLUSCOLON expression RBRACKET'
    p[0] = Partselect(p[1], p[3], Plus(
        p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))

def purge(packages, fatal=False):
    """Purge one or more packages."""
    cmd = ['yum', '--assumeyes', 'remove']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_yum_command(cmd, fatal)
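
Both call shapes end up as a single yum invocation; a quick sketch (package names are illustrative, and `fatal` is presumably forwarded so a failing command raises instead of just logging):

purge('httpd')                  # yum --assumeyes remove httpd
purge(['httpd', 'mod_ssl'])     # yum --assumeyes remove httpd mod_ssl
purge('httpd', fatal=True)      # same command, but failure is fatal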

def new_instance(settings):
    """
    MAKE A PYTHON INSTANCE

    `settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
    """
    settings = set_default({}, settings)
    if not settings["class"]:
        Log.error("Expecting 'class' attribute with fully qualified class name")

    # IMPORT MODULE FOR HANDLER
    path = settings["class"].split(".")
    class_name = path[-1]
    path = ".".join(path[:-1])
    constructor = None
    try:
        temp = __import__(path, globals(), locals(), [class_name], 0)
        constructor = object.__getattribute__(temp, class_name)
    except Exception as e:
        Log.error("Can not find class {{class}}", {"class": path}, cause=e)

    settings['class'] = None
    try:
        return constructor(kwargs=settings)  # MAYBE IT TAKES A KWARGS OBJECT
    except Exception:
        pass
    try:
        return constructor(**settings)
    except Exception as e:
        # "path" is a plain string here, so rebuild the full dotted name
        # from the module path and the class name
        Log.error("Can not create instance of {{name}}",
                  name="%s.%s" % (path, class_name), cause=e)

def _wrlog_details_illegal_gaf(self, fout_err, err_cnts):
    """Print details regarding illegal GAF lines seen to a log file."""
    # fout_err = "{}.log".format(fin_gaf)
    gaf_base = os.path.basename(fout_err)
    with open(fout_err, 'w') as prt:
        prt.write("ILLEGAL GAF ERROR SUMMARY:\n\n")
        for err_cnt in err_cnts:
            prt.write(err_cnt)
        prt.write("\n\nILLEGAL GAF ERROR DETAILS:\n\n")
        for lnum, line in self.ignored:
            prt.write("**WARNING: GAF LINE IGNORED: {FIN}[{LNUM}]:\n{L}\n".format(
                FIN=gaf_base, L=line, LNUM=lnum))
            self.prt_line_detail(prt, line)
            prt.write("\n\n")
        for error, lines in self.illegal_lines.items():
            for lnum, line in lines:
                prt.write("**WARNING: GAF LINE ILLEGAL({ERR}): {FIN}[{LNUM}]:\n{L}\n".format(
                    ERR=error, FIN=gaf_base, L=line, LNUM=lnum))
                self.prt_line_detail(prt, line)
                prt.write("\n\n")
    return fout_err

def create_symbol(self, type_, **kwargs):
    """
    Banana banana
    """
    unique_name = kwargs.get('unique_name')
    if not unique_name:
        unique_name = kwargs.get('display_name')
        kwargs['unique_name'] = unique_name

    filename = kwargs.get('filename')
    if filename:
        filename = os.path.abspath(filename)
        kwargs['filename'] = filename

    if unique_name in self.__symbols:
        warn('symbol-redefined', "%s(unique_name=%s, filename=%s, project=%s)"
             " has already been defined: %s" % (type_.__name__, unique_name,
                                                filename,
                                                kwargs.get('project_name'),
                                                self.get_symbol(unique_name)))
        return None

    aliases = kwargs.pop('aliases', [])
    for alias in aliases:
        self.create_symbol(ProxySymbol,
                           unique_name=alias,
                           target=unique_name)

    symbol = type_()
    debug('Created symbol with unique name %s' % unique_name, 'symbols')

    for key, value in list(kwargs.items()):
        setattr(symbol, key, value)

    self.__symbols[unique_name] = symbol
    for alias in aliases:
        self.__symbols[alias] = symbol
    self.__aliases[unique_name] = aliases

    return symbol

def convert_aa_code(x):
    """Converts between 3-letter and 1-letter amino acid codes."""
    if len(x) == 1:
        return amino_acid_codes[x.upper()]
    elif len(x) == 3:
        return inverse_aa_codes[x.upper()]
    else:
        raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
                         "not %r" % x)
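
The two lookup tables are defined elsewhere in the module; a self-contained sketch with just a couple of entries filled in (the real tables cover all twenty amino acids):

amino_acid_codes = {'A': 'ALA', 'K': 'LYS'}    # 1-letter -> 3-letter
inverse_aa_codes = {'ALA': 'A', 'LYS': 'K'}    # 3-letter -> 1-letter

print(convert_aa_code('k'))      # 'LYS' (input is upper-cased first)
print(convert_aa_code('ala'))    # 'A'
convert_aa_code('XYZZY')         # ValueError: neither 1 nor 3 letters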

def ensure_dir(path):
    """Ensure directory exists.

    Args:
        path(str): dir path
    """
    dirpath = os.path.dirname(path)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)

def get_cache(self, namespace, query_hash, length, start, end):
    """Get a cached value for the specified date range and query"""
    query = 'SELECT start, value FROM gauged_cache WHERE namespace = ? ' \
            'AND hash = ? AND length = ? AND start BETWEEN ? AND ?'
    cursor = self.cursor
    cursor.execute(query, (namespace, query_hash, length, start, end))
    return tuple(cursor.fetchall())

def steadystate(A, max_iter=100):
    """
    Empirically determine the steady state probabilities from a stochastic
    matrix
    """
    P = np.linalg.matrix_power(A, max_iter)

    # Determine the unique rows in A
    v = []
    for i in range(len(P)):
        if not np.any([np.allclose(P[i], vi) for vi in v]):
            v.append(P[i])

    return normalize(np.sum(v, axis=0))
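
A quick numeric check, assuming `normalize` (defined elsewhere in the library) simply rescales a vector so its entries sum to one:

import numpy as np

def normalize(v):
    # assumed behaviour of the library helper
    return v / np.sum(v)

# Two-state Markov chain: stay with probability 0.9 / 0.8, switch otherwise.
A = np.array([[0.9, 0.1],
              [0.2, 0.8]])
print(steadystate(A))   # ~[0.6667 0.3333], the stationary distribution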

def uavionix_adsb_out_dynamic_encode(self, utcTime, gpsLat, gpsLon, gpsAlt, gpsFix, numSats, baroAltMSL, accuracyHor, accuracyVert, accuracyVel, velVert, velNS, VelEW, emergencyStatus, state, squawk):
    '''
    Dynamic data used to generate ADS-B out transponder data (send at 5Hz)

    utcTime         : UTC time in seconds since GPS epoch (Jan 6, 1980). If unknown set to UINT32_MAX (uint32_t)
    gpsLat          : Latitude WGS84 (deg * 1E7). If unknown set to INT32_MAX (int32_t)
    gpsLon          : Longitude WGS84 (deg * 1E7). If unknown set to INT32_MAX (int32_t)
    gpsAlt          : Altitude in mm (m * 1E-3) UP +ve. WGS84 altitude. If unknown set to INT32_MAX (int32_t)
    gpsFix          : 0-1: no fix, 2: 2D fix, 3: 3D fix, 4: DGPS, 5: RTK (uint8_t)
    numSats         : Number of satellites visible. If unknown set to UINT8_MAX (uint8_t)
    baroAltMSL      : Barometric pressure altitude relative to a standard atmosphere of 1013.2 mBar and NOT bar corrected altitude (m * 1E-3). (up +ve). If unknown set to INT32_MAX (int32_t)
    accuracyHor     : Horizontal accuracy in mm (m * 1E-3). If unknown set to UINT32_MAX (uint32_t)
    accuracyVert    : Vertical accuracy in cm. If unknown set to UINT16_MAX (uint16_t)
    accuracyVel     : Velocity accuracy in mm/s (m * 1E-3). If unknown set to UINT16_MAX (uint16_t)
    velVert         : GPS vertical speed in cm/s. If unknown set to INT16_MAX (int16_t)
    velNS           : North-South velocity over ground in cm/s North +ve. If unknown set to INT16_MAX (int16_t)
    VelEW           : East-West velocity over ground in cm/s East +ve. If unknown set to INT16_MAX (int16_t)
    emergencyStatus : Emergency status (uint8_t)
    state           : ADS-B transponder dynamic input state flags (uint16_t)
    squawk          : Mode A code (typically 1200 [0x04B0] for VFR) (uint16_t)
    '''
    return MAVLink_uavionix_adsb_out_dynamic_message(utcTime, gpsLat, gpsLon, gpsAlt, gpsFix, numSats, baroAltMSL, accuracyHor, accuracyVert, accuracyVel, velVert, velNS, VelEW, emergencyStatus, state, squawk)

def retry_count(self):
    """
    Number of retried test cases in this list.

    :return: integer
    """
    retries = len([i for i, result in enumerate(self.data) if result.retries_left > 0])
    return retries

def safe_join(directory, filename):
    """Safely join `directory` and `filename`.

    Example usage::

        @app.route('/wiki/<path:filename>')
        def wiki_page(filename):
            filename = safe_join(app.config['WIKI_FOLDER'], filename)
            with open(filename, 'rb') as fd:
                content = fd.read()  # Read and process the file content...

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
             would fall out of `directory`.
    """
    filename = posixpath.normpath(filename)
    for sep in _os_alt_seps:
        if sep in filename:
            raise NotFound()
    if os.path.isabs(filename) or \
            filename == '..' or \
            filename.startswith('../'):
        raise NotFound()
    return os.path.join(directory, filename)
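
Behaviour at the boundary is the interesting part; a short sketch, assuming `_os_alt_seps` holds the platform's alternative path separators (for example `['\\']` on Windows):

safe_join('/srv/wiki', 'pages/home.md')    # '/srv/wiki/pages/home.md'
safe_join('/srv/wiki', 'a/b/../c.md')      # '/srv/wiki/a/c.md' (normalized)
safe_join('/srv/wiki', '../etc/passwd')    # raises NotFound
safe_join('/srv/wiki', '/etc/passwd')      # raises NotFound (absolute path)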

def future(self):
    """Returns all outlets that are or will be active."""
    qs = self.get_queryset()
    return qs.filter(
        models.Q(end_date__isnull=True) |
        models.Q(end_date__gte=now().date())
    )

def get_purged_review_history_for(brain_or_object):
    """Returns the review history for the object passed in, but filtered
    with the actions and states that match with the workflow currently
    bound to the object plus those actions that are None (for initial
    state)
    """
    history = review_history_cache.get(api.get_uid(brain_or_object), [])

    # Boil out those actions not supported by object's current workflow
    available_actions = get_workflow_actions_for(brain_or_object)
    history = filter(lambda action: action["action"] in available_actions or
                                    action["action"] is None, history)

    # Boil out those states not supported by object's current workflow
    available_states = get_workflow_states_for(brain_or_object)
    history = filter(lambda act: act["review_state"] in available_states,
                     history)

    # If no meaningful history is found, create a default one for the
    # initial state
    if not history:
        history = create_initial_review_history(brain_or_object)

    return history

def extract_smiles(s):
    """Return a list of SMILES identifiers extracted from the string."""
    # TODO: This still gets a lot of false positives.
    smiles = []
    for t in s.split():
        if len(t) > 2 and SMILES_RE.match(t) and not t.endswith('.') and bracket_level(t) == 0:
            smiles.append(t)
    return smiles
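
`SMILES_RE` and `bracket_level` are module-level helpers not shown here; assuming they behave as their names suggest (a SMILES-shaped regular expression and a parenthesis-balance count), usage would look like:

text = 'The reaction of CCO with c1ccccc1 gave the product in 80% yield.'
print(extract_smiles(text))   # expected: ['CCO', 'c1ccccc1']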

def put(self, filepath):
    """
    Change the group or permissions of the specified file.
    Action must be specified when calling this method.
    """
    action = self.get_body_argument('action')
    if action['action'] == 'update_group':
        newgrp = action['group']
        try:
            self.fs.update_group(filepath, newgrp)
            self.write({'msg': 'Updated group for {}'.format(filepath)})
        except OSError:
            raise tornado.web.HTTPError(404)
    elif action['action'] == 'update_permissions':
        newperms = action['permissions']
        try:
            self.fs.update_permissions(filepath, newperms)
            self.write({'msg': 'Updated permissions for {}'.format(filepath)})
        except OSError:
            raise tornado.web.HTTPError(404)
    else:
        raise tornado.web.HTTPError(400)

def pack(self):
    """
    Pack the structure data into a string
    """
    data = []
    for field in self.__fields__:
        (vtype, vlen) = self.__fields_types__[field]
        if vtype == 'char':  # string
            data.append(getattr(self, field))
        elif isinstance(vtype, CStructMeta):
            num = int(vlen / vtype.size)
            if num == 1:  # single struct
                v = getattr(self, field, vtype())
                v = v.pack()
                if sys.version_info >= (3, 0):
                    v = ([bytes([x]) for x in v])
                data.extend(v)
            else:  # multiple struct
                values = getattr(self, field, [])
                for j in range(0, num):
                    try:
                        v = values[j]
                    except:
                        v = vtype()
                    v = v.pack()
                    if sys.version_info >= (3, 0):
                        v = ([bytes([x]) for x in v])
                    data.extend(v)
        elif vlen == 1:
            data.append(getattr(self, field))
        else:
            v = getattr(self, field)
            v = v[:vlen] + [0] * (vlen - len(v))
            data.extend(v)
    return struct.pack(self.__fmt__, *data)

def _ExpandArtifactGroupSource(self, source, requested):
    """Recursively expands an artifact group source."""
    artifact_list = []
    if "names" in source.attributes:
        artifact_list = source.attributes["names"]
    for artifact_name in artifact_list:
        if artifact_name in self.processed_artifacts:
            continue
        artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name)
        for expanded_artifact in self.Expand(artifact_obj, requested):
            yield expanded_artifact

def set_knowledge_category(self, grade_id):
    """Sets the knowledge category.

    arg:    grade_id (osid.id.Id): the new knowledge category
    raise:  InvalidArgument - ``grade_id`` is invalid
    raise:  NoAccess - ``grade_id`` cannot be modified
    raise:  NullArgument - ``grade_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.set_avatar_template
    if self.get_knowledge_category_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(grade_id):
        raise errors.InvalidArgument()
    self._my_map['knowledgeCategoryId'] = str(grade_id)

def delete(self, *args, **kwargs):
    """
    Executes an HTTP DELETE.

    :Parameters:
        - `args`: Non-keyword arguments
        - `kwargs`: Keyword arguments
    """
    return self.session.delete(*args, **self.get_kwargs(**kwargs))

def position(self):
    '''
    Return the current position of the boat.

    :returns: current position
    :rtype: Point
    '''
    content = self._cached_boat
    lat, lon = content.get('position')
    return Point(lat, lon)

def cookiecutter(
        template, checkout=None, no_input=False, extra_context=None,
        replay=False, overwrite_if_exists=False, output_dir='.',
        config_file=None, default_config=False, password=None):
    """
    Run Cookiecutter just as if using it from the command line.

    :param template: A directory containing a project template directory,
        or a URL to a git repository.
    :param checkout: The branch, tag or commit ID to checkout after clone.
    :param no_input: Prompt the user at command line for manual configuration?
    :param extra_context: A dictionary of context that overrides default
        and user configuration.
    :param overwrite_if_exists: Overwrite the contents of output directory
        if it exists
    :param output_dir: Where to output the generated project dir into.
    :param config_file: User configuration file path.
    :param default_config: Use default values rather than a config file.
    :param password: The password to use when extracting the repository.
    """
    if replay and ((no_input is not False) or (extra_context is not None)):
        err_msg = (
            "You can not use both replay and no_input or extra_context "
            "at the same time."
        )
        raise InvalidModeException(err_msg)

    config_dict = get_user_config(
        config_file=config_file,
        default_config=default_config,
    )

    repo_dir, cleanup = determine_repo_dir(
        template=template,
        abbreviations=config_dict['abbreviations'],
        clone_to_dir=config_dict['cookiecutters_dir'],
        checkout=checkout,
        no_input=no_input,
        password=password
    )

    template_name = os.path.basename(os.path.abspath(repo_dir))

    if replay:
        context = load(config_dict['replay_dir'], template_name)
    else:
        context_file = os.path.join(repo_dir, 'cookiecutter.json')
        logger.debug('context_file is {}'.format(context_file))

        context = generate_context(
            context_file=context_file,
            default_context=config_dict['default_context'],
            extra_context=extra_context,
        )

        # prompt the user to manually configure at the command line.
        # except when 'no-input' flag is set
        context['cookiecutter'] = prompt_for_config(context, no_input)

        # include template dir or url in the context dict
        context['cookiecutter']['_template'] = template

        dump(config_dict['replay_dir'], template_name, context)

    # Create project from local context and project template.
    result = generate_files(
        repo_dir=repo_dir,
        context=context,
        overwrite_if_exists=overwrite_if_exists,
        output_dir=output_dir
    )

    # Cleanup (if required)
    if cleanup:
        rmtree(repo_dir)

    return result
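
Typical programmatic use mirrors the CLI; for example, rendering a template non-interactively (the template URL and context values are illustrative):

from cookiecutter.main import cookiecutter

project_dir = cookiecutter(
    'https://github.com/audreyr/cookiecutter-pypackage.git',
    no_input=True,                              # take defaults, no prompts
    extra_context={'project_name': 'demo'},     # override one default
    output_dir='/tmp',
)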

def _nfw_func(self, x):
    """
    Classic NFW function in terms of arctanh and arctan

    :param x: r/Rs
    :return:
    """
    c = 0.000001

    if isinstance(x, np.ndarray):
        x[np.where(x < c)] = c
        nfwvals = np.ones_like(x)
        inds1 = np.where(x < 1)
        inds2 = np.where(x > 1)

        nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5)
        nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5)

        return nfwvals

    elif isinstance(x, float) or isinstance(x, int):
        x = max(x, c)
        if x == 1:
            return 1
        if x < 1:
            return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5)
        else:
            return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5)
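
The two branches meet continuously at x = 1, where the function equals 1; a quick numeric sanity check of each branch:

import numpy as np

# F(x) = arctanh(sqrt(1 - x**2)) / sqrt(1 - x**2)  for x < 1
# F(x) = arctan(sqrt(x**2 - 1)) / sqrt(x**2 - 1)   for x > 1
print((1 - 0.5 ** 2) ** -.5 * np.arctanh((1 - 0.5 ** 2) ** .5))  # 1.5207... at x = 0.5
print((2.0 ** 2 - 1) ** -.5 * np.arctan((2.0 ** 2 - 1) ** .5))   # 0.6046... at x = 2.0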

def _start_connection_setup(self):
    """Start the connection setup by refreshing the TOCs"""
    logger.info('We are connected[%s], request connection setup',
                self.link_uri)
    self.platform.fetch_platform_informations(self._platform_info_fetched)

def _generate(self, source, name, filename, defer_init=False):
    """Internal hook that can be overridden to hook a different generate
    method in.

    .. versionadded:: 2.5
    """
    return generate(source, self, name, filename, defer_init=defer_init)

def target_socket(self, config):
    """ This method overrides :meth:`.WNetworkNativeTransport.target_socket`.
    Does the same thing as the basic method, but also checks that the
    resulting address is an IPv4 address.

    :param config: beacon configuration
    :return: WIPV4SocketInfo
    """
    target = WNetworkNativeTransport.target_socket(self, config)
    if isinstance(target.address(), WIPV4Address) is False:
        raise ValueError('Invalid address for broadcast transport')
    return target

def _update_zone(self, zone, status=None):
    """
    Updates a zone's status.

    :param zone: zone number
    :type zone: int
    :param status: zone status
    :type status: int

    :raises: IndexError
    """
    if zone not in self._zones:
        raise IndexError('Zone does not exist and cannot be updated: %d', zone)

    old_status = self._zones[zone].status
    if status is None:
        status = old_status

    self._zones[zone].status = status
    self._zones[zone].timestamp = time.time()

    if status == Zone.CLEAR:
        if zone in self._zones_faulted:
            self._zones_faulted.remove(zone)

        self.on_restore(zone=zone)
    else:
        if old_status != status and status is not None:
            self.on_fault(zone=zone)

def context_exists(self, name):
    """Check if a given context exists."""
    contexts = self.data['contexts']
    for context in contexts:
        if context['name'] == name:
            return True
    return False
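
A small sketch of the data shape the method expects, inferred from the body (kubeconfig-style contexts):

data = {'contexts': [{'name': 'dev'}, {'name': 'prod'}]}

# Given an object whose .data attribute is the mapping above:
#   obj.context_exists('dev')      -> True
#   obj.context_exists('staging')  -> False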

def s_res(self, components=None):
    """
    Get apparent power in kVA at line(s) and transformer(s).

    Parameters
    ----------
    components : :obj:`list`
        List of string representatives of :class:`~.grid.components.Line`
        or :class:`~.grid.components.Transformer`. If not provided defaults
        to return apparent power of all lines and transformers in the grid.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Apparent power in kVA for lines and/or transformers.
    """
    if components is None:
        return self.apparent_power
    else:
        not_included = [_ for _ in components
                        if _ not in self.apparent_power.index]
        labels_included = [_ for _ in components if _ not in not_included]

        if not_included:
            logging.warning(
                "No apparent power results available for: {}".format(
                    not_included))
        return self.apparent_power.loc[:, labels_included]

def iter_init_append(self):
    "creates a Message.AppendIter for appending arguments to the Message."
    iter = self.AppendIter(None)
    dbus.dbus_message_iter_init_append(self._dbobj, iter._dbobj)
    return iter

def add_filter(self, filter_, frequencies=None, dB=True,
               analog=False, sample_rate=None, **kwargs):
    """Add a linear time-invariant filter to this BodePlot

    Parameters
    ----------
    filter_ : `~scipy.signal.lti`, `tuple`
        the filter to plot, either as a `~scipy.signal.lti`, or a
        `tuple` with the following number and meaning of elements

            - 2: (numerator, denominator)
            - 3: (zeros, poles, gain)
            - 4: (A, B, C, D)

    frequencies : `numpy.ndarray`, optional
        list of frequencies (in Hertz) at which to plot

    dB : `bool`, optional
        if `True`, display magnitude in decibels, otherwise display
        amplitude, default: `True`

    **kwargs
        any other keyword arguments accepted by
        :meth:`~matplotlib.axes.Axes.plot`

    Returns
    -------
    mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
        the lines drawn for the magnitude and phase of the filter.
    """
    if not analog:
        if not sample_rate:
            raise ValueError("Must give sample_rate frequency to display "
                             "digital (analog=False) filter")
        sample_rate = Quantity(sample_rate, 'Hz').value
        dt = 2 * pi / sample_rate
        if not isinstance(frequencies, (type(None), int)):
            frequencies = numpy.atleast_1d(frequencies).copy()
            frequencies *= dt

    # parse filter (without digital conversions)
    _, fcomp = parse_filter(filter_, analog=False)
    if analog:
        lti = signal.lti(*fcomp)
    else:
        lti = signal.dlti(*fcomp, dt=dt)

    # calculate frequency response
    w, mag, phase = lti.bode(w=frequencies)

    # convert from decibels
    if not dB:
        mag = 10 ** (mag / 10.)

    # draw
    mline = self.maxes.plot(w, mag, **kwargs)[0]
    pline = self.paxes.plot(w, phase, **kwargs)[0]
    return mline, pline

def load_json_body(handler):
    """
    Automatically deserialize event bodies with json.loads.

    Automatically returns a 400 BAD REQUEST if there is an error while parsing.

    Usage::

      >>> from lambda_decorators import load_json_body
      >>> @load_json_body
      ... def handler(event, context):
      ...     return event['body']['foo']
      >>> handler({'body': '{"foo": "bar"}'}, object())
      'bar'

    note that ``event['body']`` is already a dictionary and didn't have to
    explicitly be parsed.
    """
    @wraps(handler)
    def wrapper(event, context):
        if isinstance(event.get('body'), str):
            try:
                event['body'] = json.loads(event['body'])
            except:
                return {'statusCode': 400, 'body': 'BAD REQUEST'}
        return handler(event, context)

    return wrapper

def wind_bft(ms):
    "Convert wind from metres per second to Beaufort scale"
    if ms is None:
        return None
    for bft in range(len(_bft_threshold)):
        if ms < _bft_threshold[bft]:
            return bft
    return len(_bft_threshold)
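
`_bft_threshold` is a module-level sequence of upper wind-speed bounds; a sketch using the conventional Beaufort limits in m/s (shown here for illustration; the library defines its own table):

# Upper bound (m/s) for Beaufort forces 0..11; anything above is force 12.
_bft_threshold = (0.3, 1.5, 3.4, 5.4, 7.9, 10.7, 13.8,
                  17.1, 20.7, 24.4, 28.4, 32.6)

print(wind_bft(0.1))    # 0  (calm)
print(wind_bft(9.0))    # 5  (fresh breeze)
print(wind_bft(40.0))   # 12 (hurricane force)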

def euclidean(src, tar, qval=2, normalized=False, alphabet=None):
    """Return the Euclidean distance between two strings.

    This is a wrapper for :py:meth:`Euclidean.dist_abs`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    normalized : bool
        Normalizes to [0, 1] if True
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float: The Euclidean distance

    Examples
    --------
    >>> euclidean('cat', 'hat')
    2.0
    >>> round(euclidean('Niall', 'Neil'), 12)
    2.645751311065
    >>> euclidean('Colin', 'Cuilen')
    3.0
    >>> round(euclidean('ATCG', 'TAGC'), 12)
    3.162277660168
    """
    return Euclidean().dist_abs(src, tar, qval, normalized, alphabet)

def _collapse_cursor(self, parts):
    """ Act on any CursorMoveUp commands by deleting preceding tokens """
    final_parts = []
    for part in parts:
        # Throw out empty string tokens ("")
        if not part:
            continue

        # Go back, deleting every token in the last 'line'
        if part == CursorMoveUp:
            if final_parts:
                final_parts.pop()
            while final_parts and '\n' not in final_parts[-1]:
                final_parts.pop()
            continue

        # Otherwise, just pass this token forward
        final_parts.append(part)

    return final_parts
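
A small trace, assuming `CursorMoveUp` is a sentinel token the tokenizer emits for the ANSI cursor-up escape sequence:

CursorMoveUp = object()   # stand-in for the module's sentinel

parts = ['progress 10%\n', 'progress 50%', CursorMoveUp, 'progress 100%']
# The cursor-up deletes 'progress 50%' (every token since the last newline),
# so only the final state of that line survives:
#   ['progress 10%\n', 'progress 100%']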

def reassemble_options(payload):
    '''
    Reassemble partial options to options, returns a list of dhcp_option

    DHCP options are basically `|tag|length|value|` structures. When an
    option is longer than 255 bytes, it can be split into multiple
    structures with the same tag. The split structures must be joined
    back to get the original option.

    `dhcp_option_partial` is used to present the split options, and
    `dhcp_option` is used for the reassembled option.
    '''
    options = []
    option_indices = {}

    def process_option_list(partials):
        for p in partials:
            if p.tag == OPTION_END:
                break
            if p.tag == OPTION_PAD:
                continue
            if p.tag in option_indices:
                # Reassemble the data
                options[option_indices[p.tag]][1].append(p.data)
            else:
                options.append((p.tag, [p.data]))
                option_indices[p.tag] = len(options) - 1

    # First process options field
    process_option_list(payload.options)
    if OPTION_OVERLOAD in option_indices:
        # There is an overload option
        data = b''.join(options[option_indices[OPTION_OVERLOAD]][1])
        overload_option = dhcp_overload.create(data)
        if overload_option & OVERLOAD_FILE:
            process_option_list(dhcp_option_partial[0].create(payload.file))
        if overload_option & OVERLOAD_SNAME:
            process_option_list(dhcp_option_partial[0].create(payload.sname))

    def _create_dhcp_option(tag, data):
        opt = dhcp_option(tag=tag)
        opt._setextra(data)
        opt._autosubclass()
        return opt

    return [_create_dhcp_option(tag, b''.join(data)) for tag, data in options]
[ "def", "reassemble_options", "(", "payload", ")", ":", "options", "=", "[", "]", "option_indices", "=", "{", "}", "def", "process_option_list", "(", "partials", ")", ":", "for", "p", "in", "partials", ":", "if", "p", ".", "tag", "==", "OPTION_END", ":", "break", "if", "p", ".", "tag", "==", "OPTION_PAD", ":", "continue", "if", "p", ".", "tag", "in", "option_indices", ":", "# Reassemble the data", "options", "[", "option_indices", "[", "p", ".", "tag", "]", "]", "[", "1", "]", ".", "append", "(", "p", ".", "data", ")", "else", ":", "options", ".", "append", "(", "(", "p", ".", "tag", ",", "[", "p", ".", "data", "]", ")", ")", "option_indices", "[", "p", ".", "tag", "]", "=", "len", "(", "options", ")", "-", "1", "# First process options field", "process_option_list", "(", "payload", ".", "options", ")", "if", "OPTION_OVERLOAD", "in", "option_indices", ":", "# There is an overload option", "data", "=", "b''", ".", "join", "(", "options", "[", "option_indices", "[", "OPTION_OVERLOAD", "]", "]", "[", "1", "]", ")", "overload_option", "=", "dhcp_overload", ".", "create", "(", "data", ")", "if", "overload_option", "&", "OVERLOAD_FILE", ":", "process_option_list", "(", "dhcp_option_partial", "[", "0", "]", ".", "create", "(", "payload", ".", "file", ")", ")", "if", "overload_option", "&", "OVERLOAD_SNAME", ":", "process_option_list", "(", "dhcp_option_partial", "[", "0", "]", ".", "create", "(", "payload", ".", "sname", ")", ")", "def", "_create_dhcp_option", "(", "tag", ",", "data", ")", ":", "opt", "=", "dhcp_option", "(", "tag", "=", "tag", ")", "opt", ".", "_setextra", "(", "data", ")", "opt", ".", "_autosubclass", "(", ")", "return", "opt", "return", "[", "_create_dhcp_option", "(", "tag", ",", "b''", ".", "join", "(", "data", ")", ")", "for", "tag", ",", "data", "in", "options", "]" ]
40.642857
16.738095
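Since `reassemble_options` is the decode side, a rough sketch of the inverse may clarify the `|tag|length|value|` framing: splitting an over-long value into consecutive partial structures with the same tag. This is a minimal illustration, not the library's actual encoder:

def split_option(tag: int, value: bytes, chunk: int = 255) -> bytes:
    """Encode one option, splitting values longer than 255 bytes into
    consecutive |tag|length|value| structures that share the same tag."""
    out = bytearray()
    for i in range(0, len(value), chunk):
        part = value[i:i + chunk]
        out += bytes([tag, len(part)]) + part
    return bytes(out)

# A 300-byte value becomes two structures: 255 bytes then 45 bytes.
encoded = split_option(0x52, b'\x00' * 300)
assert encoded[1] == 255 and encoded[2 + 255 + 1] == 45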
def _dump_multipoint(obj, decimals): """ Dump a GeoJSON-like MultiPoint object to WKT. Input parameters and return value are the MULTIPOINT equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOINT (%s)' points = (' '.join(_round_and_pad(c, decimals) for c in pt) for pt in coords) # Add parens around each point. points = ('(%s)' % pt for pt in points) mp %= ', '.join(points) return mp
[ "def", "_dump_multipoint", "(", "obj", ",", "decimals", ")", ":", "coords", "=", "obj", "[", "'coordinates'", "]", "mp", "=", "'MULTIPOINT (%s)'", "points", "=", "(", "' '", ".", "join", "(", "_round_and_pad", "(", "c", ",", "decimals", ")", "for", "c", "in", "pt", ")", "for", "pt", "in", "coords", ")", "# Add parens around each point.", "points", "=", "(", "'(%s)'", "%", "pt", "for", "pt", "in", "points", ")", "mp", "%=", "', '", ".", "join", "(", "points", ")", "return", "mp" ]
31.4
12.866667
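For readers without the module's private `_round_and_pad` helper, a self-contained variant using plain fixed-point formatting shows the same WKT shape (padding behaviour may differ from the original):

def dump_multipoint(obj, decimals=3):
    """Dump a GeoJSON-like MultiPoint to WKT using plain fixed-point rounding."""
    pts = ', '.join(
        '(%s)' % ' '.join('%.*f' % (decimals, c) for c in pt)
        for pt in obj['coordinates']
    )
    return 'MULTIPOINT (%s)' % pts

mp = {'type': 'MultiPoint', 'coordinates': [[0, 0], [1.5, 2.25]]}
assert dump_multipoint(mp, 2) == 'MULTIPOINT ((0.00 0.00), (1.50 2.25))'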
def get_client(config_file=None, apikey=None, username=None, userpass=None, service_url=None, verify_ssl_certs=None, select_first=None): """Configure the API service and creates a new instance of client. :param str config_file: absolute path to configuration file :param str apikey: apikey from thetvdb :param str username: username used on thetvdb :param str userpass: password used on thetvdb :param str service_url: the url for thetvdb api service :param str verify_ssl_certs: flag for validating ssl certs for service url (https) :param str select_first: flag for selecting first series from search results :returns: tvdbapi client :rtype: tvdbapi_client.api.TVDBClient """ from oslo_config import cfg from tvdbapi_client import api if config_file is not None: cfg.CONF([], default_config_files=[config_file]) else: if apikey is not None: cfg.CONF.set_override('apikey', apikey, 'tvdb') if username is not None: cfg.CONF.set_override('username', username, 'tvdb') if userpass is not None: cfg.CONF.set_override('userpass', userpass, 'tvdb') if service_url is not None: cfg.CONF.set_override('service_url', service_url, 'tvdb') if verify_ssl_certs is not None: cfg.CONF.set_override('verify_ssl_certs', verify_ssl_certs, 'tvdb') if select_first is not None: cfg.CONF.set_override('select_first', select_first, 'tvdb') return api.TVDBClient()
[ "def", "get_client", "(", "config_file", "=", "None", ",", "apikey", "=", "None", ",", "username", "=", "None", ",", "userpass", "=", "None", ",", "service_url", "=", "None", ",", "verify_ssl_certs", "=", "None", ",", "select_first", "=", "None", ")", ":", "from", "oslo_config", "import", "cfg", "from", "tvdbapi_client", "import", "api", "if", "config_file", "is", "not", "None", ":", "cfg", ".", "CONF", "(", "[", "]", ",", "default_config_files", "=", "[", "config_file", "]", ")", "else", ":", "if", "apikey", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'apikey'", ",", "apikey", ",", "'tvdb'", ")", "if", "username", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'username'", ",", "username", ",", "'tvdb'", ")", "if", "userpass", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'userpass'", ",", "userpass", ",", "'tvdb'", ")", "if", "service_url", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'service_url'", ",", "service_url", ",", "'tvdb'", ")", "if", "verify_ssl_certs", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'verify_ssl_certs'", ",", "verify_ssl_certs", ",", "'tvdb'", ")", "if", "select_first", "is", "not", "None", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'select_first'", ",", "select_first", ",", "'tvdb'", ")", "return", "api", ".", "TVDBClient", "(", ")" ]
42.72973
17.675676
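Typical usage might look like the sketch below; the credential values are placeholders, and `search_series` is a hypothetical client call rather than a confirmed part of the tvdbapi_client API:

# Overrides are applied only for the parameters actually passed, as above.
client = get_client(
    apikey='YOUR_API_KEY',        # placeholder
    username='your-username',     # placeholder
    userpass='your-password',     # placeholder
    select_first=True,
)
results = client.search_series(name='The Expanse')  # hypothetical call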
async def get_types(self): """Gets all available types. This function is a coroutine. Return Type: `list`""" async with aiohttp.ClientSession() as session: async with session.get('https://api.weeb.sh/images/types', headers=self.__headers) as resp: if resp.status == 200: return (await resp.json())['types'] else: raise Exception((await resp.json())['message'])
[ "async", "def", "get_types", "(", "self", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "async", "with", "session", ".", "get", "(", "'https://api.weeb.sh/images/types'", ",", "headers", "=", "self", ".", "__headers", ")", "as", "resp", ":", "if", "resp", ".", "status", "==", "200", ":", "return", "(", "await", "resp", ".", "json", "(", ")", ")", "[", "'types'", "]", "else", ":", "raise", "Exception", "(", "(", "await", "resp", ".", "json", "(", ")", ")", "[", "'message'", "]", ")" ]
38.916667
19.75
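Because `get_types` is a coroutine, it must be awaited inside an event loop. A minimal driver, assuming a hypothetical `WeebClient` wrapper class that owns the method above and a placeholder token:

import asyncio

async def main():
    client = WeebClient(token='YOUR_TOKEN')  # hypothetical wrapper class
    types = await client.get_types()
    print(types)

asyncio.run(main())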
def nhapDaiHan(self, cucSo, gioiTinh):
        """Assign the dai han (10-year major periods) to the twelve palaces.

        Args:
            cucSo: the cuc number that seeds the first period
            gioiTinh: gender, used when measuring the distance between palaces

        Returns:
            self, to allow call chaining
        """
        for cung in self.thapNhiCung:
            khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh)
            cung.daiHan(cucSo + khoangCach * 10)
        return self
[ "def", "nhapDaiHan", "(", "self", ",", "cucSo", ",", "gioiTinh", ")", ":", "for", "cung", "in", "self", ".", "thapNhiCung", ":", "khoangCach", "=", "khoangCachCung", "(", "cung", ".", "cungSo", ",", "self", ".", "cungMenh", ",", "gioiTinh", ")", "cung", ".", "daiHan", "(", "cucSo", "+", "khoangCach", "*", "10", ")", "return", "self" ]
27.714286
15.428571
def getbranchesurl(idbranch, *args, **kwargs): """Request Branches URL. If idbranch is set, you'll get a response adequate for a MambuBranch object. If not set, you'll get a response adequate for a MambuBranches object. See mambubranch module and pydoc for further information. Currently implemented filter parameters: * fullDetails * limit * offset See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future. """ getparams = [] if kwargs: try: if kwargs["fullDetails"] == True: getparams.append("fullDetails=true") else: getparams.append("fullDetails=false") except Exception as ex: pass try: getparams.append("offset=%s" % kwargs["offset"]) except Exception as ex: pass try: getparams.append("limit=%s" % kwargs["limit"]) except Exception as ex: pass branchidparam = "" if idbranch == "" else "/"+idbranch url = getmambuurl(*args, **kwargs) + "branches" + branchidparam + ("" if len(getparams) == 0 else "?" + "&".join(getparams) ) return url
[ "def", "getbranchesurl", "(", "idbranch", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "getparams", "=", "[", "]", "if", "kwargs", ":", "try", ":", "if", "kwargs", "[", "\"fullDetails\"", "]", "==", "True", ":", "getparams", ".", "append", "(", "\"fullDetails=true\"", ")", "else", ":", "getparams", ".", "append", "(", "\"fullDetails=false\"", ")", "except", "Exception", "as", "ex", ":", "pass", "try", ":", "getparams", ".", "append", "(", "\"offset=%s\"", "%", "kwargs", "[", "\"offset\"", "]", ")", "except", "Exception", "as", "ex", ":", "pass", "try", ":", "getparams", ".", "append", "(", "\"limit=%s\"", "%", "kwargs", "[", "\"limit\"", "]", ")", "except", "Exception", "as", "ex", ":", "pass", "branchidparam", "=", "\"\"", "if", "idbranch", "==", "\"\"", "else", "\"/\"", "+", "idbranch", "url", "=", "getmambuurl", "(", "*", "args", ",", "*", "*", "kwargs", ")", "+", "\"branches\"", "+", "branchidparam", "+", "(", "\"\"", "if", "len", "(", "getparams", ")", "==", "0", "else", "\"?\"", "+", "\"&\"", ".", "join", "(", "getparams", ")", ")", "return", "url" ]
33.666667
23.805556
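A usage sketch of the URL builder; `getmambuurl` is stubbed with a hypothetical base URL purely for illustration:

def getmambuurl(*args, **kwargs):
    # Hypothetical base URL standing in for the real helper.
    return 'https://example.mambu.com/api/'

# Parameters are appended in the order fullDetails, offset, limit.
url = getbranchesurl('BR001', fullDetails=True, offset=0, limit=50)
assert url == ('https://example.mambu.com/api/branches/BR001'
               '?fullDetails=true&offset=0&limit=50')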
def matchImpObjStrs(fdefs,imp_obj_strs,cdefs):
    '''Returns imp_funcs, a dictionary keyed by filepath whose values are lists
    of function definition nodes imported via `from __ import __` style syntax,
    and imp_classes, the equivalent mapping for class definition nodes.'''
    imp_funcs=dict()
    imp_classes=dict()
    for source in imp_obj_strs:
        if not imp_obj_strs[source]:
            continue
        imp_funcs[source]=[]
        imp_classes[source]=[]
        for (mod,func) in imp_obj_strs[source]:
            if mod not in fdefs:
                #print(mod+" is not part of the project.")
                continue
            if func=='*':
                all_fns = [x for x in fdefs[mod] if x.name!='body']
                imp_funcs[source] += all_fns
                all_cls = [x for x in cdefs[mod]]
                imp_classes[source] += all_cls
            else:
                fn_node = [x for x in fdefs[mod] if x.name==func]
                cls_node = [x for x in cdefs[mod] if x.name==func]
                #assert len(fn_node) in [1,0]
                #assert len(cls_node) in [1,0]
                if cls_node:
                    imp_classes[source] += cls_node
                if fn_node:
                    imp_funcs[source] += fn_node
                if not fn_node and not cls_node:
                    pass
                    #print(func+' not found in function and class definitions.')
    return imp_funcs,imp_classes
[ "def", "matchImpObjStrs", "(", "fdefs", ",", "imp_obj_strs", ",", "cdefs", ")", ":", "imp_funcs", "=", "dict", "(", ")", "imp_classes", "=", "dict", "(", ")", "for", "source", "in", "imp_obj_strs", ":", "if", "not", "imp_obj_strs", "[", "source", "]", ":", "continue", "imp_funcs", "[", "source", "]", "=", "[", "]", "imp_classes", "[", "source", "]", "=", "[", "]", "for", "(", "mod", ",", "func", ")", "in", "imp_obj_strs", "[", "source", "]", ":", "if", "mod", "not", "in", "fdefs", ":", "#print(mod+\" is not part of the project.\")", "continue", "if", "func", "==", "'*'", ":", "all_fns", "=", "[", "x", "for", "x", "in", "fdefs", "[", "mod", "]", "if", "x", ".", "name", "!=", "'body'", "]", "imp_funcs", "[", "source", "]", "+=", "all_fns", "all_cls", "=", "[", "x", "for", "x", "in", "cdefs", "[", "mod", "]", "]", "imp_classes", "[", "source", "]", "+=", "all_cls", "else", ":", "fn_node", "=", "[", "x", "for", "x", "in", "fdefs", "[", "mod", "]", "if", "x", ".", "name", "==", "func", "]", "cls_node", "=", "[", "x", "for", "x", "in", "cdefs", "[", "mod", "]", "if", "x", ".", "name", "==", "func", "]", "#assert len(fn_node) in [1,0]", "#assert len(cls_node) in [1,0]", "if", "cls_node", ":", "imp_classes", "[", "source", "]", "+=", "cls_node", "if", "fn_node", ":", "imp_funcs", "[", "source", "]", "+=", "fn_node", "if", "not", "fn_node", "and", "not", "cls_node", ":", "pass", "#print(func+' not found in function and class definitions.')", "return", "imp_funcs", ",", "imp_classes" ]
42.705882
14.705882
def make_dict(name, keys, **kwargs): """ Creates a dictionary-like mapping class that uses perfect hashing. ``name`` is the proper class name of the returned class. See ``hash_parameters()`` for documentation on all arguments after ``name``. >>> MyDict = make_dict('MyDict', '+-<>[],.', to_int=ord) >>> d = MyDict([('+', 1), ('-', 2)]) >>> d[','] = 3 >>> d MyDict([('+', 1), (',', 3), ('-', 2)]) >>> del d['+'] >>> del d['.'] Traceback (most recent call last): ... KeyError: '.' >>> len(d) 2 """ hash_func = make_hash(keys, **kwargs) slots = hash_func.slots # Create a docstring that at least describes where the class came from... doc = """ Dictionary-like object that uses perfect hashing. This class was generated by `%s.%s(%r, ...)`. """ % (__name__, make_dict.__name__, name) return create_dict_subclass(name, hash_func, slots, doc)
[ "def", "make_dict", "(", "name", ",", "keys", ",", "*", "*", "kwargs", ")", ":", "hash_func", "=", "make_hash", "(", "keys", ",", "*", "*", "kwargs", ")", "slots", "=", "hash_func", ".", "slots", "# Create a docstring that at least describes where the class came from...", "doc", "=", "\"\"\"\n Dictionary-like object that uses perfect hashing. This class was\n generated by `%s.%s(%r, ...)`.\n \"\"\"", "%", "(", "__name__", ",", "make_dict", ".", "__name__", ",", "name", ")", "return", "create_dict_subclass", "(", "name", ",", "hash_func", ",", "slots", ",", "doc", ")" ]
30.9
19.133333
def softDeactivate(rh):
    """ Deactivate a virtual machine by first shutting down Linux and
    then logging it off.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'SOFTOFF'
          userid      - userid of the virtual machine
          parms['maxQueries'] - Maximum number of queries to issue. Optional.
          parms['maxWait']    - Maximum time to wait in seconds. Optional,
                                unless 'maxQueries' is specified.
          parms['poll']       - Polling interval in seconds. Optional,
                                unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.softDeactivate, userid: " +
                   rh.userid)

    strCmd = "echo 'ping'"
    iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)

    if iucvResults['overallRC'] == 0:
        # We could talk to the machine, tell it to shutdown nicely.
        strCmd = "shutdown -h now"
        iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
        if iucvResults['overallRC'] == 0:
            time.sleep(15)
        else:
            # Shutdown failed. Let CP take down the system
            # after we log the results.
            rh.printSysLog("powerVM.softDeactivate " + rh.userid +
                           " is unreachable. Treating it as already shutdown.")
    else:
        # Could not ping the machine. Treat it as a success
        # after we log the results.
        rh.printSysLog("powerVM.softDeactivate " + rh.userid +
                       " is unreachable. Treating it as already shutdown.")

    # Tell z/VM to log off the system.
    parms = ["-T", rh.userid]
    smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
    if smcliResults['overallRC'] == 0:
        pass
    elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
            (smcliResults['rs'] == 12 or smcliResults['rs'] == 16)):
        # Tolerable error.
        # Machine is already logged off or is logging off.
        rh.printLn("N", rh.userid + " is already logged off.")
    else:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)  # Use results from invokeSMCLI

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to log off.
        waitResults = waitForVMState(
            rh,
            rh.userid,
            'off',
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
        if waitResults['overallRC'] == 0:
            rh.printLn("N", "Userid '" + rh.userid +
                       "' is in the desired state: off")
        else:
            rh.updateResults(waitResults)

    rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
[ "def", "softDeactivate", "(", "rh", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter powerVM.softDeactivate, userid: \"", "+", "rh", ".", "userid", ")", "strCmd", "=", "\"echo 'ping'\"", "iucvResults", "=", "execCmdThruIUCV", "(", "rh", ",", "rh", ".", "userid", ",", "strCmd", ")", "if", "iucvResults", "[", "'overallRC'", "]", "==", "0", ":", "# We could talk to the machine, tell it to shutdown nicely.", "strCmd", "=", "\"shutdown -h now\"", "iucvResults", "=", "execCmdThruIUCV", "(", "rh", ",", "rh", ".", "userid", ",", "strCmd", ")", "if", "iucvResults", "[", "'overallRC'", "]", "==", "0", ":", "time", ".", "sleep", "(", "15", ")", "else", ":", "# Shutdown failed. Let CP take down the system", "# after we log the results.", "rh", ".", "printSysLog", "(", "\"powerVM.softDeactivate \"", "+", "rh", ".", "userid", "+", "\" is unreachable. Treating it as already shutdown.\"", ")", "else", ":", "# Could not ping the machine. Treat it as a success", "# after we log the results.", "rh", ".", "printSysLog", "(", "\"powerVM.softDeactivate \"", "+", "rh", ".", "userid", "+", "\" is unreachable. Treating it as already shutdown.\"", ")", "# Tell z/VM to log off the system.", "parms", "=", "[", "\"-T\"", ",", "rh", ".", "userid", "]", "smcliResults", "=", "invokeSMCLI", "(", "rh", ",", "\"Image_Deactivate\"", ",", "parms", ")", "if", "smcliResults", "[", "'overallRC'", "]", "==", "0", ":", "pass", "elif", "(", "smcliResults", "[", "'overallRC'", "]", "==", "8", "and", "smcliResults", "[", "'rc'", "]", "==", "200", "and", "(", "smcliResults", "[", "'rs'", "]", "==", "12", "or", "+", "smcliResults", "[", "'rs'", "]", "==", "16", ")", ")", ":", "# Tolerable error.", "# Machine is already logged off or is logging off.", "rh", ".", "printLn", "(", "\"N\"", ",", "rh", ".", "userid", "+", "\" is already logged off.\"", ")", "else", ":", "# SMAPI API failed.", "rh", ".", "printLn", "(", "\"ES\"", ",", "smcliResults", "[", "'response'", "]", ")", "rh", ".", "updateResults", "(", "smcliResults", ")", "# Use results from invokeSMCLI", "if", "rh", ".", "results", "[", "'overallRC'", "]", "==", "0", "and", "'maxQueries'", "in", "rh", ".", "parms", ":", "# Wait for the system to log off.", "waitResults", "=", "waitForVMState", "(", "rh", ",", "rh", ".", "userid", ",", "'off'", ",", "maxQueries", "=", "rh", ".", "parms", "[", "'maxQueries'", "]", ",", "sleepSecs", "=", "rh", ".", "parms", "[", "'poll'", "]", ")", "if", "waitResults", "[", "'overallRC'", "]", "==", "0", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "\"Userid '\"", "+", "rh", ".", "userid", "+", "\" is in the desired state: off\"", ")", "else", ":", "rh", ".", "updateResults", "(", "waitResults", ")", "rh", ".", "printSysLog", "(", "\"Exit powerVM.softDeactivate, rc: \"", "+", "str", "(", "rh", ".", "results", "[", "'overallRC'", "]", ")", ")", "return", "rh", ".", "results", "[", "'overallRC'", "]" ]
38.115385
18.423077
def mdot(*args): """Computes a matrix product of multiple ndarrays This is a convenience function to avoid constructs such as np.dot(A, np.dot(B, np.dot(C, D))) and instead use mdot(A, B, C, D). Parameters ---------- *args : an arbitrarily long list of ndarrays that must be compatible for multiplication, i.e. args[i].shape[1] = args[i+1].shape[0]. """ if len(args) < 1: raise ValueError('need at least one argument') elif len(args) == 1: return args[0] elif len(args) == 2: return np.dot(args[0], args[1]) else: return np.dot(args[0], mdot(*args[1:]))
[ "def", "mdot", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "<", "1", ":", "raise", "ValueError", "(", "'need at least one argument'", ")", "elif", "len", "(", "args", ")", "==", "1", ":", "return", "args", "[", "0", "]", "elif", "len", "(", "args", ")", "==", "2", ":", "return", "np", ".", "dot", "(", "args", "[", "0", "]", ",", "args", "[", "1", "]", ")", "else", ":", "return", "np", ".", "dot", "(", "args", "[", "0", "]", ",", "mdot", "(", "*", "args", "[", "1", ":", "]", ")", ")" ]
32.684211
22.368421
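An equivalent iterative formulation with functools.reduce, offered as a sketch: reduce folds left-to-right while the recursive version above groups right-to-left, but the result is the same by associativity (neither optimises parenthesisation the way numpy.linalg.multi_dot does):

from functools import reduce
import numpy as np

def mdot_reduce(*args):
    if not args:
        raise ValueError('need at least one argument')
    return reduce(np.dot, args)

A = np.ones((2, 3)); B = np.ones((3, 4)); C = np.ones((4, 5))
assert mdot_reduce(A, B, C).shape == (2, 5)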
def findViewWithAttributeThatMatches(self, attr, regex, root="ROOT"): ''' Finds the list of Views with the specified attribute matching regex ''' return self.__findViewWithAttributeInTreeThatMatches(attr, regex, root)
[ "def", "findViewWithAttributeThatMatches", "(", "self", ",", "attr", ",", "regex", ",", "root", "=", "\"ROOT\"", ")", ":", "return", "self", ".", "__findViewWithAttributeInTreeThatMatches", "(", "attr", ",", "regex", ",", "root", ")" ]
36
31.714286
def write_data(self, buf): """Send data to the device. If the write fails for any reason, an :obj:`IOError` exception is raised. :param buf: the data to send. :type buf: list(int) :return: success status. :rtype: bool """ bmRequestType = usb.util.build_request_type( usb.util.ENDPOINT_OUT, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE ) result = self.dev.ctrl_transfer( bmRequestType=bmRequestType, bRequest=usb.REQ_SET_CONFIGURATION, data_or_wLength=buf, wValue=0x200, timeout=50) if result != len(buf): raise IOError('pywws.device_pyusb1.USBDevice.write_data failed') return True
[ "def", "write_data", "(", "self", ",", "buf", ")", ":", "bmRequestType", "=", "usb", ".", "util", ".", "build_request_type", "(", "usb", ".", "util", ".", "ENDPOINT_OUT", ",", "usb", ".", "util", ".", "CTRL_TYPE_CLASS", ",", "usb", ".", "util", ".", "CTRL_RECIPIENT_INTERFACE", ")", "result", "=", "self", ".", "dev", ".", "ctrl_transfer", "(", "bmRequestType", "=", "bmRequestType", ",", "bRequest", "=", "usb", ".", "REQ_SET_CONFIGURATION", ",", "data_or_wLength", "=", "buf", ",", "wValue", "=", "0x200", ",", "timeout", "=", "50", ")", "if", "result", "!=", "len", "(", "buf", ")", ":", "raise", "IOError", "(", "'pywws.device_pyusb1.USBDevice.write_data failed'", ")", "return", "True" ]
27.241379
17.793103
def mobile(self): """ Access the mobile :returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList :rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList """ if self._mobile is None: self._mobile = MobileList(self._version, account_sid=self._solution['account_sid'], ) return self._mobile
[ "def", "mobile", "(", "self", ")", ":", "if", "self", ".", "_mobile", "is", "None", ":", "self", ".", "_mobile", "=", "MobileList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")", "return", "self", ".", "_mobile" ]
39.2
24.8
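The property above is a lazy-initialisation pattern. Stripped of the Twilio specifics (class and attribute names below are illustrative only):

class LazyExample:
    def __init__(self):
        self._mobile = None

    @property
    def mobile(self):
        # Construct the sub-resource on first access, then cache it.
        if self._mobile is None:
            self._mobile = object()  # stands in for MobileList(...)
        return self._mobile

e = LazyExample()
assert e.mobile is e.mobile  # same cached instance on every access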
def remove(self, block_id):
        """Remove a Processing Block from the queue.

        Args:
            block_id (str): ID of the Processing Block to remove.

        """
        with self._mutex:
            entry = self._block_map[block_id]
            self._queue.remove(entry)
[ "def", "remove", "(", "self", ",", "block_id", ")", ":", "with", "self", ".", "_mutex", ":", "entry", "=", "self", ".", "_block_map", "[", "block_id", "]", "self", ".", "_queue", ".", "remove", "(", "entry", ")" ]
26.333333
12.888889
def color(self, key): """ Returns the color value for the given key for this console. :param key | <unicode> :return <QtGui.QColor> """ if type(key) == int: key = self.LoggingMap.get(key, ('NotSet', ''))[0] name = nativestring(key).capitalize() return self._colorSet.color(name)
[ "def", "color", "(", "self", ",", "key", ")", ":", "if", "type", "(", "key", ")", "==", "int", ":", "key", "=", "self", ".", "LoggingMap", ".", "get", "(", "key", ",", "(", "'NotSet'", ",", "''", ")", ")", "[", "0", "]", "name", "=", "nativestring", "(", "key", ")", ".", "capitalize", "(", ")", "return", "self", ".", "_colorSet", ".", "color", "(", "name", ")" ]
31.75
13.083333
def get_common(self, filename):
        ''' Read a file of common words, one per line, and return them as a list '''
        word_list = []
        words = open(filename)
        for word in words.readlines():
            word_list.append(word.strip())
        return word_list
[ "def", "get_common", "(", "self", ",", "filename", ")", ":", "word_list", "=", "[", "]", "words", "=", "open", "(", "filename", ")", "for", "word", "in", "words", ".", "readlines", "(", ")", ":", "word_list", ".", "append", "(", "word", ".", "strip", "(", ")", ")", "return", "word_list" ]
33.857143
9.571429
def update(self, *args, **kwargs): """ update() method will *recursively* update nested dict: >>> d=Dict({'a':{'b':{'c':3,'d':4},'h':4}}) >>> d.update({'a':{'b':{'c':'888'}}}) >>> d {'a': {'b': {'c': '888', 'd': 4}, 'h': 4}} please use update_dict() if you do not want this behaviour """ for arg in args: if not arg: continue elif isinstance(arg, dict): for k, v in arg.items(): self._update_kv(k, v) elif isinstance(arg, (list, tuple)) and (not isinstance(arg[0], (list, tuple))): k = arg[0] v = arg[1] self._update_kv(k, v) elif isinstance(arg, (list, tuple)) or isgenerator(arg): for k, v in arg: self._update_kv(k, v) else: raise TypeError("update does not understand " "{0} types".format(type(arg))) for k, v in kwargs.items(): self._update_kv(k, v)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "arg", "in", "args", ":", "if", "not", "arg", ":", "continue", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "arg", ".", "items", "(", ")", ":", "self", ".", "_update_kv", "(", "k", ",", "v", ")", "elif", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", "and", "(", "not", "isinstance", "(", "arg", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "k", "=", "arg", "[", "0", "]", "v", "=", "arg", "[", "1", "]", "self", ".", "_update_kv", "(", "k", ",", "v", ")", "elif", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", "or", "isgenerator", "(", "arg", ")", ":", "for", "k", ",", "v", "in", "arg", ":", "self", ".", "_update_kv", "(", "k", ",", "v", ")", "else", ":", "raise", "TypeError", "(", "\"update does not understand \"", "\"{0} types\"", ".", "format", "(", "type", "(", "arg", ")", ")", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "self", ".", "_update_kv", "(", "k", ",", "v", ")" ]
37.137931
16.068966
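A usage sketch of the documented behaviour, assuming `Dict` is the mapping class this method belongs to:

# All four accepted input shapes, per the branches above.
d = Dict({'a': {'b': {'c': 3, 'd': 4}, 'h': 4}})

d.update({'a': {'b': {'c': '888'}}})   # recursive merge, per the doctest
d.update([('x', 1), ('y', 2)])         # iterable of (key, value) pairs
d.update(('z', 3))                     # a single (key, value) tuple
d.update(k=5)                          # keyword arguments

assert d['a']['b'] == {'c': '888', 'd': 4}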
def stop(name, vmid=None, call=None): ''' Stop a node ("pulling the plug"). CLI Example: .. code-block:: bash salt-cloud -a stop mymachine ''' if call != 'action': raise SaltCloudSystemExit( 'The stop action must be called with -a or --action.' ) if not set_vm_status('stop', name, vmid=vmid): log.error('Unable to bring VM %s (%s) down..', name, vmid) raise SaltCloudExecutionFailure # xxx: TBD: Check here whether the status was actually changed to 'stopped' return {'Stopped': '{0} was stopped.'.format(name)}
[ "def", "stop", "(", "name", ",", "vmid", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The stop action must be called with -a or --action.'", ")", "if", "not", "set_vm_status", "(", "'stop'", ",", "name", ",", "vmid", "=", "vmid", ")", ":", "log", ".", "error", "(", "'Unable to bring VM %s (%s) down..'", ",", "name", ",", "vmid", ")", "raise", "SaltCloudExecutionFailure", "# xxx: TBD: Check here whether the status was actually changed to 'stopped'", "return", "{", "'Stopped'", ":", "'{0} was stopped.'", ".", "format", "(", "name", ")", "}" ]
26.590909
23.863636
def _profiles_index(self):
        """
        Read profiles.index and build a model-number lookup dict.

        Notes
        -----
        Sets the following attributes.

        log_ind : dict
            maps a model number to its profile.data or log.data file number

        model : list
            the models for which profile.data or log.data is available

        """
        prof_ind_name = self.prof_ind_name
        f = open(self.sldir+'/'+prof_ind_name,'r')
        line = f.readline()
        numlines=int(line.split()[0])
        print(str(numlines)+' in profiles.index file ...')
        model=[]
        log_file_num=[]
        for line in f:
            model.append(int(line.split()[0]))
            log_file_num.append(int(line.split()[2]))
        log_ind={}    # profile.data number from model
        for a,b in zip(model,log_file_num):
            log_ind[a] = b
        self.log_ind=log_ind
        self.model=model
[ "def", "_profiles_index", "(", "self", ")", ":", "prof_ind_name", "=", "self", ".", "prof_ind_name", "f", "=", "open", "(", "self", ".", "sldir", "+", "'/'", "+", "prof_ind_name", ",", "'r'", ")", "line", "=", "f", ".", "readline", "(", ")", "numlines", "=", "int", "(", "line", ".", "split", "(", ")", "[", "0", "]", ")", "print", "(", "str", "(", "numlines", ")", "+", "' in profiles.index file ...'", ")", "model", "=", "[", "]", "log_file_num", "=", "[", "]", "for", "line", "in", "f", ":", "model", ".", "append", "(", "int", "(", "line", ".", "split", "(", ")", "[", "0", "]", ")", ")", "log_file_num", ".", "append", "(", "int", "(", "line", ".", "split", "(", ")", "[", "2", "]", ")", ")", "log_ind", "=", "{", "}", "# profile.data number from model", "for", "a", ",", "b", "in", "zip", "(", "model", ",", "log_file_num", ")", ":", "log_ind", "[", "a", "]", "=", "b", "self", ".", "log_ind", "=", "log_ind", "self", ".", "model", "=", "model" ]
25.228571
20.142857
def render(sls_data, saltenv='base', sls='', **kws): ''' Accepts YAML_EX as a string or as a file object and runs it through the YAML_EX parser. :rtype: A Python data structure ''' with warnings.catch_warnings(record=True) as warn_list: data = deserialize(sls_data) or {} for item in warn_list: log.warning( '%s found in %s saltenv=%s', item.message, salt.utils.url.create(sls), saltenv ) log.debug('Results of SLS rendering: \n%s', data) return data
[ "def", "render", "(", "sls_data", ",", "saltenv", "=", "'base'", ",", "sls", "=", "''", ",", "*", "*", "kws", ")", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "warn_list", ":", "data", "=", "deserialize", "(", "sls_data", ")", "or", "{", "}", "for", "item", "in", "warn_list", ":", "log", ".", "warning", "(", "'%s found in %s saltenv=%s'", ",", "item", ".", "message", ",", "salt", ".", "utils", ".", "url", ".", "create", "(", "sls", ")", ",", "saltenv", ")", "log", ".", "debug", "(", "'Results of SLS rendering: \\n%s'", ",", "data", ")", "return", "data" ]
28.631579
24.210526
def retryable_writes_supported(self): """Checks if this server supports retryable writes.""" return ( self._ls_timeout_minutes is not None and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary))
[ "def", "retryable_writes_supported", "(", "self", ")", ":", "return", "(", "self", ".", "_ls_timeout_minutes", "is", "not", "None", "and", "self", ".", "_server_type", "in", "(", "SERVER_TYPE", ".", "Mongos", ",", "SERVER_TYPE", ".", "RSPrimary", ")", ")" ]
48.8
15.2
def sentence_texts(self): """The list of texts representing ``sentences`` layer elements.""" if not self.is_tagged(SENTENCES): self.tokenize_sentences() return self.texts(SENTENCES)
[ "def", "sentence_texts", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "SENTENCES", ")", ":", "self", ".", "tokenize_sentences", "(", ")", "return", "self", ".", "texts", "(", "SENTENCES", ")" ]
42.6
4.6
def set(self, section, option, value=None): """Set an option. Args: section (str): section name option (str): option name value (str): value, default None """ try: section = self.__getitem__(section) except KeyError: raise NoSectionError(section) from None option = self.optionxform(option) if option in section: section[option].value = value else: section[option] = value return self
[ "def", "set", "(", "self", ",", "section", ",", "option", ",", "value", "=", "None", ")", ":", "try", ":", "section", "=", "self", ".", "__getitem__", "(", "section", ")", "except", "KeyError", ":", "raise", "NoSectionError", "(", "section", ")", "from", "None", "option", "=", "self", ".", "optionxform", "(", "option", ")", "if", "option", "in", "section", ":", "section", "[", "option", "]", ".", "value", "=", "value", "else", ":", "section", "[", "option", "]", "=", "value", "return", "self" ]
29.111111
11.444444
def FormatException(self, e, output): """Append HTML version of e to list output.""" d = e.GetDictToFormat() for k in ('file_name', 'feedname', 'column_name'): if k in d.keys(): d[k] = '<code>%s</code>' % d[k] if 'url' in d.keys(): d['url'] = '<a href="%(url)s">%(url)s</a>' % d problem_text = e.FormatProblem(d).replace('\n', '<br>') problem_class = 'problem' if e.IsNotice(): problem_class += ' notice' output.append('<li>') output.append('<div class="%s">%s</div>' % (problem_class, transitfeed.EncodeUnicode(problem_text))) try: if hasattr(e, 'row_num'): line_str = 'line %d of ' % e.row_num else: line_str = '' output.append('in %s<code>%s</code><br>\n' % (line_str, transitfeed.EncodeUnicode(e.file_name))) row = e.row headers = e.headers column_name = e.column_name table_header = '' # HTML table_data = '' # HTML for header, value in zip(headers, row): attributes = '' if header == column_name: attributes = ' class="problem"' table_header += '<th%s>%s</th>' % (attributes, header) table_data += '<td%s>%s</td>' % (attributes, value) # Make sure output is encoded into UTF-8 output.append('<table class="dump"><tr>%s</tr>\n' % transitfeed.EncodeUnicode(table_header)) output.append('<tr>%s</tr></table>\n' % transitfeed.EncodeUnicode(table_data)) except AttributeError as e: pass # Hope this was getting an attribute from e ;-) output.append('<br></li>\n')
[ "def", "FormatException", "(", "self", ",", "e", ",", "output", ")", ":", "d", "=", "e", ".", "GetDictToFormat", "(", ")", "for", "k", "in", "(", "'file_name'", ",", "'feedname'", ",", "'column_name'", ")", ":", "if", "k", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "k", "]", "=", "'<code>%s</code>'", "%", "d", "[", "k", "]", "if", "'url'", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "'url'", "]", "=", "'<a href=\"%(url)s\">%(url)s</a>'", "%", "d", "problem_text", "=", "e", ".", "FormatProblem", "(", "d", ")", ".", "replace", "(", "'\\n'", ",", "'<br>'", ")", "problem_class", "=", "'problem'", "if", "e", ".", "IsNotice", "(", ")", ":", "problem_class", "+=", "' notice'", "output", ".", "append", "(", "'<li>'", ")", "output", ".", "append", "(", "'<div class=\"%s\">%s</div>'", "%", "(", "problem_class", ",", "transitfeed", ".", "EncodeUnicode", "(", "problem_text", ")", ")", ")", "try", ":", "if", "hasattr", "(", "e", ",", "'row_num'", ")", ":", "line_str", "=", "'line %d of '", "%", "e", ".", "row_num", "else", ":", "line_str", "=", "''", "output", ".", "append", "(", "'in %s<code>%s</code><br>\\n'", "%", "(", "line_str", ",", "transitfeed", ".", "EncodeUnicode", "(", "e", ".", "file_name", ")", ")", ")", "row", "=", "e", ".", "row", "headers", "=", "e", ".", "headers", "column_name", "=", "e", ".", "column_name", "table_header", "=", "''", "# HTML", "table_data", "=", "''", "# HTML", "for", "header", ",", "value", "in", "zip", "(", "headers", ",", "row", ")", ":", "attributes", "=", "''", "if", "header", "==", "column_name", ":", "attributes", "=", "' class=\"problem\"'", "table_header", "+=", "'<th%s>%s</th>'", "%", "(", "attributes", ",", "header", ")", "table_data", "+=", "'<td%s>%s</td>'", "%", "(", "attributes", ",", "value", ")", "# Make sure output is encoded into UTF-8", "output", ".", "append", "(", "'<table class=\"dump\"><tr>%s</tr>\\n'", "%", "transitfeed", ".", "EncodeUnicode", "(", "table_header", ")", ")", "output", ".", "append", "(", "'<tr>%s</tr></table>\\n'", "%", "transitfeed", ".", "EncodeUnicode", "(", "table_data", ")", ")", "except", "AttributeError", "as", "e", ":", "pass", "# Hope this was getting an attribute from e ;-)", "output", ".", "append", "(", "'<br></li>\\n'", ")" ]
38.452381
14.309524
def sorted_bits(self) -> List[Tuple[str, int]]: """Return list of bit items sorted by position.""" return sorted(self.bit.items(), key=lambda x: x[1])
[ "def", "sorted_bits", "(", "self", ")", "->", "List", "[", "Tuple", "[", "str", ",", "int", "]", "]", ":", "return", "sorted", "(", "self", ".", "bit", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")" ]
54.666667
8.666667
def resample_spline(points, smooth=.001, count=None, degree=3): """ Resample a path in space, smoothing along a b-spline. Parameters ----------- points: (n, dimension) float, points in space smooth: float, smoothing amount count: number of samples in output degree: int, degree of spline polynomial Returns --------- resampled: (count, dimension) float, points in space """ from scipy.interpolate import splprep, splev if count is None: count = len(points) points = np.asanyarray(points) closed = np.linalg.norm(points[0] - points[-1]) < tol.merge tpl = splprep(points.T, s=smooth, k=degree)[0] i = np.linspace(0.0, 1.0, count) resampled = np.column_stack(splev(i, tpl)) if closed: shared = resampled[[0, -1]].mean(axis=0) resampled[0] = shared resampled[-1] = shared return resampled
[ "def", "resample_spline", "(", "points", ",", "smooth", "=", ".001", ",", "count", "=", "None", ",", "degree", "=", "3", ")", ":", "from", "scipy", ".", "interpolate", "import", "splprep", ",", "splev", "if", "count", "is", "None", ":", "count", "=", "len", "(", "points", ")", "points", "=", "np", ".", "asanyarray", "(", "points", ")", "closed", "=", "np", ".", "linalg", ".", "norm", "(", "points", "[", "0", "]", "-", "points", "[", "-", "1", "]", ")", "<", "tol", ".", "merge", "tpl", "=", "splprep", "(", "points", ".", "T", ",", "s", "=", "smooth", ",", "k", "=", "degree", ")", "[", "0", "]", "i", "=", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "count", ")", "resampled", "=", "np", ".", "column_stack", "(", "splev", "(", "i", ",", "tpl", ")", ")", "if", "closed", ":", "shared", "=", "resampled", "[", "[", "0", ",", "-", "1", "]", "]", ".", "mean", "(", "axis", "=", "0", ")", "resampled", "[", "0", "]", "=", "shared", "resampled", "[", "-", "1", "]", "=", "shared", "return", "resampled" ]
28.258065
17.612903
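A quick demonstration on a closed curve, assuming `resample_spline` plus its numpy/scipy and `tol.merge` dependencies are importable from the host module:

import numpy as np

# Closed unit circle sampled coarsely; the first point repeats to close it.
theta = np.linspace(0.0, 2 * np.pi, 20)
circle = np.column_stack((np.cos(theta), np.sin(theta)))

smooth = resample_spline(circle, smooth=.001, count=100)
assert smooth.shape == (100, 2)
# Closed input stays closed: the endpoints were averaged together.
assert np.allclose(smooth[0], smooth[-1])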
def delete_one(self, mongo_collection, filter_doc, mongo_db=None, **kwargs): """ Deletes a single document in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the document to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.delete_one(filter_doc, **kwargs)
[ "def", "delete_one", "(", "self", ",", "mongo_collection", ",", "filter_doc", ",", "mongo_db", "=", "None", ",", "*", "*", "kwargs", ")", ":", "collection", "=", "self", ".", "get_collection", "(", "mongo_collection", ",", "mongo_db", "=", "mongo_db", ")", "return", "collection", ".", "delete_one", "(", "filter_doc", ",", "*", "*", "kwargs", ")" ]
45.882353
26.705882
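A hook-style usage sketch; the import path and connection id follow Airflow's Mongo provider conventions but should be treated as assumptions (constructor argument names vary across provider versions):

from airflow.providers.mongo.hooks.mongo import MongoHook

# Assumes a configured 'mongo_default' connection; passed positionally
# because the keyword name differs between provider versions.
hook = MongoHook('mongo_default')
result = hook.delete_one(
    mongo_collection='users',
    filter_doc={'email': 'alice@example.com'},
    mongo_db='app_db',
)
print(result.deleted_count)  # pymongo DeleteResult: 0 or 1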
def _autofill_spec_record(record): """ Returns an astropy table with columns auto-filled from FITS header Parameters ---------- record: astropy.io.fits.table.table.Row The spectrum table row to scrape Returns ------- record: astropy.io.fits.table.table.Row The spectrum table row with possible new rows inserted """ try: record['filename'] = os.path.basename(record['spectrum']) if record['spectrum'].endswith('.fits'): header = pf.getheader(record['spectrum']) # Wavelength units if not record['wavelength_units']: try: record['wavelength_units'] = header['XUNITS'] except KeyError: try: if header['BUNIT']: record['wavelength_units'] = 'um' except KeyError: pass if 'microns' in record['wavelength_units'] or 'Microns' in record['wavelength_units'] or 'um' in record[ 'wavelength_units']: record['wavelength_units'] = 'um' # Flux units if not record['flux_units']: try: record['flux_units'] = header['YUNITS'].replace(' ', '') except KeyError: try: record['flux_units'] = header['BUNIT'].replace(' ', '') except KeyError: pass if 'erg' in record['flux_units'] and 'A' in record['flux_units']: record['flux_units'] = 'ergs-1cm-2A-1' if 'erg' in record['flux_units'] and 'A' in record['flux_units'] \ else 'ergs-1cm-2um-1' if 'erg' in record['flux_units'] and 'um' in record['flux_units'] \ else 'Wm-2um-1' if 'W' in record['flux_units'] and 'um' in record['flux_units'] \ else 'Wm-2A-1' if 'W' in record['flux_units'] and 'A' in record['flux_units'] \ else '' # Observation date if not record['obs_date']: try: record['obs_date'] = header['DATE_OBS'] except KeyError: try: record['obs_date'] = header['DATE-OBS'] except KeyError: try: record['obs_date'] = header['DATE'] except KeyError: pass # Telescope id if not record['telescope_id']: try: n = header['TELESCOP'].lower() if isinstance(header['TELESCOP'], str) else '' record['telescope_id'] = 5 if 'hst' in n \ else 6 if 'spitzer' in n \ else 7 if 'irtf' in n \ else 9 if 'keck' in n and 'ii' in n \ else 8 if 'keck' in n and 'i' in n \ else 10 if 'kp' in n and '4' in n \ else 11 if 'kp' in n and '2' in n \ else 12 if 'bok' in n \ else 13 if 'mmt' in n \ else 14 if 'ctio' in n and '1' in n \ else 15 if 'ctio' in n and '4' in n \ else 16 if 'gemini' in n and 'north' in n \ else 17 if 'gemini' in n and 'south' in n \ else 18 if ('vlt' in n and 'U2' in n) \ else 19 if '3.5m' in n \ else 20 if 'subaru' in n \ else 21 if ('mag' in n and 'ii' in n) or ('clay' in n) \ else 22 if ('mag' in n and 'i' in n) or ('baade' in n) \ else 23 if ('eso' in n and '1m' in n) \ else 24 if 'cfht' in n \ else 25 if 'ntt' in n \ else 26 if ('palomar' in n and '200-inch' in n) \ else 27 if 'pan-starrs' in n \ else 28 if ('palomar' in n and '60-inch' in n) \ else 29 if ('ctio' in n and '0.9m' in n) \ else 30 if 'soar' in n \ else 31 if ('vlt' in n and 'U3' in n) \ else 32 if ('vlt' in n and 'U4' in n) \ else 33 if 'gtc' in n \ else None except KeyError: pass # Instrument id if not record['instrument_id']: try: i = header['INSTRUME'].lower() record[ 'instrument_id'] = 1 if 'r-c spec' in i or 'test' in i or 'nod' in i else 2 if 'gmos-n' in i else 3 if 'gmos-s' in i else 4 if 'fors' in i else 5 if 'lris' in i else 6 if 'spex' in i else 7 if 'ldss3' in i else 8 if 'focas' in i else 9 if 'nirspec' in i else 10 if 'irs' in i else 11 if 'fire' in i else 12 if 'mage' in i else 13 if 'goldcam' in i else 14 if 'sinfoni' in i else 15 if 'osiris' in i else 16 if 'triplespec' in i else 17 if 'x-shooter' in i else 18 if 'gnirs' in i else 19 if 'wircam' in i else 20 if 'cormass' in i else 21 if 'isaac' in i 
else 22 if 'irac' in i else 23 if 'dis' in i else 24 if 'susi2' in i else 25 if 'ircs' in i else 26 if 'nirc' in i else 29 if 'stis' in i else 0 except KeyError: pass except: pass return record
[ "def", "_autofill_spec_record", "(", "record", ")", ":", "try", ":", "record", "[", "'filename'", "]", "=", "os", ".", "path", ".", "basename", "(", "record", "[", "'spectrum'", "]", ")", "if", "record", "[", "'spectrum'", "]", ".", "endswith", "(", "'.fits'", ")", ":", "header", "=", "pf", ".", "getheader", "(", "record", "[", "'spectrum'", "]", ")", "# Wavelength units", "if", "not", "record", "[", "'wavelength_units'", "]", ":", "try", ":", "record", "[", "'wavelength_units'", "]", "=", "header", "[", "'XUNITS'", "]", "except", "KeyError", ":", "try", ":", "if", "header", "[", "'BUNIT'", "]", ":", "record", "[", "'wavelength_units'", "]", "=", "'um'", "except", "KeyError", ":", "pass", "if", "'microns'", "in", "record", "[", "'wavelength_units'", "]", "or", "'Microns'", "in", "record", "[", "'wavelength_units'", "]", "or", "'um'", "in", "record", "[", "'wavelength_units'", "]", ":", "record", "[", "'wavelength_units'", "]", "=", "'um'", "# Flux units", "if", "not", "record", "[", "'flux_units'", "]", ":", "try", ":", "record", "[", "'flux_units'", "]", "=", "header", "[", "'YUNITS'", "]", ".", "replace", "(", "' '", ",", "''", ")", "except", "KeyError", ":", "try", ":", "record", "[", "'flux_units'", "]", "=", "header", "[", "'BUNIT'", "]", ".", "replace", "(", "' '", ",", "''", ")", "except", "KeyError", ":", "pass", "if", "'erg'", "in", "record", "[", "'flux_units'", "]", "and", "'A'", "in", "record", "[", "'flux_units'", "]", ":", "record", "[", "'flux_units'", "]", "=", "'ergs-1cm-2A-1'", "if", "'erg'", "in", "record", "[", "'flux_units'", "]", "and", "'A'", "in", "record", "[", "'flux_units'", "]", "else", "'ergs-1cm-2um-1'", "if", "'erg'", "in", "record", "[", "'flux_units'", "]", "and", "'um'", "in", "record", "[", "'flux_units'", "]", "else", "'Wm-2um-1'", "if", "'W'", "in", "record", "[", "'flux_units'", "]", "and", "'um'", "in", "record", "[", "'flux_units'", "]", "else", "'Wm-2A-1'", "if", "'W'", "in", "record", "[", "'flux_units'", "]", "and", "'A'", "in", "record", "[", "'flux_units'", "]", "else", "''", "# Observation date", "if", "not", "record", "[", "'obs_date'", "]", ":", "try", ":", "record", "[", "'obs_date'", "]", "=", "header", "[", "'DATE_OBS'", "]", "except", "KeyError", ":", "try", ":", "record", "[", "'obs_date'", "]", "=", "header", "[", "'DATE-OBS'", "]", "except", "KeyError", ":", "try", ":", "record", "[", "'obs_date'", "]", "=", "header", "[", "'DATE'", "]", "except", "KeyError", ":", "pass", "# Telescope id", "if", "not", "record", "[", "'telescope_id'", "]", ":", "try", ":", "n", "=", "header", "[", "'TELESCOP'", "]", ".", "lower", "(", ")", "if", "isinstance", "(", "header", "[", "'TELESCOP'", "]", ",", "str", ")", "else", "''", "record", "[", "'telescope_id'", "]", "=", "5", "if", "'hst'", "in", "n", "else", "6", "if", "'spitzer'", "in", "n", "else", "7", "if", "'irtf'", "in", "n", "else", "9", "if", "'keck'", "in", "n", "and", "'ii'", "in", "n", "else", "8", "if", "'keck'", "in", "n", "and", "'i'", "in", "n", "else", "10", "if", "'kp'", "in", "n", "and", "'4'", "in", "n", "else", "11", "if", "'kp'", "in", "n", "and", "'2'", "in", "n", "else", "12", "if", "'bok'", "in", "n", "else", "13", "if", "'mmt'", "in", "n", "else", "14", "if", "'ctio'", "in", "n", "and", "'1'", "in", "n", "else", "15", "if", "'ctio'", "in", "n", "and", "'4'", "in", "n", "else", "16", "if", "'gemini'", "in", "n", "and", "'north'", "in", "n", "else", "17", "if", "'gemini'", "in", "n", "and", "'south'", "in", "n", "else", "18", "if", "(", "'vlt'", "in", "n", "and", "'U2'", "in", 
"n", ")", "else", "19", "if", "'3.5m'", "in", "n", "else", "20", "if", "'subaru'", "in", "n", "else", "21", "if", "(", "'mag'", "in", "n", "and", "'ii'", "in", "n", ")", "or", "(", "'clay'", "in", "n", ")", "else", "22", "if", "(", "'mag'", "in", "n", "and", "'i'", "in", "n", ")", "or", "(", "'baade'", "in", "n", ")", "else", "23", "if", "(", "'eso'", "in", "n", "and", "'1m'", "in", "n", ")", "else", "24", "if", "'cfht'", "in", "n", "else", "25", "if", "'ntt'", "in", "n", "else", "26", "if", "(", "'palomar'", "in", "n", "and", "'200-inch'", "in", "n", ")", "else", "27", "if", "'pan-starrs'", "in", "n", "else", "28", "if", "(", "'palomar'", "in", "n", "and", "'60-inch'", "in", "n", ")", "else", "29", "if", "(", "'ctio'", "in", "n", "and", "'0.9m'", "in", "n", ")", "else", "30", "if", "'soar'", "in", "n", "else", "31", "if", "(", "'vlt'", "in", "n", "and", "'U3'", "in", "n", ")", "else", "32", "if", "(", "'vlt'", "in", "n", "and", "'U4'", "in", "n", ")", "else", "33", "if", "'gtc'", "in", "n", "else", "None", "except", "KeyError", ":", "pass", "# Instrument id", "if", "not", "record", "[", "'instrument_id'", "]", ":", "try", ":", "i", "=", "header", "[", "'INSTRUME'", "]", ".", "lower", "(", ")", "record", "[", "'instrument_id'", "]", "=", "1", "if", "'r-c spec'", "in", "i", "or", "'test'", "in", "i", "or", "'nod'", "in", "i", "else", "2", "if", "'gmos-n'", "in", "i", "else", "3", "if", "'gmos-s'", "in", "i", "else", "4", "if", "'fors'", "in", "i", "else", "5", "if", "'lris'", "in", "i", "else", "6", "if", "'spex'", "in", "i", "else", "7", "if", "'ldss3'", "in", "i", "else", "8", "if", "'focas'", "in", "i", "else", "9", "if", "'nirspec'", "in", "i", "else", "10", "if", "'irs'", "in", "i", "else", "11", "if", "'fire'", "in", "i", "else", "12", "if", "'mage'", "in", "i", "else", "13", "if", "'goldcam'", "in", "i", "else", "14", "if", "'sinfoni'", "in", "i", "else", "15", "if", "'osiris'", "in", "i", "else", "16", "if", "'triplespec'", "in", "i", "else", "17", "if", "'x-shooter'", "in", "i", "else", "18", "if", "'gnirs'", "in", "i", "else", "19", "if", "'wircam'", "in", "i", "else", "20", "if", "'cormass'", "in", "i", "else", "21", "if", "'isaac'", "in", "i", "else", "22", "if", "'irac'", "in", "i", "else", "23", "if", "'dis'", "in", "i", "else", "24", "if", "'susi2'", "in", "i", "else", "25", "if", "'ircs'", "in", "i", "else", "26", "if", "'nirc'", "in", "i", "else", "29", "if", "'stis'", "in", "i", "else", "0", "except", "KeyError", ":", "pass", "except", ":", "pass", "return", "record" ]
48.392857
26.946429
def _draw(self, size=1, **kwargs): """Draws random samples without applying physical constrains. """ # draw masses try: mass1 = kwargs['mass1'] except KeyError: mass1 = self.mass1_distr.rvs(size=size)['mass1'] try: mass2 = kwargs['mass2'] except KeyError: mass2 = self.mass2_distr.rvs(size=size)['mass2'] # draw angles try: phi_a = kwargs['phi_a'] except KeyError: phi_a = self.phia_distr.rvs(size=size)['phi_a'] try: phi_s = kwargs['phi_s'] except KeyError: phi_s = self.phis_distr.rvs(size=size)['phi_s'] # draw chi_eff, chi_a try: chi_eff = kwargs['chi_eff'] except KeyError: chi_eff = self.chieff_distr.rvs(size=size)['chi_eff'] try: chi_a = kwargs['chi_a'] except KeyError: chi_a = self.chia_distr.rvs(size=size)['chi_a'] # draw xis try: xi1 = kwargs['xi1'] except KeyError: xi1 = self.xi1_distr.rvs(size=size)['xi1'] try: xi2 = kwargs['xi2'] except KeyError: xi2 = self.xi2_distr.rvs(size=size)['xi2'] dtype = [(p, float) for p in self.params] arr = numpy.zeros(size, dtype=dtype) arr['mass1'] = mass1 arr['mass2'] = mass2 arr['phi_a'] = phi_a arr['phi_s'] = phi_s arr['chi_eff'] = chi_eff arr['chi_a'] = chi_a arr['xi1'] = xi1 arr['xi2'] = xi2 return arr
[ "def", "_draw", "(", "self", ",", "size", "=", "1", ",", "*", "*", "kwargs", ")", ":", "# draw masses", "try", ":", "mass1", "=", "kwargs", "[", "'mass1'", "]", "except", "KeyError", ":", "mass1", "=", "self", ".", "mass1_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'mass1'", "]", "try", ":", "mass2", "=", "kwargs", "[", "'mass2'", "]", "except", "KeyError", ":", "mass2", "=", "self", ".", "mass2_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'mass2'", "]", "# draw angles", "try", ":", "phi_a", "=", "kwargs", "[", "'phi_a'", "]", "except", "KeyError", ":", "phi_a", "=", "self", ".", "phia_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'phi_a'", "]", "try", ":", "phi_s", "=", "kwargs", "[", "'phi_s'", "]", "except", "KeyError", ":", "phi_s", "=", "self", ".", "phis_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'phi_s'", "]", "# draw chi_eff, chi_a", "try", ":", "chi_eff", "=", "kwargs", "[", "'chi_eff'", "]", "except", "KeyError", ":", "chi_eff", "=", "self", ".", "chieff_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'chi_eff'", "]", "try", ":", "chi_a", "=", "kwargs", "[", "'chi_a'", "]", "except", "KeyError", ":", "chi_a", "=", "self", ".", "chia_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'chi_a'", "]", "# draw xis", "try", ":", "xi1", "=", "kwargs", "[", "'xi1'", "]", "except", "KeyError", ":", "xi1", "=", "self", ".", "xi1_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'xi1'", "]", "try", ":", "xi2", "=", "kwargs", "[", "'xi2'", "]", "except", "KeyError", ":", "xi2", "=", "self", ".", "xi2_distr", ".", "rvs", "(", "size", "=", "size", ")", "[", "'xi2'", "]", "dtype", "=", "[", "(", "p", ",", "float", ")", "for", "p", "in", "self", ".", "params", "]", "arr", "=", "numpy", ".", "zeros", "(", "size", ",", "dtype", "=", "dtype", ")", "arr", "[", "'mass1'", "]", "=", "mass1", "arr", "[", "'mass2'", "]", "=", "mass2", "arr", "[", "'phi_a'", "]", "=", "phi_a", "arr", "[", "'phi_s'", "]", "=", "phi_s", "arr", "[", "'chi_eff'", "]", "=", "chi_eff", "arr", "[", "'chi_a'", "]", "=", "chi_a", "arr", "[", "'xi1'", "]", "=", "xi1", "arr", "[", "'xi2'", "]", "=", "xi2", "return", "arr" ]
31.36
15.16
def get_id_constraints(pkname, pkey):
    """Returns primary key constraints.

    :pkname: if a string, returns a dict with pkname=pkey; otherwise
             pkname and pkey must be enumerables of matching length,
             which are zipped into a dict.
    """
    if isinstance(pkname, str):
        return {pkname: pkey}
    else:
        return dict(zip(pkname, pkey))
[ "def", "get_id_constraints", "(", "pkname", ",", "pkey", ")", ":", "if", "isinstance", "(", "pkname", ",", "str", ")", ":", "return", "{", "pkname", ":", "pkey", "}", "else", ":", "return", "dict", "(", "zip", "(", "pkname", ",", "pkey", ")", ")" ]
28.6
18.5
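Both call shapes from the docstring, as a minimal check:

# Single-column primary key:
assert get_id_constraints('id', 42) == {'id': 42}

# Composite primary key: matching iterables are zipped together.
assert get_id_constraints(('org', 'user'), (7, 'alice')) == {'org': 7, 'user': 'alice'}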
def stat_article_detail_list(self, page=1, start_date=str(date.today()+timedelta(days=-30)), end_date=str(date.today())):
        """
        Fetch article (image-text message) analytics data.

        Sample of the returned JSON ::

            {
                "hasMore": true,  // whether more data can be fetched by increasing the page number
                "data": [
                    {
                        "index": [
                            "20,816",  // users the message was delivered to
                            "1,944",  // article page readers
                            "2,554",  // article page reads
                            "9.34%",  // (article page readers / delivered users)
                            "0",  // original page readers
                            "0",  // original page reads
                            "0%",  // (original page readers / article page readers)
                            "47",  // share/forward users
                            "61",  // share/forward count
                            "1"  // WeChat favorite users
                        ],
                        "time": "2015-01-21",
                        "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205104027_1\",\"Title\":\"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5feb\\u6765\\u5e26\\u6211\\u56de\\u5bb6\",\"RefDate\":\"20150121\",\"TargetUser\":\"20,816\",\"IntPageReadUser\":\"1,944\",\"IntPageReadCount\":\"2,554\",\"OriPageReadUser\":\"0\",\"OriPageReadCount\":\"0\",\"ShareUser\":\"47\",\"ShareCount\":\"61\",\"AddToFavUser\":\"1\",\"Conversion\":\"0%\",\"PageConversion\":\"9.34%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}",
                        "id": "205104027_1",
                        "title": "回家大作战 | 快来带我回家"
                    },
                    {
                        "index": [
                            "20,786",  // users the message was delivered to
                            "2,598",  // article page readers
                            "3,368",  // article page reads
                            "12.5%",  // (article page readers / delivered users)
                            "0",  // original page readers
                            "0",  // original page reads
                            "0%",  // (original page readers / article page readers)
                            "73",  // share/forward users
                            "98",  // share/forward count
                            "1"  // WeChat favorite users
                        ],
                        "time": "2015-01-20",
                        "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205066833_1\",\"Title\":\"\\u56de\\u5bb6\\u5927\\u4f5c\\u6218 | \\u5982\\u4f55\\u4f18\\u96c5\\u5730\\u53bb\\u5f80\\u8f66\\u7ad9\\u548c\\u673a\\u573a\",\"RefDate\":\"20150120\",\"TargetUser\":\"20,786\",\"IntPageReadUser\":\"2,598\",\"IntPageReadCount\":\"3,368\",\"OriPageReadUser\":\"0\",\"OriPageReadCount\":\"0\",\"ShareUser\":\"73\",\"ShareCount\":\"98\",\"AddToFavUser\":\"1\",\"Conversion\":\"0%\",\"PageConversion\":\"12.5%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}",
                        "id": "205066833_1",
                        "title": "回家大作战 | 如何优雅地去往车站和机场"
                    },
                    {
                        "index": [
                            "20,745",  // users the message was delivered to
                            "1,355",  // article page readers
                            "1,839",  // article page reads
                            "6.53%",  // (article page readers / delivered users)
                            "145",  // original page readers
                            "184",  // original page reads
                            "10.7%",  // (original page readers / article page readers)
                            "48",  // share/forward users
                            "64",  // share/forward count
                            "5"  // WeChat favorite users
                        ],
                        "time": "2015-01-19",
                        "table_data": "{\"fields\":{\"TargetUser\":{\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"number\":false,\"colAlign\":\"center\",\"needOrder\":false,\"precision\":0},\"IntPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"IntPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"PageConversion\":{\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"OriPageReadUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"OriPageReadCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"Conversion\":{\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":\"2\"},\"ShareUser\":{\"thText\":\"\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"ShareCount\":{\"thText\":\"\\u6b21\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0},\"AddToFavUser\":{\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"number\":true,\"colAlign\":\"right\",\"needOrder\":false,\"precision\":0}},\"data\":[{\"MsgId\":\"205028693_1\",\"Title\":\"\\u5145\\u7535\\u65f6\\u95f4 | \\u542c\\u542c\\u7535\\u53f0\\uff0c\\u4f18\\u96c5\\u5730\\u63d0\\u5347\\u5b66\\u4e60\\u6548\\u7387\",\"RefDate\":\"20150119\",\"TargetUser\":\"20,745\",\"IntPageReadUser\":\"1,355\",\"IntPageReadCount\":\"1,839\",\"OriPageReadUser\":\"145\",\"OriPageReadCount\":\"184\",\"ShareUser\":\"48\",\"ShareCount\":\"64\",\"AddToFavUser\":\"5\",\"Conversion\":\"10.7%\",\"PageConversion\":\"6.53%\"}],\"fixedRow\":false,\"cssSetting\":{\"\":\"\"},\"complexHeader\":[[{\"field\":\"TargetUser\",\"thText\":\"\\u9001\\u8fbe\\u4eba\\u6570\",\"rowSpan\":2,\"colSpan\":1},{\"thText\":\"\\u56fe\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u539f\\u6587\\u9875\\u9605\\u8bfb\",\"colSpan\":3},{\"thText\":\"\\u5206\\u4eab\\u8f6c\\u53d1\",\"colSpan\":2},{\"field\":\"AddToFavUser\",\"thText\":\"\\u5fae\\u4fe1\\u6536\\u85cf\\u4eba\\u6570\",\"rowSpan\":2,\"enable\":true}],[{\"field\":\"IntPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"IntPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"PageConversion\",\"thText\":\"\\u56fe\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"OriPageReadUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"OriPageReadCount\",\"thText\":\"\\u6b21\\u6570\"},{\"field\":\"Conversion\",\"thText\":\"\\u539f\\u6587\\u8f6c\\u5316\\u7387\"},{\"field\":\"ShareUser\",\"thText\":\"\\u4eba\\u6570\"},{\"field\":\"ShareCount\",\"thText\":\"\\u6b21\\u6570\"}]]}",
                        "id": "205028693_1",
                        "title": "充电时间 | 听听电台,优雅地提升学习效率"
                    }
                ]
            }

        :param page: page number (because of Tencent API limits, ``page`` starts at 1 and each page holds 3 records)
        :param start_date: start date, defaults to today minus 30 days (type: str, e.g. "2015-01-15")
        :param end_date: end date, defaults to today (type: str, e.g. "2015-02-01")
        :return: the returned JSON data; see the sample JSON above for the meaning of each item
        :raises NeedLoginError: the operation did not succeed and another login attempt is needed; the exception carries the error data returned by the server
        """
        self._init_plugin_token_appid()
        url = 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format(
            page=page,
            appid=self.__appid,
            token=self.__plugin_token,
rnd=int(time.time()), start_date=start_date, end_date=end_date, ) headers = { 'x-requested-with': 'XMLHttpRequest', 'referer': 'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'.format( page=page, appid=self.__appid, token=self.__plugin_token, rnd=int(time.time()), start_date=start_date, end_date=end_date, ), 'cookie': self.__cookies, } r = requests.get(url, headers=headers) if not re.search(r'wechat_token', self.__cookies): for cookie in r.cookies: self.__cookies += cookie.name + '=' + cookie.value + ';' try: data = json.loads(r.text) if data.get('is_session_expire'): raise NeedLoginError(r.text) message = json.dumps(data, ensure_ascii=False) except (KeyError, ValueError): raise NeedLoginError(r.text) return message
[ "def", "stat_article_detail_list", "(", "self", ",", "page", "=", "1", ",", "start_date", "=", "str", "(", "date", ".", "today", "(", ")", "+", "timedelta", "(", "days", "=", "-", "30", ")", ")", ",", "end_date", "=", "str", "(", "date", ".", "today", "(", ")", ")", ")", ":", "self", ".", "_init_plugin_token_appid", "(", ")", "url", "=", "'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'", ".", "format", "(", "page", "=", "page", ",", "appid", "=", "self", ".", "__appid", ",", "token", "=", "self", ".", "__plugin_token", ",", "rnd", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ",", ")", "headers", "=", "{", "'x-requested-with'", ":", "'XMLHttpRequest'", ",", "'referer'", ":", "'http://mta.qq.com/mta/wechat/ctr_article_detail/get_list?sort=RefDate%20desc&keyword=&page={page}&appid={appid}&pluginid=luopan&token={token}&from=&src=false&devtype=3&time_type=day&start_date={start_date}&end_date={end_date}&need_compare=0&app_id=&rnd={rnd}&ajax=1'", ".", "format", "(", "page", "=", "page", ",", "appid", "=", "self", ".", "__appid", ",", "token", "=", "self", ".", "__plugin_token", ",", "rnd", "=", "int", "(", "time", ".", "time", "(", ")", ")", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ",", ")", ",", "'cookie'", ":", "self", ".", "__cookies", ",", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "if", "not", "re", ".", "search", "(", "r'wechat_token'", ",", "self", ".", "__cookies", ")", ":", "for", "cookie", "in", "r", ".", "cookies", ":", "self", ".", "__cookies", "+=", "cookie", ".", "name", "+", "'='", "+", "cookie", ".", "value", "+", "';'", "try", ":", "data", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "data", ".", "get", "(", "'is_session_expire'", ")", ":", "raise", "NeedLoginError", "(", "r", ".", "text", ")", "message", "=", "json", ".", "dumps", "(", "data", ",", "ensure_ascii", "=", "False", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "NeedLoginError", "(", "r", ".", "text", ")", "return", "message" ]
119.192661
95.119266
def confdate(self):
    """Date range of the conference the abstract belongs to, represented
    by two tuples of the form (YYYY, MM, DD).
    """
    date = self._confevent.get('confdate', {})
    if len(date) > 0:
        start = {k: int(v) for k, v in date['startdate'].items()}
        end = {k: int(v) for k, v in date['enddate'].items()}
        return ((start['@year'], start['@month'], start['@day']),
                (end['@year'], end['@month'], end['@day']))
    else:
        return ((None, None, None), (None, None, None))
[ "def", "confdate", "(", "self", ")", ":", "date", "=", "self", ".", "_confevent", ".", "get", "(", "'confdate'", ",", "{", "}", ")", "if", "len", "(", "date", ")", ">", "0", ":", "start", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "date", "[", "'startdate'", "]", ".", "items", "(", ")", "}", "end", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "date", "[", "'enddate'", "]", ".", "items", "(", ")", "}", "return", "(", "(", "start", "[", "'@year'", "]", ",", "start", "[", "'@month'", "]", ",", "start", "[", "'@day'", "]", ")", ",", "(", "end", "[", "'@year'", "]", ",", "end", "[", "'@month'", "]", ",", "end", "[", "'@day'", "]", ")", ")", "else", ":", "return", "(", "(", "None", ",", "None", ",", "None", ")", ",", "(", "None", ",", "None", ",", "None", ")", ")" ]
47.25
17.25
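A quick check of the tuple shape confdate() builds, using a hypothetical Scopus-style confdate payload (the '@year'/'@month'/'@day' keys mirror the lookups above; the values are invented):

# Hypothetical payload for illustration only.
date = {"startdate": {"@year": "2019", "@month": "5", "@day": "6"},
        "enddate": {"@year": "2019", "@month": "5", "@day": "8"}}
start = {k: int(v) for k, v in date["startdate"].items()}
end = {k: int(v) for k, v in date["enddate"].items()}
print((start["@year"], start["@month"], start["@day"]),
      (end["@year"], end["@month"], end["@day"]))
# -> (2019, 5, 6) (2019, 5, 8)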
def __package_dependencies(dist_dir, additional_reqs, silent):
    """ Installs the app's dependencies from pip and packages them (as zip), to be submitted to Spark.

    Parameters
    ----------
    dist_dir (str): Path to directory where the packaged libs shall be located
    additional_reqs (str): Path to a requirements.txt, containing any of the app's additional requirements
    silent (bool): Flag indicating whether pip output should be printed to console
    """
    logging.info('Packaging dependencies')
    libs_dir = os.path.join(dist_dir, 'libs')
    if not os.path.isdir(libs_dir):
        os.mkdir(libs_dir)

    # Get requirements
    req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
    with open(req_txt, 'r') as req:
        requirements = req.read().splitlines()
    if additional_reqs:
        with open(additional_reqs, 'r') as req:
            for row in req:
                requirements.append(row.strip())

    # Remove duplicates
    requirements = list(set(requirements))

    # Install
    devnull = open(os.devnull, 'w')
    outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
    for pkg in requirements:
        cmd = ['pip', 'install', pkg, '-t', libs_dir]
        logging.debug('Calling `%s`', str(cmd))
        call(cmd, **outp)
    devnull.close()

    # Package
    shutil.make_archive(libs_dir, 'zip', libs_dir, './')
[ "def", "__package_dependencies", "(", "dist_dir", ",", "additional_reqs", ",", "silent", ")", ":", "logging", ".", "info", "(", "'Packaging dependencies'", ")", "libs_dir", "=", "os", ".", "path", ".", "join", "(", "dist_dir", ",", "'libs'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "libs_dir", ")", ":", "os", ".", "mkdir", "(", "libs_dir", ")", "# Get requirements", "req_txt", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "'requirements-submit.txt'", ")", "with", "open", "(", "req_txt", ",", "'r'", ")", "as", "req", ":", "requirements", "=", "req", ".", "read", "(", ")", ".", "splitlines", "(", ")", "if", "additional_reqs", ":", "with", "open", "(", "additional_reqs", ",", "'r'", ")", "as", "req", ":", "for", "row", "in", "req", ":", "requirements", ".", "append", "(", "row", ")", "# Remove duplicates", "requirements", "=", "list", "(", "set", "(", "requirements", ")", ")", "# Install", "devnull", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "outp", "=", "{", "'stderr'", ":", "STDOUT", ",", "'stdout'", ":", "devnull", "}", "if", "silent", "else", "{", "}", "for", "pkg", "in", "requirements", ":", "cmd", "=", "[", "'pip'", ",", "'install'", ",", "pkg", ",", "'-t'", ",", "libs_dir", "]", "logging", ".", "debug", "(", "'Calling `%s`'", ",", "str", "(", "cmd", ")", ")", "call", "(", "cmd", ",", "*", "*", "outp", ")", "devnull", ".", "close", "(", ")", "# Package", "shutil", ".", "make_archive", "(", "libs_dir", ",", "'zip'", ",", "libs_dir", ",", "'./'", ")" ]
35.384615
21.076923
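The function above boils down to vendoring requirements with `pip install -t` and zipping the result; a minimal standalone sketch of that trick (the paths and package list here are illustrative, not from the original project):

import os
import shutil
from subprocess import call

libs_dir = os.path.join("dist", "libs")      # illustrative target dir
os.makedirs(libs_dir, exist_ok=True)
for pkg in ["requests"]:                     # stand-in requirement list
    # Install into libs_dir instead of site-packages
    call(["pip", "install", pkg, "-t", libs_dir])
# Produce dist/libs.zip, suitable for spark-submit's --py-files
shutil.make_archive(libs_dir, "zip", libs_dir, "./")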
def get_pyocd_flash_algo(self, blocksize, ram_region): """! @brief Return a dictionary representing a pyOCD flash algorithm, or None. The most interesting operation this method performs is dynamically allocating memory for the flash algo from a given RAM region. Note that the .data and .bss sections are concatenated with .text. That's why there isn't a specific allocation for those sections. Double buffering is supported as long as there is enough RAM. Memory layout: ``` [stack] [code] [buf1] [buf2] ``` @param self @param blocksize The size to use for page buffers, normally the erase block size. @param ram_region A RamRegion object where the flash algo will be allocated. @return A pyOCD-style flash algo dictionary. If None is returned, the flash algo did not fit into the provided ram_region. """ instructions = self._FLASH_BLOB_HEADER + byte_list_to_u32le_list(self.algo_data) offset = 0 # Stack offset += FLASH_ALGO_STACK_SIZE addr_stack = ram_region.start + offset # Load address addr_load = ram_region.start + offset offset += len(instructions) * 4 # Data buffer 1 addr_data = ram_region.start + offset offset += blocksize if offset > ram_region.length: # Not enough space for flash algorithm LOG.warning("Not enough space for flash algorithm") return None # Data buffer 2 addr_data2 = ram_region.start + offset offset += blocksize if offset > ram_region.length: page_buffers = [addr_data] else: page_buffers = [addr_data, addr_data2] # TODO - analyzer support code_start = addr_load + self._FLASH_BLOB_HEADER_SIZE flash_algo = { "load_address": addr_load, "instructions": instructions, "pc_init": code_start + self.symbols["Init"], "pc_unInit": code_start + self.symbols["UnInit"], "pc_eraseAll": code_start + self.symbols["EraseChip"], "pc_erase_sector": code_start + self.symbols["EraseSector"], "pc_program_page": code_start + self.symbols["ProgramPage"], "page_buffers": page_buffers, "begin_data": page_buffers[0], "begin_stack": addr_stack, "static_base": code_start + self.rw_start, "min_program_length": self.page_size, "analyzer_supported": False } return flash_algo
[ "def", "get_pyocd_flash_algo", "(", "self", ",", "blocksize", ",", "ram_region", ")", ":", "instructions", "=", "self", ".", "_FLASH_BLOB_HEADER", "+", "byte_list_to_u32le_list", "(", "self", ".", "algo_data", ")", "offset", "=", "0", "# Stack", "offset", "+=", "FLASH_ALGO_STACK_SIZE", "addr_stack", "=", "ram_region", ".", "start", "+", "offset", "# Load address", "addr_load", "=", "ram_region", ".", "start", "+", "offset", "offset", "+=", "len", "(", "instructions", ")", "*", "4", "# Data buffer 1", "addr_data", "=", "ram_region", ".", "start", "+", "offset", "offset", "+=", "blocksize", "if", "offset", ">", "ram_region", ".", "length", ":", "# Not enough space for flash algorithm", "LOG", ".", "warning", "(", "\"Not enough space for flash algorithm\"", ")", "return", "None", "# Data buffer 2", "addr_data2", "=", "ram_region", ".", "start", "+", "offset", "offset", "+=", "blocksize", "if", "offset", ">", "ram_region", ".", "length", ":", "page_buffers", "=", "[", "addr_data", "]", "else", ":", "page_buffers", "=", "[", "addr_data", ",", "addr_data2", "]", "# TODO - analyzer support", "code_start", "=", "addr_load", "+", "self", ".", "_FLASH_BLOB_HEADER_SIZE", "flash_algo", "=", "{", "\"load_address\"", ":", "addr_load", ",", "\"instructions\"", ":", "instructions", ",", "\"pc_init\"", ":", "code_start", "+", "self", ".", "symbols", "[", "\"Init\"", "]", ",", "\"pc_unInit\"", ":", "code_start", "+", "self", ".", "symbols", "[", "\"UnInit\"", "]", ",", "\"pc_eraseAll\"", ":", "code_start", "+", "self", ".", "symbols", "[", "\"EraseChip\"", "]", ",", "\"pc_erase_sector\"", ":", "code_start", "+", "self", ".", "symbols", "[", "\"EraseSector\"", "]", ",", "\"pc_program_page\"", ":", "code_start", "+", "self", ".", "symbols", "[", "\"ProgramPage\"", "]", ",", "\"page_buffers\"", ":", "page_buffers", ",", "\"begin_data\"", ":", "page_buffers", "[", "0", "]", ",", "\"begin_stack\"", ":", "addr_stack", ",", "\"static_base\"", ":", "code_start", "+", "self", ".", "rw_start", ",", "\"min_program_length\"", ":", "self", ".", "page_size", ",", "\"analyzer_supported\"", ":", "False", "}", "return", "flash_algo" ]
37.434783
21.594203
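The allocation in get_pyocd_flash_algo() is plain offset arithmetic over the RAM region; a toy trace of the [stack][code][buf1][buf2] layout with made-up numbers (the stack-size constant and all values below are assumptions for illustration):

FLASH_ALGO_STACK_SIZE = 0x800            # assumed value, illustration only
ram_start, ram_length = 0x20000000, 0x4000
blocksize, n_instruction_words = 1024, 512

offset = FLASH_ALGO_STACK_SIZE           # [stack]
addr_load = ram_start + offset           # [code] starts after the stack
offset += n_instruction_words * 4
addr_data = ram_start + offset           # [buf1]
offset += blocksize
double_buffered = offset + blocksize <= ram_length  # is there room for [buf2]?
print(hex(addr_load), hex(addr_data), double_buffered)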
def step(self, thumb=False): """Executes a single step. Steps even if there is a breakpoint. Args: self (JLink): the ``JLink`` instance thumb (bool): boolean indicating if to step in thumb mode Returns: ``None`` Raises: JLinkException: on error """ method = self._dll.JLINKARM_Step if thumb: method = self._dll.JLINKARM_StepComposite res = method() if res != 0: raise errors.JLinkException("Failed to step over instruction.") return None
[ "def", "step", "(", "self", ",", "thumb", "=", "False", ")", ":", "method", "=", "self", ".", "_dll", ".", "JLINKARM_Step", "if", "thumb", ":", "method", "=", "self", ".", "_dll", ".", "JLINKARM_StepComposite", "res", "=", "method", "(", ")", "if", "res", "!=", "0", ":", "raise", "errors", ".", "JLinkException", "(", "\"Failed to step over instruction.\"", ")", "return", "None" ]
23.833333
21.791667
def write_plot(plot, filename, width=DEFAULT_PAGE_WIDTH, height=DEFAULT_PAGE_HEIGHT, unit=DEFAULT_PAGE_UNIT): """Writes a plot SVG to a file. Args: plot (list): a list of layers to plot filename (str): the name of the file to write width (float): the width of the output SVG height (float): the height of the output SVG unit (str): the unit of the height and width """ svg = plot_to_svg(plot, width, height, unit) with open(filename, 'w') as outfile: outfile.write(svg)
[ "def", "write_plot", "(", "plot", ",", "filename", ",", "width", "=", "DEFAULT_PAGE_WIDTH", ",", "height", "=", "DEFAULT_PAGE_HEIGHT", ",", "unit", "=", "DEFAULT_PAGE_UNIT", ")", ":", "svg", "=", "plot_to_svg", "(", "plot", ",", "width", ",", "height", ",", "unit", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "svg", ")" ]
40.461538
16.461538
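A usage sketch for write_plot(), assuming a plot is a list of layers of polylines (the exact layer format is defined by plot_to_svg, which is not shown here):

# Hypothetical plot: one layer containing two crossing polylines.
plot = [[[(0, 0), (10, 10)], [(10, 0), (0, 10)]]]
write_plot(plot, "cross.svg", width=8.5, height=11, unit="in")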
def common_srun_options(cls, campaign):
    """Get options to be given to all srun commands

    :rtype: dict
    """
    default = dict(campaign.process.get('srun') or {})
    default.update(output='slurm-%N-%t.stdout',
                   error='slurm-%N-%t.error')
    return default
[ "def", "common_srun_options", "(", "cls", ",", "campaign", ")", ":", "default", "=", "dict", "(", "campaign", ".", "process", ".", "get", "(", "'srun'", ")", "or", "{", "}", ")", "default", ".", "update", "(", "output", "=", "'slurm-%N-%t.stdout'", ",", "error", "=", "'slurm-%N-%t.error'", ")", "return", "default" ]
36.625
15.625
def _initialize_from_model(self, model):
    """
    Loads the known properties from the given model instance
    """
    for name, value in model.__dict__.items():
        if name in self._properties:
            setattr(self, name, value)
[ "def", "_initialize_from_model", "(", "self", ",", "model", ")", ":", "for", "name", ",", "value", "in", "model", ".", "__dict__", ".", "items", "(", ")", ":", "if", "name", "in", "self", ".", "_properties", ":", "setattr", "(", "self", ",", "name", ",", "value", ")" ]
31.428571
3.714286
def translate_reaction(reaction, metabolite_mapping): """ Return a mapping from KEGG compound identifiers to coefficients. Parameters ---------- reaction : cobra.Reaction The reaction whose metabolites are to be translated. metabolite_mapping : dict An existing mapping from cobra.Metabolite to KEGG compound identifier that may already contain the metabolites in question or will have to be extended. Returns ------- dict The stoichiometry of the reaction given as a mapping from metabolite KEGG identifier to coefficient. """ # Transport reactions where the same metabolite occurs in different # compartments should have been filtered out but just to be sure, we add # coefficients in the mapping. stoichiometry = defaultdict(float) for met, coef in iteritems(reaction.metabolites): kegg_id = metabolite_mapping.setdefault(met, map_metabolite2kegg(met)) if kegg_id is None: continue stoichiometry[kegg_id] += coef return dict(stoichiometry)
[ "def", "translate_reaction", "(", "reaction", ",", "metabolite_mapping", ")", ":", "# Transport reactions where the same metabolite occurs in different", "# compartments should have been filtered out but just to be sure, we add", "# coefficients in the mapping.", "stoichiometry", "=", "defaultdict", "(", "float", ")", "for", "met", ",", "coef", "in", "iteritems", "(", "reaction", ".", "metabolites", ")", ":", "kegg_id", "=", "metabolite_mapping", ".", "setdefault", "(", "met", ",", "map_metabolite2kegg", "(", "met", ")", ")", "if", "kegg_id", "is", "None", ":", "continue", "stoichiometry", "[", "kegg_id", "]", "+=", "coef", "return", "dict", "(", "stoichiometry", ")" ]
35.466667
21.733333
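Accumulating with += rather than plain assignment matters exactly for the transport case mentioned in the comment, where one compound appears on both sides of the reaction; a toy check:

from collections import defaultdict

# Toy stoichiometry keyed by already-mapped KEGG IDs; a transport reaction
# contributes the same compound twice, with opposite signs.
pairs = [("C00031", -1.0), ("C00031", 1.0), ("C00001", -1.0)]
stoichiometry = defaultdict(float)
for kegg_id, coef in pairs:
    stoichiometry[kegg_id] += coef
print(dict(stoichiometry))  # {'C00031': 0.0, 'C00001': -1.0}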
def degToDms(dec, isLatitude=True):
    """Convert the dec, in degrees, to an (sign,D,M,S) tuple.
    D and M are integer, and sign and S are float.
    """
    if isLatitude:
        assert dec <= 90, WCSError("DEC (%f) > 90.0" % (dec))
        assert dec >= -90, WCSError("DEC (%f) < -90.0" % (dec))

    if dec < 0.0:
        sign = -1.0
    else:
        sign = 1.0
    dec = dec * sign

    #mnt = (dec % 1.0) * 60.0
    #sec = (dec % (1.0/60.0)) * 3600.0
    # this calculation with return values produces a conversion problem.
    # e.g. dec +311600.00 -> 31.2666666667 degrees
    # deg=31 min=15 sec=60 instead of deg=31 min=16 sec=0.0
    # bug fixed
    mnt, sec = divmod(dec * 3600, 60)
    deg, mnt = divmod(mnt, 60)

    return (int(sign), int(deg), int(mnt), sec)
[ "def", "degToDms", "(", "dec", ",", "isLatitude", "=", "True", ")", ":", "if", "isLatitude", ":", "assert", "dec", "<=", "90", ",", "WCSError", "(", "\"DEC (%f) > 90.0\"", "%", "(", "dec", ")", ")", "assert", "dec", ">=", "-", "90", ",", "WCSError", "(", "\"DEC (%f) < -90.0\"", "%", "(", "dec", ")", ")", "if", "dec", "<", "0.0", ":", "sign", "=", "-", "1.0", "else", ":", "sign", "=", "1.0", "dec", "=", "dec", "*", "sign", "#mnt = (dec % 1.0) * 60.0", "#sec = (dec % (1.0/60.0)) * 3600.0", "# this calculation with return values produces conversion problem.", "# e.g. dec +311600.00 -> 31.2666666667 degree", "# deg=31 min=15 sec=60 instead deg=31 min=16 sec=0.0", "# bug fixed", "mnt", ",", "sec", "=", "divmod", "(", "dec", "*", "3600", ",", "60", ")", "deg", ",", "mnt", "=", "divmod", "(", "mnt", ",", "60", ")", "return", "(", "int", "(", "sign", ")", ",", "int", "(", "deg", ")", ",", "int", "(", "mnt", ")", ",", "sec", ")" ]
31.208333
17.958333
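The divmod rewrite can be sanity-checked on the exact case the comment describes (+31 deg 16 min):

dec = 31 + 16 / 60.0
mnt, sec = divmod(dec * 3600, 60)   # divmod keeps sec in [0, 60) ...
deg, mnt = divmod(mnt, 60)          # ... and mnt in [0, 60)
print(int(deg), int(mnt), sec)      # no more min=15 sec=60 artifacts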
def _register_info(self): """Register local methods in the Workbench Information system""" # Stores information on Workbench commands and signatures for name, meth in inspect.getmembers(self, predicate=inspect.isroutine): if not name.startswith('_') and name != 'run': info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__} self.workbench.store_info(info, name, 'command') # Register help information self.workbench.store_info({'help': self.help.help_cli()}, 'cli', 'help') self.workbench.store_info({'help': self.help.help_cli_basic()}, 'cli_basic', 'help') self.workbench.store_info({'help': self.help.help_cli_search()}, 'search', 'help') self.workbench.store_info({'help': self.help.help_dataframe()}, 'dataframe', 'help') self.workbench.store_info({'help': self.help.help_dataframe_memory()}, 'dataframe_memory', 'help') self.workbench.store_info({'help': self.help.help_dataframe_pe()}, 'dataframe_pe', 'help')
[ "def", "_register_info", "(", "self", ")", ":", "# Stores information on Workbench commands and signatures", "for", "name", ",", "meth", "in", "inspect", ".", "getmembers", "(", "self", ",", "predicate", "=", "inspect", ".", "isroutine", ")", ":", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "and", "name", "!=", "'run'", ":", "info", "=", "{", "'command'", ":", "name", ",", "'sig'", ":", "str", "(", "funcsigs", ".", "signature", "(", "meth", ")", ")", ",", "'docstring'", ":", "meth", ".", "__doc__", "}", "self", ".", "workbench", ".", "store_info", "(", "info", ",", "name", ",", "'command'", ")", "# Register help information", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_cli", "(", ")", "}", ",", "'cli'", ",", "'help'", ")", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_cli_basic", "(", ")", "}", ",", "'cli_basic'", ",", "'help'", ")", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_cli_search", "(", ")", "}", ",", "'search'", ",", "'help'", ")", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_dataframe", "(", ")", "}", ",", "'dataframe'", ",", "'help'", ")", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_dataframe_memory", "(", ")", "}", ",", "'dataframe_memory'", ",", "'help'", ")", "self", ".", "workbench", ".", "store_info", "(", "{", "'help'", ":", "self", ".", "help", ".", "help_dataframe_pe", "(", ")", "}", ",", "'dataframe_pe'", ",", "'help'", ")" ]
70.8
36.666667
def original_lesk(context_sentence: str, ambiguous_word: str, dictionary=None, from_cache=True) -> "wn.Synset":
    """
    This function is the implementation of the original Lesk algorithm (1986).
    It requires a dictionary which contains the definition of the different
    senses of each word. See http://dl.acm.org/citation.cfm?id=318728

    :param context_sentence: String, sentence or document.
    :param ambiguous_word: String, a single word.
    :return: A Synset for the estimated best sense.
    """

    ambiguous_word = lemmatize(ambiguous_word)
    if not dictionary: # If dictionary is not provided, use the WN definition.
        dictionary = signatures(ambiguous_word, original_lesk=True, from_cache=from_cache)
    best_sense = compare_overlaps_greedy(context_sentence.split(), dictionary)
    return best_sense
[ "def", "original_lesk", "(", "context_sentence", ":", "str", ",", "ambiguous_word", ":", "str", ",", "dictionary", "=", "None", ",", "from_cache", "=", "True", ")", "->", "\"wn.Synset\"", ":", "ambiguous_word", "=", "lemmatize", "(", "ambiguous_word", ")", "if", "not", "dictionary", ":", "# If dictionary is not provided, use the WN defintion.", "dictionary", "=", "signatures", "(", "ambiguous_word", ",", "original_lesk", "=", "True", ",", "from_cache", "=", "from_cache", ")", "best_sense", "=", "compare_overlaps_greedy", "(", "context_sentence", ".", "split", "(", ")", ",", "dictionary", ")", "return", "best_sense" ]
48
28.235294
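Typical call shape for original_lesk(), assuming the WordNet-backed signatures() cache used above is importable from the same package:

sent = "I went to the bank to deposit my money"
best = original_lesk(sent, "bank")
print(best)  # a wn.Synset picked by greedy overlap with the context words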
def cache(ctx, clear_subliminal): """Cache management.""" if clear_subliminal: for file in glob.glob(os.path.join(ctx.parent.params['cache_dir'], cache_file) + '*'): os.remove(file) click.echo('Subliminal\'s cache cleared.') else: click.echo('Nothing done.')
[ "def", "cache", "(", "ctx", ",", "clear_subliminal", ")", ":", "if", "clear_subliminal", ":", "for", "file", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "ctx", ".", "parent", ".", "params", "[", "'cache_dir'", "]", ",", "cache_file", ")", "+", "'*'", ")", ":", "os", ".", "remove", "(", "file", ")", "click", ".", "echo", "(", "'Subliminal\\'s cache cleared.'", ")", "else", ":", "click", ".", "echo", "(", "'Nothing done.'", ")" ]
37.375
17
def patched_contractfunction_estimateGas(self, transaction=None, block_identifier=None): """Temporary workaround until next web3.py release (5.X.X)""" if transaction is None: estimate_gas_transaction = {} else: estimate_gas_transaction = dict(**transaction) if 'data' in estimate_gas_transaction: raise ValueError('Cannot set data in estimateGas transaction') if 'to' in estimate_gas_transaction: raise ValueError('Cannot set to in estimateGas transaction') if self.address: estimate_gas_transaction.setdefault('to', self.address) if self.web3.eth.defaultAccount is not empty: estimate_gas_transaction.setdefault('from', self.web3.eth.defaultAccount) if 'to' not in estimate_gas_transaction: if isinstance(self, type): raise ValueError( 'When using `Contract.estimateGas` from a contract factory ' 'you must provide a `to` address with the transaction', ) else: raise ValueError( 'Please ensure that this contract instance has an address.', ) return estimate_gas_for_function( self.address, self.web3, self.function_identifier, estimate_gas_transaction, self.contract_abi, self.abi, block_identifier, *self.args, **self.kwargs, )
[ "def", "patched_contractfunction_estimateGas", "(", "self", ",", "transaction", "=", "None", ",", "block_identifier", "=", "None", ")", ":", "if", "transaction", "is", "None", ":", "estimate_gas_transaction", "=", "{", "}", "else", ":", "estimate_gas_transaction", "=", "dict", "(", "*", "*", "transaction", ")", "if", "'data'", "in", "estimate_gas_transaction", ":", "raise", "ValueError", "(", "'Cannot set data in estimateGas transaction'", ")", "if", "'to'", "in", "estimate_gas_transaction", ":", "raise", "ValueError", "(", "'Cannot set to in estimateGas transaction'", ")", "if", "self", ".", "address", ":", "estimate_gas_transaction", ".", "setdefault", "(", "'to'", ",", "self", ".", "address", ")", "if", "self", ".", "web3", ".", "eth", ".", "defaultAccount", "is", "not", "empty", ":", "estimate_gas_transaction", ".", "setdefault", "(", "'from'", ",", "self", ".", "web3", ".", "eth", ".", "defaultAccount", ")", "if", "'to'", "not", "in", "estimate_gas_transaction", ":", "if", "isinstance", "(", "self", ",", "type", ")", ":", "raise", "ValueError", "(", "'When using `Contract.estimateGas` from a contract factory '", "'you must provide a `to` address with the transaction'", ",", ")", "else", ":", "raise", "ValueError", "(", "'Please ensure that this contract instance has an address.'", ",", ")", "return", "estimate_gas_for_function", "(", "self", ".", "address", ",", "self", ".", "web3", ",", "self", ".", "function_identifier", ",", "estimate_gas_transaction", ",", "self", ".", "contract_abi", ",", "self", ".", "abi", ",", "block_identifier", ",", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ",", ")" ]
35.051282
21.076923
def backup_file(filename):
    """ create a backup of the desired file """
    if not os.path.exists(filename):
        return
    BACKUP_SUFFIX = ".sprinter.bak"
    backup_filename = filename + BACKUP_SUFFIX
    shutil.copyfile(filename, backup_filename)
[ "def", "backup_file", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "BACKUP_SUFFIX", "=", "\".sprinter.bak\"", "backup_filename", "=", "filename", "+", "BACKUP_SUFFIX", "shutil", ".", "copyfile", "(", "filename", ",", "backup_filename", ")" ]
27.777778
15.666667
def _page(self, text, html=False): """ Displays text using the pager if it exceeds the height of the viewport. Parameters: ----------- html : bool, optional (default False) If set, the text will be interpreted as HTML instead of plain text. """ line_height = QtGui.QFontMetrics(self.font).height() minlines = self._control.viewport().height() / line_height if self.paging != 'none' and \ re.match("(?:[^\n]*\n){%i}" % minlines, text): if self.paging == 'custom': self.custom_page_requested.emit(text) else: self._page_control.clear() cursor = self._page_control.textCursor() if html: self._insert_html(cursor, text) else: self._insert_plain_text(cursor, text) self._page_control.moveCursor(QtGui.QTextCursor.Start) self._page_control.viewport().resize(self._control.size()) if self._splitter: self._page_control.show() self._page_control.setFocus() else: self.layout().setCurrentWidget(self._page_control) elif html: self._append_html(text) else: self._append_plain_text(text)
[ "def", "_page", "(", "self", ",", "text", ",", "html", "=", "False", ")", ":", "line_height", "=", "QtGui", ".", "QFontMetrics", "(", "self", ".", "font", ")", ".", "height", "(", ")", "minlines", "=", "self", ".", "_control", ".", "viewport", "(", ")", ".", "height", "(", ")", "/", "line_height", "if", "self", ".", "paging", "!=", "'none'", "and", "re", ".", "match", "(", "\"(?:[^\\n]*\\n){%i}\"", "%", "minlines", ",", "text", ")", ":", "if", "self", ".", "paging", "==", "'custom'", ":", "self", ".", "custom_page_requested", ".", "emit", "(", "text", ")", "else", ":", "self", ".", "_page_control", ".", "clear", "(", ")", "cursor", "=", "self", ".", "_page_control", ".", "textCursor", "(", ")", "if", "html", ":", "self", ".", "_insert_html", "(", "cursor", ",", "text", ")", "else", ":", "self", ".", "_insert_plain_text", "(", "cursor", ",", "text", ")", "self", ".", "_page_control", ".", "moveCursor", "(", "QtGui", ".", "QTextCursor", ".", "Start", ")", "self", ".", "_page_control", ".", "viewport", "(", ")", ".", "resize", "(", "self", ".", "_control", ".", "size", "(", ")", ")", "if", "self", ".", "_splitter", ":", "self", ".", "_page_control", ".", "show", "(", ")", "self", ".", "_page_control", ".", "setFocus", "(", ")", "else", ":", "self", ".", "layout", "(", ")", ".", "setCurrentWidget", "(", "self", ".", "_page_control", ")", "elif", "html", ":", "self", ".", "_append_html", "(", "text", ")", "else", ":", "self", ".", "_append_plain_text", "(", "text", ")" ]
39.794118
16.794118
def _save_to_database(url, property_name, data): """ Store `data` under `property_name` in the `url` key in REST API DB. Args: url (obj): URL of the resource to which `property_name` will be stored. property_name (str): Name of the property under which the `data` will be stored. data (obj): Any object. """ data = json.dumps([ d.to_dict() if hasattr(d, "to_dict") else d for d in data ]) logger.debug("_save_to_database() data: %s" % repr(data)) requests.post( _WEB_URL + _REQUEST_DB_SAVE, timeout=REQUEST_TIMEOUT, allow_redirects=True, verify=False, data={ "url": url, "value": data, "property_name": property_name, } ) logger.info( "`%s` for `%s` sent to REST DB." % ( property_name, url, ) )
[ "def", "_save_to_database", "(", "url", ",", "property_name", ",", "data", ")", ":", "data", "=", "json", ".", "dumps", "(", "[", "d", ".", "to_dict", "(", ")", "if", "hasattr", "(", "d", ",", "\"to_dict\"", ")", "else", "d", "for", "d", "in", "data", "]", ")", "logger", ".", "debug", "(", "\"_save_to_database() data: %s\"", "%", "repr", "(", "data", ")", ")", "requests", ".", "post", "(", "_WEB_URL", "+", "_REQUEST_DB_SAVE", ",", "timeout", "=", "REQUEST_TIMEOUT", ",", "allow_redirects", "=", "True", ",", "verify", "=", "False", ",", "data", "=", "{", "\"url\"", ":", "url", ",", "\"value\"", ":", "data", ",", "\"property_name\"", ":", "property_name", ",", "}", ")", "logger", ".", "info", "(", "\"`%s` for `%s` sent to REST DB.\"", "%", "(", "property_name", ",", "url", ",", ")", ")" ]
25.285714
21.628571
def industry_name(self):
    """
    [str] National economic industry classification name (stocks only)
    """
    try:
        return self.__dict__["industry_name"]
    except (KeyError, ValueError):
        raise AttributeError(
            "Instrument(order_book_id={}) has no attribute 'industry_name' ".format(self.order_book_id)
        )
[ "def", "industry_name", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"industry_name\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'industry_name' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
32.8
16.6
def _get_outputs(self): """ Return a dict of the terraform outputs. :return: dict of terraform outputs :rtype: dict """ if self.tf_version >= (0, 7, 0): logger.debug('Running: terraform output') res = self._run_tf('output', cmd_args=['-json']) outs = json.loads(res.strip()) res = {} for k in outs.keys(): if isinstance(outs[k], type({})): res[k] = outs[k]['value'] else: res[k] = outs[k] logger.debug('Terraform outputs: %s', res) return res logger.debug('Running: terraform output') res = self._run_tf('output') outs = {} for line in res.split("\n"): line = line.strip() if line == '': continue parts = line.split(' = ', 1) outs[parts[0]] = parts[1] logger.debug('Terraform outputs: %s', outs) return outs
[ "def", "_get_outputs", "(", "self", ")", ":", "if", "self", ".", "tf_version", ">=", "(", "0", ",", "7", ",", "0", ")", ":", "logger", ".", "debug", "(", "'Running: terraform output'", ")", "res", "=", "self", ".", "_run_tf", "(", "'output'", ",", "cmd_args", "=", "[", "'-json'", "]", ")", "outs", "=", "json", ".", "loads", "(", "res", ".", "strip", "(", ")", ")", "res", "=", "{", "}", "for", "k", "in", "outs", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "outs", "[", "k", "]", ",", "type", "(", "{", "}", ")", ")", ":", "res", "[", "k", "]", "=", "outs", "[", "k", "]", "[", "'value'", "]", "else", ":", "res", "[", "k", "]", "=", "outs", "[", "k", "]", "logger", ".", "debug", "(", "'Terraform outputs: %s'", ",", "res", ")", "return", "res", "logger", ".", "debug", "(", "'Running: terraform output'", ")", "res", "=", "self", ".", "_run_tf", "(", "'output'", ")", "outs", "=", "{", "}", "for", "line", "in", "res", ".", "split", "(", "\"\\n\"", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "==", "''", ":", "continue", "parts", "=", "line", ".", "split", "(", "' = '", ",", "1", ")", "outs", "[", "parts", "[", "0", "]", "]", "=", "parts", "[", "1", "]", "logger", ".", "debug", "(", "'Terraform outputs: %s'", ",", "outs", ")", "return", "outs" ]
33.166667
11.033333
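The pre-0.7 fallback branch of _get_outputs() is simple "key = value" line parsing and can be exercised on its own:

# Sample of the plain-text `terraform output` format handled by the fallback.
res = "vpc_id = vpc-12345\nelb_name = my-elb\n"
outs = {}
for line in res.split("\n"):
    line = line.strip()
    if line == "":
        continue
    key, value = line.split(" = ", 1)
    outs[key] = value
print(outs)  # {'vpc_id': 'vpc-12345', 'elb_name': 'my-elb'}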
def get_node(self): """return etree Element representing this slide""" # already added title, text frames # add animation chunks if self.animations: anim_par = el("anim:par", attrib={"presentation:node-type": "timing-root"}) self._page.append(anim_par) anim_seq = sub_el( anim_par, "anim:seq", attrib={"presentation:node-type": "main-sequence"} ) for a in self.animations: a_node = a.get_node() anim_seq.append(a_node) # add notes now (so they are last) if self.notes_frame: notes = self.notes_frame.get_node() self._page.append(notes) if self.footer: self._page.attrib[ns("presentation", "use-footer-name")] = self.footer.name return self._page
[ "def", "get_node", "(", "self", ")", ":", "# already added title, text frames", "# add animation chunks", "if", "self", ".", "animations", ":", "anim_par", "=", "el", "(", "\"anim:par\"", ",", "attrib", "=", "{", "\"presentation:node-type\"", ":", "\"timing-root\"", "}", ")", "self", ".", "_page", ".", "append", "(", "anim_par", ")", "anim_seq", "=", "sub_el", "(", "anim_par", ",", "\"anim:seq\"", ",", "attrib", "=", "{", "\"presentation:node-type\"", ":", "\"main-sequence\"", "}", ")", "for", "a", "in", "self", ".", "animations", ":", "a_node", "=", "a", ".", "get_node", "(", ")", "anim_seq", ".", "append", "(", "a_node", ")", "# add notes now (so they are last)", "if", "self", ".", "notes_frame", ":", "notes", "=", "self", ".", "notes_frame", ".", "get_node", "(", ")", "self", ".", "_page", ".", "append", "(", "notes", ")", "if", "self", ".", "footer", ":", "self", ".", "_page", ".", "attrib", "[", "ns", "(", "\"presentation\"", ",", "\"use-footer-name\"", ")", "]", "=", "self", ".", "footer", ".", "name", "return", "self", ".", "_page" ]
39.714286
15.714286
def write_to_file(self, filename): """ Write the configuration to a file. Use the correct order of values. """ fid = open(filename, 'w') for key in self.key_order: if(key == -1): fid.write('\n') else: fid.write('{0}\n'.format(self[key])) fid.close()
[ "def", "write_to_file", "(", "self", ",", "filename", ")", ":", "fid", "=", "open", "(", "filename", ",", "'w'", ")", "for", "key", "in", "self", ".", "key_order", ":", "if", "(", "key", "==", "-", "1", ")", ":", "fid", ".", "write", "(", "'\\n'", ")", "else", ":", "fid", ".", "write", "(", "'{0}\\n'", ".", "format", "(", "self", "[", "key", "]", ")", ")", "fid", ".", "close", "(", ")" ]
28
14.833333
def ask_user(d): """Wrap sphinx.quickstart.ask_user, and add additional questions.""" # Print welcome message msg = bold('Welcome to the Hieroglyph %s quickstart utility.') % ( version(), ) print(msg) msg = """ This will ask questions for creating a Hieroglyph project, and then ask some basic Sphinx questions. """ print(msg) # set a few defaults that we don't usually care about for Hieroglyph d.update({ 'version': datetime.date.today().strftime('%Y.%m.%d'), 'release': datetime.date.today().strftime('%Y.%m.%d'), 'make_mode': True, }) if 'project' not in d: print(''' The presentation title will be included on the title slide.''') sphinx.quickstart.do_prompt(d, 'project', 'Presentation title') if 'author' not in d: sphinx.quickstart.do_prompt(d, 'author', 'Author name(s)') # slide_theme theme_entrypoints = pkg_resources.iter_entry_points('hieroglyph.theme') themes = [ t.load() for t in theme_entrypoints ] msg = """ Available themes: """ for theme in themes: msg += '\n'.join([ bold(theme['name']), theme['desc'], '', '', ]) msg += """Which theme would you like to use?""" print(msg) sphinx.quickstart.do_prompt( d, 'slide_theme', 'Slide Theme', themes[0]['name'], sphinx.quickstart.choice( *[t['name'] for t in themes] ), ) # Ask original questions print("") sphinx.quickstart.ask_user(d)
[ "def", "ask_user", "(", "d", ")", ":", "# Print welcome message", "msg", "=", "bold", "(", "'Welcome to the Hieroglyph %s quickstart utility.'", ")", "%", "(", "version", "(", ")", ",", ")", "print", "(", "msg", ")", "msg", "=", "\"\"\"\nThis will ask questions for creating a Hieroglyph project, and then ask\nsome basic Sphinx questions.\n\"\"\"", "print", "(", "msg", ")", "# set a few defaults that we don't usually care about for Hieroglyph", "d", ".", "update", "(", "{", "'version'", ":", "datetime", ".", "date", ".", "today", "(", ")", ".", "strftime", "(", "'%Y.%m.%d'", ")", ",", "'release'", ":", "datetime", ".", "date", ".", "today", "(", ")", ".", "strftime", "(", "'%Y.%m.%d'", ")", ",", "'make_mode'", ":", "True", ",", "}", ")", "if", "'project'", "not", "in", "d", ":", "print", "(", "'''\nThe presentation title will be included on the title slide.'''", ")", "sphinx", ".", "quickstart", ".", "do_prompt", "(", "d", ",", "'project'", ",", "'Presentation title'", ")", "if", "'author'", "not", "in", "d", ":", "sphinx", ".", "quickstart", ".", "do_prompt", "(", "d", ",", "'author'", ",", "'Author name(s)'", ")", "# slide_theme", "theme_entrypoints", "=", "pkg_resources", ".", "iter_entry_points", "(", "'hieroglyph.theme'", ")", "themes", "=", "[", "t", ".", "load", "(", ")", "for", "t", "in", "theme_entrypoints", "]", "msg", "=", "\"\"\"\nAvailable themes:\n\n\"\"\"", "for", "theme", "in", "themes", ":", "msg", "+=", "'\\n'", ".", "join", "(", "[", "bold", "(", "theme", "[", "'name'", "]", ")", ",", "theme", "[", "'desc'", "]", ",", "''", ",", "''", ",", "]", ")", "msg", "+=", "\"\"\"Which theme would you like to use?\"\"\"", "print", "(", "msg", ")", "sphinx", ".", "quickstart", ".", "do_prompt", "(", "d", ",", "'slide_theme'", ",", "'Slide Theme'", ",", "themes", "[", "0", "]", "[", "'name'", "]", ",", "sphinx", ".", "quickstart", ".", "choice", "(", "*", "[", "t", "[", "'name'", "]", "for", "t", "in", "themes", "]", ")", ",", ")", "# Ask original questions", "print", "(", "\"\"", ")", "sphinx", ".", "quickstart", ".", "ask_user", "(", "d", ")" ]
24.754098
22.737705
def decode(self, r, nostrip=False, k=None, erasures_pos=None, only_erasures=False, return_string=True):
    '''Given a received string or byte array or list r of values between 0 and gf2_charac, attempts to decode it. If it's a valid codeword, or if there are no more than (n-k)/2 errors, the repaired message is returned.

    A message always has k bytes; if a message contained less, it is left padded with null bytes. When decoded, these leading null bytes are stripped, but that can cause problems if decoding binary data. When nostrip is True, messages returned are always k bytes long. This is useful to make sure no data is lost when decoding binary data.

    Theoretically, we have R(x) = C(x) + E(x) + V(x), where R is the received message, C is the correct message without errors nor erasures, E are the errors and V the erasures. Thus the goal is to compute E and V from R, so that we can compute: C(x) = R(x) - E(x) - V(x), and then we have our original message! The main problem of decoding is to solve the so-called Key Equation; here we use Berlekamp-Massey.

    When stated in the language of spectral estimation, decoding consists of a Fourier transform (syndrome computer), followed by a spectral analysis (Berlekamp-Massey or Euclidean algorithm), followed by an inverse Fourier transform (Chien search). (see Blahut, "Algebraic Codes for Data Transmission", 2003, chapter 7.6 Decoding in Time Domain).
    '''
    n = self.n
    if not k: k = self.k

    # If we were given a string, convert to a list (important to support fields above 2^8)
    if isinstance(r, _str):
        r = [ord(x) for x in r]

    # Turn r into a polynomial
    rp = Polynomial([GF2int(x) for x in r])

    if erasures_pos:
        # Convert string positions to coefficients positions for the algebra to work (see _find_erasures_locator(), ecc characters represent the first coefficients while the message is put last, so it's exactly the reverse of the string positions where the message is first and the ecc is last, thus it's just like if you read the message+ecc string in reverse)
        erasures_pos = [len(r)-1-x for x in erasures_pos]
        # Set erasures characters to null bytes
        # Note that you can just leave the original characters as they are, you don't need to set erased characters to null bytes for the decoding to work, but note that it won't help either (ie, fake erasures, meaning characters that were detected as erasures but actually aren't, will still "consume" one ecc symbol, even if you don't set them to null byte, this is because the syndrome is limited to n-k and thus you can't decode above this bound without a clever trick).
        # Example string containing a fake erasure: "hello sam" -> "ooooo sam" with erasures_pos = [0, 1, 2, 3, 4]. Here in fact the last erasure is fake because the original character also was "o" so if we detect "o" as an erasure, we will end up with one fake erasure. But setting it to null byte or not, it will still use up one ecc symbol, it will always be counted as a real erasure. If you're below the n-k bound, then the decoding will be ok. If you're above, then you can't do anything, the decoding won't work. Maybe todo: try to find a clever list decoding algorithm to account for fake erasures....
        # Note: commented out so that the resulting omega (error evaluator polynomial) is the same as the erasure evaluator polynomial when decoding the same number of errors or erasures (ie, decoding 3 erasures only will give the same result as 3 errors only, with of course the errors/erasures on the same characters).
        #for erasure in erasures_pos:
            #rp[erasure] = GF2int(0)

    # Compute the syndromes:
    sz = self._syndromes(rp, k=k)

    if sz.coefficients.count(GF2int(0)) == len(sz):
        # the code is already valid, there's nothing to do
        # The last n-k bytes are parity
        ret = r[:-(n-k)]
        ecc = r[-(n-k):]
        if not nostrip:
            ret = self._list_lstrip(r[:-(n-k)], 0)
        if return_string and self.gf2_charac < 256:
            ret = self._list2str(ret)
            ecc = self._list2str(ecc)
        return ret, ecc

    # Erasures locator polynomial computation
    erasures_loc = None
    erasures_eval = None
    erasures_count = 0
    if erasures_pos:
        erasures_count = len(erasures_pos)
        # Compute the erasure locator polynomial
        erasures_loc = self._find_erasures_locator(erasures_pos)
        # Compute the erasure evaluator polynomial
        erasures_eval = self._find_error_evaluator(sz, erasures_loc, k=k)

    if only_erasures:
        sigma = erasures_loc
        omega = erasures_eval
    else:
        # Find the error locator polynomial and error evaluator polynomial
        # using the Berlekamp-Massey algorithm
        # if erasures were supplied, BM will generate the errata (errors-and-erasures) locator and evaluator polynomials
        sigma, omega = self._berlekamp_massey(sz, k=k, erasures_loc=erasures_loc, erasures_eval=erasures_eval, erasures_count=erasures_count)
        omega = self._find_error_evaluator(sz, sigma, k=k) # we want to make sure that omega is correct (we know that sigma is always correct, but omega not really)

    # Now use Chien's procedure to find the error locations
    # j is an array of integers representing the positions of the errors, 0
    # being the rightmost byte
    # X is a corresponding array of GF(2^8) values where X_i = alpha^(j_i)
    X, j = self._chien_search(sigma)

    # Sanity check: Cannot guarantee correct decoding of more than n-k errata (Singleton Bound, n-k being the minimum distance), and we cannot even check if it's correct (the syndrome will always be all 0 if we try to decode above the bound), thus it's better to just return the input as-is.
    if len(j) > n-k:
        ret = r[:-(n-k)]
        ecc = r[-(n-k):]
        if not nostrip:
            ret = self._list_lstrip(r[:-(n-k)], 0)
        if return_string and self.gf2_charac < 256:
            ret = self._list2str(ret)
            ecc = self._list2str(ecc)
        return ret, ecc

    # And finally, find the error magnitudes with Forney's Formula
    # Y is an array of GF(2^8) values corresponding to the error magnitude
    # at the position given by the j array
    Y = self._forney(omega, X)

    # Put the error and locations together to form the error polynomial
    # Note that an alternative would be to compute the error-spectrum polynomial E(x) which satisfies E(x)*Sigma(x) = 0 (mod x^n - 1) = Omega(x)(x^n - 1) -- see Blahut, Algebraic codes for data transmission
    Elist = [GF2int(0)] * self.gf2_charac
    if len(Y) >= len(j): # failsafe: if the number of erratas is higher than the number of coefficients in the magnitude polynomial, we failed!
        for i in _range(self.gf2_charac): # FIXME? is this really necessary to go to self.gf2_charac? len(rp) wouldn't be just enough? (since the goal is anyway to subtract E from rp)
            if i in j:
                Elist[i] = Y[j.index(i)]
        E = Polynomial( Elist[::-1] ) # reverse the list because we used the coefficient degrees (j) instead of the error positions
    else:
        E = Polynomial()

    # And we get our real codeword!
    c = rp - E # Remember what we wrote above: R(x) = C(x) + E(x), so here to get back the original codeword C(x) = R(x) - E(x) ! (V(x) the erasures are here is included inside E(x))
    if len(c) > len(r): c = rp # failsafe: in case the correction went totally wrong (we repaired padded null bytes instead of the message! thus we end up with a longer message than what we should have), then we just return the uncorrected message. Note: we compare the length of c with r on purpose, that's not an error: if we compare with rp, if the first few characters were erased (null bytes) in r, then in rp the Polynomial will automatically skip them, thus the length will always be smaller in that case.

    # Split the polynomial into two parts: the corrected message and the corrected ecc
    ret = c.coefficients[:-(n-k)]
    ecc = c.coefficients[-(n-k):]

    if nostrip:
        # Polynomial objects don't store leading 0 coefficients, so we
        # actually need to pad this to k bytes
        ret = self._list_rjust(ret, k, 0)

    if return_string and self.gf2_charac < 256: # automatically disable return_string if the field is above 255 (chr would fail, so it's up to the user to define the mapping)
        # Form it back into a string
        ret = self._list2str(ret)
        ecc = self._list2str(ecc)

    return ret, ecc
[ "def", "decode", "(", "self", ",", "r", ",", "nostrip", "=", "False", ",", "k", "=", "None", ",", "erasures_pos", "=", "None", ",", "only_erasures", "=", "False", ",", "return_string", "=", "True", ")", ":", "n", "=", "self", ".", "n", "if", "not", "k", ":", "k", "=", "self", ".", "k", "# If we were given a string, convert to a list (important to support fields above 2^8)", "if", "isinstance", "(", "r", ",", "_str", ")", ":", "r", "=", "[", "ord", "(", "x", ")", "for", "x", "in", "r", "]", "# Turn r into a polynomial", "rp", "=", "Polynomial", "(", "[", "GF2int", "(", "x", ")", "for", "x", "in", "r", "]", ")", "if", "erasures_pos", ":", "# Convert string positions to coefficients positions for the algebra to work (see _find_erasures_locator(), ecc characters represent the first coefficients while the message is put last, so it's exactly the reverse of the string positions where the message is first and the ecc is last, thus it's just like if you read the message+ecc string in reverse)", "erasures_pos", "=", "[", "len", "(", "r", ")", "-", "1", "-", "x", "for", "x", "in", "erasures_pos", "]", "# Set erasures characters to null bytes", "# Note that you can just leave the original characters as they are, you don't need to set erased characters to null bytes for the decoding to work, but note that it won't help either (ie, fake erasures, meaning characters that were detected as erasures but actually aren't, will still \"consume\" one ecc symbol, even if you don't set them to null byte, this is because the syndrome is limited to n-k and thus you can't decode above this bound without a clever trick).", "# Example string containing a fake erasure: \"hello sam\" -> \"ooooo sam\" with erasures_pos = [0, 1, 2, 3, 4]. Here in fact the last erasure is fake because the original character also was \"o\" so if we detect \"o\" as an erasure, we will end up with one fake erasure. But setting it to null byte or not, it will still use up one ecc symbol, it will always be counted as a real erasure. If you're below the n-k bound, then the doceding will be ok. If you're above, then you can't do anything, the decoding won't work. 
Maybe todo: try to find a clever list decoding algorithm to account for fake erasures....", "# Note: commented out so that the resulting omega (error evaluator polynomial) is the same as the erasure evaluator polynomial when decoding the same number of errors or erasures (ie, decoding 3 erasures only will give the same result as 3 errors only, with of course the errors/erasures on the same characters).", "#for erasure in erasures_pos:", "#rp[erasure] = GF2int(0)", "# Compute the syndromes:", "sz", "=", "self", ".", "_syndromes", "(", "rp", ",", "k", "=", "k", ")", "if", "sz", ".", "coefficients", ".", "count", "(", "GF2int", "(", "0", ")", ")", "==", "len", "(", "sz", ")", ":", "# the code is already valid, there's nothing to do", "# The last n-k bytes are parity", "ret", "=", "r", "[", ":", "-", "(", "n", "-", "k", ")", "]", "ecc", "=", "r", "[", "-", "(", "n", "-", "k", ")", ":", "]", "if", "not", "nostrip", ":", "ret", "=", "self", ".", "_list_lstrip", "(", "r", "[", ":", "-", "(", "n", "-", "k", ")", "]", ",", "0", ")", "if", "return_string", "and", "self", ".", "gf2_charac", "<", "256", ":", "ret", "=", "self", ".", "_list2str", "(", "ret", ")", "ecc", "=", "self", ".", "_list2str", "(", "ecc", ")", "return", "ret", ",", "ecc", "# Erasures locator polynomial computation", "erasures_loc", "=", "None", "erasures_eval", "=", "None", "erasures_count", "=", "0", "if", "erasures_pos", ":", "erasures_count", "=", "len", "(", "erasures_pos", ")", "# Compute the erasure locator polynomial", "erasures_loc", "=", "self", ".", "_find_erasures_locator", "(", "erasures_pos", ")", "# Compute the erasure evaluator polynomial", "erasures_eval", "=", "self", ".", "_find_error_evaluator", "(", "sz", ",", "erasures_loc", ",", "k", "=", "k", ")", "if", "only_erasures", ":", "sigma", "=", "erasures_loc", "omega", "=", "erasures_eval", "else", ":", "# Find the error locator polynomial and error evaluator polynomial", "# using the Berlekamp-Massey algorithm", "# if erasures were supplied, BM will generate the errata (errors-and-erasures) locator and evaluator polynomials", "sigma", ",", "omega", "=", "self", ".", "_berlekamp_massey", "(", "sz", ",", "k", "=", "k", ",", "erasures_loc", "=", "erasures_loc", ",", "erasures_eval", "=", "erasures_eval", ",", "erasures_count", "=", "erasures_count", ")", "omega", "=", "self", ".", "_find_error_evaluator", "(", "sz", ",", "sigma", ",", "k", "=", "k", ")", "# we want to make sure that omega is correct (we know that sigma is always correct, but omega not really)", "# Now use Chien's procedure to find the error locations", "# j is an array of integers representing the positions of the errors, 0", "# being the rightmost byte", "# X is a corresponding array of GF(2^8) values where X_i = alpha^(j_i)", "X", ",", "j", "=", "self", ".", "_chien_search", "(", "sigma", ")", "# Sanity check: Cannot guarantee correct decoding of more than n-k errata (Singleton Bound, n-k being the minimum distance), and we cannot even check if it's correct (the syndrome will always be all 0 if we try to decode above the bound), thus it's better to just return the input as-is.", "if", "len", "(", "j", ")", ">", "n", "-", "k", ":", "ret", "=", "r", "[", ":", "-", "(", "n", "-", "k", ")", "]", "ecc", "=", "r", "[", "-", "(", "n", "-", "k", ")", ":", "]", "if", "not", "nostrip", ":", "ret", "=", "self", ".", "_list_lstrip", "(", "r", "[", ":", "-", "(", "n", "-", "k", ")", "]", ",", "0", ")", "if", "return_string", "and", "self", ".", "gf2_charac", "<", "256", ":", "ret", "=", "self", ".", 
"_list2str", "(", "ret", ")", "ecc", "=", "self", ".", "_list2str", "(", "ecc", ")", "return", "ret", ",", "ecc", "# And finally, find the error magnitudes with Forney's Formula", "# Y is an array of GF(2^8) values corresponding to the error magnitude", "# at the position given by the j array", "Y", "=", "self", ".", "_forney", "(", "omega", ",", "X", ")", "# Put the error and locations together to form the error polynomial", "# Note that an alternative would be to compute the error-spectrum polynomial E(x) which satisfies E(x)*Sigma(x) = 0 (mod x^n - 1) = Omega(x)(x^n - 1) -- see Blahut, Algebraic codes for data transmission", "Elist", "=", "[", "GF2int", "(", "0", ")", "]", "*", "self", ".", "gf2_charac", "if", "len", "(", "Y", ")", ">=", "len", "(", "j", ")", ":", "# failsafe: if the number of erratas is higher than the number of coefficients in the magnitude polynomial, we failed!", "for", "i", "in", "_range", "(", "self", ".", "gf2_charac", ")", ":", "# FIXME? is this really necessary to go to self.gf2_charac? len(rp) wouldn't be just enough? (since the goal is anyway to substract E to rp)", "if", "i", "in", "j", ":", "Elist", "[", "i", "]", "=", "Y", "[", "j", ".", "index", "(", "i", ")", "]", "E", "=", "Polynomial", "(", "Elist", "[", ":", ":", "-", "1", "]", ")", "# reverse the list because we used the coefficient degrees (j) instead of the error positions", "else", ":", "E", "=", "Polynomial", "(", ")", "# And we get our real codeword!", "c", "=", "rp", "-", "E", "# Remember what we wrote above: R(x) = C(x) + E(x), so here to get back the original codeword C(x) = R(x) - E(x) ! (V(x) the erasures are here is included inside E(x))", "if", "len", "(", "c", ")", ">", "len", "(", "r", ")", ":", "c", "=", "rp", "# failsafe: in case the correction went totally wrong (we repaired padded null bytes instead of the message! thus we end up with a longer message than what we should have), then we just return the uncorrected message. Note: we compare the length of c with r on purpose, that's not an error: if we compare with rp, if the first few characters were erased (null bytes) in r, then in rp the Polynomial will automatically skip them, thus the length will always be smaller in that case.", "# Split the polynomial into two parts: the corrected message and the corrected ecc", "ret", "=", "c", ".", "coefficients", "[", ":", "-", "(", "n", "-", "k", ")", "]", "ecc", "=", "c", ".", "coefficients", "[", "-", "(", "n", "-", "k", ")", ":", "]", "if", "nostrip", ":", "# Polynomial objects don't store leading 0 coefficients, so we", "# actually need to pad this to k bytes", "ret", "=", "self", ".", "_list_rjust", "(", "ret", ",", "k", ",", "0", ")", "if", "return_string", "and", "self", ".", "gf2_charac", "<", "256", ":", "# automatically disable return_string if the field is above 255 (chr would fail, so it's up to the user to define the mapping)", "# Form it back into a string ", "ret", "=", "self", ".", "_list2str", "(", "ret", ")", "ecc", "=", "self", ".", "_list2str", "(", "ecc", ")", "return", "ret", ",", "ecc" ]
71.637097
51.604839
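The string-position to coefficient-degree flip applied to erasures_pos above is easy to sanity-check in isolation:

# String indexes count from the left (message first, ecc last); coefficient
# degrees run the other way, so string position 0 maps to the highest degree.
r = list(b"hello world") + [0, 0, 0, 0]   # toy message plus 4 ecc bytes
erasures_pos = [0, 1]                     # first two characters erased
coeff_pos = [len(r) - 1 - x for x in erasures_pos]
print(coeff_pos)  # [14, 13]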
def _build_pub_key_auth(self, context, nonce, auth_token, public_key): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3 https://msdn.microsoft.com/en-us/library/cc226791.aspx This step sends the final SPNEGO token to the server if required and computes the value for the pubKeyAuth field for the protocol version negotiated. The format of the pubKeyAuth field depends on the version that the server supports. For version 2 to 4: The pubKeyAuth field is just wrapped using the authenticated context For versions 5 to 6: The pubKeyAuth is a sha256 hash of the server's public key plus a nonce and a magic string value. This hash is wrapped using the authenticated context and the nonce is added to the TSRequest alongside the nonce used in the hash calcs. :param context: The authenticated context :param nonce: If versions 5+, the nonce to use in the hash :param auth_token: If NTLM, this is the last msg (authenticate msg) to send in the same request :param public_key: The server's public key :return: The TSRequest as a byte string to send to the server """ ts_request = TSRequest() if auth_token is not None: nego_token = NegoToken() nego_token['negoToken'] = auth_token ts_request['negoTokens'].append(nego_token) if nonce is not None: ts_request['clientNonce'] = nonce hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + \ nonce + public_key pub_value = hashlib.sha256(hash_input).digest() else: pub_value = public_key enc_public_key = context.wrap(pub_value) ts_request['pubKeyAuth'] = enc_public_key return encoder.encode(ts_request)
[ "def", "_build_pub_key_auth", "(", "self", ",", "context", ",", "nonce", ",", "auth_token", ",", "public_key", ")", ":", "ts_request", "=", "TSRequest", "(", ")", "if", "auth_token", "is", "not", "None", ":", "nego_token", "=", "NegoToken", "(", ")", "nego_token", "[", "'negoToken'", "]", "=", "auth_token", "ts_request", "[", "'negoTokens'", "]", ".", "append", "(", "nego_token", ")", "if", "nonce", "is", "not", "None", ":", "ts_request", "[", "'clientNonce'", "]", "=", "nonce", "hash_input", "=", "b\"CredSSP Client-To-Server Binding Hash\\x00\"", "+", "nonce", "+", "public_key", "pub_value", "=", "hashlib", ".", "sha256", "(", "hash_input", ")", ".", "digest", "(", ")", "else", ":", "pub_value", "=", "public_key", "enc_public_key", "=", "context", ".", "wrap", "(", "pub_value", ")", "ts_request", "[", "'pubKeyAuth'", "]", "=", "enc_public_key", "return", "encoder", ".", "encode", "(", "ts_request", ")" ]
39.893617
22.276596
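A minimal standalone sketch of the version 5-6 pubKeyAuth hash described above, before the wrap step: SHA256 over the magic client-to-server string, the client nonce, and the server's public key. The wrap is omitted here because it depends on the negotiated security context; the nonce and key values are placeholders.

import hashlib
import os

# Sketch of the version 5-6 pubKeyAuth value prior to context.wrap().
# The magic string matches the one used in _build_pub_key_auth above.
MAGIC = b"CredSSP Client-To-Server Binding Hash\x00"

def pub_key_auth_hash(nonce, public_key):
    return hashlib.sha256(MAGIC + nonce + public_key).digest()

nonce = os.urandom(32)          # illustrative 32-byte client nonce
public_key = b"\x30\x82..."     # placeholder for the DER-encoded server key
digest = pub_key_auth_hash(nonce, public_key)
print(len(digest))              # 32-byte SHA256 digest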
def addIndexOnAttribute(self, attributeName):
        '''
            addIndexOnAttribute - Add an index for an arbitrary attribute. This index will be used by the getElementsByAttr function.
                You should add the index prior to parsing, or call reindex afterwards; otherwise the index will be blank. Adding an index on "name" or "id" has no effect.

                @param attributeName <lowercase str> - An attribute name. Will be lowercased.
        '''
        attributeName = attributeName.lower()
        self._otherAttributeIndexes[attributeName] = {}


        def _otherIndexFunction(self, tag):
            thisAttribute = tag.getAttribute(attributeName)
            if thisAttribute is not None:
                if thisAttribute not in self._otherAttributeIndexes[attributeName]:
                    self._otherAttributeIndexes[attributeName][thisAttribute] = []
                self._otherAttributeIndexes[attributeName][thisAttribute].append(tag)


        self.otherAttributeIndexFunctions[attributeName] = _otherIndexFunction
[ "def", "addIndexOnAttribute", "(", "self", ",", "attributeName", ")", ":", "attributeName", "=", "attributeName", ".", "lower", "(", ")", "self", ".", "_otherAttributeIndexes", "[", "attributeName", "]", "=", "{", "}", "def", "_otherIndexFunction", "(", "self", ",", "tag", ")", ":", "thisAttribute", "=", "tag", ".", "getAttribute", "(", "attributeName", ")", "if", "thisAttribute", "is", "not", "None", ":", "if", "thisAttribute", "not", "in", "self", ".", "_otherAttributeIndexes", "[", "attributeName", "]", ":", "self", ".", "_otherAttributeIndexes", "[", "attributeName", "]", "[", "thisAttribute", "]", "=", "[", "]", "self", ".", "_otherAttributeIndexes", "[", "attributeName", "]", "[", "thisAttribute", "]", ".", "append", "(", "tag", ")", "self", ".", "otherAttributeIndexFunctions", "[", "attributeName", "]", "=", "_otherIndexFunction" ]
52.421053
34.947368
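The nested-dictionary index built above (attribute name -> attribute value -> list of tags) can be illustrated standalone. This hypothetical sketch mirrors that structure without depending on the parser class; plain dicts stand in for tag objects.

# Standalone sketch of the attribute index structure built above:
# one dict per indexed attribute, mapping attribute value -> list of tags.
index = {}  # attributeName -> {attributeValue: [tags]}

def add_index(attribute_name):
    index[attribute_name.lower()] = {}

def index_tag(attribute_name, tag):
    # tag is anything exposing attribute lookup; here a plain dict
    value = tag.get(attribute_name)
    if value is not None:
        index[attribute_name].setdefault(value, []).append(tag)

add_index('data-role')
index_tag('data-role', {'data-role': 'button', 'id': 'a'})
index_tag('data-role', {'data-role': 'button', 'id': 'b'})
print(len(index['data-role']['button']))  # 2 tags share the value 'button'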
def windows_from_blocksize(self, blocksize_xy=512):
        """Create rasterio.windows.Window instances with given size which fully cover the raster.

        Arguments:
            blocksize_xy {int or list of two int} -- Size of the window. If one integer is given it defines
                the width and height of the window. If a list of two integers is given, the first defines the
                width and the second the height.

        Returns:
            self -- The instance itself; the attributes ``windows``, ``windows_row`` and ``windows_col``
                are updated.
        """
        meta = self._get_template_for_given_resolution(self.dst_res, "meta")
        width = meta["width"]
        height = meta["height"]
        blocksize_wins = windows_from_blocksize(blocksize_xy, width, height)

        self.windows = np.array([win[1] for win in blocksize_wins])
        self.windows_row = np.array([win[0][0] for win in blocksize_wins])
        self.windows_col = np.array([win[0][1] for win in blocksize_wins])
        return self
[ "def", "windows_from_blocksize", "(", "self", ",", "blocksize_xy", "=", "512", ")", ":", "meta", "=", "self", ".", "_get_template_for_given_resolution", "(", "self", ".", "dst_res", ",", "\"meta\"", ")", "width", "=", "meta", "[", "\"width\"", "]", "height", "=", "meta", "[", "\"height\"", "]", "blocksize_wins", "=", "windows_from_blocksize", "(", "blocksize_xy", ",", "width", ",", "height", ")", "self", ".", "windows", "=", "np", ".", "array", "(", "[", "win", "[", "1", "]", "for", "win", "in", "blocksize_wins", "]", ")", "self", ".", "windows_row", "=", "np", ".", "array", "(", "[", "win", "[", "0", "]", "[", "0", "]", "for", "win", "in", "blocksize_wins", "]", ")", "self", ".", "windows_col", "=", "np", ".", "array", "(", "[", "win", "[", "0", "]", "[", "1", "]", "for", "win", "in", "blocksize_wins", "]", ")", "return", "self" ]
50.1
29.4
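A self-contained sketch of the tiling the module-level helper appears to perform, assuming it yields ((block_row, block_col), window) pairs as suggested by the win[0][0] / win[1] indexing above. Plain tuples stand in for rasterio windows; edge tiles are clipped to the raster bounds.

# Sketch: cover a width x height raster with fixed-size tiles.
# Each entry is ((block_row, block_col), (col_off, row_off, w, h)).
def tile_raster(blocksize_xy, width, height):
    if isinstance(blocksize_xy, int):
        bw = bh = blocksize_xy
    else:
        bw, bh = blocksize_xy
    tiles = []
    for br, row_off in enumerate(range(0, height, bh)):
        for bc, col_off in enumerate(range(0, width, bw)):
            w = min(bw, width - col_off)    # clip tiles at the right edge
            h = min(bh, height - row_off)   # clip tiles at the bottom edge
            tiles.append(((br, bc), (col_off, row_off, w, h)))
    return tiles

print(tile_raster(512, 1000, 700))  # 2 x 2 grid with clipped edge tiles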
def read_last_checkpoint(self): """Read the last checkpoint from the oplog progress dictionary. """ # In versions of mongo-connector 2.3 and before, # we used the repr of the # oplog collection as keys in the oplog_progress dictionary. # In versions thereafter, we use the replica set name. For backwards # compatibility, we check for both. oplog_str = str(self.oplog) ret_val = None with self.oplog_progress as oplog_prog: oplog_dict = oplog_prog.get_dict() try: # New format. ret_val = oplog_dict[self.replset_name] except KeyError: try: # Old format. ret_val = oplog_dict[oplog_str] except KeyError: pass LOG.debug("OplogThread: reading last checkpoint as %s " % str(ret_val)) self.checkpoint = ret_val return ret_val
[ "def", "read_last_checkpoint", "(", "self", ")", ":", "# In versions of mongo-connector 2.3 and before,", "# we used the repr of the", "# oplog collection as keys in the oplog_progress dictionary.", "# In versions thereafter, we use the replica set name. For backwards", "# compatibility, we check for both.", "oplog_str", "=", "str", "(", "self", ".", "oplog", ")", "ret_val", "=", "None", "with", "self", ".", "oplog_progress", "as", "oplog_prog", ":", "oplog_dict", "=", "oplog_prog", ".", "get_dict", "(", ")", "try", ":", "# New format.", "ret_val", "=", "oplog_dict", "[", "self", ".", "replset_name", "]", "except", "KeyError", ":", "try", ":", "# Old format.", "ret_val", "=", "oplog_dict", "[", "oplog_str", "]", "except", "KeyError", ":", "pass", "LOG", ".", "debug", "(", "\"OplogThread: reading last checkpoint as %s \"", "%", "str", "(", "ret_val", ")", ")", "self", ".", "checkpoint", "=", "ret_val", "return", "ret_val" ]
36.961538
15.5
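The backwards-compatible lookup above follows a simple pattern: try the new key first (replica set name), then fall back to the legacy key (repr of the oplog collection). A minimal standalone sketch with illustrative names:

# Sketch of the dual-format checkpoint lookup: prefer the new-format key,
# fall back to the legacy key, return None if neither is present.
def lookup_checkpoint(oplog_dict, new_key, legacy_key):
    for key in (new_key, legacy_key):
        if key in oplog_dict:
            return oplog_dict[key]
    return None

progress = {"Collection(...oplog.rs)": 6742746}  # legacy-format entry
print(lookup_checkpoint(progress, "rs0", "Collection(...oplog.rs)"))  # 6742746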
def localDeformationVsBPS(dnaRef, bpRef, dnaProbe, bpProbe, parameter, err_type='std', bp_range=True, merge_bp=1, merge_method='mean', masked=False, tool='gmx analyze'):
    """To calculate deformation of local parameters in probe DNA with respect to a reference DNA as a function of the bp/s.

    .. note:: Deformation = Reference_DNA(parameter) - Probe_DNA(parameter)

    .. warning:: Number of segments/bp/bps should match between probe and reference DNA.

    .. warning:: To calculate errors by using ``err_type = 'acf'`` or ``err_type = 'block'``, GROMACS tool ``g_analyze`` or ``gmx analyze`` should be present in ``$PATH``.

    Parameters
    ----------
    dnaRef : :class:`DNA`
        Reference DNA

    bpRef : 1D list array
        base-pairs or base-steps to consider from Reference DNA
        Example: ::

            bp = [6]                # bp_range = False
            bp = [4,15]             # bp_range = True
            bp = range(4,15)        # bp_range = False
            bp = np.arange(4,15)    # bp_range = False

    dnaProbe : :class:`DNA`
        probe DNA. Number of base-pairs in Reference and probe DNA **should be** the same.

    bpProbe : 1D list or array
        base-pairs or base-steps to consider from probe DNA. For more, see the above example for ``bpRef``.

    parameter : str
        Name of a base-pair or base-step or helical base-step parameter.
        For details about accepted keywords, see ``parameter`` in the method :meth:`DNA.get_parameters`.

    err_type : str
        Method of error estimation. Currently accepted methods are as follows:

            * ``err_type = 'std'``   : Standard Deviation
            * ``err_type = 'acf'``   : Standard error using autocorrelation time (requires: ``g_analyze`` or ``gmx analyze``)
            * ``err_type = 'block'`` : Standard error using block averaging method (requires: ``g_analyze`` or ``gmx analyze``)

    bp_range : bool
        ``Default=True``: As shown above, if ``True``, bp is taken as a range; otherwise, as a list or numpy array.

    merge_bp : int
        Number of base-pairs or steps to merge for creating the small DNA segments

    merge_method : str
        Method to calculate the parameter of a DNA segment from local
        parameters of all base-pairs/steps that are between the range given
        through ``bp``. Currently accepted keywords are as follows:

            * ``merge_method = mean``: Average of local parameters
            * ``merge_method = sum``: Sum of local parameters

    masked : bool
        ``Default=False``. To skip specific frames/snapshots.
        ``DNA.mask`` array should be set to use this functionality.
        This array contains boolean (either ``True`` or ``False``) value
        for each frame to mask the frames. Presently, mask array is
        automatically generated during :meth:`DNA.generate_smooth_axis` to
        skip those frames where 3D fitting curve was not successful within
        the given criteria.

    tool : str
        Gromacs tool ``g_analyze`` or ``gmx analyze`` or ``gmx_mpi analyze`` etc.
        It will be used to calculate autocorrelation time or block averaging error.
        It should be present in ``$PATH``.

    Returns
    -------
    bpRef : 1D array
        base-pair/step numbers of reference DNA. If ``merge_bp>1``, the middle bp number is returned.

    bpProbe : 1D array
        base-pair/step numbers of probe DNA. If ``merge_bp>1``, the middle bp number is returned.

    deviation : 1D array
        Deviation in the parameter of probe DNA with respect to reference DNA.

    error : 1D array
        Standard error of the respective deviation.

    """
    bpRef, RefAvgValue, RefError = dnaRef.get_mean_error(bpRef, parameter, err_type=err_type, bp_range=bp_range,
                                                         merge_bp=merge_bp, merge_method=merge_method, tool=tool,
                                                         masked=masked)

    bpProbe, ProbeAvgValue, ProbeError = dnaProbe.get_mean_error(bpProbe, parameter, err_type=err_type,
                                                                 bp_range=bp_range, merge_bp=merge_bp,
                                                                 merge_method=merge_method, tool=tool, masked=masked)

    if len(bpRef) != len(bpProbe):
        raise ValueError(
            "Number (%d) of bp/bps/segments in reference DNA does not match with the number (%d) of probe DNA." % (len(bpRef), len(bpProbe)))

    deviation = RefAvgValue - ProbeAvgValue
    error = np.sqrt((RefError**2) + (ProbeError**2))

    return bpRef, bpProbe, deviation, error
[ "def", "localDeformationVsBPS", "(", "dnaRef", ",", "bpRef", ",", "dnaProbe", ",", "bpProbe", ",", "parameter", ",", "err_type", "=", "'std'", ",", "bp_range", "=", "True", ",", "merge_bp", "=", "1", ",", "merge_method", "=", "'mean'", ",", "masked", "=", "False", ",", "tool", "=", "'gmx analyze'", ")", ":", "bpRef", ",", "RefAvgValue", ",", "RefError", "=", "dnaRef", ".", "get_mean_error", "(", "bpRef", ",", "parameter", ",", "err_type", "=", "err_type", ",", "bp_range", "=", "bp_range", ",", "merge_bp", "=", "merge_bp", ",", "merge_method", "=", "merge_method", ",", "tool", "=", "tool", ",", "masked", "=", "masked", ")", "bpProbe", ",", "ProbeAvgValue", ",", "ProbeError", "=", "dnaProbe", ".", "get_mean_error", "(", "bpProbe", ",", "parameter", ",", "err_type", "=", "err_type", ",", "bp_range", "=", "bp_range", ",", "merge_bp", "=", "merge_bp", ",", "merge_method", "=", "merge_method", ",", "tool", "=", "tool", ",", "masked", "=", "masked", ")", "if", "len", "(", "bpRef", ")", "!=", "len", "(", "bpProbe", ")", ":", "raise", "ValueError", "(", "\"Number (%d) of bp/bps/segments in reference DNA does not match with the number (%d) of probe DNA.\"", "%", "(", "len", "(", "bpRef", ")", ",", "len", "(", "bpProbe", ")", ")", ")", "deviation", "=", "RefAvgValue", "-", "ProbeAvgValue", "error", "=", "np", ".", "sqrt", "(", "(", "RefError", "**", "2", ")", "+", "(", "ProbeError", "**", "2", ")", ")", "return", "bpRef", ",", "bpProbe", ",", "deviation", ",", "error" ]
44.961165
33.563107
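The deviation and its error above are standard uncorrelated error propagation: the deviation is a difference of means, and the standard errors combine in quadrature. A numpy sketch with made-up values:

import numpy as np

# Sketch of the deviation/error computation used above, assuming the two
# measurements are uncorrelated so their standard errors add in quadrature.
ref_avg = np.array([34.2, 35.1, 33.8])    # illustrative reference means
ref_err = np.array([0.4, 0.3, 0.5])
probe_avg = np.array([33.0, 36.0, 33.5])  # illustrative probe means
probe_err = np.array([0.6, 0.2, 0.4])

deviation = ref_avg - probe_avg
error = np.sqrt(ref_err**2 + probe_err**2)
print(deviation)  # approximately [ 1.2 -0.9  0.3]
print(error)      # approximately [0.72 0.36 0.64]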