Dataset columns: text (string, length 89 to 104k), code_tokens (list of strings), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630). Each record below shows the function source, its token list, and the two numeric fields.
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    root = os.path.realpath(__file__)
    root_dir = os.path.dirname(root)
    if os.path.isfile(os.path.join(root_dir, 'VERSION')):
        with open(os.path.join(root_dir, 'VERSION')) as vfile:
            version = vfile.readline().strip()
        return {
            "version": version,
            "full-revisionid": None,
            "dirty": None,
            "error": None,
            "date": None
        }

    try:
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
[ "def", "get_versions", "(", ")", ":", "# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have", "# __file__, we can work backwards from there to the root. Some", "# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which", "# case we can only use expanded keywords.", "cfg", "=", "get_config", "(", ")", "verbose", "=", "cfg", ".", "verbose", "try", ":", "return", "git_versions_from_keywords", "(", "get_keywords", "(", ")", ",", "cfg", ".", "tag_prefix", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "root", "=", "os", ".", "path", ".", "realpath", "(", "__file__", ")", "root_dir", "=", "os", ".", "path", ".", "dirname", "(", "root", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'VERSION'", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'VERSION'", ")", ")", "as", "vfile", ":", "version", "=", "vfile", ".", "readline", "(", ")", ".", "strip", "(", ")", "return", "{", "\"version\"", ":", "version", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "None", ",", "\"date\"", ":", "None", "}", "try", ":", "# versionfile_source is the relative path from the top of the source", "# tree (where the .git directory might live) to this file. Invert", "# this to find the root from __file__.", "for", "i", "in", "cfg", ".", "versionfile_source", ".", "split", "(", "'/'", ")", ":", "root", "=", "os", ".", "path", ".", "dirname", "(", "root", ")", "except", "NameError", ":", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to find root of source tree\"", ",", "\"date\"", ":", "None", "}", "try", ":", "pieces", "=", "git_pieces_from_vcs", "(", "cfg", ".", "tag_prefix", ",", "root", ",", "verbose", ")", "return", "render", "(", "pieces", ",", "cfg", ".", "style", ")", "except", "NotThisMethod", ":", "pass", "try", ":", "if", "cfg", ".", "parentdir_prefix", ":", "return", "versions_from_parentdir", "(", "cfg", ".", "parentdir_prefix", ",", "root", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to compute version\"", ",", "\"date\"", ":", "None", "}" ]
avg_line_len: 33.327586
score: 22.172414
def require_auth(view_func):
    """Performs user authentication check.

    Similar to Django's `login_required` decorator, except that this throws
    :exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
    signed-in.
    """
    from horizon.exceptions import NotAuthenticated

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated:
            return view_func(request, *args, **kwargs)
        raise NotAuthenticated(_("Please log in to continue."))
    return dec
[ "def", "require_auth", "(", "view_func", ")", ":", "from", "horizon", ".", "exceptions", "import", "NotAuthenticated", "@", "functools", ".", "wraps", "(", "view_func", ",", "assigned", "=", "available_attrs", "(", "view_func", ")", ")", "def", "dec", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "user", ".", "is_authenticated", ":", "return", "view_func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "raise", "NotAuthenticated", "(", "_", "(", "\"Please log in to continue.\"", ")", ")", "return", "dec" ]
avg_line_len: 38.066667
score: 19.6
def wrsamp(self, expanded=False, write_dir=''):
    """
    Write a wfdb header file and any associated dat files from this object.

    Parameters
    ----------
    expanded : bool, optional
        Whether to write the expanded signal (e_d_signal) instead of the
        uniform signal (d_signal).
    write_dir : str, optional
        The directory in which to write the files.
    """
    # Perform field validity and cohesion checks, and write the
    # header file.
    self.wrheader(write_dir=write_dir)
    if self.n_sig > 0:
        # Perform signal validity and cohesion checks, and write the
        # associated dat files.
        self.wr_dats(expanded=expanded, write_dir=write_dir)
[ "def", "wrsamp", "(", "self", ",", "expanded", "=", "False", ",", "write_dir", "=", "''", ")", ":", "# Perform field validity and cohesion checks, and write the", "# header file.", "self", ".", "wrheader", "(", "write_dir", "=", "write_dir", ")", "if", "self", ".", "n_sig", ">", "0", ":", "# Perform signal validity and cohesion checks, and write the", "# associated dat files.", "self", ".", "wr_dats", "(", "expanded", "=", "expanded", ",", "write_dir", "=", "write_dir", ")" ]
avg_line_len: 35.857143
score: 17.666667
def inverse_transform(self, X):
    """Undo the scaling of X according to feature_range.

    Note that if truncate is true, any truncated points will not
    be restored exactly.

    Parameters
    ----------
    X : array-like with shape [n_samples, n_features]
        Input data that will be transformed.
    """
    X = check_array(X, copy=self.copy)
    X -= self.min_
    X /= self.scale_
    return X
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "copy", "=", "self", ".", "copy", ")", "X", "-=", "self", ".", "min_", "X", "/=", "self", ".", "scale_", "return", "X" ]
avg_line_len: 29.533333
score: 17.2
def fpopen(*args, **kwargs):
    '''
    Shortcut for fopen with extra uid, gid, and mode options.

    Supported optional Keyword Arguments:

    mode
        Explicit mode to set. Mode is anything os.chmod would accept
        as input for mode. Works only on unix/unix-like systems.

    uid
        The uid to set, if not set, or it is None or -1 no changes are
        made. Same applies if the path is already owned by this uid.
        Must be int. Works only on unix/unix-like systems.

    gid
        The gid to set, if not set, or it is None or -1 no changes are
        made. Same applies if the path is already owned by this gid.
        Must be int. Works only on unix/unix-like systems.
    '''
    # Remove uid, gid and mode from kwargs if present
    uid = kwargs.pop('uid', -1)  # -1 means no change to current uid
    gid = kwargs.pop('gid', -1)  # -1 means no change to current gid
    mode = kwargs.pop('mode', None)
    with fopen(*args, **kwargs) as f_handle:
        path = args[0]
        d_stat = os.stat(path)

        if hasattr(os, 'chown'):
            # if uid and gid are both -1 then go ahead with
            # no changes at all
            if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
                    [i for i in (uid, gid) if i != -1]:
                os.chown(path, uid, gid)

        if mode is not None:
            mode_part = stat.S_IMODE(d_stat.st_mode)
            if mode_part != mode:
                os.chmod(path, (d_stat.st_mode ^ mode_part) | mode)

        yield f_handle
[ "def", "fpopen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Remove uid, gid and mode from kwargs if present", "uid", "=", "kwargs", ".", "pop", "(", "'uid'", ",", "-", "1", ")", "# -1 means no change to current uid", "gid", "=", "kwargs", ".", "pop", "(", "'gid'", ",", "-", "1", ")", "# -1 means no change to current gid", "mode", "=", "kwargs", ".", "pop", "(", "'mode'", ",", "None", ")", "with", "fopen", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "f_handle", ":", "path", "=", "args", "[", "0", "]", "d_stat", "=", "os", ".", "stat", "(", "path", ")", "if", "hasattr", "(", "os", ",", "'chown'", ")", ":", "# if uid and gid are both -1 then go ahead with", "# no changes at all", "if", "(", "d_stat", ".", "st_uid", "!=", "uid", "or", "d_stat", ".", "st_gid", "!=", "gid", ")", "and", "[", "i", "for", "i", "in", "(", "uid", ",", "gid", ")", "if", "i", "!=", "-", "1", "]", ":", "os", ".", "chown", "(", "path", ",", "uid", ",", "gid", ")", "if", "mode", "is", "not", "None", ":", "mode_part", "=", "stat", ".", "S_IMODE", "(", "d_stat", ".", "st_mode", ")", "if", "mode_part", "!=", "mode", ":", "os", ".", "chmod", "(", "path", ",", "(", "d_stat", ".", "st_mode", "^", "mode_part", ")", "|", "mode", ")", "yield", "f_handle" ]
avg_line_len: 35.619048
score: 23.380952
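The least obvious line in fpopen is the chmod expression, which swaps in new permission bits while preserving the file-type bits of st_mode. A minimal standalone sketch of that bit manipulation, with made-up mode values for illustration:

import stat

# Hypothetical values: a regular file currently at 0o644, target 0o600.
st_mode = stat.S_IFREG | 0o644
desired_mode = 0o600

mode_part = stat.S_IMODE(st_mode)  # 0o644: the permission bits only
# XOR-ing out the old permission bits keeps the file-type bits intact,
# then OR-ing in the new bits yields the mode os.chmod should apply.
new_mode = (st_mode ^ mode_part) | desired_mode

assert stat.S_IMODE(new_mode) == 0o600
assert stat.S_IFMT(new_mode) == stat.S_IFREG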
def next(self):
    """
    This method is deprecated, a holdover from when queries were
    iterators, rather than iterables.

    @return: one element of massaged data.
    """
    if self._selfiter is None:
        warnings.warn(
            "Calling 'next' directly on a query is deprecated. "
            "Perhaps you want to use iter(query).next(), or something "
            "more expressive like store.findFirst or store.findOrCreate?",
            DeprecationWarning, stacklevel=2)
        self._selfiter = self.__iter__()
    return self._selfiter.next()
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_selfiter", "is", "None", ":", "warnings", ".", "warn", "(", "\"Calling 'next' directly on a query is deprecated. \"", "\"Perhaps you want to use iter(query).next(), or something \"", "\"more expressive like store.findFirst or store.findOrCreate?\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "_selfiter", "=", "self", ".", "__iter__", "(", ")", "return", "self", ".", "_selfiter", ".", "next", "(", ")" ]
avg_line_len: 40.133333
score: 17.2
def to_json(self, depth=-1, **kwargs):
    """Returns a JSON representation of the object."""
    return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs)
[ "def", "to_json", "(", "self", ",", "depth", "=", "-", "1", ",", "*", "*", "kwargs", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "to_dict", "(", "depth", "=", "depth", ",", "ordered", "=", "True", ")", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 57.333333
score: 12.666667
def __snake_case(self, descriptor):
    """
    Utility method to convert camelcase to snake

    :param descriptor: The dictionary to convert
    """
    newdict = {}
    for i, (k, v) in enumerate(descriptor.items()):
        newkey = ""
        for j, c in enumerate(k):
            if c.isupper():
                if len(newkey) != 0:
                    newkey += '_'
                newkey += c.lower()
            else:
                newkey += c
        newdict[newkey] = v
    return newdict
[ "def", "__snake_case", "(", "self", ",", "descriptor", ")", ":", "newdict", "=", "{", "}", "for", "i", ",", "(", "k", ",", "v", ")", "in", "enumerate", "(", "descriptor", ".", "items", "(", ")", ")", ":", "newkey", "=", "\"\"", "for", "j", ",", "c", "in", "enumerate", "(", "k", ")", ":", "if", "c", ".", "isupper", "(", ")", ":", "if", "len", "(", "newkey", ")", "!=", "0", ":", "newkey", "+=", "'_'", "newkey", "+=", "c", ".", "lower", "(", ")", "else", ":", "newkey", "+=", "c", "newdict", "[", "newkey", "]", "=", "v", "return", "newdict" ]
avg_line_len: 30.444444
score: 10.666667
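A standalone sketch of the same conversion, derived directly from the loop above, shows its edge-case behavior: every uppercase character gets its own underscore, so acronyms come out letter by letter (the sample inputs are assumptions for illustration):

def snake_case(descriptor):
    # Standalone version of the method above: prefix each uppercase
    # character (except at the start) with '_' and lowercase it.
    newdict = {}
    for k, v in descriptor.items():
        newkey = ""
        for c in k:
            if c.isupper():
                if newkey:
                    newkey += '_'
                newkey += c.lower()
            else:
                newkey += c
        newdict[newkey] = v
    return newdict

print(snake_case({"camelCase": 1, "HTTPServer": 2}))
# {'camel_case': 1, 'h_t_t_p_server': 2}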
def snapshots_delete(container, name, remote_addr=None,
                     cert=None, key=None, verify_cert=True):
    '''
    Delete a snapshot for a container

    container :
        The name of the container to get.

    name :
        The name of the snapshot.

    remote_addr :
        An URL to a remote server. The 'cert' and 'key' fields must also be
        provided if 'remote_addr' is defined.

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Verify the ssl certificate. Default: True

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.snapshots_delete test-container test-snapshot
    '''
    cont = container_get(
        container, remote_addr, cert, key, verify_cert, _raw=True
    )

    try:
        for s in cont.snapshots.all():
            if s.name == name:
                s.delete()
                return True
    except pylxd.exceptions.LXDAPIException:
        pass

    return False
[ "def", "snapshots_delete", "(", "container", ",", "name", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "cont", "=", "container_get", "(", "container", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "try", ":", "for", "s", "in", "cont", ".", "snapshots", ".", "all", "(", ")", ":", "if", "s", ".", "name", "==", "name", ":", "s", ".", "delete", "(", ")", "return", "True", "except", "pylxd", ".", "exceptions", ".", "LXDAPIException", ":", "pass", "return", "False" ]
avg_line_len: 21.886792
score: 23.509434
def configure(filename=None):
    """This function gives to the user application a chance to define where
    configuration file should live. Subsequent calls to this function will
    have no effect, unless you call :func:`reconfigure`.

    :param str filename: Full path to configuration file.
    """
    global retry
    if getattr(configure, '_configured', False):
        return

    filename = filename or DEFAULT_CONFIG_FILENAME
    _ensure_directory(filename)

    parser = SafeConfigParser()
    if os.path.isfile(filename):
        with open(filename, 'r') as fp:
            parser.readfp(fp)

    if not parser.has_section(RETRY_SECTION):
        parser.add_section(RETRY_SECTION)
        parser.set(RETRY_SECTION, 'max_tries', str(constants.BACKOFF_DEFAULT_MAXTRIES))
        parser.set(RETRY_SECTION, 'delay', str(constants.BACKOFF_DEFAULT_DELAY))
        parser.set(RETRY_SECTION, 'factor', str(constants.BACKOFF_DEFAULT_FACTOR))
        with open(filename, 'wb') as fp:
            parser.write(fp)

    retry = RetrySettings(
        max_tries=parser.getint(RETRY_SECTION, 'max_tries'),
        delay=parser.getint(RETRY_SECTION, 'delay'),
        factor=parser.getint(RETRY_SECTION, 'factor'))

    setattr(configure, '_configured', True)
    setattr(configure, '_configured_filename', filename)
[ "def", "configure", "(", "filename", "=", "None", ")", ":", "global", "retry", "if", "getattr", "(", "configure", ",", "'_configured'", ",", "False", ")", ":", "return", "filename", "=", "filename", "or", "DEFAULT_CONFIG_FILENAME", "_ensure_directory", "(", "filename", ")", "parser", "=", "SafeConfigParser", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fp", ":", "parser", ".", "readfp", "(", "fp", ")", "if", "not", "parser", ".", "has_section", "(", "RETRY_SECTION", ")", ":", "parser", ".", "add_section", "(", "RETRY_SECTION", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'max_tries'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_MAXTRIES", ")", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'delay'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_DELAY", ")", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'factor'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_FACTOR", ")", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fp", ":", "parser", ".", "write", "(", "fp", ")", "retry", "=", "RetrySettings", "(", "max_tries", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'max_tries'", ")", ",", "delay", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'delay'", ")", ",", "factor", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'factor'", ")", ")", "setattr", "(", "configure", ",", "'_configured'", ",", "True", ")", "setattr", "(", "configure", ",", "'_configured_filename'", ",", "filename", ")" ]
avg_line_len: 34.052632
score: 21.631579
def wait_until_clickable(self, timeout=None):
    """Search element and wait until it is clickable

    :param timeout: max time to wait
    :returns: page element instance
    """
    try:
        self.utils.wait_until_element_clickable(self, timeout)
    except TimeoutException as exception:
        parent_msg = " and parent locator '{}'".format(self.parent) if self.parent else ''
        msg = "Page element of type '%s' with locator %s%s not found or is not clickable after %s seconds"
        timeout = timeout if timeout else self.utils.get_explicitly_wait()
        self.logger.error(msg, type(self).__name__, self.locator, parent_msg, timeout)
        exception.msg += "\n {}".format(msg % (type(self).__name__, self.locator, parent_msg, timeout))
        raise exception
    return self
[ "def", "wait_until_clickable", "(", "self", ",", "timeout", "=", "None", ")", ":", "try", ":", "self", ".", "utils", ".", "wait_until_element_clickable", "(", "self", ",", "timeout", ")", "except", "TimeoutException", "as", "exception", ":", "parent_msg", "=", "\" and parent locator '{}'\"", ".", "format", "(", "self", ".", "parent", ")", "if", "self", ".", "parent", "else", "''", "msg", "=", "\"Page element of type '%s' with locator %s%s not found or is not clickable after %s seconds\"", "timeout", "=", "timeout", "if", "timeout", "else", "self", ".", "utils", ".", "get_explicitly_wait", "(", ")", "self", ".", "logger", ".", "error", "(", "msg", ",", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "locator", ",", "parent_msg", ",", "timeout", ")", "exception", ".", "msg", "+=", "\"\\n {}\"", ".", "format", "(", "msg", "%", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "locator", ",", "parent_msg", ",", "timeout", ")", ")", "raise", "exception", "return", "self" ]
avg_line_len: 52.5
score: 26.1875
def delete_user(self, user_descriptor):
    """DeleteUser.
    [Preview API] Disables a user.
    :param str user_descriptor: The descriptor of the user to delete.
    """
    route_values = {}
    if user_descriptor is not None:
        route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
    self._send(http_method='DELETE',
               location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
               version='5.1-preview.1',
               route_values=route_values)
[ "def", "delete_user", "(", "self", ",", "user_descriptor", ")", ":", "route_values", "=", "{", "}", "if", "user_descriptor", "is", "not", "None", ":", "route_values", "[", "'userDescriptor'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'user_descriptor'", ",", "user_descriptor", ",", "'str'", ")", "self", ".", "_send", "(", "http_method", "=", "'DELETE'", ",", "location_id", "=", "'005e26ec-6b77-4e4f-a986-b3827bf241f5'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ")" ]
avg_line_len: 46
score: 13.083333
def get_saved_policy(table='filter', chain=None, conf_file=None, family='ipv4'):
    '''
    Return the current policy for the specified table/chain

    CLI Examples:

    .. code-block:: bash

        salt '*' iptables.get_saved_policy filter INPUT
        salt '*' iptables.get_saved_policy filter INPUT \\
            conf_file=/etc/iptables.saved

        IPv6:
        salt '*' iptables.get_saved_policy filter INPUT family=ipv6
        salt '*' iptables.get_saved_policy filter INPUT \\
            conf_file=/etc/iptables.saved family=ipv6
    '''
    if not chain:
        return 'Error: Chain needs to be specified'

    rules = _parse_conf(conf_file, family=family)
    try:
        return rules[table][chain]['policy']
    except KeyError:
        return None
[ "def", "get_saved_policy", "(", "table", "=", "'filter'", ",", "chain", "=", "None", ",", "conf_file", "=", "None", ",", "family", "=", "'ipv4'", ")", ":", "if", "not", "chain", ":", "return", "'Error: Chain needs to be specified'", "rules", "=", "_parse_conf", "(", "conf_file", ",", "family", "=", "family", ")", "try", ":", "return", "rules", "[", "table", "]", "[", "chain", "]", "[", "'policy'", "]", "except", "KeyError", ":", "return", "None" ]
avg_line_len: 28.730769
score: 24.730769
def show_address_scope(self, address_scope, **_params):
    """Fetches information of a certain address scope."""
    return self.get(self.address_scope_path % (address_scope),
                    params=_params)
[ "def", "show_address_scope", "(", "self", ",", "address_scope", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "self", ".", "address_scope_path", "%", "(", "address_scope", ")", ",", "params", "=", "_params", ")" ]
avg_line_len: 55.25
score: 10.5
def dist_abs(self, src, tar, gap_cost=1, sim_func=sim_ident):
    """Return the Smith-Waterman score of two strings.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    gap_cost : float
        The cost of an alignment gap (1 by default)
    sim_func : function
        A function that returns the similarity of two characters (identity
        similarity by default)

    Returns
    -------
    float
        Smith-Waterman score

    Examples
    --------
    >>> cmp = SmithWaterman()
    >>> cmp.dist_abs('cat', 'hat')
    2.0
    >>> cmp.dist_abs('Niall', 'Neil')
    1.0
    >>> cmp.dist_abs('aluminum', 'Catalan')
    0.0
    >>> cmp.dist_abs('ATCG', 'TAGC')
    1.0
    """
    d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
    for i in range(len(src) + 1):
        d_mat[i, 0] = 0
    for j in range(len(tar) + 1):
        d_mat[0, j] = 0
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            match = d_mat[i - 1, j - 1] + sim_func(src[i - 1], tar[j - 1])
            delete = d_mat[i - 1, j] - gap_cost
            insert = d_mat[i, j - 1] - gap_cost
            d_mat[i, j] = max(0, match, delete, insert)
    return d_mat[d_mat.shape[0] - 1, d_mat.shape[1] - 1]
[ "def", "dist_abs", "(", "self", ",", "src", ",", "tar", ",", "gap_cost", "=", "1", ",", "sim_func", "=", "sim_ident", ")", ":", "d_mat", "=", "np_zeros", "(", "(", "len", "(", "src", ")", "+", "1", ",", "len", "(", "tar", ")", "+", "1", ")", ",", "dtype", "=", "np_float32", ")", "for", "i", "in", "range", "(", "len", "(", "src", ")", "+", "1", ")", ":", "d_mat", "[", "i", ",", "0", "]", "=", "0", "for", "j", "in", "range", "(", "len", "(", "tar", ")", "+", "1", ")", ":", "d_mat", "[", "0", ",", "j", "]", "=", "0", "for", "i", "in", "range", "(", "1", ",", "len", "(", "src", ")", "+", "1", ")", ":", "for", "j", "in", "range", "(", "1", ",", "len", "(", "tar", ")", "+", "1", ")", ":", "match", "=", "d_mat", "[", "i", "-", "1", ",", "j", "-", "1", "]", "+", "sim_func", "(", "src", "[", "i", "-", "1", "]", ",", "tar", "[", "j", "-", "1", "]", ")", "delete", "=", "d_mat", "[", "i", "-", "1", ",", "j", "]", "-", "gap_cost", "insert", "=", "d_mat", "[", "i", ",", "j", "-", "1", "]", "-", "gap_cost", "d_mat", "[", "i", ",", "j", "]", "=", "max", "(", "0", ",", "match", ",", "delete", ",", "insert", ")", "return", "d_mat", "[", "d_mat", ".", "shape", "[", "0", "]", "-", "1", ",", "d_mat", ".", "shape", "[", "1", "]", "-", "1", "]" ]
avg_line_len: 31.108696
score: 18.086957
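The recurrence above is easier to check in isolation. Below is a dependency-free sketch that assumes sim_func defaults to identity similarity (1.0 for a match, 0.0 otherwise), which is consistent with the doctests; note that, like the method, it returns the bottom-right cell rather than the matrix maximum that classic Smith-Waterman local alignment reports:

def smith_waterman(src, tar, gap_cost=1,
                   sim=lambda a, b: 1.0 if a == b else 0.0):
    # Same recurrence as dist_abs above, using plain lists instead of numpy.
    d = [[0.0] * (len(tar) + 1) for _ in range(len(src) + 1)]
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            match = d[i - 1][j - 1] + sim(src[i - 1], tar[j - 1])
            delete = d[i - 1][j] - gap_cost
            insert = d[i][j - 1] - gap_cost
            d[i][j] = max(0, match, delete, insert)
    # Bottom-right cell, matching the method's return value.
    return d[-1][-1]

assert smith_waterman('cat', 'hat') == 2.0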
def setLocalityGroups(self, login, tableName, groups):
    """
    Parameters:
     - login
     - tableName
     - groups
    """
    self.send_setLocalityGroups(login, tableName, groups)
    self.recv_setLocalityGroups()
[ "def", "setLocalityGroups", "(", "self", ",", "login", ",", "tableName", ",", "groups", ")", ":", "self", ".", "send_setLocalityGroups", "(", "login", ",", "tableName", ",", "groups", ")", "self", ".", "recv_setLocalityGroups", "(", ")" ]
avg_line_len: 23.777778
score: 15.777778
def cbpdn_ystep(k):
    """Do the Y step of the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """
    AXU = mp_Z_X[k] + mp_Z_U[k]
    mp_Z_Y[k] = sp.prox_l1(AXU, (mp_lmbda/mp_xrho))
[ "def", "cbpdn_ystep", "(", "k", ")", ":", "AXU", "=", "mp_Z_X", "[", "k", "]", "+", "mp_Z_U", "[", "k", "]", "mp_Z_Y", "[", "k", "]", "=", "sp", ".", "prox_l1", "(", "AXU", ",", "(", "mp_lmbda", "/", "mp_xrho", ")", ")" ]
avg_line_len: 35.625
score: 15
def rpc_get_names(self, filename, source, offset):
    """Return the list of possible names"""
    names = jedi.api.names(source=source,
                           path=filename, encoding='utf-8',
                           all_scopes=True,
                           definitions=True,
                           references=True)

    result = []
    for name in names:
        if name.module_path == filename:
            offset = linecol_to_pos(source, name.line, name.column)
        elif name.module_path is not None:
            with open(name.module_path) as f:
                text = f.read()
            offset = linecol_to_pos(text, name.line, name.column)
        result.append({"name": name.name,
                       "filename": name.module_path,
                       "offset": offset})
    return result
[ "def", "rpc_get_names", "(", "self", ",", "filename", ",", "source", ",", "offset", ")", ":", "names", "=", "jedi", ".", "api", ".", "names", "(", "source", "=", "source", ",", "path", "=", "filename", ",", "encoding", "=", "'utf-8'", ",", "all_scopes", "=", "True", ",", "definitions", "=", "True", ",", "references", "=", "True", ")", "result", "=", "[", "]", "for", "name", "in", "names", ":", "if", "name", ".", "module_path", "==", "filename", ":", "offset", "=", "linecol_to_pos", "(", "source", ",", "name", ".", "line", ",", "name", ".", "column", ")", "elif", "name", ".", "module_path", "is", "not", "None", ":", "with", "open", "(", "name", ".", "module_path", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "offset", "=", "linecol_to_pos", "(", "text", ",", "name", ".", "line", ",", "name", ".", "column", ")", "result", ".", "append", "(", "{", "\"name\"", ":", "name", ".", "name", ",", "\"filename\"", ":", "name", ".", "module_path", ",", "\"offset\"", ":", "offset", "}", ")", "return", "result" ]
avg_line_len: 43.65
score: 13.2
def update_path(self, path):
    """
    There are EXTENDED messages which don't include any routers at
    all, and any of the EXTENDED messages may have some arbitrary
    flags in them. So far, they're all upper-case and none start
    with $ luckily. The routers in the path should all be
    LongName-style router names (this depends on them starting with $).

    For further complication, it's possible to extend a circuit to
    a router which isn't in the consensus. nickm via #tor thought
    this might happen in the case of hidden services choosing a
    rendevouz point not in the current consensus.
    """
    oldpath = self.path
    self.path = []
    for p in path:
        if p[0] != '$':
            break
        # this will create a Router if we give it a router
        # LongName that doesn't yet exist
        router = self.router_container.router_from_id(p)
        self.path.append(router)
        # if the path grew, notify listeners
        if len(self.path) > len(oldpath):
            for x in self.listeners:
                x.circuit_extend(self, router)
            oldpath = self.path
[ "def", "update_path", "(", "self", ",", "path", ")", ":", "oldpath", "=", "self", ".", "path", "self", ".", "path", "=", "[", "]", "for", "p", "in", "path", ":", "if", "p", "[", "0", "]", "!=", "'$'", ":", "break", "# this will create a Router if we give it a router", "# LongName that doesn't yet exist", "router", "=", "self", ".", "router_container", ".", "router_from_id", "(", "p", ")", "self", ".", "path", ".", "append", "(", "router", ")", "# if the path grew, notify listeners", "if", "len", "(", "self", ".", "path", ")", ">", "len", "(", "oldpath", ")", ":", "for", "x", "in", "self", ".", "listeners", ":", "x", ".", "circuit_extend", "(", "self", ",", "router", ")", "oldpath", "=", "self", ".", "path" ]
avg_line_len: 38.677419
score: 19
def sign_transaction(self, tx_data, unspents=None):  # pragma: no cover
    """Creates a signed P2PKH transaction using previously prepared
    transaction data.

    :param tx_data: Hex-encoded transaction or output of :func:`~bit.Key.prepare_transaction`.
    :type tx_data: ``str``
    :param unspents: The UTXOs to use as the inputs. By default Bit will
                     communicate with the blockchain itself.
    :type unspents: ``list`` of :class:`~bit.network.meta.Unspent`
    :returns: The signed transaction as hex.
    :rtype: ``str``
    """
    try:  # Json-tx-data from :func:`~bit.Key.prepare_transaction`
        data = json.loads(tx_data)
        assert(unspents is None)

        unspents = [Unspent.from_dict(unspent) for unspent in data['unspents']]
        outputs = data['outputs']

        return create_new_transaction(self, unspents, outputs)
    except:  # May be hex-encoded transaction using batching:
        try:
            unspents = unspents or self.get_unspents()
        except ConnectionError:
            raise ConnectionError(
                'All APIs are unreachable. Please provide the unspent '
                'inputs as unspents directly to sign this transaction.')

        tx_data = deserialize(tx_data)
        return sign_tx(self, tx_data, unspents=unspents)
[ "def", "sign_transaction", "(", "self", ",", "tx_data", ",", "unspents", "=", "None", ")", ":", "# pragma: no cover", "try", ":", "# Json-tx-data from :func:`~bit.Key.prepare_transaction`", "data", "=", "json", ".", "loads", "(", "tx_data", ")", "assert", "(", "unspents", "is", "None", ")", "unspents", "=", "[", "Unspent", ".", "from_dict", "(", "unspent", ")", "for", "unspent", "in", "data", "[", "'unspents'", "]", "]", "outputs", "=", "data", "[", "'outputs'", "]", "return", "create_new_transaction", "(", "self", ",", "unspents", ",", "outputs", ")", "except", ":", "# May be hex-encoded transaction using batching:", "try", ":", "unspents", "=", "unspents", "or", "self", ".", "get_unspents", "(", ")", "except", "ConnectionError", ":", "raise", "ConnectionError", "(", "'All APIs are unreachable. Please provide the unspent '", "'inputs as unspents directly to sign this transaction.'", ")", "tx_data", "=", "deserialize", "(", "tx_data", ")", "return", "sign_tx", "(", "self", ",", "tx_data", ",", "unspents", "=", "unspents", ")" ]
avg_line_len: 46.066667
score: 22.133333
def get_barycenter(self):
    """Return the mass weighted average location.

    Args:
        None

    Returns:
        :class:`numpy.ndarray`:
    """
    try:
        mass = self['mass'].values
    except KeyError:
        mass = self.add_data('mass')['mass'].values
    pos = self.loc[:, ['x', 'y', 'z']].values
    return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass()
[ "def", "get_barycenter", "(", "self", ")", ":", "try", ":", "mass", "=", "self", "[", "'mass'", "]", ".", "values", "except", "KeyError", ":", "mass", "=", "self", ".", "add_data", "(", "'mass'", ")", "[", "'mass'", "]", ".", "values", "pos", "=", "self", ".", "loc", "[", ":", ",", "[", "'x'", ",", "'y'", ",", "'z'", "]", "]", ".", "values", "return", "(", "pos", "*", "mass", "[", ":", ",", "None", "]", ")", ".", "sum", "(", "axis", "=", "0", ")", "/", "self", ".", "get_total_mass", "(", ")" ]
avg_line_len: 27.933333
score: 18.466667
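The broadcasting in the return line is worth seeing on toy data. A minimal numpy sketch where the total mass is simply mass.sum():

import numpy as np

# Toy data: three point masses on the x-axis.
mass = np.array([1.0, 1.0, 2.0])
pos = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [4.0, 0.0, 0.0]])

# mass[:, None] broadcasts against the (n, 3) position array so each row is
# weighted by its mass, exactly as in get_barycenter above.
barycenter = (pos * mass[:, None]).sum(axis=0) / mass.sum()
print(barycenter)  # [2.25 0.   0.  ]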
def localeselector():
    """Default locale selector used in abilian applications."""
    # if a user is logged in, use the locale from the user settings
    user = getattr(g, "user", None)
    if user is not None:
        locale = getattr(user, "locale", None)
        if locale:
            return locale

    # Otherwise, try to guess the language from the user accept header
    # the browser transmits. By default we support en/fr.
    # The best match wins.
    return request.accept_languages.best_match(
        current_app.config["BABEL_ACCEPT_LANGUAGES"]
    )
[ "def", "localeselector", "(", ")", ":", "# if a user is logged in, use the locale from the user settings", "user", "=", "getattr", "(", "g", ",", "\"user\"", ",", "None", ")", "if", "user", "is", "not", "None", ":", "locale", "=", "getattr", "(", "user", ",", "\"locale\"", ",", "None", ")", "if", "locale", ":", "return", "locale", "# Otherwise, try to guess the language from the user accept header the browser", "# transmits. By default we support en/fr. The best match wins.", "return", "request", ".", "accept_languages", ".", "best_match", "(", "current_app", ".", "config", "[", "\"BABEL_ACCEPT_LANGUAGES\"", "]", ")" ]
avg_line_len: 39.428571
score: 19.5
def query_input(question, default=None, color=default_color):
    """Ask a question for input via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.

    The "answer" return value is a str.
    """
    if default is None or default == '':
        prompt = ' '
    elif type(default) == str:
        prompt = flo(' [{default}] ')
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    while True:
        sys.stdout.write(color(question + prompt))
        choice = raw_input()
        if default is not None and choice == '':
            return default
        if choice != '':
            return choice
[ "def", "query_input", "(", "question", ",", "default", "=", "None", ",", "color", "=", "default_color", ")", ":", "if", "default", "is", "None", "or", "default", "==", "''", ":", "prompt", "=", "' '", "elif", "type", "(", "default", ")", "==", "str", ":", "prompt", "=", "flo", "(", "' [{default}] '", ")", "else", ":", "raise", "ValueError", "(", "\"invalid default answer: '%s'\"", "%", "default", ")", "while", "True", ":", "sys", ".", "stdout", ".", "write", "(", "color", "(", "question", "+", "prompt", ")", ")", "choice", "=", "raw_input", "(", ")", "if", "default", "is", "not", "None", "and", "choice", "==", "''", ":", "return", "default", "if", "choice", "!=", "''", ":", "return", "choice" ]
avg_line_len: 32.772727
score: 17.090909
def release(version):
    """Tags all submodules for a new release.

    Ensures that git tags, as well as the version.py files in each
    submodule, agree and that the new version is strictly greater than the
    current version. Will fail if the new version is not an increment
    (following PEP 440). Creates a new git tag and commit.
    """
    check_new_version(version)
    set_new_version(version)
    commit_new_version(version)
    set_git_tag(version)
[ "def", "release", "(", "version", ")", ":", "check_new_version", "(", "version", ")", "set_new_version", "(", "version", ")", "commit_new_version", "(", "version", ")", "set_git_tag", "(", "version", ")" ]
avg_line_len: 40.909091
score: 22.727273
def can(self):
    """Grant permission if owner or admin."""
    return str(current_user.get_id()) == str(self.community.id_user) or \
        DynamicPermission(ActionNeed('admin-access')).can()
[ "def", "can", "(", "self", ")", ":", "return", "str", "(", "current_user", ".", "get_id", "(", ")", ")", "==", "str", "(", "self", ".", "community", ".", "id_user", ")", "or", "DynamicPermission", "(", "ActionNeed", "(", "'admin-access'", ")", ")", ".", "can", "(", ")" ]
avg_line_len: 50.75
score: 21.5
def request_leadership(self, opt_count, skip_brokers, skip_partitions):
    """Under-balanced broker requests leadership from current leader, on the
    pretext that it recursively can maintain its leadership count as optimal.

    :key_terms:
    leader-balanced: Count of brokers as leader is at least opt-count

    Algorithm:
    =========
    Step-1: Broker will request leadership from current-leader of partitions
            it belongs to.
    Step-2: Current-leaders will grant their leadership if one of these
            happens:
        a) Either they remain leader-balanced.
        b) Or they will recursively request leadership from other partitions
           until they become leader-balanced.
        If both of these conditions fail, they will revoke their
        leadership-grant.
    Step-3: If current-broker becomes leader-balanced it will return,
            otherwise it moves ahead with the next partition.
    """
    # Possible partitions which can grant leadership to broker
    owned_partitions = list(filter(
        lambda p: self is not p.leader and len(p.replicas) > 1,
        self.partitions,
    ))
    for partition in owned_partitions:
        # Partition not available to grant leadership when:
        # 1. Broker is already under leadership change or
        # 2. Partition has already granted leadership before
        if partition.leader in skip_brokers or partition in skip_partitions:
            continue
        # Current broker is granted leadership temporarily
        prev_leader = partition.swap_leader(self)
        # Partition shouldn't be used again
        skip_partitions.append(partition)
        # Continue if prev-leader remains balanced
        # If leadership of prev_leader is to be revoked, it is considered balanced
        if prev_leader.count_preferred_replica() >= opt_count or \
                prev_leader.revoked_leadership:
            # If current broker is leader-balanced return else
            # request next-partition
            if self.count_preferred_replica() >= opt_count:
                return
            else:
                continue
        else:  # prev-leader (broker) became unbalanced
            # Append skip-brokers list so that it is not unbalanced further
            skip_brokers.append(prev_leader)
            # Try recursively arrange leadership for prev-leader
            prev_leader.request_leadership(opt_count, skip_brokers, skip_partitions)
            # If prev-leader couldn't be leader-balanced
            # revert its previous grant to current-broker
            if prev_leader.count_preferred_replica() < opt_count:
                # Partition can be used again for rebalancing
                skip_partitions.remove(partition)
                partition.swap_leader(prev_leader)
                # Try requesting leadership from next partition
                continue
            else:
                # If prev-leader successfully balanced
                skip_partitions.append(partition)
                # Removing from skip-broker list, since it can now again be
                # used for granting leadership for some other partition
                skip_brokers.remove(prev_leader)
                if self.count_preferred_replica() >= opt_count:
                    # Return if current-broker is leader-balanced
                    return
                else:
                    continue
[ "def", "request_leadership", "(", "self", ",", "opt_count", ",", "skip_brokers", ",", "skip_partitions", ")", ":", "# Possible partitions which can grant leadership to broker", "owned_partitions", "=", "list", "(", "filter", "(", "lambda", "p", ":", "self", "is", "not", "p", ".", "leader", "and", "len", "(", "p", ".", "replicas", ")", ">", "1", ",", "self", ".", "partitions", ",", ")", ")", "for", "partition", "in", "owned_partitions", ":", "# Partition not available to grant leadership when:", "# 1. Broker is already under leadership change or", "# 2. Partition has already granted leadership before", "if", "partition", ".", "leader", "in", "skip_brokers", "or", "partition", "in", "skip_partitions", ":", "continue", "# Current broker is granted leadership temporarily", "prev_leader", "=", "partition", ".", "swap_leader", "(", "self", ")", "# Partition shouldn't be used again", "skip_partitions", ".", "append", "(", "partition", ")", "# Continue if prev-leader remains balanced", "# If leadership of prev_leader is to be revoked, it is considered balanced", "if", "prev_leader", ".", "count_preferred_replica", "(", ")", ">=", "opt_count", "or", "prev_leader", ".", "revoked_leadership", ":", "# If current broker is leader-balanced return else", "# request next-partition", "if", "self", ".", "count_preferred_replica", "(", ")", ">=", "opt_count", ":", "return", "else", ":", "continue", "else", ":", "# prev-leader (broker) became unbalanced", "# Append skip-brokers list so that it is not unbalanced further", "skip_brokers", ".", "append", "(", "prev_leader", ")", "# Try recursively arrange leadership for prev-leader", "prev_leader", ".", "request_leadership", "(", "opt_count", ",", "skip_brokers", ",", "skip_partitions", ")", "# If prev-leader couldn't be leader-balanced", "# revert its previous grant to current-broker", "if", "prev_leader", ".", "count_preferred_replica", "(", ")", "<", "opt_count", ":", "# Partition can be used again for rebalancing", "skip_partitions", ".", "remove", "(", "partition", ")", "partition", ".", "swap_leader", "(", "prev_leader", ")", "# Try requesting leadership from next partition", "continue", "else", ":", "# If prev-leader successfully balanced", "skip_partitions", ".", "append", "(", "partition", ")", "# Removing from skip-broker list, since it can now again be", "# used for granting leadership for some other partition", "skip_brokers", ".", "remove", "(", "prev_leader", ")", "if", "self", ".", "count_preferred_replica", "(", ")", ">=", "opt_count", ":", "# Return if current-broker is leader-balanced", "return", "else", ":", "continue" ]
avg_line_len: 52.676471
score: 22.044118
def read_file(filename):
    """
    return the contents of the file named filename or None if file not found
    """
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            return f.read()
[ "def", "read_file", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
avg_line_len: 41
score: 7.8
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
    return Column(jc)
[ "def", "json_tuple", "(", "col", ",", "*", "fields", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "json_tuple", "(", "_to_java_column", "(", "col", ")", ",", "_to_seq", "(", "sc", ",", "fields", ")", ")", "return", "Column", "(", "jc", ")" ]
avg_line_len: 46.928571
score: 22.285714
def set_units(self, unit):
    """Set the unit for this data point

    Unit, as with data_type, is actually associated with the stream and not
    the individual data point. As such, changing this within a stream is
    not encouraged. Setting the unit on the data point is useful when the
    stream might be created with the write of a data point.
    """
    self._units = validate_type(unit, type(None), *six.string_types)
[ "def", "set_units", "(", "self", ",", "unit", ")", ":", "self", ".", "_units", "=", "validate_type", "(", "unit", ",", "type", "(", "None", ")", ",", "*", "six", ".", "string_types", ")" ]
avg_line_len: 45
score: 26.4
def merge(infiles, outfile, same_run, templatefile):
    """
    Merge multiple OSW files (for large experiments, it is recommended to
    subsample first).
    """
    if len(infiles) < 1:
        raise click.ClickException("At least one PyProphet input file needs to be provided.")

    merge_osw(infiles, outfile, templatefile, same_run)
[ "def", "merge", "(", "infiles", ",", "outfile", ",", "same_run", ",", "templatefile", ")", ":", "if", "len", "(", "infiles", ")", "<", "1", ":", "raise", "click", ".", "ClickException", "(", "\"At least one PyProphet input file needs to be provided.\"", ")", "merge_osw", "(", "infiles", ",", "outfile", ",", "templatefile", ",", "same_run", ")" ]
avg_line_len: 37
score: 25.666667
def ApplyEdits(self, adds=None, updates=None, deletes=None):
    """This operation adds, updates and deletes features to the associated
    feature layer or table in a single call (POST only). The apply edits
    operation is performed on a feature service layer resource. The result
    of this operation is 3 arrays of edit results (for adds, updates and
    deletes respectively). Each edit result identifies a single feature and
    indicates if the edit was successful or not. If not, it also includes
    an error code and an error description."""
    add_str, update_str = None, None
    if adds:
        add_str = ",".join(json.dumps(
            feature._json_struct_for_featureset) for feature in adds)
    if updates:
        update_str = ",".join(json.dumps(
            feature._json_struct_for_featureset) for feature in updates)
    return self._get_subfolder("./applyEdits", JsonPostResult,
                               {'adds': add_str,
                                'updates': update_str,
                                'deletes': deletes})
[ "def", "ApplyEdits", "(", "self", ",", "adds", "=", "None", ",", "updates", "=", "None", ",", "deletes", "=", "None", ")", ":", "add_str", ",", "update_str", "=", "None", ",", "None", "if", "adds", ":", "add_str", "=", "\",\"", ".", "join", "(", "json", ".", "dumps", "(", "feature", ".", "_json_struct_for_featureset", ")", "for", "feature", "in", "adds", ")", "if", "updates", ":", "update_str", "=", "\",\"", ".", "join", "(", "json", ".", "dumps", "(", "feature", ".", "_json_struct_for_featureset", ")", "for", "feature", "in", "updates", ")", "return", "self", ".", "_get_subfolder", "(", "\"./applyEdits\"", ",", "JsonPostResult", ",", "{", "'adds'", ":", "add_str", ",", "'updates'", ":", "update_str", ",", "'deletes'", ":", "deletes", "}", ")" ]
avg_line_len: 64.92
score: 25.6
def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True,
                     service_instance=None):
    '''
    Creates disk group on an ESXi host with the specified cache and
    capacity disks.

    cache_disk_id
        The canonical name of the disk to be used as a cache. The disk must
        be ssd.

    capacity_disk_ids
        A list containing canonical names of the capacity disks. Must contain
        at least one id.

    safety_checks
        Specify whether to perform safety check or to skip the checks and try
        performing the required task. Default value is True.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_diskgroup cache_disk_id='naa.000000000000001'
            capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
    '''
    log.trace('Validating diskgroup input')
    schema = DiskGroupsDiskIdSchema.serialize()
    try:
        jsonschema.validate(
            {'diskgroups': [{'cache_id': cache_disk_id,
                             'capacity_ids': capacity_disk_ids}]}, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    if safety_checks:
        diskgroups = \
            salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id])
        if diskgroups:
            raise VMwareObjectExistsError(
                'Diskgroup with cache disk id \'{0}\' already exists ESXi '
                'host \'{1}\''.format(cache_disk_id, hostname))
    disk_ids = capacity_disk_ids[:]
    disk_ids.insert(0, cache_disk_id)
    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids)
    for id in disk_ids:
        if not [d for d in disks if d.canonicalName == id]:
            raise VMwareObjectRetrievalError(
                'No disk with id \'{0}\' was found in ESXi host \'{1}\''
                ''.format(id, hostname))
    cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0]
    capacity_disks = [d for d in disks
                      if d.canonicalName in capacity_disk_ids]
    vsan_disk_mgmt_system = \
        salt.utils.vsan.get_vsan_disk_management_system(service_instance)
    dg = salt.utils.vsan.create_diskgroup(service_instance,
                                          vsan_disk_mgmt_system,
                                          host_ref,
                                          cache_disk,
                                          capacity_disks)
    return True
[ "def", "create_diskgroup", "(", "cache_disk_id", ",", "capacity_disk_ids", ",", "safety_checks", "=", "True", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Validating diskgroup input'", ")", "schema", "=", "DiskGroupsDiskIdSchema", ".", "serialize", "(", ")", "try", ":", "jsonschema", ".", "validate", "(", "{", "'diskgroups'", ":", "[", "{", "'cache_id'", ":", "cache_disk_id", ",", "'capacity_ids'", ":", "capacity_disk_ids", "}", "]", "}", ",", "schema", ")", "except", "jsonschema", ".", "exceptions", ".", "ValidationError", "as", "exc", ":", "raise", "ArgumentValueError", "(", "exc", ")", "host_ref", "=", "_get_proxy_target", "(", "service_instance", ")", "hostname", "=", "__proxy__", "[", "'esxi.get_details'", "]", "(", ")", "[", "'esxi_host'", "]", "if", "safety_checks", ":", "diskgroups", "=", "salt", ".", "utils", ".", "vmware", ".", "get_diskgroups", "(", "host_ref", ",", "[", "cache_disk_id", "]", ")", "if", "diskgroups", ":", "raise", "VMwareObjectExistsError", "(", "'Diskgroup with cache disk id \\'{0}\\' already exists ESXi '", "'host \\'{1}\\''", ".", "format", "(", "cache_disk_id", ",", "hostname", ")", ")", "disk_ids", "=", "capacity_disk_ids", "[", ":", "]", "disk_ids", ".", "insert", "(", "0", ",", "cache_disk_id", ")", "disks", "=", "salt", ".", "utils", ".", "vmware", ".", "get_disks", "(", "host_ref", ",", "disk_ids", "=", "disk_ids", ")", "for", "id", "in", "disk_ids", ":", "if", "not", "[", "d", "for", "d", "in", "disks", "if", "d", ".", "canonicalName", "==", "id", "]", ":", "raise", "VMwareObjectRetrievalError", "(", "'No disk with id \\'{0}\\' was found in ESXi host \\'{1}\\''", "''", ".", "format", "(", "id", ",", "hostname", ")", ")", "cache_disk", "=", "[", "d", "for", "d", "in", "disks", "if", "d", ".", "canonicalName", "==", "cache_disk_id", "]", "[", "0", "]", "capacity_disks", "=", "[", "d", "for", "d", "in", "disks", "if", "d", ".", "canonicalName", "in", "capacity_disk_ids", "]", "vsan_disk_mgmt_system", "=", "salt", ".", "utils", ".", "vsan", ".", "get_vsan_disk_management_system", "(", "service_instance", ")", "dg", "=", "salt", ".", "utils", ".", "vsan", ".", "create_diskgroup", "(", "service_instance", ",", "vsan_disk_mgmt_system", ",", "host_ref", ",", "cache_disk", ",", "capacity_disks", ")", "return", "True" ]
avg_line_len: 41.825397
score: 22.714286
def _parse_downloadcount(self, text):
    """ parse download count text
    format may be:
    - pure number: 1000
    - number + unit: 1万
    """
    unit_map = {
        '千': 1000,
        '万': 10000,
        '百万': 1000000,
    }

    m = re.match(r'^(\d+(?:\.\d+)?)(\w{0,2})$', text, re.UNICODE)
    if m:
        n = float(m.group(1))
        u = m.group(2)
        u = unit_map.get(u, 1)
        return int(n * u)
    else:
        return 0
[ "def", "_parse_downloadcount", "(", "self", ",", "text", ")", ":", "unit_map", "=", "{", "'千'", ":", "1000", ",", "'万'", ":", "10000", ",", "'百万'", ":", "1000000", ",", "}", "m", "=", "re", ".", "match", "(", "r'^(\\d+(?:\\.\\d+)?)(\\w{0,2})$'", ",", "text", ",", "re", ".", "UNICODE", ")", "if", "m", ":", "n", "=", "float", "(", "m", ".", "group", "(", "1", ")", ")", "u", "=", "m", ".", "group", "(", "2", ")", "u", "=", "unit_map", ".", "get", "(", "u", ",", "1", ")", "return", "int", "(", "n", "*", "u", ")", "else", ":", "return", "0" ]
avg_line_len: 24.85
score: 16.2
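A standalone sketch of the same parsing logic, with hypothetical inputs; CJK unit characters count as \w, so the two-character unit group covers 百万 as well:

import re

UNIT_MAP = {'千': 1000, '万': 10000, '百万': 1000000}

def parse_downloadcount(text):
    # Standalone version of the method above: optional decimal number
    # followed by up to two word characters naming the unit.
    m = re.match(r'^(\d+(?:\.\d+)?)(\w{0,2})$', text, re.UNICODE)
    if not m:
        return 0
    return int(float(m.group(1)) * UNIT_MAP.get(m.group(2), 1))

assert parse_downloadcount('1000') == 1000
assert parse_downloadcount('1.5万') == 15000
assert parse_downloadcount('n/a') == 0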
def get_tl(self):
    """Returns the top left border of the cell"""
    cell_above_left = CellBorders(self.cell_attributes,
                                  *self.cell.get_above_left_key_rect())
    return cell_above_left.get_r()
[ "def", "get_tl", "(", "self", ")", ":", "cell_above_left", "=", "CellBorders", "(", "self", ".", "cell_attributes", ",", "*", "self", ".", "cell", ".", "get_above_left_key_rect", "(", ")", ")", "return", "cell_above_left", ".", "get_r", "(", ")" ]
avg_line_len: 40.333333
score: 19.833333
def prepare_params(self):
    """
    Prepare the parameters passed to the templatetag
    """
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.node.fragment_name)
        # Remove quotes that surround the name
        for char in '\'\"':
            if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                    self.fragment_name = self.fragment_name[1:-1]
                    break
                else:
                    raise ValueError('Number of quotes around the fragment name is incoherent')

    self.expire_time = self.get_expire_time()

    if self.options.versioning:
        self.version = force_bytes(self.get_version())

    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
[ "def", "prepare_params", "(", "self", ")", ":", "if", "self", ".", "options", ".", "resolve_fragment", ":", "self", ".", "fragment_name", "=", "self", ".", "node", ".", "fragment_name", ".", "resolve", "(", "self", ".", "context", ")", "else", ":", "self", ".", "fragment_name", "=", "str", "(", "self", ".", "node", ".", "fragment_name", ")", "# Remove quotes that surround the name", "for", "char", "in", "'\\'\\\"'", ":", "if", "self", ".", "fragment_name", ".", "startswith", "(", "char", ")", "or", "self", ".", "fragment_name", ".", "endswith", "(", "char", ")", ":", "if", "self", ".", "fragment_name", ".", "startswith", "(", "char", ")", "and", "self", ".", "fragment_name", ".", "endswith", "(", "char", ")", ":", "self", ".", "fragment_name", "=", "self", ".", "fragment_name", "[", "1", ":", "-", "1", "]", "break", "else", ":", "raise", "ValueError", "(", "'Number of quotes around the fragment name is incoherent'", ")", "self", ".", "expire_time", "=", "self", ".", "get_expire_time", "(", ")", "if", "self", ".", "options", ".", "versioning", ":", "self", ".", "version", "=", "force_bytes", "(", "self", ".", "get_version", "(", ")", ")", "self", ".", "vary_on", "=", "[", "template", ".", "Variable", "(", "var", ")", ".", "resolve", "(", "self", ".", "context", ")", "for", "var", "in", "self", ".", "node", ".", "vary_on", "]" ]
avg_line_len: 44.695652
score: 24.782609
def get_attributes(self, sids, flds, **overrides):
    """Check cache first, then defer to data manager

    :param sids: security identifiers
    :param flds: fields to retrieve
    :param overrides: key-value pairs to pass to the mgr get_attributes method
    :return: DataFrame with flds as columns and sids as the row indices
    """
    # Unfortunately must be inefficient with request
    flds = _force_array(flds)
    sids = _force_array(sids)
    cached = self._cache_get_attribute(sids, flds, **overrides)
    if not cached:  # build get
        df = self.dm.get_attributes(sids, flds, **overrides)
        [self._cache_update_attribute(sid, df.ix[sid:sid], **overrides) for sid in sids]
        return df
    else:
        # Retrieve all missing and merge with existing cache
        for sid in sids:
            missed = flds if sid not in cached else set(flds) - set(cached[sid].columns)
            if missed:
                df = self.dm.get_attributes(sid, missed, **overrides)
                self._cache_update_attribute(sid, df, **overrides)
        # now just retrieve from cache
        data = self._cache_get_attribute(sids, flds, **overrides)
        # reindex and grab columns to sort
        frame = pd.concat(data.values())
        return frame
[ "def", "get_attributes", "(", "self", ",", "sids", ",", "flds", ",", "*", "*", "overrides", ")", ":", "# Unfortunately must be inefficient with request", "flds", "=", "_force_array", "(", "flds", ")", "sids", "=", "_force_array", "(", "sids", ")", "cached", "=", "self", ".", "_cache_get_attribute", "(", "sids", ",", "flds", ",", "*", "*", "overrides", ")", "if", "not", "cached", ":", "# build get", "df", "=", "self", ".", "dm", ".", "get_attributes", "(", "sids", ",", "flds", ",", "*", "*", "overrides", ")", "[", "self", ".", "_cache_update_attribute", "(", "sid", ",", "df", ".", "ix", "[", "sid", ":", "sid", "]", ",", "*", "*", "overrides", ")", "for", "sid", "in", "sids", "]", "return", "df", "else", ":", "# Retrieve all missing and merge with existing cache", "for", "sid", "in", "sids", ":", "missed", "=", "flds", "if", "sid", "not", "in", "cached", "else", "set", "(", "flds", ")", "-", "set", "(", "cached", "[", "sid", "]", ".", "columns", ")", "if", "missed", ":", "df", "=", "self", ".", "dm", ".", "get_attributes", "(", "sid", ",", "missed", ",", "*", "*", "overrides", ")", "self", ".", "_cache_update_attribute", "(", "sid", ",", "df", ",", "*", "*", "overrides", ")", "# now just retrieve from cache", "data", "=", "self", ".", "_cache_get_attribute", "(", "sids", ",", "flds", ",", "*", "*", "overrides", ")", "# reindex and grab columns to sort", "frame", "=", "pd", ".", "concat", "(", "data", ".", "values", "(", ")", ")", "return", "frame" ]
avg_line_len: 48.071429
score: 19.107143
def _sign_single(self, payload, signing_key):
    """
    Make a single-signature JWT.
    Returns the serialized token (compact form), as a string
    """
    if not isinstance(payload, Mapping):
        raise TypeError('Expecting a mapping object, as only '
                        'JSON objects can be used as payloads.')

    token_segments = []

    signing_key = load_signing_key(signing_key, self.crypto_backend)

    header = self._make_header()
    header_b64 = base64url_encode(json_encode(header))
    payload_b64 = base64url_encode(json_encode(payload))
    signature_b64 = self._make_signature(header_b64, payload_b64, signing_key)

    token_segments = [header_b64, payload_b64, signature_b64]

    # combine the header, payload, and signature into a token and return it
    token = b'.'.join(token_segments)
    return token
[ "def", "_sign_single", "(", "self", ",", "payload", ",", "signing_key", ")", ":", "if", "not", "isinstance", "(", "payload", ",", "Mapping", ")", ":", "raise", "TypeError", "(", "'Expecting a mapping object, as only '", "'JSON objects can be used as payloads.'", ")", "token_segments", "=", "[", "]", "signing_key", "=", "load_signing_key", "(", "signing_key", ",", "self", ".", "crypto_backend", ")", "header", "=", "self", ".", "_make_header", "(", ")", "header_b64", "=", "base64url_encode", "(", "json_encode", "(", "header", ")", ")", "payload_b64", "=", "base64url_encode", "(", "json_encode", "(", "payload", ")", ")", "signature_b64", "=", "self", ".", "_make_signature", "(", "header_b64", ",", "payload_b64", ",", "signing_key", ")", "token_segments", "=", "[", "header_b64", ",", "payload_b64", ",", "signature_b64", "]", "# combine the header, payload, and signature into a token and return it", "token", "=", "b'.'", ".", "join", "(", "token_segments", ")", "return", "token" ]
avg_line_len: 38.478261
score: 21.956522
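The compact serialization built here is the standard three base64url segments joined by dots. A minimal sketch using an HS256 HMAC as a hypothetical stand-in for the class's pluggable crypto backend and its _make_header/_make_signature helpers:

import base64
import hashlib
import hmac
import json

def b64url(data):
    # JWTs use unpadded base64url segments
    return base64.urlsafe_b64encode(data).rstrip(b'=')

# HS256 with a hard-coded key is purely illustrative here.
header_b64 = b64url(json.dumps({"typ": "JWT", "alg": "HS256"}).encode())
payload_b64 = b64url(json.dumps({"sub": "alice"}).encode())
signature_b64 = b64url(hmac.new(b'secret-key',
                                header_b64 + b'.' + payload_b64,
                                hashlib.sha256).digest())
token = b'.'.join([header_b64, payload_b64, signature_b64])
print(token.decode())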
def _get_node_parent(self, age, pos):
    """Get the parent node of node, which is located in tree's node list.

    Returns:
        object: The parent node.
    """
    return self.nodes[age][int(pos / self.comp)]
[ "def", "_get_node_parent", "(", "self", ",", "age", ",", "pos", ")", ":", "return", "self", ".", "nodes", "[", "age", "]", "[", "int", "(", "pos", "/", "self", ".", "comp", ")", "]" ]
avg_line_len: 32.571429
score: 11.857143
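The index arithmetic suggests nodes at each age are stored as a flat list over a complete tree with branching factor comp, so the parent of position pos lives at pos // comp one age earlier. A hypothetical sketch of that layout (the container shape is an assumption, not taken from the source):

# Branching factor 2 gives the usual binary-heap parent rule.
comp = 2
nodes = {
    0: ['root'],
    1: ['L', 'R'],
    2: ['LL', 'LR', 'RL', 'RR'],
}

pos = 3  # 'RR' at age 2
parent = nodes[2 - 1][int(pos / comp)]
print(parent)  # 'R'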
def _visible(self, element):
    """Used to filter text elements that have invisible text on the page.
    """
    if element.name in self._disallowed_names:
        return False
    elif re.match(u'<!--.*-->', six.text_type(element.extract())):
        return False
    return True
[ "def", "_visible", "(", "self", ",", "element", ")", ":", "if", "element", ".", "name", "in", "self", ".", "_disallowed_names", ":", "return", "False", "elif", "re", ".", "match", "(", "u'<!--.*-->'", ",", "six", ".", "text_type", "(", "element", ".", "extract", "(", ")", ")", ")", ":", "return", "False", "return", "True" ]
avg_line_len: 37.875
score: 13.125
def visit_keyword(self, node: AST, dfltChaining: bool = True) -> str:
    """Return representation of `node` as keyword arg."""
    arg = node.arg
    if arg is None:
        return f"**{self.visit(node.value)}"
    else:
        return f"{arg}={self.visit(node.value)}"
[ "def", "visit_keyword", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "arg", "=", "node", ".", "arg", "if", "arg", "is", "None", ":", "return", "f\"**{self.visit(node.value)}\"", "else", ":", "return", "f\"{arg}={self.visit(node.value)}\"" ]
41.142857
15.857143
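visit_keyword relies on how Python's ast module encodes the two kinds of keyword argument: a named keyword carries the parameter name in node.arg, while **-unpacking sets node.arg to None. A quick stdlib check of that convention:

import ast

call = ast.parse("f(x=1, **extra)", mode="eval").body
named, star = call.keywords
assert named.arg == "x"   # rendered as "x=<value>"
assert star.arg is None   # rendered as "**<value>"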
def spawn_agent(self, agent_definition, location): """Queues a spawn agent command. It will be applied when `tick` or `step` is called next. The agent cannot be used until the next frame. Args: agent_definition (:obj:`AgentDefinition`): The definition of the agent to spawn. location (np.ndarray or list): The position to spawn the agent in the world, in XYZ coordinates (in meters). """ self._should_write_to_command_buffer = True self._add_agents(agent_definition) command_to_send = SpawnAgentCommand(location, agent_definition.name, agent_definition.type) self._commands.add_command(command_to_send)
[ "def", "spawn_agent", "(", "self", ",", "agent_definition", ",", "location", ")", ":", "self", ".", "_should_write_to_command_buffer", "=", "True", "self", ".", "_add_agents", "(", "agent_definition", ")", "command_to_send", "=", "SpawnAgentCommand", "(", "location", ",", "agent_definition", ".", "name", ",", "agent_definition", ".", "type", ")", "self", ".", "_commands", ".", "add_command", "(", "command_to_send", ")" ]
57.5
26.333333
def backwards(self, orm): "Write your backwards methods here." from django.contrib.auth.models import Group projects = orm['samples.Project'].objects.all() names = [PROJECT_GROUP_TEMPLATE.format(p.name) for p in projects] # Remove groups named after these teams Group.objects.filter(name__in=names).delete()
[ "def", "backwards", "(", "self", ",", "orm", ")", ":", "from", "django", ".", "contrib", ".", "auth", ".", "models", "import", "Group", "projects", "=", "orm", "[", "'samples.Project'", "]", ".", "objects", ".", "all", "(", ")", "names", "=", "[", "PROJECT_GROUP_TEMPLATE", ".", "format", "(", "p", ".", "name", ")", "for", "p", "in", "projects", "]", "# Remove groups named after these teams", "Group", ".", "objects", ".", "filter", "(", "name__in", "=", "names", ")", ".", "delete", "(", ")" ]
38.777778
19.888889
def evaluate(self, env): """Evaluate the function call in the environment, returning a Unicode string. """ if self.ident in env.functions: arg_vals = [expr.evaluate(env) for expr in self.args] try: out = env.functions[self.ident](*arg_vals) except Exception as exc: # Function raised exception! Maybe inlining the name of # the exception will help debug. return u'<%s>' % str(exc) return str(out) else: return self.original
[ "def", "evaluate", "(", "self", ",", "env", ")", ":", "if", "self", ".", "ident", "in", "env", ".", "functions", ":", "arg_vals", "=", "[", "expr", ".", "evaluate", "(", "env", ")", "for", "expr", "in", "self", ".", "args", "]", "try", ":", "out", "=", "env", ".", "functions", "[", "self", ".", "ident", "]", "(", "*", "arg_vals", ")", "except", "Exception", "as", "exc", ":", "# Function raised exception! Maybe inlining the name of", "# the exception will help debug.", "return", "u'<%s>'", "%", "str", "(", "exc", ")", "return", "str", "(", "out", ")", "else", ":", "return", "self", ".", "original" ]
38.2
12.866667
def _hash_filter_fn(self, filter_fn, **kwargs): """ Construct a string representing the state of filter_fn. Used to cache filtered variants or effects uniquely, depending on filter_fn values """ filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) # hash function source code fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) # hash kwarg values kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] # hash closure vars - for case where filter_fn is defined within closure of filter_fn closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): ## capture hash for any function within closure if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() # Sorted for file name consistency closure_str = "null" if len(closure) == 0 else "-".join(closure) # construct final string comprising hashed components hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
[ "def", "_hash_filter_fn", "(", "self", ",", "filter_fn", ",", "*", "*", "kwargs", ")", ":", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ",", "default", "=", "\"filter-none\"", ")", "logger", ".", "debug", "(", "\"Computing hash for filter_fn: {} with kwargs {}\"", ".", "format", "(", "filter_fn_name", ",", "str", "(", "dict", "(", "*", "*", "kwargs", ")", ")", ")", ")", "# hash function source code", "fn_source", "=", "str", "(", "dill", ".", "source", ".", "getsource", "(", "filter_fn", ")", ")", "pickled_fn_source", "=", "pickle", ".", "dumps", "(", "fn_source", ")", "## encode as byte string", "hashed_fn_source", "=", "int", "(", "hashlib", ".", "sha1", "(", "pickled_fn_source", ")", ".", "hexdigest", "(", ")", ",", "16", ")", "%", "(", "10", "**", "11", ")", "# hash kwarg values", "kw_dict", "=", "dict", "(", "*", "*", "kwargs", ")", "kw_hash", "=", "list", "(", ")", "if", "not", "kw_dict", ":", "kw_hash", "=", "[", "\"default\"", "]", "else", ":", "[", "kw_hash", ".", "append", "(", "\"{}-{}\"", ".", "format", "(", "key", ",", "h", ")", ")", "for", "(", "key", ",", "h", ")", "in", "sorted", "(", "kw_dict", ".", "items", "(", ")", ")", "]", "# hash closure vars - for case where filter_fn is defined within closure of filter_fn", "closure", "=", "[", "]", "nonlocals", "=", "inspect", ".", "getclosurevars", "(", "filter_fn", ")", ".", "nonlocals", "for", "(", "key", ",", "val", ")", "in", "nonlocals", ".", "items", "(", ")", ":", "## capture hash for any function within closure", "if", "inspect", ".", "isfunction", "(", "val", ")", ":", "closure", ".", "append", "(", "self", ".", "_hash_filter_fn", "(", "val", ")", ")", "closure", ".", "sort", "(", ")", "# Sorted for file name consistency", "closure_str", "=", "\"null\"", "if", "len", "(", "closure", ")", "==", "0", "else", "\"-\"", ".", "join", "(", "closure", ")", "# construct final string comprising hashed components", "hashed_fn", "=", "\".\"", ".", "join", "(", "[", "\"-\"", ".", "join", "(", "[", "filter_fn_name", ",", "str", "(", "hashed_fn_source", ")", "]", ")", ",", "\".\"", ".", "join", "(", "kw_hash", ")", ",", "closure_str", "]", ")", "return", "hashed_fn" ]
52.69697
21.515152
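The cache key above concatenates the function name, a truncated SHA-1 of the pickled function source, the sorted kwargs, and hashes of any functions captured in the closure. A stripped-down sketch of the source-hashing step, assuming inspect.getsource in place of dill (dill additionally handles lambdas and interactively defined functions):

import hashlib
import inspect
import pickle

def hash_fn_source(fn, digits=11):
    # same recipe as the record: pickle the source text, SHA-1 it,
    # and truncate the integer digest to a fixed number of digits
    src = inspect.getsource(fn)
    return int(hashlib.sha1(pickle.dumps(src)).hexdigest(), 16) % (10 ** digits)

def keep_positive(x):
    return x > 0

# same source -> same cache key component
assert hash_fn_source(keep_positive) == hash_fn_source(keep_positive)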
def container_setting(name, container, settings=None): ''' Set the value of the setting for an IIS container. :param str name: The name of the IIS container. :param str container: The type of IIS container. The container types are: AppPools, Sites, SslBindings :param str settings: A dictionary of the setting names and their values. Example of usage for the ``AppPools`` container: .. code-block:: yaml site0-apppool-setting: win_iis.container_setting: - name: site0 - container: AppPools - settings: managedPipelineMode: Integrated processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword processModel.identityType: SpecificUser Example of usage for the ``Sites`` container: .. code-block:: yaml site0-site-setting: win_iis.container_setting: - name: site0 - container: Sites - settings: logFile.logFormat: W3C logFile.period: Daily limits.maxUrlSegments: 32 ''' identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'} ret = {'name': name, 'changes': {}, 'comment': str(), 'result': None} if not settings: ret['comment'] = 'No settings to change provided.' ret['result'] = True return ret ret_settings = { 'changes': {}, 'failures': {}, } current_settings = __salt__['win_iis.get_container_setting'](name=name, container=container, settings=settings.keys()) for setting in settings: # map identity type from numeric to string for comparing if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys(): settings[setting] = identityType_map2string[settings[setting]] if str(settings[setting]) != str(current_settings[setting]): ret_settings['changes'][setting] = {'old': current_settings[setting], 'new': settings[setting]} if not ret_settings['changes']: ret['comment'] = 'Settings already contain the provided values.' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'Settings will be changed.' ret['changes'] = ret_settings return ret __salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings) new_settings = __salt__['win_iis.get_container_setting'](name=name, container=container, settings=settings.keys()) for setting in settings: if str(settings[setting]) != str(new_settings[setting]): ret_settings['failures'][setting] = {'old': current_settings[setting], 'new': new_settings[setting]} ret_settings['changes'].pop(setting, None) if ret_settings['failures']: ret['comment'] = 'Some settings failed to change.' ret['changes'] = ret_settings ret['result'] = False else: ret['comment'] = 'Set settings to contain the provided values.' ret['changes'] = ret_settings['changes'] ret['result'] = True return ret
[ "def", "container_setting", "(", "name", ",", "container", ",", "settings", "=", "None", ")", ":", "identityType_map2string", "=", "{", "0", ":", "'LocalSystem'", ",", "1", ":", "'LocalService'", ",", "2", ":", "'NetworkService'", ",", "3", ":", "'SpecificUser'", ",", "4", ":", "'ApplicationPoolIdentity'", "}", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "str", "(", ")", ",", "'result'", ":", "None", "}", "if", "not", "settings", ":", "ret", "[", "'comment'", "]", "=", "'No settings to change provided.'", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "ret_settings", "=", "{", "'changes'", ":", "{", "}", ",", "'failures'", ":", "{", "}", ",", "}", "current_settings", "=", "__salt__", "[", "'win_iis.get_container_setting'", "]", "(", "name", "=", "name", ",", "container", "=", "container", ",", "settings", "=", "settings", ".", "keys", "(", ")", ")", "for", "setting", "in", "settings", ":", "# map identity type from numeric to string for comparing", "if", "setting", "==", "'processModel.identityType'", "and", "settings", "[", "setting", "]", "in", "identityType_map2string", ".", "keys", "(", ")", ":", "settings", "[", "setting", "]", "=", "identityType_map2string", "[", "settings", "[", "setting", "]", "]", "if", "str", "(", "settings", "[", "setting", "]", ")", "!=", "str", "(", "current_settings", "[", "setting", "]", ")", ":", "ret_settings", "[", "'changes'", "]", "[", "setting", "]", "=", "{", "'old'", ":", "current_settings", "[", "setting", "]", ",", "'new'", ":", "settings", "[", "setting", "]", "}", "if", "not", "ret_settings", "[", "'changes'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Settings already contain the provided values.'", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Settings will be changed.'", "ret", "[", "'changes'", "]", "=", "ret_settings", "return", "ret", "__salt__", "[", "'win_iis.set_container_setting'", "]", "(", "name", "=", "name", ",", "container", "=", "container", ",", "settings", "=", "settings", ")", "new_settings", "=", "__salt__", "[", "'win_iis.get_container_setting'", "]", "(", "name", "=", "name", ",", "container", "=", "container", ",", "settings", "=", "settings", ".", "keys", "(", ")", ")", "for", "setting", "in", "settings", ":", "if", "str", "(", "settings", "[", "setting", "]", ")", "!=", "str", "(", "new_settings", "[", "setting", "]", ")", ":", "ret_settings", "[", "'failures'", "]", "[", "setting", "]", "=", "{", "'old'", ":", "current_settings", "[", "setting", "]", ",", "'new'", ":", "new_settings", "[", "setting", "]", "}", "ret_settings", "[", "'changes'", "]", ".", "pop", "(", "setting", ",", "None", ")", "if", "ret_settings", "[", "'failures'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Some settings failed to change.'", "ret", "[", "'changes'", "]", "=", "ret_settings", "ret", "[", "'result'", "]", "=", "False", "else", ":", "ret", "[", "'comment'", "]", "=", "'Set settings to contain the provided values.'", "ret", "[", "'changes'", "]", "=", "ret_settings", "[", "'changes'", "]", "ret", "[", "'result'", "]", "=", "True", "return", "ret" ]
38.861702
23.968085
def add_role(self, service_name, deployment_name, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole', resource_extension_references=None, provision_guest_agent=None, vm_image_name=None, media_location=None): ''' Adds a virtual machine to an existing deployment. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. system_config: Contains the metadata required to provision a virtual machine from a Windows or Linux OS image. Use an instance of WindowsConfigurationSet or LinuxConfigurationSet. os_virtual_hard_disk: Contains the parameters Windows Azure uses to create the operating system disk for the virtual machine. If you are creating a Virtual Machine by using a VM Image, this parameter is not used. network_config: Encapsulates the metadata required to create the virtual network configuration for a virtual machine. If you do not include a network configuration set you will not be able to access the VM through VIPs over the internet. If your virtual machine belongs to a virtual network you can not specify which subnet address space it resides under. availability_set_name: Specifies the name of an availability set to which to add the virtual machine. This value controls the virtual machine allocation in the Windows Azure environment. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. data_virtual_hard_disks: Contains the parameters Windows Azure uses to create a data disk for a virtual machine. role_size: The size of the virtual machine to allocate. The default value is Small. Possible values are: ExtraSmall, Small, Medium, Large, ExtraLarge. The specified value must be compatible with the disk selected in the OSVirtualHardDisk values. role_type: The type of the role for the virtual machine. The only supported value is PersistentVMRole. resource_extension_references: Optional. Contains a collection of resource extensions that are to be installed on the Virtual Machine. This element is used if provision_guest_agent is set to True. provision_guest_agent: Optional. Indicates whether the VM Agent is installed on the Virtual Machine. To run a resource extension in a Virtual Machine, this service must be installed. vm_image_name: Optional. Specifies the name of the VM Image that is to be used to create the Virtual Machine. If this is specified, the system_config and network_config parameters are not used. media_location: Optional. Required if the Virtual Machine is being created from a published VM Image. Specifies the location of the VHD file that is created when VMImageName specifies a published VM Image. ''' _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('role_name', role_name) return self._perform_post( self._get_role_path(service_name, deployment_name), _XmlSerializer.add_role_to_xml( role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size, resource_extension_references, provision_guest_agent, vm_image_name, media_location), as_async=True)
[ "def", "add_role", "(", "self", ",", "service_name", ",", "deployment_name", ",", "role_name", ",", "system_config", ",", "os_virtual_hard_disk", ",", "network_config", "=", "None", ",", "availability_set_name", "=", "None", ",", "data_virtual_hard_disks", "=", "None", ",", "role_size", "=", "None", ",", "role_type", "=", "'PersistentVMRole'", ",", "resource_extension_references", "=", "None", ",", "provision_guest_agent", "=", "None", ",", "vm_image_name", "=", "None", ",", "media_location", "=", "None", ")", ":", "_validate_not_none", "(", "'service_name'", ",", "service_name", ")", "_validate_not_none", "(", "'deployment_name'", ",", "deployment_name", ")", "_validate_not_none", "(", "'role_name'", ",", "role_name", ")", "return", "self", ".", "_perform_post", "(", "self", ".", "_get_role_path", "(", "service_name", ",", "deployment_name", ")", ",", "_XmlSerializer", ".", "add_role_to_xml", "(", "role_name", ",", "system_config", ",", "os_virtual_hard_disk", ",", "role_type", ",", "network_config", ",", "availability_set_name", ",", "data_virtual_hard_disks", ",", "role_size", ",", "resource_extension_references", ",", "provision_guest_agent", ",", "vm_image_name", ",", "media_location", ")", ",", "as_async", "=", "True", ")" ]
49.833333
20.880952
def gap_index_map(sequence, gap_chars='-'): """ Opposite of ungap_index_map: returns mapping from gapped index to ungapped index. >>> gap_index_map('AC-TG-') {0: 0, 1: 1, 3: 2, 4: 3} """ return dict( (v, k) for k, v in list(ungap_index_map(sequence, gap_chars).items()))
[ "def", "gap_index_map", "(", "sequence", ",", "gap_chars", "=", "'-'", ")", ":", "return", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "list", "(", "ungap_index_map", "(", "sequence", ",", "gap_chars", ")", ".", "items", "(", ")", ")", ")" ]
29.8
19.4
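To make the inversion concrete, here is a sketch of an ungap_index_map consistent with the doctest above (the real one lives elsewhere in that codebase): it maps ungapped positions to gapped positions, and gap_index_map simply flips the pairs.

def ungap_index_map(sequence, gap_chars="-"):
    # ungapped index -> gapped index, skipping gap characters
    mapping, ungapped = {}, 0
    for gapped, ch in enumerate(sequence):
        if ch not in gap_chars:
            mapping[ungapped] = gapped
            ungapped += 1
    return mapping

assert ungap_index_map("AC-TG-") == {0: 0, 1: 1, 2: 3, 3: 4}
# inverted, this reproduces the doctest: {0: 0, 1: 1, 3: 2, 4: 3}
assert {v: k for k, v in ungap_index_map("AC-TG-").items()} == {0: 0, 1: 1, 3: 2, 4: 3}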
def const_shuffle(arr, seed=23980): """ Shuffle an array in-place with a fixed seed. Saves and restores the global numpy RNG state so the caller's random stream is unaffected. """ old_state = np.random.get_state() np.random.seed(seed) np.random.shuffle(arr) np.random.set_state(old_state)
[ "def", "const_shuffle", "(", "arr", ",", "seed", "=", "23980", ")", ":", "old_seed", "=", "np", ".", "random", ".", "seed", "(", ")", "np", ".", "random", ".", "seed", "(", "seed", ")", "np", ".", "random", ".", "shuffle", "(", "arr", ")", "np", ".", "random", ".", "seed", "(", "old_seed", ")" ]
29
8
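Usage sketch, assuming const_shuffle as defined above is in scope: a fixed seed gives the same permutation on every call, which is the point of the helper.

import numpy as np

a = np.arange(10)
b = np.arange(10)
const_shuffle(a)
const_shuffle(b)
assert (a == b).all()  # same seed -> identical in-place shuffle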
def update(self, json_state): """Update the json data from a dictionary. Only updates keys that already exist in the device. """ self._json_state.update( {k: json_state[k] for k in json_state if self._json_state.get(k)}) self._update_name()
[ "def", "update", "(", "self", ",", "json_state", ")", ":", "self", ".", "_json_state", ".", "update", "(", "{", "k", ":", "json_state", "[", "k", "]", "for", "k", "in", "json_state", "if", "self", ".", "_json_state", ".", "get", "(", "k", ")", "}", ")", "self", ".", "_update_name", "(", ")" ]
35.375
15.75
def handler(self, scheme_name=None): """ Return the handler whose scheme name matches the specified one :param scheme_name: scheme name to search for :return: WSchemeHandler class or None (if no matching handler was found) """ if scheme_name is None: return self.__default_handler_cls for handler in self.__handlers_cls: if handler.scheme_specification().scheme_name() == scheme_name: return handler
[ "def", "handler", "(", "self", ",", "scheme_name", "=", "None", ")", ":", "if", "scheme_name", "is", "None", ":", "return", "self", ".", "__default_handler_cls", "for", "handler", "in", "self", ".", "__handlers_cls", ":", "if", "handler", ".", "scheme_specification", "(", ")", ".", "scheme_name", "(", ")", "==", "scheme_name", ":", "return", "handler" ]
37.181818
14.181818
def matrix(self): """Build matrix representation of Householder transformation. Builds the matrix representation :math:`H = I - \\beta vv^*`. **Use with care!** This routine may be helpful for testing purposes but should not be used in production codes for high dimensions since the resulting matrix is dense. """ n = self.v.shape[0] return numpy.eye(n, n) - self.beta * numpy.dot(self.v, self.v.T.conj())
[ "def", "matrix", "(", "self", ")", ":", "n", "=", "self", ".", "v", ".", "shape", "[", "0", "]", "return", "numpy", ".", "eye", "(", "n", ",", "n", ")", "-", "self", ".", "beta", "*", "numpy", ".", "dot", "(", "self", ".", "v", ",", "self", ".", "v", ".", "T", ".", "conj", "(", ")", ")" ]
39
19.333333
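A Householder matrix H = I - beta*v*v^* with beta = 2/(v^* v) is unitary and reflects v to -v. A quick numpy check of both properties (beta is constructed directly here, which is an assumption about how the class sets self.beta):

import numpy

v = numpy.array([[3.0], [4.0]])          # column vector
beta = 2.0 / numpy.dot(v.T.conj(), v)    # beta = 2 / (v* v)
H = numpy.eye(2) - beta * numpy.dot(v, v.T.conj())

assert numpy.allclose(numpy.dot(H, H.T.conj()), numpy.eye(2))  # unitary
assert numpy.allclose(numpy.dot(H, v), -v)                     # reflects v to -v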
def with_organisation(self, organisation): """Add an organisation segment. Args: organisation (str): Official name of an administrative body holding an election. Returns: IdBuilder Raises: ValueError """ if organisation is None: organisation = '' organisation = slugify(organisation) self._validate_organisation(organisation) self.organisation = organisation return self
[ "def", "with_organisation", "(", "self", ",", "organisation", ")", ":", "if", "organisation", "is", "None", ":", "organisation", "=", "''", "organisation", "=", "slugify", "(", "organisation", ")", "self", ".", "_validate_organisation", "(", "organisation", ")", "self", ".", "organisation", "=", "organisation", "return", "self" ]
26.263158
17
def add_category(self, category): """Add a category assigned to this message :type category: Category """ self._categories = self._ensure_append(category, self._categories)
[ "def", "add_category", "(", "self", ",", "category", ")", ":", "self", ".", "_categories", "=", "self", ".", "_ensure_append", "(", "category", ",", "self", ".", "_categories", ")" ]
32
16.166667
def isVideo(self): """ Is the stream labelled as a video stream. """ val=False if self.__dict__['codec_type']: if self.codec_type == 'video': val=True return val
[ "def", "isVideo", "(", "self", ")", ":", "val", "=", "False", "if", "self", ".", "__dict__", "[", "'codec_type'", "]", ":", "if", "self", ".", "codec_type", "==", "'video'", ":", "val", "=", "True", "return", "val" ]
25.444444
10.555556
def put_blob(kwargs=None, storage_conn=None, call=None): ''' .. versionadded:: 2015.8.0 Upload a blob CLI Examples: .. code-block:: bash salt-cloud -f put_blob my-azure container=base name=top.sls blob_path=/srv/salt/top.sls salt-cloud -f put_blob my-azure container=base name=content.txt blob_content='Some content' container: Name of existing container. name: Name of existing blob. blob_path: The path on the local machine of the file to upload as a blob. Either this or blob_content must be specified. blob_content: The actual content to be uploaded as a blob. Either this or blob_path must be specified. cache_control: Optional. The Blob service stores this value but does not use or modify it. content_language: Optional. Specifies the natural languages used by this resource. content_md5: Optional. An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. When this header is specified, the storage service checks the hash that has arrived with the one that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). blob_content_type: Optional. Set the blob's content type. blob_content_encoding: Optional. Set the blob's content encoding. blob_content_language: Optional. Set the blob's content language. blob_content_md5: Optional. Set the blob's MD5 hash. blob_cache_control: Optional. Sets the blob's cache control. meta_name_values: A dict containing name, value for metadata. lease_id: Required if the blob has an active lease. ''' if call != 'function': raise SaltCloudSystemExit( 'The put_blob function must be called with -f or --function.' ) if kwargs is None: kwargs = {} if 'container' not in kwargs: raise SaltCloudSystemExit('The blob container name must be specified as "container"') if 'name' not in kwargs: raise SaltCloudSystemExit('The blob name must be specified as "name"') if 'blob_path' not in kwargs and 'blob_content' not in kwargs: raise SaltCloudSystemExit( 'Either a path to a file needs to be passed in as "blob_path" or ' 'the contents of a blob as "blob_content."' ) if not storage_conn: storage_conn = get_storage_conn(conn_kwargs=kwargs) return salt.utils.msazure.put_blob(storage_conn=storage_conn, **kwargs)
[ "def", "put_blob", "(", "kwargs", "=", "None", ",", "storage_conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The put_blob function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'container'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'The blob container name must be specified as \"container\"'", ")", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'The blob name must be specified as \"name\"'", ")", "if", "'blob_path'", "not", "in", "kwargs", "and", "'blob_content'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'Either a path to a file needs to be passed in as \"blob_path\" or '", "'the contents of a blob as \"blob_content.\"'", ")", "if", "not", "storage_conn", ":", "storage_conn", "=", "get_storage_conn", "(", "conn_kwargs", "=", "kwargs", ")", "return", "salt", ".", "utils", ".", "msazure", ".", "put_blob", "(", "storage_conn", "=", "storage_conn", ",", "*", "*", "kwargs", ")" ]
35.219178
25.082192
def _backup_file(self, file, patch): """ Creates a backup of file """ dest_dir = self.quilt_pc + patch.get_name() file_dir = file.get_directory() if file_dir: #TODO get relative path dest_dir = dest_dir + file_dir backup = Backup() backup.backup_file(file, dest_dir, copy_empty=True)
[ "def", "_backup_file", "(", "self", ",", "file", ",", "patch", ")", ":", "dest_dir", "=", "self", ".", "quilt_pc", "+", "patch", ".", "get_name", "(", ")", "file_dir", "=", "file", ".", "get_directory", "(", ")", "if", "file_dir", ":", "#TODO get relative path", "dest_dir", "=", "dest_dir", "+", "file_dir", "backup", "=", "Backup", "(", ")", "backup", ".", "backup_file", "(", "file", ",", "dest_dir", ",", "copy_empty", "=", "True", ")" ]
38.555556
8.555556
def _get(self, target, alias): """ Internal method to get a specific alias. """ if target not in self._aliases: return return self._aliases[target].get(alias)
[ "def", "_get", "(", "self", ",", "target", ",", "alias", ")", ":", "if", "target", "not", "in", "self", ".", "_aliases", ":", "return", "return", "self", ".", "_aliases", "[", "target", "]", ".", "get", "(", "alias", ")" ]
29.142857
6.857143
def get_json_tuples(self, prettyprint=False, translate=True): """ Get the data as JSON tuples """ j = self.get_json(prettyprint, translate) if len(j) > 2: if prettyprint: j = j[1:-2] + ",\n" else: j = j[1:-1] + "," else: j = "" return j
[ "def", "get_json_tuples", "(", "self", ",", "prettyprint", "=", "False", ",", "translate", "=", "True", ")", ":", "j", "=", "self", ".", "get_json", "(", "prettyprint", ",", "translate", ")", "if", "len", "(", "j", ")", ">", "2", ":", "if", "prettyprint", ":", "j", "=", "j", "[", "1", ":", "-", "2", "]", "+", "\",\\n\"", "else", ":", "j", "=", "j", "[", "1", ":", "-", "1", "]", "+", "\",\"", "else", ":", "j", "=", "\"\"", "return", "j" ]
26.769231
13.384615
def fill_superseqs(data, samples): """ Fills the superseqs array with seq data from cat.clust and fills the edges array with information about paired split locations. """ ## load super to get edges io5 = h5py.File(data.clust_database, 'r+') superseqs = io5["seqs"] splits = io5["splits"] ## samples are already sorted snames = [i.name for i in samples] LOGGER.info("snames %s", snames) ## get maxlen again maxlen = data._hackersonly["max_fragment_length"] + 20 LOGGER.info("maxlen inside fill_superseqs is %s", maxlen) ## data has to be entered in blocks infile = os.path.join(data.dirs.across, data.name+"_catclust.gz") clusters = gzip.open(infile, 'r') pairdealer = itertools.izip(*[iter(clusters)]*2) ## iterate over clusters chunks = superseqs.attrs["chunksize"] chunksize = chunks[0] done = 0 iloc = 0 cloc = 0 chunkseqs = np.zeros(chunks, dtype="|S1") chunkedge = np.zeros(chunksize, dtype=np.uint16) while 1: try: done, chunk = clustdealer(pairdealer, 1) except IndexError: raise IPyradWarningExit("clustfile formatting error in %s", chunk) ## if chunk is full put into superseqs and reset counter if cloc == chunksize: LOGGER.info("cloc chunk writing %s", cloc) superseqs[iloc-cloc:iloc] = chunkseqs splits[iloc-cloc:iloc] = chunkedge ## reset chunkseqs, chunkedge, cloc cloc = 0 chunkseqs = np.zeros((chunksize, len(samples), maxlen), dtype="|S1") chunkedge = np.zeros((chunksize), dtype=np.uint16) ## get seq and split it if chunk: try: fill = np.zeros((len(samples), maxlen), dtype="|S1") fill.fill("N") piece = chunk[0].strip().split("\n") names = piece[0::2] seqs = np.array([list(i) for i in piece[1::2]]) ## fill in the separator if it exists separator = np.where(np.all(seqs == 'n', axis=0))[0] if np.any(separator): chunkedge[cloc] = separator.min() ## fill in the hits ## seqs will be (5,) IF the seqs are variable lengths, which ## can happen if it had duplicates AND there were indels, and ## so the indels did not get aligned try: shlen = seqs.shape[1] except IndexError as inst: shlen = min([len(x) for x in seqs]) for name, seq in zip(names, seqs): sidx = snames.index(name.rsplit("_", 1)[0]) #fill[sidx, :shlen] = seq[:maxlen] fill[sidx, :shlen] = seq[:shlen] ## PUT seqs INTO local ARRAY chunkseqs[cloc] = fill except Exception as inst: LOGGER.info(inst) LOGGER.info("\nfill: %s\nshlen %s\nmaxlen %s", fill.shape, shlen, maxlen) LOGGER.info("dupe chunk \n{}".format("\n".join(chunk))) ## increase counters if there was a chunk cloc += 1 iloc += 1 if done: break ## write final leftover chunk superseqs[iloc-cloc:,] = chunkseqs[:cloc] splits[iloc-cloc:] = chunkedge[:cloc] ## close super io5.close() clusters.close() ## edges is filled with splits for paired data. LOGGER.info("done filling superseqs")
[ "def", "fill_superseqs", "(", "data", ",", "samples", ")", ":", "## load super to get edges", "io5", "=", "h5py", ".", "File", "(", "data", ".", "clust_database", ",", "'r+'", ")", "superseqs", "=", "io5", "[", "\"seqs\"", "]", "splits", "=", "io5", "[", "\"splits\"", "]", "## samples are already sorted", "snames", "=", "[", "i", ".", "name", "for", "i", "in", "samples", "]", "LOGGER", ".", "info", "(", "\"snames %s\"", ",", "snames", ")", "## get maxlen again", "maxlen", "=", "data", ".", "_hackersonly", "[", "\"max_fragment_length\"", "]", "+", "20", "LOGGER", ".", "info", "(", "\"maxlen inside fill_superseqs is %s\"", ",", "maxlen", ")", "## data has to be entered in blocks", "infile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "across", ",", "data", ".", "name", "+", "\"_catclust.gz\"", ")", "clusters", "=", "gzip", ".", "open", "(", "infile", ",", "'r'", ")", "pairdealer", "=", "itertools", ".", "izip", "(", "*", "[", "iter", "(", "clusters", ")", "]", "*", "2", ")", "## iterate over clusters", "chunks", "=", "superseqs", ".", "attrs", "[", "\"chunksize\"", "]", "chunksize", "=", "chunks", "[", "0", "]", "done", "=", "0", "iloc", "=", "0", "cloc", "=", "0", "chunkseqs", "=", "np", ".", "zeros", "(", "chunks", ",", "dtype", "=", "\"|S1\"", ")", "chunkedge", "=", "np", ".", "zeros", "(", "chunksize", ",", "dtype", "=", "np", ".", "uint16", ")", "while", "1", ":", "try", ":", "done", ",", "chunk", "=", "clustdealer", "(", "pairdealer", ",", "1", ")", "except", "IndexError", ":", "raise", "IPyradWarningExit", "(", "\"clustfile formatting error in %s\"", ",", "chunk", ")", "## if chunk is full put into superseqs and reset counter", "if", "cloc", "==", "chunksize", ":", "LOGGER", ".", "info", "(", "\"cloc chunk writing %s\"", ",", "cloc", ")", "superseqs", "[", "iloc", "-", "cloc", ":", "iloc", "]", "=", "chunkseqs", "splits", "[", "iloc", "-", "cloc", ":", "iloc", "]", "=", "chunkedge", "## reset chunkseqs, chunkedge, cloc", "cloc", "=", "0", "chunkseqs", "=", "np", ".", "zeros", "(", "(", "chunksize", ",", "len", "(", "samples", ")", ",", "maxlen", ")", ",", "dtype", "=", "\"|S1\"", ")", "chunkedge", "=", "np", ".", "zeros", "(", "(", "chunksize", ")", ",", "dtype", "=", "np", ".", "uint16", ")", "## get seq and split it", "if", "chunk", ":", "try", ":", "fill", "=", "np", ".", "zeros", "(", "(", "len", "(", "samples", ")", ",", "maxlen", ")", ",", "dtype", "=", "\"|S1\"", ")", "fill", ".", "fill", "(", "\"N\"", ")", "piece", "=", "chunk", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "names", "=", "piece", "[", "0", ":", ":", "2", "]", "seqs", "=", "np", ".", "array", "(", "[", "list", "(", "i", ")", "for", "i", "in", "piece", "[", "1", ":", ":", "2", "]", "]", ")", "## fill in the separator if it exists", "separator", "=", "np", ".", "where", "(", "np", ".", "all", "(", "seqs", "==", "'n'", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "if", "np", ".", "any", "(", "separator", ")", ":", "chunkedge", "[", "cloc", "]", "=", "separator", ".", "min", "(", ")", "## fill in the hits", "## seqs will be (5,) IF the seqs are variable lengths, which ", "## can happen if it had duplicaes AND there were indels, and ", "## so the indels did not get aligned", "try", ":", "shlen", "=", "seqs", ".", "shape", "[", "1", "]", "except", "IndexError", "as", "inst", ":", "shlen", "=", "min", "(", "[", "len", "(", "x", ")", "for", "x", "in", "seqs", "]", ")", "for", "name", ",", "seq", "in", "zip", "(", "names", ",", "seqs", ")", ":", "sidx", "=", 
"snames", ".", "index", "(", "name", ".", "rsplit", "(", "\"_\"", ",", "1", ")", "[", "0", "]", ")", "#fill[sidx, :shlen] = seq[:maxlen]", "fill", "[", "sidx", ",", ":", "shlen", "]", "=", "seq", "[", ":", "shlen", "]", "## PUT seqs INTO local ARRAY", "chunkseqs", "[", "cloc", "]", "=", "fill", "except", "Exception", "as", "inst", ":", "LOGGER", ".", "info", "(", "inst", ")", "LOGGER", ".", "info", "(", "\"\\nfill: %s\\nshlen %s\\nmaxlen %s\"", ",", "fill", ".", "shape", ",", "shlen", ",", "maxlen", ")", "LOGGER", ".", "info", "(", "\"dupe chunk \\n{}\"", ".", "format", "(", "\"\\n\"", ".", "join", "(", "chunk", ")", ")", ")", "## increase counters if there was a chunk", "cloc", "+=", "1", "iloc", "+=", "1", "if", "done", ":", "break", "## write final leftover chunk", "superseqs", "[", "iloc", "-", "cloc", ":", ",", "]", "=", "chunkseqs", "[", ":", "cloc", "]", "splits", "[", "iloc", "-", "cloc", ":", "]", "=", "chunkedge", "[", ":", "cloc", "]", "## close super", "io5", ".", "close", "(", ")", "clusters", ".", "close", "(", ")", "## edges is filled with splits for paired data.", "LOGGER", ".", "info", "(", "\"done filling superseqs\"", ")" ]
34.405941
19.277228
def fd_solve(self): """ w = fd_solve() where coeff is the sparse coefficient matrix output from function coeff_matrix and qs is the array of loads (stresses) Sparse solver for one-dimensional flexure of an elastic plate """ if self.Debug: print("qs", self.qs.shape) print("Te", self.Te.shape) self.calc_max_flexural_wavelength() print("maxFlexuralWavelength_ncells", self.maxFlexuralWavelength_ncells) if self.Solver == "iterative" or self.Solver == "Iterative": if self.Debug: print("Using generalized minimal residual method for iterative solution") if self.Verbose: print("Converging to a tolerance of", self.iterative_ConvergenceTolerance, "m between iterations") # qs negative so bends down with positive load, bends up with negative load # (i.e. material removed) w = isolve.lgmres(self.coeff_matrix, -self.qs, tol=self.iterative_ConvergenceTolerance) self.w = w[0] # Reach into tuple to get my array back else: if self.Solver == 'direct' or self.Solver == 'Direct': if self.Debug: print("Using direct solution with UMFpack") else: print("Solution type not understood:") print("Defaulting to direct solution with UMFpack") # UMFpack is now the default, but setting true just to be sure in case # anything changes # qs negative so bends down with positive load, bends up with negative load # (i.e. material removed) self.w = spsolve(self.coeff_matrix, -self.qs, use_umfpack=True) if self.Debug: print("w.shape:") print(self.w.shape) print("w:") print(self.w)
[ "def", "fd_solve", "(", "self", ")", ":", "if", "self", ".", "Debug", ":", "print", "(", "\"qs\"", ",", "self", ".", "qs", ".", "shape", ")", "print", "(", "\"Te\"", ",", "self", ".", "Te", ".", "shape", ")", "self", ".", "calc_max_flexural_wavelength", "(", ")", "print", "(", "\"maxFlexuralWavelength_ncells', self.maxFlexuralWavelength_ncells\"", ")", "if", "self", ".", "Solver", "==", "\"iterative\"", "or", "self", ".", "Solver", "==", "\"Iterative\"", ":", "if", "self", ".", "Debug", ":", "print", "(", "\"Using generalized minimal residual method for iterative solution\"", ")", "if", "self", ".", "Verbose", ":", "print", "(", "\"Converging to a tolerance of\"", ",", "self", ".", "iterative_ConvergenceTolerance", ",", "\"m between iterations\"", ")", "# qs negative so bends down with positive load, bends up with neative load \r", "# (i.e. material removed)\r", "w", "=", "isolve", ".", "lgmres", "(", "self", ".", "coeff_matrix", ",", "-", "self", ".", "qs", ",", "tol", "=", "self", ".", "iterative_ConvergenceTolerance", ")", "self", ".", "w", "=", "w", "[", "0", "]", "# Reach into tuple to get my array back\r", "else", ":", "if", "self", ".", "Solver", "==", "'direct'", "or", "self", ".", "Solver", "==", "'Direct'", ":", "if", "self", ".", "Debug", ":", "print", "(", "\"Using direct solution with UMFpack\"", ")", "else", ":", "print", "(", "\"Solution type not understood:\"", ")", "print", "(", "\"Defaulting to direct solution with UMFpack\"", ")", "# UMFpack is now the default, but setting true just to be sure in case\r", "# anything changes\r", "# qs negative so bends down with positive load, bends up with neative load \r", "# (i.e. material removed)\r", "self", ".", "w", "=", "spsolve", "(", "self", ".", "coeff_matrix", ",", "-", "self", ".", "qs", ",", "use_umfpack", "=", "True", ")", "if", "self", ".", "Debug", ":", "print", "(", "\"w.shape:\"", ")", "print", "(", "self", ".", "w", ".", "shape", ")", "print", "(", "\"w:\"", ")", "print", "(", "self", ".", "w", ")" ]
40.52381
23.761905
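Both branches of fd_solve reduce to solving a sparse linear system K w = -q. A tiny self-contained demo of the direct path with scipy's default sparse solver (omitting the optional use_umfpack flag); the 1-D Laplacian here is only a stand-in for the flexure operator:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

# tridiagonal stencil [1, -2, 1] as a placeholder coefficient matrix
K = diags([1.0, -2.0, 1.0], offsets=[-1, 0, 1], shape=(5, 5), format="csc")
q = np.ones(5)
w = spsolve(K, -q)  # same sign convention: solve against the negated load
assert np.allclose(K.dot(w), -q)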
def get_sessionmaker(sqlalchemy_url, engine=None): """ Create a sessionmaker bound to the given database, building an engine from the URL if one is not supplied """ if engine is None: engine = create_engine(sqlalchemy_url) return sessionmaker(bind=engine)
[ "def", "get_sessionmaker", "(", "sqlalchemy_url", ",", "engine", "=", "None", ")", ":", "if", "engine", "is", "None", ":", "engine", "=", "create_engine", "(", "sqlalchemy_url", ")", "return", "sessionmaker", "(", "bind", "=", "engine", ")" ]
30.142857
5.857143
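Typical usage of the returned factory, assuming SQLAlchemy with an in-memory SQLite URL as a stand-in: build the sessionmaker once, then open and close sessions per unit of work.

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

Session = sessionmaker(bind=create_engine("sqlite://"))
session = Session()
try:
    session.execute(text("SELECT 1"))  # queries / inserts go here
    session.commit()
finally:
    session.close()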
def _logpdf(self, **kwargs): """ Returns the logpdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values. """ return self._polardist._logpdf(**kwargs) +\ self._azimuthaldist._logpdf(**kwargs)
[ "def", "_logpdf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polardist", ".", "_logpdf", "(", "*", "*", "kwargs", ")", "+", "self", ".", "_azimuthaldist", ".", "_logpdf", "(", "*", "*", "kwargs", ")" ]
30.333333
19.666667
def prune_taxonomy(taxF, level): """ :type taxF: file :param taxF: The taxonomy output file to parse :type level: string :param level: The level of the phylogenetic assignment at which to cut off every assigned taxonomic string. :rtype: tuple :return: A dict of unique taxonomy strings keyed on OTU ID, and a dict mapping each retained OTU ID to the other OTU IDs sharing its taxonomy """ uniqueTax = {} nuTax = {} # non-unique taxonomies for i, line in enumerate(taxF): try: otuID, tax, floatVal, otuIDr = line.strip().split('\t') except ValueError as ve: print "ERROR: incorrect number of fields found on line {} of the input file. Entry skipped.".format(i) continue tax = split_taxonomy(pad_taxonomy(tax), level) if tax not in uniqueTax: uniqueTax[tax] = otuID, floatVal, otuIDr nuTax[uniqueTax[tax][0]] = [] else: nuTax[uniqueTax[tax][0]].append(otuID) ut = {otuID: [tax, floatVal, otuIDr] for tax, (otuID, floatVal, otuIDr) in uniqueTax.iteritems()} return ut, nuTax
[ "def", "prune_taxonomy", "(", "taxF", ",", "level", ")", ":", "uniqueTax", "=", "{", "}", "nuTax", "=", "{", "}", "# non-unique taxonomies", "for", "i", ",", "line", "in", "enumerate", "(", "taxF", ")", ":", "try", ":", "otuID", ",", "tax", ",", "floatVal", ",", "otuIDr", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "except", "ValueError", "as", "ve", ":", "print", "\"ERROR: incorrect number of fields found on line {} of the input file. Entry skipped.\"", ".", "format", "(", "i", ")", "continue", "tax", "=", "split_taxonomy", "(", "pad_taxonomy", "(", "tax", ")", ",", "level", ")", "if", "tax", "not", "in", "uniqueTax", ":", "uniqueTax", "[", "tax", "]", "=", "otuID", ",", "floatVal", ",", "otuIDr", "nuTax", "[", "uniqueTax", "[", "tax", "]", "[", "0", "]", "]", "=", "[", "]", "else", ":", "nuTax", "[", "uniqueTax", "[", "tax", "]", "[", "0", "]", "]", ".", "append", "(", "otuID", ")", "ut", "=", "{", "otuID", ":", "[", "tax", ",", "floatVal", ",", "otuIDr", "]", "for", "tax", ",", "(", "otuID", ",", "floatVal", ",", "otuIDr", ")", "in", "uniqueTax", ".", "iteritems", "(", ")", "}", "return", "ut", ",", "nuTax" ]
34
20.387097
def draw_frame(self, x, y, width, height, string, fg=Ellipsis, bg=Ellipsis): """Similar to L{draw_rect} but only draws the outline of the rectangle. `width` or `height` can be None to extend to the bottom right of the console or can be a negative number to be sized relative to the total size of the console. Args: x (int): The x-coordinate to start on. y (int): The y-coordinate to start on. width (Optional[int]): Width of the rectangle. height (Optional[int]): Height of the rectangle. string (Optional[Union[Text, int]]): An integer, single character string, or None. You can set this parameter as None if you only want to change the colors of an area. fg (Optional[Union[Tuple[int, int, int], int, Ellipsis]]) bg (Optional[Union[Tuple[int, int, int], int, Ellipsis]]) Raises: AssertionError: Having x or y values that can't be placed inside of the console will raise an AssertionError. You can always use ``((x, y) in console)`` to check if a tile is drawable. .. seealso:: :any:`draw_rect`, :any:`Window` """ x, y, width, height = self._normalizeRect(x, y, width, height) fg, bg = _format_color(fg, self._fg), _format_color(bg, self._bg) char = _format_char(string) if width == 1 or height == 1: # it's just a single width line here return self.draw_rect(x, y, width, height, char, fg, bg) # draw sides of frame with draw_rect self.draw_rect(x, y, 1, height, char, fg, bg) self.draw_rect(x, y, width, 1, char, fg, bg) self.draw_rect(x + width - 1, y, 1, height, char, fg, bg) self.draw_rect(x, y + height - 1, width, 1, char, fg, bg)
[ "def", "draw_frame", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "string", ",", "fg", "=", "Ellipsis", ",", "bg", "=", "Ellipsis", ")", ":", "x", ",", "y", ",", "width", ",", "height", "=", "self", ".", "_normalizeRect", "(", "x", ",", "y", ",", "width", ",", "height", ")", "fg", ",", "bg", "=", "_format_color", "(", "fg", ",", "self", ".", "_fg", ")", ",", "_format_color", "(", "bg", ",", "self", ".", "_bg", ")", "char", "=", "_format_char", "(", "string", ")", "if", "width", "==", "1", "or", "height", "==", "1", ":", "# it's just a single width line here", "return", "self", ".", "draw_rect", "(", "x", ",", "y", ",", "width", ",", "height", ",", "char", ",", "fg", ",", "bg", ")", "# draw sides of frame with draw_rect", "self", ".", "draw_rect", "(", "x", ",", "y", ",", "1", ",", "height", ",", "char", ",", "fg", ",", "bg", ")", "self", ".", "draw_rect", "(", "x", ",", "y", ",", "width", ",", "1", ",", "char", ",", "fg", ",", "bg", ")", "self", ".", "draw_rect", "(", "x", "+", "width", "-", "1", ",", "y", ",", "1", ",", "height", ",", "char", ",", "fg", ",", "bg", ")", "self", ".", "draw_rect", "(", "x", ",", "y", "+", "height", "-", "1", ",", "width", ",", "1", ",", "char", ",", "fg", ",", "bg", ")" ]
47.589744
23.74359
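draw_frame composes the outline from four one-cell-thick rectangles (left, top, right, bottom), with the corner overlaps being harmless. The same decomposition on a plain grid of characters, as a sketch independent of the console class:

def draw_rect(grid, x, y, w, h, ch):
    for j in range(y, y + h):
        for i in range(x, x + w):
            grid[j][i] = ch

def draw_frame(grid, x, y, w, h, ch):
    draw_rect(grid, x, y, 1, h, ch)          # left side
    draw_rect(grid, x, y, w, 1, ch)          # top side
    draw_rect(grid, x + w - 1, y, 1, h, ch)  # right side
    draw_rect(grid, x, y + h - 1, w, 1, ch)  # bottom side

grid = [[" "] * 6 for _ in range(4)]
draw_frame(grid, 0, 0, 6, 4, "#")
print("\n".join("".join(row) for row in grid))
# ######
# #    #
# #    #
# ######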
def generate_and_write_ed25519_keypair(filepath=None, password=None): """ <Purpose> Generate an Ed25519 keypair, where the encrypted key (using 'password' as the passphrase) is saved to <'filepath'>. The public key portion of the generated Ed25519 key is saved to <'filepath'>.pub. If the filepath is not given, the KEYID is used as the filename and the keypair saved to the current working directory. The private key is encrypted according to 'cryptography's approach: "Encrypt using the best available encryption for a given key's backend. This is a curated encryption choice and the algorithm may change over time." <Arguments> filepath: The public and private key files are saved to <filepath>.pub and <filepath>, respectively. If the filepath is not given, the public and private keys are saved to the current working directory as <KEYID>.pub and <KEYID>. KEYID is the generated key's KEYID. password: The password, or passphrase, to encrypt the private portion of the generated Ed25519 key. A symmetric encryption key is derived from 'password', so it is not directly used. <Exceptions> securesystemslib.exceptions.FormatError, if the arguments are improperly formatted. securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted. <Side Effects> Writes key files to '<filepath>' and '<filepath>.pub'. <Returns> The 'filepath' of the written key. """ # Generate a new Ed25519 key object. ed25519_key = securesystemslib.keys.generate_ed25519_key() if not filepath: filepath = os.path.join(os.getcwd(), ed25519_key['keyid']) else: logger.debug('The filepath has been specified. Not using the key\'s' ' KEYID as the default filepath.') # Does 'filepath' have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. securesystemslib.formats.PATH_SCHEMA.check_match(filepath) # If the caller does not provide a password argument, prompt for one. if password is None: # pragma: no cover # It is safe to specify the full path of 'filepath' in the prompt and not # worry about leaking sensitive information about the key's location. # However, care should be taken when including the full path in exceptions # and log files. password = get_password('Enter a password for the Ed25519' ' key (' + Fore.RED + filepath + Fore.RESET + '): ', confirm=True) else: logger.debug('The password has been specified. Not prompting for one.') # Does 'password' have the correct format? securesystemslib.formats.PASSWORD_SCHEMA.check_match(password) # If the parent directory of filepath does not exist, # create it (and all its parent directories, if necessary). securesystemslib.util.ensure_parent_dir(filepath) # Create a temporary file, write the contents of the public key, and move # to final destination. file_object = securesystemslib.util.TempFile() # Generate the ed25519 public key file contents in metadata format (i.e., # does not include the keyid portion). keytype = ed25519_key['keytype'] keyval = ed25519_key['keyval'] scheme = ed25519_key['scheme'] ed25519key_metadata_format = securesystemslib.keys.format_keyval_to_metadata( keytype, scheme, keyval, private=False) file_object.write(json.dumps(ed25519key_metadata_format).encode('utf-8')) # Write the public key (i.e., 'public', which is in PEM format) to # '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of # the public key, and (3) move to final destination. 
# The temporary file is closed after the final move. file_object.move(filepath + '.pub') # Write the encrypted key string, conformant to # 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'. file_object = securesystemslib.util.TempFile() # Encrypt the private key if 'password' is set. if len(password): ed25519_key = securesystemslib.keys.encrypt_key(ed25519_key, password) else: logger.debug('An empty password was given. ' 'Not encrypting the private key.') ed25519_key = json.dumps(ed25519_key) # Raise 'securesystemslib.exceptions.CryptoError' if 'ed25519_key' cannot be # encrypted. file_object.write(ed25519_key.encode('utf-8')) file_object.move(filepath) return filepath
[ "def", "generate_and_write_ed25519_keypair", "(", "filepath", "=", "None", ",", "password", "=", "None", ")", ":", "# Generate a new Ed25519 key object.", "ed25519_key", "=", "securesystemslib", ".", "keys", ".", "generate_ed25519_key", "(", ")", "if", "not", "filepath", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "ed25519_key", "[", "'keyid'", "]", ")", "else", ":", "logger", ".", "debug", "(", "'The filepath has been specified. Not using the key\\'s'", "' KEYID as the default filepath.'", ")", "# Does 'filepath' have the correct format?", "# Ensure the arguments have the appropriate number of objects and object", "# types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.", "securesystemslib", ".", "formats", ".", "PATH_SCHEMA", ".", "check_match", "(", "filepath", ")", "# If the caller does not provide a password argument, prompt for one.", "if", "password", "is", "None", ":", "# pragma: no cover", "# It is safe to specify the full path of 'filepath' in the prompt and not", "# worry about leaking sensitive information about the key's location.", "# However, care should be taken when including the full path in exceptions", "# and log files.", "password", "=", "get_password", "(", "'Enter a password for the Ed25519'", "' key ('", "+", "Fore", ".", "RED", "+", "filepath", "+", "Fore", ".", "RESET", "+", "'): '", ",", "confirm", "=", "True", ")", "else", ":", "logger", ".", "debug", "(", "'The password has been specified. Not prompting for one.'", ")", "# Does 'password' have the correct format?", "securesystemslib", ".", "formats", ".", "PASSWORD_SCHEMA", ".", "check_match", "(", "password", ")", "# If the parent directory of filepath does not exist,", "# create it (and all its parent directories, if necessary).", "securesystemslib", ".", "util", ".", "ensure_parent_dir", "(", "filepath", ")", "# Create a temporary file, write the contents of the public key, and move", "# to final destination.", "file_object", "=", "securesystemslib", ".", "util", ".", "TempFile", "(", ")", "# Generate the ed25519 public key file contents in metadata format (i.e.,", "# does not include the keyid portion).", "keytype", "=", "ed25519_key", "[", "'keytype'", "]", "keyval", "=", "ed25519_key", "[", "'keyval'", "]", "scheme", "=", "ed25519_key", "[", "'scheme'", "]", "ed25519key_metadata_format", "=", "securesystemslib", ".", "keys", ".", "format_keyval_to_metadata", "(", "keytype", ",", "scheme", ",", "keyval", ",", "private", "=", "False", ")", "file_object", ".", "write", "(", "json", ".", "dumps", "(", "ed25519key_metadata_format", ")", ".", "encode", "(", "'utf-8'", ")", ")", "# Write the public key (i.e., 'public', which is in PEM format) to", "# '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of", "# the public key, and (3) move to final destination.", "# The temporary file is closed after the final move.", "file_object", ".", "move", "(", "filepath", "+", "'.pub'", ")", "# Write the encrypted key string, conformant to", "# 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.", "file_object", "=", "securesystemslib", ".", "util", ".", "TempFile", "(", ")", "# Encrypt the private key if 'password' is set.", "if", "len", "(", "password", ")", ":", "ed25519_key", "=", "securesystemslib", ".", "keys", ".", "encrypt_key", "(", "ed25519_key", ",", "password", ")", "else", ":", "logger", ".", "debug", "(", "'An empty password was given. 
'", "'Not encrypting the private key.'", ")", "ed25519_key", "=", "json", ".", "dumps", "(", "ed25519_key", ")", "# Raise 'securesystemslib.exceptions.CryptoError' if 'ed25519_key' cannot be", "# encrypted.", "file_object", ".", "write", "(", "ed25519_key", ".", "encode", "(", "'utf-8'", ")", ")", "file_object", ".", "move", "(", "filepath", ")", "return", "filepath" ]
38.443478
25.852174
def BC_selector_and_coeff_matrix_creator(self): """ Selects the boundary conditions E-W is for inside each panel N-S is for the block diagonal matrix ("with fringes") Then calls the function to build the diagonal matrix The current method of coefficient matrix construction utilizes longer-range symmetry in the coefficient matrix to build it block-wise, as opposed to the much less computationally efficient row-by-row ("serial") method that was previously employed. The method is spread across the subroutines here. Important to this is the use of np.roll() to properly offset the diagonals that end up in the main matrix: spdiags() will put each vector on the proper diagonal, but will align them such that their first cell is along the first column, instead of along a 45 degree corner-to-corner baseline that would stagger them appropriately for this solution method. Therefore, np.roll() effectively does this staggering by having the appropriate cell in the vector start at the first column. """ # Zeroth, start the timer and print the boundary conditions to the screen self.coeff_start_time = time.time() if self.Verbose: print("Boundary condition, West:", self.BC_W, type(self.BC_W)) print("Boundary condition, East:", self.BC_E, type(self.BC_E)) print("Boundary condition, North:", self.BC_N, type(self.BC_N)) print("Boundary condition, South:", self.BC_S, type(self.BC_S)) # First, set flexural rigidity boundary conditions to flesh out this padded # array self.BC_Rigidity() # Second, build the coefficient arrays -- with the rigidity b.c.'s self.get_coeff_values() # Third, apply boundary conditions to the coeff_arrays to create the # flexural solution self.BC_Flexure() # Fourth, construct the sparse diagonal array self.build_diagonals() # Finally, compute the total time this process took self.coeff_creation_time = time.time() - self.coeff_start_time if self.Quiet == False: print("Time to construct coefficient (operator) array [s]:", self.coeff_creation_time)
[ "def", "BC_selector_and_coeff_matrix_creator", "(", "self", ")", ":", "# Zeroth, start the timer and print the boundary conditions to the screen", "self", ".", "coeff_start_time", "=", "time", ".", "time", "(", ")", "if", "self", ".", "Verbose", ":", "print", "(", "\"Boundary condition, West:\"", ",", "self", ".", "BC_W", ",", "type", "(", "self", ".", "BC_W", ")", ")", "print", "(", "\"Boundary condition, East:\"", ",", "self", ".", "BC_E", ",", "type", "(", "self", ".", "BC_E", ")", ")", "print", "(", "\"Boundary condition, North:\"", ",", "self", ".", "BC_N", ",", "type", "(", "self", ".", "BC_N", ")", ")", "print", "(", "\"Boundary condition, South:\"", ",", "self", ".", "BC_S", ",", "type", "(", "self", ".", "BC_S", ")", ")", "# First, set flexural rigidity boundary conditions to flesh out this padded", "# array", "self", ".", "BC_Rigidity", "(", ")", "# Second, build the coefficient arrays -- with the rigidity b.c.'s", "self", ".", "get_coeff_values", "(", ")", "# Third, apply boundary conditions to the coeff_arrays to create the ", "# flexural solution", "self", ".", "BC_Flexure", "(", ")", "# Fourth, construct the sparse diagonal array", "self", ".", "build_diagonals", "(", ")", "# Finally, compute the total time this process took ", "self", ".", "coeff_creation_time", "=", "time", ".", "time", "(", ")", "-", "self", ".", "coeff_start_time", "if", "self", ".", "Quiet", "==", "False", ":", "print", "(", "\"Time to construct coefficient (operator) array [s]:\"", ",", "self", ".", "coeff_creation_time", ")" ]
43.77551
25.367347
def setup_parameters(self): """Add any CloudFormation parameters to the template""" t = self.template parameters = self.get_parameter_definitions() if not parameters: logger.debug("No parameters defined.") return for name, attrs in parameters.items(): p = build_parameter(name, attrs) t.add_parameter(p)
[ "def", "setup_parameters", "(", "self", ")", ":", "t", "=", "self", ".", "template", "parameters", "=", "self", ".", "get_parameter_definitions", "(", ")", "if", "not", "parameters", ":", "logger", ".", "debug", "(", "\"No parameters defined.\"", ")", "return", "for", "name", ",", "attrs", "in", "parameters", ".", "items", "(", ")", ":", "p", "=", "build_parameter", "(", "name", ",", "attrs", ")", "t", ".", "add_parameter", "(", "p", ")" ]
31.833333
15.583333
def line(self, sentences, line_number=None): """ Return the bytes for a BASIC line. If no line number is given, the current one + 10 will be used. sentences is a list of sentences """ if line_number is None: line_number = self.current_line + 10 self.current_line = line_number sep = [] result = [] for sentence in sentences: result.extend(sep) result.extend(self.sentence_bytes(sentence)) sep = [ord(':')] result.extend([ENTER]) result = self.line_number(line_number) + self.numberLH(len(result)) + result return result
[ "def", "line", "(", "self", ",", "sentences", ",", "line_number", "=", "None", ")", ":", "if", "line_number", "is", "None", ":", "line_number", "=", "self", ".", "current_line", "+", "10", "self", ".", "current_line", "=", "line_number", "sep", "=", "[", "]", "result", "=", "[", "]", "for", "sentence", "in", "sentences", ":", "result", ".", "extend", "(", "sep", ")", "result", ".", "extend", "(", "self", ".", "sentence_bytes", "(", "sentence", ")", ")", "sep", "=", "[", "ord", "(", "':'", ")", "]", "result", ".", "extend", "(", "[", "ENTER", "]", ")", "result", "=", "self", ".", "line_number", "(", "line_number", ")", "+", "self", ".", "numberLH", "(", "len", "(", "result", ")", ")", "+", "result", "return", "result" ]
32.1
16.45
def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; i.e. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. """ lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) # Arrange in reverse order so items can be efficiently popped # from a stack of chunks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (i.e. no lines started yet). if self.drop_whitespace and chunks[-1].strip() == '' and lines: # del chunks[-1] chunks.pop() while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) # If the last chunk on this line is all whitespace, drop it. if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': # del cur_line[-1] cur_line.pop() # Convert current line back to a string and store it in list # of all lines (return value). if cur_line: lines.append(indent + ''.join(cur_line)) return lines
[ "def", "_wrap_chunks", "(", "self", ",", "chunks", ")", ":", "lines", "=", "[", "]", "if", "self", ".", "width", "<=", "0", ":", "raise", "ValueError", "(", "\"invalid width %r (must be > 0)\"", "%", "self", ".", "width", ")", "# Arrange in reverse order so items can be efficiently popped", "# from a stack of chucks.", "chunks", ".", "reverse", "(", ")", "while", "chunks", ":", "# Start the list of chunks that will make up the current line.", "# cur_len is just the length of all the chunks in cur_line.", "cur_line", "=", "[", "]", "cur_len", "=", "0", "# Figure out which static string will prefix this line.", "if", "lines", ":", "indent", "=", "self", ".", "subsequent_indent", "else", ":", "indent", "=", "self", ".", "initial_indent", "# Maximum width for this line.", "width", "=", "self", ".", "width", "-", "len", "(", "indent", ")", "# First chunk on line is whitespace -- drop it, unless this", "# is the very beginning of the text (ie. no lines started yet).", "if", "self", ".", "drop_whitespace", "and", "chunks", "[", "-", "1", "]", ".", "strip", "(", ")", "==", "''", "and", "lines", ":", "# del chunks[-1]", "chunks", ".", "pop", "(", ")", "while", "chunks", ":", "l", "=", "len", "(", "chunks", "[", "-", "1", "]", ")", "# Can at least squeeze this chunk onto the current line.", "if", "cur_len", "+", "l", "<=", "width", ":", "cur_line", ".", "append", "(", "chunks", ".", "pop", "(", ")", ")", "cur_len", "+=", "l", "# Nope, this line is full.", "else", ":", "break", "# The current line is full, and the next chunk is too big to", "# fit on *any* line (not just this one).", "if", "chunks", "and", "len", "(", "chunks", "[", "-", "1", "]", ")", ">", "width", ":", "self", ".", "_handle_long_word", "(", "chunks", ",", "cur_line", ",", "cur_len", ",", "width", ")", "# If the last chunk on this line is all whitespace, drop it.", "if", "self", ".", "drop_whitespace", "and", "cur_line", "and", "cur_line", "[", "-", "1", "]", ".", "strip", "(", ")", "==", "''", ":", "# del cur_line[-1]", "cur_line", ".", "pop", "(", ")", "# Convert current line back to a string and store it in list", "# of all lines (return value).", "if", "cur_line", ":", "lines", ".", "append", "(", "indent", "+", "''", ".", "join", "(", "cur_line", ")", ")", "return", "lines" ]
38.661972
22.929577
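The greedy fill loop above is the core of the standard library's textwrap algorithm, which this function appears to be adapted from; a minimal check of the same behavior, assuming only the standard library:

import textwrap

# Greedy wrapping: each line takes as many whole chunks as fit.
wrapper = textwrap.TextWrapper(width=12, drop_whitespace=True)
print(wrapper.wrap("the quick brown fox jumps over the lazy dog"))
# -> ['the quick', 'brown fox', 'jumps over', 'the lazy dog']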
def param_names(scipy_dist): """Get names of fit parameters from a ``scipy.rv_*`` distribution.""" if not isinstance(scipy_dist, rv_continuous): raise TypeError names = ['loc', 'scale'] if scipy_dist.shapes is not None: names += scipy_dist.shapes.split() return names
[ "def", "param_names", "(", "scipy_dist", ")", ":", "if", "not", "isinstance", "(", "scipy_dist", ",", "rv_continuous", ")", ":", "raise", "TypeError", "names", "=", "[", "'loc'", ",", "'scale'", "]", "if", "scipy_dist", ".", "shapes", "is", "not", "None", ":", "names", "+=", "scipy_dist", ".", "shapes", ".", "split", "(", ")", "return", "names" ]
37
9.875
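Assuming scipy is installed and param_names above is in scope, shape parameters are appended after the common loc/scale pair:

from scipy.stats import norm, gamma

print(param_names(norm))   # ['loc', 'scale'] -- norm has no shape parameters
print(param_names(gamma))  # ['loc', 'scale', 'a'] -- gamma appends its shape 'a'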
def download_and_expand(self): """Download and expand RPM Python binding.""" top_dir_name = None if self.git_branch: # Download a source by git clone. top_dir_name = self._download_and_expand_by_git() else: # Download a source from the archive URL. # Downloading the compressed archive is better than "git clone", # because it is faster. # If the download fails because the URL is not found, try "git clone". try: top_dir_name = self._download_and_expand_from_archive_url() except RemoteFileNotFoundError: Log.info('Try to download by git clone.') top_dir_name = self._download_and_expand_by_git() return top_dir_name
[ "def", "download_and_expand", "(", "self", ")", ":", "top_dir_name", "=", "None", "if", "self", ".", "git_branch", ":", "# Download a source by git clone.", "top_dir_name", "=", "self", ".", "_download_and_expand_by_git", "(", ")", "else", ":", "# Download a source from the arcihve URL.", "# Downloading the compressed archive is better than \"git clone\",", "# because it is faster.", "# If download failed due to URL not found, try \"git clone\".", "try", ":", "top_dir_name", "=", "self", ".", "_download_and_expand_from_archive_url", "(", ")", "except", "RemoteFileNotFoundError", ":", "Log", ".", "info", "(", "'Try to download by git clone.'", ")", "top_dir_name", "=", "self", ".", "_download_and_expand_by_git", "(", ")", "return", "top_dir_name" ]
45.529412
17.117647
def init_api(self, instance_config, custom_tags): """ Guarantees a valid auth scope for this instance. Communicates with the identity server and initializes a new scope when one is absent, or has been forcibly removed due to token expiry """ custom_tags = custom_tags or [] keystone_server_url = instance_config.get("keystone_server_url") proxy_config = self.get_instance_proxy(instance_config, keystone_server_url) if self._api is None: # We are missing the entire instance scope either because it is the first time we initialize it or because # authentication previously failed and got removed from the cache # Let's populate it now try: self.log.debug("Fetch scope for instance {}".format(self.instance_name)) # Set keystone api with proper token self._api = ApiFactory.create(self.log, proxy_config, instance_config) self.service_check( self.IDENTITY_API_SC, AgentCheck.OK, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) except KeystoneUnreachable as e: self.warning( "The agent could not contact the specified identity server at {} . " "Are you sure it is up at that address?".format(keystone_server_url) ) self.log.debug("Problem grabbing auth token: %s", e) self.service_check( self.IDENTITY_API_SC, AgentCheck.CRITICAL, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) # If Keystone is down/unreachable, we default the # Nova and Neutron APIs to UNKNOWN since we cannot access the service catalog self.service_check( self.NETWORK_API_SC, AgentCheck.UNKNOWN, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) self.service_check( self.COMPUTE_API_SC, AgentCheck.UNKNOWN, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) except MissingNovaEndpoint as e: self.warning("The agent could not find a compatible Nova endpoint in your service catalog!") self.log.debug("Failed to get nova endpoint for response catalog: %s", e) self.service_check( self.COMPUTE_API_SC, AgentCheck.CRITICAL, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) except MissingNeutronEndpoint: self.warning("The agent could not find a compatible Neutron endpoint in your service catalog!") self.service_check( self.NETWORK_API_SC, AgentCheck.CRITICAL, tags=["keystone_server: {}".format(keystone_server_url)] + custom_tags, ) if self._api is None: # Fast fail in the absence of an api raise IncompleteConfig()
[ "def", "init_api", "(", "self", ",", "instance_config", ",", "custom_tags", ")", ":", "custom_tags", "=", "custom_tags", "or", "[", "]", "keystone_server_url", "=", "instance_config", ".", "get", "(", "\"keystone_server_url\"", ")", "proxy_config", "=", "self", ".", "get_instance_proxy", "(", "instance_config", ",", "keystone_server_url", ")", "if", "self", ".", "_api", "is", "None", ":", "# We are missing the entire instance scope either because it is the first time we initialize it or because", "# authentication previously failed and got removed from the cache", "# Let's populate it now", "try", ":", "self", ".", "log", ".", "debug", "(", "\"Fetch scope for instance {}\"", ".", "format", "(", "self", ".", "instance_name", ")", ")", "# Set keystone api with proper token", "self", ".", "_api", "=", "ApiFactory", ".", "create", "(", "self", ".", "log", ",", "proxy_config", ",", "instance_config", ")", "self", ".", "service_check", "(", "self", ".", "IDENTITY_API_SC", ",", "AgentCheck", ".", "OK", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "except", "KeystoneUnreachable", "as", "e", ":", "self", ".", "warning", "(", "\"The agent could not contact the specified identity server at {} . \"", "\"Are you sure it is up at that address?\"", ".", "format", "(", "keystone_server_url", ")", ")", "self", ".", "log", ".", "debug", "(", "\"Problem grabbing auth token: %s\"", ",", "e", ")", "self", ".", "service_check", "(", "self", ".", "IDENTITY_API_SC", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "# If Keystone is down/unreachable, we default the", "# Nova and Neutron APIs to UNKNOWN since we cannot access the service catalog", "self", ".", "service_check", "(", "self", ".", "NETWORK_API_SC", ",", "AgentCheck", ".", "UNKNOWN", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "self", ".", "service_check", "(", "self", ".", "COMPUTE_API_SC", ",", "AgentCheck", ".", "UNKNOWN", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "except", "MissingNovaEndpoint", "as", "e", ":", "self", ".", "warning", "(", "\"The agent could not find a compatible Nova endpoint in your service catalog!\"", ")", "self", ".", "log", ".", "debug", "(", "\"Failed to get nova endpoint for response catalog: %s\"", ",", "e", ")", "self", ".", "service_check", "(", "self", ".", "COMPUTE_API_SC", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "except", "MissingNeutronEndpoint", ":", "self", ".", "warning", "(", "\"The agent could not find a compatible Neutron endpoint in your service catalog!\"", ")", "self", ".", "service_check", "(", "self", ".", "NETWORK_API_SC", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "[", "\"keystone_server: {}\"", ".", "format", "(", "keystone_server_url", ")", "]", "+", "custom_tags", ",", ")", "if", "self", ".", "_api", "is", "None", ":", "# Fast fail in the absence of an api", "raise", "IncompleteConfig", "(", ")" ]
48.086957
23.73913
def noise_from_psd(length, delta_t, psd, seed=None): """ Create noise with a given psd. Return noise with a given psd. Note that if unique noise is desired a unique seed should be provided. Parameters ---------- length : int The length of noise to generate in samples. delta_t : float The time step of the noise. psd : FrequencySeries The noise weighting to color the noise. seed : {0, int} The seed to generate the noise. Returns -------- noise : TimeSeries A TimeSeries containing gaussian noise colored by the given psd. """ noise_ts = TimeSeries(zeros(length), delta_t=delta_t) if seed is None: seed = numpy.random.randint(2**32) randomness = lal.gsl_rng("ranlux", seed) N = int (1.0 / delta_t / psd.delta_f) n = N//2+1 stride = N//2 if n > len(psd): raise ValueError("PSD not compatible with requested delta_t") psd = (psd[0:n]).lal() psd.data.data[n-1] = 0 segment = TimeSeries(zeros(N), delta_t=delta_t).lal() length_generated = 0 SimNoise(segment, 0, psd, randomness) while (length_generated < length): if (length_generated + stride) < length: noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride] else: noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated] length_generated += stride SimNoise(segment, stride, psd, randomness) return noise_ts
[ "def", "noise_from_psd", "(", "length", ",", "delta_t", ",", "psd", ",", "seed", "=", "None", ")", ":", "noise_ts", "=", "TimeSeries", "(", "zeros", "(", "length", ")", ",", "delta_t", "=", "delta_t", ")", "if", "seed", "is", "None", ":", "seed", "=", "numpy", ".", "random", ".", "randint", "(", "2", "**", "32", ")", "randomness", "=", "lal", ".", "gsl_rng", "(", "\"ranlux\"", ",", "seed", ")", "N", "=", "int", "(", "1.0", "/", "delta_t", "/", "psd", ".", "delta_f", ")", "n", "=", "N", "//", "2", "+", "1", "stride", "=", "N", "//", "2", "if", "n", ">", "len", "(", "psd", ")", ":", "raise", "ValueError", "(", "\"PSD not compatible with requested delta_t\"", ")", "psd", "=", "(", "psd", "[", "0", ":", "n", "]", ")", ".", "lal", "(", ")", "psd", ".", "data", ".", "data", "[", "n", "-", "1", "]", "=", "0", "segment", "=", "TimeSeries", "(", "zeros", "(", "N", ")", ",", "delta_t", "=", "delta_t", ")", ".", "lal", "(", ")", "length_generated", "=", "0", "SimNoise", "(", "segment", ",", "0", ",", "psd", ",", "randomness", ")", "while", "(", "length_generated", "<", "length", ")", ":", "if", "(", "length_generated", "+", "stride", ")", "<", "length", ":", "noise_ts", ".", "data", "[", "length_generated", ":", "length_generated", "+", "stride", "]", "=", "segment", ".", "data", ".", "data", "[", "0", ":", "stride", "]", "else", ":", "noise_ts", ".", "data", "[", "length_generated", ":", "length", "]", "=", "segment", ".", "data", ".", "data", "[", "0", ":", "length", "-", "length_generated", "]", "length_generated", "+=", "stride", "SimNoise", "(", "segment", ",", "stride", ",", "psd", ",", "randomness", ")", "return", "noise_ts" ]
28.207547
22.301887
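noise_from_psd relies on lalsimulation's SimNoise with half-segment strides; a much-simplified numpy-only sketch of the same idea (coloring white noise by a PSD in the frequency domain, ignoring normalization constants and the stride/overlap machinery; colored_noise_sketch is a hypothetical name):

import numpy as np

def colored_noise_sketch(length, psd_vals, seed=0):
    # White Gaussian noise, shaped in the frequency domain by sqrt(PSD).
    rng = np.random.default_rng(seed)
    spectrum = np.fft.rfft(rng.standard_normal(length))
    asd = np.sqrt(psd_vals[:spectrum.size])  # amplitude spectral density
    return np.fft.irfft(spectrum * asd, n=length)

noise = colored_noise_sketch(1024, np.ones(513))
print(noise.shape)  # (1024,)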
def stop_NoteContainer(self, notecontainer): """Add note_off events for each note in the NoteContainer to the track_data.""" # if there is more than one note in the container, the deltatime should # be set back to zero after the first one has been stopped if len(notecontainer) <= 1: [self.stop_Note(x) for x in notecontainer] else: self.stop_Note(notecontainer[0]) self.set_deltatime(0) [self.stop_Note(x) for x in notecontainer[1:]]
[ "def", "stop_NoteContainer", "(", "self", ",", "notecontainer", ")", ":", "# if there is more than one note in the container, the deltatime should", "# be set back to zero after the first one has been stopped", "if", "len", "(", "notecontainer", ")", "<=", "1", ":", "[", "self", ".", "stop_Note", "(", "x", ")", "for", "x", "in", "notecontainer", "]", "else", ":", "self", ".", "stop_Note", "(", "notecontainer", "[", "0", "]", ")", "self", ".", "set_deltatime", "(", "0", ")", "[", "self", ".", "stop_Note", "(", "x", ")", "for", "x", "in", "notecontainer", "[", "1", ":", "]", "]" ]
47.272727
13.090909
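The list comprehensions above are used purely for their side effects; a plain-loop equivalent (hypothetical stop_all helper, same logic as the method) reads more idiomatically:

def stop_all(track, notes):
    # Stop every note; after the first stop, the deltatime is reset to zero
    # so the remaining note_off events land on the same tick.
    if len(notes) <= 1:
        for note in notes:
            track.stop_Note(note)
    else:
        track.stop_Note(notes[0])
        track.set_deltatime(0)
        for note in notes[1:]:
            track.stop_Note(note)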
def visit_Alt(self, node: parsing.Alt) -> [ast.stmt]: """Generates python code for alternatives. try: try: <code for clause> #raise AltFalse when alternative is False raise AltTrue() except AltFalse: pass return False except AltTrue: pass """ clauses = [self.visit(clause) for clause in node.ptlist] for clause in clauses: if not isinstance(clause, ast.expr): break else: return ast.BoolOp(ast.Or(), clauses) res = ast.Try([], [ast.ExceptHandler( ast.Name('AltTrue', ast.Load()), None, [ast.Pass()])], [], []) alt_true = [ast.Raise(ast.Call( ast.Name('AltTrue', ast.Load()), [], [], None, None), None)] alt_false = [ast.ExceptHandler( ast.Name('AltFalse', ast.Load()), None, [ast.Pass()])] self.in_try += 1 for clause in node.ptlist: res.body.append( ast.Try(self._clause(self.visit(clause)) + alt_true, alt_false, [], [])) self.in_try -= 1 res.body.append(self.__exit_scope()) return [res]
[ "def", "visit_Alt", "(", "self", ",", "node", ":", "parsing", ".", "Alt", ")", "->", "[", "ast", ".", "stmt", "]", ":", "clauses", "=", "[", "self", ".", "visit", "(", "clause", ")", "for", "clause", "in", "node", ".", "ptlist", "]", "for", "clause", "in", "clauses", ":", "if", "not", "isinstance", "(", "clause", ",", "ast", ".", "expr", ")", ":", "break", "else", ":", "return", "ast", ".", "BoolOp", "(", "ast", ".", "Or", "(", ")", ",", "clauses", ")", "res", "=", "ast", ".", "Try", "(", "[", "]", ",", "[", "ast", ".", "ExceptHandler", "(", "ast", ".", "Name", "(", "'AltTrue'", ",", "ast", ".", "Load", "(", ")", ")", ",", "None", ",", "[", "ast", ".", "Pass", "(", ")", "]", ")", "]", ",", "[", "]", ",", "[", "]", ")", "alt_true", "=", "[", "ast", ".", "Raise", "(", "ast", ".", "Call", "(", "ast", ".", "Name", "(", "'AltTrue'", ",", "ast", ".", "Load", "(", ")", ")", ",", "[", "]", ",", "[", "]", ",", "None", ",", "None", ")", ",", "None", ")", "]", "alt_false", "=", "[", "ast", ".", "ExceptHandler", "(", "ast", ".", "Name", "(", "'AltFalse'", ",", "ast", ".", "Load", "(", ")", ")", ",", "None", ",", "[", "ast", ".", "Pass", "(", ")", "]", ")", "]", "self", ".", "in_try", "+=", "1", "for", "clause", "in", "node", ".", "ptlist", ":", "res", ".", "body", ".", "append", "(", "ast", ".", "Try", "(", "self", ".", "_clause", "(", "self", ".", "visit", "(", "clause", ")", ")", "+", "alt_true", ",", "alt_false", ",", "[", "]", ",", "[", "]", ")", ")", "self", ".", "in_try", "-=", "1", "res", ".", "body", ".", "append", "(", "self", ".", "__exit_scope", "(", ")", ")", "return", "[", "res", "]" ]
36.484848
16.333333
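The try/except control flow sketched in the docstring can be exercised standalone; a self-contained illustration of the alternative-selection pattern, with AltTrue/AltFalse defined locally and boolean callables standing in for clause code:

class AltTrue(Exception):
    pass

class AltFalse(Exception):
    pass

def alt(*clauses):
    # The first clause that succeeds raises AltTrue; a failing clause raises
    # AltFalse, which simply falls through to the next alternative.
    try:
        for clause in clauses:
            try:
                if not clause():
                    raise AltFalse()
                raise AltTrue()
            except AltFalse:
                pass
        return False
    except AltTrue:
        return True

print(alt(lambda: False, lambda: True))  # True  (second alternative matched)
print(alt(lambda: False))                # False (all alternatives failed)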
def _add_potential(self, potential, parent_tag): """ Adds Potentials to the ProbModelXML. Parameters ---------- potential: dict Dictionary containing Potential data. For example: {'role': 'Utility', 'Variables': ['D0', 'D1', 'C0', 'C1'], 'type': 'Tree/ADD', 'UtilityVariable': 'U1'} parent_tag: etree Element etree element which would contain the potential tag For example: <Element Potentials at 0x7f315fc44b08> <Element Branch at 0x7f315fc44c88> <Element Branch at 0x7f315fc44d88> <Element Subpotentials at 0x7f315fc44e48> Examples -------- >>> writer = ProbModelXMLWriter(model) >>> writer._add_potential(potential, parent_tag) """ potential_type = potential['type'] try: potential_tag = etree.SubElement(parent_tag, 'Potential', attrib={ 'type': potential['type'], 'role': potential['role']}) except KeyError: potential_tag = etree.SubElement(parent_tag, 'Potential', attrib={ 'type': potential['type']}) self._add_element(potential, 'Comment', potential_tag) if 'AdditionalProperties' in potential: self._add_additional_properties(potential_tag, potential['AdditionalProperties']) if potential_type == "delta": etree.SubElement(potential_tag, 'Variable', attrib={'name': potential['Variable']}) self._add_element(potential, 'State', potential_tag) self._add_element(potential, 'StateIndex', potential_tag) self._add_element(potential, 'NumericValue', potential_tag) else: if 'UtilityVariable' in potential: etree.SubElement(potential_tag, 'UtilityVariable', attrib={ 'name': potential['UtilityVariable']}) if 'Variables' in potential: variable_tag = etree.SubElement(potential_tag, 'Variables') for var in sorted(potential['Variables']): etree.SubElement(variable_tag, 'Variable', attrib={'name': var}) for child in sorted(potential['Variables'][var]): etree.SubElement(variable_tag, 'Variable', attrib={'name': child}) self._add_element(potential, 'Values', potential_tag) if 'UncertainValues' in potential: value_tag = etree.SubElement(potential_tag, 'UncertainValues', attrib={}) for value in sorted(potential['UncertainValues']): try: etree.SubElement(value_tag, 'Value', attrib={ 'distribution': value['distribution'], 'name': value['name']}).text = value['value'] except KeyError: etree.SubElement(value_tag, 'Value', attrib={ 'distribution': value['distribution']}).text = value['value'] if 'TopVariable' in potential: etree.SubElement(potential_tag, 'TopVariable', attrib={'name': potential['TopVariable']}) if 'Branches' in potential: branches_tag = etree.SubElement(potential_tag, 'Branches') for branch in potential['Branches']: branch_tag = etree.SubElement(branches_tag, 'Branch') if 'States' in branch: states_tag = etree.SubElement(branch_tag, 'States') for state in sorted(branch['States']): etree.SubElement(states_tag, 'State', attrib={'name': state['name']}) if 'Potential' in branch: self._add_potential(branch['Potential'], branch_tag) self._add_element(potential, 'Label', potential_tag) self._add_element(potential, 'Reference', potential_tag) if 'Thresholds' in branch: thresholds_tag = etree.SubElement(branch_tag, 'Thresholds') for threshold in branch['Thresholds']: try: etree.SubElement(thresholds_tag, 'Threshold', attrib={ 'value': threshold['value'], 'belongsTo': threshold['belongsTo']}) except KeyError: etree.SubElement(thresholds_tag, 'Threshold', attrib={ 'value': threshold['value']}) self._add_element(potential, 'Model', potential_tag) self._add_element(potential, 'Coefficients', potential_tag) self._add_element(potential, 'CovarianceMatrix', potential_tag) if 'Subpotentials' in potential: subpotentials = etree.SubElement(potential_tag, 'Subpotentials') for subpotential in potential['Subpotentials']: self._add_potential(subpotential, subpotentials) if 'Potential' in potential: self._add_potential(potential['Potential'], potential_tag) if 'NumericVariables' in potential: numvar_tag = etree.SubElement(potential_tag, 'NumericVariables') for var in sorted(potential['NumericVariables']): etree.SubElement(numvar_tag, 'Variable', attrib={'name': var})
[ "def", "_add_potential", "(", "self", ",", "potential", ",", "parent_tag", ")", ":", "potential_type", "=", "potential", "[", "'type'", "]", "try", ":", "potential_tag", "=", "etree", ".", "SubElement", "(", "parent_tag", ",", "'Potential'", ",", "attrib", "=", "{", "'type'", ":", "potential", "[", "'type'", "]", ",", "'role'", ":", "potential", "[", "'role'", "]", "}", ")", "except", "KeyError", ":", "potential_tag", "=", "etree", ".", "SubElement", "(", "parent_tag", ",", "'Potential'", ",", "attrib", "=", "{", "'type'", ":", "potential", "[", "'type'", "]", "}", ")", "self", ".", "_add_element", "(", "potential", ",", "'Comment'", ",", "potential_tag", ")", "if", "'AdditionalProperties'", "in", "potential", ":", "self", ".", "_add_additional_properties", "(", "potential_tag", ",", "potential", "[", "'AdditionalProperties'", "]", ")", "if", "potential_type", "==", "\"delta\"", ":", "etree", ".", "SubElement", "(", "potential_tag", ",", "'Variable'", ",", "attrib", "=", "{", "'name'", ":", "potential", "[", "'Variable'", "]", "}", ")", "self", ".", "_add_element", "(", "potential", ",", "'State'", ",", "potential_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'StateIndex'", ",", "potential_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'NumericValue'", ",", "potential_tag", ")", "else", ":", "if", "'UtilityVariable'", "in", "potential", ":", "etree", ".", "SubElement", "(", "potential_tag", ",", "'UtilityVariable'", ",", "attrib", "=", "{", "'name'", ":", "potential", "[", "'UtilityVariable'", "]", "}", ")", "if", "'Variables'", "in", "potential", ":", "variable_tag", "=", "etree", ".", "SubElement", "(", "potential_tag", ",", "'Variables'", ")", "for", "var", "in", "sorted", "(", "potential", "[", "'Variables'", "]", ")", ":", "etree", ".", "SubElement", "(", "variable_tag", ",", "'Variable'", ",", "attrib", "=", "{", "'name'", ":", "var", "}", ")", "for", "child", "in", "sorted", "(", "potential", "[", "'Variables'", "]", "[", "var", "]", ")", ":", "etree", ".", "SubElement", "(", "variable_tag", ",", "'Variable'", ",", "attrib", "=", "{", "'name'", ":", "child", "}", ")", "self", ".", "_add_element", "(", "potential", ",", "'Values'", ",", "potential_tag", ")", "if", "'UncertainValues'", "in", "potential", ":", "value_tag", "=", "etree", ".", "SubElement", "(", "potential_tag", ",", "'UncertainValues'", ",", "attrib", "=", "{", "}", ")", "for", "value", "in", "sorted", "(", "potential", "[", "'UncertainValues'", "]", ")", ":", "try", ":", "etree", ".", "SubElement", "(", "value_tag", ",", "'Value'", ",", "attrib", "=", "{", "'distribution'", ":", "value", "[", "'distribution'", "]", ",", "'name'", ":", "value", "[", "'name'", "]", "}", ")", ".", "text", "=", "value", "[", "'value'", "]", "except", "KeyError", ":", "etree", ".", "SubElement", "(", "value_tag", ",", "'Value'", ",", "attrib", "=", "{", "'distribution'", ":", "value", "[", "'distribution'", "]", "}", ")", ".", "text", "=", "value", "[", "'value'", "]", "if", "'TopVariable'", "in", "potential", ":", "etree", ".", "SubElement", "(", "potential_tag", ",", "'TopVariable'", ",", "attrib", "=", "{", "'name'", ":", "potential", "[", "'TopVariable'", "]", "}", ")", "if", "'Branches'", "in", "potential", ":", "branches_tag", "=", "etree", ".", "SubElement", "(", "potential_tag", ",", "'Branches'", ")", "for", "branch", "in", "potential", "[", "'Branches'", "]", ":", "branch_tag", "=", "etree", ".", "SubElement", "(", "branches_tag", ",", "'Branch'", ")", "if", "'States'", "in", "branch", ":", 
"states_tag", "=", "etree", ".", "SubElement", "(", "branch_tag", ",", "'States'", ")", "for", "state", "in", "sorted", "(", "branch", "[", "'States'", "]", ")", ":", "etree", ".", "SubElement", "(", "states_tag", ",", "'State'", ",", "attrib", "=", "{", "'name'", ":", "state", "[", "'name'", "]", "}", ")", "if", "'Potential'", "in", "branch", ":", "self", ".", "_add_potential", "(", "branch", "[", "'Potential'", "]", ",", "branch_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'Label'", ",", "potential_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'Reference'", ",", "potential_tag", ")", "if", "'Thresholds'", "in", "branch", ":", "thresholds_tag", "=", "etree", ".", "SubElement", "(", "branch_tag", ",", "'Thresholds'", ")", "for", "threshold", "in", "branch", "[", "'Thresholds'", "]", ":", "try", ":", "etree", ".", "SubElement", "(", "thresholds_tag", ",", "'Threshold'", ",", "attrib", "=", "{", "'value'", ":", "threshold", "[", "'value'", "]", ",", "'belongsTo'", ":", "threshold", "[", "'belongsTo'", "]", "}", ")", "except", "KeyError", ":", "etree", ".", "SubElement", "(", "thresholds_tag", ",", "'Threshold'", ",", "attrib", "=", "{", "'value'", ":", "threshold", "[", "'value'", "]", "}", ")", "self", ".", "_add_element", "(", "potential", ",", "'Model'", ",", "potential_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'Coefficients'", ",", "potential_tag", ")", "self", ".", "_add_element", "(", "potential", ",", "'CovarianceMatrix'", ",", "potential_tag", ")", "if", "'Subpotentials'", "in", "potential", ":", "subpotentials", "=", "etree", ".", "SubElement", "(", "potential_tag", ",", "'Subpotentials'", ")", "for", "subpotential", "in", "potential", "[", "'Subpotentials'", "]", ":", "self", ".", "_add_potential", "(", "subpotential", ",", "subpotentials", ")", "if", "'Potential'", "in", "potential", ":", "self", ".", "_add_potential", "(", "potential", "[", "'Potential'", "]", ",", "potential_tag", ")", "if", "'NumericVariables'", "in", "potential", ":", "numvar_tag", "=", "etree", ".", "SubElement", "(", "potential_tag", ",", "'NumericVariables'", ")", "for", "var", "in", "sorted", "(", "potential", "[", "'NumericVariables'", "]", ")", ":", "etree", ".", "SubElement", "(", "numvar_tag", ",", "'Variable'", ",", "attrib", "=", "{", "'name'", ":", "var", "}", ")" ]
57
23.833333
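The recursive SubElement pattern in _add_potential can be reproduced with the standard library's ElementTree (which may differ from the etree the module actually imports); a minimal sketch with a hypothetical add helper:

import xml.etree.ElementTree as etree

def add(parent, potential):
    # One tag per potential, recursing through a Subpotentials container.
    tag = etree.SubElement(parent, 'Potential', attrib={'type': potential['type']})
    subs = potential.get('Subpotentials', [])
    if subs:
        container = etree.SubElement(tag, 'Subpotentials')
        for sub in subs:
            add(container, sub)

root = etree.Element('Potentials')
add(root, {'type': 'Tree/ADD', 'Subpotentials': [{'type': 'delta'}]})
print(etree.tostring(root).decode())
# <Potentials><Potential type="Tree/ADD"><Subpotentials>...</Subpotentials></Potential></Potentials>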
def make_similar_sized_bins(x, n): """Utility function to create a set of bins over the range of values in `x` such that each bin contains roughly the same number of values. Parameters ---------- x : array_like The values to be binned. n : int The number of bins to create. Returns ------- bins : ndarray An array of bin edges. Notes ----- The actual number of bins returned may be less than `n` if `x` contains integer values and any single value is represented more than len(x)//n times. """ # copy and sort the array y = np.array(x).flatten() y.sort() # setup bins bins = [y[0]] # determine step size step = len(y) // n # add bin edges for i in range(step, len(y), step): # get value at this index v = y[i] # only add bin edge if larger than previous if v > bins[-1]: bins.append(v) # fix last bin edge bins[-1] = y[-1] return np.array(bins)
[ "def", "make_similar_sized_bins", "(", "x", ",", "n", ")", ":", "# copy and sort the array", "y", "=", "np", ".", "array", "(", "x", ")", ".", "flatten", "(", ")", "y", ".", "sort", "(", ")", "# setup bins", "bins", "=", "[", "y", "[", "0", "]", "]", "# determine step size", "step", "=", "len", "(", "y", ")", "//", "n", "# add bin edges", "for", "i", "in", "range", "(", "step", ",", "len", "(", "y", ")", ",", "step", ")", ":", "# get value at this index", "v", "=", "y", "[", "i", "]", "# only add bin edge if larger than previous", "if", "v", ">", "bins", "[", "-", "1", "]", ":", "bins", ".", "append", "(", "v", ")", "# fix last bin edge", "bins", "[", "-", "1", "]", "=", "y", "[", "-", "1", "]", "return", "np", ".", "array", "(", "bins", ")" ]
20.957447
23.680851
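With make_similar_sized_bins in scope and numpy assumed, the roughly-equal occupancy is easy to verify:

import numpy as np

x = np.random.RandomState(0).randn(1001)
bins = make_similar_sized_bins(x, 4)
counts, _ = np.histogram(x, bins=bins)
print(len(bins))  # 5 edges -> 4 bins
print(counts)     # each bin holds roughly len(x) // 4 values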
def escalation_date(self, escalation_date): """ Sets the task escalation_date Args: escalation_date: Converted to %Y-%m-%dT%H:%M:%SZ date format """ if not self.can_update(): self._tcex.handle_error(910, [self.type]) escalation_date = self._utils.format_datetime( escalation_date, date_format='%Y-%m-%dT%H:%M:%SZ' ) self._data['escalationDate'] = escalation_date request = {'escalationDate': escalation_date} return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
[ "def", "escalation_date", "(", "self", ",", "escalation_date", ")", ":", "if", "not", "self", ".", "can_update", "(", ")", ":", "self", ".", "_tcex", ".", "handle_error", "(", "910", ",", "[", "self", ".", "type", "]", ")", "escalation_date", "=", "self", ".", "_utils", ".", "format_datetime", "(", "escalation_date", ",", "date_format", "=", "'%Y-%m-%dT%H:%M:%SZ'", ")", "self", ".", "_data", "[", "'escalationDate'", "]", "=", "escalation_date", "request", "=", "{", "'escalationDate'", ":", "escalation_date", "}", "return", "self", ".", "tc_requests", ".", "update", "(", "self", ".", "api_type", ",", "self", ".", "api_sub_type", ",", "self", ".", "unique_id", ",", "request", ")" ]
40.066667
18.333333
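The target date format is plain strftime; a stdlib-only illustration of the shape the tcex utility presumably normalizes arbitrary inputs to:

from datetime import datetime

stamp = datetime(2024, 5, 1, 12, 30).strftime('%Y-%m-%dT%H:%M:%SZ')
print(stamp)  # 2024-05-01T12:30:00Z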
def get_refs(self, location): """Return map of named refs (branches or tags) to commit hashes.""" output = call_subprocess([self.cmd, 'show-ref'], show_stdout=False, cwd=location) rv = {} for line in output.strip().splitlines(): commit, ref = line.split(' ', 1) ref = ref.strip() ref_name = None if ref.startswith('refs/remotes/'): ref_name = ref[len('refs/remotes/'):] elif ref.startswith('refs/heads/'): ref_name = ref[len('refs/heads/'):] elif ref.startswith('refs/tags/'): ref_name = ref[len('refs/tags/'):] if ref_name is not None: rv[ref_name] = commit.strip() return rv
[ "def", "get_refs", "(", "self", ",", "location", ")", ":", "output", "=", "call_subprocess", "(", "[", "self", ".", "cmd", ",", "'show-ref'", "]", ",", "show_stdout", "=", "False", ",", "cwd", "=", "location", ")", "rv", "=", "{", "}", "for", "line", "in", "output", ".", "strip", "(", ")", ".", "splitlines", "(", ")", ":", "commit", ",", "ref", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "ref", "=", "ref", ".", "strip", "(", ")", "ref_name", "=", "None", "if", "ref", ".", "startswith", "(", "'refs/remotes/'", ")", ":", "ref_name", "=", "ref", "[", "len", "(", "'refs/remotes/'", ")", ":", "]", "elif", "ref", ".", "startswith", "(", "'refs/heads/'", ")", ":", "ref_name", "=", "ref", "[", "len", "(", "'refs/heads/'", ")", ":", "]", "elif", "ref", ".", "startswith", "(", "'refs/tags/'", ")", ":", "ref_name", "=", "ref", "[", "len", "(", "'refs/tags/'", ")", ":", "]", "if", "ref_name", "is", "not", "None", ":", "rv", "[", "ref_name", "]", "=", "commit", ".", "strip", "(", ")", "return", "rv" ]
43.333333
11.055556
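The ref-name stripping logic can be checked without a repository by feeding it canned `git show-ref` output (the sample hashes below are made up):

output = (
    "1111111111 refs/heads/main\n"
    "2222222222 refs/remotes/origin/dev\n"
    "3333333333 refs/tags/v1.0\n"
)
rv = {}
for line in output.strip().splitlines():
    commit, ref = line.split(' ', 1)
    ref = ref.strip()
    # Same prefix order as get_refs: remotes, then heads, then tags.
    for prefix in ('refs/remotes/', 'refs/heads/', 'refs/tags/'):
        if ref.startswith(prefix):
            rv[ref[len(prefix):]] = commit.strip()
            break
print(rv)  # {'main': '1111111111', 'origin/dev': '2222222222', 'v1.0': '3333333333'}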
def indicator_pivot(self, indicator_resource): """Pivot point on indicators for this resource. This method will return all *resources* (groups, tasks, victims, etc.) for this resource that are associated with the provided resource id (indicator value). **Example Endpoints URI's** +--------+---------------------------------------------------------------------------------+ | Method | API Endpoint URI's | +========+=================================================================================+ | GET | /v2/indicators/{resourceType}/{resourceId}/groups/{resourceType} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/groups/{resourceType}/{uniqueId} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/tasks/ | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/tasks/{uniqueId} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/victims/ | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/victims/{uniqueId} | +--------+---------------------------------------------------------------------------------+ Args: indicator_resource (object): The indicator resource to pivot on, supplying the resource type (indicator type) and resource id (indicator value). """ resource = self.copy() resource._request_uri = '{}/{}'.format( indicator_resource.request_uri, resource._request_uri ) return resource
[ "def", "indicator_pivot", "(", "self", ",", "indicator_resource", ")", ":", "resource", "=", "self", ".", "copy", "(", ")", "resource", ".", "_request_uri", "=", "'{}/{}'", ".", "format", "(", "indicator_resource", ".", "request_uri", ",", "resource", ".", "_request_uri", ")", "return", "resource" ]
63.911765
39.852941
def extract_paths(self, paths, ignore_nopath): """ Extract the given paths from the domain Attempt to extract all files defined in ``paths`` with the method defined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`; if that fails and `guestfs` is available, it will try extracting the files with guestfs. Args: paths(list of tuples): files to extract in `[(src1, dst1), (src2, dst2)...]` format. ignore_nopath(boolean): if True, nonexistent paths will be ignored. Returns: None Raises: :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a nonexistent path was found on the VM, and `ignore_nopath` is False. :exc:`~lago.plugins.vm.ExtractPathError`: on all other failures. """ try: super().extract_paths( paths=paths, ignore_nopath=ignore_nopath, ) except ExtractPathError as err: LOGGER.debug( '%s: failed extracting files: %s', self.vm.name(), err.message ) if self._has_guestfs: self.extract_paths_dead(paths, ignore_nopath) else: raise
[ "def", "extract_paths", "(", "self", ",", "paths", ",", "ignore_nopath", ")", ":", "try", ":", "super", "(", ")", ".", "extract_paths", "(", "paths", "=", "paths", ",", "ignore_nopath", "=", "ignore_nopath", ",", ")", "except", "ExtractPathError", "as", "err", ":", "LOGGER", ".", "debug", "(", "'%s: failed extracting files: %s'", ",", "self", ".", "vm", ".", "name", "(", ")", ",", "err", ".", "message", ")", "if", "self", ".", "_has_guestfs", ":", "self", ".", "extract_paths_dead", "(", "paths", ",", "ignore_nopath", ")", "else", ":", "raise" ]
33.891892
23.459459
def _get_policies(self): """Returns all the policy names for a given user""" username = self._get_username_for_key() policies = self.client.list_user_policies( UserName=username ) return policies
[ "def", "_get_policies", "(", "self", ")", ":", "username", "=", "self", ".", "_get_username_for_key", "(", ")", "policies", "=", "self", ".", "client", ".", "list_user_policies", "(", "UserName", "=", "username", ")", "return", "policies" ]
34.428571
13.142857
def quaternion_from_euler(angles, order='yzy'): """Generate a quaternion from a set of Euler angles. Args: angles (array_like): Array of Euler angles. order (str): Order of Euler rotations. 'yzy' is default. Returns: Quaternion: Quaternion representation of Euler rotation. """ angles = np.asarray(angles, dtype=float) quat = quaternion_from_axis_rotation(angles[0], order[0])\ * (quaternion_from_axis_rotation(angles[1], order[1]) * quaternion_from_axis_rotation(angles[2], order[2])) quat.normalize(inplace=True) return quat
[ "def", "quaternion_from_euler", "(", "angles", ",", "order", "=", "'yzy'", ")", ":", "angles", "=", "np", ".", "asarray", "(", "angles", ",", "dtype", "=", "float", ")", "quat", "=", "quaternion_from_axis_rotation", "(", "angles", "[", "0", "]", ",", "order", "[", "0", "]", ")", "*", "(", "quaternion_from_axis_rotation", "(", "angles", "[", "1", "]", ",", "order", "[", "1", "]", ")", "*", "quaternion_from_axis_rotation", "(", "angles", "[", "2", "]", ",", "order", "[", "2", "]", ")", ")", "quat", ".", "normalize", "(", "inplace", "=", "True", ")", "return", "quat" ]
36.8125
19.375
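For a sanity check against an established library, scipy's Rotation supports the same 'yzy' Euler sequence (note the conventions may differ: scipy's lowercase sequences are extrinsic and its quaternions are scalar-last, so agreement is only up to convention):

from scipy.spatial.transform import Rotation as R

q = R.from_euler('yzy', [0.1, 0.2, 0.3]).as_quat()
print(q)  # unit quaternion, [x, y, z, w] in scipy's convention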
def data_factory(value, encoding='UTF-8'): """Wrap a Python type in the equivalent C AMQP type. If the Python type has already been wrapped in a ~uamqp.types.AMQPType object - then this will be used to select the appropriate C type. - bool => c_uamqp.BoolValue - int => c_uamqp.IntValue, LongValue, DoubleValue - str => c_uamqp.StringValue - bytes => c_uamqp.BinaryValue - list/set/tuple => c_uamqp.ListValue - dict => c_uamqp.DictValue (AMQP map) - float => c_uamqp.DoubleValue - uuid.UUID => c_uamqp.UUIDValue :param value: The value to wrap. :type value: ~uamqp.types.AMQPType :rtype: uamqp.c_uamqp.AMQPValue """ result = None if value is None: result = c_uamqp.null_value() elif hasattr(value, 'c_data'): result = value.c_data elif isinstance(value, c_uamqp.AMQPValue): result = value elif isinstance(value, bool): result = c_uamqp.bool_value(value) elif isinstance(value, six.text_type): result = c_uamqp.string_value(value.encode(encoding)) elif isinstance(value, six.binary_type): result = c_uamqp.string_value(value) elif isinstance(value, uuid.UUID): result = c_uamqp.uuid_value(value) elif isinstance(value, bytearray): result = c_uamqp.binary_value(value) elif isinstance(value, six.integer_types): result = _convert_py_number(value) elif isinstance(value, float): result = c_uamqp.double_value(value) elif isinstance(value, dict): wrapped_dict = c_uamqp.dict_value() for key, item in value.items(): wrapped_dict[data_factory(key, encoding=encoding)] = data_factory(item, encoding=encoding) result = wrapped_dict elif isinstance(value, (list, set, tuple)): wrapped_list = c_uamqp.list_value() wrapped_list.size = len(value) for index, item in enumerate(value): wrapped_list[index] = data_factory(item, encoding=encoding) result = wrapped_list elif isinstance(value, datetime): timestamp = int((time.mktime(value.utctimetuple()) * 1000) + (value.microsecond/1000)) result = c_uamqp.timestamp_value(timestamp) return result
[ "def", "data_factory", "(", "value", ",", "encoding", "=", "'UTF-8'", ")", ":", "result", "=", "None", "if", "value", "is", "None", ":", "result", "=", "c_uamqp", ".", "null_value", "(", ")", "elif", "hasattr", "(", "value", ",", "'c_data'", ")", ":", "result", "=", "value", ".", "c_data", "elif", "isinstance", "(", "value", ",", "c_uamqp", ".", "AMQPValue", ")", ":", "result", "=", "value", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "result", "=", "c_uamqp", ".", "bool_value", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "result", "=", "c_uamqp", ".", "string_value", "(", "value", ".", "encode", "(", "encoding", ")", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", ":", "result", "=", "c_uamqp", ".", "string_value", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "uuid", ".", "UUID", ")", ":", "result", "=", "c_uamqp", ".", "uuid_value", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "bytearray", ")", ":", "result", "=", "c_uamqp", ".", "binary_value", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "integer_types", ")", ":", "result", "=", "_convert_py_number", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "float", ")", ":", "result", "=", "c_uamqp", ".", "double_value", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "wrapped_dict", "=", "c_uamqp", ".", "dict_value", "(", ")", "for", "key", ",", "item", "in", "value", ".", "items", "(", ")", ":", "wrapped_dict", "[", "data_factory", "(", "key", ",", "encoding", "=", "encoding", ")", "]", "=", "data_factory", "(", "item", ",", "encoding", "=", "encoding", ")", "result", "=", "wrapped_dict", "elif", "isinstance", "(", "value", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", ":", "wrapped_list", "=", "c_uamqp", ".", "list_value", "(", ")", "wrapped_list", ".", "size", "=", "len", "(", "value", ")", "for", "index", ",", "item", "in", "enumerate", "(", "value", ")", ":", "wrapped_list", "[", "index", "]", "=", "data_factory", "(", "item", ",", "encoding", "=", "encoding", ")", "result", "=", "wrapped_list", "elif", "isinstance", "(", "value", ",", "datetime", ")", ":", "timestamp", "=", "int", "(", "(", "time", ".", "mktime", "(", "value", ".", "utctimetuple", "(", ")", ")", "*", "1000", ")", "+", "(", "value", ".", "microsecond", "/", "1000", ")", ")", "result", "=", "c_uamqp", ".", "timestamp_value", "(", "timestamp", ")", "return", "result" ]
41.075472
10.415094
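The long isinstance chain above must test bool before the integer types, since bool is an int subclass; functools.singledispatch resolves that ordering through the MRO automatically. A sketch of the same dispatch pattern with placeholder wrappers standing in for the c_uamqp constructors:

from functools import singledispatch

@singledispatch
def wrap(value):
    raise TypeError("unsupported type: %r" % type(value))

@wrap.register(bool)
def _(value):
    return ('bool', value)

@wrap.register(int)
def _(value):
    return ('int', value)

@wrap.register(float)
def _(value):
    return ('double', value)

print(wrap(True))  # ('bool', True): bool wins over int as the more specific class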
def transform_point(self, x, y): """Transforms the point ``(x, y)`` by this matrix. :param x: X position. :param y: Y position. :type x: float :type y: float :returns: A ``(new_x, new_y)`` tuple of floats. """ xy = ffi.new('double[2]', [x, y]) cairo.cairo_matrix_transform_point(self._pointer, xy + 0, xy + 1) return tuple(xy)
[ "def", "transform_point", "(", "self", ",", "x", ",", "y", ")", ":", "xy", "=", "ffi", ".", "new", "(", "'double[2]'", ",", "[", "x", ",", "y", "]", ")", "cairo", ".", "cairo_matrix_transform_point", "(", "self", ".", "_pointer", ",", "xy", "+", "0", ",", "xy", "+", "1", ")", "return", "tuple", "(", "xy", ")" ]
30.461538
16.230769
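Assuming cairocffi is installed, a translation-only matrix makes the effect of transform_point obvious:

import cairocffi

matrix = cairocffi.Matrix(x0=10, y0=20)  # identity plus a (10, 20) translation
print(matrix.transform_point(1, 2))      # (11.0, 22.0)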
def clean_features(df): """Fixes up columns of the passed DataFrame, such as casting T/F columns to boolean and filling in NaNs for team and opp. :param df: DataFrame of play-by-play data. :returns: Dataframe with cleaned columns. """ df = pd.DataFrame(df) bool_vals = set([True, False, None, np.nan]) sparse_cols = sparse_lineup_cols(df) for col in df: # make indicator columns boolean type (and fill in NaNs) if set(df[col].unique()[:5]) <= bool_vals: df[col] = (df[col] == True) # fill NaN's in sparse lineup columns to 0 elif col in sparse_cols: df[col] = df[col].fillna(0) # fix free throw columns on technicals df.loc[df.is_tech_fta, ['fta_num', 'tot_fta']] = 1 # fill in NaN's/fix off_team and def_team columns df.off_team.fillna(method='bfill', inplace=True) df.def_team.fillna(method='bfill', inplace=True) df.off_team.fillna(method='ffill', inplace=True) df.def_team.fillna(method='ffill', inplace=True) return df
[ "def", "clean_features", "(", "df", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "bool_vals", "=", "set", "(", "[", "True", ",", "False", ",", "None", ",", "np", ".", "nan", "]", ")", "sparse_cols", "=", "sparse_lineup_cols", "(", "df", ")", "for", "col", "in", "df", ":", "# make indicator columns boolean type (and fill in NaNs)", "if", "set", "(", "df", "[", "col", "]", ".", "unique", "(", ")", "[", ":", "5", "]", ")", "<=", "bool_vals", ":", "df", "[", "col", "]", "=", "(", "df", "[", "col", "]", "==", "True", ")", "# fill NaN's in sparse lineup columns to 0", "elif", "col", "in", "sparse_cols", ":", "df", "[", "col", "]", "=", "df", "[", "col", "]", ".", "fillna", "(", "0", ")", "# fix free throw columns on technicals", "df", ".", "loc", "[", "df", ".", "is_tech_fta", ",", "[", "'fta_num'", ",", "'tot_fta'", "]", "]", "=", "1", "# fill in NaN's/fix off_team and def_team columns", "df", ".", "off_team", ".", "fillna", "(", "method", "=", "'bfill'", ",", "inplace", "=", "True", ")", "df", ".", "def_team", ".", "fillna", "(", "method", "=", "'bfill'", ",", "inplace", "=", "True", ")", "df", ".", "off_team", ".", "fillna", "(", "method", "=", "'ffill'", ",", "inplace", "=", "True", ")", "df", ".", "def_team", ".", "fillna", "(", "method", "=", "'ffill'", ",", "inplace", "=", "True", ")", "return", "df" ]
33.032258
16.774194
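The `(df[col] == True)` trick maps NaN to False while casting the column to bool; a small demonstration, pandas and numpy assumed:

import numpy as np
import pandas as pd

s = pd.Series([True, np.nan, False, True])  # object dtype with a gap
print((s == True).tolist())                 # [True, False, False, True], now boolean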
def logout_view(self): """Process the logout link.""" """ Sign the user out.""" # Send user_logged_out signal signals.user_logged_out.send(current_app._get_current_object(), user=current_user) # Use Flask-Login to sign out user logout_user() # Flash a system message flash(_('You have signed out successfully.'), 'success') # Redirect to logout_next endpoint or '/' safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_LOGOUT_ENDPOINT) return redirect(safe_next_url)
[ "def", "logout_view", "(", "self", ")", ":", "\"\"\" Sign the user out.\"\"\"", "# Send user_logged_out signal", "signals", ".", "user_logged_out", ".", "send", "(", "current_app", ".", "_get_current_object", "(", ")", ",", "user", "=", "current_user", ")", "# Use Flask-Login to sign out user", "logout_user", "(", ")", "# Flash a system message", "flash", "(", "_", "(", "'You have signed out successfully.'", ")", ",", "'success'", ")", "# Redirect to logout_next endpoint or '/'", "safe_next_url", "=", "self", ".", "_get_safe_next_url", "(", "'next'", ",", "self", ".", "USER_AFTER_LOGOUT_ENDPOINT", ")", "return", "redirect", "(", "safe_next_url", ")" ]
34.625
21.4375
def unmonitor_instances(self, instance_ids): """ Disable CloudWatch monitoring for the supplied instances. :type instance_ids: list of string :param instance_ids: The instance ids :rtype: list :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` """ params = {} self.build_list_params(params, instance_ids, 'InstanceId') return self.get_list('UnmonitorInstances', params, [('item', InstanceInfo)], verb='POST')
[ "def", "unmonitor_instances", "(", "self", ",", "instance_ids", ")", ":", "params", "=", "{", "}", "self", ".", "build_list_params", "(", "params", ",", "instance_ids", ",", "'InstanceId'", ")", "return", "self", ".", "get_list", "(", "'UnmonitorInstances'", ",", "params", ",", "[", "(", "'item'", ",", "InstanceInfo", ")", "]", ",", "verb", "=", "'POST'", ")" ]
36.714286
18.142857
def plotAccuracyAndMCsDuringDecrementChange(results, title="", yaxis=""): """ Plot accuracy vs decrement value """ decrementRange = [] mcRange = [] for r in results: if r["basalPredictedSegmentDecrement"] not in decrementRange: decrementRange.append(r["basalPredictedSegmentDecrement"]) if r["inputSize"] not in mcRange: mcRange.append(r["inputSize"]) decrementRange.sort() mcRange.sort() print decrementRange print mcRange ######################################################################## # # Accumulate all the results per column in a convergence array. # # accuracy[o,f] = accuracy with o objects in training # and f unique features. accuracy = numpy.zeros((len(mcRange), len(decrementRange))) TMAccuracy = numpy.zeros((len(mcRange), len(decrementRange))) totals = numpy.zeros((len(mcRange), len(decrementRange))) for r in results: dec = r["basalPredictedSegmentDecrement"] nf = r["inputSize"] accuracy[mcRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"] TMAccuracy[mcRange.index(nf), decrementRange.index(dec)] += r["sequenceCorrectClassificationsTM"] totals[mcRange.index(nf), decrementRange.index(dec)] += 1 for i,f in enumerate(mcRange): print i, f, accuracy[i] / totals[i] print i, f, TMAccuracy[i] / totals[i] print i, f, totals[i] print
[ "def", "plotAccuracyAndMCsDuringDecrementChange", "(", "results", ",", "title", "=", "\"\"", ",", "yaxis", "=", "\"\"", ")", ":", "decrementRange", "=", "[", "]", "mcRange", "=", "[", "]", "for", "r", "in", "results", ":", "if", "r", "[", "\"basalPredictedSegmentDecrement\"", "]", "not", "in", "decrementRange", ":", "decrementRange", ".", "append", "(", "r", "[", "\"basalPredictedSegmentDecrement\"", "]", ")", "if", "r", "[", "\"inputSize\"", "]", "not", "in", "mcRange", ":", "mcRange", ".", "append", "(", "r", "[", "\"inputSize\"", "]", ")", "decrementRange", ".", "sort", "(", ")", "mcRange", ".", "sort", "(", ")", "print", "decrementRange", "print", "mcRange", "########################################################################", "#", "# Accumulate all the results per column in a convergence array.", "#", "# accuracy[o,f] = accuracy with o objects in training", "# and f unique features.", "accuracy", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "mcRange", ")", ",", "len", "(", "decrementRange", ")", ")", ")", "TMAccuracy", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "mcRange", ")", ",", "len", "(", "decrementRange", ")", ")", ")", "totals", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "mcRange", ")", ",", "len", "(", "decrementRange", ")", ")", ")", "for", "r", "in", "results", ":", "dec", "=", "r", "[", "\"basalPredictedSegmentDecrement\"", "]", "nf", "=", "r", "[", "\"inputSize\"", "]", "accuracy", "[", "mcRange", ".", "index", "(", "nf", ")", ",", "decrementRange", ".", "index", "(", "dec", ")", "]", "+=", "r", "[", "\"objectAccuracyL2\"", "]", "TMAccuracy", "[", "mcRange", ".", "index", "(", "nf", ")", ",", "decrementRange", ".", "index", "(", "dec", ")", "]", "+=", "r", "[", "\"sequenceCorrectClassificationsTM\"", "]", "totals", "[", "mcRange", ".", "index", "(", "nf", ")", ",", "decrementRange", ".", "index", "(", "dec", ")", "]", "+=", "1", "for", "i", ",", "f", "in", "enumerate", "(", "mcRange", ")", ":", "print", "i", ",", "f", ",", "accuracy", "[", "i", "]", "/", "totals", "[", "i", "]", "print", "i", ",", "f", ",", "TMAccuracy", "[", "i", "]", "/", "totals", "[", "i", "]", "print", "i", ",", "f", ",", "totals", "[", "i", "]", "print" ]
35.157895
21.421053
def _pair_samples_with_pipelines(run_info_yaml, config): """Map samples defined in input file to pipelines to run. """ samples = config_utils.load_config(run_info_yaml) if isinstance(samples, dict): resources = samples.pop("resources") samples = samples["details"] else: resources = {} ready_samples = [] for sample in samples: if "files" in sample: del sample["files"] # add any resources to this item to recalculate global configuration usample = copy.deepcopy(sample) usample.pop("algorithm", None) if "resources" not in usample: usample["resources"] = {} for prog, pkvs in resources.items(): if prog not in usample["resources"]: usample["resources"][prog] = {} if pkvs is not None: for key, val in pkvs.items(): usample["resources"][prog][key] = val config = config_utils.update_w_custom(config, usample) sample["resources"] = {} ready_samples.append(sample) paired = [(x, _get_pipeline(x)) for x in ready_samples] d = defaultdict(list) for x in paired: d[x[1]].append([x[0]]) return d, config
[ "def", "_pair_samples_with_pipelines", "(", "run_info_yaml", ",", "config", ")", ":", "samples", "=", "config_utils", ".", "load_config", "(", "run_info_yaml", ")", "if", "isinstance", "(", "samples", ",", "dict", ")", ":", "resources", "=", "samples", ".", "pop", "(", "\"resources\"", ")", "samples", "=", "samples", "[", "\"details\"", "]", "else", ":", "resources", "=", "{", "}", "ready_samples", "=", "[", "]", "for", "sample", "in", "samples", ":", "if", "\"files\"", "in", "sample", ":", "del", "sample", "[", "\"files\"", "]", "# add any resources to this item to recalculate global configuration", "usample", "=", "copy", ".", "deepcopy", "(", "sample", ")", "usample", ".", "pop", "(", "\"algorithm\"", ",", "None", ")", "if", "\"resources\"", "not", "in", "usample", ":", "usample", "[", "\"resources\"", "]", "=", "{", "}", "for", "prog", ",", "pkvs", "in", "resources", ".", "items", "(", ")", ":", "if", "prog", "not", "in", "usample", "[", "\"resources\"", "]", ":", "usample", "[", "\"resources\"", "]", "[", "prog", "]", "=", "{", "}", "if", "pkvs", "is", "not", "None", ":", "for", "key", ",", "val", "in", "pkvs", ".", "items", "(", ")", ":", "usample", "[", "\"resources\"", "]", "[", "prog", "]", "[", "key", "]", "=", "val", "config", "=", "config_utils", ".", "update_w_custom", "(", "config", ",", "usample", ")", "sample", "[", "\"resources\"", "]", "=", "{", "}", "ready_samples", ".", "append", "(", "sample", ")", "paired", "=", "[", "(", "x", ",", "_get_pipeline", "(", "x", ")", ")", "for", "x", "in", "ready_samples", "]", "d", "=", "defaultdict", "(", "list", ")", "for", "x", "in", "paired", ":", "d", "[", "x", "[", "1", "]", "]", ".", "append", "(", "[", "x", "[", "0", "]", "]", ")", "return", "d", ",", "config" ]
37.9375
11.125
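The final grouping step is a plain defaultdict bucket-by-pipeline; a standalone sketch, with a hypothetical get_pipeline standing in for _get_pipeline and keying off each sample's analysis field (an assumption about what that helper inspects):

from collections import defaultdict

def get_pipeline(sample):
    # hypothetical stand-in for _get_pipeline
    return sample.get("analysis", "variant2")

samples = [{"name": "s1", "analysis": "rnaseq"},
           {"name": "s2", "analysis": "rnaseq"},
           {"name": "s3", "analysis": "variant2"}]
d = defaultdict(list)
for x in [(s, get_pipeline(s)) for s in samples]:
    d[x[1]].append([x[0]])
print(dict(d))  # two rnaseq entries grouped together, one variant2 entry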
def get_responses(self): """Gets list of the latest responses""" response_list = [] for question_map in self._my_map['questions']: response_list.append(self._get_response_from_question_map(question_map)) return ResponseList(response_list)
[ "def", "get_responses", "(", "self", ")", ":", "response_list", "=", "[", "]", "for", "question_map", "in", "self", ".", "_my_map", "[", "'questions'", "]", ":", "response_list", ".", "append", "(", "self", ".", "_get_response_from_question_map", "(", "question_map", ")", ")", "return", "ResponseList", "(", "response_list", ")" ]
46.166667
15
def write_to_datastore(self): """Writes all image batches to the datastore.""" client = self._datastore_client with client.no_transact_batch() as client_batch: for batch_id, batch_data in iteritems(self._data): batch_key = client.key(self._entity_kind_batches, batch_id) batch_entity = client.entity(batch_key) for k, v in iteritems(batch_data): if k != 'images': batch_entity[k] = v client_batch.put(batch_entity) self._write_single_batch_images_internal(batch_id, client_batch)
[ "def", "write_to_datastore", "(", "self", ")", ":", "client", "=", "self", ".", "_datastore_client", "with", "client", ".", "no_transact_batch", "(", ")", "as", "client_batch", ":", "for", "batch_id", ",", "batch_data", "in", "iteritems", "(", "self", ".", "_data", ")", ":", "batch_key", "=", "client", ".", "key", "(", "self", ".", "_entity_kind_batches", ",", "batch_id", ")", "batch_entity", "=", "client", ".", "entity", "(", "batch_key", ")", "for", "k", ",", "v", "in", "iteritems", "(", "batch_data", ")", ":", "if", "k", "!=", "'images'", ":", "batch_entity", "[", "k", "]", "=", "v", "client_batch", ".", "put", "(", "batch_entity", ")", "self", ".", "_write_single_batch_images_internal", "(", "batch_id", ",", "client_batch", ")" ]
45.666667
11.333333
def _initPermConnected(self): """ Returns a randomly generated permanence value for a synapse that is initialized in a connected state. The basic idea here is to initialize permanence values very close to synPermConnected so that a small number of learning steps could make it disconnected or connected. Note: experimentation was done a long time ago on the best way to initialize permanence values, but the history for this particular scheme has been lost. """ p = self._synPermConnected + ( self._synPermMax - self._synPermConnected)*self._random.getReal64() # Ensure we don't have too much unnecessary precision. A full 64 bits of # precision causes numerical stability issues across platforms and across # implementations p = int(p*100000) / 100000.0 return p
[ "def", "_initPermConnected", "(", "self", ")", ":", "p", "=", "self", ".", "_synPermConnected", "+", "(", "self", ".", "_synPermMax", "-", "self", ".", "_synPermConnected", ")", "*", "self", ".", "_random", ".", "getReal64", "(", ")", "# Ensure we don't have too much unnecessary precision. A full 64 bits of", "# precision causes numerical stability issues across platforms and across", "# implementations", "p", "=", "int", "(", "p", "*", "100000", ")", "/", "100000.0", "return", "p" ]
45.166667
25.722222
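The same initialization and truncation scheme, sketched with the standard-library `random` module in place of nupic's `Random.getReal64()`; the parameter values are illustrative.

import random

syn_perm_connected = 0.2  # assumed threshold value
syn_perm_max = 1.0

# Draw a permanence in [synPermConnected, synPermMax).
p = syn_perm_connected + (syn_perm_max - syn_perm_connected) * random.random()
# Truncate to 5 decimal places so results stay stable across
# platforms/implementations, as the original comment explains.
p = int(p * 100000) / 100000.0
assert syn_perm_connected <= p <= syn_perm_max
print(p)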
def bleakest_moves(self, start_game, end_game):
    """Given a range of games, return the bleakest moves.

    Returns a list of (game, move, q) sorted by q.
    """
    bleak = b'bleakest_q'
    rows = self.bt_table.read_rows(
        ROW_PREFIX.format(start_game),
        ROW_PREFIX.format(end_game),
        filter_=bigtable_row_filters.ColumnRangeFilter(
            METADATA, bleak, bleak))

    def parse(r):
        rk = str(r.row_key, 'utf-8')
        g, m = _game_row_key.match(rk).groups()
        q = r.cell_value(METADATA, bleak)
        return int(g), int(m), float(q)
    return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
[ "def", "bleakest_moves", "(", "self", ",", "start_game", ",", "end_game", ")", ":", "bleak", "=", "b'bleakest_q'", "rows", "=", "self", ".", "bt_table", ".", "read_rows", "(", "ROW_PREFIX", ".", "format", "(", "start_game", ")", ",", "ROW_PREFIX", ".", "format", "(", "end_game", ")", ",", "filter_", "=", "bigtable_row_filters", ".", "ColumnRangeFilter", "(", "METADATA", ",", "bleak", ",", "bleak", ")", ")", "def", "parse", "(", "r", ")", ":", "rk", "=", "str", "(", "r", ".", "row_key", ",", "'utf-8'", ")", "g", ",", "m", "=", "_game_row_key", ".", "match", "(", "rk", ")", ".", "groups", "(", ")", "q", "=", "r", ".", "cell_value", "(", "METADATA", ",", "bleak", ")", "return", "int", "(", "g", ")", ",", "int", "(", "m", ")", ",", "float", "(", "q", ")", "return", "sorted", "(", "[", "parse", "(", "r", ")", "for", "r", "in", "rows", "]", ",", "key", "=", "operator", ".", "itemgetter", "(", "2", ")", ")" ]
38.722222
11.5
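A self-contained sketch of the parse-and-sort step in `bleakest_moves`, run on fabricated row keys; the key layout assumed by `_game_row_key` below is hypothetical, since the real regex lives elsewhere in the module.

import operator
import re

_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')  # assumed key layout

rows = [('g_000017_m_030', '-0.92'),
        ('g_000003_m_011', '-0.15'),
        ('g_000009_m_120', '-0.99')]

def parse(row):
    rk, q = row
    g, m = _game_row_key.match(rk).groups()
    return int(g), int(m), float(q)

# Sort by q (the third tuple element), bleakest first.
print(sorted([parse(r) for r in rows], key=operator.itemgetter(2)))
# [(9, 120, -0.99), (17, 30, -0.92), (3, 11, -0.15)]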
def strip_qos_cntrl(self, idx, prot_type):
    """strip (2 bytes) wlan.qos
    :idx: int
    :prot_type: string
        802.11 protocol type (.11ac, .11a, .11n, etc.)
    :return: int
        number of processed bytes
    :return: int
        qos priority
    :return: int
        qos bit
    :return: int
        qos acknowledgement
    :return: int
        amsdupresent (aggregated mac service data unit)
    """
    qos_cntrl, = struct.unpack('H', self._packet[idx:idx + 2])
    qos_cntrl_bits = format(qos_cntrl, '016b')[::-1]
    qos_pri = qos_cntrl & 0x000f
    qos_bit = int(qos_cntrl_bits[5])
    qos_ack = int(qos_cntrl_bits[6:8], 2)
    amsdupresent = 0
    if prot_type == '.11ac':
        amsdupresent = int(qos_cntrl_bits[7])
    return 2, qos_pri, qos_bit, qos_ack, amsdupresent
[ "def", "strip_qos_cntrl", "(", "self", ",", "idx", ",", "prot_type", ")", ":", "qos_cntrl", ",", "=", "struct", ".", "unpack", "(", "'H'", ",", "self", ".", "_packet", "[", "idx", ":", "idx", "+", "2", "]", ")", "qos_cntrl_bits", "=", "format", "(", "qos_cntrl", ",", "'016b'", ")", "[", ":", ":", "-", "1", "]", "qos_pri", "=", "qos_cntrl", "&", "0x000f", "qos_bit", "=", "int", "(", "qos_cntrl_bits", "[", "5", "]", ")", "qos_ack", "=", "int", "(", "qos_cntrl_bits", "[", "6", ":", "8", "]", ",", "2", ")", "amsdupresent", "=", "0", "if", "prot_type", "==", "'.11ac'", ":", "amsdupresent", "=", "int", "(", "qos_cntrl_bits", "[", "7", "]", ")", "return", "2", ",", "qos_pri", ",", "qos_bit", ",", "qos_ack", ",", "amsdupresent" ]
34.36
12.92
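A runnable sketch of the QoS Control field decoding above, fed a fabricated 2-byte packet slice instead of a captured frame.

import struct

packet = struct.pack('H', 0b0000000011100101)  # made-up QoS Control value
qos_cntrl, = struct.unpack('H', packet[0:2])
qos_cntrl_bits = format(qos_cntrl, '016b')[::-1]  # reversed: LSB first

qos_pri = qos_cntrl & 0x000f        # priority lives in the low 4 bits
qos_bit = int(qos_cntrl_bits[5])
qos_ack = int(qos_cntrl_bits[6:8], 2)
print(qos_pri, qos_bit, qos_ack)    # 5 1 3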
def get(self, request, bot_id, id, format=None):
    """
    Get list of source states of a handler
    ---
    serializer: StateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    return super(SourceStateList, self).get(request, bot_id, id, format)
[ "def", "get", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "SourceStateList", ",", "self", ")", ".", "get", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
32.5
11.5
def _handle_mouse(self, ev):
    """
    Handle mouse events. Return a list of KeyPress instances.
    """
    FROM_LEFT_1ST_BUTTON_PRESSED = 0x1

    result = []

    # Check event type.
    if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:
        # On a key press, generate both the mouse down and up event.
        for event_type in [MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP]:
            data = ';'.join([
                event_type,
                str(ev.MousePosition.X),
                str(ev.MousePosition.Y)
            ])
            result.append(KeyPress(Keys.WindowsMouseEvent, data))

    return result
[ "def", "_handle_mouse", "(", "self", ",", "ev", ")", ":", "FROM_LEFT_1ST_BUTTON_PRESSED", "=", "0x1", "result", "=", "[", "]", "# Check event type.", "if", "ev", ".", "ButtonState", "==", "FROM_LEFT_1ST_BUTTON_PRESSED", ":", "# On a key press, generate both the mouse down and up event.", "for", "event_type", "in", "[", "MouseEventType", ".", "MOUSE_DOWN", ",", "MouseEventType", ".", "MOUSE_UP", "]", ":", "data", "=", "';'", ".", "join", "(", "[", "event_type", ",", "str", "(", "ev", ".", "MousePosition", ".", "X", ")", ",", "str", "(", "ev", ".", "MousePosition", ".", "Y", ")", "]", ")", "result", ".", "append", "(", "KeyPress", "(", "Keys", ".", "WindowsMouseEvent", ",", "data", ")", ")", "return", "result" ]
33.6
18.9
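The encoding idea in `_handle_mouse`, sketched with plain stand-ins for the prompt_toolkit types and the win32 event record: one physical click is reported as a down event and an up event, each serialized as a semicolon-joined string.

from collections import namedtuple

MouseEvent = namedtuple('MouseEvent', ['ButtonState', 'MousePosition'])
Point = namedtuple('Point', ['X', 'Y'])
FROM_LEFT_1ST_BUTTON_PRESSED = 0x1

def handle_mouse(ev):
    result = []
    if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:
        # Emit both halves of the click as separate events.
        for event_type in ['MOUSE_DOWN', 'MOUSE_UP']:
            data = ';'.join([event_type,
                             str(ev.MousePosition.X),
                             str(ev.MousePosition.Y)])
            result.append(('WindowsMouseEvent', data))
    return result

print(handle_mouse(MouseEvent(0x1, Point(10, 4))))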
def cleanup(self):
    """
    Attempt to set a new current symlink if it is broken. If no other
    prefixes exist and the workdir is empty, try to delete the entire
    workdir.

    Raises:
        :exc:`~MalformedWorkdir`: if no prefixes were found, but the
            workdir is not empty.
    """
    current = self.join('current')
    if not os.path.exists(current):
        LOGGER.debug('found broken current symlink, removing: %s', current)
        os.unlink(self.join('current'))
        self.current = None
        try:
            self._update_current()
        except PrefixNotFound:
            if not os.listdir(self.path):
                LOGGER.debug('workdir is empty, removing %s', self.path)
                os.rmdir(self.path)
            else:
                raise MalformedWorkdir(
                    (
                        'Unable to find any prefixes in {0}, '
                        'but the directory looks malformed. '
                        'Try deleting it manually.'
                    ).format(self.path)
                )
[ "def", "cleanup", "(", "self", ")", ":", "current", "=", "self", ".", "join", "(", "'current'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "current", ")", ":", "LOGGER", ".", "debug", "(", "'found broken current symlink, removing: %s'", ",", "current", ")", "os", ".", "unlink", "(", "self", ".", "join", "(", "'current'", ")", ")", "self", ".", "current", "=", "None", "try", ":", "self", ".", "_update_current", "(", ")", "except", "PrefixNotFound", ":", "if", "not", "os", ".", "listdir", "(", "self", ".", "path", ")", ":", "LOGGER", ".", "debug", "(", "'workdir is empty, removing %s'", ",", "self", ".", "path", ")", "os", ".", "rmdir", "(", "self", ".", "path", ")", "else", ":", "raise", "MalformedWorkdir", "(", "(", "'Unable to find any prefixes in {0}, '", "'but the directory looks malformed. '", "'Try deleting it manually.'", ")", ".", "format", "(", "self", ".", "path", ")", ")" ]
38.1
16.833333
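The detail `cleanup` depends on, demonstrated in a throwaway temp directory: `os.path.exists()` follows symlinks, so it returns False for a broken link, while `os.path.lexists()` sees the link itself. (POSIX assumed; creating symlinks on Windows needs extra privileges.)

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, 'prefix-1')
current = os.path.join(workdir, 'current')
os.symlink(target, current)         # target never created -> broken link

assert not os.path.exists(current)  # follows the link: looks absent
assert os.path.lexists(current)     # the link itself is there
os.unlink(current)                  # safe to remove, as cleanup() does
os.rmdir(workdir)                   # an empty workdir can be removed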
def parse_superbox(self, fptr):
    """Parse a superbox (a box consisting of nothing but other boxes).

    Parameters
    ----------
    fptr : file
        Open file object.

    Returns
    -------
    list
        List of top-level boxes in the JPEG 2000 file.
    """
    superbox = []

    start = fptr.tell()

    while True:

        # Are we at the end of the superbox?
        if start >= self.offset + self.length:
            break

        read_buffer = fptr.read(8)
        if len(read_buffer) < 8:
            msg = "Extra bytes at end of file ignored."
            warnings.warn(msg, UserWarning)
            return superbox

        (box_length, box_id) = struct.unpack('>I4s', read_buffer)
        if box_length == 0:
            # The length of the box is presumed to last until the end of
            # the file.  Compute the effective length of the box.
            num_bytes = os.path.getsize(fptr.name) - fptr.tell() + 8
        elif box_length == 1:
            # The length of the box is in the XL field, a 64-bit value.
            read_buffer = fptr.read(8)
            num_bytes, = struct.unpack('>Q', read_buffer)
        else:
            # The box_length value really is the length of the box!
            num_bytes = box_length

        box = self._parse_this_box(fptr, box_id, start, num_bytes)

        superbox.append(box)

        # Position to the start of the next box.
        if num_bytes > self.length:
            # Length of the current box goes past the end of the
            # enclosing superbox.
            msg = '{0} box has incorrect box length ({1})'
            msg = msg.format(box_id, num_bytes)
            warnings.warn(msg)
        elif fptr.tell() > start + num_bytes:
            # The box must be invalid somehow, as the file pointer is
            # positioned past the end of the box.
            msg = ('{box_id} box may be invalid, the file pointer is '
                   'positioned {num_bytes} bytes past the end of the box.')
            msg = msg.format(box_id=box_id,
                             num_bytes=fptr.tell() - (start + num_bytes))
            warnings.warn(msg, UserWarning)
        fptr.seek(start + num_bytes)

        start += num_bytes

    return superbox
[ "def", "parse_superbox", "(", "self", ",", "fptr", ")", ":", "superbox", "=", "[", "]", "start", "=", "fptr", ".", "tell", "(", ")", "while", "True", ":", "# Are we at the end of the superbox?", "if", "start", ">=", "self", ".", "offset", "+", "self", ".", "length", ":", "break", "read_buffer", "=", "fptr", ".", "read", "(", "8", ")", "if", "len", "(", "read_buffer", ")", "<", "8", ":", "msg", "=", "\"Extra bytes at end of file ignored.\"", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ")", "return", "superbox", "(", "box_length", ",", "box_id", ")", "=", "struct", ".", "unpack", "(", "'>I4s'", ",", "read_buffer", ")", "if", "box_length", "==", "0", ":", "# The length of the box is presumed to last until the end of", "# the file. Compute the effective length of the box.", "num_bytes", "=", "os", ".", "path", ".", "getsize", "(", "fptr", ".", "name", ")", "-", "fptr", ".", "tell", "(", ")", "+", "8", "elif", "box_length", "==", "1", ":", "# The length of the box is in the XL field, a 64-bit value.", "read_buffer", "=", "fptr", ".", "read", "(", "8", ")", "num_bytes", ",", "=", "struct", ".", "unpack", "(", "'>Q'", ",", "read_buffer", ")", "else", ":", "# The box_length value really is the length of the box!", "num_bytes", "=", "box_length", "box", "=", "self", ".", "_parse_this_box", "(", "fptr", ",", "box_id", ",", "start", ",", "num_bytes", ")", "superbox", ".", "append", "(", "box", ")", "# Position to the start of the next box.", "if", "num_bytes", ">", "self", ".", "length", ":", "# Length of the current box goes past the end of the", "# enclosing superbox.", "msg", "=", "'{0} box has incorrect box length ({1})'", "msg", "=", "msg", ".", "format", "(", "box_id", ",", "num_bytes", ")", "warnings", ".", "warn", "(", "msg", ")", "elif", "fptr", ".", "tell", "(", ")", ">", "start", "+", "num_bytes", ":", "# The box must be invalid somehow, as the file pointer is", "# positioned past the end of the box.", "msg", "=", "(", "'{box_id} box may be invalid, the file pointer is '", "'positioned {num_bytes} bytes past the end of the box.'", ")", "msg", "=", "msg", ".", "format", "(", "box_id", "=", "box_id", ",", "num_bytes", "=", "fptr", ".", "tell", "(", ")", "-", "(", "start", "+", "num_bytes", ")", ")", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ")", "fptr", ".", "seek", "(", "start", "+", "num_bytes", ")", "start", "+=", "num_bytes", "return", "superbox" ]
34.565217
21.666667
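A sketch of the box-header decoding loop in `parse_superbox`, run against a synthetic byte string instead of a real JPEG 2000 file; the two fabricated boxes below exercise both the ordinary length field and the L=1 / XLBox (64-bit length) convention.

import io
import struct

# 'ftyp' box with an explicit 16-byte length, then an 'xml ' box whose
# length field is 1, meaning the real length is in the 8-byte XL field.
buf = struct.pack('>I4s8s', 16, b'ftyp', b'\x00' * 8)
buf += struct.pack('>I4sQ', 1, b'xml ', 20) + b'\x00' * 4
fptr = io.BytesIO(buf)

while True:
    header = fptr.read(8)
    if len(header) < 8:
        break
    box_length, box_id = struct.unpack('>I4s', header)
    if box_length == 1:
        # Length is in the XL field, the 8 bytes after the header.
        num_bytes, = struct.unpack('>Q', fptr.read(8))
    else:
        num_bytes = box_length
    print(box_id, num_bytes)  # b'ftyp' 16, then b'xml ' 20
    # Skip the remaining payload to land on the next box header.
    fptr.seek(num_bytes - (16 if box_length == 1 else 8), io.SEEK_CUR)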
def hist(table, field=-1, class_column=None, title='', verbosity=2, **kwargs):
    """Plot discrete PDFs

    >>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC'))
    >>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0)
    >>> len(hist(df, verbosity=0, class_column='Class'))
    3
    """
    field = fuzzy_index_match(table, field)
    if not isinstance(table, (pd.DataFrame, basestring)):
        try:
            table = make_dataframe(table.objects.filter(**{field + '__isnull': False}))
        except:
            table = table
    # labels = get_column_labels(table)
    try:
        table = table[pd.notnull(table[field])]
    except:
        pass
    series_labels = []
    if class_column is not None:
        series_labels = sorted(set(table[class_column]))
    labels = [str(c) for c in series_labels] + ['all']
    default_kwargs = {
        'normed': False,
        'histtype': 'bar',
        'color': seaborn.color_palette(),
        'label': labels,
        'log': True,
        'bins': 10,
    }
    default_kwargs.update(kwargs)
    num_colors = len(default_kwargs['color'])
    num_labels = len(default_kwargs['label'])
    default_kwargs['color'] = [default_kwargs['color'][i % num_colors]
                               for i in range(num_labels)]

    if not title:
        title = '{} vs. {}'.format(titlecase(str(field).replace('_', ' ')),
                                   titlecase(str(class_column).replace('_', ' ')))
    if verbosity > 0:
        print('Plotting histogram titled: {}'.format(title))
    if verbosity > 1:
        print('histogram configuration: {}'.format(default_kwargs))
    x = [table[(table[class_column].isnull() if pd.isnull(c)
                else table[class_column] == c)][field].values
         for c in series_labels]
    x += [table[field].values]
    if not default_kwargs['normed']:
        default_kwargs['weights'] = [pd.np.ones_like(x_c) / float(len(x_c))
                                     for x_c in x]
    elif isinstance(default_kwargs['normed'], int) and default_kwargs['normed'] < 0:
        default_kwargs['normed'] = 0
    bins = default_kwargs['bins']
    # FIXME: x log scaling doesn't work
    if False and default_kwargs['log'] and isinstance(bins, int):
        max_x = max(pd.np.max(x_c) for x_c in x)
        min_x = min(pd.np.min(x_c) for x_c in x)
        if pd.isnull(min_x) or not(min_x):
            min_x = max_x / 10.
        default_kwargs['bins'] = pd.np.logspace(min_x, max_x, bins)

    fig, ax = plt.subplots()
    ans = plt.hist(x, **default_kwargs)
    # FIXME: x log scaling doesn't work
    if False and default_kwargs['log'] and isinstance(bins, int):
        ax.set_xscale('log')
    if verbosity > 1:
        plt.legend(default_kwargs['label'])
    try:
        plt.show(block=False)
    except:
        plt.show()
    plt.title(title)
    plt.xlabel(titlecase(field.replace('_', ' ')))
    if 'weights' in default_kwargs:
        plt.ylabel('Normalized Frequency or Probability')
    elif default_kwargs['normed']:
        plt.ylabel('Normalized Count')
    else:
        plt.ylabel('Count')
    if verbosity > 2:
        plt.savefig(make_timestamp() + '--' + title.replace(' ', '-') + '.png',
                    transparent=True)
    return ans
[ "def", "hist", "(", "table", ",", "field", "=", "-", "1", ",", "class_column", "=", "None", ",", "title", "=", "''", ",", "verbosity", "=", "2", ",", "*", "*", "kwargs", ")", ":", "field", "=", "fuzzy_index_match", "(", "table", ",", "field", ")", "if", "not", "isinstance", "(", "table", ",", "(", "pd", ".", "DataFrame", ",", "basestring", ")", ")", ":", "try", ":", "table", "=", "make_dataframe", "(", "table", ".", "objects", ".", "filter", "(", "*", "*", "{", "field", "+", "'__isnull'", ":", "False", "}", ")", ")", "except", ":", "table", "=", "table", "# labels = get_column_labels(table)", "try", ":", "table", "=", "table", "[", "pd", ".", "notnull", "(", "table", "[", "field", "]", ")", "]", "except", ":", "pass", "series_labels", "=", "[", "]", "if", "class_column", "is", "not", "None", ":", "series_labels", "=", "sorted", "(", "set", "(", "table", "[", "class_column", "]", ")", ")", "labels", "=", "[", "str", "(", "c", ")", "for", "c", "in", "series_labels", "]", "+", "[", "'all'", "]", "default_kwargs", "=", "{", "'normed'", ":", "False", ",", "'histtype'", ":", "'bar'", ",", "'color'", ":", "seaborn", ".", "color_palette", "(", ")", ",", "'label'", ":", "labels", ",", "'log'", ":", "True", ",", "'bins'", ":", "10", ",", "}", "default_kwargs", ".", "update", "(", "kwargs", ")", "num_colors", "=", "len", "(", "default_kwargs", "[", "'color'", "]", ")", "num_labels", "=", "len", "(", "default_kwargs", "[", "'label'", "]", ")", "default_kwargs", "[", "'color'", "]", "=", "[", "default_kwargs", "[", "'color'", "]", "[", "i", "%", "num_colors", "]", "for", "i", "in", "range", "(", "num_labels", ")", "]", "if", "not", "title", ":", "title", "=", "'{} vs. {}'", ".", "format", "(", "titlecase", "(", "str", "(", "field", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ")", ",", "titlecase", "(", "str", "(", "class_column", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ")", ")", "if", "verbosity", ">", "0", ":", "print", "(", "'Plotting histogram titled: {}'", ".", "format", "(", "title", ")", ")", "if", "verbosity", ">", "1", ":", "print", "(", "'histogram configuration: {}'", ".", "format", "(", "default_kwargs", ")", ")", "x", "=", "[", "table", "[", "(", "table", "[", "class_column", "]", ".", "isnull", "(", ")", "if", "pd", ".", "isnull", "(", "c", ")", "else", "table", "[", "class_column", "]", "==", "c", ")", "]", "[", "field", "]", ".", "values", "for", "c", "in", "series_labels", "]", "x", "+=", "[", "table", "[", "field", "]", ".", "values", "]", "if", "not", "default_kwargs", "[", "'normed'", "]", ":", "default_kwargs", "[", "'weights'", "]", "=", "[", "pd", ".", "np", ".", "ones_like", "(", "x_c", ")", "/", "float", "(", "len", "(", "x_c", ")", ")", "for", "x_c", "in", "x", "]", "elif", "isinstance", "(", "default_kwargs", "[", "'normed'", "]", ",", "int", ")", "and", "default_kwargs", "[", "'normed'", "]", "<", "0", ":", "default_kwargs", "[", "'normed'", "]", "=", "0", "bins", "=", "default_kwargs", "[", "'bins'", "]", "# FIXME: x log scaling doesn't work", "if", "False", "and", "default_kwargs", "[", "'log'", "]", "and", "isinstance", "(", "bins", ",", "int", ")", ":", "max_x", "=", "max", "(", "pd", ".", "np", ".", "max", "(", "x_c", ")", "for", "x_c", "in", "x", ")", "min_x", "=", "min", "(", "pd", ".", "np", ".", "min", "(", "x_c", ")", "for", "x_c", "in", "x", ")", "if", "pd", ".", "isnull", "(", "min_x", ")", "or", "not", "(", "min_x", ")", ":", "min_x", "=", "max_x", "/", "10.", "default_kwargs", "[", "'bins'", "]", "=", "pd", ".", "np", ".", "logspace", "(", 
"min_x", ",", "max_x", ",", "bins", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "ans", "=", "plt", ".", "hist", "(", "x", ",", "*", "*", "default_kwargs", ")", "# FIXME: x log scaling doesn't work", "if", "False", "and", "default_kwargs", "[", "'log'", "]", "and", "isinstance", "(", "bins", ",", "int", ")", ":", "ax", ".", "set_xscale", "(", "'log'", ")", "if", "verbosity", ">", "1", ":", "plt", ".", "legend", "(", "default_kwargs", "[", "'label'", "]", ")", "try", ":", "plt", ".", "show", "(", "block", "=", "False", ")", "except", ":", "plt", ".", "show", "(", ")", "plt", ".", "title", "(", "title", ")", "plt", ".", "xlabel", "(", "titlecase", "(", "field", ".", "replace", "(", "'_'", ",", "' '", ")", ")", ")", "if", "'weights'", "in", "default_kwargs", ":", "plt", ".", "ylabel", "(", "'Normalized Frequency or Probability'", ")", "elif", "default_kwargs", "[", "'normed'", "]", ":", "plt", ".", "ylabel", "(", "'Normalized Count'", ")", "else", ":", "plt", ".", "ylabel", "(", "'Count'", ")", "if", "verbosity", ">", "2", ":", "plt", ".", "savefig", "(", "make_timestamp", "(", ")", "+", "'--'", "+", "title", ".", "replace", "(", "' '", ",", "'-'", ")", "+", "'.png'", ",", "transparent", "=", "True", ")", "return", "ans" ]
36.476744
19.895349
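A minimal sketch of the core trick in `hist()`: per-class weight vectors so each class's bars sum to 1 when `normed` is off. The data is synthetic and the original's helpers (fuzzy_index_match, make_dataframe, etc.) are omitted.

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
values = rng.normal(size=99)
classes = values > 0

# One array per class, plus one for 'all', mirroring hist()'s x list.
x = [values[classes == c] for c in (False, True)] + [values]
weights = [np.ones_like(x_c) / float(len(x_c)) for x_c in x]

plt.hist(x, bins=10, weights=weights, label=['False', 'True', 'all'])
plt.legend()
plt.ylabel('Normalized Frequency or Probability')
plt.show()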