Dataset columns: text (string, lengths 89 to 104k), code_tokens (list of token strings), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630). Each sample below shows the text (source code) followed by its avg_line_len and score.
def genargs() -> ArgumentParser:
    """
    Create a command line parser
    :return: parser
    """
    parser = ArgumentParser()
    parser.add_argument("infile", help="Input JSG specification")
    parser.add_argument("-o", "--outfile", help="Output python file (Default: {infile}.py)")
    parser.add_argument("-e", "--evaluate", help="Evaluate resulting python file as a test", action="store_true")
    parser.add_argument("-v", "--verbose", help="Verbose output", action="store_true")
    return parser
[ "def", "genargs", "(", ")", "->", "ArgumentParser", ":", "parser", "=", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"infile\"", ",", "help", "=", "\"Input JSG specification\"", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "\"--outfile\"", ",", "help", "=", "\"Output python file (Default: {infile}.py)\"", ")", "parser", ".", "add_argument", "(", "\"-e\"", ",", "\"--evaluate\"", ",", "help", "=", "\"Evaluate resulting python file as a test\"", ",", "action", "=", "\"store_true\"", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--verbose\"", ",", "help", "=", "\"Verbose output\"", ",", "action", "=", "\"store_true\"", ")", "return", "parser" ]
avg_line_len: 45.363636, score: 24.272727
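A minimal sketch of how this parser might be driven; the sample argv list is made up for illustration:

    # Hypothetical driver for genargs(); the argv values are illustrative only.
    if __name__ == "__main__":
        parser = genargs()
        opts = parser.parse_args(["schema.jsg", "-o", "schema.py", "-v"])
        print(opts.infile, opts.outfile, opts.verbose)  # schema.jsg schema.py True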
def get_train_err(htfa, data, F):
    """Calculate training error

    Parameters
    ----------
    htfa : HTFA
        An instance of HTFA, factor analysis class in BrainIAK.

    data : 2D array
        Input data to HTFA.

    F : 2D array
        HTFA factor matrix.

    Returns
    -------
    float
        Returns root mean squared error on training.
    """
    W = htfa.get_weights(data, F)
    return recon_err(data, F, W)
[ "def", "get_train_err", "(", "htfa", ",", "data", ",", "F", ")", ":", "W", "=", "htfa", ".", "get_weights", "(", "data", ",", "F", ")", "return", "recon_err", "(", "data", ",", "F", ",", "W", ")" ]
avg_line_len: 17.75, score: 23.291667
def filter_pem(data):
    '''Processes the bytes for PEM certificates.

    Returns:
        ``set`` containing each certificate
    '''
    assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data))
    certs = set()
    new_list = []
    in_pem_block = False

    for line in re.split(br'[\r\n]+', data):
        if line == b'-----BEGIN CERTIFICATE-----':
            assert not in_pem_block
            in_pem_block = True
        elif line == b'-----END CERTIFICATE-----':
            assert in_pem_block
            in_pem_block = False

            content = b''.join(new_list)
            content = rewrap_bytes(content)

            certs.add(b'-----BEGIN CERTIFICATE-----\n' +
                      content +
                      b'\n-----END CERTIFICATE-----\n')

            new_list = []
        elif in_pem_block:
            new_list.append(line)

    return certs
[ "def", "filter_pem", "(", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", ",", "'Expect bytes. Got {}.'", ".", "format", "(", "type", "(", "data", ")", ")", "certs", "=", "set", "(", ")", "new_list", "=", "[", "]", "in_pem_block", "=", "False", "for", "line", "in", "re", ".", "split", "(", "br'[\\r\\n]+'", ",", "data", ")", ":", "if", "line", "==", "b'-----BEGIN CERTIFICATE-----'", ":", "assert", "not", "in_pem_block", "in_pem_block", "=", "True", "elif", "line", "==", "b'-----END CERTIFICATE-----'", ":", "assert", "in_pem_block", "in_pem_block", "=", "False", "content", "=", "b''", ".", "join", "(", "new_list", ")", "content", "=", "rewrap_bytes", "(", "content", ")", "certs", ".", "add", "(", "b'-----BEGIN CERTIFICATE-----\\n'", "+", "content", "+", "b'\\n-----END CERTIFICATE-----\\n'", ")", "new_list", "=", "[", "]", "elif", "in_pem_block", ":", "new_list", ".", "append", "(", "line", ")", "return", "certs" ]
avg_line_len: 27.903226, score: 19
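A hypothetical call with a certificate bundle read from disk; note that filter_pem depends on the module's rewrap_bytes() helper, which is not shown here, and the filename is illustrative:

    with open('ca-bundle.pem', 'rb') as f:
        certs = filter_pem(f.read())
    print(len(certs))  # number of distinct certificates found in the bundle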
def encrypt_stream(mode, in_stream, out_stream, block_size=BLOCK_SIZE, padding=PADDING_DEFAULT):
    'Encrypts a stream of bytes from in_stream to out_stream using mode.'

    encrypter = Encrypter(mode, padding=padding)
    _feed_stream(encrypter, in_stream, out_stream, block_size)
[ "def", "encrypt_stream", "(", "mode", ",", "in_stream", ",", "out_stream", ",", "block_size", "=", "BLOCK_SIZE", ",", "padding", "=", "PADDING_DEFAULT", ")", ":", "encrypter", "=", "Encrypter", "(", "mode", ",", "padding", "=", "padding", ")", "_feed_stream", "(", "encrypter", ",", "in_stream", ",", "out_stream", ",", "block_size", ")" ]
avg_line_len: 57, score: 33
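A sketch of an in-memory call, assuming the caller constructs a mode object (e.g. an AES-CBC mode instance) from the surrounding library; the mode construction itself is not shown:

    import io
    plaintext = io.BytesIO(b'attack at dawn')
    ciphertext = io.BytesIO()
    # encrypt_stream(mode, plaintext, ciphertext)  # mode object assumed, built elsewhere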
def _filter_commands(ctx, commands=None):
    """Return list of used commands."""
    lookup = getattr(ctx.command, 'commands', {})
    if not lookup and isinstance(ctx.command, click.MultiCommand):
        lookup = _get_lazyload_commands(ctx.command)

    if commands is None:
        return sorted(lookup.values(), key=lambda item: item.name)

    names = [name.strip() for name in commands.split(',')]
    return [lookup[name] for name in names if name in lookup]
[ "def", "_filter_commands", "(", "ctx", ",", "commands", "=", "None", ")", ":", "lookup", "=", "getattr", "(", "ctx", ".", "command", ",", "'commands'", ",", "{", "}", ")", "if", "not", "lookup", "and", "isinstance", "(", "ctx", ".", "command", ",", "click", ".", "MultiCommand", ")", ":", "lookup", "=", "_get_lazyload_commands", "(", "ctx", ".", "command", ")", "if", "commands", "is", "None", ":", "return", "sorted", "(", "lookup", ".", "values", "(", ")", ",", "key", "=", "lambda", "item", ":", "item", ".", "name", ")", "names", "=", "[", "name", ".", "strip", "(", ")", "for", "name", "in", "commands", ".", "split", "(", "','", ")", "]", "return", "[", "lookup", "[", "name", "]", "for", "name", "in", "names", "if", "name", "in", "lookup", "]" ]
avg_line_len: 41.454545, score: 19
def swap_environment_cnames(self, from_env_name, to_env_name):
    """
    Swaps cnames for an environment
    """
    self.ebs.swap_environment_cnames(source_environment_name=from_env_name,
                                     destination_environment_name=to_env_name)
[ "def", "swap_environment_cnames", "(", "self", ",", "from_env_name", ",", "to_env_name", ")", ":", "self", ".", "ebs", ".", "swap_environment_cnames", "(", "source_environment_name", "=", "from_env_name", ",", "destination_environment_name", "=", "to_env_name", ")" ]
avg_line_len: 47.333333, score: 17.333333
def get_instance(self, payload):
    """
    Build an instance of SessionInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.proxy.v1.service.session.SessionInstance
    :rtype: twilio.rest.proxy.v1.service.session.SessionInstance
    """
    return SessionInstance(self._version, payload, service_sid=self._solution['service_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "SessionInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", ")" ]
avg_line_len: 39.3, score: 22.7
def users(self, user_base='active'):
    """Return dict of users"""
    if not getattr(self, '_%s_users' % user_base):
        self._get_users(user_base)
    return getattr(self, '_%s_users' % user_base)
[ "def", "users", "(", "self", ",", "user_base", "=", "'active'", ")", ":", "if", "not", "getattr", "(", "self", ",", "'_%s_users'", "%", "user_base", ")", ":", "self", ".", "_get_users", "(", "user_base", ")", "return", "getattr", "(", "self", ",", "'_%s_users'", "%", "user_base", ")" ]
avg_line_len: 43, score: 6.6
def list_streams(self, file_type, start=0, limit=100, filter_path=None, **kwargs):
    """Get the list of files under the created application, viewed by
    one of the four types: video, audio, image, or document.

    :param file_type: One of the four types: video, audio, image, or doc.
    :param start: Start offset of the returned entries; defaults to 0.
    :param limit: Maximum number of returned entries; defaults to 1000,
                  configurable.
    :param filter_path: Prefix path to filter by, e.g.: /apps/album

                        .. warning::
                           * Path length is limited to 1000;
                           * The path must not contain any of the following
                             characters: ``\\\\ ? | " > < : *``;
                           * A file or path name must not begin or end with
                             ``.`` or a whitespace character; whitespace
                             includes: ``\\r, \\n, \\t``, space, ``\\0, \\x0B``.
    :return: Response object
    """
    params = {
        'type': file_type,
        'start': start,
        'limit': limit,
        'filter_path': filter_path,
    }

    return self._request('stream', 'list', extra_params=params, **kwargs)
[ "def", "list_streams", "(", "self", ",", "file_type", ",", "start", "=", "0", ",", "limit", "=", "100", ",", "filter_path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'type'", ":", "file_type", ",", "'start'", ":", "start", ",", "'limit'", ":", "limit", ",", "'filter_path'", ":", "filter_path", ",", "}", "return", "self", ".", "_request", "(", "'stream'", ",", "'list'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 35.555556, score: 15.333333
def delete(self, remote):
    """
    Delete a file from server
    """
    try:
        self.conn.delete(remote)
    except Exception:
        return False
    else:
        return True
[ "def", "delete", "(", "self", ",", "remote", ")", ":", "try", ":", "self", ".", "conn", ".", "delete", "(", "remote", ")", "except", "Exception", ":", "return", "False", "else", ":", "return", "True" ]
avg_line_len: 24.875, score: 15.25
def zoom(image, factor, dimension, hdr=False, order=3):
    """
    Zooms the provided image by the supplied factor in the supplied dimension.
    The factor is an integer determining how many slices should be put between
    each existing pair.

    If an image header (hdr) is supplied, its voxel spacing gets updated.

    Returns the image and the updated header or false.
    """
    # check if supplied dimension is valid
    if dimension >= image.ndim:
        raise argparse.ArgumentError('The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'.format(dimension, image.ndim - 1))

    # get logger
    logger = Logger.getInstance()

    logger.debug('Old shape = {}.'.format(image.shape))

    # perform the zoom
    zoom = [1] * image.ndim
    zoom[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension])
    logger.debug('Reshaping with = {}.'.format(zoom))
    image = interpolation.zoom(image, zoom, order=order)

    logger.debug('New shape = {}.'.format(image.shape))

    if hdr:
        new_spacing = list(header.get_pixel_spacing(hdr))
        new_spacing[dimension] = new_spacing[dimension] / float(factor + 1)
        logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing))
        header.set_pixel_spacing(hdr, tuple(new_spacing))

    return image, hdr
[ "def", "zoom", "(", "image", ",", "factor", ",", "dimension", ",", "hdr", "=", "False", ",", "order", "=", "3", ")", ":", "# check if supplied dimension is valid", "if", "dimension", ">=", "image", ".", "ndim", ":", "raise", "argparse", ".", "ArgumentError", "(", "'The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'", ".", "format", "(", "dimension", ",", "image", ".", "ndim", "-", "1", ")", ")", "# get logger", "logger", "=", "Logger", ".", "getInstance", "(", ")", "logger", ".", "debug", "(", "'Old shape = {}.'", ".", "format", "(", "image", ".", "shape", ")", ")", "# perform the zoom", "zoom", "=", "[", "1", "]", "*", "image", ".", "ndim", "zoom", "[", "dimension", "]", "=", "(", "image", ".", "shape", "[", "dimension", "]", "+", "(", "image", ".", "shape", "[", "dimension", "]", "-", "1", ")", "*", "factor", ")", "/", "float", "(", "image", ".", "shape", "[", "dimension", "]", ")", "logger", ".", "debug", "(", "'Reshaping with = {}.'", ".", "format", "(", "zoom", ")", ")", "image", "=", "interpolation", ".", "zoom", "(", "image", ",", "zoom", ",", "order", "=", "order", ")", "logger", ".", "debug", "(", "'New shape = {}.'", ".", "format", "(", "image", ".", "shape", ")", ")", "if", "hdr", ":", "new_spacing", "=", "list", "(", "header", ".", "get_pixel_spacing", "(", "hdr", ")", ")", "new_spacing", "[", "dimension", "]", "=", "new_spacing", "[", "dimension", "]", "/", "float", "(", "factor", "+", "1", ")", "logger", ".", "debug", "(", "'Setting pixel spacing from {} to {}....'", ".", "format", "(", "header", ".", "get_pixel_spacing", "(", "hdr", ")", ",", "new_spacing", ")", ")", "header", ".", "set_pixel_spacing", "(", "hdr", ",", "tuple", "(", "new_spacing", ")", ")", "return", "image", ",", "hdr" ]
avg_line_len: 43.46875, score: 28.09375
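A worked example of the zoom-factor arithmetic alone, no image required; inserting factor slices between each adjacent pair of old slices gives the relative zoom computed above:

    old_len, factor = 10, 2
    new_len = old_len + (old_len - 1) * factor  # 10 + 9*2 = 28 slices
    rel_zoom = new_len / float(old_len)         # 2.8 along that axis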
def __analizar_controles(self, ret):
    "Checks for and extracts controls if present in the XML response"
    if 'arrayControles' in ret:
        controles = ret['arrayControles']
        self.Controles = ["%(tipo)s: %(descripcion)s" % ctl['control']
                          for ctl in controles]
[ "def", "__analizar_controles", "(", "self", ",", "ret", ")", ":", "if", "'arrayControles'", "in", "ret", ":", "controles", "=", "ret", "[", "'arrayControles'", "]", "self", ".", "Controles", "=", "[", "\"%(tipo)s: %(descripcion)s\"", "%", "ctl", "[", "'control'", "]", "for", "ctl", "in", "controles", "]" ]
avg_line_len: 52.166667, score: 15.166667
def match(self, ref):
    """ Get all concepts matching this ref. For a dimension, that is
    all its attributes, but not the dimension itself. """
    try:
        concept = self[ref]
        if not isinstance(concept, Dimension):
            return [concept]
        return [a for a in concept.attributes]
    except KeyError:
        return []
[ "def", "match", "(", "self", ",", "ref", ")", ":", "try", ":", "concept", "=", "self", "[", "ref", "]", "if", "not", "isinstance", "(", "concept", ",", "Dimension", ")", ":", "return", "[", "concept", "]", "return", "[", "a", "for", "a", "in", "concept", ".", "attributes", "]", "except", "KeyError", ":", "return", "[", "]" ]
avg_line_len: 37.4, score: 11.9
def _output_type_by_input_path(inpaths, itype, fmsg):
    """
    :param inpaths: List of input file paths
    :param itype: Input type or None
    :param fmsg: message if it cannot detect otype by 'inpath'
    :return: Output type :: str
    """
    msg = ("Specify inpath and/or outpath type[s] with -I/--itype "
           "or -O/--otype option explicitly")
    if itype is None:
        try:
            otype = API.find(inpaths[0]).type()
        except API.UnknownFileTypeError:
            _exit_with_output((fmsg % inpaths[0]) + msg, 1)
        except (ValueError, IndexError):
            _exit_with_output(msg, 1)
    else:
        otype = itype

    return otype
[ "def", "_output_type_by_input_path", "(", "inpaths", ",", "itype", ",", "fmsg", ")", ":", "msg", "=", "(", "\"Specify inpath and/or outpath type[s] with -I/--itype \"", "\"or -O/--otype option explicitly\"", ")", "if", "itype", "is", "None", ":", "try", ":", "otype", "=", "API", ".", "find", "(", "inpaths", "[", "0", "]", ")", ".", "type", "(", ")", "except", "API", ".", "UnknownFileTypeError", ":", "_exit_with_output", "(", "(", "fmsg", "%", "inpaths", "[", "0", "]", ")", "+", "msg", ",", "1", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "_exit_with_output", "(", "msg", ",", "1", ")", "else", ":", "otype", "=", "itype", "return", "otype" ]
avg_line_len: 32.7, score: 13.7
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None):
    """
    Get a single submission.

    Get a single submission, based on user id.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - PATH - assignment_id
    """ID"""
    path["assignment_id"] = assignment_id

    # REQUIRED - PATH - user_id
    """ID"""
    path["user_id"] = user_id

    # OPTIONAL - include
    """Associations to include with the group."""
    if include is not None:
        self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"])
        params["include"] = include

    self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_submission_courses", "(", "self", ",", "user_id", ",", "course_id", ",", "assignment_id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - PATH - assignment_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"assignment_id\"", "]", "=", "assignment_id", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "# OPTIONAL - include\r", "\"\"\"Associations to include with the group.\"\"\"", "if", "include", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "include", ",", "[", "\"submission_history\"", ",", "\"submission_comments\"", ",", "\"rubric_assessment\"", ",", "\"visibility\"", ",", "\"course\"", ",", "\"user\"", "]", ")", "params", "[", "\"include\"", "]", "=", "include", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
avg_line_len: 40.7, score: 26.933333
def set_approvers(self, approver_ids=[], approver_group_ids=[], **kwargs):
    """Change MR-level allowed approvers and approver groups.

    Args:
        approver_ids (list): User IDs that can approve MRs
        approver_group_ids (list): Group IDs whose members can approve MRs

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabUpdateError: If the server failed to perform the request
    """
    path = '%s/%s/approvers' % (self._parent.manager.path,
                                self._parent.get_id())
    data = {'approver_ids': approver_ids,
            'approver_group_ids': approver_group_ids}
    self.gitlab.http_put(path, post_data=data, **kwargs)
[ "def", "set_approvers", "(", "self", ",", "approver_ids", "=", "[", "]", ",", "approver_group_ids", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/approvers'", "%", "(", "self", ".", "_parent", ".", "manager", ".", "path", ",", "self", ".", "_parent", ".", "get_id", "(", ")", ")", "data", "=", "{", "'approver_ids'", ":", "approver_ids", ",", "'approver_group_ids'", ":", "approver_group_ids", "}", "self", ".", "gitlab", ".", "http_put", "(", "path", ",", "post_data", "=", "data", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 46.5625, score: 23.3125
def _vel_disp_one(self, kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy):
    """
    computes one realisation of the velocity dispersion realized in the slit

    :param kwargs_profile:
    :param kwargs_aperture:
    :param kwargs_light:
    :param kwargs_anisotropy:
    :return:
    """
    while True:
        r = self.lightProfile.draw_light(kwargs_light)  # draw r
        R, x, y = util.R_r(r)  # draw projected R
        x_, y_ = util.displace_PSF(x, y, self.FWHM)  # displace via PSF
        bool = self.aperture.aperture_select(x_, y_, kwargs_aperture)
        if bool is True:
            break
    sigma_s2 = self.sigma_s2(r, R, kwargs_profile, kwargs_anisotropy, kwargs_light)
    return sigma_s2
[ "def", "_vel_disp_one", "(", "self", ",", "kwargs_profile", ",", "kwargs_aperture", ",", "kwargs_light", ",", "kwargs_anisotropy", ")", ":", "while", "True", ":", "r", "=", "self", ".", "lightProfile", ".", "draw_light", "(", "kwargs_light", ")", "# draw r", "R", ",", "x", ",", "y", "=", "util", ".", "R_r", "(", "r", ")", "# draw projected R", "x_", ",", "y_", "=", "util", ".", "displace_PSF", "(", "x", ",", "y", ",", "self", ".", "FWHM", ")", "# displace via PSF", "bool", "=", "self", ".", "aperture", ".", "aperture_select", "(", "x_", ",", "y_", ",", "kwargs_aperture", ")", "if", "bool", "is", "True", ":", "break", "sigma_s2", "=", "self", ".", "sigma_s2", "(", "r", ",", "R", ",", "kwargs_profile", ",", "kwargs_anisotropy", ",", "kwargs_light", ")", "return", "sigma_s2" ]
avg_line_len: 36.909091, score: 22.818182
def expression_statement(self):
    """
    expression_statement: assignment ';'
    """
    node = self.assignment()
    self._process(Nature.SEMI)
    return node
[ "def", "expression_statement", "(", "self", ")", ":", "node", "=", "self", ".", "assignment", "(", ")", "self", ".", "_process", "(", "Nature", ".", "SEMI", ")", "return", "node" ]
avg_line_len: 26, score: 6.857143
def transform_timeseries_data(timeseries, start, end=None):
    """Transforms a Go Metrics API metric result into a list of
    values for a given window period.

    start and end are expected to be Unix timestamps in microseconds.
    """
    data = []
    include = False
    for metric, points in timeseries.items():
        for point in points:
            if point['x'] == start:
                include = True
            if include:
                data.append(point['y'])
            if end is not None and point['x'] == end:
                return data
    return data
[ "def", "transform_timeseries_data", "(", "timeseries", ",", "start", ",", "end", "=", "None", ")", ":", "data", "=", "[", "]", "include", "=", "False", "for", "metric", ",", "points", "in", "timeseries", ".", "items", "(", ")", ":", "for", "point", "in", "points", ":", "if", "point", "[", "'x'", "]", "==", "start", ":", "include", "=", "True", "if", "include", ":", "data", ".", "append", "(", "point", "[", "'y'", "]", ")", "if", "end", "is", "not", "None", "and", "point", "[", "'x'", "]", "==", "end", ":", "return", "data", "return", "data" ]
avg_line_len: 33.058824, score: 14.117647
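A worked example with a made-up single-metric result; collection starts at the point whose 'x' equals start and stops after the point matching end:

    ts = {'cpu': [{'x': 1, 'y': 10}, {'x': 2, 'y': 20}, {'x': 3, 'y': 30}]}
    transform_timeseries_data(ts, start=2)           # [20, 30]
    transform_timeseries_data(ts, start=1, end=2)    # [10, 20]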
def cmd_module(args):
    '''module commands'''
    usage = "usage: module <list|load|reload|unload>"
    if len(args) < 1:
        print(usage)
        return
    if args[0] == "list":
        for (m, pm) in mpstate.modules:
            print("%s: %s" % (m.name, m.description))
    elif args[0] == "load":
        if len(args) < 2:
            print("usage: module load <name>")
            return
        (modname, kwargs) = generate_kwargs(args[1])
        try:
            load_module(modname, **kwargs)
        except TypeError as ex:
            print(ex)
            print("%s module does not support keyword arguments" % modname)
            return
    elif args[0] == "reload":
        if len(args) < 2:
            print("usage: module reload <name>")
            return
        (modname, kwargs) = generate_kwargs(args[1])
        pmodule = None
        for (m, pm) in mpstate.modules:
            if m.name == modname:
                pmodule = pm
        if pmodule is None:
            print("Module %s not loaded" % modname)
            return
        if unload_module(modname):
            import zipimport
            try:
                reload(pmodule)
            except ImportError:
                clear_zipimport_cache()
                reload(pmodule)
            try:
                if load_module(modname, quiet=True, **kwargs):
                    print("Reloaded module %s" % modname)
            except TypeError:
                print("%s module does not support keyword arguments" % modname)
    elif args[0] == "unload":
        if len(args) < 2:
            print("usage: module unload <name>")
            return
        modname = os.path.basename(args[1])
        unload_module(modname)
    else:
        print(usage)
[ "def", "cmd_module", "(", "args", ")", ":", "usage", "=", "\"usage: module <list|load|reload|unload>\"", "if", "len", "(", "args", ")", "<", "1", ":", "print", "(", "usage", ")", "return", "if", "args", "[", "0", "]", "==", "\"list\"", ":", "for", "(", "m", ",", "pm", ")", "in", "mpstate", ".", "modules", ":", "print", "(", "\"%s: %s\"", "%", "(", "m", ".", "name", ",", "m", ".", "description", ")", ")", "elif", "args", "[", "0", "]", "==", "\"load\"", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "\"usage: module load <name>\"", ")", "return", "(", "modname", ",", "kwargs", ")", "=", "generate_kwargs", "(", "args", "[", "1", "]", ")", "try", ":", "load_module", "(", "modname", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "ex", ":", "print", "(", "ex", ")", "print", "(", "\"%s module does not support keyword arguments\"", "%", "modname", ")", "return", "elif", "args", "[", "0", "]", "==", "\"reload\"", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "\"usage: module reload <name>\"", ")", "return", "(", "modname", ",", "kwargs", ")", "=", "generate_kwargs", "(", "args", "[", "1", "]", ")", "pmodule", "=", "None", "for", "(", "m", ",", "pm", ")", "in", "mpstate", ".", "modules", ":", "if", "m", ".", "name", "==", "modname", ":", "pmodule", "=", "pm", "if", "pmodule", "is", "None", ":", "print", "(", "\"Module %s not loaded\"", "%", "modname", ")", "return", "if", "unload_module", "(", "modname", ")", ":", "import", "zipimport", "try", ":", "reload", "(", "pmodule", ")", "except", "ImportError", ":", "clear_zipimport_cache", "(", ")", "reload", "(", "pmodule", ")", "try", ":", "if", "load_module", "(", "modname", ",", "quiet", "=", "True", ",", "*", "*", "kwargs", ")", ":", "print", "(", "\"Reloaded module %s\"", "%", "modname", ")", "except", "TypeError", ":", "print", "(", "\"%s module does not support keyword arguments\"", "%", "modname", ")", "elif", "args", "[", "0", "]", "==", "\"unload\"", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "\"usage: module unload <name>\"", ")", "return", "modname", "=", "os", ".", "path", ".", "basename", "(", "args", "[", "1", "]", ")", "unload_module", "(", "modname", ")", "else", ":", "print", "(", "usage", ")" ]
avg_line_len: 32.692308, score: 15
def prt_hier_down(self, goid, prt=sys.stdout):
    """Write hierarchy for all GO IDs below GO ID in arg, goid."""
    wrhiercfg = self._get_wrhiercfg()
    obj = WrHierPrt(self.gosubdag.go2obj, self.gosubdag.go2nt, wrhiercfg, prt)
    obj.prt_hier_rec(goid)
    return obj.items_list
[ "def", "prt_hier_down", "(", "self", ",", "goid", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "wrhiercfg", "=", "self", ".", "_get_wrhiercfg", "(", ")", "obj", "=", "WrHierPrt", "(", "self", ".", "gosubdag", ".", "go2obj", ",", "self", ".", "gosubdag", ".", "go2nt", ",", "wrhiercfg", ",", "prt", ")", "obj", ".", "prt_hier_rec", "(", "goid", ")", "return", "obj", ".", "items_list" ]
avg_line_len: 49.666667, score: 11.666667
def get_language_tabs(self):
    """
    Determine the language tabs to show.
    """
    current_language = self.get_current_language()
    if self.object:
        available_languages = list(self.object.get_available_languages())
    else:
        available_languages = []
    return get_language_tabs(self.request, current_language, available_languages)
[ "def", "get_language_tabs", "(", "self", ")", ":", "current_language", "=", "self", ".", "get_current_language", "(", ")", "if", "self", ".", "object", ":", "available_languages", "=", "list", "(", "self", ".", "object", ".", "get_available_languages", "(", ")", ")", "else", ":", "available_languages", "=", "[", "]", "return", "get_language_tabs", "(", "self", ".", "request", ",", "current_language", ",", "available_languages", ")" ]
avg_line_len: 34.727273, score: 18.181818
def ensure_indirect_subclass(class_, of):
    """Check whether a given class is an indirect subclass of another,
    i.e. there exists at least one intermediate base between ``of``
    and ``class_``.

    :param class_: Class to check
    :param of: Superclass to check against
    :return: ``class_``, if the check succeeds
    :raise TypeError: When the check fails

    .. versionadded:: 0.0.4
    """
    if not is_indirect_subclass(class_, of):
        raise TypeError(
            "expected an indirect subclass of %r, got %s instead" % (
                of, class_.__name__))
    return class_
[ "def", "ensure_indirect_subclass", "(", "class_", ",", "of", ")", ":", "if", "not", "is_indirect_subclass", "(", "class_", ",", "of", ")", ":", "raise", "TypeError", "(", "\"expected an indirect subclass of %r, got %s instead\"", "%", "(", "of", ",", "class_", ".", "__name__", ")", ")", "return", "class_" ]
avg_line_len: 33.529412, score: 15.588235
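An illustration of the contract, assuming the companion is_indirect_subclass() from the same module; per the docstring, at least one base must sit between the two classes:

    class A(object): pass
    class B(A): pass
    class C(B): pass

    ensure_indirect_subclass(C, A)   # returns C (B is the intermediate base)
    ensure_indirect_subclass(B, A)   # raises TypeError (B is a direct subclass)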
def get_stored_metadata(self, temp_ver):
    """
    Retrieves the metadata for the given template version from the store

    Args:
        temp_ver (TemplateVersion): template version to retrieve the
            metadata for

    Returns:
        dict: the metadata of the given template version
    """
    with open(self._prefixed('%s.metadata' % temp_ver.name)) as f:
        return json.load(f)
[ "def", "get_stored_metadata", "(", "self", ",", "temp_ver", ")", ":", "with", "open", "(", "self", ".", "_prefixed", "(", "'%s.metadata'", "%", "temp_ver", ".", "name", ")", ")", "as", "f", ":", "return", "json", ".", "load", "(", "f", ")" ]
avg_line_len: 32.923077, score: 20.769231
def load(self, service_name, api_version=None, cached=True):
    """
    Loads the desired JSON for a service. (uncached)

    This will fall back through all the ``data_dirs`` provided to the
    constructor, returning the **first** one it finds.

    :param service_name: The name of the desired service
    :type service_name: string

    :param api_version: (Optional) The desired API version to load
    :type service_name: string

    :param cached: (Optional) Whether or not the cache should be used
        when attempting to load the data. Default is ``True``.
    :type cached: boolean

    :returns: The loaded JSON as a dict
    """
    # Fetch from the cache first if it's there.
    if cached:
        if service_name in self._loaded_data:
            if api_version in self._loaded_data[service_name]:
                return self._loaded_data[service_name][api_version]

    data = {}
    options = self.get_available_options(service_name)
    match, version = self.get_best_match(
        options,
        service_name,
        api_version=api_version
    )

    with open(match, 'r') as json_file:
        data = json.load(json_file)
        # Embed where we found it from for debugging purposes.
        data['__file__'] = match
        data['api_version'] = version

    if cached:
        self._loaded_data.setdefault(service_name, {})
        self._loaded_data[service_name][api_version] = data

    return data
[ "def", "load", "(", "self", ",", "service_name", ",", "api_version", "=", "None", ",", "cached", "=", "True", ")", ":", "# Fetch from the cache first if it's there.", "if", "cached", ":", "if", "service_name", "in", "self", ".", "_loaded_data", ":", "if", "api_version", "in", "self", ".", "_loaded_data", "[", "service_name", "]", ":", "return", "self", ".", "_loaded_data", "[", "service_name", "]", "[", "api_version", "]", "data", "=", "{", "}", "options", "=", "self", ".", "get_available_options", "(", "service_name", ")", "match", ",", "version", "=", "self", ".", "get_best_match", "(", "options", ",", "service_name", ",", "api_version", "=", "api_version", ")", "with", "open", "(", "match", ",", "'r'", ")", "as", "json_file", ":", "data", "=", "json", ".", "load", "(", "json_file", ")", "# Embed where we found it from for debugging purposes.", "data", "[", "'__file__'", "]", "=", "match", "data", "[", "'api_version'", "]", "=", "version", "if", "cached", ":", "self", ".", "_loaded_data", ".", "setdefault", "(", "service_name", ",", "{", "}", ")", "self", ".", "_loaded_data", "[", "service_name", "]", "[", "api_version", "]", "=", "data", "return", "data" ]
avg_line_len: 34.659091, score: 20.840909
def recent(self, username, project, limit=1, offset=0, branch=None, status_filter=""):
    """Return status of recent builds for given project.

    Retrieves build statuses for given project and branch. If branch is
    None it retrieves most recent build.

    Args:
        username (str): Name of the user.
        project (str): Name of the project.
        limit (int): Number of builds to return, default=1, max=100.
        offset (int): Returns builds starting from given offset.
        branch (str): Optional branch name as string. If specified only
            builds from given branch are returned.
        status_filter (str): Restricts which builds are returned. Set to
            "completed", "successful", "failed", "running", or defaults
            to no filter.

    Returns:
        A list of dictionaries with information about each build.
    """
    method = 'GET'
    if branch is not None:
        url = ('/project/{username}/{project}/tree/{branch}?'
               'circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(
                   username=username, project=project, branch=branch,
                   token=self.client.api_token, limit=limit, offset=offset,
                   status_filter=status_filter))
    else:
        url = ('/project/{username}/{project}?'
               'circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'.format(
                   username=username, project=project,
                   token=self.client.api_token, limit=limit, offset=offset,
                   status_filter=status_filter))
    json_data = self.client.request(method, url)
    return json_data
[ "def", "recent", "(", "self", ",", "username", ",", "project", ",", "limit", "=", "1", ",", "offset", "=", "0", ",", "branch", "=", "None", ",", "status_filter", "=", "\"\"", ")", ":", "method", "=", "'GET'", "if", "branch", "is", "not", "None", ":", "url", "=", "(", "'/project/{username}/{project}/tree/{branch}?'", "'circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'", ".", "format", "(", "username", "=", "username", ",", "project", "=", "project", ",", "branch", "=", "branch", ",", "token", "=", "self", ".", "client", ".", "api_token", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ",", "status_filter", "=", "status_filter", ")", ")", "else", ":", "url", "=", "(", "'/project/{username}/{project}?'", "'circle-token={token}&limit={limit}&offset={offset}&filter={status_filter}'", ".", "format", "(", "username", "=", "username", ",", "project", "=", "project", ",", "token", "=", "self", ".", "client", ".", "api_token", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ",", "status_filter", "=", "status_filter", ")", ")", "json_data", "=", "self", ".", "client", ".", "request", "(", "method", ",", "url", ")", "return", "json_data" ]
avg_line_len: 50.857143, score: 25.514286
def locateChild(self, context, segments):
    """
    Delegate dispatch to a sharing resource if the request is for a user
    subdomain, otherwise fall back to the wrapped resource's
    C{locateChild} implementation.
    """
    request = IRequest(context)
    hostname = request.getHeader('host')
    info = self.subdomain(hostname)
    if info is not None:
        username, domain = info
        index = UserIndexPage(IRealm(self.siteStore),
                              self.webViewer)
        resource = index.locateChild(None, [username])[0]
        return resource, segments
    return self.wrapped.locateChild(context, segments)
[ "def", "locateChild", "(", "self", ",", "context", ",", "segments", ")", ":", "request", "=", "IRequest", "(", "context", ")", "hostname", "=", "request", ".", "getHeader", "(", "'host'", ")", "info", "=", "self", ".", "subdomain", "(", "hostname", ")", "if", "info", "is", "not", "None", ":", "username", ",", "domain", "=", "info", "index", "=", "UserIndexPage", "(", "IRealm", "(", "self", ".", "siteStore", ")", ",", "self", ".", "webViewer", ")", "resource", "=", "index", ".", "locateChild", "(", "None", ",", "[", "username", "]", ")", "[", "0", "]", "return", "resource", ",", "segments", "return", "self", ".", "wrapped", ".", "locateChild", "(", "context", ",", "segments", ")" ]
avg_line_len: 40.235294, score: 13.411765
def odt_to_ri(f, res, nm):
    r"""Convert the ODT object function to refractive index

    In :abbr:`ODT (Optical Diffraction Tomography)`, the object function
    is defined by the Helmholtz equation

    .. math::

        f(\mathbf{r}) = k_\mathrm{m}^2 \left[
            \left( \frac{n(\mathbf{r})}{n_\mathrm{m}} \right)^2 - 1
            \right]

    with :math:`k_\mathrm{m} = \frac{2\pi n_\mathrm{m}}{\lambda}`.
    By inverting this equation, we obtain the refractive index
    :math:`n(\mathbf{r})`.

    .. math::

        n(\mathbf{r}) = n_\mathrm{m}
            \sqrt{\frac{f(\mathbf{r})}{k_\mathrm{m}^2} + 1}

    Parameters
    ----------
    f: n-dimensional ndarray
        The reconstructed object function :math:`f(\mathbf{r})`.
    res: float
        The size of the vacuum wave length :math:`\lambda` in pixels.
    nm: float
        The refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`f(\mathbf{r})`.

    Returns
    -------
    ri: n-dimensional ndarray
        The complex refractive index :math:`n(\mathbf{r})`.

    Notes
    -----
    Because this function computes the root of a complex number, there
    are several solutions to the refractive index. Always the positive
    (real) root of the refractive index is used.
    """
    km = (2 * np.pi * nm) / res
    ri = nm * np.sqrt(f / km**2 + 1)
    # Always take the positive root as the refractive index.
    # Because f can be imaginary, numpy cannot return the correct
    # positive root of f. However, we know that *ri* must be positive and
    # thus we take the absolute value of ri.
    # This also is what happens in Slaney's
    # diffract/Src/back.c in line 414.
    negrootcoord = np.where(ri.real < 0)
    ri[negrootcoord] *= -1
    return ri
[ "def", "odt_to_ri", "(", "f", ",", "res", ",", "nm", ")", ":", "km", "=", "(", "2", "*", "np", ".", "pi", "*", "nm", ")", "/", "res", "ri", "=", "nm", "*", "np", ".", "sqrt", "(", "f", "/", "km", "**", "2", "+", "1", ")", "# Always take the positive root as the refractive index.", "# Because f can be imaginary, numpy cannot return the correct", "# positive root of f. However, we know that *ri* must be postive and", "# thus we take the absolute value of ri.", "# This also is what happens in Slaneys", "# diffract/Src/back.c in line 414.", "negrootcoord", "=", "np", ".", "where", "(", "ri", ".", "real", "<", "0", ")", "ri", "[", "negrootcoord", "]", "*=", "-", "1", "return", "ri" ]
avg_line_len: 32.735849, score: 22.056604
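A numeric sanity check of the formula: a zero object function must map back to the medium index, since sqrt(0 / km**2 + 1) == 1; the values below are illustrative:

    import numpy as np
    nm, res = 1.333, 5.0
    f = np.zeros(4, dtype=complex)
    # odt_to_ri(f, res, nm) -> array of 1.333 (== nm) everywhere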
def SimpleRowColumn(field, *args, **kwargs):
    """
    Shortcut for simple row with only a full column
    """
    if isinstance(field, basestring):
        field = Field(field, *args, **kwargs)
    return Row(
        Column(field),
    )
[ "def", "SimpleRowColumn", "(", "field", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "field", ",", "basestring", ")", ":", "field", "=", "Field", "(", "field", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "Row", "(", "Column", "(", "field", ")", ",", ")" ]
avg_line_len: 25.888889, score: 11.222222
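A hypothetical use inside a crispy-forms Layout; Row, Column and Field are the same layout objects the function itself references, while the helper, Layout and field names are illustrative:

    helper.layout = Layout(
        SimpleRowColumn('email', css_class='input-xlarge'),
        SimpleRowColumn(Field('bio', rows='4')),
    )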
def _other_pipeline_samples(summary_file, cur_samples):
    """Retrieve samples produced previously by another pipeline in the summary output.
    """
    cur_descriptions = set([s[0]["description"] for s in cur_samples])
    out = []
    if utils.file_exists(summary_file):
        with open(summary_file) as in_handle:
            for s in yaml.safe_load(in_handle).get("samples", []):
                if s["description"] not in cur_descriptions:
                    out.append(s)
    return out
[ "def", "_other_pipeline_samples", "(", "summary_file", ",", "cur_samples", ")", ":", "cur_descriptions", "=", "set", "(", "[", "s", "[", "0", "]", "[", "\"description\"", "]", "for", "s", "in", "cur_samples", "]", ")", "out", "=", "[", "]", "if", "utils", ".", "file_exists", "(", "summary_file", ")", ":", "with", "open", "(", "summary_file", ")", "as", "in_handle", ":", "for", "s", "in", "yaml", ".", "safe_load", "(", "in_handle", ")", ".", "get", "(", "\"samples\"", ",", "[", "]", ")", ":", "if", "s", "[", "\"description\"", "]", "not", "in", "cur_descriptions", ":", "out", ".", "append", "(", "s", ")", "return", "out" ]
avg_line_len: 44.272727, score: 14.363636
def check_ensembl_api_version(self):
    """ check the ensembl api version matches a currently working version

    This function is included so when the api version changes, we notice the
    change, and we can manually check the responses for the new version.
    """
    self.attempt = 0
    headers = {"content-type": "application/json"}
    ext = "/info/rest"
    r = self.ensembl_request(ext, headers)
    response = json.loads(r)

    self.cache.set_ensembl_api_version(response["release"])
[ "def", "check_ensembl_api_version", "(", "self", ")", ":", "self", ".", "attempt", "=", "0", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "ext", "=", "\"/info/rest\"", "r", "=", "self", ".", "ensembl_request", "(", "ext", ",", "headers", ")", "response", "=", "json", ".", "loads", "(", "r", ")", "self", ".", "cache", ".", "set_ensembl_api_version", "(", "response", "[", "\"release\"", "]", ")" ]
avg_line_len: 39.214286, score: 18.357143
def add_interim_values(module, input, output):
    """The forward hook used to save interim tensors, detached
    from the graph. Used to calculate the multipliers
    """
    try:
        del module.x
    except AttributeError:
        pass
    try:
        del module.y
    except AttributeError:
        pass
    module_type = module.__class__.__name__
    if module_type in op_handler:
        func_name = op_handler[module_type].__name__
        # First, check for cases where we don't need to save the x and y tensors
        if func_name == 'passthrough':
            pass
        else:
            # check only the 0th input varies
            for i in range(len(input)):
                if i != 0 and type(output) is tuple:
                    assert input[i] == output[i], "Only the 0th input may vary!"
            # if a new method is added, it must be added here too. This ensures tensors
            # are only saved if necessary
            if func_name in ['maxpool', 'nonlinear_1d']:
                # only save tensors if necessary
                if type(input) is tuple:
                    setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
                else:
                    setattr(module, 'x', torch.nn.Parameter(input.detach()))
                if type(output) is tuple:
                    setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
                else:
                    setattr(module, 'y', torch.nn.Parameter(output.detach()))
            if module_type in failure_case_modules:
                input[0].register_hook(deeplift_tensor_grad)
[ "def", "add_interim_values", "(", "module", ",", "input", ",", "output", ")", ":", "try", ":", "del", "module", ".", "x", "except", "AttributeError", ":", "pass", "try", ":", "del", "module", ".", "y", "except", "AttributeError", ":", "pass", "module_type", "=", "module", ".", "__class__", ".", "__name__", "if", "module_type", "in", "op_handler", ":", "func_name", "=", "op_handler", "[", "module_type", "]", ".", "__name__", "# First, check for cases where we don't need to save the x and y tensors", "if", "func_name", "==", "'passthrough'", ":", "pass", "else", ":", "# check only the 0th input varies", "for", "i", "in", "range", "(", "len", "(", "input", ")", ")", ":", "if", "i", "!=", "0", "and", "type", "(", "output", ")", "is", "tuple", ":", "assert", "input", "[", "i", "]", "==", "output", "[", "i", "]", ",", "\"Only the 0th input may vary!\"", "# if a new method is added, it must be added here too. This ensures tensors", "# are only saved if necessary", "if", "func_name", "in", "[", "'maxpool'", ",", "'nonlinear_1d'", "]", ":", "# only save tensors if necessary", "if", "type", "(", "input", ")", "is", "tuple", ":", "setattr", "(", "module", ",", "'x'", ",", "torch", ".", "nn", ".", "Parameter", "(", "input", "[", "0", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "setattr", "(", "module", ",", "'x'", ",", "torch", ".", "nn", ".", "Parameter", "(", "input", ".", "detach", "(", ")", ")", ")", "if", "type", "(", "output", ")", "is", "tuple", ":", "setattr", "(", "module", ",", "'y'", ",", "torch", ".", "nn", ".", "Parameter", "(", "output", "[", "0", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "setattr", "(", "module", ",", "'y'", ",", "torch", ".", "nn", ".", "Parameter", "(", "output", ".", "detach", "(", ")", ")", ")", "if", "module_type", "in", "failure_case_modules", ":", "input", "[", "0", "]", ".", "register_hook", "(", "deeplift_tensor_grad", ")" ]
avg_line_len: 42.405405, score: 18.216216
def ancestors(obj, refattrs=(ALIGNMENT, SEGMENTATION)):
    """
    >>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
    ...     print(anc)
    (<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
    (<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
    (<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>])
    """
    if hasattr(obj, 'tier'):
        tier = obj.tier
        items = [obj]
    else:
        tier = obj
        items = tier.items
    # a tier may be visited twice (e.g. A > B > A), but then it stops;
    # this is to avoid cycles
    visited = set([tier.id])
    while True:
        # get the first specified attribute
        refattr = next((ra for ra in refattrs if ra in tier.attributes), None)
        if not refattr:
            break
        reftier = ref.dereference(tier, refattr)
        ids = set(chain.from_iterable(
            ref.ids(item.attributes.get(refattr, '')) for item in items
        ))
        refitems = [item for item in reftier.items if item.id in ids]
        yield (tier, refattr, reftier, refitems)
        # cycle detection; break if we've now encountered something twice
        if reftier.id in visited:
            break
        visited.update(reftier.id)
        tier = reftier
        items = refitems
[ "def", "ancestors", "(", "obj", ",", "refattrs", "=", "(", "ALIGNMENT", ",", "SEGMENTATION", ")", ")", ":", "if", "hasattr", "(", "obj", ",", "'tier'", ")", ":", "tier", "=", "obj", ".", "tier", "items", "=", "[", "obj", "]", "else", ":", "tier", "=", "obj", "items", "=", "tier", ".", "items", "# a tier may be visited twice (e.g. A > B > A), but then it stops;", "# this is to avoid cycles", "visited", "=", "set", "(", "[", "tier", ".", "id", "]", ")", "while", "True", ":", "# get the first specified attribute", "refattr", "=", "next", "(", "(", "ra", "for", "ra", "in", "refattrs", "if", "ra", "in", "tier", ".", "attributes", ")", ",", "None", ")", "if", "not", "refattr", ":", "break", "reftier", "=", "ref", ".", "dereference", "(", "tier", ",", "refattr", ")", "ids", "=", "set", "(", "chain", ".", "from_iterable", "(", "ref", ".", "ids", "(", "item", ".", "attributes", ".", "get", "(", "refattr", ",", "''", ")", ")", "for", "item", "in", "items", ")", ")", "refitems", "=", "[", "item", "for", "item", "in", "reftier", ".", "items", "if", "item", ".", "id", "in", "ids", "]", "yield", "(", "tier", ",", "refattr", ",", "reftier", ",", "refitems", ")", "# cycle detection; break if we've now encountered something twice", "if", "reftier", ".", "id", "in", "visited", ":", "break", "visited", ".", "update", "(", "reftier", ".", "id", ")", "tier", "=", "reftier", "items", "=", "refitems" ]
avg_line_len: 44.617647, score: 25.558824
def _parse_use(self, string):
    """Extracts use dependencies from the innertext of a module."""
    result = {}
    for ruse in self.RE_USE.finditer(string):
        # We also handle comments for individual use cases, the "only" section
        # won't pick up any comments.
        name = ruse.group("name").split("!")[0].strip()
        if name.lower() == "mpi":
            continue
        if ruse.group("only"):
            only = ruse.group("only").split(",")
            for method in only:
                key = "{}.{}".format(name, method.strip())
                self._dict_increment(result, key)
        else:
            self._dict_increment(result, name)
    return result
[ "def", "_parse_use", "(", "self", ",", "string", ")", ":", "result", "=", "{", "}", "for", "ruse", "in", "self", ".", "RE_USE", ".", "finditer", "(", "string", ")", ":", "#We also handle comments for individual use cases, the \"only\" section", "#won't pick up any comments.", "name", "=", "ruse", ".", "group", "(", "\"name\"", ")", ".", "split", "(", "\"!\"", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "name", ".", "lower", "(", ")", "==", "\"mpi\"", ":", "continue", "if", "ruse", ".", "group", "(", "\"only\"", ")", ":", "only", "=", "ruse", ".", "group", "(", "\"only\"", ")", ".", "split", "(", "\",\"", ")", "for", "method", "in", "only", ":", "key", "=", "\"{}.{}\"", ".", "format", "(", "name", ",", "method", ".", "strip", "(", ")", ")", "self", ".", "_dict_increment", "(", "result", ",", "key", ")", "else", ":", "self", ".", "_dict_increment", "(", "result", ",", "name", ")", "return", "result" ]
avg_line_len: 41.388889, score: 14.333333
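A hypothetical input/output pair, assuming the module's RE_USE pattern captures the "name" and optional "only" groups of a Fortran use statement; the expected mapping follows directly from the visible logic:

    # source:  use fortpy, only: pysave, dealloc
    # result:  {'fortpy.pysave': 1, 'fortpy.dealloc': 1}
    # A bare "use mpi" is skipped entirely by the name.lower() == "mpi" guard.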
def import_data(target_zip):
    """
    Import data from the given zip archive; this means database + __data__
    :param target_zip:
    :return:
    """
    from django_productline.context import PRODUCT_CONTEXT
    tasks.import_data_dir(target_zip)
    # product context is not reloaded if context file is changed
    tasks.import_database(target_zip, PRODUCT_CONTEXT.DB_NAME, PRODUCT_CONTEXT.DB_USER)
[ "def", "import_data", "(", "target_zip", ")", ":", "from", "django_productline", ".", "context", "import", "PRODUCT_CONTEXT", "tasks", ".", "import_data_dir", "(", "target_zip", ")", "# product context is not reloaded if context file is changed", "tasks", ".", "import_database", "(", "target_zip", ",", "PRODUCT_CONTEXT", ".", "DB_NAME", ",", "PRODUCT_CONTEXT", ".", "DB_USER", ")" ]
avg_line_len: 37.727273, score: 17.181818
def construct(self, request=None, service=None, http_args=None, **kwargs):
    """
    Constructing the Authorization header. The value of
    the Authorization header is "Bearer <access_token>".

    :param request: Request class instance
    :param service: Service
    :param http_args: HTTP header arguments
    :param kwargs: extra keyword arguments
    :return:
    """
    if service.service_name == 'refresh_token':
        _acc_token = find_token(request, 'refresh_token', service, **kwargs)
    else:
        _acc_token = find_token(request, 'access_token', service, **kwargs)

    if not _acc_token:
        raise KeyError('No access or refresh token available')

    # The authorization value starts with 'Bearer' when bearer tokens
    # are used
    _bearer = "Bearer {}".format(_acc_token)

    # Add 'Authorization' to the headers
    if http_args is None:
        http_args = {"headers": {}}
        http_args["headers"]["Authorization"] = _bearer
    else:
        try:
            http_args["headers"]["Authorization"] = _bearer
        except KeyError:
            http_args["headers"] = {"Authorization": _bearer}

    return http_args
[ "def", "construct", "(", "self", ",", "request", "=", "None", ",", "service", "=", "None", ",", "http_args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "service", ".", "service_name", "==", "'refresh_token'", ":", "_acc_token", "=", "find_token", "(", "request", ",", "'refresh_token'", ",", "service", ",", "*", "*", "kwargs", ")", "else", ":", "_acc_token", "=", "find_token", "(", "request", ",", "'access_token'", ",", "service", ",", "*", "*", "kwargs", ")", "if", "not", "_acc_token", ":", "raise", "KeyError", "(", "'No access or refresh token available'", ")", "# The authorization value starts with 'Bearer' when bearer tokens", "# are used", "_bearer", "=", "\"Bearer {}\"", ".", "format", "(", "_acc_token", ")", "# Add 'Authorization' to the headers", "if", "http_args", "is", "None", ":", "http_args", "=", "{", "\"headers\"", ":", "{", "}", "}", "http_args", "[", "\"headers\"", "]", "[", "\"Authorization\"", "]", "=", "_bearer", "else", ":", "try", ":", "http_args", "[", "\"headers\"", "]", "[", "\"Authorization\"", "]", "=", "_bearer", "except", "KeyError", ":", "http_args", "[", "\"headers\"", "]", "=", "{", "\"Authorization\"", ":", "_bearer", "}", "return", "http_args" ]
avg_line_len: 34.777778, score: 20.777778
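The shape of the returned header arguments, with a made-up token value for illustration:

    # construct(request, service=token_service)
    #   -> {'headers': {'Authorization': 'Bearer eyJhbGciOi...'}}
    # An existing http_args dict is updated in place rather than replaced.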
def get_all_anonymous_mappings(self, struct1, struct2, niggli=True,
                               include_dist=False):
    """
    Performs an anonymous fitting, which allows distinct species in one
    structure to map to another. Returns a dictionary of species
    substitutions that are within tolerance

    Args:
        struct1 (Structure): 1st structure
        struct2 (Structure): 2nd structure
        niggli (bool): Find niggli cell in preprocessing
        include_dist (bool): Return the maximin distance with each mapping

    Returns:
        list of species mappings that map struct1 to struct2.
    """
    struct1, struct2 = self._process_species([struct1, struct2])
    struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2,
                                                          niggli)

    matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
                                    break_on_match=not include_dist)

    if matches:
        if include_dist:
            return [(m[0], m[1][0]) for m in matches]
        else:
            return [m[0] for m in matches]
[ "def", "get_all_anonymous_mappings", "(", "self", ",", "struct1", ",", "struct2", ",", "niggli", "=", "True", ",", "include_dist", "=", "False", ")", ":", "struct1", ",", "struct2", "=", "self", ".", "_process_species", "(", "[", "struct1", ",", "struct2", "]", ")", "struct1", ",", "struct2", ",", "fu", ",", "s1_supercell", "=", "self", ".", "_preprocess", "(", "struct1", ",", "struct2", ",", "niggli", ")", "matches", "=", "self", ".", "_anonymous_match", "(", "struct1", ",", "struct2", ",", "fu", ",", "s1_supercell", ",", "break_on_match", "=", "not", "include_dist", ")", "if", "matches", ":", "if", "include_dist", ":", "return", "[", "(", "m", "[", "0", "]", ",", "m", "[", "1", "]", "[", "0", "]", ")", "for", "m", "in", "matches", "]", "else", ":", "return", "[", "m", "[", "0", "]", "for", "m", "in", "matches", "]" ]
avg_line_len: 44, score: 22.962963
def _repr_mimebundle_(self, include=None, exclude=None):
    """Display the visualization in the Jupyter notebook."""
    id = uuid.uuid4()
    return (
        {'application/javascript': self._generate_js(id)},
        {'jupyter-vega': '#{0}'.format(id)},
    )
[ "def", "_repr_mimebundle_", "(", "self", ",", "include", "=", "None", ",", "exclude", "=", "None", ")", ":", "id", "=", "uuid", ".", "uuid4", "(", ")", "return", "(", "{", "'application/javascript'", ":", "self", ".", "_generate_js", "(", "id", ")", "}", ",", "{", "'jupyter-vega'", ":", "'#{0}'", ".", "format", "(", "id", ")", "}", ",", ")" ]
avg_line_len: 40, score: 16.571429
def add_file_handler(logger, logFilename=None):
    """Adds file handler to logger.

    File is opened in "a" mode (append)
    """
    assert isinstance(logger, logging.Logger)
    ch = logging.FileHandler(logFilename, "a")
    # ch.setFormatter(logging._defaultFormatter)  # todo may change to have same formatter as last handler of logger
    ch.setFormatter(_fmtr)
    logger.addHandler(ch)
[ "def", "add_file_handler", "(", "logger", ",", "logFilename", "=", "None", ")", ":", "assert", "isinstance", "(", "logger", ",", "logging", ".", "Logger", ")", "ch", "=", "logging", ".", "FileHandler", "(", "logFilename", ",", "\"a\"", ")", "# ch.setFormatter(logging._defaultFormatter) # todo may change to have same formatter as last handler of logger\r", "ch", ".", "setFormatter", "(", "_fmtr", ")", "logger", ".", "addHandler", "(", "ch", ")" ]
avg_line_len: 39.4, score: 16.4
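Typical stdlib usage; 'demo.log' is an illustrative filename, and the call assumes the module-level _fmtr formatter the function references is defined:

    import logging
    logger = logging.getLogger('demo')
    add_file_handler(logger, 'demo.log')
    logger.warning('hello')  # appended to demo.log via the new handler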
def update_classroom(self, course, classroomid, new_data):
    """ Updates the classroom and returns a list of errored students"""
    student_list, tutor_list, other_students, _ = self.get_user_lists(course, classroomid)

    # Check tutors
    new_data["tutors"] = [tutor for tutor in map(str.strip, new_data["tutors"]) if tutor in tutor_list]

    students, groups, errored_students = [], [], []
    new_data["students"] = map(str.strip, new_data["students"])

    # Check the students
    for student in new_data["students"]:
        if student in student_list:
            students.append(student)
        else:
            if student in other_students:
                # Remove user from the other classroom
                self.database.classrooms.find_one_and_update({"courseid": course.get_id(), "groups.students": student},
                                                             {"$pull": {"groups.$.students": student, "students": student}})
                self.database.classrooms.find_one_and_update({"courseid": course.get_id(), "students": student},
                                                             {"$pull": {"students": student}})
                students.append(student)
            else:
                # Check if user can be registered
                user_info = self.user_manager.get_user_info(student)
                if user_info is None or student in tutor_list:
                    errored_students.append(student)
                else:
                    students.append(student)

    removed_students = [student for student in student_list if student not in new_data["students"]]
    self.database.classrooms.find_one_and_update({"courseid": course.get_id(), "default": True},
                                                 {"$push": {"students": {"$each": removed_students}}})

    new_data["students"] = students

    # Check the groups
    for group in new_data["groups"]:
        group["students"] = [student for student in map(str.strip, group["students"]) if student in new_data["students"]]
        if len(group["students"]) <= group["size"]:
            groups.append(group)
    new_data["groups"] = groups

    classroom = self.database.classrooms.find_one_and_update(
        {"_id": ObjectId(classroomid)},
        {"$set": {"description": new_data["description"],
                  "students": students, "tutors": new_data["tutors"],
                  "groups": groups}}, return_document=ReturnDocument.AFTER)

    return classroom, errored_students
[ "def", "update_classroom", "(", "self", ",", "course", ",", "classroomid", ",", "new_data", ")", ":", "student_list", ",", "tutor_list", ",", "other_students", ",", "_", "=", "self", ".", "get_user_lists", "(", "course", ",", "classroomid", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"tutors\"", "]", ")", "if", "tutor", "in", "tutor_list", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "new_data", "[", "\"students\"", "]", "=", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"students\"", "]", ")", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "students", ".", "append", "(", "student", ")", "else", ":", "if", "student", "in", "other_students", ":", "# Remove user from the other classroom", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "tutor_list", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", "student_list", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "map", "(", "str", ".", "strip", ",", "group", "[", "\"students\"", "]", ")", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "classroom", "=", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "classroomid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", ":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "classroom", ",", "errored_students" ]
avg_line_len: 50.156863, score: 29.647059
def proveG1(x, tTilde, kw, y):
    """
    Generate a zero-knowledge proof that DL(Q*kw) == DL(e(x,tTilde)^kw)
    where <Q> = G1.
    """
    # Verify types
    assertType(x, G1Element)
    assertType(tTilde, G2Element)

    # Compute the proof.
    beta = pair(x, tTilde)
    Q = generatorG1()
    p = Q * kw
    v = randomZ(orderGt())
    t1 = Q * v
    t2 = beta ** v
    t1.normalize()
    c = hashZ(Q, p, beta, y, t1, t2)
    u = (v - (c * kw)) % orderGt()
    return (p, c, u)
[ "def", "proveG1", "(", "x", ",", "tTilde", ",", "kw", ",", "y", ")", ":", "# Verify types", "assertType", "(", "x", ",", "G1Element", ")", "assertType", "(", "tTilde", ",", "G2Element", ")", "# Compute the proof.", "beta", "=", "pair", "(", "x", ",", "tTilde", ")", "Q", "=", "generatorG1", "(", ")", "p", "=", "Q", "*", "kw", "v", "=", "randomZ", "(", "orderGt", "(", ")", ")", "t1", "=", "Q", "*", "v", "t2", "=", "beta", "**", "v", "t1", ".", "normalize", "(", ")", "c", "=", "hashZ", "(", "Q", ",", "p", ",", "beta", ",", "y", ",", "t1", ",", "t2", ")", "u", "=", "(", "v", "-", "(", "c", "*", "kw", ")", ")", "%", "orderGt", "(", ")", "return", "(", "p", ",", "c", ",", "u", ")" ]
20.136364
20.227273
def work_get(self, wallet, account): """ Retrieves work for **account** in **wallet** .. enable_control required .. version 8.0 required :param wallet: Wallet to get account work for :type wallet: str :param account: Account to get work for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_get( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp" ... ) "432e5cf728c90f4f" """ wallet = self._process_value(wallet, 'wallet') account = self._process_value(account, 'account') payload = {"wallet": wallet, "account": account} resp = self.call('work_get', payload) return resp['work']
[ "def", "work_get", "(", "self", ",", "wallet", ",", "account", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "account", "=", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", ",", "\"account\"", ":", "account", "}", "resp", "=", "self", ".", "call", "(", "'work_get'", ",", "payload", ")", "return", "resp", "[", "'work'", "]" ]
27.645161
22.870968
def remove_key(pki_dir, id_): ''' This method removes a specified key from the accepted keys dir ''' key = os.path.join(pki_dir, 'minions', id_) if os.path.isfile(key): os.remove(key) log.debug('Deleted \'%s\'', key)
[ "def", "remove_key", "(", "pki_dir", ",", "id_", ")", ":", "key", "=", "os", ".", "path", ".", "join", "(", "pki_dir", ",", "'minions'", ",", "id_", ")", "if", "os", ".", "path", ".", "isfile", "(", "key", ")", ":", "os", ".", "remove", "(", "key", ")", "log", ".", "debug", "(", "'Deleted \\'%s\\''", ",", "key", ")" ]
30.625
17.625
async def SetInstanceInfo(self, machines): ''' machines : typing.Sequence[~InstanceInfo] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='Provisioner', request='SetInstanceInfo', version=7, params=_params) _params['machines'] = machines reply = await self.rpc(msg) return reply
[ "async", "def", "SetInstanceInfo", "(", "self", ",", "machines", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Provisioner'", ",", "request", "=", "'SetInstanceInfo'", ",", "version", "=", "7", ",", "params", "=", "_params", ")", "_params", "[", "'machines'", "]", "=", "machines", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
32.857143
10.571429
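A minimal usage sketch for the RPC wrapper above; the facade object, the shape of the InstanceInfo entries, and the attribute layout of the reply are assumptions based on the juju facade style, not taken from the source.

import asyncio

async def report_instances(facade, machines):
    # machines: a sequence of InstanceInfo-like records (assumed shape)
    reply = await facade.SetInstanceInfo(machines)
    # the docstring promises a sequence of ErrorResult entries
    for result in getattr(reply, 'results', []):
        if getattr(result, 'error', None) is not None:
            print('instance update failed:', result.error)

# asyncio.run(report_instances(facade, machines))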
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
    """Projects vector in ECEF onto different basis, with components also expressed in ECEF

    Parameters
    ----------
    x : float or array-like
        ECEF-X component of vector
    y : float or array-like
        ECEF-Y component of vector
    z : float or array-like
        ECEF-Z component of vector
    xx : float or array-like
        ECEF-X component of the x unit vector of new basis
    xy : float or array-like
        ECEF-Y component of the x unit vector of new basis
    xz : float or array-like
        ECEF-Z component of the x unit vector of new basis
    yx : float or array-like
        ECEF-X component of the y unit vector of new basis
    yy : float or array-like
        ECEF-Y component of the y unit vector of new basis
    yz : float or array-like
        ECEF-Z component of the y unit vector of new basis
    zx : float or array-like
        ECEF-X component of the z unit vector of new basis
    zy : float or array-like
        ECEF-Y component of the z unit vector of new basis
    zz : float or array-like
        ECEF-Z component of the z unit vector of new basis

    Returns
    -------
    (out_x, out_y, out_z) : tuple of floats or array-likes
        Components of the vector along the x, y, and z unit vectors
        of the new basis

    """

    out_x = x*xx + y*xy + z*xz
    out_y = x*yx + y*yy + z*yz
    out_z = x*zx + y*zy + z*zz

    return out_x, out_y, out_z
[ "def", "project_ecef_vector_onto_basis", "(", "x", ",", "y", ",", "z", ",", "xx", ",", "xy", ",", "xz", ",", "yx", ",", "yy", ",", "yz", ",", "zx", ",", "zy", ",", "zz", ")", ":", "out_x", "=", "x", "*", "xx", "+", "y", "*", "xy", "+", "z", "*", "xz", "out_y", "=", "x", "*", "yx", "+", "y", "*", "yy", "+", "z", "*", "yz", "out_z", "=", "x", "*", "zx", "+", "y", "*", "zy", "+", "z", "*", "zz", "return", "out_x", ",", "out_y", ",", "out_z" ]
31.44
17.04
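A worked example of the projection above: expressing the ECEF x-axis in a basis rotated 90 degrees about the ECEF z-axis. The numbers are illustrative only.

out = project_ecef_vector_onto_basis(
    1.0, 0.0, 0.0,    # vector to project: the ECEF x-axis
    0.0, 1.0, 0.0,    # x unit vector of the new basis, in ECEF components
    -1.0, 0.0, 0.0,   # y unit vector of the new basis
    0.0, 0.0, 1.0)    # z unit vector of the new basis
print(out)  # (0.0, -1.0, 0.0): each output is a dot product with a basis vector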
def brain(self): """Catalog brain of the wrapped object """ if self._brain is None: logger.debug("SuperModel::brain: *Fetch catalog brain*") self._brain = self.get_brain_by_uid(self.uid) return self._brain
[ "def", "brain", "(", "self", ")", ":", "if", "self", ".", "_brain", "is", "None", ":", "logger", ".", "debug", "(", "\"SuperModel::brain: *Fetch catalog brain*\"", ")", "self", ".", "_brain", "=", "self", ".", "get_brain_by_uid", "(", "self", ".", "uid", ")", "return", "self", ".", "_brain" ]
36.428571
13.142857
def partial_update(self, index, doc_type, id, doc=None, script=None, params=None, upsert=None, querystring_args=None):
        """
        Partially update a document with a partial document or a script
        """
        if querystring_args is None:
            querystring_args = {}

        if doc is None and script is None:
            raise InvalidQuery("doc and script cannot both be None")

        if doc is None:
            cmd = {"script": script}

            if params:
                cmd["params"] = params

            if upsert:
                cmd["upsert"] = upsert
        else:
            cmd = {"doc": doc}

        path = make_path(index, doc_type, id, "_update")
        return self._send_request('POST', path, cmd, querystring_args)
[ "def", "partial_update", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "doc", "=", "None", ",", "script", "=", "None", ",", "params", "=", "None", ",", "upsert", "=", "None", ",", "querystring_args", "=", "None", ")", ":", "if", "querystring_args", "is", "None", ":", "querystring_args", "=", "{", "}", "if", "doc", "is", "None", "and", "script", "is", "None", ":", "raise", "InvalidQuery", "(", "\"script or doc can not both be None\"", ")", "if", "doc", "is", "None", ":", "cmd", "=", "{", "\"script\"", ":", "script", "}", "if", "params", ":", "cmd", "[", "\"params\"", "]", "=", "params", "if", "upsert", ":", "cmd", "[", "\"upsert\"", "]", "=", "upsert", "else", ":", "cmd", "=", "{", "\"doc\"", ":", "doc", "}", "path", "=", "make_path", "(", "index", ",", "doc_type", ",", "id", ",", "\"_update\"", ")", "return", "self", ".", "_send_request", "(", "'POST'", ",", "path", ",", "cmd", ",", "querystring_args", ")" ]
33.590909
16.954545
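Two hypothetical calls showing how the method above is meant to be driven; the connection object and the index/type/id values are placeholders, not from the source.

# doc-style partial update
conn.partial_update('blog', 'post', '42', doc={'views': 10})

# script-style update with params and an upsert fallback for missing docs
conn.partial_update('blog', 'post', '42',
                    script='ctx._source.views += count',
                    params={'count': 1},
                    upsert={'views': 1})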
def server_date_utc(self):
        """Get the Shanbay (www.shanbay.com) server time (UTC)"""
        date_str = self.request('http://www.shanbay.com', 'head'
                                ).headers['date']
        date_utc = datetime.datetime.strptime(date_str,
                                              '%a, %d %b %Y %H:%M:%S GMT')
        return date_utc
[ "def", "server_date_utc", "(", "self", ")", ":", "date_str", "=", "self", ".", "request", "(", "'http://www.shanbay.com'", ",", "'head'", ")", ".", "headers", "[", "'date'", "]", "date_utc", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date_str", ",", "'%a, %d %b %Y %H:%M:%S GMT'", ")", "return", "date_utc" ]
46.142857
16.142857
def matches(self, client, event_data): """True if all filters are matching.""" for f in self.filters: if not f(client, event_data): return False return True
[ "def", "matches", "(", "self", ",", "client", ",", "event_data", ")", ":", "for", "f", "in", "self", ".", "filters", ":", "if", "not", "f", "(", "client", ",", "event_data", ")", ":", "return", "False", "return", "True" ]
25.375
15.75
def factor_for_space(self, spc):
        """Return a tuple of two products, where the first product contains the
        given Hilbert space, and the second product is disjoint from it."""
        if spc == TrivialSpace:
            ops_on_spc = [
                o for o in self.operands if o.space is TrivialSpace]
            ops_not_on_spc = [
                o for o in self.operands if o.space > TrivialSpace]
        else:
            ops_on_spc = [
                o for o in self.operands if (o.space & spc) > TrivialSpace]
            ops_not_on_spc = [
                o for o in self.operands if (o.space & spc) is TrivialSpace]
        return (
            self.__class__._times_cls.create(*ops_on_spc),
            self.__class__._times_cls.create(*ops_not_on_spc))
[ "def", "factor_for_space", "(", "self", ",", "spc", ")", ":", "if", "spc", "==", "TrivialSpace", ":", "ops_on_spc", "=", "[", "o", "for", "o", "in", "self", ".", "operands", "if", "o", ".", "space", "is", "TrivialSpace", "]", "ops_not_on_spc", "=", "[", "o", "for", "o", "in", "self", ".", "operands", "if", "o", ".", "space", ">", "TrivialSpace", "]", "else", ":", "ops_on_spc", "=", "[", "o", "for", "o", "in", "self", ".", "operands", "if", "(", "o", ".", "space", "&", "spc", ")", ">", "TrivialSpace", "]", "ops_not_on_spc", "=", "[", "o", "for", "o", "in", "self", ".", "operands", "if", "(", "o", ".", "space", "&", "spc", ")", "is", "TrivialSpace", "]", "return", "(", "self", ".", "__class__", ".", "_times_cls", ".", "create", "(", "*", "ops_on_spc", ")", ",", "self", ".", "__class__", ".", "_times_cls", ".", "create", "(", "*", "ops_not_on_spc", ")", ")" ]
47.75
17.625
def _parse_members(self, contents, anexec, params, mode="insert"): """Parses the local variables for the contents of the specified executable.""" #First get the variables declared in the body of the executable, these can #be either locals or parameter declarations. members = self.vparser.parse(contents, anexec) #If the name matches one in the parameter list, we can connect them for param in list(params): lparam = param.lower() if lparam in members: if mode == "insert" and not lparam in anexec.parameters: anexec.add_parameter(members[lparam]) elif mode == "delete": anexec.remove_parameter(members[lparam]) #The remaining members that aren't in parameters are the local variables for key in members: if mode == "insert": if not key.lower() in anexec.parameters: anexec.members[key] = members[key] elif mode == "delete" and key in anexec.members: del anexec.members[key] #Next we need to get hold of the docstrings for these members if mode == "insert": memdocs = self.docparser.parse_docs(contents, anexec) if anexec.name in memdocs: docs = self.docparser.to_doc(memdocs[anexec.name][0], anexec.name) self.docparser.process_memberdocs(docs, anexec) #Also process the embedded types and executables who may have #docstrings just like regular executables/types do. self.docparser.process_embedded(memdocs, anexec)
[ "def", "_parse_members", "(", "self", ",", "contents", ",", "anexec", ",", "params", ",", "mode", "=", "\"insert\"", ")", ":", "#First get the variables declared in the body of the executable, these can", "#be either locals or parameter declarations.", "members", "=", "self", ".", "vparser", ".", "parse", "(", "contents", ",", "anexec", ")", "#If the name matches one in the parameter list, we can connect them", "for", "param", "in", "list", "(", "params", ")", ":", "lparam", "=", "param", ".", "lower", "(", ")", "if", "lparam", "in", "members", ":", "if", "mode", "==", "\"insert\"", "and", "not", "lparam", "in", "anexec", ".", "parameters", ":", "anexec", ".", "add_parameter", "(", "members", "[", "lparam", "]", ")", "elif", "mode", "==", "\"delete\"", ":", "anexec", ".", "remove_parameter", "(", "members", "[", "lparam", "]", ")", "#The remaining members that aren't in parameters are the local variables", "for", "key", "in", "members", ":", "if", "mode", "==", "\"insert\"", ":", "if", "not", "key", ".", "lower", "(", ")", "in", "anexec", ".", "parameters", ":", "anexec", ".", "members", "[", "key", "]", "=", "members", "[", "key", "]", "elif", "mode", "==", "\"delete\"", "and", "key", "in", "anexec", ".", "members", ":", "del", "anexec", ".", "members", "[", "key", "]", "#Next we need to get hold of the docstrings for these members", "if", "mode", "==", "\"insert\"", ":", "memdocs", "=", "self", ".", "docparser", ".", "parse_docs", "(", "contents", ",", "anexec", ")", "if", "anexec", ".", "name", "in", "memdocs", ":", "docs", "=", "self", ".", "docparser", ".", "to_doc", "(", "memdocs", "[", "anexec", ".", "name", "]", "[", "0", "]", ",", "anexec", ".", "name", ")", "self", ".", "docparser", ".", "process_memberdocs", "(", "docs", ",", "anexec", ")", "#Also process the embedded types and executables who may have", "#docstrings just like regular executables/types do.", "self", ".", "docparser", ".", "process_embedded", "(", "memdocs", ",", "anexec", ")" ]
49.818182
20.848485
def bedpe(args): """ %prog bedpe bedfile Convert to bedpe format. Use --span to write another bed file that contain the span of the read pairs. """ from jcvi.assembly.coverage import bed_to_bedpe p = OptionParser(bedpe.__doc__) p.add_option("--span", default=False, action="store_true", help="Write span bed file [default: %default]") p.add_option("--strand", default=False, action="store_true", help="Write the strand columns [default: %default]") p.add_option("--mates", help="Check the library stats from .mates file") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bedfile, = args pf = bedfile.rsplit(".", 1)[0] bedpefile = pf + ".bedpe" bedspanfile = pf + ".spans.bed" if opts.span else None bed_to_bedpe(bedfile, bedpefile, \ pairsbedfile=bedspanfile, matesfile=opts.mates, \ strand=opts.strand) return bedpefile, bedspanfile
[ "def", "bedpe", "(", "args", ")", ":", "from", "jcvi", ".", "assembly", ".", "coverage", "import", "bed_to_bedpe", "p", "=", "OptionParser", "(", "bedpe", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--span\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Write span bed file [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--strand\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Write the strand columns [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--mates\"", ",", "help", "=", "\"Check the library stats from .mates file\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfile", ",", "=", "args", "pf", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "bedpefile", "=", "pf", "+", "\".bedpe\"", "bedspanfile", "=", "pf", "+", "\".spans.bed\"", "if", "opts", ".", "span", "else", "None", "bed_to_bedpe", "(", "bedfile", ",", "bedpefile", ",", "pairsbedfile", "=", "bedspanfile", ",", "matesfile", "=", "opts", ".", "mates", ",", "strand", "=", "opts", ".", "strand", ")", "return", "bedpefile", ",", "bedspanfile" ]
35.142857
18.428571
def subsample(self, rate, random=False):
        """Returns a subsampled version of the PointCloud.

        Parameters
        ----------
        rate : int
            Only every rate-th element of the PointCloud is returned.

        Returns
        -------
        :obj:`PointCloud`
            A subsampled point cloud with N / rate total samples.

        Raises
        ------
        ValueError
            If rate is not a positive integer.
        """
        # reject non-integers as well as non-positive values
        if type(rate) != int or rate < 1:
            raise ValueError('Can only subsample with strictly positive integer rate')
        indices = np.arange(self.num_points)
        if random:
            np.random.shuffle(indices)
        subsample_inds = indices[::rate]
        subsampled_data = self._data[:,subsample_inds]
        return PointCloud(subsampled_data, self._frame), subsample_inds
[ "def", "subsample", "(", "self", ",", "rate", ",", "random", "=", "False", ")", ":", "if", "type", "(", "rate", ")", "!=", "int", "and", "rate", "<", "1", ":", "raise", "ValueError", "(", "'Can only subsample with strictly positive integer rate'", ")", "indices", "=", "np", ".", "arange", "(", "self", ".", "num_points", ")", "if", "random", ":", "np", ".", "random", ".", "shuffle", "(", "indices", ")", "subsample_inds", "=", "indices", "[", ":", ":", "rate", "]", "subsampled_data", "=", "self", ".", "_data", "[", ":", ",", "subsample_inds", "]", "return", "PointCloud", "(", "subsampled_data", ",", "self", ".", "_frame", ")", ",", "subsample_inds" ]
32.192308
19.461538
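A usage sketch, assuming the PointCloud constructor accepts a 3xN data array and a frame name (consistent with the attribute access inside subsample):

import numpy as np

data = np.random.rand(3, 100)                   # 100 random 3D points
pc = PointCloud(data, 'world')
half, inds = pc.subsample(2)                    # every 2nd point, in order
quarter, inds = pc.subsample(4, random=True)    # random quarter of the points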
def glm(interactive=True, echo=True, testing=False): """GLM model demo.""" def demo_body(go): """ Demo of H2O's Generalized Linear Estimator. This demo uploads a dataset to h2o, parses it, and shows a description. Then it divides the dataset into training and test sets, builds a GLM from the training set, and makes predictions for the test set. Finally, default performance metrics are displayed. """ go() # Connect to H2O h2o.init() go() # Upload the prostate dataset that comes included in the h2o python package prostate = h2o.load_dataset("prostate") go() # Print a description of the prostate data prostate.describe() go() # Randomly split the dataset into ~70/30, training/test sets train, test = prostate.split_frame(ratios=[0.70]) go() # Convert the response columns to factors (for binary classification problems) train["CAPSULE"] = train["CAPSULE"].asfactor() test["CAPSULE"] = test["CAPSULE"].asfactor() go() # Build a (classification) GLM from h2o.estimators import H2OGeneralizedLinearEstimator prostate_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0.5]) prostate_glm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"], y="CAPSULE", training_frame=train) go() # Show the model prostate_glm.show() go() # Predict on the test set and show the first ten predictions predictions = prostate_glm.predict(test) predictions.show() go() # Show default performance metrics performance = prostate_glm.model_performance(test) performance.show() # Execute: _run_demo(demo_body, interactive, echo, testing)
[ "def", "glm", "(", "interactive", "=", "True", ",", "echo", "=", "True", ",", "testing", "=", "False", ")", ":", "def", "demo_body", "(", "go", ")", ":", "\"\"\"\n Demo of H2O's Generalized Linear Estimator.\n\n This demo uploads a dataset to h2o, parses it, and shows a description.\n Then it divides the dataset into training and test sets, builds a GLM\n from the training set, and makes predictions for the test set.\n Finally, default performance metrics are displayed.\n \"\"\"", "go", "(", ")", "# Connect to H2O", "h2o", ".", "init", "(", ")", "go", "(", ")", "# Upload the prostate dataset that comes included in the h2o python package", "prostate", "=", "h2o", ".", "load_dataset", "(", "\"prostate\"", ")", "go", "(", ")", "# Print a description of the prostate data", "prostate", ".", "describe", "(", ")", "go", "(", ")", "# Randomly split the dataset into ~70/30, training/test sets", "train", ",", "test", "=", "prostate", ".", "split_frame", "(", "ratios", "=", "[", "0.70", "]", ")", "go", "(", ")", "# Convert the response columns to factors (for binary classification problems)", "train", "[", "\"CAPSULE\"", "]", "=", "train", "[", "\"CAPSULE\"", "]", ".", "asfactor", "(", ")", "test", "[", "\"CAPSULE\"", "]", "=", "test", "[", "\"CAPSULE\"", "]", ".", "asfactor", "(", ")", "go", "(", ")", "# Build a (classification) GLM", "from", "h2o", ".", "estimators", "import", "H2OGeneralizedLinearEstimator", "prostate_glm", "=", "H2OGeneralizedLinearEstimator", "(", "family", "=", "\"binomial\"", ",", "alpha", "=", "[", "0.5", "]", ")", "prostate_glm", ".", "train", "(", "x", "=", "[", "\"AGE\"", ",", "\"RACE\"", ",", "\"PSA\"", ",", "\"VOL\"", ",", "\"GLEASON\"", "]", ",", "y", "=", "\"CAPSULE\"", ",", "training_frame", "=", "train", ")", "go", "(", ")", "# Show the model", "prostate_glm", ".", "show", "(", ")", "go", "(", ")", "# Predict on the test set and show the first ten predictions", "predictions", "=", "prostate_glm", ".", "predict", "(", "test", ")", "predictions", ".", "show", "(", ")", "go", "(", ")", "# Show default performance metrics", "performance", "=", "prostate_glm", ".", "model_performance", "(", "test", ")", "performance", ".", "show", "(", ")", "# Execute:", "_run_demo", "(", "demo_body", ",", "interactive", ",", "echo", ",", "testing", ")" ]
32.732143
24.25
async def get_data(self, *, chat: typing.Union[str, int, None] = None, user: typing.Union[str, int, None] = None, default: typing.Optional[typing.Dict] = None) -> typing.Dict: """ Get state-data for user in chat. Return `default` if no data is provided in storage. Chat or user is always required. If one of them is not provided, you have to set missing value based on the provided one. :param chat: :param user: :param default: :return: """ raise NotImplementedError
[ "async", "def", "get_data", "(", "self", ",", "*", ",", "chat", ":", "typing", ".", "Union", "[", "str", ",", "int", ",", "None", "]", "=", "None", ",", "user", ":", "typing", ".", "Union", "[", "str", ",", "int", ",", "None", "]", "=", "None", ",", "default", ":", "typing", ".", "Optional", "[", "typing", ".", "Dict", "]", "=", "None", ")", "->", "typing", ".", "Dict", ":", "raise", "NotImplementedError" ]
37.6875
23.9375
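A minimal in-memory implementation sketch of the abstract method above, illustrating the fall-back between chat and user; the surrounding storage class and its self.data layout are assumptions, not from the source.

import typing

class MemoryStorage:
    def __init__(self):
        self.data: typing.Dict[tuple, typing.Dict] = {}

    async def get_data(self, *, chat=None, user=None,
                       default: typing.Optional[typing.Dict] = None) -> typing.Dict:
        # if only one identifier is given, use it for both,
        # as the docstring requires
        chat = chat if chat is not None else user
        user = user if user is not None else chat
        return self.data.get((chat, user), default or {})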
def head(self, msgid_article=None): """HEAD command. """ args = None if msgid_article is not None: args = utils.unparse_msgid_article(msgid_article) code, message = self.command("HEAD", args) if code != 221: raise NNTPReplyError(code, message) return utils.parse_headers(self.info_gen(code, message))
[ "def", "head", "(", "self", ",", "msgid_article", "=", "None", ")", ":", "args", "=", "None", "if", "msgid_article", "is", "not", "None", ":", "args", "=", "utils", ".", "unparse_msgid_article", "(", "msgid_article", ")", "code", ",", "message", "=", "self", ".", "command", "(", "\"HEAD\"", ",", "args", ")", "if", "code", "!=", "221", ":", "raise", "NNTPReplyError", "(", "code", ",", "message", ")", "return", "utils", ".", "parse_headers", "(", "self", ".", "info_gen", "(", "code", ",", "message", ")", ")" ]
30.916667
15.666667
def write_exports(self, exports): """ Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f)
[ "def", "write_exports", "(", "self", ",", "exports", ")", ":", "rf", "=", "self", ".", "get_distinfo_file", "(", "EXPORTS_FILENAME", ")", "with", "open", "(", "rf", ",", "'w'", ")", "as", "f", ":", "write_exports", "(", "exports", ",", "f", ")" ]
44.7
14.1
def auth_get(user, computed=True):
    '''
    List authorization for user

    user : string
        username
    computed : boolean
        merge results from `auths` command into data from user_attr

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.auth_get leo
    '''
    user_auths = []

    ## read user_attr file (user:qualifier:res1:res2:attr)
    with salt.utils.files.fopen('/etc/user_attr', 'r') as user_attr:
        for auth in user_attr:
            auth = salt.utils.stringutils.to_unicode(auth)
            auth = auth.strip().split(':')

            # skip comments and non-compliant lines
            if len(auth) != 5:
                continue

            # skip other users
            if auth[0] != user:
                continue

            # parse attr
            attrs = {}
            for attr in auth[4].strip().split(';'):
                attr_key, attr_val = attr.strip().split('=')
                if attr_key in ['auths', 'profiles', 'roles']:
                    attrs[attr_key] = attr_val.strip().split(',')
                else:
                    attrs[attr_key] = attr_val
            if 'auths' in attrs:
                user_auths.extend(attrs['auths'])

    ## also parse auths command
    if computed:
        res = __salt__['cmd.run_all']('auths {0}'.format(user))
        if res['retcode'] == 0:
            for auth in res['stdout'].splitlines():
                if ',' in auth:
                    user_auths.extend(auth.strip().split(','))
                else:
                    user_auths.append(auth.strip())

    return list(set(user_auths))
[ "def", "auth_get", "(", "user", ",", "computed", "=", "True", ")", ":", "user_auths", "=", "[", "]", "## read user_attr file (user:qualifier:res1:res2:attr)", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/etc/user_attr'", ",", "'r'", ")", "as", "user_attr", ":", "for", "auth", "in", "user_attr", ":", "auth", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "auth", ")", "auth", "=", "auth", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "# skip comments and non complaint lines", "if", "len", "(", "auth", ")", "!=", "5", ":", "continue", "# skip other users", "if", "auth", "[", "0", "]", "!=", "user", ":", "continue", "# parse attr", "attrs", "=", "{", "}", "for", "attr", "in", "auth", "[", "4", "]", ".", "strip", "(", ")", ".", "split", "(", "';'", ")", ":", "attr_key", ",", "attr_val", "=", "attr", ".", "strip", "(", ")", ".", "split", "(", "'='", ")", "if", "attr_key", "in", "[", "'auths'", ",", "'profiles'", ",", "'roles'", "]", ":", "attrs", "[", "attr_key", "]", "=", "attr_val", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "else", ":", "attrs", "[", "attr_key", "]", "=", "attr_val", "if", "'auths'", "in", "attrs", ":", "user_auths", ".", "extend", "(", "attrs", "[", "'auths'", "]", ")", "## also parse auths command", "if", "computed", ":", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'auths {0}'", ".", "format", "(", "user", ")", ")", "if", "res", "[", "'retcode'", "]", "==", "0", ":", "for", "auth", "in", "res", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "if", "','", "in", "auth", ":", "user_auths", ".", "extend", "(", "auth", ".", "strip", "(", ")", ".", "split", "(", "','", ")", ")", "else", ":", "user_auths", ".", "append", "(", "auth", ".", "strip", "(", ")", ")", "return", "list", "(", "set", "(", "user_auths", ")", ")" ]
29.45283
20.509434
def correct_spectral_interference(self, target_analyte, source_analyte, f):
        """
        Correct spectral interference.

        Subtract interference counts from target_analyte, based on the
        intensity of a source_analyte and a known fractional contribution (f).

        Correction takes the form:
        target_analyte -= source_analyte * f

        Only operates on background-corrected data ('bkgsub'). To undo a
        correction, rerun `self.bkg_subtract()`.

        Example
        -------
        To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
        peaks are known:

        f = abundance(88Sr) / abundance(87Sr)

        counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f

        Parameters
        ----------
        target_analyte : str
            The name of the analyte to modify.
        source_analyte : str
            The name of the analyte to base the correction on.
        f : float
            The fraction of the intensity of the source_analyte to
            subtract from the target_analyte. Correction is:
            target_analyte - source_analyte * f

        Returns
        -------
        None
        """
        if target_analyte not in self.analytes:
            raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
        if source_analyte not in self.analytes:
            raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))

        with self.pbar.set(total=len(self.data), desc='Interference Correction') as prog:
            for d in self.data.values():
                d.correct_spectral_interference(target_analyte, source_analyte, f)
                prog.update()
[ "def", "correct_spectral_interference", "(", "self", ",", "target_analyte", ",", "source_analyte", ",", "f", ")", ":", "if", "target_analyte", "not", "in", "self", ".", "analytes", ":", "raise", "ValueError", "(", "'target_analyte: {:} not in available analytes ({:})'", ".", "format", "(", "target_analyte", ",", "', '", ".", "join", "(", "self", ".", "analytes", ")", ")", ")", "if", "source_analyte", "not", "in", "self", ".", "analytes", ":", "raise", "ValueError", "(", "'source_analyte: {:} not in available analytes ({:})'", ".", "format", "(", "source_analyte", ",", "', '", ".", "join", "(", "self", ".", "analytes", ")", ")", ")", "with", "self", ".", "pbar", ".", "set", "(", "total", "=", "len", "(", "self", ".", "data", ")", ",", "desc", "=", "'Interference Correction'", ")", "as", "prog", ":", "for", "d", "in", "self", ".", "data", ".", "values", "(", ")", ":", "d", ".", "correct_spectral_interference", "(", "target_analyte", ",", "source_analyte", ",", "f", ")", "prog", ".", "update", "(", ")" ]
35.693878
27
def deleteOverlapping(self, targetList): ''' Erase points from another list that overlap with points in this list ''' start = self.pointList[0][0] stop = self.pointList[-1][0] if self.netLeftShift < 0: start += self.netLeftShift if self.netRightShift > 0: stop += self.netRightShift targetList = _deletePoints(targetList, start, stop) return targetList
[ "def", "deleteOverlapping", "(", "self", ",", "targetList", ")", ":", "start", "=", "self", ".", "pointList", "[", "0", "]", "[", "0", "]", "stop", "=", "self", ".", "pointList", "[", "-", "1", "]", "[", "0", "]", "if", "self", ".", "netLeftShift", "<", "0", ":", "start", "+=", "self", ".", "netLeftShift", "if", "self", ".", "netRightShift", ">", "0", ":", "stop", "+=", "self", ".", "netRightShift", "targetList", "=", "_deletePoints", "(", "targetList", ",", "start", ",", "stop", ")", "return", "targetList" ]
29.8125
17.0625
def rename(self, new_name, *args, **kwargs): """Rename this multireddit. This function is a handy shortcut to :meth:`rename_multireddit` of the reddit_session. """ new = self.reddit_session.rename_multireddit(self.name, new_name, *args, **kwargs) self.__dict__ = new.__dict__ # pylint: disable=W0201 return self
[ "def", "rename", "(", "self", ",", "new_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new", "=", "self", ".", "reddit_session", ".", "rename_multireddit", "(", "self", ".", "name", ",", "new_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "__dict__", "=", "new", ".", "__dict__", "# pylint: disable=W0201", "return", "self" ]
37.545455
19
def plot_coherence(xdata, ydata, std_error, fit, fit_function, xunit, exp_str, qubit_label):
    """Plot coherence data.

    Args:
        xdata: Time values at which the qubit was measured.
        ydata: Measured excited-state populations.
        std_error: Standard error of each measurement.
        fit: Fit parameters; fit[1] is reported in the legend.
        fit_function: Callable evaluated as fit_function(xdata, *fit).
        xunit: Time unit label for the x axis.
        exp_str: Name of the experiment (e.g. T1 or T2).
        qubit_label: Index of the measured qubit.

    Raises:
        ImportError: If matplotlib is not installed.
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('The function plot_coherence needs matplotlib. '
                          'Run "pip install matplotlib" before.')
    plt.errorbar(xdata, ydata, std_error, marker='.',
                 markersize=9, c='b', linestyle='')
    plt.plot(xdata, fit_function(xdata, *fit), c='r', linestyle='--',
             label=(exp_str + '= %s %s' % (str(round(fit[1])), xunit)))

    plt.xticks(fontsize=14, rotation=70)
    plt.yticks(fontsize=14)
    plt.xlabel('time [%s]' % (xunit), fontsize=16)
    plt.ylabel('P(1)', fontsize=16)
    plt.title(exp_str + ' measurement of Q$_{%s}$' % (str(qubit_label)), fontsize=18)
    plt.legend(fontsize=12)
    plt.grid(True)
    plt.show()
[ "def", "plot_coherence", "(", "xdata", ",", "ydata", ",", "std_error", ",", "fit", ",", "fit_function", ",", "xunit", ",", "exp_str", ",", "qubit_label", ")", ":", "if", "not", "HAS_MATPLOTLIB", ":", "raise", "ImportError", "(", "'The function plot_coherence needs matplotlib. '", "'Run \"pip install matplotlib\" before.'", ")", "plt", ".", "errorbar", "(", "xdata", ",", "ydata", ",", "std_error", ",", "marker", "=", "'.'", ",", "markersize", "=", "9", ",", "c", "=", "'b'", ",", "linestyle", "=", "''", ")", "plt", ".", "plot", "(", "xdata", ",", "fit_function", "(", "xdata", ",", "*", "fit", ")", ",", "c", "=", "'r'", ",", "linestyle", "=", "'--'", ",", "label", "=", "(", "exp_str", "+", "'= %s %s'", "%", "(", "str", "(", "round", "(", "fit", "[", "1", "]", ")", ")", ",", "xunit", ")", ")", ")", "plt", ".", "xticks", "(", "fontsize", "=", "14", ",", "rotation", "=", "70", ")", "plt", ".", "yticks", "(", "fontsize", "=", "14", ")", "plt", ".", "xlabel", "(", "'time [%s]'", "%", "(", "xunit", ")", ",", "fontsize", "=", "16", ")", "plt", ".", "ylabel", "(", "'P(1)'", ",", "fontsize", "=", "16", ")", "plt", ".", "title", "(", "exp_str", "+", "' measurement of Q$_{%s}$'", "%", "(", "str", "(", "qubit_label", ")", ")", ",", "fontsize", "=", "18", ")", "plt", ".", "legend", "(", "fontsize", "=", "12", ")", "plt", ".", "grid", "(", "True", ")", "plt", ".", "show", "(", ")" ]
33.612903
20.903226
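An illustrative call with synthetic T1-decay data (all values are made up); note the legend reports fit[1], so the decay time should sit in that slot:

import numpy as np

def t1_fit(t, a, tau, c):
    return a * np.exp(-t / tau) + c

fit = [1.0, 50.0, 0.0]                          # amplitude, T1, offset
xdata = np.linspace(0, 200, 21)                 # measurement times
ydata = t1_fit(xdata, *fit) + np.random.normal(0, 0.02, xdata.shape)
std_error = np.full(xdata.shape, 0.02)
plot_coherence(xdata, ydata, std_error, fit, t1_fit,
               xunit='us', exp_str='T1 ', qubit_label=0)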
def retrieve_data(self): """ retrieve data from an HTTP URL """ # shortcuts for readability url = self.config.get('url') timeout = float(self.config.get('timeout', 10)) # perform HTTP request and store content self.data = requests.get(url, verify=self.verify_ssl, timeout=timeout).content
[ "def", "retrieve_data", "(", "self", ")", ":", "# shortcuts for readability", "url", "=", "self", ".", "config", ".", "get", "(", "'url'", ")", "timeout", "=", "float", "(", "self", ".", "config", ".", "get", "(", "'timeout'", ",", "10", ")", ")", "# perform HTTP request and store content", "self", ".", "data", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "self", ".", "verify_ssl", ",", "timeout", "=", "timeout", ")", ".", "content" ]
47.142857
13.428571
def override_temp(replacement): """ Monkey-patch tempfile.tempdir with replacement, ensuring it exists """ pkg_resources.py31compat.makedirs(replacement, exist_ok=True) saved = tempfile.tempdir tempfile.tempdir = replacement try: yield finally: tempfile.tempdir = saved
[ "def", "override_temp", "(", "replacement", ")", ":", "pkg_resources", ".", "py31compat", ".", "makedirs", "(", "replacement", ",", "exist_ok", "=", "True", ")", "saved", "=", "tempfile", ".", "tempdir", "tempfile", ".", "tempdir", "=", "replacement", "try", ":", "yield", "finally", ":", "tempfile", ".", "tempdir", "=", "saved" ]
21.928571
21.214286
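The function yields mid-body, so it is presumably decorated with contextlib.contextmanager in the source; a usage sketch with a placeholder path:

import tempfile

with override_temp('/tmp/isolated-build'):
    fd, path = tempfile.mkstemp()    # created under /tmp/isolated-build
# the previous tempfile.tempdir is restored here, even if the body raised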
def _filter_row_ranges(self): """ Helper for :meth:`build_updated_request`""" new_row_ranges = [] for row_range in self.message.rows.row_ranges: # if current end_key (open or closed) is set, return its value, # if not, set to empty string (''). # NOTE: Empty string in end_key means "end of table" end_key = self._end_key_set(row_range) # if end_key is already read, skip to the next row_range if end_key and self._key_already_read(end_key): continue # if current start_key (open or closed) is set, return its value, # if not, then set to empty string ('') # NOTE: Empty string in start_key means "beginning of table" start_key = self._start_key_set(row_range) # if start_key was already read or doesn't exist, # create a row_range with last_scanned_key as start_key_open # to be passed to retry request retry_row_range = row_range if self._key_already_read(start_key): retry_row_range = copy.deepcopy(row_range) retry_row_range.start_key_closed = _to_bytes("") retry_row_range.start_key_open = self.last_scanned_key new_row_ranges.append(retry_row_range) return new_row_ranges
[ "def", "_filter_row_ranges", "(", "self", ")", ":", "new_row_ranges", "=", "[", "]", "for", "row_range", "in", "self", ".", "message", ".", "rows", ".", "row_ranges", ":", "# if current end_key (open or closed) is set, return its value,", "# if not, set to empty string ('').", "# NOTE: Empty string in end_key means \"end of table\"", "end_key", "=", "self", ".", "_end_key_set", "(", "row_range", ")", "# if end_key is already read, skip to the next row_range", "if", "end_key", "and", "self", ".", "_key_already_read", "(", "end_key", ")", ":", "continue", "# if current start_key (open or closed) is set, return its value,", "# if not, then set to empty string ('')", "# NOTE: Empty string in start_key means \"beginning of table\"", "start_key", "=", "self", ".", "_start_key_set", "(", "row_range", ")", "# if start_key was already read or doesn't exist,", "# create a row_range with last_scanned_key as start_key_open", "# to be passed to retry request", "retry_row_range", "=", "row_range", "if", "self", ".", "_key_already_read", "(", "start_key", ")", ":", "retry_row_range", "=", "copy", ".", "deepcopy", "(", "row_range", ")", "retry_row_range", ".", "start_key_closed", "=", "_to_bytes", "(", "\"\"", ")", "retry_row_range", ".", "start_key_open", "=", "self", ".", "last_scanned_key", "new_row_ranges", ".", "append", "(", "retry_row_range", ")", "return", "new_row_ranges" ]
44.7
21
def get(self, action, params=None, headers=None): """Makes a GET request """ return self.request(make_url(self.endpoint, action), method='GET', data=params, headers=headers)
[ "def", "get", "(", "self", ",", "action", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "request", "(", "make_url", "(", "self", ".", "endpoint", ",", "action", ")", ",", "method", "=", "'GET'", ",", "data", "=", "params", ",", "headers", "=", "headers", ")" ]
44.2
12
def get_instance(self, payload): """ Build an instance of KeyInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.key.KeyInstance :rtype: twilio.rest.api.v2010.account.key.KeyInstance """ return KeyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "KeyInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
37.1
20.5
def _calc_hypothesis_probability(hypothesis): """ Get the probability (or rather a score) of a hypothesis. Parameters ---------- hypothesis : dict with keys 'segmentation', 'symbols', ... Returns ------- float in [0.0, 1.0] """ prob = 0.0 for symbol, seg in zip(hypothesis['symbols'], hypothesis['segmentation']): # symbol_latex = symbol['symbol'].split(";")[1] # TODO: Does p_strokes really improve the system? prob += symbol['probability'] # * p_strokes(symbol_latex, len(seg)) # Use language model to update probabilities pure_symbols = [symbol['symbol'].split(";")[1] for symbol in hypothesis['symbols']] pure_symbols = ["<s>"] + pure_symbols + ["</s>"] lm_prob = language_model.get_probability(pure_symbols) hypothesis['lm_probability'] = 2**lm_prob return (prob * float(hypothesis['lm_probability']) * (1.0 / len(hypothesis['segmentation'])))
[ "def", "_calc_hypothesis_probability", "(", "hypothesis", ")", ":", "prob", "=", "0.0", "for", "symbol", ",", "seg", "in", "zip", "(", "hypothesis", "[", "'symbols'", "]", ",", "hypothesis", "[", "'segmentation'", "]", ")", ":", "# symbol_latex = symbol['symbol'].split(\";\")[1]", "# TODO: Does p_strokes really improve the system?", "prob", "+=", "symbol", "[", "'probability'", "]", "# * p_strokes(symbol_latex, len(seg))", "# Use language model to update probabilities", "pure_symbols", "=", "[", "symbol", "[", "'symbol'", "]", ".", "split", "(", "\";\"", ")", "[", "1", "]", "for", "symbol", "in", "hypothesis", "[", "'symbols'", "]", "]", "pure_symbols", "=", "[", "\"<s>\"", "]", "+", "pure_symbols", "+", "[", "\"</s>\"", "]", "lm_prob", "=", "language_model", ".", "get_probability", "(", "pure_symbols", ")", "hypothesis", "[", "'lm_probability'", "]", "=", "2", "**", "lm_prob", "return", "(", "prob", "*", "float", "(", "hypothesis", "[", "'lm_probability'", "]", ")", "*", "(", "1.0", "/", "len", "(", "hypothesis", "[", "'segmentation'", "]", ")", ")", ")" ]
34.464286
20.035714
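A hand computation of the score with made-up numbers, to make the three factors explicit:

prob = 0.9 + 0.8            # summed symbol probabilities
lm_probability = 2 ** -3    # language model returned log2-probability -3
score = prob * lm_probability * (1.0 / 2)   # segmentation of length 2
print(score)                # 1.7 * 0.125 * 0.5 = 0.10625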
def construct_txt_file(self): """Construct the header of the txt file""" textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper(), ] textlines.append("=" * len(textlines[0])) textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__)) textlines.append('If you are using PLIP in your work, please cite:') textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.') textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n') if len(self.excluded) != 0: textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded])) if config.DNARECEPTOR: textlines.append('DNA/RNA in structure was chosen as the receptor part.\n') return textlines
[ "def", "construct_txt_file", "(", "self", ")", ":", "textlines", "=", "[", "'Prediction of noncovalent interactions for PDB structure %s'", "%", "self", ".", "mol", ".", "pymol_name", ".", "upper", "(", ")", ",", "]", "textlines", ".", "append", "(", "\"=\"", "*", "len", "(", "textlines", "[", "0", "]", ")", ")", "textlines", ".", "append", "(", "'Created on %s using PLIP v%s\\n'", "%", "(", "time", ".", "strftime", "(", "\"%Y/%m/%d\"", ")", ",", "__version__", ")", ")", "textlines", ".", "append", "(", "'If you are using PLIP in your work, please cite:'", ")", "textlines", ".", "append", "(", "'Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.'", ")", "textlines", ".", "append", "(", "'Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\\n'", ")", "if", "len", "(", "self", ".", "excluded", ")", "!=", "0", ":", "textlines", ".", "append", "(", "'Excluded molecules as ligands: %s\\n'", "%", "','", ".", "join", "(", "[", "lig", "for", "lig", "in", "self", ".", "excluded", "]", ")", ")", "if", "config", ".", "DNARECEPTOR", ":", "textlines", ".", "append", "(", "'DNA/RNA in structure was chosen as the receptor part.\\n'", ")", "return", "textlines" ]
70.384615
36.076923
def get_video_image_storage(): """ Return the configured django storage backend. """ if hasattr(settings, 'VIDEO_IMAGE_SETTINGS'): return get_storage_class( settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_CLASS'), )(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {})) else: # during edx-platform loading this method gets called but settings are not ready yet # so in that case we will return default(FileSystemStorage) storage class instance return get_storage_class()()
[ "def", "get_video_image_storage", "(", ")", ":", "if", "hasattr", "(", "settings", ",", "'VIDEO_IMAGE_SETTINGS'", ")", ":", "return", "get_storage_class", "(", "settings", ".", "VIDEO_IMAGE_SETTINGS", ".", "get", "(", "'STORAGE_CLASS'", ")", ",", ")", "(", "*", "*", "settings", ".", "VIDEO_IMAGE_SETTINGS", ".", "get", "(", "'STORAGE_KWARGS'", ",", "{", "}", ")", ")", "else", ":", "# during edx-platform loading this method gets called but settings are not ready yet", "# so in that case we will return default(FileSystemStorage) storage class instance", "return", "get_storage_class", "(", ")", "(", ")" ]
44.416667
18.583333
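Example Django settings that the helper above would consume; the storage class path and bucket details are placeholders, not from the source:

VIDEO_IMAGE_SETTINGS = {
    'STORAGE_CLASS': 'storages.backends.s3boto3.S3Boto3Storage',
    'STORAGE_KWARGS': {
        'bucket_name': 'my-video-images',
        'location': 'video-images/',
    },
}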
def _run_aws(cmd, region, opts, user, **kwargs): ''' Runs the given command against AWS. cmd Command to run region Region to execute cmd in opts Pass in from salt user Pass in from salt kwargs Key-value arguments to pass to the command ''' # These args need a specific key value that aren't # valid python parameter keys receipthandle = kwargs.pop('receipthandle', None) if receipthandle: kwargs['receipt-handle'] = receipthandle num = kwargs.pop('num', None) if num: kwargs['max-number-of-messages'] = num _formatted_args = [ '--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)] cmd = 'aws sqs {cmd} {args} {region} {out}'.format( cmd=cmd, args=' '.join(_formatted_args), region=_region(region), out=_OUTPUT) rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False) return salt.utils.json.loads(rtn) if rtn else ''
[ "def", "_run_aws", "(", "cmd", ",", "region", ",", "opts", ",", "user", ",", "*", "*", "kwargs", ")", ":", "# These args need a specific key value that aren't", "# valid python parameter keys", "receipthandle", "=", "kwargs", ".", "pop", "(", "'receipthandle'", ",", "None", ")", "if", "receipthandle", ":", "kwargs", "[", "'receipt-handle'", "]", "=", "receipthandle", "num", "=", "kwargs", ".", "pop", "(", "'num'", ",", "None", ")", "if", "num", ":", "kwargs", "[", "'max-number-of-messages'", "]", "=", "num", "_formatted_args", "=", "[", "'--{0} \"{1}\"'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "kwargs", ")", "]", "cmd", "=", "'aws sqs {cmd} {args} {region} {out}'", ".", "format", "(", "cmd", "=", "cmd", ",", "args", "=", "' '", ".", "join", "(", "_formatted_args", ")", ",", "region", "=", "_region", "(", "region", ")", ",", "out", "=", "_OUTPUT", ")", "rtn", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "runas", "=", "user", ",", "python_shell", "=", "False", ")", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "rtn", ")", "if", "rtn", "else", "''" ]
27.657143
20.4
def link_to(self, model, idx, self_idx):
        """
        Register (self._name, self_idx) pairs in the `mdl_from` list of the
        model or group given by `model`

        Returns
        -------
        None
        """
        if model in self.system.loaded_groups:
            # access group instance
            grp = self.system.__dict__[model]

            # doing it one by one
            for i, self_i in zip(idx, self_idx):
                # query model name and access model instance
                mdl_name = grp._idx_model[i]
                mdl = self.system.__dict__[mdl_name]

                # query the corresponding uid
                u = mdl.get_uid(i)

                # update `mdl_from`
                name_idx_pair = (self._name, self_i)
                if name_idx_pair not in mdl.mdl_from[u]:
                    mdl.mdl_from[u].append(name_idx_pair)

        else:
            # access model instance
            mdl = self.system.__dict__[model]
            uid = mdl.get_uid(idx)

            for u, self_i in zip(uid, self_idx):
                name_idx_pair = (self._name, self_i)
                if name_idx_pair not in mdl.mdl_from[u]:
                    mdl.mdl_from[u].append(name_idx_pair)
[ "def", "link_to", "(", "self", ",", "model", ",", "idx", ",", "self_idx", ")", ":", "if", "model", "in", "self", ".", "system", ".", "loaded_groups", ":", "# access group instance", "grp", "=", "self", ".", "system", ".", "__dict__", "[", "model", "]", "# doing it one by one", "for", "i", ",", "self_i", "in", "zip", "(", "idx", ",", "self_idx", ")", ":", "# query model name and access model instance", "mdl_name", "=", "grp", ".", "_idx_model", "[", "i", "]", "mdl", "=", "self", ".", "system", ".", "__dict__", "[", "mdl_name", "]", "# query the corresponding uid", "u", "=", "mdl", ".", "get_uid", "(", "i", ")", "# update `mdl_from`", "name_idx_pair", "=", "(", "self", ".", "_name", ",", "self_i", ")", "if", "name_idx_pair", "not", "in", "mdl", ".", "mdl_from", "[", "u", "]", ":", "mdl", ".", "mdl_from", "[", "u", "]", ".", "append", "(", "name_idx_pair", ")", "else", ":", "# access model instance", "mdl", "=", "self", ".", "system", ".", "__dict__", "[", "model", "]", "uid", "=", "mdl", ".", "get_uid", "(", "idx", ")", "for", "u", ",", "self_i", "in", "zip", "(", "uid", ",", "self_idx", ")", ":", "name_idx_pair", "=", "(", "self", ".", "_name", ",", "self_i", ")", "if", "name_idx_pair", "not", "in", "mdl", ".", "mdl_from", "[", "u", "]", ":", "mdl", ".", "mdl_from", "[", "u", "]", ".", "append", "(", "name_idx_pair", ")" ]
30.513514
17.540541
def analyse_hydrogen_bonds_topology(self,distance=3):
        """
        MDAnalysis.analysis.hbonds module is used to analyse hydrogen bonds
        formed between protein and ligand. The hydrogen bonds are then counted
        by total value per frame (count_by_time), as well as obtaining the
        frequency of each individual hydrogen bond (count_by_type). This
        function is used when no trajectory has been submitted for analysis,
        i.e. only the topology coordinates are analysed.
        Takes:
            * distance * - distance between hydrogen bond donor and acceptor in angstroms
        Output:
            * self.hbonds * - array with information about all detected hydrogen bonds
            * self.hbonds_by_time * - total hbond number by frame
            * self.hbonds_by_type * - frequency of each hydrogen bond
        """
        # pass the caller's cutoff through instead of a hard-coded value
        h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(self.topology_data.universe,'(segid '+str(self.topology_data.universe.ligand.segids[0])+' and resid '+str(self.topology_data.universe.ligand.resids[0])+')',"protein",distance=distance,acceptors=self.acceptors,donors=self.donors)
        h.run()
        h.generate_table()
        self.hbonds[0]=h.table
        self.hbonds_by_time[0] = h.count_by_time()
        self.hbonds_by_type[0] = h.count_by_type()
[ "def", "analyse_hydrogen_bonds_topology", "(", "self", ",", "distance", "=", "3", ")", ":", "h", "=", "MDAnalysis", ".", "analysis", ".", "hbonds", ".", "HydrogenBondAnalysis", "(", "self", ".", "topology_data", ".", "universe", ",", "'(segid '", "+", "str", "(", "self", ".", "topology_data", ".", "universe", ".", "ligand", ".", "segids", "[", "0", "]", ")", "+", "' and resid '", "+", "str", "(", "self", ".", "topology_data", ".", "universe", ".", "ligand", ".", "resids", "[", "0", "]", ")", "+", "')'", ",", "\"protein\"", ",", "distance", "=", "3", ",", "acceptors", "=", "self", ".", "acceptors", ",", "donors", "=", "self", ".", "donors", ")", "h", ".", "run", "(", ")", "h", ".", "generate_table", "(", ")", "self", ".", "hbonds", "[", "0", "]", "=", "h", ".", "table", "self", ".", "hbonds_by_time", "[", "0", "]", "=", "h", ".", "count_by_time", "(", ")", "self", ".", "hbonds_by_type", "[", "0", "]", "=", "h", ".", "count_by_type", "(", ")" ]
63.3
40.2
def get_interface_detail_input_request_type_get_request_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail input = ET.SubElement(get_interface_detail, "input") request_type = ET.SubElement(input, "request-type") get_request = ET.SubElement(request_type, "get-request") interface_name = ET.SubElement(get_request, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_interface_detail_input_request_type_get_request_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "config", "=", "get_interface_detail", "input", "=", "ET", ".", "SubElement", "(", "get_interface_detail", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-request\"", ")", "interface_name", "=", "ET", ".", "SubElement", "(", "get_request", ",", "\"interface-name\"", ")", "interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
47.5
18.142857
def objective(self, params): """Compute the negative penalized log-likelihood.""" val = self._penalty * np.sum(params**2) for win, los in self._data: val += np.logaddexp(0, -(params[win] - params[los])) return val
[ "def", "objective", "(", "self", ",", "params", ")", ":", "val", "=", "self", ".", "_penalty", "*", "np", ".", "sum", "(", "params", "**", "2", ")", "for", "win", ",", "los", "in", "self", ".", "_data", ":", "val", "+=", "np", ".", "logaddexp", "(", "0", ",", "-", "(", "params", "[", "win", "]", "-", "params", "[", "los", "]", ")", ")", "return", "val" ]
42
11.666667
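A worked evaluation of the penalized negative log-likelihood above for a three-item Bradley-Terry model; the tiny namespace stands in for the real object:

import numpy as np
from types import SimpleNamespace

model = SimpleNamespace(_penalty=0.1,
                        _data=[(0, 1), (0, 2), (1, 2)])   # (winner, loser)
params = np.array([0.5, 0.0, -0.5])

val = model._penalty * np.sum(params ** 2)                # L2 penalty term
for win, los in model._data:
    val += np.logaddexp(0, -(params[win] - params[los]))  # -log sigmoid(diff)
print(val)   # ~1.31; a solver would minimize this over params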
def pool(self, host, port, db, pools={}, **options):
    '''
    Fetch a redis connection pool for the unique combination of host
    and port. Will create a new one if there isn't one already.
    '''
    key = (host, port, db)
    rval = pools.get(key)
    if not isinstance(rval, ConnectionPool):
        rval = ConnectionPool(host=host, port=port, db=db, **options)
        # the mutable default `pools` is shared across calls on purpose,
        # acting as a process-wide cache of connection pools
        pools[key] = rval

    return rval
[ "def", "pool", "(", "self", ",", "host", ",", "port", ",", "db", ",", "pools", "=", "{", "}", ",", "*", "*", "options", ")", ":", "key", "=", "(", "host", ",", "port", ",", "db", ")", "rval", "=", "pools", ".", "get", "(", "key", ")", "if", "not", "isinstance", "(", "rval", ",", "ConnectionPool", ")", ":", "rval", "=", "ConnectionPool", "(", "host", "=", "host", ",", "port", "=", "port", ",", "db", "=", "db", ",", "*", "*", "options", ")", "pools", "[", "key", "]", "=", "rval", "return", "rval" ]
36.75
21.916667
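A usage sketch showing the caching effect of the mutable default argument; the client object is a placeholder:

p1 = client.pool('localhost', 6379, 0)
p2 = client.pool('localhost', 6379, 0)
assert p1 is p2                           # same cached ConnectionPool
p3 = client.pool('localhost', 6379, 1)    # different db -> new pool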
async def send_script(self, conn_id, data):
        """Send a script to a device.

        See :meth:`AbstractDeviceAdapter.send_script`.
        """

        progress_callback = functools.partial(_on_progress, self, 'script', conn_id)

        resp = await self._execute(self._adapter.send_script_sync, conn_id, data, progress_callback)
        _raise_error(conn_id, 'send_script', resp)
[ "async", "def", "send_script", "(", "self", ",", "conn_id", ",", "data", ")", ":", "progress_callback", "=", "functools", ".", "partial", "(", "_on_progress", ",", "self", ",", "'script'", ",", "conn_id", ")", "resp", "=", "await", "self", ".", "_execute", "(", "self", ".", "_adapter", ".", "send_script_sync", ",", "conn_id", ",", "data", ",", "progress_callback", ")", "_raise_error", "(", "conn_id", ",", "'send_rpc'", ",", "resp", ")" ]
37.8
24.8
def get_changed_devices(self, timestamp):
        """Get data since last timestamp.

        This is done via a blocking call, pass None for initial state.
        """
        if timestamp is None:
            payload = {}
        else:
            payload = {
                'timeout': SUBSCRIPTION_WAIT,
                'minimumdelay': SUBSCRIPTION_MIN_WAIT
            }
            payload.update(timestamp)

        # double the timeout here so requests doesn't timeout before vera
        payload.update({
            'id': 'lu_sdata',
        })

        logger.debug("get_changed_devices() requesting payload %s", str(payload))
        r = self.data_request(payload, TIMEOUT*2)
        r.raise_for_status()

        # If the Vera disconnects before writing a full response (as lu_sdata
        # will do when interrupted by a Luup reload), the requests module will
        # happily return 200 with an empty string. So, test for empty response,
        # so we don't rely on the JSON parser to throw an exception.
        if r.text == "":
            raise PyveraError("Empty response from Vera")

        # Catch a wide swath of what the JSON parser might throw, within
        # reason. Unfortunately, some parsers don't specifically return
        # json.decode.JSONDecodeError, but so far most seem to derive what
        # they do throw from ValueError, so that's helpful.
        try:
            result = r.json()
        except ValueError as ex:
            raise PyveraError("JSON decode error: " + str(ex))

        if not ( type(result) is dict and 'loadtime' in result and 'dataversion' in result ):
            raise PyveraError("Unexpected/garbled response from Vera")

        # At this point, all good. Update timestamp and return change data.
        device_data = result.get('devices')
        timestamp = {
            'loadtime': result.get('loadtime'),
            'dataversion': result.get('dataversion')
        }
        return [device_data, timestamp]
[ "def", "get_changed_devices", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "payload", "=", "{", "}", "else", ":", "payload", "=", "{", "'timeout'", ":", "SUBSCRIPTION_WAIT", ",", "'minimumdelay'", ":", "SUBSCRIPTION_MIN_WAIT", "}", "payload", ".", "update", "(", "timestamp", ")", "# double the timeout here so requests doesn't timeout before vera", "payload", ".", "update", "(", "{", "'id'", ":", "'lu_sdata'", ",", "}", ")", "logger", ".", "debug", "(", "\"get_changed_devices() requesting payload %s\"", ",", "str", "(", "payload", ")", ")", "r", "=", "self", ".", "data_request", "(", "payload", ",", "TIMEOUT", "*", "2", ")", "r", ".", "raise_for_status", "(", ")", "# If the Vera disconnects before writing a full response (as lu_sdata", "# will do when interrupted by a Luup reload), the requests module will", "# happily return 200 with an empty string. So, test for empty response,", "# so we don't rely on the JSON parser to throw an exception.", "if", "r", ".", "text", "==", "\"\"", ":", "raise", "PyveraError", "(", "\"Empty response from Vera\"", ")", "# Catch a wide swath of what the JSON parser might throw, within", "# reason. Unfortunately, some parsers don't specifically return", "# json.decode.JSONDecodeError, but so far most seem to derive what", "# they do throw from ValueError, so that's helpful.", "try", ":", "result", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "ex", ":", "raise", "PyveraError", "(", "\"JSON decode error: \"", "+", "str", "(", "ex", ")", ")", "if", "not", "(", "type", "(", "result", ")", "is", "dict", "and", "'loadtime'", "in", "result", "and", "'dataversion'", "in", "result", ")", ":", "raise", "PyveraError", "(", "\"Unexpected/garbled response from Vera\"", ")", "# At this point, all good. Update timestamp and return change data.", "device_data", "=", "result", ".", "get", "(", "'devices'", ")", "timestamp", "=", "{", "'loadtime'", ":", "result", ".", "get", "(", "'loadtime'", ")", ",", "'dataversion'", ":", "result", ".", "get", "(", "'dataversion'", ")", "}", "return", "[", "device_data", ",", "timestamp", "]" ]
39.857143
21.938776
def register_plugin(self): """Register plugin in Spyder's main window.""" self.focus_changed.connect(self.main.plugin_focus_changed) self.main.add_dockwidget(self) self.ipyconsole = self.main.ipyconsole self.create_new_client(give_focus=False) icon_path = os.path.join(PACKAGE_PATH, 'images', 'icon.svg') self.main.add_to_fileswitcher(self, self.tabwidget, self.clients, QIcon(icon_path)) self.recent_notebook_menu.aboutToShow.connect(self.setup_menu_actions)
[ "def", "register_plugin", "(", "self", ")", ":", "self", ".", "focus_changed", ".", "connect", "(", "self", ".", "main", ".", "plugin_focus_changed", ")", "self", ".", "main", ".", "add_dockwidget", "(", "self", ")", "self", ".", "ipyconsole", "=", "self", ".", "main", ".", "ipyconsole", "self", ".", "create_new_client", "(", "give_focus", "=", "False", ")", "icon_path", "=", "os", ".", "path", ".", "join", "(", "PACKAGE_PATH", ",", "'images'", ",", "'icon.svg'", ")", "self", ".", "main", ".", "add_to_fileswitcher", "(", "self", ",", "self", ".", "tabwidget", ",", "self", ".", "clients", ",", "QIcon", "(", "icon_path", ")", ")", "self", ".", "recent_notebook_menu", ".", "aboutToShow", ".", "connect", "(", "self", ".", "setup_menu_actions", ")" ]
56.1
17.4
def get_bandstructure_by_material_id(self, material_id, line_mode=True): """ Get a BandStructure corresponding to a material_id. REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform Args: material_id (str): Materials Project material_id. line_mode (bool): If True, fetch a BandStructureSymmLine object (default). If False, return the uniform band structure. Returns: A BandStructure object. """ prop = "bandstructure" if line_mode else "bandstructure_uniform" data = self.get_data(material_id, prop=prop) return data[0][prop]
[ "def", "get_bandstructure_by_material_id", "(", "self", ",", "material_id", ",", "line_mode", "=", "True", ")", ":", "prop", "=", "\"bandstructure\"", "if", "line_mode", "else", "\"bandstructure_uniform\"", "data", "=", "self", ".", "get_data", "(", "material_id", ",", "prop", "=", "prop", ")", "return", "data", "[", "0", "]", "[", "prop", "]" ]
42.888889
27
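A hedged usage sketch for the method above: it is exposed on pymatgen's MPRester client, so a call typically looks like the following ("mp-149" is silicon, the API key is a placeholder, and the import path varies across pymatgen versions):

from pymatgen.ext.matproj import MPRester

with MPRester("YOUR_API_KEY") as mpr:
    # line-mode band structure (the default)
    bs_line = mpr.get_bandstructure_by_material_id("mp-149")
    # uniform band structure
    bs_uniform = mpr.get_bandstructure_by_material_id("mp-149", line_mode=False)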
def perform_oauth(email, master_token, android_id, service, app, client_sig,
                  device_country='us', operatorCountry='us', lang='en',
                  sdk_version=17):
    """
    Use a master token from master_login to perform OAuth to a specific Google service.

    Return a dict, e.g.::

        {
            'Auth': '...',
            'LSID': '...',
            'SID': '..',
            'issueAdvice': 'auto',
            'services': 'hist,mail,googleme,...'
        }

    To authenticate requests to this service, include a header
    ``Authorization: GoogleLogin auth=res['Auth']``.
    """

    data = {
        'accountType': 'HOSTED_OR_GOOGLE',
        'Email': email,
        'has_permission': 1,
        'EncryptedPasswd': master_token,
        'service': service,
        'source': 'android',
        'androidId': android_id,
        'app': app,
        'client_sig': client_sig,
        'device_country': device_country,
        'operatorCountry': device_country,
        'lang': lang,
        'sdk_version': sdk_version
    }

    return _perform_auth_request(data)
[ "def", "perform_oauth", "(", "email", ",", "master_token", ",", "android_id", ",", "service", ",", "app", ",", "client_sig", ",", "device_country", "=", "'us'", ",", "operatorCountry", "=", "'us'", ",", "lang", "=", "'en'", ",", "sdk_version", "=", "17", ")", ":", "data", "=", "{", "'accountType'", ":", "'HOSTED_OR_GOOGLE'", ",", "'Email'", ":", "email", ",", "'has_permission'", ":", "1", ",", "'EncryptedPasswd'", ":", "master_token", ",", "'service'", ":", "service", ",", "'source'", ":", "'android'", ",", "'androidId'", ":", "android_id", ",", "'app'", ":", "app", ",", "'client_sig'", ":", "client_sig", ",", "'device_country'", ":", "device_country", ",", "'operatorCountry'", ":", "device_country", ",", "'lang'", ":", "lang", ",", "'sdk_version'", ":", "sdk_version", "}", "return", "_perform_auth_request", "(", "data", ")" ]
28.342105
17.973684
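A usage sketch under the assumption that this is the gpsoauth-style flow the docstring describes: the master token comes from a prior master login, and every concrete value below is a placeholder. Only the Authorization header format is taken from the docstring itself.

master_token = "aas_et/..."          # placeholder from a prior master login
android_id = "0123456789abcdef"      # placeholder device id

res = perform_oauth(
    "user@example.com", master_token, android_id,
    service="oauth2:https://www.googleapis.com/auth/userinfo.email",
    app="com.example.app",
    client_sig="0" * 40,             # placeholder app signature
)
headers = {"Authorization": "GoogleLogin auth=" + res["Auth"]}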
def modified(self): """Union[datetime.datetime, None]: Datetime at which the dataset was last modified (:data:`None` until set from the server). """ modified_time = self._properties.get("lastModifiedTime") if modified_time is not None: # modified_time will be in milliseconds. return google.cloud._helpers._datetime_from_microseconds( 1000.0 * float(modified_time) )
[ "def", "modified", "(", "self", ")", ":", "modified_time", "=", "self", ".", "_properties", ".", "get", "(", "\"lastModifiedTime\"", ")", "if", "modified_time", "is", "not", "None", ":", "# modified_time will be in milliseconds.", "return", "google", ".", "cloud", ".", "_helpers", ".", "_datetime_from_microseconds", "(", "1000.0", "*", "float", "(", "modified_time", ")", ")" ]
44.9
14.4
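The property above converts a millisecond timestamp by scaling it to microseconds for the google.cloud helper. An equivalent standard-library sketch (the sample value is illustrative, and this variant returns a naive UTC datetime rather than the helper's timezone-aware one):

import datetime

millis = "1546300800000"  # e.g. lastModifiedTime for 2019-01-01T00:00:00Z
dt = datetime.datetime.utcfromtimestamp(float(millis) / 1000.0)
# -> datetime.datetime(2019, 1, 1, 0, 0)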
def save(self, filepath):
        """
        Saves the classifier to `filepath`. Because this classifier needs
        to save the dataset, the dataset must be something that can be
        pickled and not something like an iterator.
        """
        if not filepath or not isinstance(filepath, str):
            raise ValueError("Invalid filepath")
        with open(filepath, "wb") as filehandler:
            pickle.dump(self, filehandler)
[ "def", "save", "(", "self", ",", "filepath", ")", ":", "if", "not", "filepath", "or", "not", "isinstance", "(", "filepath", ",", "str", ")", ":", "raise", "ValueError", "(", "\"Invalid filepath\"", ")", "with", "open", "(", "filepath", ",", "\"wb\"", ")", "as", "filehandler", ":", "pickle", ".", "dump", "(", "self", ",", "filehandler", ")" ]
33.461538
16.076923
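A round-trip sketch for the pickle-based persistence above; the load counterpart is hypothetical and not part of the source:

import pickle

def load(filepath):
    # Inverse of save(): unpickle a previously saved classifier.
    with open(filepath, "rb") as filehandler:
        return pickle.load(filehandler)

# classifier.save("model.pkl")
# classifier = load("model.pkl")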
def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None): """ Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$. Args: fn_x: A callable returning the value $f(x)$ at $x$. x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None. Returns: A solution $x$ to the problem as given by the solver. """ return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)
[ "def", "tf_solve", "(", "self", ",", "fn_x", ",", "x_init", ",", "base_value", ",", "target_value", ",", "estimated_improvement", "=", "None", ")", ":", "return", "super", "(", "LineSearch", ",", "self", ")", ".", "tf_solve", "(", "fn_x", ",", "x_init", ",", "base_value", ",", "target_value", ",", "estimated_improvement", ")" ]
46.466667
27.8
def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " GetHost " + "diskpoolnames") rh.printLn("N", " python " + rh.cmdName + " GetHost " + "diskpoolspace <poolName>") rh.printLn("N", " python " + rh.cmdName + " GetHost fcpdevices") rh.printLn("N", " python " + rh.cmdName + " GetHost general") rh.printLn("N", " python " + rh.cmdName + " GetHost help") rh.printLn("N", " python " + rh.cmdName + " GetHost version") return
[ "def", "showInvLines", "(", "rh", ")", ":", "if", "rh", ".", "subfunction", "!=", "''", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "\"Usage:\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost \"", "+", "\"diskpoolnames\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost \"", "+", "\"diskpoolspace <poolName>\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost fcpdevices\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost general\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost help\"", ")", "rh", ".", "printLn", "(", "\"N\"", ",", "\" python \"", "+", "rh", ".", "cmdName", "+", "\" GetHost version\"", ")", "return" ]
33.210526
19.631579
def dependency_sort(dependency_tree):
    """
    Sorts items 'dependencies first' in a given dependency tree.

    A dependency tree is a dictionary mapping an object to a collection of
    its dependency objects.

    The result is a properly sorted list of items, where each item is a
    2-tuple containing an object and its dependency list, as given in the
    input dependency tree.

    If B is directly or indirectly dependent on A and they are not both a
    part of the same dependency cycle (i.e. A is neither directly nor
    indirectly dependent on B), then A needs to come before B. If A and B
    are a part of the same dependency cycle, i.e. if they are both directly
    or indirectly dependent on each other, then it does not matter which
    comes first.

    Any entries that appear as dependencies but do not have their own
    dependencies listed as well are logged & ignored.

    @return: The sorted items.
    @rtype: list

    """
    sorted = []
    processed = set()
    for key, deps in dependency_tree.items():
        _sort_r(sorted, processed, key, deps, dependency_tree)
    return sorted
[ "def", "dependency_sort", "(", "dependency_tree", ")", ":", "sorted", "=", "[", "]", "processed", "=", "set", "(", ")", "for", "key", ",", "deps", "in", "dependency_tree", ".", "iteritems", "(", ")", ":", "_sort_r", "(", "sorted", ",", "processed", ",", "key", ",", "deps", ",", "dependency_tree", ")", "return", "sorted" ]
35.516129
25.967742
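A worked example of the contract described in the docstring (illustrative data): with B depending on A and C depending on B, A must precede B, which must precede C, and each output item pairs the object with its dependency list.

tree = {"A": [], "B": ["A"], "C": ["B"]}
# dependency_sort(tree) -> [("A", []), ("B", ["A"]), ("C", ["B"])]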
def deregister(self, key):
        """
        Deregisters an existing key.

        `key`
            String key to deregister.

        Returns boolean.
        """
        if key not in self._actions:
            return False

        del self._actions[key]

        if key in self._cache:
            del self._cache[key]

        return True
[ "def", "deregister", "(", "self", ",", "key", ")", ":", "if", "not", "key", "in", "self", ".", "_actions", ":", "return", "False", "del", "self", ".", "_actions", "[", "key", "]", "if", "key", "in", "self", ".", "_cache", ":", "del", "self", ".", "_cache", "[", "key", "]", "return", "True" ]
18.777778
19.944444
async def allPurgeRequests(self, *args, **kwargs):
        """
        All Open Purge Requests

        This is useful mostly for administrators to view
        the set of open purge requests. It should not
        be used by workers. They should use the purgeRequests
        endpoint that is specific to their workerType and
        provisionerId.

        This method gives output: ``v1/all-purge-cache-request-list.json#``

        This method is ``stable``
        """

        return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
[ "async", "def", "allPurgeRequests", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"allPurgeRequests\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
34.1875
22.0625
def _return_base_data(self, url, container, container_object=None,
                          container_headers=None, object_headers=None):
        """Return headers and a parsed url.

        :param url: parsed endpoint URL (a ``urlparse`` result)
        :param container: container name
        :param container_object: object name within the container
        :param container_headers: extra headers to apply to the container
        :param object_headers: extra headers to apply to the object
        :return: ``tuple`` of (headers, parsed container URI)
        """
        headers = self.job_args['base_headers']
        headers.update({'X-Auth-Token': self.job_args['os_token']})
        _container_uri = url.geturl().rstrip('/')

        if container:
            _container_uri = '%s/%s' % (
                _container_uri, cloud_utils.quoter(container)
            )

        if container_object:
            _container_uri = '%s/%s' % (
                _container_uri, cloud_utils.quoter(container_object)
            )

        if object_headers:
            headers.update(object_headers)

        if container_headers:
            headers.update(container_headers)

        return headers, urlparse.urlparse(_container_uri)
[ "def", "_return_base_data", "(", "self", ",", "url", ",", "container", ",", "container_object", "=", "None", ",", "container_headers", "=", "None", ",", "object_headers", "=", "None", ")", ":", "headers", "=", "self", ".", "job_args", "[", "'base_headers'", "]", "headers", ".", "update", "(", "{", "'X-Auth-Token'", ":", "self", ".", "job_args", "[", "'os_token'", "]", "}", ")", "_container_uri", "=", "url", ".", "geturl", "(", ")", ".", "rstrip", "(", "'/'", ")", "if", "container", ":", "_container_uri", "=", "'%s/%s'", "%", "(", "_container_uri", ",", "cloud_utils", ".", "quoter", "(", "container", ")", ")", "if", "container_object", ":", "_container_uri", "=", "'%s/%s'", "%", "(", "_container_uri", ",", "cloud_utils", ".", "quoter", "(", "container_object", ")", ")", "if", "object_headers", ":", "headers", ".", "update", "(", "object_headers", ")", "if", "container_headers", ":", "headers", ".", "update", "(", "container_headers", ")", "return", "headers", ",", "urlparse", ".", "urlparse", "(", "_container_uri", ")" ]
30.375
19.625
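An illustrative composition of the container URI built above, assuming cloud_utils.quoter URL-encodes a path segment the way urllib's quote does; the endpoint and names are placeholders:

import urllib.parse

base = "https://storage.example.com/v1/AUTH_abc"
uri = "%s/%s/%s" % (base,
                    urllib.parse.quote("my container"),
                    urllib.parse.quote("a/b.txt"))
# -> https://storage.example.com/v1/AUTH_abc/my%20container/a/b.txt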
def modify_conf(cfgfile, service_name, outfn):
    """Modify the neutron and keystone config files to include enabler options."""
    if not cfgfile or not outfn:
        print('ERROR: There is no config file.')
        sys.exit(0)

    options = service_options[service_name]
    with open(cfgfile, 'r') as cf:
        lines = cf.readlines()

    for opt in options:
        op = opt.get('option')
        res = [line for line in lines if line.startswith(op)]
        if len(res) > 1:
            print('ERROR: There are more than one %s option.' % res)
            sys.exit(0)

        if res:
            (op, sep, val) = (res[0].strip('\n').replace(' ', '').
                              partition('='))
            new_val = None
            if opt.get('is_list'):
                # Value for this option can contain list of values.
                # Append the value if it does not exist.
                if not any(opt.get('value') == value
                           for value in val.split(',')):
                    new_val = ','.join((val, opt.get('value')))
            else:
                if val != opt.get('value'):
                    new_val = opt.get('value')

            if new_val:
                opt_idx = lines.index(res[0])
                # The setting is different, replace it with new one.
                lines.pop(opt_idx)
                lines.insert(opt_idx, '='.join((opt.get('option'),
                                                new_val + '\n')))
        else:
            # Option does not exist. Add the option.
            try:
                sec_idx = lines.index('[' + opt.get('section') + ']\n')
                lines.insert(sec_idx + 1, '='.join(
                    (opt.get('option'), opt.get('value') + '\n')))
            except ValueError:
                print('Invalid %s section name.' % opt.get('section'))
                sys.exit(0)

    with open(outfn, 'w') as fwp:
        all_lines = ''
        for line in lines:
            all_lines += line
        fwp.write(all_lines)
[ "def", "modify_conf", "(", "cfgfile", ",", "service_name", ",", "outfn", ")", ":", "if", "not", "cfgfile", "or", "not", "outfn", ":", "print", "(", "'ERROR: There is no config file.'", ")", "sys", ".", "exit", "(", "0", ")", "options", "=", "service_options", "[", "service_name", "]", "with", "open", "(", "cfgfile", ",", "'r'", ")", "as", "cf", ":", "lines", "=", "cf", ".", "readlines", "(", ")", "for", "opt", "in", "options", ":", "op", "=", "opt", ".", "get", "(", "'option'", ")", "res", "=", "[", "line", "for", "line", "in", "lines", "if", "line", ".", "startswith", "(", "op", ")", "]", "if", "len", "(", "res", ")", ">", "1", ":", "print", "(", "'ERROR: There are more than one %s option.'", "%", "res", ")", "sys", ".", "exit", "(", "0", ")", "if", "res", ":", "(", "op", ",", "sep", ",", "val", ")", "=", "(", "res", "[", "0", "]", ".", "strip", "(", "'\\n'", ")", ".", "replace", "(", "' '", ",", "''", ")", ".", "partition", "(", "'='", ")", ")", "new_val", "=", "None", "if", "opt", ".", "get", "(", "'is_list'", ")", ":", "# Value for this option can contain list of values.", "# Append the value if it does not exist.", "if", "not", "any", "(", "opt", ".", "get", "(", "'value'", ")", "==", "value", "for", "value", "in", "val", ".", "split", "(", "','", ")", ")", ":", "new_val", "=", "','", ".", "join", "(", "(", "val", ",", "opt", ".", "get", "(", "'value'", ")", ")", ")", "else", ":", "if", "val", "!=", "opt", ".", "get", "(", "'value'", ")", ":", "new_val", "=", "opt", ".", "get", "(", "'value'", ")", "if", "new_val", ":", "opt_idx", "=", "lines", ".", "index", "(", "res", "[", "0", "]", ")", "# The setting is different, replace it with new one.", "lines", ".", "pop", "(", "opt_idx", ")", "lines", ".", "insert", "(", "opt_idx", ",", "'='", ".", "join", "(", "(", "opt", ".", "get", "(", "'option'", ")", ",", "new_val", "+", "'\\n'", ")", ")", ")", "else", ":", "# Option does not exist. Add the option.", "try", ":", "sec_idx", "=", "lines", ".", "index", "(", "'['", "+", "opt", ".", "get", "(", "'section'", ")", "+", "']\\n'", ")", "lines", ".", "insert", "(", "sec_idx", "+", "1", ",", "'='", ".", "join", "(", "(", "opt", ".", "get", "(", "'option'", ")", ",", "opt", ".", "get", "(", "'value'", ")", "+", "'\\n'", ")", ")", ")", "except", "ValueError", ":", "print", "(", "'Invalid %s section name.'", "%", "opt", ".", "get", "(", "'section'", ")", ")", "sys", ".", "exit", "(", "0", ")", "with", "open", "(", "outfn", ",", "'w'", ")", "as", "fwp", ":", "all_lines", "=", "''", "for", "line", "in", "lines", ":", "all_lines", "+=", "line", "fwp", ".", "write", "(", "all_lines", ")" ]
37.692308
17.442308
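The function above reads a module-level service_options table through the keys section, option, value, and is_list. A hypothetical entry showing the shape it expects (names and values are illustrative, not from the source):

service_options = {
    "neutron": [
        {
            "section": "DEFAULT",          # INI section to patch
            "option": "service_plugins",   # option name matched by startswith
            "value": "my_enabler_plugin",  # value to set or append
            "is_list": True,               # append to a comma-separated list
        },
    ],
}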
def off_policy_train_batch(self, batch_info: BatchInfo):
        """ Perform an 'off-policy' training step: sample the replay buffer and take a gradient descent step """
        self.model.train()

        rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)

        batch_result = self.algo.optimizer_step(
            batch_info=batch_info,
            device=self.device,
            model=self.model,
            rollout=rollout
        )

        batch_info['sub_batch_data'].append(batch_result)
[ "def", "off_policy_train_batch", "(", "self", ",", "batch_info", ":", "BatchInfo", ")", ":", "self", ".", "model", ".", "train", "(", ")", "rollout", "=", "self", ".", "env_roller", ".", "sample", "(", "batch_info", ",", "self", ".", "model", ",", "self", ".", "settings", ".", "number_of_steps", ")", ".", "to_device", "(", "self", ".", "device", ")", "batch_result", "=", "self", ".", "algo", ".", "optimizer_step", "(", "batch_info", "=", "batch_info", ",", "device", "=", "self", ".", "device", ",", "model", "=", "self", ".", "model", ",", "rollout", "=", "rollout", ")", "batch_info", "[", "'sub_batch_data'", "]", ".", "append", "(", "batch_result", ")" ]
38.5
23.071429
def fetch_images(self, images, depth_level):
        """\
        download the images to temp disk and set their dimensions
        - we're going to score the images in the order in which they appear,
          so images higher up will have more importance
        - we'll count the area of the 1st image as a score of 1 and then
          calculate how much larger or smaller each image after it is
        - we'll also make sure to try and weed out banner-type ad blocks
          that have big widths and small heights, or vice versa
        - so if an image is the 3rd found in the DOM, its sequence score
          would be 1 / 3 = .33, multiplied by its difference in area from
          the first image
        """
        image_results = {}
        initial_area = float(0.0)
        total_score = float(0.0)
        cnt = float(1.0)
        MIN_WIDTH = 50
        for image in images[:30]:
            src = self.parser.getAttribute(image, attr='src')
            src = self.build_image_path(src)
            local_image = self.get_local_image(src)
            width = local_image.width
            height = local_image.height
            src = local_image.src
            file_extension = local_image.file_extension
            if file_extension != '.gif' and file_extension != 'NA':
                if (depth_level >= 1 and local_image.width > 300) or depth_level < 1:
                    if not self.is_banner_dimensions(width, height):
                        if width > MIN_WIDTH:
                            sequence_score = float(1.0 / cnt)
                            area = float(width * height)
                            total_score = float(0.0)

                            if initial_area == 0:
                                initial_area = area * float(1.48)
                                total_score = 1
                            else:
                                area_difference = float(area / initial_area)
                                total_score = sequence_score * area_difference
                            image_results.update({local_image: total_score})
                            cnt += 1
            cnt += 1
        return image_results
[ "def", "fetch_images", "(", "self", ",", "images", ",", "depth_level", ")", ":", "image_results", "=", "{", "}", "initial_area", "=", "float", "(", "0.0", ")", "total_score", "=", "float", "(", "0.0", ")", "cnt", "=", "float", "(", "1.0", ")", "MIN_WIDTH", "=", "50", "for", "image", "in", "images", "[", ":", "30", "]", ":", "src", "=", "self", ".", "parser", ".", "getAttribute", "(", "image", ",", "attr", "=", "'src'", ")", "src", "=", "self", ".", "build_image_path", "(", "src", ")", "local_image", "=", "self", ".", "get_local_image", "(", "src", ")", "width", "=", "local_image", ".", "width", "height", "=", "local_image", ".", "height", "src", "=", "local_image", ".", "src", "file_extension", "=", "local_image", ".", "file_extension", "if", "file_extension", "!=", "'.gif'", "or", "file_extension", "!=", "'NA'", ":", "if", "(", "depth_level", ">=", "1", "and", "local_image", ".", "width", ">", "300", ")", "or", "depth_level", "<", "1", ":", "if", "not", "self", ".", "is_banner_dimensions", "(", "width", ",", "height", ")", ":", "if", "width", ">", "MIN_WIDTH", ":", "sequence_score", "=", "float", "(", "1.0", "/", "cnt", ")", "area", "=", "float", "(", "width", "*", "height", ")", "total_score", "=", "float", "(", "0.0", ")", "if", "initial_area", "==", "0", ":", "initial_area", "=", "area", "*", "float", "(", "1.48", ")", "total_score", "=", "1", "else", ":", "area_difference", "=", "float", "(", "area", "/", "initial_area", ")", "total_score", "=", "sequence_score", "*", "area_difference", "image_results", ".", "update", "(", "{", "local_image", ":", "total_score", "}", ")", "cnt", "+=", "1", "cnt", "+=", "1", "return", "image_results" ]
46.23913
17.478261
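A worked pass through the scoring rule in the docstring (numbers are illustrative): the first image fixes initial_area at 1.48 times its own area and scores 1; a 3rd image with half that reference area gets sequence score 1/3 and area difference 0.5.

sequence_score = 1.0 / 3       # 3rd image found in the DOM
area_difference = 0.5          # this image's area / (first area * 1.48)
total_score = sequence_score * area_difference  # ~0.167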
def lockFile(self, fileName, byteOffset, length, dokanFileInfo): """Lock a file. :param fileName: name of file to lock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start lock :type byteOffset: ctypes.c_longlong :param length: number of bytes to lock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('lockFile', fileName, byteOffset, length)
[ "def", "lockFile", "(", "self", ",", "fileName", ",", "byteOffset", ",", "length", ",", "dokanFileInfo", ")", ":", "return", "self", ".", "operations", "(", "'lockFile'", ",", "fileName", ",", "byteOffset", ",", "length", ")" ]
35.9375
12.0625
def remove_edge_from_heap(self, segment_ids): """Remove an edge from the heap.""" self._initialize_heap() key = normalize_edge(segment_ids) if key in self.edge_map: self.edge_map[key][0] = None self.num_valid_edges -= 1
[ "def", "remove_edge_from_heap", "(", "self", ",", "segment_ids", ")", ":", "self", ".", "_initialize_heap", "(", ")", "key", "=", "normalize_edge", "(", "segment_ids", ")", "if", "key", "in", "self", ".", "edge_map", ":", "self", ".", "edge_map", "[", "key", "]", "[", "0", "]", "=", "None", "self", ".", "num_valid_edges", "-=", "1" ]
38.428571
3.714286
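The method above is the removal half of a lazy-deletion heap: rather than re-heapifying, it nulls slot 0 of the map entry and decrements a validity counter, leaving the stale entry to be skipped at pop time. A sketch of a matching pop side (hypothetical names; the source's actual pop is not shown):

import heapq

def pop_valid_edge(heap, edge_map):
    # Skip entries whose map slot was nulled by remove_edge_from_heap().
    while heap:
        _, key = heapq.heappop(heap)
        entry = edge_map.get(key)
        if entry is not None and entry[0] is not None:
            return key
    return None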
def QA_fetch_get_hkfund_list(ip=None, port=None):
    """[summary]

    Keyword Arguments:
        ip {[type]} -- [description] (default: {None})
        port {[type]} -- [description] (default: {None})

    # Hong Kong markets (HKMARKET)
    27 5 Hong Kong indices            FH
    31 2 Hong Kong main board         KH
    48 2 Hong Kong GEM board          KG
    49 2 Hong Kong funds              KT
    43 1 B-share to H-share transfer  HB
    """
    global extension_market_list
    extension_market_list = QA_fetch_get_extensionmarket_list(
    ) if extension_market_list is None else extension_market_list

    return extension_market_list.query('market==49')
[ "def", "QA_fetch_get_hkfund_list", "(", "ip", "=", "None", ",", "port", "=", "None", ")", ":", "global", "extension_market_list", "extension_market_list", "=", "QA_fetch_get_extensionmarket_list", "(", ")", "if", "extension_market_list", "is", "None", "else", "extension_market_list", "return", "extension_market_list", ".", "query", "(", "'market==49'", ")" ]
30.333333
16.761905
def default_versions(self, default_versions): ''' Set archive default read versions Parameters ---------- default_versions: dict Dictionary of archive_name, version pairs. On read/download, archives in this dictionary will download the specified version by default. Before assignment, archive_names are checked and normalized. ''' default_versions = { self._normalize_archive_name(arch)[1]: v for arch, v in default_versions.items()} self._default_versions = default_versions
[ "def", "default_versions", "(", "self", ",", "default_versions", ")", ":", "default_versions", "=", "{", "self", ".", "_normalize_archive_name", "(", "arch", ")", "[", "1", "]", ":", "v", "for", "arch", ",", "v", "in", "default_versions", ".", "items", "(", ")", "}", "self", ".", "_default_versions", "=", "default_versions" ]
33.166667
22.166667
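A hedged usage sketch, assuming this method is exposed as a property setter (the usual pattern for this signature) and that manager is an instance of the surrounding class; names and versions are placeholders:

manager.default_versions = {"My-Archive": "1.2", "other_archive": "0.9"}
# later reads of "My-Archive" fetch version "1.2" unless a version is
# requested explicitly; the key is normalized before being stored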
def set_sampling_strategy(self, sensor_name, strategy_and_params): """Set sampling strategy for the sensors of all the group's clients. Only sensors that match the specified filter are considered. See the `KATCPResource.set_sampling_strategies` docstring for parameter definitions and more info. Returns ------- sensors_strategies : tornado Future Resolves with a dict with client names as keys and with the value as another dict. The value dict is similar to the return value described in the `KATCPResource.set_sampling_strategies` docstring. """ futures_dict = {} for res_obj in self.clients: futures_dict[res_obj.name] = res_obj.set_sampling_strategy( sensor_name, strategy_and_params) sensors_strategies = yield futures_dict raise tornado.gen.Return(sensors_strategies)
[ "def", "set_sampling_strategy", "(", "self", ",", "sensor_name", ",", "strategy_and_params", ")", ":", "futures_dict", "=", "{", "}", "for", "res_obj", "in", "self", ".", "clients", ":", "futures_dict", "[", "res_obj", ".", "name", "]", "=", "res_obj", ".", "set_sampling_strategy", "(", "sensor_name", ",", "strategy_and_params", ")", "sensors_strategies", "=", "yield", "futures_dict", "raise", "tornado", ".", "gen", ".", "Return", "(", "sensors_strategies", ")" ]
45.7
20.85
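The method above leans on a tornado coroutine feature worth noting: yielding a dict whose values are futures resolves them all and preserves the keys. A minimal standalone sketch of that pattern, with do_work as a hypothetical per-client coroutine:

from tornado import gen

@gen.coroutine
def gather(clients):
    # Build a dict of futures, then yield the whole dict: tornado resumes
    # the coroutine once every future has resolved, keeping the same keys.
    futures = {c.name: c.do_work() for c in clients}
    results = yield futures
    raise gen.Return(results)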
def get_new_tag(self, api_tag): """ Instantiate a new Tag from api data. :param api_tag: the api data for the Tag :return: the new Tag """ return Tag(site_id=self.site_id, wp_id=api_tag["ID"], **self.api_object_data("tag", api_tag))
[ "def", "get_new_tag", "(", "self", ",", "api_tag", ")", ":", "return", "Tag", "(", "site_id", "=", "self", ".", "site_id", ",", "wp_id", "=", "api_tag", "[", "\"ID\"", "]", ",", "*", "*", "self", ".", "api_object_data", "(", "\"tag\"", ",", "api_tag", ")", ")" ]
31
9.2
def calculate_leaf_paths(self):
        """Build a map of reverse xrefs, then traverse backwards from every
        leaf, marking the path to that leaf."""
        reverse_xref = {}
        leaves = set()
        for v in self.value.values():
            if v.leaf:
                leaves.add(v)
            for xref in v.value_xref:
                reverse_xref.setdefault(xref, []).append(v.ident)
        for leaf in leaves:
            self.calculate_leaf_path(leaf, reverse_xref)
[ "def", "calculate_leaf_paths", "(", "self", ")", ":", "reverse_xref", "=", "{", "}", "leaves", "=", "set", "(", ")", "for", "v", "in", "self", ".", "value", ".", "values", "(", ")", ":", "if", "v", ".", "leaf", ":", "leaves", ".", "add", "(", "v", ")", "for", "xref", "in", "v", ".", "value_xref", ":", "reverse_xref", ".", "setdefault", "(", "xref", ",", "[", "]", ")", ".", "append", "(", "v", ".", "ident", ")", "for", "leaf", "in", "leaves", ":", "self", ".", "calculate_leaf_path", "(", "leaf", ",", "reverse_xref", ")" ]
38.333333
10.916667
def workflow_states_column(self, obj):
        """ Return text description of workflow states assigned to object """
        workflow_states = models.WorkflowState.objects.filter(
            content_type=self._get_obj_ct(obj),
            object_id=obj.pk,
        )
        return ', '.join([str(wfs) for wfs in workflow_states])
[ "def", "workflow_states_column", "(", "self", ",", "obj", ")", ":", "workflow_states", "=", "models", ".", "WorkflowState", ".", "objects", ".", "filter", "(", "content_type", "=", "self", ".", "_get_obj_ct", "(", "obj", ")", ",", "object_id", "=", "obj", ".", "pk", ",", ")", "return", "', '", ".", "join", "(", "[", "unicode", "(", "wfs", ")", "for", "wfs", "in", "workflow_states", "]", ")" ]
47
14.285714