Column schema:

    text            string   (lengths 89 to 104k)
    code_tokens     list
    avg_line_len    float64  (7.91 to 980)
    score           float64  (0 to 630)
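For orientation, a minimal sketch of what one record looks like under the schema above; the dictionary layout, the sample values, and the avg_line_len recomputation are illustrative assumptions, not part of the dataset's own tooling.

    # Hypothetical record following the columns listed above (values are examples).
    row = {
        "text": "def note_on(self, channel, note, velocity):\n    return self.midi_event(NOTE_ON, channel, note, velocity)",
        "code_tokens": ["def", "note_on", "(", "self", ",", "channel", ",", "note", ",", "velocity", ")", ":"],
        "avg_line_len": 52.0,
        "score": 9.0,
    }

    # avg_line_len presumably averages the character length of the source lines
    # (assumption); recomputing it from the text column is a quick sanity check.
    lines = row["text"].splitlines()
    print(sum(len(line) for line in lines) / max(len(lines), 1))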
def parameter_converter(*convert_types):
    ''' Decorator that denotes a function as a url parameter converter. '''
    def inner(func):
        for ct in convert_types:
            ParameterConverter._register_converter(func, ct)
        return func
    return inner
[ "def", "parameter_converter", "(", "*", "convert_types", ")", ":", "def", "inner", "(", "func", ")", ":", "for", "ct", "in", "convert_types", ":", "ParameterConverter", ".", "_register_converter", "(", "func", ",", "ct", ")", "return", "func", "return", "inner" ]
29.777778
20.666667
def maps_get_rules_output_rules_rbridgeid(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    maps_get_rules = ET.Element("maps_get_rules")
    config = maps_get_rules
    output = ET.SubElement(maps_get_rules, "output")
    rules = ET.SubElement(output, "rules")
    rbridgeid = ET.SubElement(rules, "rbridgeid")
    rbridgeid.text = kwargs.pop('rbridgeid')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "maps_get_rules_output_rules_rbridgeid", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_rules", "=", "ET", ".", "Element", "(", "\"maps_get_rules\"", ")", "config", "=", "maps_get_rules", "output", "=", "ET", ".", "SubElement", "(", "maps_get_rules", ",", "\"output\"", ")", "rules", "=", "ET", ".", "SubElement", "(", "output", ",", "\"rules\"", ")", "rbridgeid", "=", "ET", ".", "SubElement", "(", "rules", ",", "\"rbridgeid\"", ")", "rbridgeid", ".", "text", "=", "kwargs", ".", "pop", "(", "'rbridgeid'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
39.307692
11.692308
def fetch_next_block(self):
    """Returns a block of results while respecting the retry policy.

    This method only exists for backward compatibility reasons. (Because
    QueryIterable has exposed fetch_next_block api).

    :return:
        List of results.
    :rtype: list
    """
    if self._ex_context is None:
        # initiates execution context for the first time
        self._ex_context = self._create_execution_context()

    return self._ex_context.fetch_next_block()
[ "def", "fetch_next_block", "(", "self", ")", ":", "if", "self", ".", "_ex_context", "is", "None", ":", "# initiates execution context for the first time", "self", ".", "_ex_context", "=", "self", ".", "_create_execution_context", "(", ")", "return", "self", ".", "_ex_context", ".", "fetch_next_block", "(", ")" ]
32.529412
19.705882
def lti_sde_to_descrete(F, L, Qc, dt, compute_derivatives=False,
                        grad_params_no=None, P_inf=None, dP_inf=None,
                        dF=None, dQc=None):
    """
    Linear Time-Invariant Stochastic Differential Equation (LTI SDE):

        dx(t) = F x(t) dt + L d \beta,

    where

        x(t): (vector) stochastic process
        \beta: (vector) Brownian motion process
        F, L: (time invariant) matrices of corresponding dimensions
        Qc: covariance of noise.

    This function rewrites it into the corresponding state-space form:

        x_{k} = A_{k} * x_{k-1} + q_{k-1};  q_{k-1} ~ N(0, Q_{k-1})

    TODO: this function can be redone to "preprocess dataset", when close
    time points are handled properly (with rounding parameter) and values
    are averaged accordingly.

    Input:
    --------------
    F, L: LTI SDE matrices of corresponding dimensions

    Qc: matrix (n, n)
        Covariance between different dimensions of noise \beta.
        n is the dimensionality of the noise.

    dt: double or iterable
        Time difference used on this iteration. If dt is iterable, then A and
        Q_noise are computed for every unique dt.

    compute_derivatives: boolean
        Whether derivatives of A and Q are required.

    grad_params_no: int
        Number of gradient parameters

    P_inf: (state_dim, state_dim) matrix

    dP_inf

    dF: 3D array
        Derivatives of F

    dQc: 3D array
        Derivatives of Qc

    dR: 3D array
        Derivatives of R

    Output:
    --------------
    A: matrix
        A_{k}. Because we have an LTI SDE, only dt can affect the matrix
        difference for different k.

    Q_noise: matrix
        Covariance matrix of (vector) q_{k-1}. Only dt can affect the matrix
        difference for different k.

    reconstruct_index: array
        If dt was iterable, return three-dimensional arrays A and Q_noise.
        The third dimension of these arrays corresponds to unique dt's. This
        reconstruct_index contains indices of the original dt's in the unique
        dt sequence. A[:, :, reconstruct_index[5]] is the matrix A of the
        6-th (indices start from zero) dt in the original sequence.

    dA: 3D array
        Derivatives of A

    dQ: 3D array
        Derivatives of Q
    """
    # Dimensionality
    n = F.shape[0]

    if not isinstance(dt, collections.Iterable):  # not iterable, scalar
        #import pdb; pdb.set_trace()
        # The dynamical model
        A = matrix_exponent(F*dt)

        # The covariance matrix Q by matrix fraction decomposition ->
        Phi = np.zeros((2*n, 2*n))
        Phi[:n, :n] = F
        Phi[:n, n:] = L.dot(Qc).dot(L.T)
        Phi[n:, n:] = -F.T
        AB = matrix_exponent(Phi*dt)
        AB = np.dot(AB, np.vstack((np.zeros((n, n)), np.eye(n))))
        Q_noise_1 = linalg.solve(AB[n:, :].T, AB[:n, :].T)
        Q_noise_2 = P_inf - A.dot(P_inf).dot(A.T)
        # The covariance matrix Q by matrix fraction decomposition <-

        if compute_derivatives:
            dA = np.zeros([n, n, grad_params_no])
            dQ = np.zeros([n, n, grad_params_no])
            #AA = np.zeros([2*n, 2*n, nparam])
            FF = np.zeros([2*n, 2*n])
            AA = np.zeros([2*n, 2*n, grad_params_no])
            for p in range(0, grad_params_no):
                FF[:n, :n] = F
                FF[n:, :n] = dF[:, :, p]
                FF[n:, n:] = F

                # Solve the matrix exponential
                AA[:, :, p] = matrix_exponent(FF*dt)

                # Solve the differential equation
                #foo = AA[:,:,p].dot(np.vstack([m, dm[:,p]]))
                #mm = foo[:n,:]
                #dm[:,p] = foo[n:,:]

                # The discrete-time dynamical model*
                if p == 0:
                    A = AA[:n, :n, p]
                    Q_noise_3 = P_inf - A.dot(P_inf).dot(A.T)
                    Q_noise = Q_noise_3
                    #PP = A.dot(P).dot(A.T) + Q_noise_2

                # The derivatives of A and Q
                dA[:, :, p] = AA[n:, :n, p]
                tmp = dA[:, :, p].dot(P_inf).dot(A.T)
                dQ[:, :, p] = dP_inf[:, :, p] - tmp \
                    - A.dot(dP_inf[:, :, p]).dot(A.T) - tmp.T
                dQ[:, :, p] = 0.5*(dQ[:, :, p] + dQ[:, :, p].T)  # Symmetrize
        else:
            dA = None
            dQ = None
            Q_noise = Q_noise_2

        # Inaccuracies have been observed when Q_noise_1 was used.
        #Q_noise = Q_noise_1
        Q_noise = 0.5*(Q_noise + Q_noise.T)  # Symmetrize
        return A, Q_noise, None, dA, dQ
    else:  # iterable, array
        # Time discretizations (round to 14 decimals to avoid problems)
        dt_unique, tmp, reconstruct_index = np.unique(np.round(dt, 8),
                                                      return_index=True,
                                                      return_inverse=True)
        del tmp
        # Allocate space for A and Q
        A = np.empty((n, n, dt_unique.shape[0]))
        Q_noise = np.empty((n, n, dt_unique.shape[0]))
        if compute_derivatives:
            dA = np.empty((n, n, grad_params_no, dt_unique.shape[0]))
            dQ = np.empty((n, n, grad_params_no, dt_unique.shape[0]))
        else:
            dA = None
            dQ = None

        # Call this function for each unique dt
        for j in range(0, dt_unique.shape[0]):
            A[:, :, j], Q_noise[:, :, j], tmp1, dA_t, dQ_t = \
                ContDescrStateSpace.lti_sde_to_descrete(
                    F, L, Qc, dt_unique[j],
                    compute_derivatives=compute_derivatives,
                    grad_params_no=grad_params_no, P_inf=P_inf,
                    dP_inf=dP_inf, dF=dF, dQc=dQc)
            if compute_derivatives:
                dA[:, :, :, j] = dA_t
                dQ[:, :, :, j] = dQ_t

        # Return
        return A, Q_noise, reconstruct_index, dA, dQ
[ "def", "lti_sde_to_descrete", "(", "F", ",", "L", ",", "Qc", ",", "dt", ",", "compute_derivatives", "=", "False", ",", "grad_params_no", "=", "None", ",", "P_inf", "=", "None", ",", "dP_inf", "=", "None", ",", "dF", "=", "None", ",", "dQc", "=", "None", ")", ":", "# Dimensionality", "n", "=", "F", ".", "shape", "[", "0", "]", "if", "not", "isinstance", "(", "dt", ",", "collections", ".", "Iterable", ")", ":", "# not iterable, scalar", "#import pdb; pdb.set_trace()", "# The dynamical model", "A", "=", "matrix_exponent", "(", "F", "*", "dt", ")", "# The covariance matrix Q by matrix fraction decomposition ->", "Phi", "=", "np", ".", "zeros", "(", "(", "2", "*", "n", ",", "2", "*", "n", ")", ")", "Phi", "[", ":", "n", ",", ":", "n", "]", "=", "F", "Phi", "[", ":", "n", ",", "n", ":", "]", "=", "L", ".", "dot", "(", "Qc", ")", ".", "dot", "(", "L", ".", "T", ")", "Phi", "[", "n", ":", ",", "n", ":", "]", "=", "-", "F", ".", "T", "AB", "=", "matrix_exponent", "(", "Phi", "*", "dt", ")", "AB", "=", "np", ".", "dot", "(", "AB", ",", "np", ".", "vstack", "(", "(", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ")", ",", "np", ".", "eye", "(", "n", ")", ")", ")", ")", "Q_noise_1", "=", "linalg", ".", "solve", "(", "AB", "[", "n", ":", ",", ":", "]", ".", "T", ",", "AB", "[", ":", "n", ",", ":", "]", ".", "T", ")", "Q_noise_2", "=", "P_inf", "-", "A", ".", "dot", "(", "P_inf", ")", ".", "dot", "(", "A", ".", "T", ")", "# The covariance matrix Q by matrix fraction decomposition <-", "if", "compute_derivatives", ":", "dA", "=", "np", ".", "zeros", "(", "[", "n", ",", "n", ",", "grad_params_no", "]", ")", "dQ", "=", "np", ".", "zeros", "(", "[", "n", ",", "n", ",", "grad_params_no", "]", ")", "#AA = np.zeros([2*n, 2*n, nparam])", "FF", "=", "np", ".", "zeros", "(", "[", "2", "*", "n", ",", "2", "*", "n", "]", ")", "AA", "=", "np", ".", "zeros", "(", "[", "2", "*", "n", ",", "2", "*", "n", ",", "grad_params_no", "]", ")", "for", "p", "in", "range", "(", "0", ",", "grad_params_no", ")", ":", "FF", "[", ":", "n", ",", ":", "n", "]", "=", "F", "FF", "[", "n", ":", ",", ":", "n", "]", "=", "dF", "[", ":", ",", ":", ",", "p", "]", "FF", "[", "n", ":", ",", "n", ":", "]", "=", "F", "# Solve the matrix exponential", "AA", "[", ":", ",", ":", ",", "p", "]", "=", "matrix_exponent", "(", "FF", "*", "dt", ")", "# Solve the differential equation", "#foo = AA[:,:,p].dot(np.vstack([m, dm[:,p]]))", "#mm = foo[:n,:]", "#dm[:,p] = foo[n:,:]", "# The discrete-time dynamical model*", "if", "p", "==", "0", ":", "A", "=", "AA", "[", ":", "n", ",", ":", "n", ",", "p", "]", "Q_noise_3", "=", "P_inf", "-", "A", ".", "dot", "(", "P_inf", ")", ".", "dot", "(", "A", ".", "T", ")", "Q_noise", "=", "Q_noise_3", "#PP = A.dot(P).dot(A.T) + Q_noise_2", "# The derivatives of A and Q", "dA", "[", ":", ",", ":", ",", "p", "]", "=", "AA", "[", "n", ":", ",", ":", "n", ",", "p", "]", "tmp", "=", "dA", "[", ":", ",", ":", ",", "p", "]", ".", "dot", "(", "P_inf", ")", ".", "dot", "(", "A", ".", "T", ")", "dQ", "[", ":", ",", ":", ",", "p", "]", "=", "dP_inf", "[", ":", ",", ":", ",", "p", "]", "-", "tmp", "-", "A", ".", "dot", "(", "dP_inf", "[", ":", ",", ":", ",", "p", "]", ")", ".", "dot", "(", "A", ".", "T", ")", "-", "tmp", ".", "T", "dQ", "[", ":", ",", ":", ",", "p", "]", "=", "0.5", "*", "(", "dQ", "[", ":", ",", ":", ",", "p", "]", "+", "dQ", "[", ":", ",", ":", ",", "p", "]", ".", "T", ")", "# Symmetrize", "else", ":", "dA", "=", "None", "dQ", "=", "None", "Q_noise", "=", "Q_noise_2", "# Innacuracies have been observed when 
Q_noise_1 was used.", "#Q_noise = Q_noise_1", "Q_noise", "=", "0.5", "*", "(", "Q_noise", "+", "Q_noise", ".", "T", ")", "# Symmetrize", "return", "A", ",", "Q_noise", ",", "None", ",", "dA", ",", "dQ", "else", ":", "# iterable, array", "# Time discretizations (round to 14 decimals to avoid problems)", "dt_unique", ",", "tmp", ",", "reconstruct_index", "=", "np", ".", "unique", "(", "np", ".", "round", "(", "dt", ",", "8", ")", ",", "return_index", "=", "True", ",", "return_inverse", "=", "True", ")", "del", "tmp", "# Allocate space for A and Q", "A", "=", "np", ".", "empty", "(", "(", "n", ",", "n", ",", "dt_unique", ".", "shape", "[", "0", "]", ")", ")", "Q_noise", "=", "np", ".", "empty", "(", "(", "n", ",", "n", ",", "dt_unique", ".", "shape", "[", "0", "]", ")", ")", "if", "compute_derivatives", ":", "dA", "=", "np", ".", "empty", "(", "(", "n", ",", "n", ",", "grad_params_no", ",", "dt_unique", ".", "shape", "[", "0", "]", ")", ")", "dQ", "=", "np", ".", "empty", "(", "(", "n", ",", "n", ",", "grad_params_no", ",", "dt_unique", ".", "shape", "[", "0", "]", ")", ")", "else", ":", "dA", "=", "None", "dQ", "=", "None", "# Call this function for each unique dt", "for", "j", "in", "range", "(", "0", ",", "dt_unique", ".", "shape", "[", "0", "]", ")", ":", "A", "[", ":", ",", ":", ",", "j", "]", ",", "Q_noise", "[", ":", ",", ":", ",", "j", "]", ",", "tmp1", ",", "dA_t", ",", "dQ_t", "=", "ContDescrStateSpace", ".", "lti_sde_to_descrete", "(", "F", ",", "L", ",", "Qc", ",", "dt_unique", "[", "j", "]", ",", "compute_derivatives", "=", "compute_derivatives", ",", "grad_params_no", "=", "grad_params_no", ",", "P_inf", "=", "P_inf", ",", "dP_inf", "=", "dP_inf", ",", "dF", "=", "dF", ",", "dQc", "=", "dQc", ")", "if", "compute_derivatives", ":", "dA", "[", ":", ",", ":", ",", ":", ",", "j", "]", "=", "dA_t", "dQ", "[", ":", ",", ":", ",", ":", ",", "j", "]", "=", "dQ_t", "# Return", "return", "A", ",", "Q_noise", ",", "reconstruct_index", ",", "dA", ",", "dQ" ]
36.041176
22.017647
def attributes_js(cls, attributes):
    """
    Generates JS code to look up attributes on JS objects from an
    attributes specification dictionary. If the specification references
    a particular plotting handle it will also generate JS code to get
    the ID of the object.

    Simple example (when referencing cb_data or cb_obj):

    Input  : {'x': 'cb_data.geometry.x'}

    Output : data['x'] = cb_data['geometry']['x']

    Example referencing plot handle:

    Input  : {'x0': 'x_range.attributes.start'}

    Output : if ((x_range !== undefined)) {
               data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
             }
    """
    assign_template = '{assign}{{id: {obj_name}["id"], value: {obj_name}{attr_getters}}};\n'
    conditional_template = 'if (({obj_name} != undefined)) {{ {assign} }}'
    code = ''
    for key, attr_path in sorted(attributes.items()):
        data_assign = 'data["{key}"] = '.format(key=key)
        attrs = attr_path.split('.')
        obj_name = attrs[0]
        attr_getters = ''.join(['["{attr}"]'.format(attr=attr)
                                for attr in attrs[1:]])
        if obj_name not in ['cb_obj', 'cb_data']:
            assign_str = assign_template.format(
                assign=data_assign, obj_name=obj_name, attr_getters=attr_getters
            )
            code += conditional_template.format(
                obj_name=obj_name, assign=assign_str
            )
        else:
            assign_str = ''.join([data_assign, obj_name, attr_getters, ';\n'])
            code += assign_str
    return code
[ "def", "attributes_js", "(", "cls", ",", "attributes", ")", ":", "assign_template", "=", "'{assign}{{id: {obj_name}[\"id\"], value: {obj_name}{attr_getters}}};\\n'", "conditional_template", "=", "'if (({obj_name} != undefined)) {{ {assign} }}'", "code", "=", "''", "for", "key", ",", "attr_path", "in", "sorted", "(", "attributes", ".", "items", "(", ")", ")", ":", "data_assign", "=", "'data[\"{key}\"] = '", ".", "format", "(", "key", "=", "key", ")", "attrs", "=", "attr_path", ".", "split", "(", "'.'", ")", "obj_name", "=", "attrs", "[", "0", "]", "attr_getters", "=", "''", ".", "join", "(", "[", "'[\"{attr}\"]'", ".", "format", "(", "attr", "=", "attr", ")", "for", "attr", "in", "attrs", "[", "1", ":", "]", "]", ")", "if", "obj_name", "not", "in", "[", "'cb_obj'", ",", "'cb_data'", "]", ":", "assign_str", "=", "assign_template", ".", "format", "(", "assign", "=", "data_assign", ",", "obj_name", "=", "obj_name", ",", "attr_getters", "=", "attr_getters", ")", "code", "+=", "conditional_template", ".", "format", "(", "obj_name", "=", "obj_name", ",", "assign", "=", "assign_str", ")", "else", ":", "assign_str", "=", "''", ".", "join", "(", "[", "data_assign", ",", "obj_name", ",", "attr_getters", ",", "';\\n'", "]", ")", "code", "+=", "assign_str", "return", "code" ]
41.560976
22.243902
def note_on(self, channel, note, velocity):
    """Return bytes for a 'note_on' event."""
    return self.midi_event(NOTE_ON, channel, note, velocity)
[ "def", "note_on", "(", "self", ",", "channel", ",", "note", ",", "velocity", ")", ":", "return", "self", ".", "midi_event", "(", "NOTE_ON", ",", "channel", ",", "note", ",", "velocity", ")" ]
52
9
def merge_enrollments(db, uuid, organization):
    """Merge overlapping enrollments.

    This function merges those enrollments, related to the given 'uuid' and
    'organization', that have overlapping dates. Default start and end dates
    (1900-01-01 and 2100-01-01) are considered range limits and will be
    removed when a set of ranges overlap. For example:

     * [(1900-01-01, 2010-01-01), (2008-01-01, 2100-01-01)]
           --> (2008-01-01, 2010-01-01)
     * [(1900-01-01, 2010-01-01), (2008-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
           --> (2008-01-01, 2010-01-01), (2010-01-02, 2100-01-01)
     * [(1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)]
           --> (1900-01-01, 2010-01-01), (2010-01-02, 2100-01-01)

    It may raise an InvalidValueError when any date is out of bounds. In other
    words, when any date < 1900-01-01 or date > 2100-01-01.

    :param db: database manager
    :param uuid: unique identifier
    :param organization: name of the organization

    :raises NotFoundError: when either 'uuid' or 'organization' are not
        found in the registry. It is also raised when there are not
        enrollments related to 'uuid' and 'organization'
    :raises InvalidValueError: when any date is out of bounds
    """
    # Merge enrollments
    with db.connect() as session:
        uidentity = find_unique_identity(session, uuid)
        if not uidentity:
            raise NotFoundError(entity=uuid)

        org = find_organization(session, organization)
        if not org:
            raise NotFoundError(entity=organization)

        disjoint = session.query(Enrollment).\
            filter(Enrollment.uidentity == uidentity,
                   Enrollment.organization == org).all()

        if not disjoint:
            entity = '-'.join((uuid, organization))
            raise NotFoundError(entity=entity)

        dates = [(enr.start, enr.end) for enr in disjoint]

        for st, en in utils.merge_date_ranges(dates):
            # We prefer this method to find duplicates
            # to avoid integrity exceptions when creating
            # enrollments that are already in the database
            is_dup = lambda x, st, en: x.start == st and x.end == en

            filtered = [x for x in disjoint if not is_dup(x, st, en)]

            if len(filtered) != len(disjoint):
                disjoint = filtered
                continue

            # This means no dups were found so we need to add a
            # new enrollment
            try:
                enroll_db(session, uidentity, org, from_date=st, to_date=en)
            except ValueError as e:
                raise InvalidValueError(e)

        # Remove disjoint enrollments from the registry
        for enr in disjoint:
            delete_enrollment_db(session, enr)
[ "def", "merge_enrollments", "(", "db", ",", "uuid", ",", "organization", ")", ":", "# Merge enrollments", "with", "db", ".", "connect", "(", ")", "as", "session", ":", "uidentity", "=", "find_unique_identity", "(", "session", ",", "uuid", ")", "if", "not", "uidentity", ":", "raise", "NotFoundError", "(", "entity", "=", "uuid", ")", "org", "=", "find_organization", "(", "session", ",", "organization", ")", "if", "not", "org", ":", "raise", "NotFoundError", "(", "entity", "=", "organization", ")", "disjoint", "=", "session", ".", "query", "(", "Enrollment", ")", ".", "filter", "(", "Enrollment", ".", "uidentity", "==", "uidentity", ",", "Enrollment", ".", "organization", "==", "org", ")", ".", "all", "(", ")", "if", "not", "disjoint", ":", "entity", "=", "'-'", ".", "join", "(", "(", "uuid", ",", "organization", ")", ")", "raise", "NotFoundError", "(", "entity", "=", "entity", ")", "dates", "=", "[", "(", "enr", ".", "start", ",", "enr", ".", "end", ")", "for", "enr", "in", "disjoint", "]", "for", "st", ",", "en", "in", "utils", ".", "merge_date_ranges", "(", "dates", ")", ":", "# We prefer this method to find duplicates", "# to avoid integrity exceptions when creating", "# enrollments that are already in the database", "is_dup", "=", "lambda", "x", ",", "st", ",", "en", ":", "x", ".", "start", "==", "st", "and", "x", ".", "end", "==", "en", "filtered", "=", "[", "x", "for", "x", "in", "disjoint", "if", "not", "is_dup", "(", "x", ",", "st", ",", "en", ")", "]", "if", "len", "(", "filtered", ")", "!=", "len", "(", "disjoint", ")", ":", "disjoint", "=", "filtered", "continue", "# This means no dups where found so we need to add a", "# new enrollment", "try", ":", "enroll_db", "(", "session", ",", "uidentity", ",", "org", ",", "from_date", "=", "st", ",", "to_date", "=", "en", ")", "except", "ValueError", "as", "e", ":", "raise", "InvalidValueError", "(", "e", ")", "# Remove disjoint enrollments from the registry", "for", "enr", "in", "disjoint", ":", "delete_enrollment_db", "(", "session", ",", "enr", ")" ]
38.333333
21.222222
def lint():
    "report pylint results"
    # report according to file extension
    report_formats = {
        ".html": "html",
        ".log": "parseable",
        ".txt": "text",
    }

    lint_build_dir = easy.path("build/lint")
    lint_build_dir.exists() or lint_build_dir.makedirs()  # pylint: disable=expression-not-assigned

    argv = []
    rcfile = easy.options.lint.get("rcfile")
    if not rcfile and easy.path("pylint.cfg").exists():
        rcfile = "pylint.cfg"
    if rcfile:
        argv += ["--rcfile", os.path.abspath(rcfile)]
    if easy.options.lint.get("msg_only", False):
        argv += ["-rn"]
    argv += [
        "--import-graph", (lint_build_dir / "imports.dot").abspath(),
    ]
    argv += support.toplevel_packages()
    sys.stderr.write("Running %s::pylint '%s'\n" % (sys.argv[0], "' '".join(argv)))

    outfile = easy.options.lint.get("output", None)
    if outfile:
        outfile = os.path.abspath(outfile)
    try:
        with easy.pushd("src" if easy.path("src").exists() else "."):
            if outfile:
                argv.extend(["-f", report_formats.get(easy.path(outfile).ext, "text")])
                sys.stderr.write("Writing output to %r\n" % (str(outfile),))
                outhandle = open(outfile, "w")
                try:
                    subprocess.check_call(["pylint"] + argv, stdout=outhandle)
                finally:
                    outhandle.close()
            else:
                subprocess.check_call(["pylint"] + argv, )
        sys.stderr.write("paver::lint - No problems found.\n")
    except subprocess.CalledProcessError as exc:
        if exc.returncode & 32:
            # usage error (internal error in this code)
            sys.stderr.write("paver::lint - Usage error, bad arguments %r?!\n" % (argv,))
            sys.exit(exc.returncode)
        else:
            bits = {
                1: "fatal",
                2: "error",
                4: "warning",
                8: "refactor",
                16: "convention",
            }
            sys.stderr.write("paver::lint - Some %s message(s) issued.\n" % (
                ", ".join([text for bit, text in bits.items() if exc.returncode & bit])
            ))
            if exc.returncode & 3:
                sys.stderr.write("paver::lint - Exiting due to fatal / error message.\n")
                sys.exit(exc.returncode)
[ "def", "lint", "(", ")", ":", "# report according to file extension", "report_formats", "=", "{", "\".html\"", ":", "\"html\"", ",", "\".log\"", ":", "\"parseable\"", ",", "\".txt\"", ":", "\"text\"", ",", "}", "lint_build_dir", "=", "easy", ".", "path", "(", "\"build/lint\"", ")", "lint_build_dir", ".", "exists", "(", ")", "or", "lint_build_dir", ".", "makedirs", "(", ")", "# pylint: disable=expression-not-assigned", "argv", "=", "[", "]", "rcfile", "=", "easy", ".", "options", ".", "lint", ".", "get", "(", "\"rcfile\"", ")", "if", "not", "rcfile", "and", "easy", ".", "path", "(", "\"pylint.cfg\"", ")", ".", "exists", "(", ")", ":", "rcfile", "=", "\"pylint.cfg\"", "if", "rcfile", ":", "argv", "+=", "[", "\"--rcfile\"", ",", "os", ".", "path", ".", "abspath", "(", "rcfile", ")", "]", "if", "easy", ".", "options", ".", "lint", ".", "get", "(", "\"msg_only\"", ",", "False", ")", ":", "argv", "+=", "[", "\"-rn\"", "]", "argv", "+=", "[", "\"--import-graph\"", ",", "(", "lint_build_dir", "/", "\"imports.dot\"", ")", ".", "abspath", "(", ")", ",", "]", "argv", "+=", "support", ".", "toplevel_packages", "(", ")", "sys", ".", "stderr", ".", "write", "(", "\"Running %s::pylint '%s'\\n\"", "%", "(", "sys", ".", "argv", "[", "0", "]", ",", "\"' '\"", ".", "join", "(", "argv", ")", ")", ")", "outfile", "=", "easy", ".", "options", ".", "lint", ".", "get", "(", "\"output\"", ",", "None", ")", "if", "outfile", ":", "outfile", "=", "os", ".", "path", ".", "abspath", "(", "outfile", ")", "try", ":", "with", "easy", ".", "pushd", "(", "\"src\"", "if", "easy", ".", "path", "(", "\"src\"", ")", ".", "exists", "(", ")", "else", "\".\"", ")", ":", "if", "outfile", ":", "argv", ".", "extend", "(", "[", "\"-f\"", ",", "report_formats", ".", "get", "(", "easy", ".", "path", "(", "outfile", ")", ".", "ext", ",", "\"text\"", ")", "]", ")", "sys", ".", "stderr", ".", "write", "(", "\"Writing output to %r\\n\"", "%", "(", "str", "(", "outfile", ")", ",", ")", ")", "outhandle", "=", "open", "(", "outfile", ",", "\"w\"", ")", "try", ":", "subprocess", ".", "check_call", "(", "[", "\"pylint\"", "]", "+", "argv", ",", "stdout", "=", "outhandle", ")", "finally", ":", "outhandle", ".", "close", "(", ")", "else", ":", "subprocess", ".", "check_call", "(", "[", "\"pylint\"", "]", "+", "argv", ",", ")", "sys", ".", "stderr", ".", "write", "(", "\"paver::lint - No problems found.\\n\"", ")", "except", "subprocess", ".", "CalledProcessError", "as", "exc", ":", "if", "exc", ".", "returncode", "&", "32", ":", "# usage error (internal error in this code)", "sys", ".", "stderr", ".", "write", "(", "\"paver::lint - Usage error, bad arguments %r?!\\n\"", "%", "(", "argv", ",", ")", ")", "sys", ".", "exit", "(", "exc", ".", "returncode", ")", "else", ":", "bits", "=", "{", "1", ":", "\"fatal\"", ",", "2", ":", "\"error\"", ",", "4", ":", "\"warning\"", ",", "8", ":", "\"refactor\"", ",", "16", ":", "\"convention\"", ",", "}", "sys", ".", "stderr", ".", "write", "(", "\"paver::lint - Some %s message(s) issued.\\n\"", "%", "(", "\", \"", ".", "join", "(", "[", "text", "for", "bit", ",", "text", "in", "bits", ".", "items", "(", ")", "if", "exc", ".", "returncode", "&", "bit", "]", ")", ")", ")", "if", "exc", ".", "returncode", "&", "3", ":", "sys", ".", "stderr", ".", "write", "(", "\"paver::lint - Exiting due to fatal / error message.\\n\"", ")", "sys", ".", "exit", "(", "exc", ".", "returncode", ")" ]
37.419355
21.709677
def get_chatlist(chatfile):
    """Try reading ids of saved chats from file. If we fail, return empty set"""
    if not chatfile:
        return set()
    try:
        with open(chatfile) as file_contents:
            return set(int(chat) for chat in file_contents)
    except (OSError, IOError) as exc:
        LOGGER.error('could not load saved chats:\n%s', exc)
        return set()
[ "def", "get_chatlist", "(", "chatfile", ")", ":", "if", "not", "chatfile", ":", "return", "set", "(", ")", "try", ":", "with", "open", "(", "chatfile", ")", "as", "file_contents", ":", "return", "set", "(", "int", "(", "chat", ")", "for", "chat", "in", "file_contents", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "exc", ":", "LOGGER", ".", "error", "(", "'could not load saved chats:\\n%s'", ",", "exc", ")", "return", "set", "(", ")" ]
34.454545
13.818182
def replace(self, nodes):
    """
    Replaces user defined files search pattern occurrences with replacement pattern using given nodes.

    :param nodes: Nodes.
    :type nodes: list
    :return: Method success.
    :rtype: bool
    """
    files = {}
    for node in nodes:
        if node.family == "SearchFile":
            files[node.file] = node.children
        elif node.family == "SearchOccurence":
            file = node.parent.file
            if not file in files:
                files[file] = []
            files[file].append(node)

    replacement_pattern = self.Replace_With_comboBox.currentText()
    SearchAndReplace.insert_pattern(replacement_pattern, self.__replace_with_patterns_model)

    replace_results = {}
    for file, occurrences in files.iteritems():
        editor = self.__container.get_editor(file)
        if editor:
            document = editor.document()
        else:
            cache_data = self.__files_cache.get_content(file)
            if cache_data is None:
                LOGGER.warning(
                    "!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
                continue
            content = self.__files_cache.get_content(file).content
            document = self.__get_document(content)
            self.__cache(file, content, document)
        replace_results[file] = self.__replace_within_document(document, occurrences, replacement_pattern)

    self.set_replace_results(replace_results)

    self.__container.engine.notifications_manager.notify(
        "{0} | '{1}' pattern occurence(s) replaced in '{2}' files!".format(
            self.__class__.__name__,
            sum(replace_results.values()),
            len(replace_results.keys())))
[ "def", "replace", "(", "self", ",", "nodes", ")", ":", "files", "=", "{", "}", "for", "node", "in", "nodes", ":", "if", "node", ".", "family", "==", "\"SearchFile\"", ":", "files", "[", "node", ".", "file", "]", "=", "node", ".", "children", "elif", "node", ".", "family", "==", "\"SearchOccurence\"", ":", "file", "=", "node", ".", "parent", ".", "file", "if", "not", "file", "in", "files", ":", "files", "[", "file", "]", "=", "[", "]", "files", "[", "file", "]", ".", "append", "(", "node", ")", "replacement_pattern", "=", "self", ".", "Replace_With_comboBox", ".", "currentText", "(", ")", "SearchAndReplace", ".", "insert_pattern", "(", "replacement_pattern", ",", "self", ".", "__replace_with_patterns_model", ")", "replace_results", "=", "{", "}", "for", "file", ",", "occurrences", "in", "files", ".", "iteritems", "(", ")", ":", "editor", "=", "self", ".", "__container", ".", "get_editor", "(", "file", ")", "if", "editor", ":", "document", "=", "editor", ".", "document", "(", ")", "else", ":", "cache_data", "=", "self", ".", "__files_cache", ".", "get_content", "(", "file", ")", "if", "cache_data", "is", "None", ":", "LOGGER", ".", "warning", "(", "\"!> {0} | '{1}' file doesn't exists in files cache!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "file", ")", ")", "continue", "content", "=", "self", ".", "__files_cache", ".", "get_content", "(", "file", ")", ".", "content", "document", "=", "self", ".", "__get_document", "(", "content", ")", "self", ".", "__cache", "(", "file", ",", "content", ",", "document", ")", "replace_results", "[", "file", "]", "=", "self", ".", "__replace_within_document", "(", "document", ",", "occurrences", ",", "replacement_pattern", ")", "self", ".", "set_replace_results", "(", "replace_results", ")", "self", ".", "__container", ".", "engine", ".", "notifications_manager", ".", "notify", "(", "\"{0} | '{1}' pattern occurence(s) replaced in '{2}' files!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "sum", "(", "replace_results", ".", "values", "(", ")", ")", ",", "len", "(", "replace_results", ".", "keys", "(", ")", ")", ")", ")" ]
43.911111
24.133333
def _get_event_id(object_type: str) -> str:
    """Return an event key for the event on the object type.

    This must be a unique event id for the object.

    Args:
        object_type (str): Type of object

    Returns:
        str, event id
    """
    key = _keys.event_counter(object_type)
    DB.watch(key, pipeline=True)
    count = DB.get_value(key)
    DB.increment(key)
    DB.execute()
    if count is None:
        count = 0
    return '{}_event_{:08d}'.format(object_type, int(count))
[ "def", "_get_event_id", "(", "object_type", ":", "str", ")", "->", "str", ":", "key", "=", "_keys", ".", "event_counter", "(", "object_type", ")", "DB", ".", "watch", "(", "key", ",", "pipeline", "=", "True", ")", "count", "=", "DB", ".", "get_value", "(", "key", ")", "DB", ".", "increment", "(", "key", ")", "DB", ".", "execute", "(", ")", "if", "count", "is", "None", ":", "count", "=", "0", "return", "'{}_event_{:08d}'", ".", "format", "(", "object_type", ",", "int", "(", "count", ")", ")" ]
24.05
18.9
def apply_next_patch(self, force=False, quiet=False):
    """ Apply next patch in series file """
    self._check()

    top = self.db.top_patch()
    if not top:
        patch = self.series.first_patch()
    else:
        patch = self.series.patch_after(top)

    if not patch:
        raise AllPatchesApplied(self.series, top)

    self.applying(patch)
    self._apply_patch(patch, force, quiet)
    self.db.save()
    self.applied(self.db.top_patch())
[ "def", "apply_next_patch", "(", "self", ",", "force", "=", "False", ",", "quiet", "=", "False", ")", ":", "self", ".", "_check", "(", ")", "top", "=", "self", ".", "db", ".", "top_patch", "(", ")", "if", "not", "top", ":", "patch", "=", "self", ".", "series", ".", "first_patch", "(", ")", "else", ":", "patch", "=", "self", ".", "series", ".", "patch_after", "(", "top", ")", "if", "not", "patch", ":", "raise", "AllPatchesApplied", "(", "self", ".", "series", ",", "top", ")", "self", ".", "applying", "(", "patch", ")", "self", ".", "_apply_patch", "(", "patch", ",", "force", ",", "quiet", ")", "self", ".", "db", ".", "save", "(", ")", "self", ".", "applied", "(", "self", ".", "db", ".", "top_patch", "(", ")", ")" ]
25.789474
19.421053
def _export_model(models):
    """
    Switch model tables to index-by-number

    :param dict models: Metadata
    :return dict _models: Metadata
    """
    logger_jsons.info("enter export_model")
    _models = []
    try:
        for name, model in models.items():
            if "summaryTable" in model:
                model["summaryTable"] = _idx_table_by_num(model["summaryTable"])
            # Process ensemble table (special two columns)
            if "ensembleTable" in model:
                model["ensembleTable"] = _idx_table_by_num(model["ensembleTable"])
            if "distributionTable" in model:
                model["distributionTable"] = _idx_table_by_num(model["distributionTable"])
            _models.append(model)
    except Exception as e:
        logger_jsons.error("export_model: {}".format(e))
        print("Error: export_model: {}".format(e))
    logger_jsons.info("exit export_model")
    return _models
[ "def", "_export_model", "(", "models", ")", ":", "logger_jsons", ".", "info", "(", "\"enter export_model\"", ")", "_models", "=", "[", "]", "try", ":", "for", "name", ",", "model", "in", "models", ".", "items", "(", ")", ":", "if", "\"summaryTable\"", "in", "model", ":", "model", "[", "\"summaryTable\"", "]", "=", "_idx_table_by_num", "(", "model", "[", "\"summaryTable\"", "]", ")", "# Process ensemble table (special two columns)", "if", "\"ensembleTable\"", "in", "model", ":", "model", "[", "\"ensembleTable\"", "]", "=", "_idx_table_by_num", "(", "model", "[", "\"ensembleTable\"", "]", ")", "if", "\"distributionTable\"", "in", "model", ":", "model", "[", "\"distributionTable\"", "]", "=", "_idx_table_by_num", "(", "model", "[", "\"distributionTable\"", "]", ")", "_models", ".", "append", "(", "model", ")", "except", "Exception", "as", "e", ":", "logger_jsons", ".", "error", "(", "\"export_model: {}\"", ".", "format", "(", "e", ")", ")", "print", "(", "\"Error: export_model: {}\"", ".", "format", "(", "e", ")", ")", "logger_jsons", ".", "info", "(", "\"exit export_model\"", ")", "return", "_models" ]
31.551724
19.206897
def copy(self, dst, **kwargs):
    """Copy file to a new destination.

    Returns JSON Patch with proposed change pointing to new copy.
    """
    _fs, filename = opener.parse(self.uri)
    _fs_dst, filename_dst = opener.parse(dst)
    copyfile(_fs, filename, _fs_dst, filename_dst, **kwargs)
    return [{'op': 'replace', 'path': self.pointer, 'value': dst}]
[ "def", "copy", "(", "self", ",", "dst", ",", "*", "*", "kwargs", ")", ":", "_fs", ",", "filename", "=", "opener", ".", "parse", "(", "self", ".", "uri", ")", "_fs_dst", ",", "filename_dst", "=", "opener", ".", "parse", "(", "dst", ")", "copyfile", "(", "_fs", ",", "filename", ",", "_fs_dst", ",", "filename_dst", ",", "*", "*", "kwargs", ")", "return", "[", "{", "'op'", ":", "'replace'", ",", "'path'", ":", "self", ".", "pointer", ",", "'value'", ":", "dst", "}", "]" ]
42.333333
16.444444
def start(self):
    """
    Starts the IOU process.
    """
    self._check_requirements()
    if not self.is_running():
        yield from self._library_check()

        try:
            self._rename_nvram_file()
        except OSError as e:
            raise IOUError("Could not rename nvram files: {}".format(e))

        iourc_path = self.iourc_path
        if not iourc_path:
            raise IOUError("Could not find an iourc file (IOU license)")
        if not os.path.isfile(iourc_path):
            raise IOUError("The iourc path '{}' is not a regular file".format(iourc_path))

        yield from self._check_iou_licence()
        yield from self._start_ubridge()
        self._create_netmap_config()
        if self.use_default_iou_values:
            # make sure we have the default nvram amount to correctly push the configs
            yield from self.update_default_iou_values()
        self._push_configs_to_nvram()

        # check if there is enough RAM to run
        self.check_available_ram(self.ram)

        self._nvram_watcher = FileWatcher(self._nvram_file(), self._nvram_changed, delay=2)

        # created a environment variable pointing to the iourc file.
        env = os.environ.copy()
        if "IOURC" not in os.environ:
            env["IOURC"] = iourc_path
        command = yield from self._build_command()
        try:
            log.info("Starting IOU: {}".format(command))
            self.command_line = ' '.join(command)
            self._iou_process = yield from asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stdin=asyncio.subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=self.working_dir,
                env=env)
            log.info("IOU instance {} started PID={}".format(self._id, self._iou_process.pid))
            self._started = True
            self.status = "started"
            callback = functools.partial(self._termination_callback, "IOU")
            gns3server.utils.asyncio.monitor_process(self._iou_process, callback)
        except FileNotFoundError as e:
            raise IOUError("Could not start IOU: {}: 32-bit binary support is probably not installed".format(e))
        except (OSError, subprocess.SubprocessError) as e:
            iou_stdout = self.read_iou_stdout()
            log.error("Could not start IOU {}: {}\n{}".format(self._path, e, iou_stdout))
            raise IOUError("Could not start IOU {}: {}\n{}".format(self._path, e, iou_stdout))

        server = AsyncioTelnetServer(reader=self._iou_process.stdout,
                                     writer=self._iou_process.stdin,
                                     binary=True,
                                     echo=True)
        self._telnet_server = yield from asyncio.start_server(server.run,
                                                              self._manager.port_manager.console_host,
                                                              self.console)

        # configure networking support
        yield from self._networking()
[ "def", "start", "(", "self", ")", ":", "self", ".", "_check_requirements", "(", ")", "if", "not", "self", ".", "is_running", "(", ")", ":", "yield", "from", "self", ".", "_library_check", "(", ")", "try", ":", "self", ".", "_rename_nvram_file", "(", ")", "except", "OSError", "as", "e", ":", "raise", "IOUError", "(", "\"Could not rename nvram files: {}\"", ".", "format", "(", "e", ")", ")", "iourc_path", "=", "self", ".", "iourc_path", "if", "not", "iourc_path", ":", "raise", "IOUError", "(", "\"Could not find an iourc file (IOU license)\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "iourc_path", ")", ":", "raise", "IOUError", "(", "\"The iourc path '{}' is not a regular file\"", ".", "format", "(", "iourc_path", ")", ")", "yield", "from", "self", ".", "_check_iou_licence", "(", ")", "yield", "from", "self", ".", "_start_ubridge", "(", ")", "self", ".", "_create_netmap_config", "(", ")", "if", "self", ".", "use_default_iou_values", ":", "# make sure we have the default nvram amount to correctly push the configs", "yield", "from", "self", ".", "update_default_iou_values", "(", ")", "self", ".", "_push_configs_to_nvram", "(", ")", "# check if there is enough RAM to run", "self", ".", "check_available_ram", "(", "self", ".", "ram", ")", "self", ".", "_nvram_watcher", "=", "FileWatcher", "(", "self", ".", "_nvram_file", "(", ")", ",", "self", ".", "_nvram_changed", ",", "delay", "=", "2", ")", "# created a environment variable pointing to the iourc file.", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "if", "\"IOURC\"", "not", "in", "os", ".", "environ", ":", "env", "[", "\"IOURC\"", "]", "=", "iourc_path", "command", "=", "yield", "from", "self", ".", "_build_command", "(", ")", "try", ":", "log", ".", "info", "(", "\"Starting IOU: {}\"", ".", "format", "(", "command", ")", ")", "self", ".", "command_line", "=", "' '", ".", "join", "(", "command", ")", "self", ".", "_iou_process", "=", "yield", "from", "asyncio", ".", "create_subprocess_exec", "(", "*", "command", ",", "stdout", "=", "asyncio", ".", "subprocess", ".", "PIPE", ",", "stdin", "=", "asyncio", ".", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "cwd", "=", "self", ".", "working_dir", ",", "env", "=", "env", ")", "log", ".", "info", "(", "\"IOU instance {} started PID={}\"", ".", "format", "(", "self", ".", "_id", ",", "self", ".", "_iou_process", ".", "pid", ")", ")", "self", ".", "_started", "=", "True", "self", ".", "status", "=", "\"started\"", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_termination_callback", ",", "\"IOU\"", ")", "gns3server", ".", "utils", ".", "asyncio", ".", "monitor_process", "(", "self", ".", "_iou_process", ",", "callback", ")", "except", "FileNotFoundError", "as", "e", ":", "raise", "IOUError", "(", "\"Could not start IOU: {}: 32-bit binary support is probably not installed\"", ".", "format", "(", "e", ")", ")", "except", "(", "OSError", ",", "subprocess", ".", "SubprocessError", ")", "as", "e", ":", "iou_stdout", "=", "self", ".", "read_iou_stdout", "(", ")", "log", ".", "error", "(", "\"Could not start IOU {}: {}\\n{}\"", ".", "format", "(", "self", ".", "_path", ",", "e", ",", "iou_stdout", ")", ")", "raise", "IOUError", "(", "\"Could not start IOU {}: {}\\n{}\"", ".", "format", "(", "self", ".", "_path", ",", "e", ",", "iou_stdout", ")", ")", "server", "=", "AsyncioTelnetServer", "(", "reader", "=", "self", ".", "_iou_process", ".", "stdout", ",", "writer", "=", "self", ".", "_iou_process", ".", "stdin", ",", 
"binary", "=", "True", ",", "echo", "=", "True", ")", "self", ".", "_telnet_server", "=", "yield", "from", "asyncio", ".", "start_server", "(", "server", ".", "run", ",", "self", ".", "_manager", ".", "port_manager", ".", "console_host", ",", "self", ".", "console", ")", "# configure networking support", "yield", "from", "self", ".", "_networking", "(", ")" ]
44.455882
23.691176
def query_region(self, chrom=None, start=None, stop=None):
    """Query the table, returning row or rows within the given genomic
    region.

    Parameters
    ----------
    chrom : string, optional
        Chromosome/contig.
    start : int, optional
        Region start position (1-based).
    stop : int, optional
        Region stop position (1-based).

    Returns
    -------
    result : VariantTable
    """
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_range(start, stop)
    else:
        loc = self.index.locate_range(chrom, start, stop)
    return self[loc]
[ "def", "query_region", "(", "self", ",", "chrom", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "if", "self", ".", "index", "is", "None", ":", "raise", "ValueError", "(", "'no index has been set'", ")", "if", "isinstance", "(", "self", ".", "index", ",", "SortedIndex", ")", ":", "# ignore chrom", "loc", "=", "self", ".", "index", ".", "locate_range", "(", "start", ",", "stop", ")", "else", ":", "loc", "=", "self", ".", "index", ".", "locate_range", "(", "chrom", ",", "start", ",", "stop", ")", "return", "self", "[", "loc", "]" ]
29.5
16.846154
def add_iri_thermal_plasma(inst, glat_label='glat', glong_label='glong',
                           alt_label='alt'):
    """
    Uses IRI (International Reference Ionosphere) model to simulate an ionosphere.

    Uses pyglow module to run IRI. Configured to use actual solar parameters
    to run model.

    Example
    -------
        # function added below modifies the inst object upon every inst.load call
        inst.custom.add(add_iri_thermal_plasma, 'modify', glat_label='custom_label')

    Parameters
    ----------
    inst : pysat.Instrument
        Designed with pysat_sgp4 in mind
    glat_label : string
        label used in inst to identify WGS84 geodetic latitude (degrees)
    glong_label : string
        label used in inst to identify WGS84 geodetic longitude (degrees)
    alt_label : string
        label used in inst to identify WGS84 geodetic altitude (km, height above surface)

    Returns
    -------
    inst
        Input pysat.Instrument object modified to include thermal plasma parameters.
        'ion_temp' for ion temperature in Kelvin
        'e_temp' for electron temperature in Kelvin
        'ion_dens' for the total ion density (O+ and H+)
        'frac_dens_o' for the fraction of total density that is O+
        'frac_dens_h' for the fraction of total density that is H+
    """
    import pyglow
    from pyglow.pyglow import Point

    iri_params = []
    # print 'IRI Simulations'
    for time, lat, lon, alt in zip(inst.data.index, inst[glat_label],
                                   inst[glong_label], inst[alt_label]):
        # Point class is instantiated.
        # Its parameters are a function of time and spatial location
        pt = Point(time, lat, lon, alt)
        pt.run_iri()
        iri = {}
        # After the model is run, its members like Ti, ni[O+], etc. can be accessed
        iri['ion_temp'] = pt.Ti
        iri['e_temp'] = pt.Te
        iri['ion_dens'] = pt.ni['O+'] + pt.ni['H+'] + pt.ni['HE+']  # pt.ne - pt.ni['NO+'] - pt.ni['O2+'] - pt.ni['HE+']
        iri['frac_dens_o'] = pt.ni['O+']/iri['ion_dens']
        iri['frac_dens_h'] = pt.ni['H+']/iri['ion_dens']
        iri['frac_dens_he'] = pt.ni['HE+']/iri['ion_dens']
        iri_params.append(iri)
    # print 'Complete.'
    iri = pds.DataFrame(iri_params)
    iri.index = inst.data.index
    inst[iri.keys()] = iri

    inst.meta['ion_temp'] = {'units': 'Kelvin', 'long_name': 'Ion Temperature'}
    inst.meta['ion_dens'] = {'units': 'N/cc', 'long_name': 'Ion Density',
                             'desc': 'Total ion density including O+ and H+ from IRI model run.'}
    inst.meta['frac_dens_o'] = {'units': '', 'long_name': 'Fractional O+ Density'}
    inst.meta['frac_dens_h'] = {'units': '', 'long_name': 'Fractional H+ Density'}
[ "def", "add_iri_thermal_plasma", "(", "inst", ",", "glat_label", "=", "'glat'", ",", "glong_label", "=", "'glong'", ",", "alt_label", "=", "'alt'", ")", ":", "import", "pyglow", "from", "pyglow", ".", "pyglow", "import", "Point", "iri_params", "=", "[", "]", "# print 'IRI Simulations'", "for", "time", ",", "lat", ",", "lon", ",", "alt", "in", "zip", "(", "inst", ".", "data", ".", "index", ",", "inst", "[", "glat_label", "]", ",", "inst", "[", "glong_label", "]", ",", "inst", "[", "alt_label", "]", ")", ":", "# Point class is instantiated. Its parameters are a function of time and spatial location", "pt", "=", "Point", "(", "time", ",", "lat", ",", "lon", ",", "alt", ")", "pt", ".", "run_iri", "(", ")", "iri", "=", "{", "}", "# After the model is run, its members like Ti, ni[O+], etc. can be accessed", "iri", "[", "'ion_temp'", "]", "=", "pt", ".", "Ti", "iri", "[", "'e_temp'", "]", "=", "pt", ".", "Te", "iri", "[", "'ion_dens'", "]", "=", "pt", ".", "ni", "[", "'O+'", "]", "+", "pt", ".", "ni", "[", "'H+'", "]", "+", "pt", ".", "ni", "[", "'HE+'", "]", "#pt.ne - pt.ni['NO+'] - pt.ni['O2+'] - pt.ni['HE+']", "iri", "[", "'frac_dens_o'", "]", "=", "pt", ".", "ni", "[", "'O+'", "]", "/", "iri", "[", "'ion_dens'", "]", "iri", "[", "'frac_dens_h'", "]", "=", "pt", ".", "ni", "[", "'H+'", "]", "/", "iri", "[", "'ion_dens'", "]", "iri", "[", "'frac_dens_he'", "]", "=", "pt", ".", "ni", "[", "'HE+'", "]", "/", "iri", "[", "'ion_dens'", "]", "iri_params", ".", "append", "(", "iri", ")", "# print 'Complete.'", "iri", "=", "pds", ".", "DataFrame", "(", "iri_params", ")", "iri", ".", "index", "=", "inst", ".", "data", ".", "index", "inst", "[", "iri", ".", "keys", "(", ")", "]", "=", "iri", "inst", ".", "meta", "[", "'ion_temp'", "]", "=", "{", "'units'", ":", "'Kelvin'", ",", "'long_name'", ":", "'Ion Temperature'", "}", "inst", ".", "meta", "[", "'ion_dens'", "]", "=", "{", "'units'", ":", "'N/cc'", ",", "'long_name'", ":", "'Ion Density'", ",", "'desc'", ":", "'Total ion density including O+ and H+ from IRI model run.'", "}", "inst", ".", "meta", "[", "'frac_dens_o'", "]", "=", "{", "'units'", ":", "''", ",", "'long_name'", ":", "'Fractional O+ Density'", "}", "inst", ".", "meta", "[", "'frac_dens_h'", "]", "=", "{", "'units'", ":", "''", ",", "'long_name'", ":", "'Fractional H+ Density'", "}" ]
42.046875
26.59375
def no_use_pep517_callback(option, opt, value, parser):
    """
    Process a value provided for the --no-use-pep517 option.

    This is an optparse.Option callback for the no_use_pep517 option.
    """
    # Since --no-use-pep517 doesn't accept arguments, the value argument
    # will be None if --no-use-pep517 is passed via the command-line.
    # However, the value can be non-None if the option is triggered e.g.
    # by an environment variable, for example "PIP_NO_USE_PEP517=true".
    if value is not None:
        msg = """A value was passed for --no-use-pep517,
        probably using either the PIP_NO_USE_PEP517 environment variable
        or the "no-use-pep517" config file option. Use an appropriate value
        of the PIP_USE_PEP517 environment variable or the "use-pep517"
        config file option instead.
        """
        raise_option_error(parser, option=option, msg=msg)

    # Otherwise, --no-use-pep517 was passed via the command-line.
    parser.values.use_pep517 = False
[ "def", "no_use_pep517_callback", "(", "option", ",", "opt", ",", "value", ",", "parser", ")", ":", "# Since --no-use-pep517 doesn't accept arguments, the value argument", "# will be None if --no-use-pep517 is passed via the command-line.", "# However, the value can be non-None if the option is triggered e.g.", "# by an environment variable, for example \"PIP_NO_USE_PEP517=true\".", "if", "value", "is", "not", "None", ":", "msg", "=", "\"\"\"A value was passed for --no-use-pep517,\n probably using either the PIP_NO_USE_PEP517 environment variable\n or the \"no-use-pep517\" config file option. Use an appropriate value\n of the PIP_USE_PEP517 environment variable or the \"use-pep517\"\n config file option instead.\n \"\"\"", "raise_option_error", "(", "parser", ",", "option", "=", "option", ",", "msg", "=", "msg", ")", "# Otherwise, --no-use-pep517 was passed via the command-line.", "parser", ".", "values", ".", "use_pep517", "=", "False" ]
46.904762
20.571429
def determine_end_point(http_request, url):
    """
    returns detail, list or aggregates
    """
    if url.endswith('aggregates') or url.endswith('aggregates/'):
        return 'aggregates'
    else:
        return 'detail' if is_detail_url(http_request, url) else 'list'
[ "def", "determine_end_point", "(", "http_request", ",", "url", ")", ":", "if", "url", ".", "endswith", "(", "'aggregates'", ")", "or", "url", ".", "endswith", "(", "'aggregates/'", ")", ":", "return", "'aggregates'", "else", ":", "return", "'detail'", "if", "is_detail_url", "(", "http_request", ",", "url", ")", "else", "'list'" ]
33.375
13.125
def calculate_leaf_paths(self):
    """Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves."""
    reverse_xref = {}
    leaves = set()
    for v in self.value.values():
        if v.leaf:
            leaves.add(v)
        for xref in v.value_xref:
            reverse_xref.setdefault(xref, []).append(v.ident)
    for leaf in leaves:
        self.calculate_leaf_path(leaf, reverse_xref)
[ "def", "calculate_leaf_paths", "(", "self", ")", ":", "reverse_xref", "=", "{", "}", "leaves", "=", "set", "(", ")", "for", "v", "in", "self", ".", "value", ".", "values", "(", ")", ":", "if", "v", ".", "leaf", ":", "leaves", ".", "add", "(", "v", ")", "for", "xref", "in", "v", ".", "value_xref", ":", "reverse_xref", ".", "setdefault", "(", "xref", ",", "[", "]", ")", ".", "append", "(", "v", ".", "ident", ")", "for", "leaf", "in", "leaves", ":", "self", ".", "calculate_leaf_path", "(", "leaf", ",", "reverse_xref", ")" ]
38.333333
10.916667
def __parse_organizations(self, stream):
    """Parse organizations stream"""

    for aliases in self.__parse_stream(stream):
        # Parse identity
        identity = self.__parse_alias(aliases[1])
        uuid = identity.email

        uid = self._identities.get(uuid, None)
        if not uid:
            uid = UniqueIdentity(uuid=uuid)
            identity.uuid = uuid
            uid.identities.append(identity)
            self._identities[uuid] = uid

        # Parse organization
        mailmap_id = aliases[0]
        name = self.__encode(mailmap_id[0])
        if name in MAILMAP_NO_ORGS:
            continue

        org = Organization(name=name)
        self._organizations[name] = org

        enrollment = Enrollment(start=MIN_PERIOD_DATE, end=MAX_PERIOD_DATE,
                                organization=org)
        uid.enrollments.append(enrollment)
[ "def", "__parse_organizations", "(", "self", ",", "stream", ")", ":", "for", "aliases", "in", "self", ".", "__parse_stream", "(", "stream", ")", ":", "# Parse identity", "identity", "=", "self", ".", "__parse_alias", "(", "aliases", "[", "1", "]", ")", "uuid", "=", "identity", ".", "email", "uid", "=", "self", ".", "_identities", ".", "get", "(", "uuid", ",", "None", ")", "if", "not", "uid", ":", "uid", "=", "UniqueIdentity", "(", "uuid", "=", "uuid", ")", "identity", ".", "uuid", "=", "uuid", "uid", ".", "identities", ".", "append", "(", "identity", ")", "self", ".", "_identities", "[", "uuid", "]", "=", "uid", "# Parse organization", "mailmap_id", "=", "aliases", "[", "0", "]", "name", "=", "self", ".", "__encode", "(", "mailmap_id", "[", "0", "]", ")", "if", "name", "in", "MAILMAP_NO_ORGS", ":", "continue", "org", "=", "Organization", "(", "name", "=", "name", ")", "self", ".", "_organizations", "[", "name", "]", "=", "org", "enrollment", "=", "Enrollment", "(", "start", "=", "MIN_PERIOD_DATE", ",", "end", "=", "MAX_PERIOD_DATE", ",", "organization", "=", "org", ")", "uid", ".", "enrollments", ".", "append", "(", "enrollment", ")" ]
32.103448
16.241379
def parse_plugin_metadata(content):
    """Parse summary metadata to a Python object.

    Arguments:
      content: The `content` field of a `SummaryMetadata` proto
        corresponding to the pr_curves plugin.

    Returns:
      A `PrCurvesPlugin` protobuf object.
    """
    if not isinstance(content, bytes):
        raise TypeError('Content type must be bytes')
    result = plugin_data_pb2.PrCurvePluginData.FromString(content)
    if result.version == 0:
        return result
    else:
        logger.warn(
            'Unknown metadata version: %s. The latest version known to '
            'this build of TensorBoard is %s; perhaps a newer build is '
            'available?',
            result.version,
            PROTO_VERSION)
        return result
[ "def", "parse_plugin_metadata", "(", "content", ")", ":", "if", "not", "isinstance", "(", "content", ",", "bytes", ")", ":", "raise", "TypeError", "(", "'Content type must be bytes'", ")", "result", "=", "plugin_data_pb2", ".", "PrCurvePluginData", ".", "FromString", "(", "content", ")", "if", "result", ".", "version", "==", "0", ":", "return", "result", "else", ":", "logger", ".", "warn", "(", "'Unknown metadata version: %s. The latest version known to '", "'this build of TensorBoard is %s; perhaps a newer build is '", "'available?'", ",", "result", ".", "version", ",", "PROTO_VERSION", ")", "return", "result" ]
32
18.666667
def project_gdf(gdf, to_crs=None, to_latlong=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid.

    The simple calculation in this function works well for most latitudes, but
    won't work for some far northern locations like Svalbard and parts of far
    northern Norway.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected
    to_crs : dict
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, projects to latlong instead of to UTM

    Returns
    -------
    GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()

    # if gdf has no gdf_name attribute, create one now
    if not hasattr(gdf, 'gdf_name'):
        gdf.gdf_name = 'unnamed'

    # if to_crs was passed-in, use this value to project the gdf
    if to_crs is not None:
        projected_gdf = gdf.to_crs(to_crs)

    # if to_crs was not passed-in, calculate the centroid of the geometry to
    # determine UTM zone
    else:
        if to_latlong:
            # if to_latlong is True, project the gdf to latlong
            latlong_crs = settings.default_crs
            projected_gdf = gdf.to_crs(latlong_crs)
            log('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
        else:
            # else, project the gdf to UTM
            # if GeoDataFrame is already in UTM, just return it
            if (gdf.crs is not None) and ('proj' in gdf.crs) and (gdf.crs['proj'] == 'utm'):
                return gdf

            # calculate the centroid of the union of all the geometries in the
            # GeoDataFrame
            avg_longitude = gdf['geometry'].unary_union.centroid.x

            # calculate the UTM zone from this avg longitude and define the UTM
            # CRS to project
            utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
            utm_crs = {'datum': 'WGS84',
                       'ellps': 'WGS84',
                       'proj': 'utm',
                       'zone': utm_zone,
                       'units': 'm'}

            # project the GeoDataFrame to the UTM CRS
            projected_gdf = gdf.to_crs(utm_crs)
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))

    projected_gdf.gdf_name = gdf.gdf_name
    return projected_gdf
[ "def", "project_gdf", "(", "gdf", ",", "to_crs", "=", "None", ",", "to_latlong", "=", "False", ")", ":", "assert", "len", "(", "gdf", ")", ">", "0", ",", "'You cannot project an empty GeoDataFrame.'", "start_time", "=", "time", ".", "time", "(", ")", "# if gdf has no gdf_name attribute, create one now", "if", "not", "hasattr", "(", "gdf", ",", "'gdf_name'", ")", ":", "gdf", ".", "gdf_name", "=", "'unnamed'", "# if to_crs was passed-in, use this value to project the gdf", "if", "to_crs", "is", "not", "None", ":", "projected_gdf", "=", "gdf", ".", "to_crs", "(", "to_crs", ")", "# if to_crs was not passed-in, calculate the centroid of the geometry to", "# determine UTM zone", "else", ":", "if", "to_latlong", ":", "# if to_latlong is True, project the gdf to latlong", "latlong_crs", "=", "settings", ".", "default_crs", "projected_gdf", "=", "gdf", ".", "to_crs", "(", "latlong_crs", ")", "log", "(", "'Projected the GeoDataFrame \"{}\" to default_crs in {:,.2f} seconds'", ".", "format", "(", "gdf", ".", "gdf_name", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "else", ":", "# else, project the gdf to UTM", "# if GeoDataFrame is already in UTM, just return it", "if", "(", "gdf", ".", "crs", "is", "not", "None", ")", "and", "(", "'proj'", "in", "gdf", ".", "crs", ")", "and", "(", "gdf", ".", "crs", "[", "'proj'", "]", "==", "'utm'", ")", ":", "return", "gdf", "# calculate the centroid of the union of all the geometries in the", "# GeoDataFrame", "avg_longitude", "=", "gdf", "[", "'geometry'", "]", ".", "unary_union", ".", "centroid", ".", "x", "# calculate the UTM zone from this avg longitude and define the UTM", "# CRS to project", "utm_zone", "=", "int", "(", "math", ".", "floor", "(", "(", "avg_longitude", "+", "180", ")", "/", "6.", ")", "+", "1", ")", "utm_crs", "=", "{", "'datum'", ":", "'WGS84'", ",", "'ellps'", ":", "'WGS84'", ",", "'proj'", ":", "'utm'", ",", "'zone'", ":", "utm_zone", ",", "'units'", ":", "'m'", "}", "# project the GeoDataFrame to the UTM CRS", "projected_gdf", "=", "gdf", ".", "to_crs", "(", "utm_crs", ")", "log", "(", "'Projected the GeoDataFrame \"{}\" to UTM-{} in {:,.2f} seconds'", ".", "format", "(", "gdf", ".", "gdf_name", ",", "utm_zone", ",", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "projected_gdf", ".", "gdf_name", "=", "gdf", ".", "gdf_name", "return", "projected_gdf" ]
37.106061
23.621212
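A minimal usage sketch for the project_gdf function above (not part of the dataset row): it assumes geopandas and shapely are installed, that project_gdf is importable together with the settings/log helpers it references, and that CRS values follow the older dict-style convention the function checks for. Coordinates and names are illustrative only.

import geopandas as gpd
from shapely.geometry import Point

# two points near Berlin (lon, lat), in the default lat-long CRS
gdf = gpd.GeoDataFrame(geometry=[Point(13.40, 52.52), Point(13.41, 52.53)],
                       crs={'init': 'epsg:4326'})
gdf.gdf_name = 'berlin_points'

projected = project_gdf(gdf)   # centroid longitude ~13.4 -> UTM zone 33
# projected.crs is now {'datum': 'WGS84', 'ellps': 'WGS84', 'proj': 'utm', 'zone': 33, 'units': 'm'}
back = project_gdf(projected, to_latlong=True)   # reproject to settings.default_crs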
def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
[ "def", "init_dict", "(", "data", ",", "index", ",", "columns", ",", "dtype", "=", "None", ")", ":", "if", "columns", "is", "not", "None", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", "arrays", "=", "Series", "(", "data", ",", "index", "=", "columns", ",", "dtype", "=", "object", ")", "data_names", "=", "arrays", ".", "index", "missing", "=", "arrays", ".", "isnull", "(", ")", "if", "index", "is", "None", ":", "# GH10856", "# raise ValueError if only scalars in dict", "index", "=", "extract_index", "(", "arrays", "[", "~", "missing", "]", ")", "else", ":", "index", "=", "ensure_index", "(", "index", ")", "# no obvious \"empty\" int column", "if", "missing", ".", "any", "(", ")", "and", "not", "is_integer_dtype", "(", "dtype", ")", ":", "if", "dtype", "is", "None", "or", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "flexible", ")", ":", "# GH#1783", "nan_dtype", "=", "object", "else", ":", "nan_dtype", "=", "dtype", "val", "=", "construct_1d_arraylike_from_scalar", "(", "np", ".", "nan", ",", "len", "(", "index", ")", ",", "nan_dtype", ")", "arrays", ".", "loc", "[", "missing", "]", "=", "[", "val", "]", "*", "missing", ".", "sum", "(", ")", "else", ":", "keys", "=", "com", ".", "dict_keys_to_ordered_list", "(", "data", ")", "columns", "=", "data_names", "=", "Index", "(", "keys", ")", "# GH#24096 need copy to be deep for datetime64tz case", "# TODO: See if we can avoid these copies", "arrays", "=", "[", "data", "[", "k", "]", "if", "not", "is_datetime64tz_dtype", "(", "data", "[", "k", "]", ")", "else", "data", "[", "k", "]", ".", "copy", "(", "deep", "=", "True", ")", "for", "k", "in", "keys", "]", "return", "arrays_to_mgr", "(", "arrays", ",", "data_names", ",", "index", ",", "columns", ",", "dtype", "=", "dtype", ")" ]
38.810811
16.378378
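The private init_dict above backs the public dict-based DataFrame constructor; the short sketch below (an illustration, not part of the row) shows the observable behaviour of the `missing` branch: a requested column absent from the data comes back as an all-NaN object column.

import pandas as pd

df = pd.DataFrame({'a': [1, 2]}, columns=['a', 'b'])
# column 'b' was not in the data, so it is created and filled with NaN
assert df['b'].isna().all()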
def all(self, page=1, per_page=10): """ Get a single page from the list of all collections. :param page [integer]: Page number to retrieve. (Optional; default: 1) :param per_page [integer]: Number of items per page. (Optional; default: 10) :return: [Array]: A single page of the Collection list. """ url = "/collections" result = self._all(url, page=page, per_page=per_page) return CollectionModel.parse_list(result)
[ "def", "all", "(", "self", ",", "page", "=", "1", ",", "per_page", "=", "10", ")", ":", "url", "=", "\"/collections\"", "result", "=", "self", ".", "_all", "(", "url", ",", "page", "=", "page", ",", "per_page", "=", "per_page", ")", "return", "CollectionModel", ".", "parse_list", "(", "result", ")" ]
43.545455
19.181818
def update(self, scaling_group, name=None, cooldown=None, min_entities=None, max_entities=None, metadata=None): """ Updates an existing ScalingGroup. One or more of the attributes can be specified. NOTE: if you specify metadata, it will *replace* any existing metadata. If you want to add to it, you either need to pass the complete dict of metadata, or call the update_metadata() method. """ return self._manager.update(scaling_group, name=name, cooldown=cooldown, min_entities=min_entities, max_entities=max_entities, metadata=metadata)
[ "def", "update", "(", "self", ",", "scaling_group", ",", "name", "=", "None", ",", "cooldown", "=", "None", ",", "min_entities", "=", "None", ",", "max_entities", "=", "None", ",", "metadata", "=", "None", ")", ":", "return", "self", ".", "_manager", ".", "update", "(", "scaling_group", ",", "name", "=", "name", ",", "cooldown", "=", "cooldown", ",", "min_entities", "=", "min_entities", ",", "max_entities", "=", "max_entities", ",", "metadata", "=", "metadata", ")" ]
48.846154
23.769231
def to_fixed(stype): """ Returns the instruction sequence for converting the given type stored in DE,HL to fixed DE,HL. """ output = [] # List of instructions if is_int_type(stype): output = to_word(stype) output.append('ex de, hl') output.append('ld hl, 0') # 'Truncate' the fixed point elif stype == 'f': output.append('call __FTOF16REG') REQUIRES.add('ftof16reg.asm') return output
[ "def", "to_fixed", "(", "stype", ")", ":", "output", "=", "[", "]", "# List of instructions", "if", "is_int_type", "(", "stype", ")", ":", "output", "=", "to_word", "(", "stype", ")", "output", ".", "append", "(", "'ex de, hl'", ")", "output", ".", "append", "(", "'ld hl, 0'", ")", "# 'Truncate' the fixed point", "elif", "stype", "==", "'f'", ":", "output", ".", "append", "(", "'call __FTOF16REG'", ")", "REQUIRES", ".", "add", "(", "'ftof16reg.asm'", ")", "return", "output" ]
29.466667
13.2
def stop(self): """Stop the current timer if there is one and cancel the async call.""" if self._timer: self._timer.stop() self._timer.deleteLater()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_timer", ":", "self", ".", "_timer", ".", "stop", "(", ")", "self", ".", "_timer", ".", "deleteLater", "(", ")" ]
36.8
11
def logout(): """ Log out the active user """ flogin.logout_user() next = flask.request.args.get('next') return flask.redirect(next or flask.url_for("user"))
[ "def", "logout", "(", ")", ":", "flogin", ".", "logout_user", "(", ")", "next", "=", "flask", ".", "request", ".", "args", ".", "get", "(", "'next'", ")", "return", "flask", ".", "redirect", "(", "next", "or", "flask", ".", "url_for", "(", "\"user\"", ")", ")" ]
28.666667
10
def get_qutip_module(required_version='3.2'): """ Attempts to return the qutip module, but silently returns ``None`` if it can't be imported, or doesn't have version at least ``required_version``. :param str required_version: Valid input to ``distutils.version.LooseVersion``. :return: The qutip module or ``None``. :rtype: ``module`` or ``NoneType`` """ try: import qutip as qt from distutils.version import LooseVersion _qt_version = LooseVersion(qt.version.version) if _qt_version < LooseVersion(required_version): return None except ImportError: return None return qt
[ "def", "get_qutip_module", "(", "required_version", "=", "'3.2'", ")", ":", "try", ":", "import", "qutip", "as", "qt", "from", "distutils", ".", "version", "import", "LooseVersion", "_qt_version", "=", "LooseVersion", "(", "qt", ".", "version", ".", "version", ")", "if", "_qt_version", "<", "LooseVersion", "(", "required_version", ")", ":", "return", "None", "except", "ImportError", ":", "return", "None", "return", "qt" ]
30
12.909091
def unitary(self, obj, qubits, label=None):
        """Apply the unitary operator ``obj`` to the specified qubits."""
        if isinstance(qubits, QuantumRegister):
            qubits = qubits[:]
        return self.append(UnitaryGate(obj, label=label), qubits, [])
[ "def", "unitary", "(", "self", ",", "obj", ",", "qubits", ",", "label", "=", "None", ")", ":", "if", "isinstance", "(", "qubits", ",", "QuantumRegister", ")", ":", "qubits", "=", "qubits", "[", ":", "]", "return", "self", ".", "append", "(", "UnitaryGate", "(", "obj", ",", "label", "=", "label", ")", ",", "qubits", ",", "[", "]", ")" ]
40.2
9
def drawQuad(self, quad): """Draw a Quad. """ q = Quad(quad) return self.drawPolyline([q.ul, q.ll, q.lr, q.ur, q.ul])
[ "def", "drawQuad", "(", "self", ",", "quad", ")", ":", "q", "=", "Quad", "(", "quad", ")", "return", "self", ".", "drawPolyline", "(", "[", "q", ".", "ul", ",", "q", ".", "ll", ",", "q", ".", "lr", ",", "q", ".", "ur", ",", "q", ".", "ul", "]", ")" ]
29
11.4
def pulse(self):
        """
        Calls the when_rotated callback if a change is detected
        """
        new_b_value = self.gpio_b.is_active
        new_a_value = self.gpio_a.is_active
        value = self.table_values.value(new_b_value, new_a_value, self.old_b_value, self.old_a_value)
        self.old_b_value = new_b_value
        self.old_a_value = new_a_value
        if value != 0:
            self.when_rotated(value)
[ "def", "pulse", "(", "self", ")", ":", "new_b_value", "=", "self", ".", "gpio_b", ".", "is_active", "new_a_value", "=", "self", ".", "gpio_a", ".", "is_active", "value", "=", "self", ".", "table_values", ".", "value", "(", "new_b_value", ",", "new_a_value", ",", "self", ".", "old_b_value", ",", "self", ".", "old_a_value", ")", "self", ".", "old_b_value", "=", "new_b_value", "self", ".", "old_a_value", "=", "new_a_value", "if", "value", "!=", "0", ":", "self", ".", "when_rotated", "(", "value", ")" ]
29.571429
18
def pickle_load(path, compression=False):
    """Unpickle a possibly compressed pickle.

    Parameters
    ----------
    path: str
        path to the pickled file
    compression: bool
        if true, assumes that the pickle was compressed when created and attempts decompression.

    Returns
    -------
    obj: object
        the unpickled object
    """
    if compression:
        with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
            with myzip.open("data") as f:
                return pickle.load(f)
    else:
        with open(path, "rb") as f:
            return pickle.load(f)
[ "def", "pickle_load", "(", "path", ",", "compression", "=", "False", ")", ":", "if", "compression", ":", "with", "zipfile", ".", "ZipFile", "(", "path", ",", "\"r\"", ",", "compression", "=", "zipfile", ".", "ZIP_DEFLATED", ")", "as", "myzip", ":", "with", "myzip", ".", "open", "(", "\"data\"", ")", "as", "f", ":", "return", "pickle", ".", "load", "(", "f", ")", "else", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "return", "pickle", ".", "load", "(", "f", ")" ]
26.043478
21.086957
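A round-trip sketch for pickle_load above. The writer side is an assumption inferred from the reader: a ZIP archive whose single member is named "data" and holds the pickled bytes. File name and payload are illustrative.

import pickle
import zipfile

obj = {'weights': [0.1, 0.2, 0.3]}
with zipfile.ZipFile('model.pkl.zip', 'w', compression=zipfile.ZIP_DEFLATED) as myzip:
    myzip.writestr('data', pickle.dumps(obj))

restored = pickle_load('model.pkl.zip', compression=True)
assert restored == obj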
def safe_mkdir_for_all(paths): """Make directories which would contain all of the passed paths. This avoids attempting to re-make the same directories, which may be noticeably expensive if many paths mostly fall in the same set of directories. :param list of str paths: The paths for which containing directories should be created. """ created_dirs = set() for path in paths: dir_to_make = os.path.dirname(path) if dir_to_make not in created_dirs: safe_mkdir(dir_to_make) created_dirs.add(dir_to_make)
[ "def", "safe_mkdir_for_all", "(", "paths", ")", ":", "created_dirs", "=", "set", "(", ")", "for", "path", "in", "paths", ":", "dir_to_make", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "if", "dir_to_make", "not", "in", "created_dirs", ":", "safe_mkdir", "(", "dir_to_make", ")", "created_dirs", ".", "add", "(", "dir_to_make", ")" ]
37.428571
18.928571
def draw_boundary_images(glf, glb, v, f, vpe, fpe, camera):
    """Assumes the camera is set up correctly, and that glf has any needed texture mapping enabled."""
    glf.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glb.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    # Figure out which edges are on pairs of differently visible triangles

    from opendr.geometry import TriNormals
    tn = TriNormals(v, f).r.reshape((-1,3))
    campos = -cv2.Rodrigues(camera.rt.r)[0].T.dot(camera.t.r)
    rays_to_verts = v.reshape((-1,3)) - row(campos)
    rays_to_faces = rays_to_verts[f[:,0]] + rays_to_verts[f[:,1]] + rays_to_verts[f[:,2]]
    dps = np.sum(rays_to_faces * tn, axis=1)
    dps = dps[fpe[:,0]] * dps[fpe[:,1]]
    silhouette_edges = np.asarray(np.nonzero(dps<=0)[0], np.uint32)
    non_silhouette_edges = np.nonzero(dps>0)[0]
    lines_e = vpe[silhouette_edges]
    lines_v = v

    visibility = draw_edge_visibility(glb, lines_v, lines_e, f, hidden_wireframe=True)
    shape = visibility.shape
    visibility = visibility.ravel()
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    visibility[visible] = silhouette_edges[visibility[visible]]
    result = visibility.reshape(shape)
    return result
[ "def", "draw_boundary_images", "(", "glf", ",", "glb", ",", "v", ",", "f", ",", "vpe", ",", "fpe", ",", "camera", ")", ":", "glf", ".", "Clear", "(", "GL_COLOR_BUFFER_BIT", "|", "GL_DEPTH_BUFFER_BIT", ")", "glb", ".", "Clear", "(", "GL_COLOR_BUFFER_BIT", "|", "GL_DEPTH_BUFFER_BIT", ")", "# Figure out which edges are on pairs of differently visible triangles", "from", "opendr", ".", "geometry", "import", "TriNormals", "tn", "=", "TriNormals", "(", "v", ",", "f", ")", ".", "r", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "campos", "=", "-", "cv2", ".", "Rodrigues", "(", "camera", ".", "rt", ".", "r", ")", "[", "0", "]", ".", "T", ".", "dot", "(", "camera", ".", "t", ".", "r", ")", "rays_to_verts", "=", "v", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "-", "row", "(", "campos", ")", "rays_to_faces", "=", "rays_to_verts", "[", "f", "[", ":", ",", "0", "]", "]", "+", "rays_to_verts", "[", "f", "[", ":", ",", "1", "]", "]", "+", "rays_to_verts", "[", "f", "[", ":", ",", "2", "]", "]", "dps", "=", "np", ".", "sum", "(", "rays_to_faces", "*", "tn", ",", "axis", "=", "1", ")", "dps", "=", "dps", "[", "fpe", "[", ":", ",", "0", "]", "]", "*", "dps", "[", "fpe", "[", ":", ",", "1", "]", "]", "silhouette_edges", "=", "np", ".", "asarray", "(", "np", ".", "nonzero", "(", "dps", "<=", "0", ")", "[", "0", "]", ",", "np", ".", "uint32", ")", "non_silhouette_edges", "=", "np", ".", "nonzero", "(", "dps", ">", "0", ")", "[", "0", "]", "lines_e", "=", "vpe", "[", "silhouette_edges", "]", "lines_v", "=", "v", "visibility", "=", "draw_edge_visibility", "(", "glb", ",", "lines_v", ",", "lines_e", ",", "f", ",", "hidden_wireframe", "=", "True", ")", "shape", "=", "visibility", ".", "shape", "visibility", "=", "visibility", ".", "ravel", "(", ")", "visible", "=", "np", ".", "nonzero", "(", "visibility", ".", "ravel", "(", ")", "!=", "4294967295", ")", "[", "0", "]", "visibility", "[", "visible", "]", "=", "silhouette_edges", "[", "visibility", "[", "visible", "]", "]", "result", "=", "visibility", ".", "reshape", "(", "shape", ")", "return", "result" ]
47.96
18.16
def submit_text(self, sr, title, text, follow=True): """Login required. POSTs a text submission. Returns :class:`things.Link` object if ``follow=True`` (default), or the string permalink of the new submission otherwise. Argument ``follow`` exists because reddit only returns the permalink after POSTing a submission. In order to get detailed info on the new submission, we need to make another request. If you don't want to make that additional request, set ``follow=False``. See https://github.com/reddit/reddit/wiki/API%3A-submit. URL: ``http://www.reddit.com/api/submit/`` :param sr: name of subreddit to submit to :param title: title of submission :param text: submission self text :param follow: set to ``True`` to follow retrieved permalink to return detailed :class:`things.Link` object. ``False`` to just return permalink. :type follow: bool """ return self._submit(sr, title, 'self', text=text, follow=follow)
[ "def", "submit_text", "(", "self", ",", "sr", ",", "title", ",", "text", ",", "follow", "=", "True", ")", ":", "return", "self", ".", "_submit", "(", "sr", ",", "title", ",", "'self'", ",", "text", "=", "text", ",", "follow", "=", "follow", ")" ]
64.5
35.5625
def get_resource_id(self):
        """Gets the ``resource_id`` for this authorization.

        return: (osid.id.Id) - the ``Resource Id``
        raise: IllegalState - ``has_resource()`` is ``false``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.Resource.get_avatar_id_template
        if not bool(self._my_map['resourceId']):
            raise errors.IllegalState('this Authorization has no resource')
        else:
            return Id(self._my_map['resourceId'])
[ "def", "get_resource_id", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_id_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'resourceId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Authorization has no resource'", ")", "else", ":", "return", "Id", "(", "self", ".", "_my_map", "[", "'resourceId'", "]", ")" ]
42
21.307692
def render_to_terminal(self, array, cursor_pos=(0, 0)): """Renders array to terminal, returns the number of lines scrolled offscreen Returns: Number of times scrolled Args: array (FSArray): Grid of styled characters to be rendered. If array received is of width too small, render it anyway if array received is of width too large, render it anyway if array received is of height too small, render it anyway if array received is of height too large, render it, scroll down, and render the rest of it, then return how much we scrolled down """ for_stdout = self.fmtstr_to_stdout_xform() # caching of write and tc (avoiding the self. lookups etc) made # no significant performance difference here if not self.hide_cursor: self.write(self.t.hide_cursor) # TODO race condition here? height, width = self.t.height, self.t.width if (height != self._last_rendered_height or width != self._last_rendered_width): self.on_terminal_size_change(height, width) current_lines_by_row = {} rows_for_use = list(range(self.top_usable_row, height)) # rows which we have content for and don't require scrolling # TODO rename shared shared = min(len(array), len(rows_for_use)) for row, line in zip(rows_for_use[:shared], array[:shared]): current_lines_by_row[row] = line if line == self._last_lines_by_row.get(row, None): continue self.write(self.t.move(row, 0)) self.write(for_stdout(line)) if len(line) < width: self.write(self.t.clear_eol) # rows already on screen that we don't have content for rest_of_lines = array[shared:] rest_of_rows = rows_for_use[shared:] for row in rest_of_rows: # if array too small if self._last_lines_by_row and row not in self._last_lines_by_row: continue self.write(self.t.move(row, 0)) self.write(self.t.clear_eol) # TODO probably not necessary - is first char cleared? self.write(self.t.clear_bol) current_lines_by_row[row] = None # lines for which we need to scroll down to render offscreen_scrolls = 0 for line in rest_of_lines: # if array too big self.scroll_down() if self.top_usable_row > 0: self.top_usable_row -= 1 else: offscreen_scrolls += 1 current_lines_by_row = dict( (k - 1, v) for k, v in current_lines_by_row.items() ) logger.debug('new top_usable_row: %d' % self.top_usable_row) # since scrolling moves the cursor self.write(self.t.move(height - 1, 0)) self.write(for_stdout(line)) current_lines_by_row[height - 1] = line logger.debug( 'lines in last lines by row: %r' % self._last_lines_by_row.keys() ) logger.debug( 'lines in current lines by row: %r' % current_lines_by_row.keys() ) self._last_cursor_row = max( 0, cursor_pos[0] - offscreen_scrolls + self.top_usable_row ) self._last_cursor_column = cursor_pos[1] self.write( self.t.move(self._last_cursor_row, self._last_cursor_column) ) self._last_lines_by_row = current_lines_by_row if not self.hide_cursor: self.write(self.t.normal_cursor) return offscreen_scrolls
[ "def", "render_to_terminal", "(", "self", ",", "array", ",", "cursor_pos", "=", "(", "0", ",", "0", ")", ")", ":", "for_stdout", "=", "self", ".", "fmtstr_to_stdout_xform", "(", ")", "# caching of write and tc (avoiding the self. lookups etc) made", "# no significant performance difference here", "if", "not", "self", ".", "hide_cursor", ":", "self", ".", "write", "(", "self", ".", "t", ".", "hide_cursor", ")", "# TODO race condition here?", "height", ",", "width", "=", "self", ".", "t", ".", "height", ",", "self", ".", "t", ".", "width", "if", "(", "height", "!=", "self", ".", "_last_rendered_height", "or", "width", "!=", "self", ".", "_last_rendered_width", ")", ":", "self", ".", "on_terminal_size_change", "(", "height", ",", "width", ")", "current_lines_by_row", "=", "{", "}", "rows_for_use", "=", "list", "(", "range", "(", "self", ".", "top_usable_row", ",", "height", ")", ")", "# rows which we have content for and don't require scrolling", "# TODO rename shared", "shared", "=", "min", "(", "len", "(", "array", ")", ",", "len", "(", "rows_for_use", ")", ")", "for", "row", ",", "line", "in", "zip", "(", "rows_for_use", "[", ":", "shared", "]", ",", "array", "[", ":", "shared", "]", ")", ":", "current_lines_by_row", "[", "row", "]", "=", "line", "if", "line", "==", "self", ".", "_last_lines_by_row", ".", "get", "(", "row", ",", "None", ")", ":", "continue", "self", ".", "write", "(", "self", ".", "t", ".", "move", "(", "row", ",", "0", ")", ")", "self", ".", "write", "(", "for_stdout", "(", "line", ")", ")", "if", "len", "(", "line", ")", "<", "width", ":", "self", ".", "write", "(", "self", ".", "t", ".", "clear_eol", ")", "# rows already on screen that we don't have content for", "rest_of_lines", "=", "array", "[", "shared", ":", "]", "rest_of_rows", "=", "rows_for_use", "[", "shared", ":", "]", "for", "row", "in", "rest_of_rows", ":", "# if array too small", "if", "self", ".", "_last_lines_by_row", "and", "row", "not", "in", "self", ".", "_last_lines_by_row", ":", "continue", "self", ".", "write", "(", "self", ".", "t", ".", "move", "(", "row", ",", "0", ")", ")", "self", ".", "write", "(", "self", ".", "t", ".", "clear_eol", ")", "# TODO probably not necessary - is first char cleared?", "self", ".", "write", "(", "self", ".", "t", ".", "clear_bol", ")", "current_lines_by_row", "[", "row", "]", "=", "None", "# lines for which we need to scroll down to render", "offscreen_scrolls", "=", "0", "for", "line", "in", "rest_of_lines", ":", "# if array too big", "self", ".", "scroll_down", "(", ")", "if", "self", ".", "top_usable_row", ">", "0", ":", "self", ".", "top_usable_row", "-=", "1", "else", ":", "offscreen_scrolls", "+=", "1", "current_lines_by_row", "=", "dict", "(", "(", "k", "-", "1", ",", "v", ")", "for", "k", ",", "v", "in", "current_lines_by_row", ".", "items", "(", ")", ")", "logger", ".", "debug", "(", "'new top_usable_row: %d'", "%", "self", ".", "top_usable_row", ")", "# since scrolling moves the cursor", "self", ".", "write", "(", "self", ".", "t", ".", "move", "(", "height", "-", "1", ",", "0", ")", ")", "self", ".", "write", "(", "for_stdout", "(", "line", ")", ")", "current_lines_by_row", "[", "height", "-", "1", "]", "=", "line", "logger", ".", "debug", "(", "'lines in last lines by row: %r'", "%", "self", ".", "_last_lines_by_row", ".", "keys", "(", ")", ")", "logger", ".", "debug", "(", "'lines in current lines by row: %r'", "%", "current_lines_by_row", ".", "keys", "(", ")", ")", "self", ".", "_last_cursor_row", "=", "max", "(", "0", ",", "cursor_pos", "[", "0", 
"]", "-", "offscreen_scrolls", "+", "self", ".", "top_usable_row", ")", "self", ".", "_last_cursor_column", "=", "cursor_pos", "[", "1", "]", "self", ".", "write", "(", "self", ".", "t", ".", "move", "(", "self", ".", "_last_cursor_row", ",", "self", ".", "_last_cursor_column", ")", ")", "self", ".", "_last_lines_by_row", "=", "current_lines_by_row", "if", "not", "self", ".", "hide_cursor", ":", "self", ".", "write", "(", "self", ".", "t", ".", "normal_cursor", ")", "return", "offscreen_scrolls" ]
39.054348
19
def normalized_flux_to_mag(lcdict, columns=('sap.sap_flux', 'sap.sap_flux_err', 'sap.sap_bkg', 'sap.sap_bkg_err', 'pdc.pdcsap_flux', 'pdc.pdcsap_flux_err')): '''This converts the normalized fluxes in the TESS lcdicts to TESS mags. Uses the object's TESS mag stored in lcdict['objectinfo']['tessmag']:: mag - object_tess_mag = -2.5 log (flux/median_flux) Parameters ---------- lcdict : lcdict An `lcdict` produced by `read_tess_fitslc` or `consolidate_tess_fitslc`. This must have normalized fluxes in its measurement columns (use the `normalize` kwarg for these functions). columns : sequence of str The column keys of the normalized flux and background measurements in the `lcdict` to operate on and convert to magnitudes in TESS band (T). Returns ------- lcdict The returned `lcdict` will contain extra columns corresponding to magnitudes for each input normalized flux/background column. ''' tess_mag = lcdict['objectinfo']['tessmag'] for key in columns: k1, k2 = key.split('.') if 'err' not in k2: lcdict[k1][k2.replace('flux','mag')] = ( tess_mag - 2.5*np.log10(lcdict[k1][k2]) ) else: lcdict[k1][k2.replace('flux','mag')] = ( - 2.5*np.log10(1.0 - lcdict[k1][k2]) ) return lcdict
[ "def", "normalized_flux_to_mag", "(", "lcdict", ",", "columns", "=", "(", "'sap.sap_flux'", ",", "'sap.sap_flux_err'", ",", "'sap.sap_bkg'", ",", "'sap.sap_bkg_err'", ",", "'pdc.pdcsap_flux'", ",", "'pdc.pdcsap_flux_err'", ")", ")", ":", "tess_mag", "=", "lcdict", "[", "'objectinfo'", "]", "[", "'tessmag'", "]", "for", "key", "in", "columns", ":", "k1", ",", "k2", "=", "key", ".", "split", "(", "'.'", ")", "if", "'err'", "not", "in", "k2", ":", "lcdict", "[", "k1", "]", "[", "k2", ".", "replace", "(", "'flux'", ",", "'mag'", ")", "]", "=", "(", "tess_mag", "-", "2.5", "*", "np", ".", "log10", "(", "lcdict", "[", "k1", "]", "[", "k2", "]", ")", ")", "else", ":", "lcdict", "[", "k1", "]", "[", "k2", ".", "replace", "(", "'flux'", ",", "'mag'", ")", "]", "=", "(", "-", "2.5", "*", "np", ".", "log10", "(", "1.0", "-", "lcdict", "[", "k1", "]", "[", "k2", "]", ")", ")", "return", "lcdict" ]
29.716981
27.226415
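A worked numeric example of the conversion used above, independent of the lcdict structure (values are illustrative): mag = tessmag - 2.5 * log10(normalized_flux), so a flux at the median maps back to the catalog magnitude and a 1% dip adds roughly 0.011 mag.

import numpy as np

tess_mag = 10.0
normalized_flux = np.array([1.00, 0.99, 1.01])
mag = tess_mag - 2.5 * np.log10(normalized_flux)
# -> approximately [10.0, 10.0109, 9.9892]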
def health_check(self): """Uses head object to make sure the file exists in S3.""" logger.debug('Health Check on S3 file for: {namespace}'.format( namespace=self.namespace )) try: self.client.head_object(Bucket=self.bucket_name, Key=self.data_file) return True except ClientError as e: logger.debug('Error encountered with S3. Assume unhealthy')
[ "def", "health_check", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Health Check on S3 file for: {namespace}'", ".", "format", "(", "namespace", "=", "self", ".", "namespace", ")", ")", "try", ":", "self", ".", "client", ".", "head_object", "(", "Bucket", "=", "self", ".", "bucket_name", ",", "Key", "=", "self", ".", "data_file", ")", "return", "True", "except", "ClientError", "as", "e", ":", "logger", ".", "debug", "(", "'Error encountered with S3. Assume unhealthy'", ")" ]
38.636364
22.454545
def create_snapshot(self, name): """ :: POST /:login/machines/:id/snapshots :param name: identifier for snapshot :type name: :py:class:`basestring` :rtype: :py:class:`smartdc.machine.Snapshot` Create a snapshot for this machine's current state with the given `name`. """ params = {'name': name} j, _ = self.datacenter.request('POST', self.path + '/snapshots', data=params) return Snapshot(machine=self, data=j, name=name)
[ "def", "create_snapshot", "(", "self", ",", "name", ")", ":", "params", "=", "{", "'name'", ":", "name", "}", "j", ",", "_", "=", "self", ".", "datacenter", ".", "request", "(", "'POST'", ",", "self", ".", "path", "+", "'/snapshots'", ",", "data", "=", "params", ")", "return", "Snapshot", "(", "machine", "=", "self", ",", "data", "=", "j", ",", "name", "=", "name", ")" ]
30.777778
18
def _generate_idx(n, seed, n_train, n_test): """Generate train, test indices for a length-n array. Parameters ---------- n : int The length of the array seed : int Seed for a RandomState n_train, n_test : int, 0 < n_train, n_test < n Number of samples to use for the train or test index. Notes ----- """ idx = check_random_state(seed).permutation(n) ind_test = idx[:n_test] ind_train = idx[n_test : n_train + n_test] return ind_train, ind_test
[ "def", "_generate_idx", "(", "n", ",", "seed", ",", "n_train", ",", "n_test", ")", ":", "idx", "=", "check_random_state", "(", "seed", ")", ".", "permutation", "(", "n", ")", "ind_test", "=", "idx", "[", ":", "n_test", "]", "ind_train", "=", "idx", "[", "n_test", ":", "n_train", "+", "n_test", "]", "return", "ind_train", ",", "ind_test" ]
24.285714
18.571429
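A small usage sketch for _generate_idx above, assuming the function is importable and that check_random_state is the sklearn.utils helper (or an equivalent) used by the surrounding module.

ind_train, ind_test = _generate_idx(n=10, seed=0, n_train=7, n_test=3)
assert len(ind_train) == 7 and len(ind_test) == 3
assert set(ind_train).isdisjoint(ind_test)   # train/test indices never overlap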
def _get_master_schema(version): # type: (Hashable) -> bytes """ Loads the master schema of given version as bytes. :param version: The version of the master schema whose path we wish to retrieve. :raises SchemaError: When the schema version is unknown. This usually means that either (a) clkhash is out of date, or (b) the schema version listed is incorrect. :return: Bytes of the schema. """ try: file_name = MASTER_SCHEMA_FILE_NAMES[version] except (TypeError, KeyError) as e: msg = ('Schema version {} is not supported. ' 'Consider updating clkhash.').format(version) raise_from(SchemaError(msg), e) try: schema_bytes = pkgutil.get_data('clkhash', 'schemas/{}'.format(file_name)) except IOError as e: # In Python 3 we can be more specific with # FileNotFoundError, but that doesn't exist in # Python 2. msg = ('The master schema could not be found. The schema cannot be ' 'validated. Please file a bug report.') raise_from(MasterSchemaError(msg), e) if schema_bytes is None: msg = ('The master schema could not be loaded. The schema cannot be ' 'validated. Please file a bug report.') raise MasterSchemaError(msg) return schema_bytes
[ "def", "_get_master_schema", "(", "version", ")", ":", "# type: (Hashable) -> bytes", "try", ":", "file_name", "=", "MASTER_SCHEMA_FILE_NAMES", "[", "version", "]", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "msg", "=", "(", "'Schema version {} is not supported. '", "'Consider updating clkhash.'", ")", ".", "format", "(", "version", ")", "raise_from", "(", "SchemaError", "(", "msg", ")", ",", "e", ")", "try", ":", "schema_bytes", "=", "pkgutil", ".", "get_data", "(", "'clkhash'", ",", "'schemas/{}'", ".", "format", "(", "file_name", ")", ")", "except", "IOError", "as", "e", ":", "# In Python 3 we can be more specific with", "# FileNotFoundError, but that doesn't exist in", "# Python 2.", "msg", "=", "(", "'The master schema could not be found. The schema cannot be '", "'validated. Please file a bug report.'", ")", "raise_from", "(", "MasterSchemaError", "(", "msg", ")", ",", "e", ")", "if", "schema_bytes", "is", "None", ":", "msg", "=", "(", "'The master schema could not be loaded. The schema cannot be '", "'validated. Please file a bug report.'", ")", "raise", "MasterSchemaError", "(", "msg", ")", "return", "schema_bytes" ]
40.333333
19.69697
def Cleanup(self):
		"""
		Clean up timed-out connections.

		Loop through all the connections and test whether each is still active.
		If inactive, close the socket.

		@author: Nick Verbeck
		@since: 2/20/2009
		"""
		self.lock.acquire()
		try:
			for bucket in self.connections.values():
				try:
					for conn in bucket:
						conn.lock()
						try:
							open = conn.TestConnection(forceCheck=True)
							if open is True:
								conn.commit()
							else:
								#Remove the connection from the pool. Its dead better of recreating it.
								index = bucket.index(conn)
								del bucket[index]
							conn.release()
						except Exception:
							conn.release()
				except Exception:
					pass
		finally:
			self.lock.release()
[ "def", "Cleanup", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "for", "bucket", "in", "self", ".", "connections", ".", "values", "(", ")", ":", "try", ":", "for", "conn", "in", "bucket", ":", "conn", ".", "lock", "(", ")", "try", ":", "open", "=", "conn", ".", "TestConnection", "(", "forceCheck", "=", "True", ")", "if", "open", "is", "True", ":", "conn", ".", "commit", "(", ")", "else", ":", "#Remove the connection from the pool. Its dead better of recreating it.", "index", "=", "bucket", ".", "index", "(", "conn", ")", "del", "bucket", "[", "index", "]", "conn", ".", "release", "(", ")", "except", "Exception", ":", "conn", ".", "release", "(", ")", "except", "Exception", ":", "pass", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
23
21.133333
def in_(self, *values): ''' A query to check if this query field is one of the values in ``values``. Produces a MongoDB ``$in`` expression. ''' return QueryExpression({ self : { '$in' : [self.get_type().wrap_value(value) for value in values] } })
[ "def", "in_", "(", "self", ",", "*", "values", ")", ":", "return", "QueryExpression", "(", "{", "self", ":", "{", "'$in'", ":", "[", "self", ".", "get_type", "(", ")", ".", "wrap_value", "(", "value", ")", "for", "value", "in", "values", "]", "}", "}", ")" ]
42.428571
26.428571
def groupByNode(requestContext, seriesList, nodeNum, callback): """ Takes a serieslist and maps a callback to subgroups within as defined by a common node. Example:: &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries") Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the second node (0 indexed) resulting in a list of targets like:: sumSeries(ganglia.by-function.server1.*.cpu.load5), sumSeries(ganglia.by-function.server2.*.cpu.load5),... """ return groupByNodes(requestContext, seriesList, callback, nodeNum)
[ "def", "groupByNode", "(", "requestContext", ",", "seriesList", ",", "nodeNum", ",", "callback", ")", ":", "return", "groupByNodes", "(", "requestContext", ",", "seriesList", ",", "callback", ",", "nodeNum", ")" ]
35.444444
27
def prioSort(elements): """Sort a list of elements that have priority attributes""" # Randomize the services before sorting so that equal priority # elements are load-balanced. random.shuffle(elements) sorted_elems = sorted(elements, key=getPriority) return sorted_elems
[ "def", "prioSort", "(", "elements", ")", ":", "# Randomize the services before sorting so that equal priority", "# elements are load-balanced.", "random", ".", "shuffle", "(", "elements", ")", "sorted_elems", "=", "sorted", "(", "elements", ",", "key", "=", "getPriority", ")", "return", "sorted_elems" ]
36
16.375
def _process_list(self, l):
        """
        Processes a list of widget names.

        If a name is enclosed in backticks it is treated as a regular expression
        and matched against the object's attribute names.
        """
        if hasattr(self, l):
            t = getattr(self, l)

            def proc(inp):
                w = inp.strip()
                if w.startswith('`'):
                    r = re.compile(w[1:-1])
                    return [u for u in [m.group() for m in [r.match(x) for x in dir(self)] if m]
                            if isinstance(getattr(self, u), QObject)]
                else:
                    return [w]
            return list(set([y for x in map(proc, t.split(',')) for y in x]))
        return []
[ "def", "_process_list", "(", "self", ",", "l", ")", ":", "if", "hasattr", "(", "self", ",", "l", ")", ":", "t", "=", "getattr", "(", "self", ",", "l", ")", "def", "proc", "(", "inp", ")", ":", "w", "=", "inp", ".", "strip", "(", ")", "if", "w", ".", "startswith", "(", "'`'", ")", ":", "r", "=", "re", ".", "compile", "(", "w", "[", "1", ":", "-", "1", "]", ")", "return", "[", "u", "for", "u", "in", "[", "m", ".", "group", "(", ")", "for", "m", "in", "[", "r", ".", "match", "(", "x", ")", "for", "x", "in", "dir", "(", "self", ")", "]", "if", "m", "]", "if", "isinstance", "(", "getattr", "(", "self", ",", "u", ")", ",", "QObject", ")", "]", "else", ":", "return", "[", "w", "]", "return", "list", "(", "set", "(", "[", "y", "for", "x", "in", "map", "(", "proc", ",", "t", ".", "split", "(", "','", ")", ")", "for", "y", "in", "x", "]", ")", ")", "return", "[", "]" ]
30.380952
22.761905
def append_all_below(destination, source, join_str=None): """ Compared to xml.dom.minidom, lxml's treatment of text as .text and .tail attributes of elements is an oddity. It can even be a little frustrating when one is attempting to copy everything underneath some element to another element; one has to write in extra code to handle the text. This method provides the functionality of adding everything underneath the source element, in preserved order, to the destination element. """ if join_str is None: join_str = ' ' if source.text is not None: # If source has text if len(destination) == 0: # Destination has no children if destination.text is None: # Destination has no text destination.text = source.text else: # Destination has a text destination.text = join_str.join([destination.text, source.text]) else: # Destination has children #Select last child last = destination[-1] if last.tail is None: # Last child has no tail last.tail = source.text else: # Last child has a tail last.tail = join_str.join([last.tail, source.text]) for each_child in source: destination.append(deepcopy(each_child))
[ "def", "append_all_below", "(", "destination", ",", "source", ",", "join_str", "=", "None", ")", ":", "if", "join_str", "is", "None", ":", "join_str", "=", "' '", "if", "source", ".", "text", "is", "not", "None", ":", "# If source has text", "if", "len", "(", "destination", ")", "==", "0", ":", "# Destination has no children", "if", "destination", ".", "text", "is", "None", ":", "# Destination has no text", "destination", ".", "text", "=", "source", ".", "text", "else", ":", "# Destination has a text", "destination", ".", "text", "=", "join_str", ".", "join", "(", "[", "destination", ".", "text", ",", "source", ".", "text", "]", ")", "else", ":", "# Destination has children", "#Select last child", "last", "=", "destination", "[", "-", "1", "]", "if", "last", ".", "tail", "is", "None", ":", "# Last child has no tail", "last", ".", "tail", "=", "source", ".", "text", "else", ":", "# Last child has a tail", "last", ".", "tail", "=", "join_str", ".", "join", "(", "[", "last", ".", "tail", ",", "source", ".", "text", "]", ")", "for", "each_child", "in", "source", ":", "destination", ".", "append", "(", "deepcopy", "(", "each_child", ")", ")" ]
50
17.307692
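A small lxml demonstration of the text/tail handling described in append_all_below above; the markup is illustrative and the function is assumed importable.

from lxml import etree

destination = etree.fromstring('<p>Hello <b>world</b></p>')
source = etree.fromstring('<p> and <i>more</i> text</p>')
append_all_below(destination, source)
print(etree.tostring(destination, encoding='unicode'))
# source.text ' and ' becomes the tail of <b>, and <i> is deep-copied across:
# <p>Hello <b>world</b> and <i>more</i> text</p>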
def create(path,archiveList,xFilesFactor=None,aggregationMethod=None,sparse=False,useFallocate=False): """create(path,archiveList,xFilesFactor=0.5,aggregationMethod='average') path is a string archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints) xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur aggregationMethod specifies the function to use when propagating data (see ``whisper.aggregationMethods``) """ # Set default params if xFilesFactor is None: xFilesFactor = 0.5 if aggregationMethod is None: aggregationMethod = 'average' #Validate archive configurations... validateArchiveList(archiveList) #Looks good, now we create the file and write the header if os.path.exists(path): raise InvalidConfiguration("File %s already exists!" % path) fh = None try: fh = open(path,'wb') if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX ) aggregationType = struct.pack( longFormat, aggregationMethodToType.get(aggregationMethod, 1) ) oldest = max([secondsPerPoint * points for secondsPerPoint,points in archiveList]) maxRetention = struct.pack( longFormat, oldest ) xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) ) archiveCount = struct.pack(longFormat, len(archiveList)) packedMetadata = aggregationType + maxRetention + xFilesFactor + archiveCount fh.write(packedMetadata) headerSize = metadataSize + (archiveInfoSize * len(archiveList)) archiveOffsetPointer = headerSize for secondsPerPoint,points in archiveList: archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points) fh.write(archiveInfo) archiveOffsetPointer += (points * pointSize) #If configured to use fallocate and capable of fallocate use that, else #attempt sparse if configure or zero pre-allocate if sparse isn't configured. if CAN_FALLOCATE and useFallocate: remaining = archiveOffsetPointer - headerSize fallocate(fh, headerSize, remaining) elif sparse: fh.seek(archiveOffsetPointer - 1) fh.write('\x00') else: remaining = archiveOffsetPointer - headerSize chunksize = 16384 zeroes = b'\x00' * chunksize while remaining > chunksize: fh.write(zeroes) remaining -= chunksize fh.write(zeroes[:remaining]) if AUTOFLUSH: fh.flush() os.fsync(fh.fileno()) finally: if fh: fh.close()
[ "def", "create", "(", "path", ",", "archiveList", ",", "xFilesFactor", "=", "None", ",", "aggregationMethod", "=", "None", ",", "sparse", "=", "False", ",", "useFallocate", "=", "False", ")", ":", "# Set default params", "if", "xFilesFactor", "is", "None", ":", "xFilesFactor", "=", "0.5", "if", "aggregationMethod", "is", "None", ":", "aggregationMethod", "=", "'average'", "#Validate archive configurations...", "validateArchiveList", "(", "archiveList", ")", "#Looks good, now we create the file and write the header", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "InvalidConfiguration", "(", "\"File %s already exists!\"", "%", "path", ")", "fh", "=", "None", "try", ":", "fh", "=", "open", "(", "path", ",", "'wb'", ")", "if", "LOCK", ":", "fcntl", ".", "flock", "(", "fh", ".", "fileno", "(", ")", ",", "fcntl", ".", "LOCK_EX", ")", "aggregationType", "=", "struct", ".", "pack", "(", "longFormat", ",", "aggregationMethodToType", ".", "get", "(", "aggregationMethod", ",", "1", ")", ")", "oldest", "=", "max", "(", "[", "secondsPerPoint", "*", "points", "for", "secondsPerPoint", ",", "points", "in", "archiveList", "]", ")", "maxRetention", "=", "struct", ".", "pack", "(", "longFormat", ",", "oldest", ")", "xFilesFactor", "=", "struct", ".", "pack", "(", "floatFormat", ",", "float", "(", "xFilesFactor", ")", ")", "archiveCount", "=", "struct", ".", "pack", "(", "longFormat", ",", "len", "(", "archiveList", ")", ")", "packedMetadata", "=", "aggregationType", "+", "maxRetention", "+", "xFilesFactor", "+", "archiveCount", "fh", ".", "write", "(", "packedMetadata", ")", "headerSize", "=", "metadataSize", "+", "(", "archiveInfoSize", "*", "len", "(", "archiveList", ")", ")", "archiveOffsetPointer", "=", "headerSize", "for", "secondsPerPoint", ",", "points", "in", "archiveList", ":", "archiveInfo", "=", "struct", ".", "pack", "(", "archiveInfoFormat", ",", "archiveOffsetPointer", ",", "secondsPerPoint", ",", "points", ")", "fh", ".", "write", "(", "archiveInfo", ")", "archiveOffsetPointer", "+=", "(", "points", "*", "pointSize", ")", "#If configured to use fallocate and capable of fallocate use that, else", "#attempt sparse if configure or zero pre-allocate if sparse isn't configured.", "if", "CAN_FALLOCATE", "and", "useFallocate", ":", "remaining", "=", "archiveOffsetPointer", "-", "headerSize", "fallocate", "(", "fh", ",", "headerSize", ",", "remaining", ")", "elif", "sparse", ":", "fh", ".", "seek", "(", "archiveOffsetPointer", "-", "1", ")", "fh", ".", "write", "(", "'\\x00'", ")", "else", ":", "remaining", "=", "archiveOffsetPointer", "-", "headerSize", "chunksize", "=", "16384", "zeroes", "=", "b'\\x00'", "*", "chunksize", "while", "remaining", ">", "chunksize", ":", "fh", ".", "write", "(", "zeroes", ")", "remaining", "-=", "chunksize", "fh", ".", "write", "(", "zeroes", "[", ":", "remaining", "]", ")", "if", "AUTOFLUSH", ":", "fh", ".", "flush", "(", ")", "os", ".", "fsync", "(", "fh", ".", "fileno", "(", ")", ")", "finally", ":", "if", "fh", ":", "fh", ".", "close", "(", ")" ]
38.6875
24.015625
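A usage sketch for the create function above. It depends on the rest of the surrounding whisper module (struct formats, pointSize, validateArchiveList, and so on), so it is not standalone; the path and retention values are illustrative. It creates a file holding one-minute points for a day plus one-hour points for a year.

create('/tmp/example.wsp',
       [(60, 1440), (3600, 8760)],
       xFilesFactor=0.5,
       aggregationMethod='average')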
def call(self, my_args=None): """ publish the message in the topic :param my_args: dict like {msg: 'msg'} :return: nothing """ LOGGER.debug("zeromq.Publisher.call") if my_args is None: raise exceptions.ArianeConfError("publisher call arguments") if 'topic' not in my_args or my_args['topic'] is None or not my_args['topic']: raise exceptions.ArianeConfError("publisher topic") if 'msg' not in my_args or my_args['msg'] is None or not my_args['msg']: raise exceptions.ArianeConfError("publisher call msg") self.zmqsocket.send_string("%s %s" % (my_args['topic'], my_args['msg']))
[ "def", "call", "(", "self", ",", "my_args", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"zeromq.Publisher.call\"", ")", "if", "my_args", "is", "None", ":", "raise", "exceptions", ".", "ArianeConfError", "(", "\"publisher call arguments\"", ")", "if", "'topic'", "not", "in", "my_args", "or", "my_args", "[", "'topic'", "]", "is", "None", "or", "not", "my_args", "[", "'topic'", "]", ":", "raise", "exceptions", ".", "ArianeConfError", "(", "\"publisher topic\"", ")", "if", "'msg'", "not", "in", "my_args", "or", "my_args", "[", "'msg'", "]", "is", "None", "or", "not", "my_args", "[", "'msg'", "]", ":", "raise", "exceptions", ".", "ArianeConfError", "(", "\"publisher call msg\"", ")", "self", ".", "zmqsocket", ".", "send_string", "(", "\"%s %s\"", "%", "(", "my_args", "[", "'topic'", "]", ",", "my_args", "[", "'msg'", "]", ")", ")" ]
48.571429
18.428571
def size(self, destination): """ Size of the queue for specified destination. @param destination: The queue destination (e.g. /queue/foo) @type destination: C{str} @return: The number of frames in specified queue. @rtype: C{int} """ if not destination in self.queue_metadata: return 0 else: return len(self.queue_metadata[destination]['frames'])
[ "def", "size", "(", "self", ",", "destination", ")", ":", "if", "not", "destination", "in", "self", ".", "queue_metadata", ":", "return", "0", "else", ":", "return", "len", "(", "self", ".", "queue_metadata", "[", "destination", "]", "[", "'frames'", "]", ")" ]
30.714286
18.285714
def update(self, item): """ Add a collector item. Args: item (CollectorUpdate): event data like stage, timestampe and status. """ if item.matrix not in self.data: self.data[item.matrix] = [] result = Select(self.data[item.matrix]).where( lambda entry: entry.stage == item.stage).build() if len(result) > 0: stage = result[0] stage.status = item.status stage.add(item.timestamp, item.information) else: stage = CollectorStage(stage=item.stage, status=item.status) stage.add(item.timestamp, item.information) self.data[item.matrix].append(stage)
[ "def", "update", "(", "self", ",", "item", ")", ":", "if", "item", ".", "matrix", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "item", ".", "matrix", "]", "=", "[", "]", "result", "=", "Select", "(", "self", ".", "data", "[", "item", ".", "matrix", "]", ")", ".", "where", "(", "lambda", "entry", ":", "entry", ".", "stage", "==", "item", ".", "stage", ")", ".", "build", "(", ")", "if", "len", "(", "result", ")", ">", "0", ":", "stage", "=", "result", "[", "0", "]", "stage", ".", "status", "=", "item", ".", "status", "stage", ".", "add", "(", "item", ".", "timestamp", ",", "item", ".", "information", ")", "else", ":", "stage", "=", "CollectorStage", "(", "stage", "=", "item", ".", "stage", ",", "status", "=", "item", ".", "status", ")", "stage", ".", "add", "(", "item", ".", "timestamp", ",", "item", ".", "information", ")", "self", ".", "data", "[", "item", ".", "matrix", "]", ".", "append", "(", "stage", ")" ]
33.238095
17.809524
def scopus_url(self): """URL to the abstract page on Scopus.""" scopus_url = self.coredata.find('link[@rel="scopus"]', ns) try: return scopus_url.get('href') except AttributeError: # scopus_url is None return None
[ "def", "scopus_url", "(", "self", ")", ":", "scopus_url", "=", "self", ".", "coredata", ".", "find", "(", "'link[@rel=\"scopus\"]'", ",", "ns", ")", "try", ":", "return", "scopus_url", ".", "get", "(", "'href'", ")", "except", "AttributeError", ":", "# scopus_url is None", "return", "None" ]
37.714286
14.714286
def bind(self, format, *args): """ Bind a socket to a formatted endpoint. For tcp:// endpoints, supports ephemeral ports, if you specify the port number as "*". By default zsock uses the IANA designated range from C000 (49152) to FFFF (65535). To override this range, follow the "*" with "[first-last]". Either or both first and last may be empty. To bind to a random port within the range, use "!" in place of "*". Examples: tcp://127.0.0.1:* bind to first free port from C000 up tcp://127.0.0.1:! bind to random port from C000 to FFFF tcp://127.0.0.1:*[60000-] bind to first free port from 60000 up tcp://127.0.0.1:![-60000] bind to random port from C000 to 60000 tcp://127.0.0.1:![55000-55999] bind to random port from 55000 to 55999 On success, returns the actual port number used, for tcp:// endpoints, and 0 for other transports. On failure, returns -1. Note that when using ephemeral ports, a port may be reused by different services without clients being aware. Protocols that run on ephemeral ports should take this into account. """ return lib.zsock_bind(self._as_parameter_, format, *args)
[ "def", "bind", "(", "self", ",", "format", ",", "*", "args", ")", ":", "return", "lib", ".", "zsock_bind", "(", "self", ".", "_as_parameter_", ",", "format", ",", "*", "args", ")" ]
49.458333
25.041667
def _atexit_verify() -> None: """Warns if the libtcod root console is implicitly deleted.""" if lib.TCOD_ctx.root: warnings.warn( "The libtcod root console was implicitly deleted.\n" "Make sure the 'with' statement is used with the root console to" " ensure that it closes properly.", ResourceWarning, stacklevel=2, ) lib.TCOD_console_delete(ffi.NULL)
[ "def", "_atexit_verify", "(", ")", "->", "None", ":", "if", "lib", ".", "TCOD_ctx", ".", "root", ":", "warnings", ".", "warn", "(", "\"The libtcod root console was implicitly deleted.\\n\"", "\"Make sure the 'with' statement is used with the root console to\"", "\" ensure that it closes properly.\"", ",", "ResourceWarning", ",", "stacklevel", "=", "2", ",", ")", "lib", ".", "TCOD_console_delete", "(", "ffi", ".", "NULL", ")" ]
39.363636
15.545455
def _geolocation_extract(response): """ Mimics the exception handling logic in ``client._get_body``, but for geolocation which uses a different response format. """ body = response.json() if response.status_code in (200, 404): return body try: error = body["error"]["errors"][0]["reason"] except KeyError: error = None if response.status_code == 403: raise exceptions._OverQueryLimit(response.status_code, error) else: raise exceptions.ApiError(response.status_code, error)
[ "def", "_geolocation_extract", "(", "response", ")", ":", "body", "=", "response", ".", "json", "(", ")", "if", "response", ".", "status_code", "in", "(", "200", ",", "404", ")", ":", "return", "body", "try", ":", "error", "=", "body", "[", "\"error\"", "]", "[", "\"errors\"", "]", "[", "0", "]", "[", "\"reason\"", "]", "except", "KeyError", ":", "error", "=", "None", "if", "response", ".", "status_code", "==", "403", ":", "raise", "exceptions", ".", "_OverQueryLimit", "(", "response", ".", "status_code", ",", "error", ")", "else", ":", "raise", "exceptions", ".", "ApiError", "(", "response", ".", "status_code", ",", "error", ")" ]
29.888889
18.888889
def check_throttles(self, request): """ Check if request should be throttled. Raises an appropriate exception if the request is throttled. """ for throttle in self.get_throttles(): if not throttle.allow_request(request, self): self.throttled(request, throttle.wait())
[ "def", "check_throttles", "(", "self", ",", "request", ")", ":", "for", "throttle", "in", "self", ".", "get_throttles", "(", ")", ":", "if", "not", "throttle", ".", "allow_request", "(", "request", ",", "self", ")", ":", "self", ".", "throttled", "(", "request", ",", "throttle", ".", "wait", "(", ")", ")" ]
41
9.5
def active_network_addresses(hypervisor): """Query libvirt for the already reserved addresses.""" active = [] for network in hypervisor.listNetworks(): try: xml = hypervisor.networkLookupByName(network).XMLDesc(0) except libvirt.libvirtError: # network has been destroyed meanwhile continue else: ip_element = etree.fromstring(xml).find('.//ip') address = ip_element.get('address') netmask = ip_element.get('netmask') active.append(ipaddress.IPv4Network(u'/'.join((address, netmask)), strict=False)) return active
[ "def", "active_network_addresses", "(", "hypervisor", ")", ":", "active", "=", "[", "]", "for", "network", "in", "hypervisor", ".", "listNetworks", "(", ")", ":", "try", ":", "xml", "=", "hypervisor", ".", "networkLookupByName", "(", "network", ")", ".", "XMLDesc", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "# network has been destroyed meanwhile", "continue", "else", ":", "ip_element", "=", "etree", ".", "fromstring", "(", "xml", ")", ".", "find", "(", "'.//ip'", ")", "address", "=", "ip_element", ".", "get", "(", "'address'", ")", "netmask", "=", "ip_element", ".", "get", "(", "'netmask'", ")", "active", ".", "append", "(", "ipaddress", ".", "IPv4Network", "(", "u'/'", ".", "join", "(", "(", "address", ",", "netmask", ")", ")", ",", "strict", "=", "False", ")", ")", "return", "active" ]
36.666667
22.611111
def _jcols(self, *cols): """Return a JVM Seq of Columns from a list of Column or column names If `cols` has only one list in it, cols[0] will be used as the list. """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] return self._jseq(cols, _to_java_column)
[ "def", "_jcols", "(", "self", ",", "*", "cols", ")", ":", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "list", ")", ":", "cols", "=", "cols", "[", "0", "]", "return", "self", ".", "_jseq", "(", "cols", ",", "_to_java_column", ")" ]
39.625
16.25
def _cmd_run(cmd, as_json=False): ''' Ensure that the Pki module is loaded, and convert to and extract data from Json as needed. ''' cmd_full = ['Import-Module -Name PKI; '] if as_json: cmd_full.append(r'ConvertTo-Json -Compress -Depth 4 -InputObject ' r'@({0})'.format(cmd)) else: cmd_full.append(cmd) cmd_ret = __salt__['cmd.run_all']( six.text_type().join(cmd_full), shell='powershell', python_shell=True) if cmd_ret['retcode'] != 0: _LOG.error('Unable to execute command: %s\nError: %s', cmd, cmd_ret['stderr']) if as_json: try: items = salt.utils.json.loads(cmd_ret['stdout'], strict=False) return items except ValueError: _LOG.error('Unable to parse return data as Json.') return cmd_ret['stdout']
[ "def", "_cmd_run", "(", "cmd", ",", "as_json", "=", "False", ")", ":", "cmd_full", "=", "[", "'Import-Module -Name PKI; '", "]", "if", "as_json", ":", "cmd_full", ".", "append", "(", "r'ConvertTo-Json -Compress -Depth 4 -InputObject '", "r'@({0})'", ".", "format", "(", "cmd", ")", ")", "else", ":", "cmd_full", ".", "append", "(", "cmd", ")", "cmd_ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "six", ".", "text_type", "(", ")", ".", "join", "(", "cmd_full", ")", ",", "shell", "=", "'powershell'", ",", "python_shell", "=", "True", ")", "if", "cmd_ret", "[", "'retcode'", "]", "!=", "0", ":", "_LOG", ".", "error", "(", "'Unable to execute command: %s\\nError: %s'", ",", "cmd", ",", "cmd_ret", "[", "'stderr'", "]", ")", "if", "as_json", ":", "try", ":", "items", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "cmd_ret", "[", "'stdout'", "]", ",", "strict", "=", "False", ")", "return", "items", "except", "ValueError", ":", "_LOG", ".", "error", "(", "'Unable to parse return data as Json.'", ")", "return", "cmd_ret", "[", "'stdout'", "]" ]
31.555556
23.481481
def resolve_job_references(io_hash, job_outputs, should_resolve=True): ''' :param io_hash: an input or output hash in which to resolve any job-based object references possible :type io_hash: dict :param job_outputs: a mapping of finished local jobs to their output hashes :type job_outputs: dict :param should_resolve: whether it is an error if a job-based object reference in *io_hash* cannot be resolved yet :type should_resolve: boolean Modifies *io_hash* in-place. ''' q = [] for field in io_hash: if is_job_ref(io_hash[field]): io_hash[field] = resolve_job_ref(io_hash[field], job_outputs, should_resolve) elif isinstance(io_hash[field], list) or isinstance(io_hash[field], dict): q.append(io_hash[field]) while len(q) > 0: thing = q.pop() if isinstance(thing, list): for i in range(len(thing)): if is_job_ref(thing[i]): thing[i] = resolve_job_ref(thing[i], job_outputs, should_resolve) elif isinstance(thing[i], list) or isinstance(thing[i], dict): q.append(thing[i]) else: for field in thing: if is_job_ref(thing[field]): thing[field] = resolve_job_ref(thing[field], job_outputs, should_resolve) elif isinstance(thing[field], list) or isinstance(thing[field], dict): q.append(thing[field])
[ "def", "resolve_job_references", "(", "io_hash", ",", "job_outputs", ",", "should_resolve", "=", "True", ")", ":", "q", "=", "[", "]", "for", "field", "in", "io_hash", ":", "if", "is_job_ref", "(", "io_hash", "[", "field", "]", ")", ":", "io_hash", "[", "field", "]", "=", "resolve_job_ref", "(", "io_hash", "[", "field", "]", ",", "job_outputs", ",", "should_resolve", ")", "elif", "isinstance", "(", "io_hash", "[", "field", "]", ",", "list", ")", "or", "isinstance", "(", "io_hash", "[", "field", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "io_hash", "[", "field", "]", ")", "while", "len", "(", "q", ")", ">", "0", ":", "thing", "=", "q", ".", "pop", "(", ")", "if", "isinstance", "(", "thing", ",", "list", ")", ":", "for", "i", "in", "range", "(", "len", "(", "thing", ")", ")", ":", "if", "is_job_ref", "(", "thing", "[", "i", "]", ")", ":", "thing", "[", "i", "]", "=", "resolve_job_ref", "(", "thing", "[", "i", "]", ",", "job_outputs", ",", "should_resolve", ")", "elif", "isinstance", "(", "thing", "[", "i", "]", ",", "list", ")", "or", "isinstance", "(", "thing", "[", "i", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "thing", "[", "i", "]", ")", "else", ":", "for", "field", "in", "thing", ":", "if", "is_job_ref", "(", "thing", "[", "field", "]", ")", ":", "thing", "[", "field", "]", "=", "resolve_job_ref", "(", "thing", "[", "field", "]", ",", "job_outputs", ",", "should_resolve", ")", "elif", "isinstance", "(", "thing", "[", "field", "]", ",", "list", ")", "or", "isinstance", "(", "thing", "[", "field", "]", ",", "dict", ")", ":", "q", ".", "append", "(", "thing", "[", "field", "]", ")" ]
43.848485
25.787879
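A small usage sketch of the traversal above. The helpers is_job_ref and resolve_job_ref are not shown in this record, so the stubs below are hypothetical stand-ins that treat {"job": ..., "field": ...} dicts as job-based references; the point is only to show how a nested input hash is rewritten in place.

# Hypothetical stand-ins for the helpers used by resolve_job_references above.
def is_job_ref(value):
    # assume a job reference looks like {"job": <job id>, "field": <output name>}
    return isinstance(value, dict) and set(value) == {"job", "field"}

def resolve_job_ref(ref, outputs, should_resolve=True):
    return outputs[ref["job"]][ref["field"]]

job_outputs = {"job-001": {"count": 42}}
io_hash = {
    "threshold": 5,
    "nested": {"total": {"job": "job-001", "field": "count"}},
}
resolve_job_references(io_hash, job_outputs)  # modifies io_hash in place
print(io_hash)  # {'threshold': 5, 'nested': {'total': 42}}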
def sam_parse_reply(line): """parse a reply line into a dict""" parts = line.split(' ') opts = {k: v for (k, v) in split_kv(parts[2:])} return SAMReply(parts[0], opts)
[ "def", "sam_parse_reply", "(", "line", ")", ":", "parts", "=", "line", ".", "split", "(", "' '", ")", "opts", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "split_kv", "(", "parts", "[", "2", ":", "]", ")", "}", "return", "SAMReply", "(", "parts", "[", "0", "]", ",", "opts", ")" ]
35.8
8.6
def bgmagenta(cls, string, auto=False): """Color-code entire string. :param str string: String to colorize. :param bool auto: Enable auto-color (dark/light terminal). :return: Class instance for colorized string. :rtype: Color """ return cls.colorize('bgmagenta', string, auto=auto)
[ "def", "bgmagenta", "(", "cls", ",", "string", ",", "auto", "=", "False", ")", ":", "return", "cls", ".", "colorize", "(", "'bgmagenta'", ",", "string", ",", "auto", "=", "auto", ")" ]
33.1
16.4
def _parse_command_response(response): """Parse an SCI command response into ElementTree XML This is a helper method that takes a Requests Response object of an SCI command response and will parse it into an ElementTree Element representing the root of the XML response. :param response: The requests response object :return: An ElementTree Element that is the root of the response XML :raises ResponseParseError: If the response XML is not well formed """ try: root = ET.fromstring(response.text) except ET.ParseError: raise ResponseParseError( "Unexpected response format, could not parse XML. Response: {}".format(response.text)) return root
[ "def", "_parse_command_response", "(", "response", ")", ":", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "response", ".", "text", ")", "except", "ET", ".", "ParseError", ":", "raise", "ResponseParseError", "(", "\"Unexpected response format, could not parse XML. Response: {}\"", ".", "format", "(", "response", ".", "text", ")", ")", "return", "root" ]
39
22.222222
def schedule(self):
        """Initiate distribution of the test collection.

        Initiate scheduling of the items across the nodes.  If this gets called
        again later it behaves the same as calling ``._reschedule()`` on all
        nodes so that newly added nodes will start to be used.

        If ``.collection_is_completed`` is True, this is called by the hook:

        - ``DSession.worker_collectionfinish``.
        """
        assert self.collection_is_completed

        # Initial distribution already happened, reschedule on all nodes
        if self.collection is not None:
            for node in self.nodes:
                self._reschedule(node)
            return

        # Check that all nodes collected the same tests
        if not self._check_nodes_have_same_collection():
            self.log("**Different tests collected, aborting run**")
            return

        # Collections are identical, create the final list of items
        self.collection = list(next(iter(self.registered_collections.values())))
        if not self.collection:
            return

        # Determine chunks of work (scopes)
        for nodeid in self.collection:
            scope = self._split_scope(nodeid)
            work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
            work_unit[nodeid] = False

        # Avoid having more workers than work
        extra_nodes = len(self.nodes) - len(self.workqueue)

        if extra_nodes > 0:
            self.log("Shutting down {0} nodes".format(extra_nodes))

            for _ in range(extra_nodes):
                unused_node, assigned = self.assigned_work.popitem(last=True)

                self.log("Shutting down unused node {0}".format(unused_node))
                unused_node.shutdown()

        # Assign initial workload
        for node in self.nodes:
            self._assign_work_unit(node)

        # Ensure nodes start with at least two work units if possible (#277)
        for node in self.nodes:
            self._reschedule(node)

        # Initial distribution sent all tests, start node shutdown
        if not self.workqueue:
            for node in self.nodes:
                node.shutdown()
[ "def", "schedule", "(", "self", ")", ":", "assert", "self", ".", "collection_is_completed", "# Initial distribution already happened, reschedule on all nodes", "if", "self", ".", "collection", "is", "not", "None", ":", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_reschedule", "(", "node", ")", "return", "# Check that all nodes collected the same tests", "if", "not", "self", ".", "_check_nodes_have_same_collection", "(", ")", ":", "self", ".", "log", "(", "\"**Different tests collected, aborting run**\"", ")", "return", "# Collections are identical, create the final list of items", "self", ".", "collection", "=", "list", "(", "next", "(", "iter", "(", "self", ".", "registered_collections", ".", "values", "(", ")", ")", ")", ")", "if", "not", "self", ".", "collection", ":", "return", "# Determine chunks of work (scopes)", "for", "nodeid", "in", "self", ".", "collection", ":", "scope", "=", "self", ".", "_split_scope", "(", "nodeid", ")", "work_unit", "=", "self", ".", "workqueue", ".", "setdefault", "(", "scope", ",", "default", "=", "OrderedDict", "(", ")", ")", "work_unit", "[", "nodeid", "]", "=", "False", "# Avoid having more workers than work", "extra_nodes", "=", "len", "(", "self", ".", "nodes", ")", "-", "len", "(", "self", ".", "workqueue", ")", "if", "extra_nodes", ">", "0", ":", "self", ".", "log", "(", "\"Shuting down {0} nodes\"", ".", "format", "(", "extra_nodes", ")", ")", "for", "_", "in", "range", "(", "extra_nodes", ")", ":", "unused_node", ",", "assigned", "=", "self", ".", "assigned_work", ".", "popitem", "(", "last", "=", "True", ")", "self", ".", "log", "(", "\"Shuting down unused node {0}\"", ".", "format", "(", "unused_node", ")", ")", "unused_node", ".", "shutdown", "(", ")", "# Assign initial workload", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_assign_work_unit", "(", "node", ")", "# Ensure nodes start with at least two work units if possible (#277)", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "_reschedule", "(", "node", ")", "# Initial distribution sent all tests, start node shutdown", "if", "not", "self", ".", "workqueue", ":", "for", "node", "in", "self", ".", "nodes", ":", "node", ".", "shutdown", "(", ")" ]
36.271186
21.542373
def ac3(space): """ AC-3 algorithm. This reduces the domains of the variables by propagating constraints to ensure arc consistency. :param Space space: The space to reduce """ #determine arcs arcs = {} for name in space.variables: arcs[name] = set([]) for const in space.constraints: for vname1,vname2 in product(const.vnames,const.vnames): if vname1 != vname2: #this is pessimistic, we assume that each constraint #pairwisely couples all variables it affects arcs[vname1].add(vname2) #enforce node consistency for vname in space.variables: for const in space.constraints: _unary(space,const,vname) #assemble work list worklist = set([]) for v1 in space.variables: for v2 in space.variables: for const in space.constraints: if _binary(space,const,v1,v2): for name in arcs[v1]: worklist.add((v1,name)) #work through work list while worklist: v1,v2 = worklist.pop() for const in space.constraints: if _binary(space,const,v1,v2): for vname in arcs[v1]: worklist.add((v1,vname))
[ "def", "ac3", "(", "space", ")", ":", "#determine arcs", "arcs", "=", "{", "}", "for", "name", "in", "space", ".", "variables", ":", "arcs", "[", "name", "]", "=", "set", "(", "[", "]", ")", "for", "const", "in", "space", ".", "constraints", ":", "for", "vname1", ",", "vname2", "in", "product", "(", "const", ".", "vnames", ",", "const", ".", "vnames", ")", ":", "if", "vname1", "!=", "vname2", ":", "#this is pessimistic, we assume that each constraint", "#pairwisely couples all variables it affects", "arcs", "[", "vname1", "]", ".", "add", "(", "vname2", ")", "#enforce node consistency", "for", "vname", "in", "space", ".", "variables", ":", "for", "const", "in", "space", ".", "constraints", ":", "_unary", "(", "space", ",", "const", ",", "vname", ")", "#assemble work list", "worklist", "=", "set", "(", "[", "]", ")", "for", "v1", "in", "space", ".", "variables", ":", "for", "v2", "in", "space", ".", "variables", ":", "for", "const", "in", "space", ".", "constraints", ":", "if", "_binary", "(", "space", ",", "const", ",", "v1", ",", "v2", ")", ":", "for", "name", "in", "arcs", "[", "v1", "]", ":", "worklist", ".", "add", "(", "(", "v1", ",", "name", ")", ")", "#work through work list", "while", "worklist", ":", "v1", ",", "v2", "=", "worklist", ".", "pop", "(", ")", "for", "const", "in", "space", ".", "constraints", ":", "if", "_binary", "(", "space", ",", "const", ",", "v1", ",", "v2", ")", ":", "for", "vname", "in", "arcs", "[", "v1", "]", ":", "worklist", ".", "add", "(", "(", "v1", ",", "vname", ")", ")" ]
31.897436
13.384615
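The Space, _unary and _binary helpers used above are not part of this record, so the sketch below is a self-contained AC-3 pass on a toy CSP (two variables with the single binary constraint x < y); it illustrates the arc/worklist mechanics only, not the library's API.

# Toy CSP: domains plus one binary constraint registered for both arc directions.
domains = {"x": {1, 2, 3}, "y": {1, 2, 3}}
constraints = {("x", "y"): lambda a, b: a < b,   # a value of x needs some larger y
               ("y", "x"): lambda a, b: b < a}   # a value of y needs some smaller x

def revise(xi, xj):
    """Drop values of xi with no supporting value in xj; report whether we pruned."""
    pruned = False
    for vx in set(domains[xi]):
        if not any(constraints[(xi, xj)](vx, vy) for vy in domains[xj]):
            domains[xi].discard(vx)
            pruned = True
    return pruned

worklist = set(constraints)              # start with every arc
while worklist:
    xi, xj = worklist.pop()
    if revise(xi, xj):
        # re-enqueue arcs that point at the variable we just pruned
        worklist |= {(xk, xl) for (xk, xl) in constraints if xl == xi and xk != xj}

print(domains)  # x is left with {1, 2}, y with {2, 3}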
def status(self, status): """Sets the status of this StoreCreditPayment. :param status: The status of this StoreCreditPayment. :type: str """ allowed_values = ["pending", "awaitingRetry", "successful", "failed"] if status is not None and status not in allowed_values: raise ValueError( "Invalid value for `status` ({0}), must be one of {1}" .format(status, allowed_values) ) self._status = status
[ "def", "status", "(", "self", ",", "status", ")", ":", "allowed_values", "=", "[", "\"pending\"", ",", "\"awaitingRetry\"", ",", "\"successful\"", ",", "\"failed\"", "]", "if", "status", "is", "not", "None", "and", "status", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `status` ({0}), must be one of {1}\"", ".", "format", "(", "status", ",", "allowed_values", ")", ")", "self", ".", "_status", "=", "status" ]
33.133333
21.6
def find_service_references( self, clazz=None, ldap_filter=None, only_one=False ): """ Finds all services references matching the given filter. :param clazz: Class implemented by the service :param ldap_filter: Service filter :param only_one: Return the first matching service reference only :return: A list of found references, or None :raise BundleException: An error occurred looking for service references """ with self.__svc_lock: if clazz is None and ldap_filter is None: # Return a sorted copy of the keys list # Do not return None, as the whole content was required return sorted(self.__svc_registry.keys()) if hasattr(clazz, "__name__"): # Escape the type name clazz = ldapfilter.escape_LDAP(clazz.__name__) elif is_string(clazz): # Escape the class name clazz = ldapfilter.escape_LDAP(clazz) if clazz is None: # Directly use the given filter refs_set = sorted(self.__svc_registry.keys()) else: try: # Only for references with the given specification refs_set = iter(self.__svc_specs[clazz]) except KeyError: # No matching specification return None # Parse the filter try: new_filter = ldapfilter.get_ldap_filter(ldap_filter) except ValueError as ex: raise BundleException(ex) if new_filter is not None: # Prepare a generator, as we might not need a complete # walk-through refs_set = ( ref for ref in refs_set if new_filter.matches(ref.get_properties()) ) if only_one: # Return the first element in the list/generator try: return [next(refs_set)] except StopIteration: # No match return None # Get all the matching references return list(refs_set) or None
[ "def", "find_service_references", "(", "self", ",", "clazz", "=", "None", ",", "ldap_filter", "=", "None", ",", "only_one", "=", "False", ")", ":", "with", "self", ".", "__svc_lock", ":", "if", "clazz", "is", "None", "and", "ldap_filter", "is", "None", ":", "# Return a sorted copy of the keys list", "# Do not return None, as the whole content was required", "return", "sorted", "(", "self", ".", "__svc_registry", ".", "keys", "(", ")", ")", "if", "hasattr", "(", "clazz", ",", "\"__name__\"", ")", ":", "# Escape the type name", "clazz", "=", "ldapfilter", ".", "escape_LDAP", "(", "clazz", ".", "__name__", ")", "elif", "is_string", "(", "clazz", ")", ":", "# Escape the class name", "clazz", "=", "ldapfilter", ".", "escape_LDAP", "(", "clazz", ")", "if", "clazz", "is", "None", ":", "# Directly use the given filter", "refs_set", "=", "sorted", "(", "self", ".", "__svc_registry", ".", "keys", "(", ")", ")", "else", ":", "try", ":", "# Only for references with the given specification", "refs_set", "=", "iter", "(", "self", ".", "__svc_specs", "[", "clazz", "]", ")", "except", "KeyError", ":", "# No matching specification", "return", "None", "# Parse the filter", "try", ":", "new_filter", "=", "ldapfilter", ".", "get_ldap_filter", "(", "ldap_filter", ")", "except", "ValueError", "as", "ex", ":", "raise", "BundleException", "(", "ex", ")", "if", "new_filter", "is", "not", "None", ":", "# Prepare a generator, as we might not need a complete", "# walk-through", "refs_set", "=", "(", "ref", "for", "ref", "in", "refs_set", "if", "new_filter", ".", "matches", "(", "ref", ".", "get_properties", "(", ")", ")", ")", "if", "only_one", ":", "# Return the first element in the list/generator", "try", ":", "return", "[", "next", "(", "refs_set", ")", "]", "except", "StopIteration", ":", "# No match", "return", "None", "# Get all the matching references", "return", "list", "(", "refs_set", ")", "or", "None" ]
36.951613
16.532258
def _fix_callback_item(self, item): 'Update component identifier' item.component_id = self._fix_id(item.component_id) return item
[ "def", "_fix_callback_item", "(", "self", ",", "item", ")", ":", "item", ".", "component_id", "=", "self", ".", "_fix_id", "(", "item", ".", "component_id", ")", "return", "item" ]
37.5
12
def set_power_levels(self, room_id, content): """Perform PUT /rooms/$room_id/state/m.room.power_levels Note that any power levels which are not explicitly specified in the content arg are reset to default values. Args: room_id (str): The room ID content (dict): The JSON content to send. See example content below. Example:: api = MatrixHttpApi("http://example.com", token="foobar") api.set_power_levels("!exampleroom:example.com", { "ban": 50, # defaults to 50 if unspecified "events": { "m.room.name": 100, # must have PL 100 to change room name "m.room.power_levels": 100 # must have PL 100 to change PLs }, "events_default": 0, # defaults to 0 "invite": 50, # defaults to 50 "kick": 50, # defaults to 50 "redact": 50, # defaults to 50 "state_default": 50, # defaults to 50 if m.room.power_levels exists "users": { "@someguy:example.com": 100 # defaults to 0 }, "users_default": 0 # defaults to 0 } ) """ # Synapse returns M_UNKNOWN if body['events'] is omitted, # as of 2016-10-31 if "events" not in content: content["events"] = {} return self.send_state_event(room_id, "m.room.power_levels", content)
[ "def", "set_power_levels", "(", "self", ",", "room_id", ",", "content", ")", ":", "# Synapse returns M_UNKNOWN if body['events'] is omitted,", "# as of 2016-10-31", "if", "\"events\"", "not", "in", "content", ":", "content", "[", "\"events\"", "]", "=", "{", "}", "return", "self", ".", "send_state_event", "(", "room_id", ",", "\"m.room.power_levels\"", ",", "content", ")" ]
40.789474
22.184211
def create_git_release(self, tag, name, message, draft=False, prerelease=False, target_commitish=github.GithubObject.NotSet): """ :calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_ :param tag: string :param name: string :param message: string :param draft: bool :param prerelease: bool :param target_commitish: string or :class:`github.Branch.Branch` or :class:`github.Commit.Commit` or :class:`github.GitCommit.GitCommit` :rtype: :class:`github.GitRelease.GitRelease` """ assert isinstance(tag, (str, unicode)), tag assert isinstance(name, (str, unicode)), name assert isinstance(message, (str, unicode)), message assert isinstance(draft, bool), draft assert isinstance(prerelease, bool), prerelease assert target_commitish is github.GithubObject.NotSet or isinstance(target_commitish, (str, unicode, github.Branch.Branch, github.Commit.Commit, github.GitCommit.GitCommit)), target_commitish post_parameters = { "tag_name": tag, "name": name, "body": message, "draft": draft, "prerelease": prerelease, } if isinstance(target_commitish, (str, unicode)): post_parameters["target_commitish"] = target_commitish elif isinstance(target_commitish, github.Branch.Branch): post_parameters["target_commitish"] = target_commitish.name elif isinstance(target_commitish, (github.Commit.Commit, github.GitCommit.GitCommit)): post_parameters["target_commitish"] = target_commitish.sha headers, data = self._requester.requestJsonAndCheck( "POST", self.url + "/releases", input=post_parameters ) return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
[ "def", "create_git_release", "(", "self", ",", "tag", ",", "name", ",", "message", ",", "draft", "=", "False", ",", "prerelease", "=", "False", ",", "target_commitish", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "isinstance", "(", "tag", ",", "(", "str", ",", "unicode", ")", ")", ",", "tag", "assert", "isinstance", "(", "name", ",", "(", "str", ",", "unicode", ")", ")", ",", "name", "assert", "isinstance", "(", "message", ",", "(", "str", ",", "unicode", ")", ")", ",", "message", "assert", "isinstance", "(", "draft", ",", "bool", ")", ",", "draft", "assert", "isinstance", "(", "prerelease", ",", "bool", ")", ",", "prerelease", "assert", "target_commitish", "is", "github", ".", "GithubObject", ".", "NotSet", "or", "isinstance", "(", "target_commitish", ",", "(", "str", ",", "unicode", ",", "github", ".", "Branch", ".", "Branch", ",", "github", ".", "Commit", ".", "Commit", ",", "github", ".", "GitCommit", ".", "GitCommit", ")", ")", ",", "target_commitish", "post_parameters", "=", "{", "\"tag_name\"", ":", "tag", ",", "\"name\"", ":", "name", ",", "\"body\"", ":", "message", ",", "\"draft\"", ":", "draft", ",", "\"prerelease\"", ":", "prerelease", ",", "}", "if", "isinstance", "(", "target_commitish", ",", "(", "str", ",", "unicode", ")", ")", ":", "post_parameters", "[", "\"target_commitish\"", "]", "=", "target_commitish", "elif", "isinstance", "(", "target_commitish", ",", "github", ".", "Branch", ".", "Branch", ")", ":", "post_parameters", "[", "\"target_commitish\"", "]", "=", "target_commitish", ".", "name", "elif", "isinstance", "(", "target_commitish", ",", "(", "github", ".", "Commit", ".", "Commit", ",", "github", ".", "GitCommit", ".", "GitCommit", ")", ")", ":", "post_parameters", "[", "\"target_commitish\"", "]", "=", "target_commitish", ".", "sha", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"POST\"", ",", "self", ".", "url", "+", "\"/releases\"", ",", "input", "=", "post_parameters", ")", "return", "github", ".", "GitRelease", ".", "GitRelease", "(", "self", ".", "_requester", ",", "headers", ",", "data", ",", "completed", "=", "True", ")" ]
52.638889
26.638889
def queryset(self):
        """This function sets the queryset according to the keyword arguments.

        For the crosstype, the input value is the display value of CROSS_TYPE.
        This is done because the spaces in HET vs HET are not recognized.
        Therefore the queryset must be matched exactly (i.e. by case, so Intercross not intercross).
        The function also filters the strain by the strain_slug keyword argument.
        """
        from mousedb.animal.models import CROSS_TYPE
        crosstype_reverse = dict((v, k) for k, v in CROSS_TYPE)
        try:
            crosstype = crosstype_reverse[self.kwargs['breeding_type']]
        except KeyError:
            raise Http404
        strain = get_object_or_404(Strain, Strain_slug=self.kwargs['strain_slug'])
        if strain:
            return Animal.objects.filter(Strain=strain,Breeding__Crosstype=crosstype)
        else:
            raise Http404
[ "def", "queryset", "(", "self", ")", ":", "from", "mousedb", ".", "animal", ".", "models", "import", "CROSS_TYPE", "crosstype_reverse", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "CROSS_TYPE", ")", "try", ":", "crosstype", "=", "crosstype_reverse", "[", "self", ".", "kwargs", "[", "'breeding_type'", "]", "]", "except", "KeyError", ":", "raise", "Http404", "strain", "=", "get_object_or_404", "(", "Strain", ",", "Strain_slug", "=", "self", ".", "kwargs", "[", "'strain_slug'", "]", ")", "if", "strain", ":", "return", "Animal", ".", "objects", ".", "filter", "(", "Strain", "=", "strain", ",", "Breeding__Crosstype", "=", "crosstype", ")", "else", ":", "raise", "Http404" ]
48.736842
26.789474
def get_last_traded_dt(self, asset, dt): """ Get the latest minute on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded minute. dt : pd.Timestamp The minute at which to start searching for the last traded minute. Returns ------- last_traded : pd.Timestamp The dt of the last trade for the given asset, using the input dt as a vantage point. """ rf = self._roll_finders[asset.roll_style] sid = (rf.get_contract_center(asset.root_symbol, dt, asset.offset)) if sid is None: return pd.NaT contract = rf.asset_finder.retrieve_asset(sid) return self._bar_reader.get_last_traded_dt(contract, dt)
[ "def", "get_last_traded_dt", "(", "self", ",", "asset", ",", "dt", ")", ":", "rf", "=", "self", ".", "_roll_finders", "[", "asset", ".", "roll_style", "]", "sid", "=", "(", "rf", ".", "get_contract_center", "(", "asset", ".", "root_symbol", ",", "dt", ",", "asset", ".", "offset", ")", ")", "if", "sid", "is", "None", ":", "return", "pd", ".", "NaT", "contract", "=", "rf", ".", "asset_finder", ".", "retrieve_asset", "(", "sid", ")", "return", "self", ".", "_bar_reader", ".", "get_last_traded_dt", "(", "contract", ",", "dt", ")" ]
36.296296
19.037037
def _create_refinement_wrapper(transformation, baseattr, base, target_attrname): """ applies refinement ``transformation`` to ``baseattr`` attribute of ``base``. ``baseattr`` can be any type of callable (function, method, functor) this method handles the differences. docstrings are also rescued from the original if the refinement has no docstring set. """ # first step: extract the original special_refinement_type=None instance_refinement = _is_class_instance(base) if instance_refinement: dictelem = base.__class__.__dict__.get(target_attrname, None) else: dictelem = base.__dict__.get(target_attrname, None) if isinstance(dictelem, staticmethod): special_refinement_type = 'staticmethod' original = _extract_staticmethod(dictelem) elif isinstance(dictelem, classmethod): special_refinement_type = 'classmethod' original = _extract_classmethod(dictelem) else: if instance_refinement: # methods need a delegator original = _delegate(baseattr) # TODO: evaluate this: # original = base.__class__.__dict__[target_attrname] else: # default handling original = baseattr # step two: call the refinement passing it the original # the result is the wrapper wrapper = transformation(original) # rescue docstring if not wrapper.__doc__: wrapper.__doc__ = baseattr.__doc__ # step three: make wrapper ready for injection if special_refinement_type == 'staticmethod': wrapper = staticmethod(wrapper) elif special_refinement_type == 'classmethod': wrapper = classmethod(wrapper) if instance_refinement: wrapper = wrapper.__get__(base, base.__class__) return wrapper
[ "def", "_create_refinement_wrapper", "(", "transformation", ",", "baseattr", ",", "base", ",", "target_attrname", ")", ":", "# first step: extract the original", "special_refinement_type", "=", "None", "instance_refinement", "=", "_is_class_instance", "(", "base", ")", "if", "instance_refinement", ":", "dictelem", "=", "base", ".", "__class__", ".", "__dict__", ".", "get", "(", "target_attrname", ",", "None", ")", "else", ":", "dictelem", "=", "base", ".", "__dict__", ".", "get", "(", "target_attrname", ",", "None", ")", "if", "isinstance", "(", "dictelem", ",", "staticmethod", ")", ":", "special_refinement_type", "=", "'staticmethod'", "original", "=", "_extract_staticmethod", "(", "dictelem", ")", "elif", "isinstance", "(", "dictelem", ",", "classmethod", ")", ":", "special_refinement_type", "=", "'classmethod'", "original", "=", "_extract_classmethod", "(", "dictelem", ")", "else", ":", "if", "instance_refinement", ":", "# methods need a delegator", "original", "=", "_delegate", "(", "baseattr", ")", "# TODO: evaluate this:", "# original = base.__class__.__dict__[target_attrname]", "else", ":", "# default handling", "original", "=", "baseattr", "# step two: call the refinement passing it the original", "# the result is the wrapper", "wrapper", "=", "transformation", "(", "original", ")", "# rescue docstring", "if", "not", "wrapper", ".", "__doc__", ":", "wrapper", ".", "__doc__", "=", "baseattr", ".", "__doc__", "# step three: make wrapper ready for injection", "if", "special_refinement_type", "==", "'staticmethod'", ":", "wrapper", "=", "staticmethod", "(", "wrapper", ")", "elif", "special_refinement_type", "==", "'classmethod'", ":", "wrapper", "=", "classmethod", "(", "wrapper", ")", "if", "instance_refinement", ":", "wrapper", "=", "wrapper", ".", "__get__", "(", "base", ",", "base", ".", "__class__", ")", "return", "wrapper" ]
38.254902
17.196078
def to_lal_ligotimegps(gps): """Convert the given GPS time to a `lal.LIGOTimeGPS` object Parameters ---------- gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str` input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps` Returns ------- ligotimegps : `lal.LIGOTimeGPS` a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time """ gps = to_gps(gps) return lal.LIGOTimeGPS(gps.gpsSeconds, gps.gpsNanoSeconds)
[ "def", "to_lal_ligotimegps", "(", "gps", ")", ":", "gps", "=", "to_gps", "(", "gps", ")", "return", "lal", ".", "LIGOTimeGPS", "(", "gps", ".", "gpsSeconds", ",", "gps", ".", "gpsNanoSeconds", ")" ]
31.133333
21.933333
async def AddRelation(self, endpoints): ''' endpoints : typing.Sequence[str] Returns -> typing.Mapping[str, ~CharmRelation] ''' # map input types to rpc msg _params = dict() msg = dict(type='Application', request='AddRelation', version=3, params=_params) _params['endpoints'] = endpoints reply = await self.rpc(msg) return reply
[ "async", "def", "AddRelation", "(", "self", ",", "endpoints", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Application'", ",", "request", "=", "'AddRelation'", ",", "version", "=", "3", ",", "params", "=", "_params", ")", "_params", "[", "'endpoints'", "]", "=", "endpoints", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
32.285714
9.857143
def make_release(self): """Build and distribute the package. """ directory = self.directory infoflags = self.infoflags branch = self.branch develop = self.develop scmtype = self.scm.name tempdir = abspath(tempfile.mkdtemp(prefix='mkrelease-')) try: if self.isremote: directory = join(tempdir, 'build') self.scm.clone_url(self.remoteurl, directory) else: directory = abspath(expanduser(directory)) self.scm.check_valid_sandbox(directory) if self.isremote: branch = self.scm.make_branchid(directory, branch) if branch: self.scm.switch_branch(directory, branch) if scmtype != 'svn': branch = self.scm.get_branch_from_sandbox(directory) print('Releasing branch', branch) self.setuptools.check_valid_package(directory) if not (self.skipcommit and self.skiptag): self.scm.check_dirty_sandbox(directory) self.scm.check_unclean_sandbox(directory) name, version = self.setuptools.get_package_info(directory, develop) if self.isremote: print('Releasing', name, version) if not self.skiptag: print('Tagging', name, version) tagid = self.scm.make_tagid(directory, version) self.scm.check_tag_exists(directory, tagid) self.scm.create_tag(directory, tagid, name, version, self.push) if self.manifest: scmtype = 'none' for distcmd, distflags in self.distributions: manifest = self.setuptools.run_egg_info( directory, infoflags, scmtype, self.quiet) distfile = self.setuptools.run_dist( directory, infoflags, distcmd, distflags, scmtype, self.quiet) for location in self.locations: if self.locations.is_server(location): if not self.get_skipregister(location): self.setuptools.run_register( directory, infoflags, location, scmtype, self.quiet) if not self.get_skipupload(): uploadflags = self.get_uploadflags(location) if '--sign' in uploadflags and isfile(distfile+'.asc'): os.remove(distfile+'.asc') self.setuptools.run_upload( directory, infoflags, distcmd, distflags, location, uploadflags, scmtype, self.quiet) else: if not self.skipupload: if self.locations.is_ssh_url(location): scheme, location = self.urlparser.to_ssh_url(location) self.scp.run_upload(scheme, distfile, location) else: self.scp.run_upload('scp', distfile, location) finally: shutil.rmtree(tempdir)
[ "def", "make_release", "(", "self", ")", ":", "directory", "=", "self", ".", "directory", "infoflags", "=", "self", ".", "infoflags", "branch", "=", "self", ".", "branch", "develop", "=", "self", ".", "develop", "scmtype", "=", "self", ".", "scm", ".", "name", "tempdir", "=", "abspath", "(", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'mkrelease-'", ")", ")", "try", ":", "if", "self", ".", "isremote", ":", "directory", "=", "join", "(", "tempdir", ",", "'build'", ")", "self", ".", "scm", ".", "clone_url", "(", "self", ".", "remoteurl", ",", "directory", ")", "else", ":", "directory", "=", "abspath", "(", "expanduser", "(", "directory", ")", ")", "self", ".", "scm", ".", "check_valid_sandbox", "(", "directory", ")", "if", "self", ".", "isremote", ":", "branch", "=", "self", ".", "scm", ".", "make_branchid", "(", "directory", ",", "branch", ")", "if", "branch", ":", "self", ".", "scm", ".", "switch_branch", "(", "directory", ",", "branch", ")", "if", "scmtype", "!=", "'svn'", ":", "branch", "=", "self", ".", "scm", ".", "get_branch_from_sandbox", "(", "directory", ")", "print", "(", "'Releasing branch'", ",", "branch", ")", "self", ".", "setuptools", ".", "check_valid_package", "(", "directory", ")", "if", "not", "(", "self", ".", "skipcommit", "and", "self", ".", "skiptag", ")", ":", "self", ".", "scm", ".", "check_dirty_sandbox", "(", "directory", ")", "self", ".", "scm", ".", "check_unclean_sandbox", "(", "directory", ")", "name", ",", "version", "=", "self", ".", "setuptools", ".", "get_package_info", "(", "directory", ",", "develop", ")", "if", "self", ".", "isremote", ":", "print", "(", "'Releasing'", ",", "name", ",", "version", ")", "if", "not", "self", ".", "skiptag", ":", "print", "(", "'Tagging'", ",", "name", ",", "version", ")", "tagid", "=", "self", ".", "scm", ".", "make_tagid", "(", "directory", ",", "version", ")", "self", ".", "scm", ".", "check_tag_exists", "(", "directory", ",", "tagid", ")", "self", ".", "scm", ".", "create_tag", "(", "directory", ",", "tagid", ",", "name", ",", "version", ",", "self", ".", "push", ")", "if", "self", ".", "manifest", ":", "scmtype", "=", "'none'", "for", "distcmd", ",", "distflags", "in", "self", ".", "distributions", ":", "manifest", "=", "self", ".", "setuptools", ".", "run_egg_info", "(", "directory", ",", "infoflags", ",", "scmtype", ",", "self", ".", "quiet", ")", "distfile", "=", "self", ".", "setuptools", ".", "run_dist", "(", "directory", ",", "infoflags", ",", "distcmd", ",", "distflags", ",", "scmtype", ",", "self", ".", "quiet", ")", "for", "location", "in", "self", ".", "locations", ":", "if", "self", ".", "locations", ".", "is_server", "(", "location", ")", ":", "if", "not", "self", ".", "get_skipregister", "(", "location", ")", ":", "self", ".", "setuptools", ".", "run_register", "(", "directory", ",", "infoflags", ",", "location", ",", "scmtype", ",", "self", ".", "quiet", ")", "if", "not", "self", ".", "get_skipupload", "(", ")", ":", "uploadflags", "=", "self", ".", "get_uploadflags", "(", "location", ")", "if", "'--sign'", "in", "uploadflags", "and", "isfile", "(", "distfile", "+", "'.asc'", ")", ":", "os", ".", "remove", "(", "distfile", "+", "'.asc'", ")", "self", ".", "setuptools", ".", "run_upload", "(", "directory", ",", "infoflags", ",", "distcmd", ",", "distflags", ",", "location", ",", "uploadflags", ",", "scmtype", ",", "self", ".", "quiet", ")", "else", ":", "if", "not", "self", ".", "skipupload", ":", "if", "self", ".", "locations", ".", "is_ssh_url", "(", "location", ")", ":", "scheme", ",", 
"location", "=", "self", ".", "urlparser", ".", "to_ssh_url", "(", "location", ")", "self", ".", "scp", ".", "run_upload", "(", "scheme", ",", "distfile", ",", "location", ")", "else", ":", "self", ".", "scp", ".", "run_upload", "(", "'scp'", ",", "distfile", ",", "location", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tempdir", ")" ]
43.767123
21.671233
def _pop(self, model):
        """Pop all matching tags off the model and return them."""
        tags = []

        # collect any existing tags with matching prefix
        for tag in model.tags:
            if self.is_tag(tag):
                tags.append(tag)

        # remove collected tags from model
        if tags:
            for tag in tags:
                model.tags.remove(tag)

        return tags
[ "def", "_pop", "(", "self", ",", "model", ")", ":", "tags", "=", "[", "]", "# collect any exsiting tags with matching prefix", "for", "tag", "in", "model", ".", "tags", ":", "if", "self", ".", "is_tag", "(", "tag", ")", ":", "tags", ".", "append", "(", "tag", ")", "# remove collected tags from model", "if", "tags", ":", "for", "tag", "in", "tags", ":", "model", ".", "tags", ".", "remove", "(", "tag", ")", "return", "tags" ]
26.533333
17.6
def get_api_token(self, agent_key, user_key): """ Send POST request to get an auth token. This method doesn't require auth obviously. :param agent_key: str agent key (who is acting on behalf of the user) :param user_key: str secret user key :return: requests.Response containing the successful result """ data = { "agent_key": agent_key, "user_key": user_key, } return self._post("/software_agents/api_token", data)
[ "def", "get_api_token", "(", "self", ",", "agent_key", ",", "user_key", ")", ":", "data", "=", "{", "\"agent_key\"", ":", "agent_key", ",", "\"user_key\"", ":", "user_key", ",", "}", "return", "self", ".", "_post", "(", "\"/software_agents/api_token\"", ",", "data", ")" ]
39
13.769231
def _error_code_to_str(mod, type_, code): """ This method is registered as ofp_error_code_to_str(type_, code) method into ryu.ofproto.ofproto_v1_* modules. And this method returns the error code as a string value for given 'type' and 'code' defined in ofp_error_msg structure. Example:: >>> ofproto.ofp_error_code_to_str(4, 9) 'OFPBMC_BAD_PREREQ(9)' """ (_, c_name) = _get_error_names(mod, type_, code) return '%s(%d)' % (c_name, code)
[ "def", "_error_code_to_str", "(", "mod", ",", "type_", ",", "code", ")", ":", "(", "_", ",", "c_name", ")", "=", "_get_error_names", "(", "mod", ",", "type_", ",", "code", ")", "return", "'%s(%d)'", "%", "(", "c_name", ",", "code", ")" ]
34
16
def ComputeGMST(GPSTime): """ ComputeGMST - function to compute the Greenwich mean sidereal time from the GPS time. GPSTime - GPS time that the GW signal from the source reached the geocenter. Returns GMST - the Greenwich mean sidereal time corresponding to GPSTime. Sarah Gossan 2012. """ # Difference in Julian Date between GPSTime and the J2000.0 epoch # Subtract half a day as Julian Days start at noon D = np.round((GPSTime - EpochJ2000_0_UTC)/secPerDay) - 0.5 # Number of Julian centuries since J2000.0 epoch T = D/36525 # 1st approximation to GMST without corrections for leap seconds GMST0 = 6.697374558 + 2400.051336*T + 0.000025862*T*T # Keep within range [0,24]hrs GMST0 = np.mod(GMST0,24) # Corrections for leap seconds UTCSec = GPSTime - EpochJ2000_0_UTC - secPerDay/2 - \ LeapSeconds_2012_EpochJ2000 UTCHr = np.mod(UTCSec/3600,24) # Add corrections and keep within [0,24]hr range GMST = GMST0 + UTCHr*1.002737909 GMST = np.mod(GMST,24) # Convert from hours to degrees to radians GMST *= 15.*(np.pi/180.) return GMST
[ "def", "ComputeGMST", "(", "GPSTime", ")", ":", "# Difference in Julian Date between GPSTime and the J2000.0 epoch", "# Subtract half a day as Julian Days start at noon", "D", "=", "np", ".", "round", "(", "(", "GPSTime", "-", "EpochJ2000_0_UTC", ")", "/", "secPerDay", ")", "-", "0.5", "# Number of Julian centuries since J2000.0 epoch", "T", "=", "D", "/", "36525", "# 1st approximation to GMST without corrections for leap seconds", "GMST0", "=", "6.697374558", "+", "2400.051336", "*", "T", "+", "0.000025862", "*", "T", "*", "T", "# Keep within range [0,24]hrs", "GMST0", "=", "np", ".", "mod", "(", "GMST0", ",", "24", ")", "# Corrections for leap seconds", "UTCSec", "=", "GPSTime", "-", "EpochJ2000_0_UTC", "-", "secPerDay", "/", "2", "-", "LeapSeconds_2012_EpochJ2000", "UTCHr", "=", "np", ".", "mod", "(", "UTCSec", "/", "3600", ",", "24", ")", "# Add corrections and keep within [0,24]hr range", "GMST", "=", "GMST0", "+", "UTCHr", "*", "1.002737909", "GMST", "=", "np", ".", "mod", "(", "GMST", ",", "24", ")", "# Convert from hours to degrees to radians", "GMST", "*=", "15.", "*", "(", "np", ".", "pi", "/", "180.", ")", "return", "GMST" ]
31.864865
20.918919
async def msetup(self, text_channel): """Creates the gui Args: text_channel (discord.Channel): The channel for the embed ui to run in """ if self.mready: logger.warning("Attempt to init music when already initialised") return if self.state != 'starting': logger.error("Attempt to init from wrong state ('{}'), must be 'starting'.".format(self.state)) return self.logger.debug("Setting up gui") # Create gui self.mchannel = text_channel self.new_embed_ui() await self.embed.send() await self.embed.usend() await self.add_reactions() self.mready = True
[ "async", "def", "msetup", "(", "self", ",", "text_channel", ")", ":", "if", "self", ".", "mready", ":", "logger", ".", "warning", "(", "\"Attempt to init music when already initialised\"", ")", "return", "if", "self", ".", "state", "!=", "'starting'", ":", "logger", ".", "error", "(", "\"Attempt to init from wrong state ('{}'), must be 'starting'.\"", ".", "format", "(", "self", ".", "state", ")", ")", "return", "self", ".", "logger", ".", "debug", "(", "\"Setting up gui\"", ")", "# Create gui", "self", ".", "mchannel", "=", "text_channel", "self", ".", "new_embed_ui", "(", ")", "await", "self", ".", "embed", ".", "send", "(", ")", "await", "self", ".", "embed", ".", "usend", "(", ")", "await", "self", ".", "add_reactions", "(", ")", "self", ".", "mready", "=", "True" ]
27.84
22.28
def _dump_stats_group(self, title, items, normal_formatter=None, verbose_formatter=None): """Dump a statistics group. In verbose mode, do so as a config file so that other processors can load the information if they want to. :param normal_formatter: the callable to apply to the value before displaying it in normal mode :param verbose_formatter: the callable to apply to the value before displaying it in verbose mode """ if self.verbose: self.outf.write("[%s]\n" % (title,)) for name, value in items: if verbose_formatter is not None: value = verbose_formatter(value) if type(name) == str: name = name.replace(' ', '-') self.outf.write("%s = %s\n" % (name, value)) self.outf.write("\n") else: self.outf.write("%s:\n" % (title,)) for name, value in items: if normal_formatter is not None: value = normal_formatter(value) self.outf.write("\t%s\t%s\n" % (value, name))
[ "def", "_dump_stats_group", "(", "self", ",", "title", ",", "items", ",", "normal_formatter", "=", "None", ",", "verbose_formatter", "=", "None", ")", ":", "if", "self", ".", "verbose", ":", "self", ".", "outf", ".", "write", "(", "\"[%s]\\n\"", "%", "(", "title", ",", ")", ")", "for", "name", ",", "value", "in", "items", ":", "if", "verbose_formatter", "is", "not", "None", ":", "value", "=", "verbose_formatter", "(", "value", ")", "if", "type", "(", "name", ")", "==", "str", ":", "name", "=", "name", ".", "replace", "(", "' '", ",", "'-'", ")", "self", ".", "outf", ".", "write", "(", "\"%s = %s\\n\"", "%", "(", "name", ",", "value", ")", ")", "self", ".", "outf", ".", "write", "(", "\"\\n\"", ")", "else", ":", "self", ".", "outf", ".", "write", "(", "\"%s:\\n\"", "%", "(", "title", ",", ")", ")", "for", "name", ",", "value", "in", "items", ":", "if", "normal_formatter", "is", "not", "None", ":", "value", "=", "normal_formatter", "(", "value", ")", "self", ".", "outf", ".", "write", "(", "\"\\t%s\\t%s\\n\"", "%", "(", "value", ",", "name", ")", ")" ]
43.961538
12.884615
def dispatch(self): """Wraps the dispatch method to add session support.""" try: webapp2.RequestHandler.dispatch(self) finally: self.session_store.save_sessions(self.response)
[ "def", "dispatch", "(", "self", ")", ":", "try", ":", "webapp2", ".", "RequestHandler", ".", "dispatch", "(", "self", ")", "finally", ":", "self", ".", "session_store", ".", "save_sessions", "(", "self", ".", "response", ")" ]
32.333333
16.166667
def sample_surface_sphere(count): """ Correctly pick random points on the surface of a unit sphere Uses this method: http://mathworld.wolfram.com/SpherePointPicking.html Parameters ---------- count: int, number of points to return Returns ---------- points: (count,3) float, list of random points on a unit sphere """ u, v = np.random.random((2, count)) theta = np.pi * 2 * u phi = np.arccos((2 * v) - 1) points = util.spherical_to_vector(np.column_stack((theta, phi))) return points
[ "def", "sample_surface_sphere", "(", "count", ")", ":", "u", ",", "v", "=", "np", ".", "random", ".", "random", "(", "(", "2", ",", "count", ")", ")", "theta", "=", "np", ".", "pi", "*", "2", "*", "u", "phi", "=", "np", ".", "arccos", "(", "(", "2", "*", "v", ")", "-", "1", ")", "points", "=", "util", ".", "spherical_to_vector", "(", "np", ".", "column_stack", "(", "(", "theta", ",", "phi", ")", ")", ")", "return", "points" ]
23.086957
22.478261
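A dependency-free sketch of the same picking recipe (theta uniform on [0, 2*pi), phi = arccos(2v - 1)), converting to Cartesian coordinates by hand instead of util.spherical_to_vector, with a quick check that the samples land on the unit sphere.

import numpy as np

def sample_sphere(count):
    # theta ~ U[0, 2*pi), phi = arccos(2v - 1) gives uniform area density
    u, v = np.random.random((2, count))
    theta = 2.0 * np.pi * u
    phi = np.arccos(2.0 * v - 1.0)
    return np.column_stack((np.sin(phi) * np.cos(theta),
                            np.sin(phi) * np.sin(theta),
                            np.cos(phi)))

pts = sample_sphere(1000)
assert np.allclose(np.linalg.norm(pts, axis=1), 1.0)  # every sample sits on the unit sphere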
def send_location(self, *args, **kwargs): """See :func:`send_location`""" return send_location(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "send_location", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "send_location", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
52
12.333333
def heightmap_new(w: int, h: int, order: str = "C") -> np.ndarray: """Return a new numpy.ndarray formatted for use with heightmap functions. `w` and `h` are the width and height of the array. `order` is given to the new NumPy array, it can be 'C' or 'F'. You can pass a NumPy array to any heightmap function as long as all the following are true:: * The array is 2 dimensional. * The array has the C_CONTIGUOUS or F_CONTIGUOUS flag. * The array's dtype is :any:`dtype.float32`. The returned NumPy array will fit all these conditions. .. versionchanged:: 8.1 Added the `order` parameter. """ if order == "C": return np.zeros((h, w), np.float32, order="C") elif order == "F": return np.zeros((w, h), np.float32, order="F") else: raise ValueError("Invalid order parameter, should be 'C' or 'F'.")
[ "def", "heightmap_new", "(", "w", ":", "int", ",", "h", ":", "int", ",", "order", ":", "str", "=", "\"C\"", ")", "->", "np", ".", "ndarray", ":", "if", "order", "==", "\"C\"", ":", "return", "np", ".", "zeros", "(", "(", "h", ",", "w", ")", ",", "np", ".", "float32", ",", "order", "=", "\"C\"", ")", "elif", "order", "==", "\"F\"", ":", "return", "np", ".", "zeros", "(", "(", "w", ",", "h", ")", ",", "np", ".", "float32", ",", "order", "=", "\"F\"", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid order parameter, should be 'C' or 'F'.\"", ")" ]
35.958333
21.541667
def load(self, file_key): """Load the data.""" var = self.sd.select(file_key) data = xr.DataArray(from_sds(var, chunks=CHUNK_SIZE), dims=['y', 'x']).astype(np.float32) data = data.where(data != var._FillValue) try: data = data * np.float32(var.scale_factor) except AttributeError: pass return data
[ "def", "load", "(", "self", ",", "file_key", ")", ":", "var", "=", "self", ".", "sd", ".", "select", "(", "file_key", ")", "data", "=", "xr", ".", "DataArray", "(", "from_sds", "(", "var", ",", "chunks", "=", "CHUNK_SIZE", ")", ",", "dims", "=", "[", "'y'", ",", "'x'", "]", ")", ".", "astype", "(", "np", ".", "float32", ")", "data", "=", "data", ".", "where", "(", "data", "!=", "var", ".", "_FillValue", ")", "try", ":", "data", "=", "data", "*", "np", ".", "float32", "(", "var", ".", "scale_factor", ")", "except", "AttributeError", ":", "pass", "return", "data" ]
35.909091
15.181818
def isordv(array, n): """ Determine whether an array of n items contains the integers 0 through n-1. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isordv_c.html :param array: Array of integers. :type array: Array of ints :param n: Number of integers in array. :type n: int :return: The function returns True if the array contains the integers 0 through n-1, otherwise it returns False. :rtype: bool """ array = stypes.toIntVector(array) n = ctypes.c_int(n) return bool(libspice.isordv_c(array, n))
[ "def", "isordv", "(", "array", ",", "n", ")", ":", "array", "=", "stypes", ".", "toIntVector", "(", "array", ")", "n", "=", "ctypes", ".", "c_int", "(", "n", ")", "return", "bool", "(", "libspice", ".", "isordv_c", "(", "array", ",", "n", ")", ")" ]
30.052632
17.842105
def pairs(iterable):
    """
    :return: iterator yielding overlapping pairs from iterable

    :Example:

    >>> list(pairs([1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]
    """
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)
[ "def", "pairs", "(", "iterable", ")", ":", "a", ",", "b", "=", "itertools", ".", "tee", "(", "iterable", ")", "next", "(", "b", ",", "None", ")", "return", "zip", "(", "a", ",", "b", ")" ]
20
18.166667
async def handle_agent_job_started(self, agent_addr, message: AgentJobStarted): """Handle an AgentJobStarted message. Send the data back to the client""" self._logger.debug("Job %s %s started on agent %s", message.job_id[0], message.job_id[1], agent_addr) await ZMQUtils.send_with_addr(self._client_socket, message.job_id[0], BackendJobStarted(message.job_id[1]))
[ "async", "def", "handle_agent_job_started", "(", "self", ",", "agent_addr", ",", "message", ":", "AgentJobStarted", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"Job %s %s started on agent %s\"", ",", "message", ".", "job_id", "[", "0", "]", ",", "message", ".", "job_id", "[", "1", "]", ",", "agent_addr", ")", "await", "ZMQUtils", ".", "send_with_addr", "(", "self", ".", "_client_socket", ",", "message", ".", "job_id", "[", "0", "]", ",", "BackendJobStarted", "(", "message", ".", "job_id", "[", "1", "]", ")", ")" ]
96
45.75
def _tseitin(ex, auxvarname, auxvars=None): """ Convert a factored expression to a literal, and a list of constraints. """ if isinstance(ex, Literal): return ex, list() else: if auxvars is None: auxvars = list() lits = list() constraints = list() for x in ex.xs: lit, subcons = _tseitin(x, auxvarname, auxvars) lits.append(lit) constraints.extend(subcons) auxvarindex = len(auxvars) auxvar = exprvar(auxvarname, auxvarindex) auxvars.append(auxvar) f = ASTOPS[ex.ASTOP](*lits) constraints.append((auxvar, f)) return auxvar, constraints
[ "def", "_tseitin", "(", "ex", ",", "auxvarname", ",", "auxvars", "=", "None", ")", ":", "if", "isinstance", "(", "ex", ",", "Literal", ")", ":", "return", "ex", ",", "list", "(", ")", "else", ":", "if", "auxvars", "is", "None", ":", "auxvars", "=", "list", "(", ")", "lits", "=", "list", "(", ")", "constraints", "=", "list", "(", ")", "for", "x", "in", "ex", ".", "xs", ":", "lit", ",", "subcons", "=", "_tseitin", "(", "x", ",", "auxvarname", ",", "auxvars", ")", "lits", ".", "append", "(", "lit", ")", "constraints", ".", "extend", "(", "subcons", ")", "auxvarindex", "=", "len", "(", "auxvars", ")", "auxvar", "=", "exprvar", "(", "auxvarname", ",", "auxvarindex", ")", "auxvars", ".", "append", "(", "auxvar", ")", "f", "=", "ASTOPS", "[", "ex", ".", "ASTOP", "]", "(", "*", "lits", ")", "constraints", ".", "append", "(", "(", "auxvar", ",", "f", ")", ")", "return", "auxvar", ",", "constraints" ]
27.916667
14.75
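A library-independent sketch of the same recursion: expressions are nested ('and'/'or', ...) tuples rather than pyeda objects, every internal node gets a fresh auxiliary name, and the function returns (literal, constraints) with the same shape as _tseitin above.

from itertools import count

def tseitin_sketch(expr, counter=None):
    counter = counter if counter is not None else count()
    if isinstance(expr, str):            # a literal: nothing to introduce
        return expr, []
    op, *args = expr
    lits, constraints = [], []
    for sub in args:
        lit, subcons = tseitin_sketch(sub, counter)
        lits.append(lit)
        constraints.extend(subcons)
    aux = "aux{}".format(next(counter))  # fresh auxiliary variable for this node
    constraints.append((aux, (op,) + tuple(lits)))
    return aux, constraints

root, cons = tseitin_sketch(("or", ("and", "a", "b"), "c"))
print(root)  # aux1
print(cons)  # [('aux0', ('and', 'a', 'b')), ('aux1', ('or', 'aux0', 'c'))]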
def get_port_channel_detail_input_last_aggregator_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_channel_detail = ET.Element("get_port_channel_detail") config = get_port_channel_detail input = ET.SubElement(get_port_channel_detail, "input") last_aggregator_id = ET.SubElement(input, "last-aggregator-id") last_aggregator_id.text = kwargs.pop('last_aggregator_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_port_channel_detail_input_last_aggregator_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_channel_detail", "=", "ET", ".", "Element", "(", "\"get_port_channel_detail\"", ")", "config", "=", "get_port_channel_detail", "input", "=", "ET", ".", "SubElement", "(", "get_port_channel_detail", ",", "\"input\"", ")", "last_aggregator_id", "=", "ET", ".", "SubElement", "(", "input", ",", "\"last-aggregator-id\"", ")", "last_aggregator_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'last_aggregator_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
45.5
17.416667
def partial_safe_wraps(wrapped_func, *args, **kwargs): """ A version of `functools.wraps` that is safe to wrap a partial in. """ if isinstance(wrapped_func, functools.partial): return partial_safe_wraps(wrapped_func.func) else: return functools.wraps(wrapped_func)
[ "def", "partial_safe_wraps", "(", "wrapped_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "wrapped_func", ",", "functools", ".", "partial", ")", ":", "return", "partial_safe_wraps", "(", "wrapped_func", ".", "func", ")", "else", ":", "return", "functools", ".", "wraps", "(", "wrapped_func", ")" ]
36.625
12.625
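A quick usage sketch: functools.wraps finds no __name__ or __doc__ on a partial object to copy, so partial_safe_wraps unwraps to the underlying function first and the decorated wrapper keeps that function's metadata.

import functools

def greet(greeting, name):
    """Return a greeting."""
    return "{} {}".format(greeting, name)

hello = functools.partial(greet, "hello")

@partial_safe_wraps(hello)
def wrapped(name):
    return hello(name)

print(wrapped.__name__, "|", wrapped.__doc__)  # greet | Return a greeting.
print(wrapped("world"))                        # hello world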
def _serve_individual_audio(self, request): """Serve encoded audio data.""" tag = request.args.get('tag') run = request.args.get('run') index = int(request.args.get('index')) sample = int(request.args.get('sample', 0)) events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample) data = tensor_util.make_ndarray(events[index].tensor_proto)[sample, 0] mime_type = self._get_mime_type(run, tag) return http_util.Respond(request, data, mime_type)
[ "def", "_serve_individual_audio", "(", "self", ",", "request", ")", ":", "tag", "=", "request", ".", "args", ".", "get", "(", "'tag'", ")", "run", "=", "request", ".", "args", ".", "get", "(", "'run'", ")", "index", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'index'", ")", ")", "sample", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'sample'", ",", "0", ")", ")", "events", "=", "self", ".", "_filter_by_sample", "(", "self", ".", "_multiplexer", ".", "Tensors", "(", "run", ",", "tag", ")", ",", "sample", ")", "data", "=", "tensor_util", ".", "make_ndarray", "(", "events", "[", "index", "]", ".", "tensor_proto", ")", "[", "sample", ",", "0", "]", "mime_type", "=", "self", ".", "_get_mime_type", "(", "run", ",", "tag", ")", "return", "http_util", ".", "Respond", "(", "request", ",", "data", ",", "mime_type", ")" ]
48.6
11.9
def _CreateOutputFileHandles(self, output_type):
    """Creates a new gzipped output tempfile for the output type.

    We write JSON data to gzip_filehandle to get compressed data. We hold a
    reference to the original filehandle (gzip_filehandle_parent) so we can pass
    the gzip data to bigquery.

    Args:
      output_type: string of export type to be used in filename. e.g.
        ExportedFile

    Returns:
      A TempOutputTracker object
    """
    gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)
    gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, "wb",
                                    self.GZIP_COMPRESSION_LEVEL,
                                    gzip_filehandle_parent)
    self.temp_output_trackers[output_type] = TempOutputTracker(
        output_type=output_type,
        gzip_filehandle=gzip_filehandle,
        gzip_filehandle_parent=gzip_filehandle_parent)
    return self.temp_output_trackers[output_type]
[ "def", "_CreateOutputFileHandles", "(", "self", ",", "output_type", ")", ":", "gzip_filehandle_parent", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "output_type", ")", "gzip_filehandle", "=", "gzip", ".", "GzipFile", "(", "gzip_filehandle_parent", ".", "name", ",", "\"wb\"", ",", "self", ".", "GZIP_COMPRESSION_LEVEL", ",", "gzip_filehandle_parent", ")", "self", ".", "temp_output_trackers", "[", "output_type", "]", "=", "TempOutputTracker", "(", "output_type", "=", "output_type", ",", "gzip_filehandle", "=", "gzip_filehandle", ",", "gzip_filehandle_parent", "=", "gzip_filehandle_parent", ")", "return", "self", ".", "temp_output_trackers", "[", "output_type", "]" ]
41.608696
21.521739
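Stripped of the class bookkeeping, the pattern above is: open a NamedTemporaryFile, layer a GzipFile over it so writes come out compressed, and keep the plain handle around for later re-reading or upload. A standalone sketch (compression level 9 stands in for GZIP_COMPRESSION_LEVEL; re-opening the file by name works on POSIX):

import gzip
import json
import tempfile

parent = tempfile.NamedTemporaryFile(suffix="ExportedFile")
gz = gzip.GzipFile(parent.name, "wb", 9, parent)   # writes go through gzip into `parent`
gz.write(json.dumps({"path": "/etc/passwd"}).encode("utf-8"))
gz.close()       # finish the gzip stream; `parent` stays open
parent.flush()

with gzip.open(parent.name, "rb") as readback:
    print(json.loads(readback.read()))  # {'path': '/etc/passwd'}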
def safe_int(string, default=None): """ Safely parse a string into an int. On error return the ``default`` value. :param string string: string value to be converted :param int default: default value to be used in case of failure :rtype: int """ value = safe_float(string, default) if value is not None: value = int(value) return value
[ "def", "safe_int", "(", "string", ",", "default", "=", "None", ")", ":", "value", "=", "safe_float", "(", "string", ",", "default", ")", "if", "value", "is", "not", "None", ":", "value", "=", "int", "(", "value", ")", "return", "value" ]
26.5
14.928571
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if there is creep on the grid point. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self.state.creep[pos] != 0
[ "def", "has_creep", "(", "self", ",", "pos", ":", "Union", "[", "Point2", ",", "Point3", ",", "Unit", "]", ")", "->", "bool", ":", "assert", "isinstance", "(", "pos", ",", "(", "Point2", ",", "Point3", ",", "Unit", ")", ")", "pos", "=", "pos", ".", "position", ".", "to2", ".", "rounded", "return", "self", ".", "state", ".", "creep", "[", "pos", "]", "!=", "0" ]
52
7.8