2,000
def yesterday(date=None):
    if not date:
        # No date given: step back one day from today (default reference
        # reconstructed; the original referenced a bare `_date`).
        return datetime.date.today() - datetime.timedelta(days=1)
    else:
        current_date = parse(date)
        return current_date - datetime.timedelta(days=1)
yesterday once more
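A quick usage sketch, assuming the function above is in scope, `datetime` is imported, and `parse` comes from `python-dateutil` (an assumption; the original import was not shown):

import datetime
from dateutil.parser import parse  # assumed source of `parse`

yesterday()              # today's date minus one day
yesterday("2020-03-01")  # datetime.datetime(2020, 2, 29, 0, 0) -- leap day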
2,001
def safe_size_check(checked_path, error_detail, max_bytes=500000000):
    actual_size = 0
    for dirpath, dirnames, filenames in os.walk(checked_path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            actual_size += os.path.getsize(fp)

    assert actual_size <= max_bytes, \
        "Path {} size of {} >= {} bytes. {}".format(
            checked_path, actual_size, max_bytes, error_detail)
Determines if a particular path is larger than expected. Useful before any recursive remove.
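A possible call site, assuming the function above and `os` are in scope; the path and size limit are illustrative:

import shutil

cache_dir = '/tmp/my_cache'
# Abort (via AssertionError) if the tree is unexpectedly large before rm -rf.
safe_size_check(cache_dir, 'Refusing to delete unexpectedly large cache',
                max_bytes=100 * 1024 * 1024)
shutil.rmtree(cache_dir)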
2,002
def valuecounter(table, *field, **kwargs):
    missing = kwargs.get('missing', None)
    counter = Counter()
    for v in values(table, field, missing=missing):
        try:
            counter[v] += 1
        except IndexError:
            pass  # short row
    return counter
Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['a', True], ... ['b'], ... ['b', True], ... ['c', False]] >>> etl.valuecounter(table, 'foo') Counter({'b': 2, 'a': 1, 'c': 1}) The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes.
2,003
def groups_roles(self, room_id=None, room_name=None, **kwargs):
    if room_id:
        return self.__call_api_get('groups.roles', roomId=room_id, kwargs=kwargs)
    elif room_name:
        return self.__call_api_get('groups.roles', roomName=room_name, kwargs=kwargs)
    else:
        raise RocketMissingParamException('room_id or room_name required')
Lists all users' roles in the private group.
2,004
def register(im1, im2, params, exact_params=False, verbose=1):
    tempdir = get_tempdir()
    _clear_temp_dir()

    # Reference image
    refIm = im1
    if isinstance(im1, (tuple, list)):
        refIm = im1[0]

    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()

    # Groupwise registration? (parameter-name strings below are
    # reconstructed from the pyelastix defaults)
    if im2 is None:
        if not isinstance(im1, (tuple, list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        # Stack the images into a single (N, ...) array
        ims = im1
        ndim = ims[0].ndim
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters for groupwise registration
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along the stack dimension
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend([0] + [2**i] * ndim)
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples

    # Write image data and the parameter file
    path_im1, path_im2 = _get_image_paths(im1, im2)
    path_params = _write_parameter_file(params)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')

    # Register
    if True:
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occurred during registration: " + str(why)
            raise RuntimeError(tmp)

    # Compute the deformation field
    if True:
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occurred during transformation: " + str(why)
            raise RuntimeError(tmp)

    # One field per image for groupwise registration, else a single field
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]

    # Pull the deformation fields apart into per-dimension components
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:, d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:, :, d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:, :, :, d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:, :, :, :, d] for d in range(4)]
        fields[i] = tuple(field)

    if im2 is not None:
        fields = fields[0]  # pairwise registration: a single field

    # Clean up and return
    _clear_temp_dir()
    return a, fields
register(im1, im2, params, exact_params=False, verbose=1) Perform the registration of `im1` to `im2`, using the given parameters. Returns `(im1_deformed, field)`, where `field` is a tuple with arrays describing the deformation for each dimension (x-y-z order, in world units). Parameters: * im1 (ndarray or file location): The moving image (the one to deform). * im2 (ndarray or file location): The static (reference) image. * params (dict or Parameters): The parameters of the registration. Default parameters can be obtained using the `get_default_params()` method. Note that any parameter known to Elastix can be added to the parameter struct, which enables tuning the registration in great detail. See `get_default_params()` and the Elastix docs for more info. * exact_params (bool): If True, use the exact given parameters. If False (default) will process the parameters, checking for incompatible parameters, extending values to lists if a value needs to be given for each dimension. * verbose (int): Verbosity level. If 0, will not print any progress. If 1, will print the progress only. If 2, will print the full output produced by the Elastix executable. Note that error messages produced by Elastix will be printed regardless of the verbose level. If `im1` is a list of images, performs a groupwise registration. In this case the resulting `field` is a list of fields, each indicating the deformation to the "average" image.
2,005
def reftrack_uptodate_data(rt, role):
    uptodate = rt.uptodate()
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        if uptodate:
            return "Yes"
        else:
            return "No"
    if role == QtCore.Qt.ForegroundRole:
        if uptodate:
            return QtGui.QColor(*UPTODATE_RGB)
        elif rt.status():
            return QtGui.QColor(*OUTDATED_RGB)
Return the data for the uptodate status :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the uptodate status :rtype: depending on role :raises: None
2,006
def _process_file(input_file, output_file, apikey):
    bytes_ = read_binary(input_file)
    compressed = shrink(bytes_, apikey)
    if compressed.success and compressed.bytes:
        write_binary(output_file, compressed.bytes)
    else:
        if compressed.errno in FATAL_ERRORS:
            raise StopProcessing(compressed)
        elif compressed.errno == TinyPNGError.InternalServerError:
            raise RetryProcessing(compressed)
    return compressed
Shrinks input_file to output_file. This function should be used only inside process_directory. It takes input_file, tries to shrink it, and if the shrink was successful, saves the compressed image to output_file. Otherwise it raises an exception. @return compressed: PNGResponse
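A minimal sketch of a retry-aware caller, assuming `_process_file`, `RetryProcessing` and `StopProcessing` above are in scope; `process_one` and `max_retries` are hypothetical names:

def process_one(input_file, output_file, apikey, max_retries=3):
    # Retry transient server errors; re-raise after exhausting attempts.
    last_exc = None
    for _ in range(max_retries):
        try:
            return _process_file(input_file, output_file, apikey)
        except RetryProcessing as exc:
            last_exc = exc  # InternalServerError: try the same file again
    raise last_exc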
2,007
def _build_migrated_variables(checkpoint_reader, name_value_fn):
    names_to_shapes = checkpoint_reader.get_variable_to_shape_map()
    new_name_to_variable = {}
    name_to_new_name = {}
    for name in names_to_shapes:
        value = checkpoint_reader.get_tensor(name)
        new_name, new_value = name_value_fn(name, value)
        if new_name is None:
            continue
        name_to_new_name[name] = new_name
        new_name_to_variable[new_name] = tf.Variable(new_value)
    return new_name_to_variable, name_to_new_name
Builds the TensorFlow variables of the migrated checkpoint. Args: checkpoint_reader: A `tf.train.NewCheckPointReader` of the checkpoint to be read from. name_value_fn: Function taking two arguments, `name` and `value`, which returns the pair of new name and value for a variable of that name. Returns: Tuple of a dictionary with new variable names as keys and `tf.Variable`s as values, and a dictionary that maps the old variable names to the new variable names.
2,008
def fasper(x, y, ofac, hifac, n_threads, MACC=4):
    n = long(len(x))
    if n != len(y):
        print('Incompatible arrays.')  # message reconstructed
        return

    nout = int(0.5*ofac*hifac*n)
    nfreqt = long(ofac*hifac*n*MACC)  # size the FFT as the next power of 2 above nfreqt
    nfreq = 64
    while nfreq < nfreqt:
        nfreq = 2*nfreq
    ndim = long(2*nfreq)

    # Compute the mean, variance, and range of the data
    ave = y.mean()
    var = ((y - y.mean())**2).sum()/(len(y) - 1)
    xmin = x.min()
    xmax = x.max()
    xdif = xmax - xmin

    # Workspaces (dtype strings assumed)
    if is_pyfftw:
        wk1 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex128') * 0.
        wk2 = pyfftw.n_byte_align_empty(int(ndim), 16, 'complex128') * 0.
    else:
        wk1 = zeros(ndim, dtype='complex')
        wk2 = zeros(ndim, dtype='complex')

    # Extirpolate the data into the workspaces
    fac = ndim/(xdif*ofac)
    fndim = ndim
    ck = ((x - xmin)*fac) % fndim
    ckk = (2.0*ck) % fndim

    for j in range(0, n):
        __spread__(y[j] - ave, wk1, ndim, ck[j], MACC)
        __spread__(1.0, wk2, ndim, ckk[j], MACC)

    # Take the inverse FFTs
    if is_pyfftw:
        fft_wk1 = pyfftw.builders.ifft(wk1, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk1 = fft_wk1() * len(wk1)
        fft_wk2 = pyfftw.builders.ifft(wk2, planner_effort='FFTW_ESTIMATE',
                                       threads=n_threads)
        wk2 = fft_wk2() * len(wk2)
    else:
        wk1 = ifft(wk1)*len(wk1)
        wk2 = ifft(wk2)*len(wk1)

    wk1 = wk1[1:nout + 1]
    wk2 = wk2[1:nout + 1]
    rwk1 = wk1.real
    iwk1 = wk1.imag
    rwk2 = wk2.real
    iwk2 = wk2.imag

    df = 1.0/(xdif*ofac)

    # Compute the Lomb value for each frequency
    hypo2 = 2.0*abs(wk2)
    hc2wt = rwk2/hypo2
    hs2wt = iwk2/hypo2
    cwt = sqrt(0.5 + hc2wt)
    swt = sign(hs2wt)*(sqrt(0.5 - hc2wt))
    den = 0.5*n + hc2wt*rwk2 + hs2wt*iwk2
    cterm = (cwt*rwk1 + swt*iwk1)**2./den
    sterm = (cwt*iwk1 - swt*rwk1)**2./(n - den)

    wk1 = df*(arange(nout, dtype='float') + 1.)
    wk2 = (cterm + sterm)/(2.0*var)
    pmax = wk2.max()
    jmax = wk2.argmax()

    # Estimate the significance of the largest peak value
    expy = exp(-pmax)
    effm = 2.0*(nout)/ofac
    prob = effm*expy
    if prob > 0.01:
        prob = 1.0 - (1.0 - expy)**effm

    return wk1, wk2, nout, jmax, prob
Given abscissas x (which need not be equally spaced) and ordinates y, and given a desired oversampling factor ofac (a typical value being 4 or larger), this routine creates an array wk1 with a sequence of nout increasing frequencies (not angular frequencies) up to hifac times the "average" Nyquist frequency, and creates an array wk2 with the values of the Lomb normalized periodogram at those frequencies. The arrays x and y are not altered. This routine also returns jmax such that wk2(jmax) is the maximum element in wk2, and prob, an estimate of the significance of that maximum against the hypothesis of random noise. A small value of prob indicates that a significant periodic signal is present. Reference: Press, W. H. & Rybicki, G. B. 1989 ApJ vol. 338, p. 277-280. Fast algorithm for spectral analysis of unevenly sampled data (1989ApJ...338..277P) Arguments: X : Abscissas array, (e.g. an array of times). Y : Ordinates array, (e.g. corresponding counts). Ofac : Oversampling factor. Hifac : Hifac * "average" Nyquist frequency = highest frequency for which values of the Lomb normalized periodogram will be calculated. n_threads : Number of threads to use. Returns: Wk1 : An array of Lomb periodogram frequencies. Wk2 : An array of corresponding values of the Lomb periodogram. Nout : Wk1 & Wk2 dimensions (number of calculated frequencies). Jmax : The array index corresponding to the MAX( Wk2 ). Prob : False alarm probability of the largest periodogram value. MACC : Number of interpolation points per 1/4 cycle of highest frequency. History: 02/23/2009, v1.0, MF Translation of IDL code (orig. Numerical Recipes)
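A usage sketch, assuming `fasper` above is in scope together with its numpy-style imports; the signal parameters are illustrative:

import numpy as np

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 100, 200))                    # unevenly sampled times
y = np.sin(2 * np.pi * 0.1 * t) + 0.1 * rng.randn(200)   # 0.1 Hz signal plus noise
wk1, wk2, nout, jmax, prob = fasper(t, y, ofac=4, hifac=1, n_threads=1)
print(wk1[jmax], prob)  # peak frequency (expected near 0.1) and its false-alarm probability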
2,009
def clip_polygon(self, points):
    self.gsave()
    self._path_polygon(points)
    self.__clip_stack.append(self.__clip_box)
    self.__clip_box = _intersect_box(self.__clip_box,
                                     _compute_bounding_box(points))
    self.clip_sub()
Create a polygonal clip region. You must call endclip() after you have completed drawing. See also the polygon method.
2,010
def get_sections(self, gradebook_id='', simple=False):
    params = dict(includeMembers='false')

    section_data = self.get(
        'sections/{gradebookId}'.format(
            gradebookId=gradebook_id or self.gradebook_id
        ),
        params=params
    )

    if simple:
        sections = self.unravel_sections(section_data['data'])
        return [{'SectionName': x['name']} for x in sections]
    return section_data['data']
Get the sections for a gradebook. Return a dictionary of types of sections containing a list of that type for a given gradebook. Specified by a gradebookid. If simple=True, a list of dictionaries is provided for each section regardless of type. The dictionary only contains one key ``SectionName``. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): return a list of section names only Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: dict: Dictionary of section types where each type has a list of sections An example return value is: .. code-block:: python { u'recitation': [ { u'editable': False, u'groupId': 1293925, u'groupingScheme': u'Recitation', u'members': None, u'name': u'Unassigned', u'shortName': u'DefaultGroupNoCollisionPlease1234', u'staffs': None }, { u'editable': True, u'groupId': 1327565, u'groupingScheme': u'Recitation', u'members': None, u'name': u'r01', u'shortName': u'r01', u'staffs': None}, {u'editable': True, u'groupId': 1327555, u'groupingScheme': u'Recitation', u'members': None, u'name': u'r02', u'shortName': u'r02', u'staffs': None } ] }
2,011
def send_message(self, message):
    if self._error:
        raise compat.saved_exc(self._error)
    elif self._transport is None:
        raise JsonRpcError('connection lost')  # message assumed
    self._version.check_message(message)
    self._writer.write(serialize(message))
Send a raw JSON-RPC message. The *message* argument must be a dictionary containing a valid JSON-RPC message according to the version passed into the constructor.
2,012
def write(self):
    self._assure_writable("write")
    if not self._dirty:
        return

    if isinstance(self._file_or_files, (list, tuple)):
        raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files" % len(self._file_or_files))

    if self._has_includes():
        log.debug("Skipping write-back of configuration file as include files were merged in." +
                  "Set merge_includes=False to prevent this.")
        return

    fp = self._file_or_files

    # We have a physical file on disk, so get a lock
    is_file_lock = isinstance(fp, string_types + (FileType, ))
    if is_file_lock:
        self._lock._obtain_lock()
    if not hasattr(fp, "seek"):
        with open(self._file_or_files, "wb") as fp:
            self._write(fp)
    else:
        fp.seek(0)
        # Make sure we do not overwrite into an existing file
        if hasattr(fp, 'truncate'):
            fp.truncate()
        self._write(fp)
Write changes to our file, if there are changes at all :raise IOError: if this is a read-only writer instance or if we could not obtain a file lock
2,013
def asDigraph(self):
    from ._visualize import makeDigraph
    return makeDigraph(
        self._automaton,
        stateAsString=lambda state: state.method.__name__,
        inputAsString=lambda input: input.method.__name__,
        outputAsString=lambda output: output.method.__name__,
    )
Generate a L{graphviz.Digraph} that represents this machine's states and transitions. @return: L{graphviz.Digraph} object; for more information, please see the documentation for U{graphviz<https://graphviz.readthedocs.io/>}
2,014
def issue(self, issue_instance_id):
    with self.db.make_session() as session:
        selected_issue = (
            session.query(IssueInstance)
            .filter(IssueInstance.id == issue_instance_id)
            .scalar()
        )
        if selected_issue is None:
            self.warning(
                f"Issue {issue_instance_id} doesn't exist. "
                f"Type 'issues' for available issues."
            )
            return

        self.sources = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SOURCE
        )
        self.sinks = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SINK
        )

    self.current_issue_instance_id = int(selected_issue.id)
    self.current_frame_id = -1
    self.current_trace_frame_index = 1

    print(f"Set issue to {issue_instance_id}.")
    if int(selected_issue.run_id) != self.current_run_id:
        self.current_run_id = int(selected_issue.run_id)
        print(f"Set run to {self.current_run_id}.")
    print()

    self._generate_trace_from_issue()
    self.show()
Select an issue. Parameters: issue_instance_id: int id of the issue instance to select Note: We are selecting issue instances, even though the command is called issue.
2,015
def get_permissions(self, user_id):
    response = self.request(
        "{0}/{1}/permissions".format(self.version, user_id), {}
    )["data"]
    return {x["permission"] for x in response if x["status"] == "granted"}
Fetches the permissions object from the graph.
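A hypothetical call, where `graph` stands for an authenticated Graph API client exposing the method above:

granted = graph.get_permissions(user_id="me")
if "email" in granted:
    print("email permission granted")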
2,016
def addSourceId(self, value):
    if isinstance(value, Source_Id):
        self.source_ids.append(value)
    else:
        raise TypeError('value must be a Source_Id, not %s' % type(value))  # message reconstructed
Adds SourceId to External_Info
2,017
def remove_user_from_acl(self, name, user):
    if name not in self._acl:
        return False
    if user in self._acl[name]['allow']:
        self._acl[name]['allow'].remove(user)
    if user in self._acl[name]['deny']:
        self._acl[name]['deny'].remove(user)
    return True
Remove a user from the given acl (both allow and deny).
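An illustrative shape for the underlying `_acl` mapping, with two hypothetical calls on a manager instance `mgr`:

# _acl maps ACL names to allow/deny user lists, e.g.:
#   {'ops': {'allow': ['alice', 'bob'], 'deny': ['mallory']}}
mgr.remove_user_from_acl('ops', 'bob')      # True: 'bob' dropped from both lists
mgr.remove_user_from_acl('missing', 'bob')  # False: no such ACL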
2,018
def make_data(n, width):
    x = dict([(i, 100*random.random()) for i in range(1, n+1)])
    y = dict([(i, 100*random.random()) for i in range(1, n+1)])
    c = {}
    for i in range(1, n+1):
        for j in range(1, n+1):
            if j != i:
                c[i, j] = distance(x[i], y[i], x[j], y[j])

    e = {1: 0}
    l = {1: 0}
    start = 0
    delta = int(76.*math.sqrt(n)/n * width) + 1
    for i in range(1, n):
        j = i + 1
        start += c[i, j]
        e[j] = max(start - delta, 0)
        l[j] = start + delta

    return c, x, y, e, l
make_data: compute a distance matrix and time windows.
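A sketch of how this might be driven, assuming a Euclidean `distance` helper like the one below exists in the original module:

import math
import random

def distance(x1, y1, x2, y2):  # assumed helper
    return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)

random.seed(1)
c, x, y, e, l = make_data(n=5, width=10)
print(e[3], l[3])  # time window [earliest, latest] for customer 3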
2,019
def _events(self):
    with self.app.events_lock:
        res = self.app.get_events()
    return serialize(res, True)
Get the monitoring events from the daemon. This is used by the arbiter to get the monitoring events from all its satellites. :return: Events list serialized :rtype: list
2,020
def get_layers_output(self, dataset):
    layers_out = []

    with self.tf_graph.as_default():
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            for l in self.layer_nodes:
                layers_out.append(l.eval({self.input_data: dataset,
                                          self.keep_prob: 1}))

    if layers_out == []:
        raise Exception("This method is not implemented for this model")
    else:
        return layers_out
Get output from each layer of the network. :param dataset: input data :return: list of np array, element i is the output of layer i
2,021
def symbol(self, index):
    # If index is a string (e.g. an already-formatted operand), return it as-is
    if isinstance(index, str):
        return index
    elif (index < 0) or (index >= self.symtab.table_len):
        self.error("symbol table index out of range")
    sym = self.symtab.table[index]
    if sym.kind == SharedData.KINDS.LOCAL_VAR:
        return "-{0}(1:%14)".format(sym.attribute * 4 + 4)
    elif sym.kind == SharedData.KINDS.PARAMETER:
        return "{0}(1:%14)".format(8 + sym.attribute * 4)
    elif sym.kind == SharedData.KINDS.CONSTANT:
        return "${0}".format(sym.name)
    else:
        return "{0}".format(sym.name)
Generates symbol name from index
2,022
def readSB(self, bits):
    shift = 32 - bits
    return int32(self.readbits(bits) << shift) >> shift
Read a signed int using the specified number of bits
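The shift pair is the classic sign-extension trick; here is a plain-Python rendering of what the `int32` cast accomplishes (0b1110 is -2 in 4-bit two's complement):

bits, raw = 4, 0b1110
shift = 32 - bits
widened = (raw << shift) & 0xFFFFFFFF
# Interpret as a signed 32-bit value, then arithmetic-shift back down.
signed = widened - (1 << 32) if widened & 0x80000000 else widened
print(signed >> shift)  # -2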
2,023
def traverse_imports(names):
    pending = [names]
    while pending:
        node = pending.pop()
        if node.type == token.NAME:
            yield node.value
        elif node.type == syms.dotted_name:
            yield "".join([ch.value for ch in node.children])
        elif node.type == syms.dotted_as_name:
            pending.append(node.children[0])
        elif node.type == syms.dotted_as_names:
            pending.extend(node.children[::-2])
        else:
            raise AssertionError("unknown node type")
Walks over all the names imported in a dotted_as_names node.
2,024
def constraint_matrices(model, array_type='dense', include_vars=False,
                        zero_tol=1e-6):
    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
        raise ValueError('Sparse matrices require scipy')

    array_builder = {
        'dense': np.array,
        'dok': dok_matrix,
        'lil': lil_matrix,
        'DataFrame': pd.DataFrame,
    }[array_type]

    Problem = namedtuple("Problem",
                         ["equalities", "b", "inequalities", "bounds",
                          "variable_fixed", "variable_bounds"])
    equality_rows = []
    inequality_rows = []
    inequality_bounds = []
    b = []

    for const in model.constraints:
        lb = -np.inf if const.lb is None else const.lb
        ub = np.inf if const.ub is None else const.ub
        equality = (ub - lb) < zero_tol
        coefs = const.get_linear_coefficients(model.variables)
        coefs = [coefs[v] for v in model.variables]
        if equality:
            b.append(lb if abs(lb) > zero_tol else 0.0)
            equality_rows.append(coefs)
        else:
            inequality_rows.append(coefs)
            inequality_bounds.append([lb, ub])

    var_bounds = np.array([[v.lb, v.ub] for v in model.variables])
    fixed = var_bounds[:, 1] - var_bounds[:, 0] < zero_tol

    results = Problem(
        equalities=array_builder(equality_rows),
        b=np.array(b),
        inequalities=array_builder(inequality_rows),
        bounds=array_builder(inequality_bounds),
        variable_fixed=np.array(fixed),
        variable_bounds=array_builder(var_bounds))

    return results
Create a matrix representation of the problem. This is used for alternative solution approaches that do not use optlang. The function will construct the equality matrix, inequality matrix and bounds for the complete problem. Notes ----- To accommodate non-zero equalities the problem will add the variable "const_one" which is a variable that equals one. Arguments --------- model : cobra.Model The model from which to obtain the LP problem. array_type : string The type of array to construct. If 'dense', return a standard numpy.array; 'dok' or 'lil' will construct a sparse array using scipy of the corresponding type; and 'DataFrame' will give a pandas `DataFrame` with metabolite indices and reaction columns. zero_tol : float The zero tolerance used to judge whether two bounds are the same. Returns ------- collections.namedtuple A named tuple consisting of 6 matrices and 2 vectors: - "equalities" is a matrix S such that S*vars = b. It includes a row for each constraint and one column for each variable. - "b" the right side of the equality equation such that S*vars = b. - "inequalities" is a matrix M such that lb <= M*vars <= ub. It contains a row for each inequality and as many columns as variables. - "bounds" is a compound matrix [lb ub] containing the lower and upper bounds for the inequality constraints in M. - "variable_fixed" is a boolean vector indicating whether the variable at that index is fixed (lower bound == upper_bound) and is thus bounded by an equality constraint. - "variable_bounds" is a compound matrix [lb ub] containing the lower and upper bounds for all variables.
2,025
def create_from_pytz(cls, tz_info):
    zone_name = tz_info.zone

    # pytz private attribute names reconstructed
    utc_transition_times_list_raw = getattr(tz_info, '_utc_transition_times', None)
    utc_transition_times_list = [tuple(utt.timetuple()) for utt in utc_transition_times_list_raw] \
        if utc_transition_times_list_raw is not None \
        else None

    transition_info_list_raw = getattr(tz_info, '_transition_info', None)
    transition_info_list = [(utcoffset_td.total_seconds(), dst_td.total_seconds(), tzname)
                            for (utcoffset_td, dst_td, tzname) in transition_info_list_raw] \
        if transition_info_list_raw is not None \
        else None

    try:
        utcoffset_dt = tz_info._utcoffset
    except AttributeError:
        utcoffset = None
    else:
        utcoffset = utcoffset_dt.total_seconds()

    tzname = getattr(tz_info, '_tzname', None)
    parent_class_name = getmro(tz_info.__class__)[1].__name__

    return cls(zone_name, parent_class_name, utc_transition_times_list,
               transition_info_list, utcoffset, tzname)
Create an instance using the result of the timezone() call in "pytz".
2,026
def get_snapshots(self):
    ec2 = self.get_ec2_connection()
    rs = ec2.get_all_snapshots()
    all_vols = [self.volume_id] + self.past_volume_ids
    snaps = []
    for snapshot in rs:
        if snapshot.volume_id in all_vols:
            if snapshot.progress == '100%':
                snapshot.date = boto.utils.parse_ts(snapshot.start_time)
                snapshot.keep = True
                snaps.append(snapshot)
    snaps.sort(cmp=lambda x, y: cmp(x.date, y.date))
    return snaps
Returns a list of all completed snapshots for this volume ID.
2,027
def remote(*args, **kwargs):
    worker = get_global_worker()

    # The case where the decorator is applied directly: @ray.remote
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        return make_decorator(worker=worker)(args[0])

    # Parse the keyword arguments given to the decorator.
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    "the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
                    "'resources', 'max_calls', "
                    "or 'max_reconstructions', like "
                    "'@ray.remote(num_return_vals=2, "
                    "resources={\"CustomResource\": 1})'.")
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in [
            "num_return_vals", "num_cpus", "num_gpus", "resources",
            "max_calls", "max_reconstructions"
        ], error_string

    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        raise Exception("The 'resources' keyword argument must be a "
                        "dictionary, but received type {}.".format(
                            type(resources)))
    if resources is not None:
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."

    num_return_vals = kwargs.get("num_return_vals")
    max_calls = kwargs.get("max_calls")
    max_reconstructions = kwargs.get("max_reconstructions")

    return make_decorator(
        num_return_vals=num_return_vals,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        resources=resources,
        max_calls=max_calls,
        max_reconstructions=max_reconstructions,
        worker=worker)
Define a remote function or an actor class. This can be used with no arguments to define a remote function or actor as follows: .. code-block:: python @ray.remote def f(): return 1 @ray.remote class Foo(object): def method(self): return 1 It can also be used with specific keyword arguments: * **num_return_vals:** This is only for *remote functions*. It specifies the number of object IDs returned by the remote function invocation. * **num_cpus:** The quantity of CPU cores to reserve for this task or for the lifetime of the actor. * **num_gpus:** The quantity of GPUs to reserve for this task or for the lifetime of the actor. * **resources:** The quantity of various custom resources to reserve for this task or for the lifetime of the actor. This is a dictionary mapping strings (resource names) to numbers. * **max_calls:** Only for *remote functions*. This specifies the maximum number of times that a given worker can execute the given remote function before it must exit (this can be used to address memory leaks in third-party libraries or to reclaim resources that cannot easily be released, e.g., GPU memory that was acquired by TensorFlow). By default this is infinite. * **max_reconstructions**: Only for *actors*. This specifies the maximum number of times that the actor should be reconstructed when it dies unexpectedly. The minimum valid value is 0 (default), which indicates that the actor doesn't need to be reconstructed. And the maximum valid value is ray.ray_constants.INFINITE_RECONSTRUCTIONS. This can be done as follows: .. code-block:: python @ray.remote(num_gpus=1, max_calls=1, num_return_vals=2) def f(): return 1, 2 @ray.remote(num_cpus=2, resources={"CustomResource": 1}) class Foo(object): def method(self): return 1
2,028
def initialize():
    global is_initialized
    # Add the custom multi-constructors (tag strings reconstructed from
    # the pylearn2 YAML conventions)
    yaml.add_multi_constructor('!obj:', multi_constructor)
    yaml.add_multi_constructor('!pkl:', multi_constructor_pkl)
    yaml.add_multi_constructor('!import:', multi_constructor_import)
    yaml.add_multi_constructor('!include:', multi_constructor_include)

    def import_constructor(loader, node):
        value = loader.construct_scalar(node)
        return try_to_import(value)

    yaml.add_constructor('!import', import_constructor)
    yaml.add_implicit_resolver(
        '!import',
        re.compile(r'(?:[a-zA-Z_][\w_]*\.)+[a-zA-Z_][\w_]*')  # pattern assumed
    )
    is_initialized = True
Initialize the configuration system by installing YAML handlers. Automatically done on first call to load() specified in this file.
2,029
def all(self, data={}, **kwargs):
    return super(VirtualAccount, self).all(data, **kwargs)
Fetch all Virtual Account entities Returns: Dictionary of Virtual Account data
2,030
def receive_response(self, transaction):
    host, port = transaction.response.source
    key_token = hash(str(host) + str(port) + str(transaction.response.token))

    if key_token in self._block1_sent and transaction.response.block1 is not None:
        item = self._block1_sent[key_token]
        transaction.block_transfer = True
        if item.m == 0:
            transaction.block_transfer = False
            del transaction.request.block1
            return transaction
        n_num, n_m, n_size = transaction.response.block1
        if n_num != item.num:
            logger.warning("Blockwise num acknowledged error, expected " +
                           str(item.num) + " received " + str(n_num))
            return None
        if n_size < item.size:
            logger.debug("Scale down size, was " + str(item.size) +
                         " become " + str(n_size))
            item.size = n_size
        request = transaction.request
        del request.mid
        del request.block1
        request.payload = item.payload[item.byte: item.byte + item.size]
        item.num += 1
        item.byte += item.size
        if len(item.payload) <= item.byte:
            item.m = 0
        else:
            item.m = 1
        request.block1 = (item.num, item.m, item.size)
    elif transaction.response.block2 is not None:
        num, m, size = transaction.response.block2
        if m == 1:
            transaction.block_transfer = True
            if key_token in self._block2_sent:
                # n-th block of the transfer
                item = self._block2_sent[key_token]
                if num != item.num:
                    logger.error("Receive unwanted block")
                    return self.error(transaction, defines.Codes.REQUEST_ENTITY_INCOMPLETE.number)
                if item.content_type is None:
                    item.content_type = transaction.response.content_type
                if item.content_type != transaction.response.content_type:
                    logger.error("Content-type Error")
                    return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
                item.byte += size
                item.num = num + 1
                item.size = size
                item.m = m
                item.payload += transaction.response.payload
            else:
                # first block of the transfer
                item = BlockItem(size, num + 1, m, size,
                                 transaction.response.payload,
                                 transaction.response.content_type)
                self._block2_sent[key_token] = item
            request = transaction.request
            del request.mid
            del request.block2
            request.block2 = (item.num, 0, item.size)
        else:
            transaction.block_transfer = False
            if key_token in self._block2_sent:
                if self._block2_sent[key_token].content_type != transaction.response.content_type:
                    logger.error("Content-type Error")
                    return self.error(transaction, defines.Codes.UNSUPPORTED_CONTENT_FORMAT.number)
                transaction.response.payload = self._block2_sent[key_token].payload + \
                    transaction.response.payload
                del self._block2_sent[key_token]
    else:
        transaction.block_transfer = False
    return transaction
Handles the Blocks option in an incoming response. :type transaction: Transaction :param transaction: the transaction that owns the response :rtype : Transaction :return: the edited transaction
2,031
def _request(self, *args, **kwargs):
    self._amend_request_kwargs(kwargs)
    _response = self._requests_session.request(*args, **kwargs)
    try:
        _response.raise_for_status()
    except HTTPError as e:
        if e.response is not None:
            raise_from(ConjureHTTPError(e), e)
        raise e
    return _response
Make requests using configured :class:`requests.Session`. Any error details will be extracted to an :class:`HTTPError` which will contain relevant error details when printed.
2,032
def _fail_with_undefined_error(self, *args, **kwargs):
    if self._undefined_hint is None:
        if self._undefined_obj is missing:
            hint = '%r is undefined' % self._undefined_name
        elif not isinstance(self._undefined_name, basestring):
            hint = '%s has no element %r' % (
                object_type_repr(self._undefined_obj),
                self._undefined_name
            )
        else:
            hint = '%r has no attribute %r' % (
                object_type_repr(self._undefined_obj),
                self._undefined_name
            )
    else:
        hint = self._undefined_hint
    raise self._undefined_exception(hint)
Regular callback function for undefined objects that raises an `UndefinedError` on call.
2,033
def remove_group(self, group=None):
    if group is None:
        raise KPError("Need group to remove a group")
    elif type(group) is not v1Group:
        raise KPError("group must be v1Group")

    children = []
    entries = []
    if group in self.groups:
        # Save children and entries so they can be removed afterwards
        children.extend(group.children)
        entries.extend(group.entries)
        group.parent.children.remove(group)
        self.groups.remove(group)
    else:
        raise KPError("Given group doesn't exist")
    self._num_groups -= 1

    for i in children:
        self.remove_group(i)
    for i in entries:
        self.remove_entry(i)

    return True
This method removes a group. `group` is the group to remove and must be a v1Group.
2,034
def _FormatExpression(self, frame, expression):
    rc, value = _EvaluateExpression(frame, expression)
    if not rc:
        # Evaluation failed: format the error status instead
        # (key names reconstructed).
        message = _FormatMessage(value['description']['format'],
                                 value['description'].get('parameters'))
        return '<' + message + '>'
    return self._FormatValue(value)
Evaluates a single watched expression and formats it into a string form. If expression evaluation fails, returns error message string. Args: frame: Python stack frame in which the expression is evaluated. expression: string expression to evaluate. Returns: Formatted expression value that can be used in the log message.
2,035
def hash160(msg_bytes):
    h = hashlib.new('ripemd160')
    if 'decred' in riemann.get_current_network_name():
        h.update(blake256(msg_bytes))
        return h.digest()
    h.update(sha256(msg_bytes))
    return h.digest()
byte-like -> bytes
2,036
def GetOptionBool(self, section, option):
    return (not self.config.has_option(section, option)
            or self.config.getboolean(section, option))
Get the value of an option in the config file. Args: section: string, the section of the config file to check. option: string, the option to retrieve the value of. Returns: bool, True if the option is enabled or not set.
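The default-to-True behavior can be reproduced with a stock configparser; the section and option names here are illustrative:

import configparser

config = configparser.ConfigParser()
config.read_string('[Daemons]\naccounts_daemon = false\n')
config.getboolean('Daemons', 'accounts_daemon')  # False: explicitly disabled
config.has_option('Daemons', 'network_daemon')   # False, so GetOptionBool treats it as enabled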
2,037
def get_files_by_path(path):
    if os.path.isfile(path):
        return [path]
    if os.path.isdir(path):
        return get_morph_files(path)
    raise IOError('Invalid data path %s' % path)
Get a file or set of files from a file path. Returns a list of files with path.
2,038
def _get_data_from_rawfile(path_to_data, raw_data_id):
    loaded = pickle.load(open(path_to_data, "rb"))
    raw_datasets = loaded['handwriting_datasets']
    for raw_dataset in raw_datasets:
        if raw_dataset['handwriting'].raw_data_id == raw_data_id:
            return raw_dataset['handwriting']
    return None
Get a HandwrittenData object that has ``raw_data_id`` from a pickle file ``path_to_data``. :returns: The HandwrittenData object if ``raw_data_id`` is in path_to_data, otherwise ``None``.
2,039
def components(self, visible=True):
    if self._on:
        self._quality.append_on_chord(self.on, self.root)
    return self._quality.get_components(root=self._root, visible=visible)
Return the component notes of chord :param bool visible: returns the name of notes if True else list of int :rtype: list[(str or int)] :return: component notes of chord
2,040
def make_mask(filename, ext, trail_coords, sublen=75, subwidth=200, order=3,
              sigma=4, pad=10, plot=False, verbose=False):
    # NOTE: the string literals in messages and plot labels below were lost
    # in extraction and are reconstructions; the control flow is original.
    if not HAS_OPDEP:
        raise ImportError('Missing scipy or skimage>=0.11 packages')

    if verbose:
        t_beg = time.time()

    fname = '{0}[{1}]'.format(filename, ext)
    image = fits.getdata(filename, ext)

    # Rescale the image
    dx = image.max()
    if dx <= 0:
        raise ValueError('Image has no positive values')
    image = image / dx
    image[image < 0] = 0

    (x0, y0), (x1, y1) = trail_coords

    # Find out how much to rotate the image
    rad = np.arctan2(y1 - y0, x1 - x0)
    newrad = (np.pi * 2) - rad
    deg = np.degrees(rad)
    if verbose:
        print('Rotation: {0} deg'.format(deg))

    rotate = transform.rotate(image, deg, resize=True, order=order)

    if plot and plt is not None:
        plt.ion()

        mean = np.median(image)
        stddev = image.std()
        lower = mean - stddev
        upper = mean + stddev

        fig1, ax1 = plt.subplots()
        ax1.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax1.set_title(fname)

        fig2, ax2 = plt.subplots()
        ax2.imshow(rotate, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax2.set_title('{0} rotated by {1} deg'.format(fname, deg))

        plt.draw()

    # Map the trail start point into the rotated frame
    sx, sy = _rotate_point((x0, y0), newrad, image.shape, rotate.shape)

    # First subarray around the trail: make sure there is a profile to fit
    dx = int(subwidth / 2)
    ix0, ix1, iy0, iy1 = _get_valid_indices(
        rotate.shape, sx - dx, sx + dx, sy - sublen, sy + sublen)
    subr = rotate[iy0:iy1, ix0:ix1]
    if len(subr) <= sublen:
        raise ValueError('Trail subarray size is {0} but expected {1} or '
                         'larger'.format(len(subr), sublen))

    medarr = np.median(subr, axis=1)
    flat = [medarr]
    mean = sigma_clipped_stats(medarr)[0]
    stddev = biweight_midvariance(medarr)
    z = np.where(medarr > (mean + (sigma * stddev)))[0]

    if plot and plt is not None:
        fig1, ax1 = plt.subplots()
        ax1.plot(medarr, 'b.')
        ax1.plot(z, medarr[z], 'r.')
        ax1.set_xlabel('Index')
        ax1.set_ylabel('Median value')
        ax1.set_title('Median array in subarray')
        plt.draw()

    if len(z) < 1:
        raise ValueError('Trail not found; no points above {0} '
                         'sigma'.format(sigma))

    # Pad the trail profile a bit
    lower = z.min()
    upper = z.max()
    diff = upper - lower
    lower = lower - pad
    upper = upper + pad

    if plot and plt is not None:
        padind = np.arange(lower, upper)
        ax1.plot(padind, medarr[padind], 'yx')
        plt.draw()

    # Seed the mask on the rotated image
    mask = np.zeros(rotate.shape)
    lowerx, upperx, lowery, uppery = _get_valid_indices(
        mask.shape, np.floor(sx - subwidth), np.ceil(sx + subwidth),
        np.floor(sy - sublen + lower), np.ceil(sy - sublen + upper))
    mask[lowery:uppery, lowerx:upperx] = 1

    # Walk along the trail in both directions, fitting the profile in
    # successive subarrays.
    done = False
    first = True
    nextx = upperx
    centery = np.ceil(lowery + diff)
    counter = 0

    while not done:
        ix0, ix1, iy0, iy1 = _get_valid_indices(
            rotate.shape, nextx - dx, nextx + dx,
            centery - sublen, centery + sublen)
        subr = rotate[iy0:iy1, ix0:ix1]

        if 0 in subr.shape:
            if verbose:
                print('Subarray is empty: shape={0}, '
                      'first={1}'.format(subr.shape, first))
            if first:
                first = False
                centery = sy
                nextx = sx
            else:
                done = True
            continue

        medarr = np.median(subr, axis=1)
        flat.append(medarr)
        mean = sigma_clipped_stats(medarr, sigma=sigma)[0]
        stddev = biweight_midvariance(medarr)
        z = np.where(medarr > (mean + (sigma * stddev)))[0]

        if len(z) < 1:
            if first:
                if verbose:
                    print('No trail detected on first pass; '
                          'counter={0}'.format(counter))
                centery = sy
                nextx = sx
                first = False
            else:
                if verbose:
                    print('Trail ends: z={0}, subarray '
                          'shape={1}'.format(z, subr.shape))
                done = True
            continue

        lower = z.min()
        upper = z.max()
        diff = upper - lower

        # Pad the profile
        lower = np.floor(lower - pad)
        upper = np.ceil(upper + pad)

        # Set the mask
        lowerx, upperx, lowery, uppery = _get_valid_indices(
            mask.shape, np.floor(nextx - subwidth),
            np.ceil(nextx + subwidth), np.floor(centery - sublen + lower),
            np.ceil(centery - sublen + upper))
        mask[lowery:uppery, lowerx:upperx] = 1

        # Check whether the trail has left the original image frame
        upper_p = (upperx, uppery)
        upper_t = _rotate_point(
            upper_p, newrad, image.shape, rotate.shape, reverse=True)
        highy = np.ceil(upper_t[1])
        highx = np.ceil(upper_t[0])

        if first:
            nextx = nextx + dx
            centery = lowery + diff
            if (nextx + subwidth) > rotate.shape[1]:
                if verbose:
                    print('Hit rotated image edge; '
                          'counter={0}'.format(counter))
                first = False
            elif (highy > image.shape[0]) or (highx > image.shape[1]):
                if verbose:
                    print('Hit original image edge; '
                          'counter={0}'.format(counter))
                first = False
            # Reset to the original point to walk the other direction
            if not first:
                centery = sy
                nextx = sx
        else:
            nextx = nextx - dx
            centery = lowery + diff
            if (nextx - subwidth) < 0:
                if verbose:
                    print('Hit rotated image edge; '
                          'counter={0}'.format(counter))
                done = True
            elif (highy > image.shape[0]) or (highx > image.shape[1]):
                if verbose:
                    print('Hit original image edge; '
                          'counter={0}'.format(counter))
                done = True

        counter += 1
        if counter > 500:
            if verbose:
                print('Too many iterations; stopping')
            done = True

    # Rotate the mask back and cut it to the original image size
    rot = transform.rotate(mask, -deg, resize=True, order=1)
    ix0 = (rot.shape[1] - image.shape[1]) / 2
    iy0 = (rot.shape[0] - image.shape[0]) / 2
    lowerx, upperx, lowery, uppery = _get_valid_indices(
        rot.shape, ix0, image.shape[1] + ix0, iy0, image.shape[0] + iy0)
    mask = rot[lowery:uppery, lowerx:upperx]

    if mask.shape != image.shape:
        warnings.warn('Mask shape {0} does not match image shape '
                      '{1}'.format(mask.shape, image.shape),
                      AstropyUserWarning)

    mask = mask.astype(np.bool)

    if plot and plt is not None:
        test = image.copy()
        test[mask] = 0

        mean = np.median(test)
        stddev = test.std()
        lower = mean - stddev
        upper = mean + stddev

        fig1, ax1 = plt.subplots()
        ax1.imshow(test, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax1.set_title('Masked image')

        fig2, ax2 = plt.subplots()
        ax2.imshow(mask, cmap=plt.cm.gray)
        ax2.set_title('DQ mask')

        plt.draw()

    if verbose:
        t_end = time.time()
        print('Run time: {0} s'.format(t_end - t_beg))

    return mask
Create DQ mask for an image for a given satellite trail. This mask can be added to existing DQ data using :func:`update_dq`. .. note:: Unlike :func:`detsat`, multiprocessing is not available for this function. Parameters ---------- filename : str FITS image filename. ext : int, str, or tuple Extension for science data, as accepted by ``astropy.io.fits``. trail_coords : ndarray One of the trails returned by :func:`detsat`. This must be in the format of ``[[x0, y0], [x1, y1]]``. sublen : int, optional Length of strip to use as the fitting window for the trail. subwidth : int, optional Width of box to fit trail on. order : int, optional The order of the spline interpolation for image rotation. See :func:`skimage.transform.rotate`. sigma : float, optional Sigma of the satellite trail for detection. If points are a given sigma above the background in the subregion then it is marked as a satellite. This may need to be lowered for resolved trails. pad : int, optional Amount of extra padding in pixels to give the satellite mask. plot : bool, optional Plot the result. verbose : bool, optional Print extra information to the terminal, mostly for debugging. Returns ------- mask : ndarray Boolean array marking the satellite trail with `True`. Raises ------ ImportError Missing scipy or skimage>=0.11 packages. IndexError Invalid subarray indices. ValueError Image has no positive values, trail subarray too small, or trail profile not found.
2,041
def network_info(name=None, **kwargs):
    result = {}
    conn = __get_conn(**kwargs)

    def _net_get_leases(net):
        leases = net.DHCPLeases()
        for lease in leases:
            if lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV4:
                lease['type'] = 'ipv4'
            elif lease['type'] == libvirt.VIR_IP_ADDR_TYPE_IPV6:
                lease['type'] = 'ipv6'
            else:
                lease['type'] = 'unknown'
        return leases

    try:
        nets = [net for net in conn.listAllNetworks()
                if name is None or net.name() == name]
        result = {net.name(): {
                      'uuid': net.UUIDString(),
                      'bridge': net.bridgeName(),
                      'autostart': net.autostart(),
                      'active': net.isActive(),
                      'persistent': net.isPersistent(),
                      'leases': _net_get_leases(net)} for net in nets}
    except libvirt.libvirtError as err:
        log.debug('Silenced libvirt error: %s', str(err))
    finally:
        conn.close()

    return result
Return information about a virtual network, given its name. :param name: virtual network name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults If no name is provided, return the information for all defined virtual networks. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.network_info default
2,042
def create_router(self, name, tenant_id, subnet_lst):
    try:
        body = {'router': {'name': name, 'tenant_id': tenant_id,
                           'admin_state_up': True}}
        router = self.neutronclient.create_router(body=body)
        rout_dict = router.get('router')
        rout_id = rout_dict.get('id')
    except Exception as exc:
        LOG.error("Failed to create router with name %(name)s"
                  " Exc %(exc)s", {'name': name, 'exc': str(exc)})
        return None
    ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
    if not ret:
        try:
            ret = self.neutronclient.delete_router(rout_id)
        except Exception as exc:
            LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
                      {'name': name, 'exc': str(exc)})
        return None
    return rout_id
Create an OpenStack router and add the interfaces.
2,043
def _validate_response(url, response):
    if response['status'] not in [GooglePlaces.RESPONSE_STATUS_OK,
                                  GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS]:
        error_detail = ('Request to URL %s failed with response code: %s' %
                        (url, response['status']))  # message reconstructed
        raise GooglePlacesError(error_detail)
Validates that the response from Google was successful.
2,044
def complete_pool_name(arg):
    # Anchor the regex at the start of the name (search keys reconstructed)
    search_string = '^'
    if arg is not None:
        search_string += arg

    res = Pool.search({
        'operator': 'regex_match',
        'val1': 'name',
        'val2': search_string
    })

    ret = []
    for p in res['result']:
        ret.append(p.name)

    return ret
Returns list of matching pool names
2,045
def get(cls, user_id, db_session=None):
    db_session = get_db_session(db_session)
    return db_session.query(cls.model).get(user_id)
Fetch row using primary key - will use existing object in session if already present :param user_id: :param db_session: :return:
2,046
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # flag and help strings assumed
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity',
        action='store_true'
    )
    args = parser.parse_args()

    generator = SignatureGenerator(debug=args.verbose)
    crash_data = json.loads(sys.stdin.read())
    ret = generator.generate(crash_data)
    print(json.dumps(ret, indent=2))
Takes crash data via stdin and generates a Socorro signature
2,047
def font_size_splitter(font_map):
    small_font = []
    medium_font = []
    large_font = []
    xlarge_font = []
    fonts = set(font_map.keys()) - set(RANDOM_FILTERED_FONTS)
    for font in fonts:
        length = max(map(len, font_map[font][0].values()))
        if length <= FONT_SMALL_THRESHOLD:
            small_font.append(font)
        elif length > FONT_SMALL_THRESHOLD and length <= FONT_MEDIUM_THRESHOLD:
            medium_font.append(font)
        elif length > FONT_MEDIUM_THRESHOLD and length <= FONT_LARGE_THRESHOLD:
            large_font.append(font)
        else:
            xlarge_font.append(font)
    return {
        "small_list": small_font,
        "medium_list": medium_font,
        "large_list": large_font,
        "xlarge_list": xlarge_font}
Split fonts into 4 categories (small, medium, large, xlarge) by the maximum letter length in each font. :param font_map: input fontmap :type font_map : dict :return: split fonts as dict
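A toy input, assuming the module's threshold constants and `RANDOM_FILTERED_FONTS` are in scope; the two fonts are made up:

font_map = {
    'tiny':  [{'a': 'a', 'b': 'b'}],    # max letter length 1 -> small bucket
    'block': [{'a': '###\n# #\n###'}],  # longer rendering -> a larger bucket
}
buckets = font_size_splitter(font_map)
print(buckets['small_list'])  # ['tiny']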
2,048
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
    responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip),
                     type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0,
                     **kargs)
    return responses is not None
Try to guess if target is in Promisc mode. The target is provided by its ip.
2,049
def json_decode(data_type, serialized_obj, caller_permissions=None,
                alias_validators=None, strict=True, old_style=False):
    try:
        deserialized_obj = json.loads(serialized_obj)
    except ValueError:
        raise bv.ValidationError('could not decode input as JSON')
    else:
        return json_compat_obj_decode(
            data_type, deserialized_obj,
            caller_permissions=caller_permissions,
            alias_validators=alias_validators, strict=strict,
            old_style=old_style)
Performs the reverse operation of json_encode. Args: data_type (Validator): Validator for serialized_obj. serialized_obj (str): The JSON string to deserialize. caller_permissions (list): The list of raw-string caller permissions with which to serialize. alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]): Custom validation functions. These must raise bv.ValidationError on failure. strict (bool): If strict, then unknown struct fields will raise an error, and unknown union variants will raise an error even if a catch all field is specified. strict should only be used by a recipient of serialized JSON if it's guaranteed that its Stone specs are at least as recent as the senders it receives messages from. Returns: The returned object depends on the input data_type. - Boolean -> bool - Bytes -> bytes - Float -> float - Integer -> long - List -> list - Map -> dict - Nullable -> None or its wrapped type. - String -> unicode (PY2) or str (PY3) - Struct -> An instance of its definition attribute. - Timestamp -> datetime.datetime - Union -> An instance of its definition attribute.
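A usage sketch, assuming `bv` here is the Stone validators module and that a list-of-int validator is composed as below (hypothetical composition):

import json

data_type = bv.List(bv.Int32())
result = json_decode(data_type, json.dumps([1, 2, 3]))
print(result)  # [1, 2, 3]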
2,050
def add_highlight(self, artist, *args, **kwargs):
    hl = _pick_info.make_highlight(
        artist, *args,
        **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
    if hl:
        artist.axes.add_artist(hl)
    return hl
Create, add, and return a highlighting artist. This method should be called with an "unpacked" `Selection`, possibly with some fields set to None. It is up to the caller to register the artist with the proper `Selection` (by calling ``sel.extras.append`` on the result of this method) in order to ensure cleanup upon deselection.
2,051
def dump(self):
    print("pagesize=%08x, reccount=%08x, pagecount=%08x" % (
        self.pagesize, self.reccount, self.pagecount))
    self.dumpfree()
    self.dumptree(self.firstindex)
raw dump of all records in the b-tree
2,052
def replace_volume_attachment(self, name, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_volume_attachment_with_http_info(name, body, **kwargs)
    else:
        (data) = self.replace_volume_attachment_with_http_info(name, body, **kwargs)
        return data
replace the specified VolumeAttachment. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_volume_attachment(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the VolumeAttachment (required) :param V1VolumeAttachment body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1VolumeAttachment If the method is called asynchronously, returns the request thread.
2,053
def get_subgraph(graph,
                 seed_method: Optional[str] = None,
                 seed_data: Optional[Any] = None,
                 expand_nodes: Optional[List[BaseEntity]] = None,
                 remove_nodes: Optional[List[BaseEntity]] = None,
                 ):
    # Seed by the given function name and data
    if seed_method == SEED_TYPE_INDUCTION:
        result = get_subgraph_by_induction(graph, seed_data)
    elif seed_method == SEED_TYPE_PATHS:
        result = get_subgraph_by_all_shortest_paths(graph, seed_data)
    elif seed_method == SEED_TYPE_NEIGHBORS:
        result = get_subgraph_by_neighborhood(graph, seed_data)
    elif seed_method == SEED_TYPE_DOUBLE_NEIGHBORS:
        result = get_subgraph_by_second_neighbors(graph, seed_data)
    elif seed_method == SEED_TYPE_UPSTREAM:
        result = get_multi_causal_upstream(graph, seed_data)
    elif seed_method == SEED_TYPE_DOWNSTREAM:
        result = get_multi_causal_downstream(graph, seed_data)
    elif seed_method == SEED_TYPE_PUBMED:
        result = get_subgraph_by_pubmed(graph, seed_data)
    elif seed_method == SEED_TYPE_AUTHOR:
        result = get_subgraph_by_authors(graph, seed_data)
    elif seed_method == SEED_TYPE_ANNOTATION:
        result = get_subgraph_by_annotations(graph, seed_data['annotations'],
                                             or_=seed_data.get('or'))
    elif seed_method == SEED_TYPE_SAMPLE:
        result = get_random_subgraph(
            graph,
            number_edges=seed_data.get('number_edges'),
            seed=seed_data.get('seed')
        )
    elif not seed_method:
        # No seed method: start from a copy of the whole graph. The tail of
        # this function (a debug log of seed_method/seed_data/result size,
        # plus the expand_nodes/remove_nodes steps described in the
        # docstring) was lost in extraction.
        result = graph.copy()

    return result
Run a pipeline query on graph with multiple sub-graph filters and expanders. Order of Operations: 1. Seeding by given function name and data 2. Add nodes 3. Remove nodes :param pybel.BELGraph graph: A BEL graph :param seed_method: The name of the get_subgraph_by_* function to use :param seed_data: The argument to pass to the get_subgraph function :param expand_nodes: Add the neighborhoods around all of these nodes :param remove_nodes: Remove these nodes and all of their in/out edges :rtype: Optional[pybel.BELGraph]
2,054
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name):
    assert HAS_PANDAS, 'pandas is required'  # message reconstructed
    cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
    df = pd.DataFrame({
        src_column_name: [e.src_vid for e in ls],
        dst_column_name: [e.dst_vid for e in ls]})
    for c in cols:
        df[c] = [e.attr.get(c) for e in ls]
    return df
Convert a list of edges into a dataframe.
2,055
def get_storage_hash(storage):
    # If the storage is wrapped in a lazy object we need the real thing.
    if isinstance(storage, LazyObject):
        if storage._wrapped is None:
            storage._setup()
        storage = storage._wrapped
    if not isinstance(storage, six.string_types):
        storage_cls = storage.__class__
        storage = '%s.%s' % (storage_cls.__module__, storage_cls.__name__)
    return hashlib.md5(storage.encode('utf8')).hexdigest()
Return a hex string hash for a storage object (or string containing 'full.path.ClassName' referring to a storage object).
2,056
def list(self, id, seq):
    # excluded field names were lost in extraction; placeholders below
    schema = CaptureSchema(exclude=('id', 'seq'))
    resp = self.service.list(self._base(id, seq))
    return self.service.decode(schema, resp, many=True)
Get a list of captures. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :return: :class:`captures.Capture <captures.Capture>` list
2,057
def getColorHSV(name):
    try:
        x = getColorInfoList()[getColorList().index(name.upper())]
    except:
        return (-1, -1, -1)

    r = x[1] / 255.
    g = x[2] / 255.
    b = x[3] / 255.
    cmax = max(r, g, b)
    V = round(cmax * 100, 1)
    cmin = min(r, g, b)
    delta = cmax - cmin
    if delta == 0:
        hue = 0
    elif cmax == r:
        hue = 60. * (((g - b)/delta) % 6)
    elif cmax == g:
        hue = 60. * (((b - r)/delta) + 2)
    else:
        hue = 60. * (((r - g)/delta) + 4)

    H = int(round(hue))
    if cmax == 0:
        sat = 0
    else:
        sat = delta / cmax
    S = int(round(sat * 100))

    return (H, S, V)
Retrieve the hue, saturation, value triple of a color name. Returns: a triple (degree, percent, percent). If not found (-1, -1, -1) is returned.
2,058
def package_locations(self, package_keyname):
    mask = "mask[description, keyname, locations]"
    package = self.get_package_by_key(package_keyname, mask='id')
    regions = self.package_svc.getRegions(id=package['id'], mask=mask)
    return regions
List datacenter locations for a package keyname :param str package_keyname: The package for which to get the items. :returns: List of locations a package is orderable in
2,059
def get_permission_requests(parser, token):
    # default context-variable name assumed from the docstring example
    return PermissionsForObjectNode.handle_token(parser, token,
                                                 approved=False,
                                                 name='"permissions"')
Retrieves all permissions requests associated with the given obj and user and assigns the result to a context variable. Syntax:: {% get_permission_requests obj %} {% for perm in permissions %} {{ perm }} {% endfor %} {% get_permission_requests obj as "my_permissions" %} {% get_permission_requests obj for request.user as "my_permissions" %}
2,060
def error(self, error):
    if self.direction not in ['x', 'y', 'z'] and error is not None:
        raise ValueError("error only accepted for x, y, z dimensions")
    if isinstance(error, u.Quantity):
        error = error.to(self.unit).value
    self._error = error
set the error
2,061
def _get_stats_columns(cls, table, relation_type):
    column_names = cls._get_stats_column_names()

    clustering_value = None
    if table.clustering_fields is not None:
        clustering_value = ','.join(table.clustering_fields)

    # (name, value, description, include) quadruples; the description
    # strings are reconstructions
    column_values = (
        'num_bytes', str(table.num_bytes),
        'The number of bytes this table consumes', relation_type == 'table',

        'num_rows', str(table.num_rows),
        'The number of rows in this table', relation_type == 'table',

        'location', table.location,
        'The geographic location of this table', True,

        'partitioning_type', table.partitioning_type,
        'The partitioning type used for this table', relation_type == 'table',

        'clustering_fields', clustering_value,
        'The clustering fields for this table', relation_type == 'table',
    )
    return zip(column_names, column_values)
Given a table, return an iterator of key/value pairs for stats column names/values.
2,062
def _match_type(self, i):
    self.col_match = self.RE_TYPE.match(self._source[i])
    if self.col_match is not None:
        self.section = "types"
        self.el_type = CustomType
        self.el_name = self.col_match.group("name")
        return True
    else:
        return False
Looks at line 'i' to see if the line matches a module user type def.
2,063
def distinct_words(string_matrix: List[List[str]]) -> Set[str]:
    return set([word for sentence in string_matrix for word in sentence])
Diagnostic function :param string_matrix: :return: >>> dl = distinct_words([['the', 'quick', 'brown'], ['here', 'lies', 'the', 'fox']]) >>> sorted(dl) ['brown', 'fox', 'here', 'lies', 'quick', 'the']
2,064
def get_slopes(data, s_freq, level='all', smooth=0.05):
    data = negative(data)
    nan_array = empty((5,))
    nan_array[:] = nan
    idx_trough = data.argmin()
    idx_peak = data.argmax()
    if idx_trough >= idx_peak:
        return nan_array, nan_array

    zero_crossings_0 = where(diff(sign(data[:idx_trough])))[0]
    zero_crossings_1 = where(diff(sign(data[idx_trough:idx_peak])))[0]
    zero_crossings_2 = where(diff(sign(data[idx_peak:])))[0]
    if zero_crossings_1.any():
        idx_zero_1 = idx_trough + zero_crossings_1[0]
    else:
        return nan_array, nan_array

    if zero_crossings_0.any():
        idx_zero_0 = zero_crossings_0[-1]
    else:
        idx_zero_0 = 0

    if zero_crossings_2.any():
        idx_zero_2 = idx_peak + zero_crossings_2[0]
    else:
        idx_zero_2 = len(data) - 1

    avgsl = nan_array
    if level in ['average', 'all']:
        q1 = data[idx_trough] / ((idx_trough - idx_zero_0) / s_freq)
        q2 = data[idx_trough] / ((idx_zero_1 - idx_trough) / s_freq)
        q3 = data[idx_peak] / ((idx_peak - idx_zero_1) / s_freq)
        q4 = data[idx_peak] / ((idx_zero_2 - idx_peak) / s_freq)
        q23 = (data[idx_peak] - data[idx_trough]) \
            / ((idx_peak - idx_trough) / s_freq)
        avgsl = asarray([q1, q2, q3, q4, q23])
        avgsl[isinf(avgsl)] = nan

    maxsl = nan_array
    if level in ['maximum', 'all']:
        if smooth is not None:
            win = int(smooth * s_freq)
            flat = ones(win)
            data = fftconvolve(data, flat / sum(flat), mode='same')

        if idx_trough - idx_zero_0 >= win:
            maxsl[0] = min(diff(data[idx_zero_0:idx_trough]))
        if idx_zero_1 - idx_trough >= win:
            maxsl[1] = max(diff(data[idx_trough:idx_zero_1]))
        if idx_peak - idx_zero_1 >= win:
            maxsl[2] = max(diff(data[idx_zero_1:idx_peak]))
        if idx_zero_2 - idx_peak >= win:
            maxsl[3] = min(diff(data[idx_peak:idx_zero_2]))
        if idx_peak - idx_trough >= win:
            maxsl[4] = max(diff(data[idx_trough:idx_peak]))
        maxsl[isinf(maxsl)] = nan

    return avgsl, maxsl
Get the slopes (average and/or maximum) for each quadrant of a slow wave, as well as the combination of quadrants 2 and 3. Parameters ---------- data : ndarray raw data as vector s_freq : int sampling frequency level : str if 'average', returns average slopes (uV / s). if 'maximum', returns the maximum of the slope derivative (uV / s**2). if 'all', returns all. smooth : float or None if not None, signal will be smoothed by moving average, with a window of this duration Returns ------- tuple of ndarray each array is len 5, with q1, q2, q3, q4 and q23. First array is average slopes and second is maximum slopes. Notes ----- This function is made to take automatically detected start and end times AS WELL AS manually delimited ones. In the latter case, the first and last zero has to be detected within this function.
2,065
def health(self, index=None, params=None):
    return self.transport.perform_request(
        'GET', _make_path('_cluster', 'health', index), params=params)
Get a very simple status on the health of the cluster. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html>`_ :arg index: Limit the information returned to a specific index :arg level: Specify the level of detail for returned information, default 'cluster', valid choices are: 'cluster', 'indices', 'shards' :arg local: Return local information, do not retrieve the state from master node (default: false) :arg master_timeout: Explicit operation timeout for connection to master node :arg timeout: Explicit operation timeout :arg wait_for_active_shards: Wait until the specified number of shards is active :arg wait_for_events: Wait until all currently queued events with the given priority are processed, valid choices are: 'immediate', 'urgent', 'high', 'normal', 'low', 'languid' :arg wait_for_no_relocating_shards: Whether to wait until there are no relocating shards in the cluster :arg wait_for_nodes: Wait until the specified number of nodes is available :arg wait_for_status: Wait until cluster is in a specific state, default None, valid choices are: 'green', 'yellow', 'red'
2,066
def should_include_file_in_search(file_name, extensions, exclude_dirs):
    return (exclude_dirs is None or
            not any(file_name.startswith(d) for d in exclude_dirs)) and \
        any(file_name.endswith(e) for e in extensions)
Whether or not a filename matches a search criteria according to arguments. Args: file_name (str): A file path to check. extensions (list): A list of file extensions file should match. exclude_dirs (list): A list of directories to exclude from search. Returns: A boolean of whether or not file matches search criteria.
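Two illustrative calls (paths and extensions made up):

should_include_file_in_search('src/app/main.py', ['.py'], ['build', 'dist'])
# True: ends with a wanted extension and is not under an excluded directory
should_include_file_in_search('build/gen.py', ['.py'], ['build', 'dist'])
# False: the path starts with the excluded 'build' directory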
2,067
def setdim(P, dim=None):
    P = P.copy()
    ldim = P.dim
    if not dim:
        dim = ldim + 1
    if dim == ldim:
        return P

    P.dim = dim
    if dim > ldim:
        key = numpy.zeros(dim, dtype=int)
        for lkey in P.keys:
            key[:ldim] = lkey
            P.A[tuple(key)] = P.A.pop(lkey)
    else:
        key = numpy.zeros(dim, dtype=int)
        for lkey in P.keys:
            if not sum(lkey[ldim-1:]) or not sum(lkey):
                P.A[lkey[:dim]] = P.A.pop(lkey)
            else:
                del P.A[lkey]
    P.keys = sorted(P.A.keys(), key=sort_key)
    return P
Adjust the dimensions of a polynomial. Output the results into a Poly object Args: P (Poly) : Input polynomial dim (int) : The dimensions of the output polynomial. If omitted, increase the polynomial by one dimension. If the new dim is smaller than P's dimensions, variables with cut components are all cut. Examples: >>> x,y = chaospy.variable(2) >>> P = x*x-x*y >>> print(chaospy.setdim(P, 1)) q0^2
2,068
def slice_around_gaps(values, maxgap):
    if not (maxgap > 0):
        raise ValueError('maxgap must be positive; got %r' % maxgap)

    values = np.asarray(values)
    delta = values[1:] - values[:-1]

    if np.any(delta < 0):
        raise ValueError('values must be in nondecreasing order')

    # Indices just after each gap larger than maxgap
    whgap = np.where(delta > maxgap)[0] + 1
    prev_idx = None

    for gap_idx in whgap:
        yield slice(prev_idx, gap_idx)
        prev_idx = gap_idx

    yield slice(prev_idx, None)
Given an ordered array of values, generate a set of slices that traverse all of the values. Within each slice, no gap between adjacent values is larger than `maxgap`. In other words, these slices break the array into chunks separated by gaps of size larger than maxgap.
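For example (values chosen only to show the chunking):

import numpy as np

values = np.array([0., 1., 2., 10., 11., 30.])
for sl in slice_around_gaps(values, 5):
    print(values[sl])
# -> [0. 1. 2.], then [10. 11.], then [30.]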
2,069
def _check_channel_state_for_update(
        self,
        channel_identifier: ChannelID,
        closer: Address,
        update_nonce: Nonce,
        block_identifier: BlockSpecification,
) -> Optional[str]:
    msg = None
    closer_details = self._detail_participant(
        channel_identifier=channel_identifier,
        participant=closer,
        partner=self.node_address,
        block_identifier=block_identifier,
    )

    if closer_details.nonce == update_nonce:
        msg = (
            'update transfer was already done; the on-chain nonce already '
            'matches the nonce of this balance proof'
        )

    return msg
Check the channel state on chain to see if it has been updated. Compare the nonce, we are about to update the contract with, with the updated nonce in the onchain state and, if it's the same, return a message with which the caller should raise a RaidenRecoverableError. If all is okay return None.
2,070
def perform_remote_action(i):
    # Import modules compatible with Python 2.x and 3.x
    import urllib
    try:
        import urllib.request as urllib2
    except:
        import urllib2
    try:
        from urllib.parse import urlencode
    except:
        from urllib import urlencode

    rr = {'return': 0}

    act = i.get('action', '')
    o = i.get('out', '')

    if o == 'con':
        i['out'] = 'con'
        i['quiet'] = 'yes'
        if act == 'pull':
            i['out'] = 'json'
    else:
        i['out'] = 'json'

    d = r['dict']  # 'r': result of the remote request (not preserved in this snippet)

    if 'return' in d:
        d['return'] = int(d['return'])
    if d.get('return', 0) > 0:
        return d

    if act == 'pull':
        if o != 'json' and o != 'json_file':
            # Convert the pulled file from its base64 representation
            x = d.get('file_content_base64', '')
            fn = d.get('filename', '')
            if fn == '':
                fn = cfg['default_archive_name']
            r = convert_upload_string_to_file({'file_content_base64': x,
                                               'filename': fn})
            if r['return'] > 0:
                return r
            if 'file_content_base64' in d:
                del (d['file_content_base64'])

    rr.update(d)
    i['out'] = o

    return rr
Input: { See 'perform_action' function } Output: { See 'perform_action' function }
2,071
def get_term_pillar(filter_name,
                    term_name,
                    pillar_key='acl',
                    pillarenv=None,
                    saltenv=None):
    return __salt__['capirca.get_term_pillar'](filter_name,
                                               term_name,
                                               pillar_key=pillar_key,
                                               pillarenv=pillarenv,
                                               saltenv=saltenv)
Helper that can be used inside a state SLS, in order to get the term configuration given its name, under a certain filter uniquely identified by its name. filter_name The name of the filter. term_name The name of the term. pillar_key: ``acl`` The root key of the whole policy config. Default: ``acl``. pillarenv Query the master to generate fresh pillar data on the fly, specifically from the requested pillar environment. saltenv Included only for compatibility with :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
2,072
def check(self):
    status = True
    synced = True

    xbin = self.xbin.value()
    ybin = self.ybin.value()
    nwin = self.nwin.value()

    g = get_root(self).globals
    for xsw, ysw, nxw, nyw in \
            zip(self.xs[:nwin], self.ys[:nwin], self.nx[:nwin], self.ny[:nwin]):

        # Reset backgrounds before re-checking
        xsw.config(bg=g.COL['main'])
        ysw.config(bg=g.COL['main'])
        nxw.config(bg=g.COL['main'])
        nyw.config(bg=g.COL['main'])

        status = status if xsw.ok() else False
        status = status if ysw.ok() else False
        status = status if nxw.ok() else False
        status = status if nyw.ok() else False

        xs = xsw.value()
        ys = ysw.value()
        nx = nxw.value()
        ny = nyw.value()

        # Flag window dimensions incompatible with the binning factors
        if nx is None or nx % xbin != 0:
            nxw.config(bg=g.COL['error'])
            status = False
        elif (nx // xbin) % 4 != 0:
            nxw.config(bg=g.COL['error'])
            status = False

        if ny is None or ny % ybin != 0:
            nyw.config(bg=g.COL['error'])
            status = False

    return status
Checks the values of the windows. If any problems are found, it flags them by changing the background colour. Only active windows are checked. Returns status, flag for whether parameters are viable.
2,073
def read_csv(filename, delimiter=",", skip=0, guess_type=True,
             has_header=True, use_types={}):
    with open(filename, 'r') as f:
        if has_header:
            header = f.readline().strip().split(delimiter)
        else:
            header = None

        # Skip initial lines if requested
        for i in range(skip):
            f.readline()

        for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
            if use_types:
                yield apply_types(use_types, guess_type, line)
            elif guess_type:
                yield dmap(determine_type, line)
            else:
                yield line
Read a CSV file Usage ----- >>> data = read_csv(filename, delimiter=delimiter, skip=skip, guess_type=guess_type, has_header=True, use_types={}) # Use specific types >>> types = {"sepal.length": int, "petal.width": float} >>> data = read_csv(filename, guess_type=guess_type, use_types=types) keywords :has_header: Determine whether the file has a header or not
2,074
def osd_page_handler(config=None, identifier=None, prefix=None, **args):
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    with open(os.path.join(template_dir, 'testserver_osd.html'), 'r') as f:
        template = f.read()
    d = dict(prefix=prefix,
             identifier=identifier,
             api_version=config.api_version,
             osd_version='2.0.0',
             osd_uri='openseadragon200/openseadragon.min.js',
             osd_images_prefix='openseadragon200/images',
             osd_height=500,
             osd_width=500,
             info_json_uri='info.json')
    return make_response(Template(template).safe_substitute(d))
Flask handler to produce HTML response for OpenSeadragon view of identifier. Arguments: config - Config object for this IIIF handler identifier - identifier of image/generator prefix - path prefix **args - other arguments ignored
2,075
def ref2names2commdct(ref2names, commdct):
    for comm in commdct:
        for cdct in comm:
            try:
                refs = cdct['object-list'][0]
                validobjects = ref2names[refs]
                cdct.update({'validobjects': validobjects})
            except KeyError as e:
                continue
    return commdct
embed ref2names into commdct
2,076
def create(cls, name, division, api=None):
    division = Transform.to_division(division)
    api = api if api else cls._API
    data = {
        'name': name,
        'division': division,
    }
    extra = {
        'resource': cls.__name__,
        'query': data,
    }
    logger.info('Creating team', extra=extra)
    created_team = api.post(cls._URL['query'], data=data).json()
    return Team(api=api, **created_team)
Create team within a division :param name: Team name. :param division: Parent division. :param api: Api instance. :return: Team object.
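A hedged usage sketch (sevenbridges-style; the authenticated api session and the division id below are placeholders, not taken from this snippet):

# `api` is an authenticated API session; 'my-division-id' is a placeholder.
division = api.divisions.get('my-division-id')
team = Team.create(name='analysts', division=division, api=api)
print(team.id)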
2,077
def deref(self, ctx):
    if self in ctx.call_nodes:
        raise CyclicReferenceError(ctx, self)
    if self in ctx.cached_results:
        return ctx.cached_results[self]
    try:
        ctx.call_nodes.add(self)
        ctx.call_stack.append(self)

        result = self.evaluate(ctx)
        ctx.cached_results[self] = result
        return result
    except:
        # Record the call stack of the first failure for diagnostics
        if ctx.exception_call_stack is None:
            ctx.exception_call_stack = list(ctx.call_stack)
        raise
    finally:
        ctx.call_stack.pop()
        ctx.call_nodes.remove(self)
Returns the value this reference is pointing to. This method uses 'ctx' to resolve the reference and return the value this reference references. If the call was already made, it returns a cached result. It also makes sure there's no cyclic reference, and if so raises CyclicReferenceError.
2,078
def visit_ellipsis(self, node, parent):
    return nodes.Ellipsis(
        getattr(node, "lineno", None), getattr(node, "col_offset", None), parent
    )
visit an Ellipsis node by returning a fresh instance of it
2,079
def program_files(self, executable):
    if self._get_version() == 6:
        paths = self.REQUIRED_PATHS_6
    elif self._get_version() > 6:
        paths = self.REQUIRED_PATHS_7_1
    return paths
Determine the file paths to be adopted
2,080
def _match_processes(self, pid, name, cur_process):
    cur_pid, cur_name = self._get_tuple(cur_process.split('/'))

    pid_match = False
    if not pid:
        pid_match = True
    elif pid == cur_pid:
        pid_match = True

    name_match = False
    if not name:
        name_match = True
    elif name == cur_name:
        name_match = True

    return pid_match and name_match
Determine whether the user-specified "pid/process" matches this process :param pid: The user input of pid :param name: The user input of process name :param cur_process: current process info :return: True or not (if both pid and process name are given, then both of them need to match)
2,081
def get(self, name, param=None):
    if name not in self.attribs:
        raise exceptions.SoftLayerError('Unknown metadata attribute.')

    call_details = self.attribs[name]
    if call_details.get('param_req'):
        if not param:
            raise exceptions.SoftLayerError(
                'Parameter required to get this attribute.')

    params = tuple()
    if param is not None:
        params = (param,)
    try:
        return self.client.call('Resource_Metadata',
                                self.attribs[name]['call'],
                                *params)
    except exceptions.SoftLayerAPIError as ex:
        if ex.faultCode == 404:
            return None
        raise ex
Retrieve a metadata attribute. :param string name: name of the attribute to retrieve. See `attribs` :param param: Required parameter for some attributes
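Illustrative calls (the instance and attribute names below are assumptions about the contents of self.attribs, not confirmed by this snippet):

meta = MetadataManager()                 # hypothetical instance of the class above
hostname = meta.get('hostname')          # simple attribute
value = meta.get('network', param='eth0')  # attribute that requires a parameter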
2,082
def getlocals(back=2):
    import inspect
    fr = inspect.currentframe()
    try:
        while fr and back != 0:
            fr1 = fr
            fr = fr.f_back
            back -= 1
    except:
        pass
    return fr1.f_locals
Get the local variables some levels back (-1 is top).
2,083
def network(n):
    tpm(n.tpm)
    connectivity_matrix(n.cm)
    if n.cm.shape[0] != n.size:
        raise ValueError("Connectivity matrix must be NxN, where N is the "
                         "number of nodes in the network.")
    return True
Validate a |Network|. Checks the TPM and connectivity matrix.
2,084
def validate(self):
    if not isinstance(self.location, Location):
        raise TypeError(u'Expected Location location, got: {} {}'.format(
            type(self.location).__name__, self.location))

    if not self.location.field:
        raise ValueError(u'Expected Location at a field, '
                         u'but got: {}'.format(self.location))

    if not is_graphql_type(self.field_type):
        raise ValueError(u'Invalid value of "field_type": {}'.format(self.field_type))

    stripped_field_type = strip_non_null_from_type(self.field_type)
    if isinstance(stripped_field_type, GraphQLList):
        inner_type = strip_non_null_from_type(stripped_field_type.of_type)
        if GraphQLDate.is_same_type(inner_type) or GraphQLDateTime.is_same_type(inner_type):
            # The original error branch was truncated in this extract;
            # reconstructed as a compilation error for unrepresentable types.
            raise GraphQLCompilationError(
                u'Lists of Date or DateTime cannot currently be '
                u'represented: {}'.format(self.field_type))
Validate that the OutputContextField is correctly representable.
2,085
def sign_execute_deposit(deposit_params, key_pair):
    signature = sign_transaction(transaction=deposit_params['transaction'],
                                 private_key_hex=private_key_to_hex(key_pair=key_pair))
    return {'signature': signature}
Function to execute the deposit request by signing the transaction generated by the create deposit function. Execution of this function is as follows:: sign_execute_deposit(deposit_details=create_deposit, key_pair=key_pair) The expected return result for this function is as follows:: { 'signature': '3cc4a5cb7b7d50383e799add2ba35382b6f2f1b2e3b97802....' } :param deposit_params: The parameters generated by the create deposit function that now requires signature. :type deposit_params: dict :param key_pair: The KeyPair for the wallet being used to sign deposit message. :type key_pair: KeyPair :return: Dictionary with the result status of the deposit attempt.
2,086
def compile_file_into_spirv(filepath, stage, optimization='size',
                            warnings_as_errors=False):
    with open(filepath, 'rb') as f:
        content = f.read()

    return compile_into_spirv(content, stage, filepath,
                              optimization=optimization,
                              warnings_as_errors=warnings_as_errors)
Compile shader file into Spir-V binary. This function uses shaderc to compile your glsl file code into Spir-V code. Args: filepath (strs): Absolute path to your shader file stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom', 'frag', 'comp'] optimization (str): 'zero' (no optimization) or 'size' (reduce size) warnings_as_errors (bool): Turn warnings into errors Returns: bytes: Compiled Spir-V binary. Raises: CompilationError: If compilation fails.
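Typical usage writes the returned binary to a .spv file ('shader.vert' is a placeholder path):

spirv = compile_file_into_spirv('shader.vert', 'vert', optimization='size')
with open('shader.vert.spv', 'wb') as out:
    out.write(spirv)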
2,087
def _dstr(degrees, places=1, signed=False):
    if isnan(degrees):
        return 'nan'
    sgn, d, m, s, etc = _sexagesimalize_to_int(degrees, places)
    sign = '-' if sgn < 0.0 else '+' if signed else ''
    return '%s%ddeg %02d\' %02d.%0*d"' % (sign, d, m, s, places, etc)
r"""Convert floating point `degrees` into a sexagesimal string. >>> _dstr(181.875) '181deg 52\' 30.0"' >>> _dstr(181.875, places=3) '181deg 52\' 30.000"' >>> _dstr(181.875, signed=True) '+181deg 52\' 30.0"' >>> _dstr(float('nan')) 'nan'
2,088
def isSet(self, param): param = self._resolveParam(param) return param in self._paramMap
Checks whether a param is explicitly set by user.
2,089
def send_event_to_salt(self, result):
    if result['send']:
        data = result['data']
        topic = result['topic']
        # If the engine is run on the master, fire the event directly
        # on the master event bus.
        if __opts__['__role'] == 'master':
            event.get_master_event(__opts__,
                                   __opts__['sock_dir']
                                   ).fire_event(data, topic)
        # If the engine is run on a minion, use the fire_master
        # execution module to send the event to the master bus.
        else:
            __salt__['event.fire_master'](data=data, tag=topic)
This function identifies whether the engine is running on the master or the minion and sends the data to the master event bus accordingly. :param result: It's a dictionary which has the final data and topic.
2,090
def unhex(s):
    bits = 0
    for c in s:
        if '0' <= c <= '9':
            i = ord('0')
        elif 'a' <= c <= 'f':
            i = ord('a') - 10
        elif 'A' <= c <= 'F':
            i = ord('A') - 10
        else:
            break
        bits = bits * 16 + (ord(c) - i)
    return bits
Get the integer value of a hexadecimal number.
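Behaviour at a glance; note that parsing stops at the first non-hex character rather than raising:

print(unhex('ff'))     # 255
print(unhex('1A2B'))   # 6699
print(unhex('12xyz'))  # 18 -- stops at 'x'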
2,091
def save_config(self):
    if not self.opts['dirty_config'][1]:
        if logger.isEnabledFor(logging.INFO):
            logger.info('Config not saved (not modified)')
        return 1
    # Config-file template; the full body (per-option comments) was not
    # preserved in this extract, and the option names below are inferred.
    txt = '''# PyRadio Configuration File
player = {0}
default_playlist = {1}
default_station = {2}
default_encoding = {3}
connection_timeout = {4}
theme = {5}
use_transparency = {6}
confirm_station_deletion = {7}
confirm_playlist_reload = {8}
auto_save_playlist = {9}
'''
    copyfile(self.config_file, self.config_file + '.restore')
    if self.opts['default_encoding'][1] is None:
        self.opts['default_encoding'][1] = 'utf-8'
    try:
        with open(self.config_file, 'w') as cfgfile:
            cfgfile.write(txt.format(self.opts['player'][1],
                                     self.opts['default_playlist'][1],
                                     self.opts['default_station'][1],
                                     self.opts['default_encoding'][1],
                                     self.opts['connection_timeout'][1],
                                     self.opts['theme'][1],
                                     self.opts['use_transparency'][1],
                                     self.opts['confirm_station_deletion'][1],
                                     self.opts['confirm_playlist_reload'][1],
                                     self.opts['auto_save_playlist'][1]))
    except:
        if logger.isEnabledFor(logging.ERROR):
            logger.error('Error saving config')
        return -1
    try:
        remove(self.config_file + '.restore')
    except:
        pass
    if logger.isEnabledFor(logging.INFO):
        logger.info('Config saved')
    self.opts['dirty_config'][1] = False
    return 0
Save config file

Creates config.restore (back up file)

Returns:
    -1: Error saving config
     0: Config saved successfully
     1: Config not saved (not modified)
2,092
def check_cgroup_availability_in_thread(options):
    thread = _CheckCgroupsThread(options)
    thread.start()
    thread.join()
    if thread.error:
        raise thread.error
Run check_cgroup_availability() in a separate thread to detect the following problem: If "cgexec --sticky" is used to tell cgrulesengd to not interfere with our child processes, the sticky flag unfortunately works only for processes spawned by the main thread, not those spawned by other threads (and this will happen if "benchexec -N" is used).
2,093
def run():
    global WORKBENCH

    # Grab the server arguments and connect to the workbench server
    args = client_helper.grab_server_args()
    WORKBENCH = zerorpc.Client(timeout=300, heartbeat=60)
    WORKBENCH.connect('tcp://' + args['server'] + ':' + args['port'])

    # Test data path
    data_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'data/pcap')
    file_list = [os.path.join(data_path, child) for child in
                 os.listdir(data_path)]
    results = []
    for filename in file_list:
        # Skip OS-generated files
        if '.DS_Store' in filename:
            continue

        with open(filename, 'rb') as f:
            md5 = WORKBENCH.store_sample(f.read(), filename, 'pcap')
            result = WORKBENCH.work_request('view_pcap', md5)
            result.update(WORKBENCH.work_request('meta', result['view_pcap']['md5']))
            result['filename'] = result['meta']['filename'].split('/')[-1]
            results.append(result)
    return results
This client pulls PCAP files for building a report. Returns: A list with `view_pcap`, `meta` and `filename` objects.
2,094
def __ensure_provisioning_writes(
        table_name, table_key, gsi_name, gsi_key, num_consec_write_checks):
    if not get_gsi_option(table_key, gsi_key, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - GSI: {1} - '
            'Autoscaling of writes has been disabled'.format(
                table_name, gsi_name))
        return False, dynamodb.get_provisioned_gsi_write_units(
            table_name, gsi_name), 0

    update_needed = False
    try:
        lookback_window_start = get_gsi_option(
            table_key, gsi_key, 'lookback_window_start')
        lookback_period = get_gsi_option(
            table_key, gsi_key, 'lookback_period')
        current_write_units = dynamodb.get_provisioned_gsi_write_units(
            table_name, gsi_name)
        consumed_write_units_percent = \
            gsi_stats.get_consumed_write_units_percent(
                table_name, gsi_name, lookback_window_start, lookback_period)
        throttled_write_count = \
            gsi_stats.get_throttled_write_event_count(
                table_name, gsi_name, lookback_window_start, lookback_period)
        throttled_by_provisioned_write_percent = \
            gsi_stats.get_throttled_by_provisioned_write_event_percent(
                table_name, gsi_name, lookback_window_start, lookback_period)
        throttled_by_consumed_write_percent = \
            gsi_stats.get_throttled_by_consumed_write_percent(
                table_name, gsi_name, lookback_window_start, lookback_period)
        writes_upper_threshold = \
            get_gsi_option(table_key, gsi_key, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_gsi_option(table_key, gsi_key, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_gsi_option(
                table_key, gsi_key, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_gsi_option(table_key, gsi_key, 'increase_writes_unit')
        increase_writes_with = \
            get_gsi_option(table_key, gsi_key, 'increase_writes_with')
        decrease_writes_unit = \
            get_gsi_option(table_key, gsi_key, 'decrease_writes_unit')
        decrease_writes_with = \
            get_gsi_option(table_key, gsi_key, 'decrease_writes_with')
        min_provisioned_writes = \
            get_gsi_option(table_key, gsi_key, 'min_provisioned_writes')
        max_provisioned_writes = \
            get_gsi_option(table_key, gsi_key, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_gsi_option(
                table_key, gsi_key, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_gsi_option(
                table_key, gsi_key, 'num_write_checks_reset_percent')
        increase_throttled_by_provisioned_writes_unit = \
            get_gsi_option(
                table_key, gsi_key,
                'increase_throttled_by_provisioned_writes_unit')
        increase_throttled_by_provisioned_writes_scale = \
            get_gsi_option(
                table_key, gsi_key,
                'increase_throttled_by_provisioned_writes_scale')
        increase_throttled_by_consumed_writes_unit = \
            get_gsi_option(
                table_key, gsi_key,
                'increase_throttled_by_consumed_writes_unit')
        increase_throttled_by_consumed_writes_scale = \
            get_gsi_option(
                table_key, gsi_key,
                'increase_throttled_by_consumed_writes_scale')
        increase_consumed_writes_unit = \
            get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_unit')
        increase_consumed_writes_with = \
            get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_with')
        increase_consumed_writes_scale = \
            get_gsi_option(
                table_key, gsi_key, 'increase_consumed_writes_scale')
        decrease_consumed_writes_unit = \
            get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_unit')
        decrease_consumed_writes_with = \
            get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_with')
        decrease_consumed_writes_scale = \
            get_gsi_option(
                table_key, gsi_key, 'decrease_consumed_writes_scale')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Start from the current provisioning
    updated_write_units = current_write_units

    # Reset the consecutive-check counter once consumption climbs back up
    if num_write_checks_reset_percent:
        if consumed_write_units_percent >= num_write_checks_reset_percent:
            logger.info(
                '{0} - GSI: {1} - Resetting the number of consecutive '
                'write checks. Reason: consumed percent {2} is '
                'greater than reset percent: {3}'.format(
                    table_name, gsi_name, consumed_write_units_percent,
                    num_write_checks_reset_percent))
            num_consec_write_checks = 0

    if not get_gsi_option(table_key, gsi_key, 'enable_writes_up_scaling'):
        logger.debug(
            '{0} - GSI: {1} - Scaling up writes has been disabled in '
            'the configuration'.format(table_name, gsi_name))
    else:
        # Fall back on the generic increase options where the more
        # specific consumed/throttled options are not configured
        increase_consumed_writes_unit = \
            increase_consumed_writes_unit or increase_writes_unit
        increase_throttled_by_provisioned_writes_unit = (
            increase_throttled_by_provisioned_writes_unit or
            increase_writes_unit)
        increase_throttled_by_consumed_writes_unit = \
            increase_throttled_by_consumed_writes_unit or increase_writes_unit
        increase_consumed_writes_with = \
            increase_consumed_writes_with or increase_writes_with

        throttled_by_provisioned_calculated_provisioning = scale_reader(
            increase_throttled_by_provisioned_writes_scale,
            throttled_by_provisioned_write_percent)
        throttled_by_consumed_calculated_provisioning = scale_reader(
            increase_throttled_by_consumed_writes_scale,
            throttled_by_consumed_write_percent)
        consumed_calculated_provisioning = scale_reader(
            increase_consumed_writes_scale,
            consumed_write_units_percent)
        throttled_count_calculated_provisioning = 0
        calculated_provisioning = 0

        if throttled_by_provisioned_calculated_provisioning:
            if increase_throttled_by_provisioned_writes_unit == 'percent':
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
            else:
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))

        if throttled_by_consumed_calculated_provisioning:
            if increase_throttled_by_consumed_writes_unit == 'percent':
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
            else:
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))

        if consumed_calculated_provisioning:
            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
        elif (writes_upper_threshold and
                consumed_write_units_percent > writes_upper_threshold and
                not increase_consumed_writes_scale):
            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))

        if (throttled_writes_upper_threshold and
                throttled_write_count > throttled_writes_upper_threshold):
            if increase_writes_unit == 'percent':
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        updated_write_units,
                        increase_writes_with,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))
            else:
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        updated_write_units,
                        increase_writes_with,
                        get_gsi_option(
                            table_key, gsi_key, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        '{0} - GSI: {1}'.format(table_name, gsi_name))

        # Take the highest of the calculated provisioning values
        if (throttled_by_provisioned_calculated_provisioning >
                calculated_provisioning):
            calculated_provisioning = \
                throttled_by_provisioned_calculated_provisioning
            scale_reason = (
                "due to throttled events by provisioned "
                "units threshold being exceeded")

        if (throttled_by_consumed_calculated_provisioning >
                calculated_provisioning):
            calculated_provisioning = \
                throttled_by_consumed_calculated_provisioning
            scale_reason = (
                "due to throttled events by consumed "
                "units threshold being exceeded")

        if consumed_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = consumed_calculated_provisioning
            scale_reason = "due to consumed threshold being exceeded"

        if throttled_count_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = throttled_count_calculated_provisioning
            scale_reason = "due to throttled events threshold being exceeded"

        if calculated_provisioning > current_write_units:
            logger.info(
                '{0} - GSI: {1} - Resetting the number of consecutive '
                'write checks. Reason: scale up {2}'.format(
                    table_name, gsi_name, scale_reason))
            num_consec_write_checks = 0
            update_needed = True
            updated_write_units = calculated_provisioning

    # Only consider scaling down if there was no reason to scale up
    if not update_needed:
        decrease_consumed_writes_unit = \
            decrease_consumed_writes_unit or decrease_writes_unit
        decrease_consumed_writes_with = \
            decrease_consumed_writes_with or decrease_writes_with

        consumed_calculated_provisioning = scale_reader_decrease(
            decrease_consumed_writes_scale,
            consumed_write_units_percent)
        calculated_provisioning = None

        if not get_gsi_option(
                table_key, gsi_key, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - GSI: {1} - Scaling down writes has been disabled in '
                'the configuration'.format(table_name, gsi_name))
        elif (consumed_write_units_percent == 0 and not get_gsi_option(
                table_key, gsi_key,
                'allow_scaling_down_writes_on_0_percent')):
            logger.info(
                '{0} - GSI: {1} - Scaling down writes is not done when '
                'usage is at 0%'.format(table_name, gsi_name))
        else:
            if consumed_calculated_provisioning:
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_gsi_option(
                                table_key, gsi_key, 'min_provisioned_writes'),
                            '{0} - GSI: {1}'.format(table_name, gsi_name))
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_gsi_option(
                                table_key, gsi_key, 'min_provisioned_writes'),
                            '{0} - GSI: {1}'.format(table_name, gsi_name))
            elif (writes_lower_threshold and
                    consumed_write_units_percent < writes_lower_threshold and
                    not decrease_consumed_writes_scale):
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_gsi_option(
                                table_key, gsi_key, 'min_provisioned_writes'),
                            '{0} - GSI: {1}'.format(table_name, gsi_name))
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_gsi_option(
                                table_key, gsi_key, 'min_provisioned_writes'),
                            '{0} - GSI: {1}'.format(table_name, gsi_name))

            if (calculated_provisioning and
                    current_write_units != calculated_provisioning):
                num_consec_write_checks += 1
                if num_consec_write_checks >= \
                        num_write_checks_before_scale_down:
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Enforce the configured max/min provisioning limits
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info(
                '{0} - GSI: {1} - Will not increase writes over the '
                'max-provisioned-writes limit ({2} writes)'.format(
                    table_name, gsi_name, updated_write_units))

    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - GSI: {1} - Increasing writes to match the '
                'min-provisioned-writes limit ({2} writes)'.format(
                    table_name, gsi_name, updated_write_units))

    if calculators.is_consumed_over_proposed(
            current_write_units,
            updated_write_units,
            consumed_write_units_percent):
        update_needed = False
        updated_write_units = current_write_units
        logger.info(
            '{0} - GSI: {1} - Consumed is over proposed write units. Will '
            'leave the GSI at its current setting.'.format(
                table_name, gsi_name))

    logger.info('{0} - GSI: {1} - Consecutive write checks {2}/{3}'.format(
        table_name, gsi_name, num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
Ensure that provisioning of writes is correct :type table_name: str :param table_name: Name of the DynamoDB table :type table_key: str :param table_key: Table configuration option key name :type gsi_name: str :param gsi_name: Name of the GSI :type gsi_key: str :param gsi_key: Configuration option key name :type num_consec_write_checks: int :param num_consec_write_checks: How many consecutive checks have we had :returns: (bool, int, int) update_needed, updated_write_units, num_consec_write_checks
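The scale_reader / scale_reader_decrease helpers used above are not shown in this extract; roughly, a scale maps utilisation thresholds to provisioning values, and the reader returns the value configured for the highest threshold the current reading has passed. A minimal sketch (the dict layout is an assumption, not the library's actual format):

def scale_reader_sketch(scale, current_value):
    # Return the provisioning configured for the highest threshold that
    # current_value has reached; 0 if no threshold is reached.
    provisioning = 0
    for threshold in sorted(scale):
        if current_value >= threshold:
            provisioning = scale[threshold]
    return provisioning

# At 85% consumed capacity this picks the 80% tier:
print(scale_reader_sketch({50: 30, 80: 100}, 85))  # -> 100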
2,095
def im2mat(I): return I.reshape((I.shape[0] * I.shape[1], I.shape[2]))
Converts an image to a matrix (one pixel per line)
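For instance:

import numpy as np

I = np.zeros((4, 3, 3))  # a 4x3 RGB image
print(im2mat(I).shape)   # (12, 3): one pixel per row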
2,096
def _rds_cluster_tags(model, dbs, session_factory, generator, retry):
    client = local_session(session_factory).client('rds')

    def process_tags(db):
        try:
            db['Tags'] = retry(
                client.list_tags_for_resource,
                ResourceName=generator(db[model.id]))['TagList']
            return db
        except client.exceptions.DBClusterNotFoundFault:
            return None

    return list(filter(None, map(process_tags, dbs)))
Augment rds clusters with their respective tags.
2,097
def revoke(self, auth, codetype, code, defer=False):
    return self._call('revoke', auth, [codetype, code], defer)
Given an activation code, the associated entity is revoked after which the activation code can no longer be used. Args: auth: Takes the owner's cik codetype: The type of code to revoke (client | share) code: Code specified by <codetype> (cik | share-activation-code)
2,098
def _get_model_parameters_estimations(self, error_model):
    if error_model.dependance == NIDM_INDEPEDENT_ERROR:
        if error_model.variance_homo:
            estimation_method = STATO_OLS
        else:
            estimation_method = STATO_WLS
    else:
        estimation_method = STATO_GLS

    mpe = ModelParametersEstimation(estimation_method, self.software.id)

    return mpe
Infer model estimation method from the 'error_model'. Return an object of type ModelParametersEstimation.
2,099
def order_assets(self, asset_ids, composition_id):
    if (not isinstance(composition_id, ABCId) and
            composition_id.get_identifier_namespace() != 'repository.Composition'):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    composition_map, collection = self._get_composition_collection(composition_id)
    composition_map['assetIds'] = order_ids(asset_ids, composition_map['assetIds'])
    collection.save(composition_map)
Reorders a set of assets in a composition. arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of ``Assets`` arg: composition_id (osid.id.Id): ``Id`` of the ``Composition`` raise: NotFound - ``composition_id`` not found or, an ``asset_id`` not related to ``composition_id`` raise: NullArgument - ``asset_ids`` or ``composition_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*