Dataset columns: "Unnamed: 0" (int64, 0 – 389k), "code" (string, lengths 26 – 79.6k), "docstring" (string, lengths 1 – 46.9k).
382,100
def execute(self, transition):
    self._transitions.append(transition)
    if self._thread is None or not self._thread.isAlive():
        self._thread = threading.Thread(target=self._transition_loop)
        self._thread.setDaemon(True)
        self._thread.start()
Queue a transition for execution. :param transition: The transition
382,101
def _isinstance(self, model, raise_error=True):
    rv = isinstance(model, self.__model__)
    if not rv and raise_error:
        # Message text reconstructed; the original format string was lost.
        raise ValueError('%s is not an instance of %s' % (model, self.__model__))
    return rv
Checks if the specified model instance matches the class model. By default this method will raise a `ValueError` if the model is not of expected type. Args: model (Model) : The instance to be type checked raise_error (bool) : Flag to specify whether to raise error on type check failure Raises: ValueError: If `model` is not an instance of the respective Model class
382,102
def Search(self, key):
    results = []
    for alert in self.alerts:
        if alert.id.lower().find(key.lower()) != -1:
            results.append(alert)
        elif alert.name.lower().find(key.lower()) != -1:
            results.append(alert)
    return results
Search alert list by providing partial name, ID, or other key.
382,103
def argval(self):
    if self.arg is None or any(x is None for x in self.arg):
        return None
    for x in self.arg:
        if not isinstance(x, int):
            raise InvalidArgError(self.arg)
    return self.arg
Returns the value of the arg (if any) or None. If the arg is not an integer, an error is triggered.
382,104
def top_n_list(lang, n, wordlist='best', ascii_only=False):
    # The default wordlist name and the ASCII upper bound ('~') are assumed;
    # the original string literals were lost.
    results = []
    for word in iter_wordlist(lang, wordlist):
        if (not ascii_only) or max(word) <= '~':
            results.append(word)
        if len(results) >= n:
            break
    return results
Return a frequency list of length `n` in descending order of frequency. This list contains words from `wordlist`, of the given language. If `ascii_only`, then only ascii words are considered.
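A brief usage sketch, assuming the function is imported from the word-frequency package it belongs to (the exact words returned depend on the installed wordlist data):

top_n_list('en', 5)                    # e.g. ['the', 'of', 'to', 'and', 'a']
top_n_list('ja', 10, ascii_only=True)  # only words made entirely of printable ASCII characters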
382,105
def upsert_entities(self, entities, sync=False): if entities: select_for_update_query = ( ).format( table_name=Entity._meta.db_table ) select_for_update_query_params = [] if not sync: select_for_update_query = ( ).format( table_name=Entity._meta.db_table ) select_for_update_query_params = [tuple( (entity.entity_type_id, entity.entity_id) for entity in entities )] with connection.cursor() as cursor: cursor.execute(select_for_update_query, select_for_update_query_params) if sync: upserted_entities = manager_utils.sync( queryset=Entity.all_objects.all(), model_objs=entities, unique_fields=[, ], update_fields=[, , , ], return_upserts=True ) else: upserted_entities = manager_utils.bulk_upsert( queryset=Entity.all_objects.extra( where=[], params=[tuple( (entity.entity_type_id, entity.entity_id) for entity in entities )] ), model_objs=entities, unique_fields=[, ], update_fields=[, , , ], return_upserts=True ) return upserted_entities
Upsert a list of entities to the database :param entities: The entities to sync :param sync: Do a sync instead of an upsert
382,106
def combine(items, k=None):
    length_items = len(items)
    lengths = [len(i) for i in items]
    length = reduce(lambda x, y: x * y, lengths)
    repeats = [reduce(lambda x, y: x * y, lengths[i:])
               for i in range(1, length_items)] + [1]
    if k is not None:
        k = k % length
        indices = [old_div((k % (lengths[i] * repeats[i])), repeats[i])
                   for i in range(length_items)]
        return [items[i][indices[i]] for i in range(length_items)]
    else:
        matrix = []
        for i, item in enumerate(items):
            row = []
            for subset in item:
                row.extend([subset] * repeats[i])
            times = old_div(length, len(row))
            matrix.append(row * times)
        return list(zip(*matrix))
Create a matrix in which each row is a tuple containing one of the combinations; if `k` is given, return only the k-th combination (see the worked example below).
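A worked example, traced from the code above (which effectively enumerates the Cartesian product of the input lists):

combine([[1, 2], ['a', 'b']])
# -> [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]   (every combination, one per row)
combine([[1, 2], ['a', 'b']], k=2)
# -> [2, 'a']   (only the k-th combination; k is taken modulo the total count)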
382,107
def _validate(data_type, parent_path): if isinstance(data_type, _CLASS_TYPES): raise TypeError( "The data type is expected to be an instance object, but got the " "type instead." % (_format_type(data_type),)) base = _find_base_type(data_type) if not base: raise TypeError( "Objects of type arenname%s.%s%s.%sNone%s.%sNone%s.%s%s%s.fields%s%s.fields%s%s.fieldsname%s%s.fieldstype%s%s.fieldsread_onlybool%s%s.%s%s' was provided multiple times." % (duplicates[0]),) return True
Implementation for the `validate` function.
382,108
def _output_from_file(self, entry='version'):
    # The default entry name, the '.version' file name and the read mode are
    # assumed from the docstring; the original literals were lost.
    try:
        vfile = os.path.join(os.path.dirname(self.fpath), '.version')
        with open(vfile, 'r') as f:
            return json.loads(f.read()).get(entry, None)
    except:
        return None
Read the version from a .version file that may exist alongside __init__.py. This file can be generated by piping the following output to file: git describe --long --match v*.*
382,109
def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
    del weights_fn  # unused
    _, pearson = tf.contrib.metrics.streaming_pearson_correlation(
        predictions, labels)
    return pearson, tf.constant(1.0)
Calculate pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The pearson correlation coefficient.
382,110
def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
    destdb = destslab.initdb(destdbname, sourcedb.dupsort)
    statdict = destslab.stat(db=destdb)
    if statdict['entries'] > 0:
        raise s_exc.DataAlreadyExists()
    rowcount = 0
    for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):
        ccount, acount = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)
        if ccount != len(chunk) or acount != len(chunk):
            # Error message reconstructed; the original text was lost.
            raise s_exc.BadCoreStore(mesg='unexpected number of rows written during copydb')
        rowcount += len(chunk)
        if progresscb is not None and 0 == (rowcount % PROGRESS_PERIOD):
            progresscb(rowcount)
    return rowcount
Copy an entire database in this slab to a new database in potentially another slab. Args: sourcedb (LmdbDatabase): which database in this slab to copy rows from destslab (LmdbSlab): which slab to copy rows to destdbname (str): the name of the database to copy rows to in destslab progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows completed Returns: (int): the number of rows copied Note: If any rows already exist in the target database, this method returns an error. This means that one cannot use destdbname=None unless there are no explicit databases in the destination slab.
382,111
def get_translated_items(fapi, file_uri, use_cache, cache_dir=None):
    items = None
    cache_file = os.path.join(cache_dir, sha1(file_uri)) if use_cache else None
    if use_cache and os.path.exists(cache_file):
        print("Using cache file %s for translated items for: %s" % (cache_file, file_uri))
        items = json.loads(read_from_file(cache_file))
    if not items:
        print("Downloading %s from smartling" % file_uri)
        (response, code) = fapi.last_modified(file_uri)
        items = response.data.items
        if cache_file:
            print("Caching %s to %s" % (file_uri, cache_file))
            write_to_file(cache_file, json.dumps(items))
    return items
Returns the last-modified items from Smartling.
382,112
def check(cls, dap, network=False, yamls=True, raises=False, logger=logger):
    dap._check_raises = raises
    dap._problematic = False
    dap._logger = logger
    problems = list()
    problems += cls.check_meta(dap)
    problems += cls.check_no_self_dependency(dap)
    problems += cls.check_topdir(dap)
    problems += cls.check_files(dap)
    if yamls:
        problems += cls.check_yamls(dap)
    if network:
        problems += cls.check_name_not_on_dapi(dap)
    for problem in problems:
        dap._report_problem(problem.message, problem.level)
    del dap._check_raises
    return not dap._problematic
Checks if the dap is valid and reports problems. Parameters: network -- whether to run checks that require a network connection; output -- where to write() problems, may be None; raises -- whether to raise an exception immediately after a problem is detected.
382,113
def key_pair(i, region):
    if i == 0:
        return ("{}_{}".format(RAY, region),
                os.path.expanduser("~/.ssh/{}_{}.pem".format(RAY, region)))
    return ("{}_{}_{}".format(RAY, i, region),
            os.path.expanduser("~/.ssh/{}_{}_{}.pem".format(RAY, i, region)))
Returns the ith default (aws_key_pair_name, key_pair_path).
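A hedged usage sketch; the value of the module-level RAY constant is an assumption (it is defined elsewhere in the source):

RAY = "ray-autoscaler"  # assumed constant value

key_pair(0, "us-west-2")
# -> ("ray-autoscaler_us-west-2", "<home>/.ssh/ray-autoscaler_us-west-2.pem")
key_pair(2, "us-west-2")
# -> ("ray-autoscaler_2_us-west-2", "<home>/.ssh/ray-autoscaler_2_us-west-2.pem")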
382,114
def generate_single_return_period(args): qout_file, return_period_file, rivid_index_list, step, num_years, \ method, mp_lock = args skewvals = [-3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, -1.2, -1.0, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0] kfac2 = [0.396, 0.384, 0.368, 0.351, 0.33, 0.307, 0.282, 0.254, 0.225, 0.195, 0.164, 0.132, 0.099, 0.066, 0.033, 0, -0.033, -0.066, -0.099, -0.132, -0.164, -0.195, -0.225, -0.254, -0.282, -0.307, -0.33, -0.351, -0.368, -0.384, -0.396] kfac10 = [0.66, 0.702, 0.747, 0.795, 0.844, 0.895, 0.945, 0.994, 1.041, 1.086, 1.128, 1.166, 1.2, 1.231, 1.258, 1.282, 1.301, 1.317, 1.328, 1.336, 1.34, 1.34, 1.337, 1.329, 1.318, 1.302, 1.284, 1.262, 1.238, 1.21, 1.18] kfac25 = [.666, .712, .764, .823, .888, .959, 1.035, 1.116, 1.198, 1.282, 1.366, 1.448, 1.528, 1.606, 1.680, 1.751, 1.818, 1.880, 1.939, 1.993, 2.043, 2.087, 2.128, 2.163, 2.193, 2.219, 2.240, 2.256, 2.267, 2.275, 2.278] kfac50 = [0.666, 0.714, 0.768, 0.83, 0.9, 0.98, 1.069, 1.166, 1.27, 1.379, 1.492, 1.606, 1.72, 1.834, 1.945, 2.054, 2.159, 2.261, 2.359, 2.453, 2.542, 2.626, 2.706, 2.78, 2.848, 2.912, 2.97, 3.023, 3.071, 3.114, 3.152] kfac100 = [0.667, 0.714, 0.769, 0.832, 0.905, 0.99, 1.087, 1.197, 1.318, 1.499, 1.588, 1.733, 1.88, 2.029, 2.178, 2.326, 2.472, 2.615, 2.755, 2.891, 3.022, 3.149, 3.271, 3.388, 3.499, 3.605, 3.705, 3.8, 3.889, 3.973, 4.051] with RAPIDDataset(qout_file) as qout_nc_file: if method == : rp_index_20 = int((num_years + 1)/20.0) rp_index_10 = int((num_years + 1)/10.0) rp_index_2 = int((num_years + 1)/2.0) if method == : return_20_array = np.zeros(len(rivid_index_list)) elif method == : return_100_array = np.zeros(len(rivid_index_list)) return_50_array = np.zeros(len(rivid_index_list)) return_20_array = np.zeros(len(rivid_index_list)) elif method == : return_100_array = np.zeros(len(rivid_index_list)) return_50_array = np.zeros(len(rivid_index_list)) return_25_array = np.zeros(len(rivid_index_list)) return_10_array = np.zeros(len(rivid_index_list)) return_2_array = np.zeros(len(rivid_index_list)) max_flow_array = np.zeros(len(rivid_index_list)) for iter_idx, rivid_index in enumerate(rivid_index_list): filtered_flow_data = qout_nc_file.get_qout_index( rivid_index, pd_filter="{0}D".format(step), filter_mode="max") sorted_flow_data = np.sort(filtered_flow_data)[:num_years:-1] max_flow = sorted_flow_data[0] if max_flow < 0.01: log("Return period data < 0.01 generated for rivid {0}" .format(qout_nc_file.qout_nc.variables[ qout_nc_file.river_id_dimension][rivid_index]), "WARNING") max_flow_array[iter_idx] = max_flow if method == : return_20_array[iter_idx] = sorted_flow_data[rp_index_20] return_10_array[iter_idx] = sorted_flow_data[rp_index_10] return_2_array[iter_idx] = sorted_flow_data[rp_index_2] elif method == : mean_flow = np.mean(filtered_flow_data) stddev = np.std(filtered_flow_data) return_100_array[iter_idx] = mean_flow + 3.14*stddev return_50_array[iter_idx] = mean_flow + 2.59*stddev return_20_array[iter_idx] = mean_flow + 1.87*stddev return_10_array[iter_idx] = mean_flow + 1.3*stddev return_2_array[iter_idx] = mean_flow - .164*stddev elif method == : log_flow = np.log10(filtered_flow_data[filtered_flow_data > 0]) if len(log_flow) <= 0: continue mean_log_flow = np.mean(log_flow) std_log_flow = np.std(log_flow) log_flow_array = np.array(log_flow) skew = (num_years * (np.sum( np.power((log_flow_array - mean_log_flow), 3)))) / \ ((num_years - 1) * (num_years - 2) * std_log_flow ** 3) k2 = np.interp(skew, 
skewvals, kfac2) k10 = np.interp(skew, skewvals, kfac10) k25 = np.interp(skew, skewvals, kfac25) k50 = np.interp(skew, skewvals, kfac50) k100 = np.interp(skew, skewvals, kfac100) return_100_array[iter_idx] = \ np.power(10, (mean_log_flow + k100*std_log_flow)) return_50_array[iter_idx] = \ np.power(10, (mean_log_flow + k50*std_log_flow)) return_25_array[iter_idx] = \ np.power(10, (mean_log_flow + k25*std_log_flow)) return_10_array[iter_idx] = \ np.power(10, (mean_log_flow + k10*std_log_flow)) return_2_array[iter_idx] = \ np.power(10, (mean_log_flow + k2*std_log_flow)) mp_lock.acquire() return_period_nc = Dataset(return_period_file, ) return_period_nc.variables[][rivid_index_list] = \ max_flow_array if method == : return_period_nc.variables[][ rivid_index_list] = return_20_array elif method in : return_period_nc.variables[][ rivid_index_list] = return_100_array return_period_nc.variables[][ rivid_index_list] = return_50_array return_period_nc.variables[][ rivid_index_list] = return_20_array elif method == : return_period_nc.variables[][ rivid_index_list] = return_100_array return_period_nc.variables[][ rivid_index_list] = return_50_array return_period_nc.variables[][ rivid_index_list] = return_25_array return_period_nc.variables[][ rivid_index_list] = return_10_array return_period_nc.variables[][ rivid_index_list] = return_2_array return_period_nc.close() mp_lock.release()
This function calculates a single return period for a single reach
382,115
def startMultiple(self, zones):
    # The endpoint path and payload key are assumptions; the original literals were lost.
    path = 'zone/start_multiple'
    payload = {'zones': zones}
    return self.rachio.put(path, payload)
Start multiple zones.
382,116
def reset(self, params, repetition): print params["name"], ":", repetition self.debug = params.get("debug", False) L2Params = json.loads( + params["l2_params"] + ) L4Params = json.loads( + params["l4_params"] + ) L6aParams = json.loads( + params["l6a_params"] + ) seed = params.get("seed", 42) np.random.seed(seed + repetition) random.seed(seed + repetition) L2Params["seed"] = seed + repetition L4Params["seed"] = seed + repetition L6aParams["seed"] = seed + repetition numModules = params["num_modules"] L6aParams["scale"] = [params["scale"]] * numModules angle = params["angle"] / numModules orientation = range(angle / 2, angle * numModules, angle) L6aParams["orientation"] = np.radians(orientation).tolist() self.numColumns = params["num_cortical_columns"] network = Network() network = createMultipleL246aLocationColumn(network=network, numberOfColumns=self.numColumns, L2Params=L2Params, L4Params=L4Params, L6aParams=L6aParams) network.initialize() self.network = network self.sensorInput = [] self.motorInput = [] self.L2Regions = [] self.L4Regions = [] self.L6aRegions = [] for i in xrange(self.numColumns): col = str(i) self.sensorInput.append(network.regions["sensorInput_" + col].getSelf()) self.motorInput.append(network.regions["motorInput_" + col].getSelf()) self.L2Regions.append(network.regions["L2_" + col]) self.L4Regions.append(network.regions["L4_" + col]) self.L6aRegions.append(network.regions["L6a_" + col]) numObjects = params["iterations"] numFeatures = params["num_features"] numOfMinicolumns = L4Params["columnCount"] numOfActiveMinicolumns = params["num_active_minicolumns"] self.featureSDR = [{ str(f): sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns)) for f in xrange(numFeatures) } for _ in xrange(self.numColumns)] self.objects = generateObjects(numObjects=numObjects, featuresPerObject=params["features_per_object"], objectWidth=params["object_width"], numFeatures=numFeatures, distribution=params["feature_distribution"]) uniqueObjs = np.unique([{"features": obj["features"]} for obj in self.objects]) assert len(uniqueObjs) == len(self.objects) self.sdrSize = L2Params["sdrSize"] self.numLearningPoints = params["num_learning_points"] self.numOfSensations = params["num_sensations"] self.learnedObjects = {} self.learn()
Take the steps necessary to reset the experiment before each repetition: - Make sure random seed is different for each repetition - Create the L2-L4-L6a network - Generate objects used by the experiment - Learn all objects used by the experiment
382,117
def metamodel_from_file(file_name, **kwargs):
    # File mode and encoding ('r', 'utf-8') are assumed; the original literals were lost.
    with codecs.open(file_name, 'r', 'utf-8') as f:
        lang_desc = f.read()
    metamodel = metamodel_from_str(lang_desc=lang_desc,
                                   file_name=file_name,
                                   **kwargs)
    return metamodel
Creates new metamodel from the given file. Args: file_name(str): The name of the file with textX language description. other params: See metamodel_from_str.
382,118
def set_console(stream=STDOUT, foreground=None, background=None, style=None):
    if foreground is None:
        foreground = _default_foreground
    if background is None:
        background = _default_background
    if style is None:
        style = _default_style
    attrs = get_attrs(foreground, background, style)
    SetConsoleTextAttribute(stream, attrs)
Set console foreground and background attributes.
382,119
def init(): if not os.path.exists(__opts__[]): log.debug(, __opts__[]) os.makedirs(__opts__[]) if not os.path.exists(__opts__[]): log.debug(, __opts__[]) sqlite3.enable_callback_tracebacks(True) conn = sqlite3.connect(__opts__[], isolation_level=None) try: conn.execute() except OperationalError: conn.execute() try: conn.execute() except OperationalError: conn.execute() return conn
Get an sqlite3 connection, and initialize the package database if necessary
382,120
def as_bits(region_start, region_length, intervals):
    bits = BitSet(region_length)
    for chr, start, stop in intervals:
        bits.set_range(start - region_start, stop - start)
    return bits
Convert a set of intervals overlapping a region of a chromosome into a bitset for just that region with the bits covered by the intervals set.
382,121
def _worker_thread_upload(self):
    max_set_len = self._general_options.concurrency.transfer_threads << 2
    while not self.termination_check:
        try:
            if len(self._transfer_set) > max_set_len:
                time.sleep(0.1)
                continue
            else:
                ud = self._upload_queue.get(block=False, timeout=0.1)
        except queue.Empty:
            continue
        try:
            self._process_upload_descriptor(ud)
        except Exception as e:
            with self._upload_lock:
                self._exceptions.append(e)
Worker thread upload :param Uploader self: this
382,122
def ping(self, message=None): return self.write(self.parser.ping(message), encode=False)
Write a ping ``frame``.
382,123
def _get_callable_from_trace_tuple( self, trace_tuple: TraceTuple ) -> Tuple[str, str]: trace_frame = trace_tuple.trace_frame if trace_tuple.placeholder: return trace_frame.caller, trace_frame.caller_port return trace_frame.callee, trace_frame.callee_port
Returns either (caller, caller_port) or (callee, callee_port).
382,124
def add_body(self, body):
    body.system = self
    self.bodies.append(body)
    self.unfrozen = np.concatenate((
        self.unfrozen[:-2], np.zeros(7, dtype=bool), self.unfrozen[-2:]
    ))
Add a :class:`Body` to the system. This function also sets the ``system`` attribute of the body. :param body: The :class:`Body` to add.
382,125
def load_graphs():
    mestate.graphs = []
    gfiles = []
    # The environment variable name ('HOME') and the '.xml' suffix are assumed;
    # the original literals were lost.
    if 'HOME' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))
    for file in gfiles:
        if not os.path.exists(file):
            continue
        graphs = load_graph_xml(open(file).read(), file)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % file)
    dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
    for f in dlist:
        raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
        graphs = load_graph_xml(raw, None)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % f)
    mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name)
load graphs from mavgraphs.xml
382,126
def main() -> None: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--outdir", help="output directory", default=os.path.dirname(__file__)) args = parser.parse_args() outdir = pathlib.Path(args.outdir) if not outdir.exists(): raise FileNotFoundError("Output directory is missing: {}".format(outdir)) for contracts in [0, 1, 5, 10]: if contracts == 0: pth = outdir / "functions_100_with_no_contract.py" elif contracts == 1: pth = outdir / "functions_100_with_1_contract.py" else: pth = outdir / "functions_100_with_{}_contracts.py".format(contracts) text = generate_functions(functions=100, contracts=contracts, disabled=False) pth.write_text(text) for contracts in [1, 5, 10]: if contracts == 1: pth = outdir / "functions_100_with_1_disabled_contract.py" else: pth = outdir / "functions_100_with_{}_disabled_contracts.py".format(contracts) text = generate_functions(functions=100, contracts=contracts, disabled=True) pth.write_text(text) for invariants in [0, 1, 5, 10]: if invariants == 0: pth = outdir / "classes_100_with_no_invariant.py" elif invariants == 1: pth = outdir / "classes_100_with_1_invariant.py" else: pth = outdir / "classes_100_with_{}_invariants.py".format(invariants) text = generate_classes(classes=100, invariants=invariants, disabled=False) pth.write_text(text) for invariants in [1, 5, 10]: if invariants == 1: pth = outdir / "classes_100_with_1_disabled_invariant.py" else: pth = outdir / "classes_100_with_{}_disabled_invariants.py".format(invariants) text = generate_classes(classes=100, invariants=invariants, disabled=True) pth.write_text(text)
Execute the main routine.
382,127
def get_boundaries_of_elements_in_dict(models_dict, clearance=0.): right = 0. bottom = 0. if in models_dict and models_dict[]: left = list(models_dict[].items())[0][1].get_meta_data_editor()[][0] top = list(models_dict[].items())[0][1].get_meta_data_editor()[][1] elif in models_dict and models_dict[]: left = list(models_dict[].items())[0][1].get_meta_data_editor()[][0] top = list(models_dict[].items())[0][1].get_meta_data_editor()[][1] else: all_ports = list(models_dict[].values()) + list(models_dict[].values()) + \ list(models_dict[].values()) + list(models_dict[].values()) if len(set([port_m.core_element.parent for port_m in all_ports])) == 1: logger.info("Only one parent {0} {1}".format(all_ports[0].core_element.parent, all_ports[0].parent.get_meta_data_editor())) if all_ports: left = all_ports[0].parent.get_meta_data_editor()[][0] top = all_ports[0].parent.get_meta_data_editor()[][1] else: raise ValueError("Get boundary method does not aspects all list elements empty in dictionary. {0}" "".format(models_dict)) def cal_max(max_x, max_y, rel_pos, size): max_x = size[0] + rel_pos[0] if size[0] + rel_pos[0] > max_x else max_x max_y = rel_pos[1] + size[1] if rel_pos[1] + size[1] > max_y else max_y return max_x, max_y def cal_min(min_x, min_y, rel_pos, size): min_x = rel_pos[0] if rel_pos[0] < min_x else min_x min_y = rel_pos[1] if rel_pos[1] < min_y else min_y return min_x, min_y parts = [, , ] for key in parts: elems_dict = models_dict[key] rel_positions = [] for model in elems_dict.values(): _size = (0., 0.) if key == : rel_positions = [model.get_meta_data_editor()[]] _size = model.get_meta_data_editor()[] elif key in [, , ]: rel_positions = [model.get_meta_data_editor()[]] elif key in [, ]: if key is "data_flows": rel_positions = mirror_waypoints(deepcopy(model.get_meta_data_editor()))[] else: rel_positions = model.get_meta_data_editor()[] for rel_position in rel_positions: if not contains_geometric_info(rel_position): continue right, bottom = cal_max(right, bottom, rel_position, _size) left, top = cal_min(left, top, rel_position, _size) left, right, top, bottom = add_boundary_clearance(left, right, top, bottom, {: (0., 0.)}, clearance) return left, right, top, bottom
Get the boundaries of all handed models. The function checks all model meta-data positions to grow the boundary, starting from a state or scoped variable. It then iterates over all state, data-port and logic-port models and their linkage, as far as needed for the respective graphical editor. Finally a clearance is added to the boundary if required, e.g. to secure a minimum size for OpenGL. :param models_dict: dict of all handed models :return: tuple of left, right, top and bottom value
382,128
def has_same_bins(self, other: "HistogramBase") -> bool:
    if self.shape != other.shape:
        return False
    elif self.ndim == 1:
        return np.allclose(self.bins, other.bins)
    elif self.ndim > 1:
        for i in range(self.ndim):
            if not np.allclose(self.bins[i], other.bins[i]):
                return False
        return True
Whether two histograms share the same binning.
382,129
def handle_pubcomp(self):
    self.logger.info("PUBCOMP received")
    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret
    evt = event.EventPubcomp(mid)
    self.push_event(evt)
    return NC.ERR_SUCCESS
Handle incoming PUBCOMP packet.
382,130
def run_step(context):
    logger.debug("started")
    deprecated(context)
    # The context key name ('fileReplace') is assumed from the docstring;
    # the original literal was lost.
    StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step()
    logger.debug("done")
Parse input file and replace a search string. This also does string substitutions from context on the fileReplacePairs. It does this before it search & replaces the in file. Be careful of order. If fileReplacePairs is not an ordered collection, replacements could evaluate in any given order. If this is coming in from pipeline yaml it will be an ordered dictionary, so life is good. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileReplace - in. mandatory. str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. - out. optional. path-like. Can refer to a file or a directory. will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. If out_path is not specified or None, will in-place edit and overwrite the in-files. - replacePairs. mandatory. Dictionary where items are: 'find_string': 'replace_string' Returns: None. Raises: FileNotFoundError: take a guess pypyr.errors.KeyNotInContextError: Any of the required keys missing in context. pypyr.errors.KeyInContextHasNoValueError: Any of the required keys exists but is None.
382,131
async def close(self) -> None: LOGGER.debug() if self.cfg.get(, False): await self.load_cache(True) Caches.purge_archives(self.dir_cache, True) await super().close() for path_rr_id in Tails.links(self._dir_tails): rr_id = basename(path_rr_id) try: await self._sync_revoc(rr_id) except ClosedPool: LOGGER.warning(, rr_id) LOGGER.debug()
Explicit exit. If so configured, populate cache to prove all creds in wallet offline if need be, archive cache, and purge prior cache archives. :return: current object
382,132
def to_mask(self, method='exact', subpixels=5):
    # The default method ('exact') follows the docstring; the attribute-name
    # literals ('r', 'r_out', 'r_in') are inferred from the attribute accesses
    # below, and the error message is reconstructed.
    use_exact, subpixels = self._translate_mask_mode(method, subpixels)
    if hasattr(self, 'r'):
        radius = self.r
    elif hasattr(self, 'r_out'):
        radius = self.r_out
    else:
        raise ValueError('Cannot determine the aperture radius.')
    masks = []
    for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
        ny, nx = bbox.shape
        mask = circular_overlap_grid(edges[0], edges[1], edges[2], edges[3],
                                     nx, ny, radius, use_exact, subpixels)
        if hasattr(self, 'r_in'):
            mask -= circular_overlap_grid(edges[0], edges[1], edges[2], edges[3],
                                          nx, ny, self.r_in, use_exact, subpixels)
        masks.append(ApertureMask(mask, bbox))
    return masks
Return a list of `~photutils.ApertureMask` objects, one for each aperture position. Parameters ---------- method : {'exact', 'center', 'subpixel'}, optional The method used to determine the overlap of the aperture on the pixel grid. Not all options are available for all aperture types. Note that the more precise methods are generally slower. The following methods are available: * ``'exact'`` (default): The the exact fractional overlap of the aperture and each pixel is calculated. The returned mask will contain values between 0 and 1. * ``'center'``: A pixel is considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. The returned mask will contain values only of 0 (out) and 1 (in). * ``'subpixel'``: A pixel is divided into subpixels (see the ``subpixels`` keyword), each of which are considered to be entirely in or out of the aperture depending on whether its center is in or out of the aperture. If ``subpixels=1``, this method is equivalent to ``'center'``. The returned mask will contain values between 0 and 1. subpixels : int, optional For the ``'subpixel'`` method, resample pixels by this factor in each dimension. That is, each pixel is divided into ``subpixels ** 2`` subpixels. Returns ------- mask : list of `~photutils.ApertureMask` A list of aperture mask objects.
382,133
def to_iso8601(dt, tz=None):
    if tz is not None:
        dt = dt.replace(tzinfo=tz)
    iso8601 = dt.isoformat()
    return iso8601
Returns an ISO-8601 representation of a given datetime instance. >>> to_iso8601(datetime.datetime.now()) '2014-10-01T23:21:33.718508Z' :param dt: a :class:`~datetime.datetime` instance :param tz: a :class:`~datetime.tzinfo` to use; if None - use a default one
382,134
def list(self, filter_title=None, filter_ids=None, page=None): filters = [ .format(filter_title) if filter_title else None, .format(.join([str(dash_id) for dash_id in filter_ids])) if filter_ids else None, .format(page) if page else None ] return self._get( url=.format(self.URL), headers=self.headers, params=self.build_param_string(filters) )
:type filter_title: str :param filter_title: Filter by dashboard title :type filter_ids: list of ints :param filter_ids: Filter by dashboard ids :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'page' key if there are paginated results :: { "dashboards": [ { "id": "integer", "title": "string", "description": "string", "icon": "string", "created_at": "time", "updated_at": "time", "visibility": "string", "editable": "string", "ui_url": "string", "api_url": "string", "owner_email": "string", "filter": { "event_types": ["string"], "attributes": ["string"] } } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100", "rel": "next" } } }
382,135
def _apply_orthogonal_view(self):
    left, right, bottom, top = self.get_view_coordinates()
    glOrtho(left, right, bottom, top, -10, 0)
Orthogonal view with respect to current aspect ratio
382,136
def calculate_retry_delay(attempt, max_delay=300):
    delay = int(random.uniform(2, 4) ** attempt)
    if delay > max_delay:
        delay = int(random.uniform(max_delay - 20, max_delay + 20))
    return delay
Calculates an exponential backoff for retry attempts with a small amount of jitter.
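A short numeric sketch of how the delay grows (the base is drawn uniformly from [2, 4], so the figures below are only indicative):

# attempt 1: uniform(2, 4) ** 1 -> roughly 2-4 seconds
# attempt 3: uniform(2, 4) ** 3 -> roughly 8-64 seconds
# attempt 5: uniform(2, 4) ** 5 -> roughly 32-1024 seconds, then capped with jitter around max_delay
calculate_retry_delay(5)                # e.g. 293 (jittered around the 300 s default cap)
calculate_retry_delay(2, max_delay=60)  # e.g. 9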
382,137
def to_dict(self): data = {"model": {}} data["model"]["description"] = self.description data["model"]["entity_name"] = self.entity_name data["model"]["package"] = self.package data["model"]["resource_name"] = self.resource_name data["model"]["rest_name"] = self.rest_name data["model"]["extends"] = self.extends data["model"]["get"] = self.allows_get data["model"]["update"] = self.allows_update data["model"]["create"] = self.allows_create data["model"]["delete"] = self.allows_delete data["model"]["root"] = self.is_root data["model"]["userlabel"] = self.userlabel data["model"]["template"] = self.template data["model"]["allowed_job_commands"] = self.allowed_job_commands data["attributes"] = [] for attribute in self.attributes: data["attributes"].append(attribute.to_dict()) data["children"] = [] for api in self.child_apis: data["children"].append(api.to_dict()) return data
Transform the current specification to a dictionary
382,138
def declare_example(self, source):
    with patch_modules():
        code = compile(source, "<docs>", "exec")
        exec(code, self.namespace)
Execute the given code, adding it to the runner's namespace.
382,139
def recalculate_satistics(self): pars gframe = wx.BusyInfo( "Re-calculating statistics for all specimens\n Please wait..", self) for specimen in list(self.Data.keys()): if not in list(self.Data[specimen].keys()): continue if not in list(self.Data[specimen][].keys()): continue tmin = self.Data[specimen][][] tmax = self.Data[specimen][][] pars = thellier_gui_lib.get_PI_parameters( self.Data, self.acceptance_criteria, self.preferences, specimen, tmin, tmax, self.GUI_log, THERMAL, MICROWAVE) self.Data[specimen][] = pars self.Data[specimen][][] = self.Data[specimen][] self.Data[specimen][][] = self.Data[specimen][] self.Data[specimen][][] = self.Data[specimen][] del gframe
update self.Data[specimen]['pars'] for all specimens.
382,140
def init_heartbeat(self):
    hb_ctx = zmq.Context()
    self.heartbeat = Heartbeat(hb_ctx, (self.ip, self.hb_port))
    self.hb_port = self.heartbeat.port
    self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
    self.heartbeat.start()
    self.log.critical("To connect another client to this kernel, use:")
start the heart beating
382,141
def _bind_method(self, name, unconditionally=False):
    # The 'exist' function name, result key, error message and version prefix
    # are assumptions; the original string literals were lost.
    exists = self.run_func('exist', name)['result'] in [2, 3, 5]
    if not unconditionally and not exists:
        raise AttributeError("'Matlab' object has no attribute '%s'" % name)
    method_instance = MatlabFunction(weakref.ref(self), name)
    method_instance.__name__ = name
    if sys.version.startswith('3'):
        method = types.MethodType(method_instance, weakref.ref(self))
    else:
        method = types.MethodType(method_instance, weakref.ref(self), _Session)
    setattr(self, name, method)
    return getattr(self, name)
Generate a Matlab function and bind it to the instance This is where the magic happens. When an unknown attribute of the Matlab class is requested, it is assumed to be a call to a Matlab function, and is generated and bound to the instance. This works because getattr() falls back to __getattr__ only if no attributes of the requested name can be found through normal routes (__getattribute__, __dict__, class tree). bind_method first checks whether the requested name is a callable Matlab function before generating a binding. Parameters ---------- name : str The name of the Matlab function to call e.g. 'sqrt', 'sum', 'svd', etc unconditionally : bool, optional Bind the method without performing checks. Used to bootstrap methods that are required and know to exist Returns ------- MatlabFunction A reference to a newly bound MatlabFunction instance if the requested name is determined to be a callable function Raises ------ AttributeError: if the requested name is not a callable Matlab function
382,142
def _get_arrays(self, wavelengths, **kwargs):
    x = self._validate_wavelengths(wavelengths)
    y = self(x, **kwargs)
    if isinstance(wavelengths, u.Quantity):
        w = x.to(wavelengths.unit, u.spectral())
    else:
        w = x
    return w, y
Get sampled spectrum or bandpass in user units.
382,143
def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
    # The return-dict key ('deleted') and error helper are assumed from the
    # surrounding Salt conventions; the original literals were lost.
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_api_key(apiKey=apiKey)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
Deletes a given apiKey CLI Example: .. code-block:: bash salt myminion boto_apigateway.delete_api_key apikeystring
382,144
def create(self, name): return Bucket(name, context=self._context).create(self._project_id)
Creates a new bucket. Args: name: a unique name for the new bucket. Returns: The newly created bucket. Raises: Exception if there was an error creating the bucket.
382,145
def _prime_user_perm_caches(self):
    perm_cache, group_perm_cache = self._get_user_cached_perms()
    self.user._authority_perm_cache = perm_cache
    self.user._authority_group_perm_cache = group_perm_cache
    self.user._authority_perm_cache_filled = True
Prime both the user and group caches and put them on the ``self.user``. In addition add a cache filled flag on ``self.user``.
382,146
def set_continue(self, name, action, seqno, value=None, default=False, disable=False):
    # The command strings and the ValueError message are reconstructed;
    # the original literals were lost.
    commands = ['route-map %s %s %s' % (name, action, seqno)]
    if default:
        commands.append('default continue')
    elif disable:
        commands.append('no continue')
    else:
        if not str(value).isdigit() or value < 1:
            raise ValueError('continue value must be a positive integer')
        commands.append('continue %s' % value)
    return self.configure(commands)
Configures the routemap continue value Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. value (integer): The value to configure for the routemap continue default (bool): Specifies to default the routemap continue value disable (bool): Specifies to negate the routemap continue value Returns: True if the operation succeeds otherwise False is returned
382,147
def get_vouchers(self, vid_encoded=None, uid_from=None, uid_to=None, gid=None, valid_after=None, valid_before=None, last=None, first=None): resource = self.kvpath( , (, vid_encoded), **{ : (, uid_from), : (, uid_to), : (, gid), : (, absdatetime(valid_after)), : (, absdatetime(valid_before)), : (, first), : (, last) } ) return self.request(, resource)
FETCHES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: If `first` or `last` are used, the vouchers list is sorted by time created, otherwise it is sorted alphabetically by `vid_encoded`. :rtype: ``list``/``dict`` :returns: A list of voucher description dictionaries. If `vid_encoded` is specified, a single dictionary is returned instead of a list. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.get``)
382,148
def make_stream_tls_features(self, stream, features): if self.stream and stream is not self.stream: raise ValueError("Single StreamTLSHandler instance can handle" " only one stream") self.stream = stream if self.settings["starttls"] and not stream.tls_established: tls = ElementTree.SubElement(features, STARTTLS_TAG) if self.settings["tls_require"]: ElementTree.SubElement(tls, REQUIRED_TAG) return features
Update the <features/> element with StartTLS feature. [receving entity only] :Parameters: - `features`: the <features/> element of the stream. :Types: - `features`: :etree:`ElementTree.Element` :returns: update <features/> element. :returntype: :etree:`ElementTree.Element`
382,149
def missing_pids(self):
    missing = []
    for p in self.pids:
        try:
            PersistentIdentifier.get(p.pid_type, p.pid_value)
        except PIDDoesNotExistError:
            missing.append(p)
    return missing
Return the persistent identifiers that do not exist in the database.
382,150
def filter_step(G, covY, pred, yt):
    data_pred_mean = np.matmul(pred.mean, G.T)
    data_pred_cov = dotdot(G, pred.cov, G.T) + covY
    if covY.shape[0] == 1:
        logpyt = dists.Normal(loc=data_pred_mean,
                              scale=np.sqrt(data_pred_cov)).logpdf(yt)
    else:
        logpyt = dists.MvNormal(loc=data_pred_mean,
                                cov=data_pred_cov).logpdf(yt)
    residual = yt - data_pred_mean
    gain = dotdot(pred.cov, G.T, inv(data_pred_cov))
    filt_mean = pred.mean + np.matmul(residual, gain.T)
    filt_cov = pred.cov - dotdot(gain, G, pred.cov)
    return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt
Filtering step of Kalman filter. Parameters ---------- G: (dy, dx) numpy array mean of Y_t | X_t is G * X_t covY: (dy, dy) numpy array covariance of Y_t | X_t (observation noise) pred: MeanAndCov object predictive distribution at time t yt: (dy,) numpy array observation at time t Returns ------- filt: MeanAndCov object filtering distribution at time t logpyt: float log density of Y_t | Y_{0:t-1}
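For reference, the step above implements the standard Kalman filter update, written here to match the code (pred.mean = m_{t|t-1}, pred.cov = P_{t|t-1}, covY = \Sigma_Y, gain = K_t):

\hat{y}_t = G\, m_{t|t-1}, \qquad S_t = G\, P_{t|t-1} G^{\top} + \Sigma_Y, \qquad K_t = P_{t|t-1} G^{\top} S_t^{-1}

m_{t|t} = m_{t|t-1} + K_t\,(y_t - \hat{y}_t), \qquad P_{t|t} = P_{t|t-1} - K_t\, G\, P_{t|t-1}, \qquad \log p(y_t \mid y_{0:t-1}) = \log \mathcal{N}(y_t;\ \hat{y}_t,\ S_t)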
382,151
def replace_namespaced_custom_object_scale(self, group, version, namespace, plural, name, body, **kwargs):
    # The kwargs key names follow the usual Swagger-generated client pattern
    # ('_return_http_data_only', 'async_req'); the original literals were lost.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs)
    else:
        (data) = self.replace_namespaced_custom_object_scale_with_http_info(group, version, namespace, plural, name, body, **kwargs)
        return data
replace_namespaced_custom_object_scale # noqa: E501 replace scale of the specified namespace scoped custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_custom_object_scale(group, version, namespace, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str namespace: The custom resource's namespace (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param UNKNOWN_BASE_TYPE body: (required) :return: object If the method is called asynchronously, returns the request thread.
382,152
def ReadHuntCounters(self, hunt_id): num_clients = self.CountHuntFlows(hunt_id) num_successful_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY) num_failed_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY) num_clients_with_results = len( set(r[0].client_id for r in self.flow_results.values() if r and r[0].hunt_id == hunt_id)) num_crashed_clients = self.CountHuntFlows( hunt_id, filter_condition=db.HuntFlowsCondition.CRASHED_FLOWS_ONLY) num_results = self.CountHuntResults(hunt_id) total_cpu_seconds = 0 total_network_bytes_sent = 0 for f in self.ReadHuntFlows(hunt_id, 0, sys.maxsize): total_cpu_seconds += ( f.cpu_time_used.user_cpu_time + f.cpu_time_used.system_cpu_time) total_network_bytes_sent += f.network_bytes_sent return db.HuntCounters( num_clients=num_clients, num_successful_clients=num_successful_clients, num_failed_clients=num_failed_clients, num_clients_with_results=num_clients_with_results, num_crashed_clients=num_crashed_clients, num_results=num_results, total_cpu_seconds=total_cpu_seconds, total_network_bytes_sent=total_network_bytes_sent)
Reads hunt counters.
382,153
def get_item_hrefs(result_collection):
    assert result_collection is not None
    result = []
    # Key names ('_links', 'items', 'href') are assumed from the docstring;
    # the original literals were lost.
    links = result_collection.get('_links')
    if links is not None:
        items = links.get('items')
        if items is not None:
            for item in items:
                result.append(item.get('href'))
    return result
Given a result_collection (returned by a previous API call that returns a collection, like get_bundle_list() or search()), return a list of item hrefs. 'result_collection' a JSON object returned by a previous API call. Returns a list, which may be empty if no items were found.
382,154
def get_url(self, url, dest, makedirs=False, saltenv=, no_cache=False, cachedir=None, source_hash=None): url_data = urlparse(url) url_scheme = url_data.scheme url_path = os.path.join( url_data.netloc, url_data.path).rstrip(os.sep) if dest is not None \ and (os.path.isdir(dest) or dest.endswith((, ))): if url_data.query or len(url_data.path) > 1 and not url_data.path.endswith(): strpath = url.split()[-1] else: strpath = if salt.utils.platform.is_windows(): strpath = salt.utils.path.sanitize_win_path(strpath) dest = os.path.join(dest, strpath) if url_scheme and url_scheme.lower() in string.ascii_lowercase: url_path = .join((url_scheme, url_path)) url_scheme = if url_scheme in (, ): if not os.path.isabs(url_path): raise CommandExecutionError( {0}\.format(url_path) ) if dest is None: with salt.utils.files.fopen(url_path, ) as fp_: data = fp_.read() return data return url_path if url_scheme == : result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir) if result and dest is None: with salt.utils.files.fopen(result, ) as fp_: data = fp_.read() return data return result if dest: destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: return elif not no_cache: dest = self._extrn_path(url, saltenv, cachedir=cachedir) if source_hash is not None: try: source_hash = source_hash.split()[-1] form = salt.utils.files.HASHES_REVMAP[len(source_hash)] if salt.utils.hashutils.get_hash(dest, form) == source_hash: log.debug( , url, dest, source_hash ) return dest except (AttributeError, KeyError, IOError, OSError): pass destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) if url_data.scheme == : try: def s3_opt(key, default=None): if + key in self.opts: return self.opts[ + key] try: return self.opts[][][key] except (KeyError, TypeError): return default self.utils[](method=, bucket=url_data.netloc, path=url_data.path[1:], return_bin=False, local_file=dest, action=None, key=s3_opt(), keyid=s3_opt(), service_url=s3_opt(), verify_ssl=s3_opt(, True), location=s3_opt(), path_style=s3_opt(, False), https_enable=s3_opt(, True)) return dest except Exception as exc: raise MinionError( .format(url, exc) ) if url_data.scheme == : try: ftp = ftplib.FTP() ftp.connect(url_data.hostname, url_data.port) ftp.login(url_data.username, url_data.password) remote_file_path = url_data.path.lstrip() with salt.utils.files.fopen(dest, ) as fp_: ftp.retrbinary(.format(remote_file_path), fp_.write) ftp.quit() return dest except Exception as exc: raise MinionError(.format(url, exc)) if url_data.scheme == : try: def swift_opt(key, default): if key in self.opts: return self.opts[key] try: return self.opts[][key] except (KeyError, TypeError): return default swift_conn = SaltSwift(swift_opt(, None), swift_opt(, None), swift_opt(, None), swift_opt(, None)) swift_conn.get_object(url_data.netloc, url_data.path[1:], dest) return dest except Exception: raise MinionError(.format(url)) get_kwargs = {} if url_data.username is not None \ and url_data.scheme in (, ): netloc = url_data.netloc at_sign_pos = netloc.rfind() if at_sign_pos != -1: netloc = netloc[at_sign_pos + 1:] fixed_url = urlunparse( (url_data.scheme, netloc, url_data.path, url_data.params, url_data.query, url_data.fragment)) get_kwargs[] = (url_data.username, url_data.password) else: fixed_url = url destfp = None try: write_body[1] = False return write_body[1].parse_line(hdr) if in write_body[1]: content_type = write_body[1].get() if not content_type.startswith(): write_body[1] = write_body[2] = 
False else: encoding = fields = content_type.split() for field in fields: if in field: encoding = field.split()[-1] write_body[2] = encoding write_body[1] = False finally: if destfp is not None: destfp.close()
Get a single file from a URL.
382,155
def _calc_delta(self, ensemble, scaling_matrix=None):
    mean = np.array(ensemble.mean(axis=0))
    delta = ensemble.as_pyemu_matrix()
    for i in range(ensemble.shape[0]):
        delta.x[i, :] -= mean
    if scaling_matrix is not None:
        delta = scaling_matrix * delta.T
    delta *= (1.0 / np.sqrt(float(ensemble.shape[0] - 1.0)))
    return delta
calc the scaled ensemble differences from the mean
382,156
def SubmitJob(self, *params, **kw): fp = self.__expandparamstodict(params, kw) return self._get_subfolder(, GPJob, fp)._jobstatus
Asynchronously execute the specified GP task. This will return a Geoprocessing Job object. Parameters are passed in either in order or as keywords.
382,157
def extract(self, start, end): from copy import deepcopy eaf_out = deepcopy(self) for t in eaf_out.get_tier_names(): for ab, ae, value in eaf_out.get_annotation_data_for_tier(t): if ab > end or ae < start: eaf_out.remove_annotation(t, (start-end)//2, False) eaf_out.clean_time_slots() return eaf_out
Extracts the selected time frame as a new object. :param int start: Start time. :param int end: End time. :returns: class:`pympi.Elan.Eaf` object containing the extracted frame.
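A short usage sketch, assuming a pympi-style Eaf object (file names are hypothetical; times are in the units stored in the file, typically milliseconds):

eaf = Eaf('recording.eaf')          # hypothetical input file
clip = eaf.extract(5000, 10000)     # annotations overlapping roughly 5 s - 10 s
clip.to_file('clip.eaf')            # write the extracted frame to a new file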
382,158
def index(in_bam, config, check_timestamp=True): assert is_bam(in_bam), "%s in not a BAM file" % in_bam index_file = "%s.bai" % in_bam alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0] if check_timestamp: bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam) else: bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file) if not bai_exists: for fname in [index_file, alt_index_file]: utils.remove_safe(fname) samtools = config_utils.get_program("samtools", config) num_cores = config["algorithm"].get("num_cores", 1) with file_transaction(config, index_file) as tx_index_file: cmd = "{samtools} index -@ {num_cores} {in_bam} {tx_index_file}" do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam)) return index_file if utils.file_exists(index_file) else alt_index_file
Index a BAM file, skipping if index present. Centralizes BAM indexing providing ability to switch indexing approaches.
382,159
def _create_update_from_file(mode=, uuid=None, path=None): ret = {} if not os.path.isfile(path) or path is None: ret[] = .format(path) return ret cmd = .format( mode=mode, brand=get(uuid)[] if uuid is not None else , path=path ) res = __salt__[](cmd) retcode = res[] if retcode != 0: ret[] = _exit_status(retcode) if in res: if res[][0] == : ret[] = salt.utils.json.loads(res[]) else: ret[] = res[] return ret cmd = .format( mode=mode, uuid=uuid if uuid is not None else , path=path ) res = __salt__[](cmd) retcode = res[] if retcode != 0: ret[] = _exit_status(retcode) if in res: if res[][0] == : ret[] = salt.utils.json.loads(res[]) else: ret[] = res[] return ret else: if res[].startswith(): return res[][24:] return True
Create vm from file
382,160
def watt_m(simulated_array, observed_array, replace_nan=None, replace_inf=None,
           remove_neg=False, remove_zero=False):
    simulated_array, observed_array = treat_values(
        simulated_array,
        observed_array,
        replace_nan=replace_nan,
        replace_inf=replace_inf,
        remove_neg=remove_neg,
        remove_zero=remove_zero
    )
    a = 2 / np.pi
    b = np.mean((simulated_array - observed_array) ** 2)
    c = np.std(observed_array, ddof=1) ** 2 + np.std(simulated_array, ddof=1) ** 2
    e = (np.mean(simulated_array) - np.mean(observed_array)) ** 2
    f = c + e
    return a * np.arcsin(1 - (b / f))
Compute Watterson's M (M). .. image:: /pictures/M.png **Range:** -1 ≤ M < 1, does not indicate bias, larger is better. **Notes:** Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float Watterson's M value. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.watt_m(sim, obs) 0.8307913876595929 References ---------- - Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International Journal of Climatology 16(4) 379-391.
382,161
def lightcurve_moments(ftimes, fmags, ferrs):
    ndet = len(fmags)
    if ndet > 9:
        series_median = npmedian(fmags)
        series_wmean = (
            npsum(fmags*(1.0/(ferrs*ferrs)))/npsum(1.0/(ferrs*ferrs))
        )
        series_mad = npmedian(npabs(fmags - series_median))
        series_stdev = 1.483*series_mad
        series_skew = spskew(fmags)
        series_kurtosis = spkurtosis(fmags)
        series_above1std = len(fmags[fmags > (series_median + series_stdev)])
        series_below1std = len(fmags[fmags < (series_median - series_stdev)])
        series_beyond1std = (series_above1std + series_below1std)/float(ndet)
        series_mag_percentiles = nppercentile(
            fmags,
            [5.0, 10, 17.5, 25, 32.5, 40, 60, 67.5, 75, 82.5, 90, 95]
        )
        # The dict keys and the error message are reconstructed from the
        # docstring; the original literals were lost.
        return {
            'median': series_median,
            'wmean': series_wmean,
            'mad': series_mad,
            'stdev': series_stdev,
            'skew': series_skew,
            'kurtosis': series_kurtosis,
            'beyond1std': series_beyond1std,
            'mag_percentiles': series_mag_percentiles,
            'mag_iqr': series_mag_percentiles[8] - series_mag_percentiles[3],
        }
    else:
        LOGERROR('not enough detections (ndet < 10) to calculate light curve moments')
        return None
This calculates the weighted mean, stdev, median, MAD, percentiles, skew, kurtosis, fraction of LC beyond 1-stdev, and IQR. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. Returns ------- dict A dict with all of the light curve moments calculated.
382,162
def _all_get_table_col(self, key, column, fullname):
    val = column[0]
    try:
        if type(val) is int:
            return pt.IntCol()
        if isinstance(val, (str, bytes)):
            itemsize = int(self._prm_get_longest_stringsize(column))
            return pt.StringCol(itemsize)
        if isinstance(val, np.ndarray):
            if (np.issubdtype(val.dtype, str) or
                    np.issubdtype(val.dtype, bytes)):
                itemsize = int(self._prm_get_longest_stringsize(column))
                return pt.StringCol(itemsize, shape=val.shape)
            else:
                return pt.Col.from_dtype(np.dtype((val.dtype, val.shape)))
        else:
            return pt.Col.from_dtype(np.dtype(type(val)))
    except Exception:
        # Log message reconstructed; the original format string was lost.
        self._logger.error('Could not create a table column for `%s` in `%s` '
                           'with data type %s' % (key, fullname, repr(type(val))))
        raise
Creates a pytables column instance. The type of column depends on the type of `column[0]`. Note that data in `column` must be homogeneous!
382,163
def _send(self, key, value, metric_type):
    try:
        payload = self._build_payload(key, value, metric_type)
        # Log messages reconstructed; the original strings were lost.
        LOGGER.debug('Sending payload: %r', payload)
        self._socket.sendto(payload.encode(), self._address)
    except socket.error:
        LOGGER.exception('Error sending statsd metric')
Send the specified value to the statsd daemon via UDP without a direct socket connection. :param str key: The key name to send :param int or float value: The value for the key
382,164
def mdaArray(arry, dtype=numpy.float, mask=None):
    a = numpy.array(arry, dtype)
    res = MaskedDistArray(a.shape, a.dtype)
    res[:] = a
    res.mask = mask
    return res
Array constructor for masked distributed array @param arry numpy-like array @param mask mask array (or None if all data elements are valid)
382,165
def main(): arguments = docopt.docopt(__doc__, version=) if arguments[]: diag.show() if arguments[]: diag.reporting() diag.show() if arguments[]: try: if couchdb.ping(): print else: print except: print if arguments[]: (username, password) = couchdb.get_admin() print .format(username) print .format(password) if arguments[]: admins = couchdb.get_couchdb_admins() print for admin in admins: print .format(admin) if arguments[]: couchdb.delete_token() if arguments[]: print couchdb.create_token() if arguments[]: if arguments[]: db_name = arguments.get(, ) else: db_name = couchdb.create_cozy_db(db_name) print .format(db_name) if arguments[]: couchdb.reset_token() print print couchdb.get_admin()[0] if arguments[]: print couchdb.get_cozy_param(arguments[]) if arguments[]: ssl.normalize_cert_dir() if arguments[]: filename = arguments[] if filename: print ssl.get_crt_common_name(filename) else: print ssl.get_crt_common_name() if arguments[]: ssl.clean_links() if arguments[]: ssl.make_links(arguments[]) if arguments[]: common_name = arguments[] if arguments[]: key_size = int(arguments[]) else: key_size = ssl.DEFAULT_KEY_SIZE print .format(common_name, key_size) ssl.generate_certificate(common_name, key_size) if arguments[]: common_name = arguments[] print "Sign certificate for {} with Letrenew_certificatescompare_version<current><operator><reference>is_cozy_registeredunregister_cozyfix_oom_scoresget_oom_scoresrebuild_app--not-force--restart<app>rebuild_all_apps--not-force--restartmigrate_2_node4install_requirementsinstall_cozywait_couchdbwait_cozy_stackcheck_lsb_codenameemulate_smtp127.0.0.125--bind<ip>--port<port><port><ip>Emulate SMTP server on {}:{}backup<backup_filename><backup_filename>restore<backup_filename>install_weboobupdate_weboobupdate_weboob_modules']: weboob.update()
Main part of command line utility
382,166
def export_users(self, body):
    # The endpoint path ('users-exports') is assumed; the original literal was lost.
    return self.client.post(self._url('users-exports'), data=body)
Export all users to a file using a long running job. Check job status with get(). URL pointing to the export file will be included in the status once the job is complete. Args: body (dict): Please see: https://auth0.com/docs/api/management/v2#!/Jobs/post_users_exports
382,167
def expireat(self, key, when):
    expire_time = datetime.fromtimestamp(when)
    key = self._encode(key)
    if key in self.redis:
        self.timeouts[key] = expire_time
        return True
    return False
Emulate expireat
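A self-contained toy illustrating the emulated semantics above: the expiry is recorded only when the key exists, mirroring the real EXPIREAT return values; `TinyStore` is made up for this example.

import time
from datetime import datetime

class TinyStore(object):
    def __init__(self):
        self.redis = {}
        self.timeouts = {}
    def set(self, key, value):
        self.redis[key] = value
    def expireat(self, key, when):
        if key in self.redis:
            self.timeouts[key] = datetime.fromtimestamp(when)
            return True
        return False

store = TinyStore()
store.set('session', 'abc')
print(store.expireat('session', int(time.time()) + 60))   # True
print(store.expireat('missing', int(time.time()) + 60))   # False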
382,168
def get_pdos(dos, lm_orbitals=None, atoms=None, elements=None): if not elements: symbols = dos.structure.symbol_set elements = dict(zip(symbols, [None] * len(symbols))) pdos = {} for el in elements: if atoms and el not in atoms: continue element_sites = [site for site in dos.structure.sites if site.specie == get_el_sp(el)] sites = [site for i, site in enumerate(element_sites) if not atoms or (el in atoms and i in atoms[el])] lm = lm_orbitals[el] if (lm_orbitals and el in lm_orbitals) else None orbitals = elements[el] if elements and el in elements else None pdos[el] = get_element_pdos(dos, el, sites, lm, orbitals) return pdos
Extract the projected density of states from a CompleteDos object. Args: dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The density of states. elements (:obj:`dict`, optional): The elements and orbitals to extract from the projected density of states. Should be provided as a :obj:`dict` with the keys as the element names and corresponding values as a :obj:`tuple` of orbitals. For example, the following would extract the Bi s, px, py and d orbitals:: {'Bi': ('s', 'px', 'py', 'd')} If an element is included with an empty :obj:`tuple`, all orbitals for that species will be extracted. If ``elements`` is not set or set to ``None``, all elements for all species will be extracted. lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into their lm contributions (e.g. p -> px, py, pz). Should be provided as a :obj:`dict`, with the elements names as keys and a :obj:`tuple` of orbitals as the corresponding values. For example, the following would be used to decompose the oxygen p and d orbitals:: {'O': ('p', 'd')} atoms (:obj:`dict`, optional): Which atomic sites to use when calculating the projected density of states. Should be provided as a :obj:`dict`, with the element names as keys and a :obj:`tuple` of :obj:`int` specifying the atomic indices as the corresponding values. The elemental projected density of states will be summed only over the atom indices specified. If an element is included with an empty :obj:`tuple`, then all sites for that element will be included. The indices are 0 based for each element specified in the POSCAR. For example, the following will calculate the density of states for the first 4 Sn atoms and all O atoms in the structure:: {'Sn': (1, 2, 3, 4), 'O': (, )} If ``atoms`` is not set or set to ``None`` then all atomic sites for all elements will be considered. Returns: dict: The projected density of states. Formatted as a :obj:`dict` of :obj:`dict` mapping the elements and their orbitals to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example:: { 'Bi': {'s': Dos, 'p': Dos ... }, 'S': {'s': Dos} }
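A hedged usage sketch: load a VASP calculation with pymatgen and extract a subset of the projected DOS. The file name and the element/orbital choices are examples only; the available keys in the result depend on the options passed.

from pymatgen.io.vasp.outputs import Vasprun

vr = Vasprun('vasprun.xml')
dos = vr.complete_dos
pdos = get_pdos(dos,
                elements={'Bi': ('s', 'p'), 'S': ()},   # () -> all orbitals for S
                lm_orbitals={'Bi': ('p',)})             # decompose Bi p into px/py/pz
bi_px = pdos['Bi']['px']   # a pymatgen Dos object for the Bi px channel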
382,169
def default_antenna1(self, context):
    ant1, ant2 = default_base_ant_pairs(self, context)
    # The dimension name strings were stripped during extraction; 'ntime' and
    # 'nbl' (time and baseline) are inferred from the unpacking below.
    (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl')
    ant1_result = np.empty(context.shape, context.dtype)
    ant1_result[:, :] = ant1[np.newaxis, bl:bu]
    return ant1_result
Default antenna1 values
382,170
def compile(self, pretty=True):
    # Authoritative mapping of object names
    self._object_names = {}

    # Collect a list of dependencies for each shader
    self._shader_deps = {}
    for shader_name, shader in self.shaders.items():
        this_shader_deps = []
        self._shader_deps[shader_name] = this_shader_deps
        dep_set = set()
        for dep in shader.dependencies(sort=True):
            if dep.name is None or dep in dep_set:
                continue
            this_shader_deps.append(dep)
            dep_set.add(dep)

    # Assign names to all objects
    if pretty:
        self._rename_objects_pretty()
    else:
        self._rename_objects_fast()

    # Concatenate all definitions together
    compiled = {}
    obj_names = self._object_names
    for shader_name, shader in self.shaders.items():
        code = []
        for dep in self._shader_deps[shader_name]:
            dep_code = dep.definition(obj_names)
            if dep_code is not None:
                # Strip out any #version directives. The regex pattern, the
                # version check and the join separator below are reconstructions;
                # the original string literals were stripped during extraction.
                regex = r'#version\s+(\d+)'
                m = re.search(regex, dep_code)
                if m is not None:
                    if m.group(1) != '120':
                        raise RuntimeError("Currently only GLSL #version 120 "
                                           "is supported.")
                    dep_code = re.sub(regex, '', dep_code)
                code.append(dep_code)
        compiled[shader_name] = '\n\n'.join(code)

    self.code = compiled
    return compiled
Compile all code and return a dict {name: code} where the keys are determined by the keyword arguments passed to __init__(). Parameters ---------- pretty : bool If True, use a slower method to mangle object names. This produces GLSL that is more readable. If False, then the output is mostly unreadable GLSL, but is about 10x faster to compile.
382,171
def mtabstr2doestr(st1):
    # The literal separator and output filename strings were stripped during
    # extraction; the values below are placeholders, not the original constants.
    seperator = '$ =='  # placeholder for the original section separator
    alist = st1.split(seperator)
    for num in range(0, len(alist)):
        alist[num] = alist[num].lstrip()
    st2 = ''
    for num in range(0, len(alist)):
        sub = tabstr2list(alist[num])
        st2 = st2 + list2doe(sub)
    lss = st2.split()
    mylib1.write_str2file('mtabstr2doestr_out.txt', st2)  # placeholder filename
    print(len(lss))
    st3 = tree2doe(st2)
    lsss = st3.split()
    print(len(lsss))
    return st3
mtabstr2doestr
382,172
def get_pmids(self):
    pmids = []
    for ea in self._edge_attributes.values():
        # The attribute key string was stripped during extraction; 'pmids' is
        # inferred from the method's purpose.
        edge_pmids = ea.get('pmids')
        if edge_pmids:
            pmids += edge_pmids
    return list(set(pmids))
Get list of all PMIDs associated with edges in the network.
382,173
def valid_conkey(self, conkey):
    for prefix in _COND_PREFIXES:
        trailing = conkey.lstrip(prefix)
        # Valid if nothing remains after the prefix (and the key is non-empty) ...
        if trailing == '' and conkey:
            return True
        # ... or if the remainder is an integer.
        try:
            int(trailing)
            return True
        except ValueError:
            pass
    return False
Check that the conkey is a valid one. Return True if valid. A condition key is valid if it is one in the _COND_PREFIXES list. With the prefix removed, the remaining string must be either a number or the empty string.
382,174
def getBagTags(bagInfoPath):
    # The encoding name strings were stripped during extraction; UTF-8 with a
    # Latin-1 fallback is assumed here.
    try:
        bagInfoString = open(bagInfoPath, "r").read().decode("utf-8")
    except UnicodeDecodeError:
        bagInfoString = open(bagInfoPath, "r").read().decode("iso-8859-1")
    bagTags = anvl.readANVLString(bagInfoString)
    return bagTags
get bag tags
382,175
def retention_period(self, value): policy = self._properties.setdefault("retentionPolicy", {}) if value is not None: policy["retentionPeriod"] = str(value) else: policy = None self._patch_property("retentionPolicy", policy)
Set the retention period for items in the bucket. :type value: int :param value: number of seconds to retain items after upload or release from event-based lock. :raises ValueError: if the bucket's retention policy is locked.
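A short usage sketch with the google-cloud-storage client; the bucket name is a placeholder and the call assumes the caller has storage admin rights.

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket('my-example-bucket')
bucket.retention_period = 7 * 24 * 3600   # retain objects for one week
bucket.patch()                            # sends the updated retentionPolicy to the API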
382,176
def matchingAnalyseIndexes(self, tokenJson):
    matchingResults = self.matchingAnalyses(tokenJson)
    if matchingResults:
        indexes = [tokenJson[ANALYSIS].index(analysis) for analysis in matchingResults]
        return indexes
    return matchingResults
Determines whether given token (tokenJson) satisfies all the rules listed in the WordTemplate and returns a list of analyse indexes that correspond to tokenJson[ANALYSIS] elements that are matching all the rules. An empty list is returned if none of the analyses match (all the rules), or (!) if none of the rules are describing the ANALYSIS part of the token; Parameters ---------- tokenJson: pyvabamorf's analysis of a single word token;
382,177
def form(value):
    if isinstance(value, FLOAT + INT):
        if value <= 0:
            return str(value)
        elif value < .001:
            # Format strings below are reconstructed from the doctests;
            # the originals were stripped during extraction.
            return '%.3E' % value
        elif value < 10 and isinstance(value, FLOAT):
            return '%.5f' % value
        elif value > 1000:
            return '{:,d}'.format(int(round(value)))
        elif numpy.isnan(value):
            return 'NaN'
        else:
            return str(int(value))
    elif isinstance(value, bytes):
        return decode(value)
    elif isinstance(value, str):
        return value
    elif isinstance(value, numpy.object_):
        return str(value)
    elif hasattr(value, '__len__') and len(value) > 1:
        # The separator string was stripped during extraction; a space is assumed.
        return ' '.join(map(form, value))
    return str(value)
Format numbers in a nice way. >>> form(0) '0' >>> form(0.0) '0.0' >>> form(0.0001) '1.000E-04' >>> form(1003.4) '1,003' >>> form(103.4) '103' >>> form(9.3) '9.30000' >>> form(-1.2) '-1.2'
382,178
def _visible(self, element):
    if element.name in self._disallowed_names:
        return False
    # The regex literal was stripped during extraction; matching HTML comments
    # is the usual check in this kind of visibility filter and is assumed here.
    elif re.match(u'<!--.*-->', six.text_type(element.extract())):
        return False
    return True
Used to filter text elements that have invisible text on the page.
382,179
def putResult(self, result): self._lock_prev_output.acquire() for tube in self._tubes_result_output: tube.put((result, 0)) self._lock_next_output.release()
Register the *result* by putting it on all the output tubes.
382,180
def _random_subprocessors(self): if self._processors is not None: return (p for p in self._processors) elif 2**len(self._evil) <= 8 * self._proc_limit: deletions = self._compute_all_deletions() if len(deletions) > self._proc_limit: deletions = sample(deletions, self._proc_limit) return (self._subprocessor(d) for d in deletions) else: return (self._random_subprocessor() for i in range(self._proc_limit))
Produces an iterator of subprocessors. If there are fewer than self._proc_limit subprocessors to consider (by knocking out a minimal subset of working qubits incident to broken couplers), we work exhaustively. Otherwise, we generate a random set of ``self._proc_limit`` subprocessors. If the total number of possibilities is rather small, then we deliberately pick a random minimum subset to avoid coincidences. Otherwise, we give up on minimum, satisfy ourselves with minimal, and randomly generate subprocessors with :func:`self._random_subprocessor`. OUTPUT: an iterator of eden_processor instances.
382,181
def add_child(self, child): if not isinstance(child, DependencyNode): raise TypeError() self._children.append(child)
Add a child node
382,182
def register_up(self):
    with self.regcond:
        self.runningcount += 1
        tid = thread.get_ident()
        self.tids.append(tid)
        self.logger.debug("register_up: (%d) count is %d" % (tid, self.runningcount))
        if self.runningcount == self.numthreads:
            self.status = 'up'   # value reconstructed from the docstring
            self.regcond.notify()
Called by WorkerThread objects to register themselves. Acquire the condition variable for the WorkerThread objects. Increment the running-thread count. If we are the last thread to start, set status to 'up'. This allows startall() to complete if it was called with wait=True.
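A self-contained sketch of the same start-up handshake, assuming nothing from the class above: each worker registers under a shared Condition, the last one flips the status to 'up', and startall(wait=True) blocks until then.

import threading

class Pool(object):
    def __init__(self, numthreads):
        self.numthreads = numthreads
        self.runningcount = 0
        self.status = 'down'
        self.regcond = threading.Condition()

    def register_up(self):
        with self.regcond:
            self.runningcount += 1
            if self.runningcount == self.numthreads:
                self.status = 'up'
                self.regcond.notify_all()

    def startall(self, wait=True):
        for _ in range(self.numthreads):
            threading.Thread(target=self.register_up).start()
        if wait:
            with self.regcond:
                while self.status != 'up':
                    self.regcond.wait()   # released while waiting, so workers can register

Pool(4).startall()   # returns once all four workers have registered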
382,183
def add_obograph_digraph(self, og, node_type=None, predicates=None, xref_graph=None,
                         logical_definitions=None, property_chain_axioms=None,
                         parse_meta=True, **args):
    # NOTE: the literal key strings were stripped during extraction; the names
    # below follow the OBO Graphs JSON schema ('nodes', 'edges', 'meta', ...)
    # and should be treated as a reconstruction.
    digraph = self.digraph
    logging.info("NODES: {}".format(len(og['nodes'])))
    # If the client passes an xref_graph we must parse metadata
    if xref_graph is not None:
        parse_meta = True
    for n in og['nodes']:
        is_obsolete = 'deprecated' in n and n['deprecated'] == 'true'
        if is_obsolete:
            continue
        if node_type is not None and ('type' not in n or n['type'] != node_type):
            continue
        id = self.contract_uri(n['id'])
        digraph.add_node(id, **n)
        if 'lbl' in n:
            digraph.node[id]['label'] = n['lbl']
        if parse_meta and 'meta' in n:
            if n['meta'] is None:
                n['meta'] = {}
            meta = self.transform_meta(n['meta'])
            if xref_graph is not None and 'xrefs' in meta:
                for x in meta['xrefs']:
                    xref_graph.add_edge(self.contract_uri(x['val']), id, source=id)
    logging.info("EDGES: {}".format(len(og['edges'])))
    for e in og['edges']:
        sub = self.contract_uri(e['sub'])
        obj = self.contract_uri(e['obj'])
        pred = self.contract_uri(e['pred'])
        pred = map_legacy_pred(pred)
        if pred == 'is_a':
            pred = 'subClassOf'
        if predicates is None or pred in predicates:
            digraph.add_edge(obj, sub, pred=pred)
    if 'equivalentNodesSets' in og:
        nslist = og['equivalentNodesSets']
        logging.info("CLIQUES: {}".format(len(nslist)))
        for ns in nslist:
            equivNodeIds = ns['nodeIds']
            for i in equivNodeIds:
                ix = self.contract_uri(i)
                for j in equivNodeIds:
                    if i != j:
                        jx = self.contract_uri(j)
                        digraph.add_edge(ix, jx, pred='equivalentTo')
    if logical_definitions is not None and 'logicalDefinitionAxioms' in og:
        for a in og['logicalDefinitionAxioms']:
            ld = LogicalDefinition(self.contract_uri(a['definedClassId']),
                                   [self.contract_uri(x) for x in a['genusIds']],
                                   [(self.contract_uri(x['propertyId']),
                                     self.contract_uri(x['fillerId']))
                                    for x in a['restrictions'] if x is not None])
            logical_definitions.append(ld)
    if property_chain_axioms is not None and 'propertyChainAxioms' in og:
        for a in og['propertyChainAxioms']:
            pca = PropertyChainAxiom(predicate_id=self.contract_uri(a['predicateId']),
                                     chain_predicate_ids=[self.contract_uri(x)
                                                          for x in a['chainPredicateIds']])
            property_chain_axioms.append(pca)
Converts a single obograph to Digraph edges and adds to an existing networkx DiGraph
382,184
def _send(self, event): _LOGGER.debug(, event) try: with async_timeout.timeout(10, loop=self._loop): response = yield from self._websession.post( self.ALARMDOTCOM_URL + .format( self._login_info[]), data={ self.VIEWSTATE: , self.VIEWSTATEENCRYPTED: , self.EVENTVALIDATION: self.COMMAND_LIST[event][], self.COMMAND_LIST[event][]: event}, headers={: } ) _LOGGER.debug( , response.status) text = yield from response.text() tree = BeautifulSoup(text, ) try: message = tree.select( .format(self.MESSAGE_CONTROL))[0].get_text() if in message: _LOGGER.debug(message) yield from self.async_update() except IndexError: yield from self.async_login() if event == : yield from self.async_alarm_disarm() elif event == : yield from self.async_alarm_arm_away() elif event == : yield from self.async_alarm_arm_away() except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error() finally: if response is not None: yield from response.release()
Generic function for sending commands to Alarm.com :param event: Event command to send to alarm.com
382,185
def elem_to_container(elem, container=dict, **options): dic = container() if elem is None: return dic elem.tag = _tweak_ns(elem.tag, **options) subdic = dic[elem.tag] = container() options["container"] = container if elem.text: _process_elem_text(elem, dic, subdic, **options) if elem.attrib: _process_elem_attrs(elem, dic, subdic, **options) if len(elem): _process_children_elems(elem, dic, subdic, **options) elif not elem.text and not elem.attrib: dic[elem.tag] = None return dic
Convert XML ElementTree Element to a collection of container objects. Elements are transformed to a node under special tagged nodes, attrs, text and children, to store the type of these elements basically, however, in some special cases like the followings, these nodes are attached to the parent node directly for later convenience. - There is only text element - There are only children elements each has unique keys among all :param elem: ET Element object or None :param container: callble to make a container object :param options: Keyword options - nspaces: A namespaces dict, {uri: prefix} or None - attrs, text, children: Tags for special nodes to keep XML info - merge_attrs: Merge attributes and mix with children nodes, and the information of attributes are lost after its transformation.
382,186
def headers(self):
    action = self.method.soap.action
    # Header names/values reconstructed; the literal strings were stripped
    # during extraction.
    stock = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': action,
    }
    result = dict(stock, **self.options.headers)
    log.debug('headers = %s', result)
    return result
Get http headers or the http/https request. @return: A dictionary of header/values. @rtype: dict
382,187
def _init_metadata(self): QuestionFilesFormRecord._init_metadata(self) FirstAngleProjectionFormRecord._init_metadata(self) super(MultiChoiceOrthoQuestionFormRecord, self)._init_metadata()
stub
382,188
def _decorate_namespace_property(bases: List[type], namespace: MutableMapping[str, Any], key: str) -> None: value = namespace[key] assert isinstance(value, property) fget = value.fget fset = value.fset fdel = value.fdel for func in [value.fget, value.fset, value.fdel]: func = cast(Callable[..., Any], func) if func is None: continue base_preconditions = [] base_snapshots = [] base_postconditions = [] bases_have_func = False for base in bases: if hasattr(base, key): base_property = getattr(base, key) assert isinstance(base_property, property), \ "Expected base {} to have {} as property, but got: {}".format(base, key, base_property) if func == value.fget: base_func = getattr(base, key).fget elif func == value.fset: base_func = getattr(base, key).fset elif func == value.fdel: base_func = getattr(base, key).fdel else: raise NotImplementedError("Unhandled case: func neither value.fget, value.fset nor value.fdel") if base_func is None: continue bases_have_func = True base_contract_checker = icontract._checkers.find_checker(func=base_func) if base_contract_checker is not None: base_preconditions.extend(base_contract_checker.__preconditions__) base_snapshots.extend(base_contract_checker.__postcondition_snapshots__) base_postconditions.extend(base_contract_checker.__postconditions__) preconditions = [] snapshots = [] postconditions = [] contract_checker = icontract._checkers.find_checker(func=func) if contract_checker is not None: preconditions = contract_checker.__preconditions__ snapshots = contract_checker.__postcondition_snapshots__ postconditions = contract_checker.__postconditions__ preconditions = _collapse_preconditions( base_preconditions=base_preconditions, bases_have_func=bases_have_func, preconditions=preconditions, func=func) snapshots = _collapse_snapshots(base_snapshots=base_snapshots, snapshots=snapshots) postconditions = _collapse_postconditions( base_postconditions=base_postconditions, postconditions=postconditions) if preconditions or postconditions: if contract_checker is None: contract_checker = icontract._checkers.decorate_with_checker(func=func) if func == value.fget: fget = contract_checker elif func == value.fset: fset = contract_checker elif func == value.fdel: fdel = contract_checker else: raise NotImplementedError("Unhandled case: func neither fget, fset nor fdel") contract_checker.__preconditions__ = preconditions contract_checker.__postcondition_snapshots__ = snapshots contract_checker.__postconditions__ = postconditions if fget != value.fget or fset != value.fset or fdel != value.fdel: namespace[key] = property(fget=fget, fset=fset, fdel=fdel)
Collect contracts for all getters/setters/deleters corresponding to ``key`` and decorate them.
382,189
def register(cls, range_mixin): def decorator(range_set_mixin): cls.add(range_mixin, range_set_mixin) return range_set_mixin return decorator
Decorator for registering range set mixins for global use. This works the same as :meth:`~spans.settypes.MetaRangeSet.add` :param range_mixin: A :class:`~spans.types.Range` mixin class to to register a decorated range set mixin class for :return: A decorator to use on a range set mixin class
382,190
def get_parent(self):
    if not self.parent:
        # The element path string was stripped during extraction; 'ParentASIN'
        # is inferred from the docstring.
        parent = self._safe_get_element('ParentASIN')
        if parent:
            self.parent = self.api.lookup(ItemId=parent)
    return self.parent
Get Parent. Fetch parent product if it exists. Use `parent_asin` to check if a parent exist before fetching. :return: An instance of :class:`~.AmazonProduct` representing the parent product.
382,191
def load(self): if not op.exists(self.path): logger.debug("The GUI state file `%s` doesn't exist.", self.path) return assert op.exists(self.path) logger.debug("Load the GUI state from `%s`.", self.path) self.update(_bunchify(_load_json(self.path)))
Load the state from the JSON file in the config dir.
382,192
def line_iterator_to_intermediary(line_iterator):
    current_table = None
    tables = []
    relations = []
    errors = []
    for line_nb, line, raw_line in filter_lines_from_comments(line_iterator):
        try:
            new_obj = parse_line(line)
            current_table, tables, relations = update_models(new_obj, current_table, tables, relations)
        except ParsingException as e:
            e.line_nb = line_nb
            e.line = raw_line
            errors.append(e)
    if len(errors) != 0:
        # Message reconstructed from the garbled source fragment
        # ("...t complete the generation due the {} following errors").
        msg = "Can't complete the generation due the {} following errors:\n\n".format(len(errors))
        raise ParsingException(msg + '\n\n'.join(e.traceback for e in errors))
    return tables, relations
Parse an iterator of str (one string per line) to the intermediary syntax
382,193
def remove_class(self, ioclass): current_ioclasses = self.ioclasses new_ioclasses = filter(lambda x: x.name != ioclass.name, current_ioclasses) self.modify(new_ioclasses=new_ioclasses)
Remove VNXIOClass instance from policy.
382,194
def surface_to_image(surface): from IPython.display import Image buf = BytesIO() surface.write_to_png(buf) data = buf.getvalue() buf.close() return Image(data=data)
Renders current buffer surface to IPython image
382,195
def tagmask(self, tags):
    mask = numpy.zeros(len(tags), bool)
    for t, tag in enumerate(tags):
        # The split character was stripped during extraction; tags are
        # 'name=value' strings, so '=' is assumed.
        tagname, tagvalue = tag.split('=')
        mask[t] = self.tagvalue(tagname) == tagvalue
    return mask
:returns: a boolean array with True where the assets has tags
382,196
def get(self, endpoint, params=None): response = self._session.get( url=self._url + endpoint, params=params, timeout=self._timeout ) return self._handle_response(response)
Send an HTTP GET request to QuadrigaCX. :param endpoint: API endpoint. :type endpoint: str | unicode :param params: URL parameters. :type params: dict :return: Response body from QuadrigaCX. :rtype: dict :raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
382,197
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065, max_clearness_index=2.0): cos_zenith = tools.cosd(solar_zenith) I0h = extra_radiation * np.maximum(cos_zenith, min_cos_zenith) kt = ghi / I0h kt = np.maximum(kt, 0) kt = np.minimum(kt, max_clearness_index) return kt
Calculate the clearness index. The clearness index is the ratio of global to extraterrestrial irradiance on a horizontal plane. Parameters ---------- ghi : numeric Global horizontal irradiance in W/m^2. solar_zenith : numeric True (not refraction-corrected) solar zenith angle in decimal degrees. extra_radiation : numeric Irradiance incident at the top of the atmosphere min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt : numeric Clearness index References ---------- .. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly Global Horizontal to Direct Normal Insolation", Technical Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987.
382,198
def calculate_manual_reading(basic_data: BasicMeterData) -> Reading: t_start = basic_data.previous_register_read_datetime t_end = basic_data.current_register_read_datetime read_start = basic_data.previous_register_read read_end = basic_data.current_register_read value = basic_data.quantity uom = basic_data.uom quality_method = basic_data.current_quality_method return Reading(t_start, t_end, value, uom, quality_method, "", "", read_start, read_end)
Calculate the interval between two manual readings
382,199
def one(iterable, cmp=None): the_one = False for i in iterable: if cmp(i) if cmp else i: if the_one: return False the_one = i return the_one
Return the object in the given iterable that evaluates to True. If the given iterable has more than one object that evaluates to True, or if there is no object that fulfills such condition, return False. If a callable ``cmp`` is given, it's used to evaluate each element. >>> one((True, False, False)) True >>> one((True, False, True)) False >>> one((0, 0, 'a')) 'a' >>> one((0, False, None)) False >>> one((True, True)) False >>> bool(one(('', 1))) True >>> one((10, 20, 30, 42), lambda i: i > 40) 42