def _normalize_value_ms(cls, value):
    value = round(value / 1000) * 1000
    sorted_units = sorted(cls.UNITS_IN_MILLISECONDS.items(),
                          key=lambda x: x[1], reverse=True)
    for unit, unit_in_ms in sorted_units:
        unit_value = value / unit_in_ms
        if unit_value.is_integer():
            return int(unit_value), unit
    return value, MILLISECOND
Normalize a value in ms to the largest unit possible without decimal places. Note that this ignores fractions of a second and always returns a value _at least_ in seconds. :return: the normalized value and unit name :rtype: Tuple[Union[int, float], str]
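A quick usage sketch, assuming a hypothetical host class; the unit constants and the UNITS_IN_MILLISECONDS mapping below are illustrative, not taken from the original module:

MILLISECOND, SECOND, MINUTE, HOUR = 'millisecond', 'second', 'minute', 'hour'

class Duration:
    # Hypothetical unit table (largest units win because of the reverse sort).
    UNITS_IN_MILLISECONDS = {SECOND: 1000, MINUTE: 60000, HOUR: 3600000}
    _normalize_value_ms = classmethod(_normalize_value_ms)

# 90000 ms is not a whole number of minutes, so it falls back to seconds.
print(Duration._normalize_value_ms(90000))    # -> (90, 'second')
print(Duration._normalize_value_ms(7200000))  # -> (2, 'hour')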
def alerts(self):
    endpoint = '/'.join((self.endpoint, self.id, 'alerts'))
    return self.alertFactory.find(
        endpoint=endpoint,
        api_key=self.api_key,
    )
Query for alerts attached to this incident.
def _format_default(client, value):
    if isinstance(value, File):
        return os.path.relpath(
            str((client.workflow_path / value.path).resolve())
        )
    return value
Format default values.
def fft(a, n=None, axis=-1, norm=None):
    output = mkl_fft.fft(a, n, axis)
    if _unitary(norm):
        output *= 1 / sqrt(output.shape[axis])
    return output
Compute the one-dimensional discrete Fourier Transform. This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) algorithm [CT]. Parameters ---------- a : array_like Input array, can be complex. n : int, optional Length of the transformed axis of the output. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. Raises ------ IndexError if `axes` is larger than the last axis of `a`. See Also -------- numpy.fft : for definition of the DFT and conventions used. ifft : The inverse of `fft`. fft2 : The two-dimensional FFT. fftn : The *n*-dimensional FFT. rfftn : The *n*-dimensional FFT of real input. fftfreq : Frequency bins for given FFT parameters. Notes ----- FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform (DFT) can be calculated efficiently, by using symmetries in the calculated terms. The symmetry is highest when `n` is a power of 2, and the transform is therefore most efficient for these sizes. The DFT is defined, with the conventions used in this implementation, in the documentation for the `numpy.fft` module. References ---------- .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the machine calculation of complex Fourier series," *Math. Comput.* 19: 297-301. Examples -------- >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([ -3.44505240e-16 +1.14383329e-17j, 8.00000000e+00 -5.71092652e-15j, 2.33482938e-16 +1.22460635e-16j, 1.64863782e-15 +1.77635684e-15j, 9.95839695e-17 +2.33482938e-16j, 0.00000000e+00 +1.66837030e-15j, 1.14383329e-17 +1.22460635e-16j, -1.64863782e-15 +1.77635684e-15j]) >>> import matplotlib.pyplot as plt >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) >>> plt.plot(freq, sp.real, freq, sp.imag) [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] >>> plt.show() In this example, real input has an FFT which is Hermitian, i.e., symmetric in the real part and anti-symmetric in the imaginary part, as described in the `numpy.fft` documentation.
def sign(self, msg, key):
    h = hmac.HMAC(key, self.algorithm(), default_backend())
    h.update(msg)
    return h.finalize()
Create a signature over a message as defined in RFC7515 using a symmetric key :param msg: The message :param key: The key :return: A signature
def default_start():
    (config, daemon, pidfile, startup, fork) = parsearg()
    if config is None:
        if os.path.isfile('/etc/vlcp.conf'):
            config = '/etc/vlcp.conf'
        else:
            print('/etc/vlcp.conf is not found; start without configurations.')
    elif not config:
        config = None
    main(config, startup, daemon, pidfile, fork)
Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start`
def add_context_menu_items(self, items, replace_items=False):
    for label, action in items:
        assert isinstance(label, basestring)
        assert isinstance(action, basestring)
    if replace_items:
        self._context_menu_items = []
    self._context_menu_items.extend(items)
    self._listitem.addContextMenuItems(items, replace_items)
Adds context menu items. If replace_items is True all previous context menu items will be removed.
def _assert_can_do_op(self, value):
    if not is_scalar(value):
        msg = "'value' must be a scalar, passed: {0}"
        raise TypeError(msg.format(type(value).__name__))
Check value is valid for scalar op.
def pwgen(length=None):
    if length is None:
        length = random.choice(range(35, 45))
    alphanumeric_chars = [
        l for l in (string.ascii_letters + string.digits)
        if l not in 'l0QD1vAEIOUaeiou']
    random_generator = random.SystemRandom()
    random_chars = [
        random_generator.choice(alphanumeric_chars) for _ in range(length)]
    return ''.join(random_chars)
Generate a random password.
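A short usage sketch (assuming the module-level random and string imports the function relies on); the character pool excludes vowels and easily confused characters:

import random
import string

# Default call picks a length between 35 and 44 characters.
print(pwgen())
# Explicit length:
print(pwgen(16))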
def _unsubscribe_myself(self):
    url = UNSUBSCRIBE_ENDPOINT
    return self._session.query(url, method='GET', raw=True, stream=False)
Unsubscribe this base station for all events.
def make_frequency_series(vec):
    if isinstance(vec, FrequencySeries):
        return vec
    if isinstance(vec, TimeSeries):
        N = len(vec)
        n = N // 2 + 1
        delta_f = 1.0 / N / vec.delta_t
        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
                                   delta_f=delta_f, copy=False)
        fft(vec, vectilde)
        return vectilde
    else:
        raise TypeError("Can only convert a TimeSeries to a FrequencySeries")
Return a frequency series of the input vector. If the input is a frequency series it is returned, else if the input vector is a real time series it is fourier transformed and returned as a frequency series. Parameters ---------- vector : TimeSeries or FrequencySeries Returns ------- Frequency Series: FrequencySeries A frequency domain version of the input vector.
def create_args(line, namespace):
    args = []
    for arg in shlex.split(line):
        if not arg:
            continue
        if arg[0] == '$':
            var_name = arg[1:]
            if var_name in namespace:
                args.append(namespace[var_name])
            else:
                raise Exception('Undefined variable referenced in command line: %s' % arg)
        else:
            args.append(arg)
    return args
Expand any meta-variable references in the argument list.
def open(self, filename, mode='r', **kwargs):
    if 'r' in mode and not self.backend.exists(filename):
        raise FileNotFound(filename)
    return self.backend.open(filename, mode, **kwargs)
Open the file and return a file-like object.

:param str filename: The storage root-relative filename
:param str mode: The open mode (``(r|w)b?``)
:raises FileNotFound: If trying to read a file that does not exist
def _get_queue_for_the_action(self, action):
    mod = getattr(action, 'module_type', 'fork')
    queues = list(self.q_by_mod[mod].items())
    if not queues:
        return (0, None)
    self.rr_qid = (self.rr_qid + 1) % len(queues)
    (worker_id, queue) = queues[self.rr_qid]
    return (worker_id, queue)
Find the action queue for the action depending on the module. The queue is picked round-robin among the queues registered for the action's module_type.

:param action: the action that needs an action queue to be assigned
:type action: object
:return: worker id and queue, or (0, None) if there is no queue for the module_type
:rtype: tuple
def search_associations_go(subject_category=None,
                           object_category=None,
                           relation=None,
                           subject=None,
                           **kwargs):
    go_golr_url = "http://golr.geneontology.org/solr/"
    go_solr = pysolr.Solr(go_golr_url, timeout=5)
    go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
    return search_associations(subject_category, object_category, relation, subject,
                               solr=go_solr,
                               field_mapping=goassoc_fieldmap(),
                               **kwargs)
Perform association search using Monarch golr
def print_token(self, token_node_index):
    err_msg = "The given node is not a token node."
    assert isinstance(self.nodes[token_node_index], TokenNode), err_msg
    onset = self.nodes[token_node_index].onset
    offset = self.nodes[token_node_index].offset
    return self.text[onset:offset]
returns the string representation of a token.
def regexer_for_targets(targets):
    for target in targets:
        path, file_ext = os.path.splitext(target)
        regexer = config.regexers[file_ext]
        yield target, regexer
Pairs up target files with their correct regex
def write(self,
          equities=None,
          futures=None,
          exchanges=None,
          root_symbols=None,
          equity_supplementary_mappings=None,
          chunk_size=DEFAULT_CHUNK_SIZE):
    if exchanges is None:
        exchange_names = [
            df['exchange']
            for df in (equities, futures, root_symbols)
            if df is not None
        ]
        if exchange_names:
            exchanges = pd.DataFrame({
                'exchange': pd.concat(exchange_names).unique(),
            })
    data = self._load_data(
        equities if equities is not None else pd.DataFrame(),
        futures if futures is not None else pd.DataFrame(),
        exchanges if exchanges is not None else pd.DataFrame(),
        root_symbols if root_symbols is not None else pd.DataFrame(),
        (equity_supplementary_mappings
         if equity_supplementary_mappings is not None
         else pd.DataFrame()),
    )
    self._real_write(
        equities=data.equities,
        equity_symbol_mappings=data.equities_mappings,
        equity_supplementary_mappings=data.equity_supplementary_mappings,
        futures=data.futures,
        root_symbols=data.root_symbols,
        exchanges=data.exchanges,
        chunk_size=chunk_size,
    )
Write asset metadata to a sqlite database. Parameters ---------- equities : pd.DataFrame, optional The equity metadata. The columns for this dataframe are: symbol : str The ticker symbol for this equity. asset_name : str The full name for this asset. start_date : datetime The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. auto_close_date : datetime, optional The date on which to close any positions in this asset. exchange : str The exchange where this asset is traded. The index of this dataframe should contain the sids. futures : pd.DataFrame, optional The future contract metadata. The columns for this dataframe are: symbol : str The ticker symbol for this futures contract. root_symbol : str The root symbol, or the symbol with the expiration stripped out. asset_name : str The full name for this asset. start_date : datetime, optional The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. exchange : str The exchange where this asset is traded. notice_date : datetime The date when the owner of the contract may be forced to take physical delivery of the contract's asset. expiration_date : datetime The date when the contract expires. auto_close_date : datetime The date when the broker will automatically close any positions in this contract. tick_size : float The minimum price movement of the contract. multiplier: float The amount of the underlying asset represented by this contract. exchanges : pd.DataFrame, optional The exchanges where assets can be traded. The columns of this dataframe are: exchange : str The full name of the exchange. canonical_name : str The canonical name of the exchange. country_code : str The ISO 3166 alpha-2 country code of the exchange. root_symbols : pd.DataFrame, optional The root symbols for the futures contracts. The columns for this dataframe are: root_symbol : str The root symbol name. root_symbol_id : int The unique id for this root symbol. sector : string, optional The sector of this root symbol. description : string, optional A short description of this root symbol. exchange : str The exchange where this root symbol is traded. equity_supplementary_mappings : pd.DataFrame, optional Additional mappings from values of abitrary type to assets. chunk_size : int, optional The amount of rows to write to the SQLite table at once. This defaults to the default number of bind params in sqlite. If you have compiled sqlite3 with more bind or less params you may want to pass that value here. See Also -------- zipline.assets.asset_finder
def namedb_is_name_zonefile_hash(cur, name, zonefile_hash):
    select_query = 'SELECT COUNT(value_hash) FROM history WHERE history_id = ? AND value_hash = ?'
    select_args = (name, zonefile_hash)
    rows = namedb_query_execute(cur, select_query, select_args)
    count = None
    for r in rows:
        count = r['COUNT(value_hash)']
        break
    return count > 0
Determine if a zone file hash was sent by a name. Return True if so, False if not.
def decode_mysql_literal(text):
    if MYSQL_NULL_PATTERN.match(text):
        return None
    if MYSQL_BOOLEAN_PATTERN.match(text):
        return text.lower() == "true"
    if MYSQL_FLOAT_PATTERN.match(text):
        return float(text)
    if MYSQL_INT_PATTERN.match(text):
        return int(text)
    if MYSQL_STRING_PATTERN.match(text):
        return decode_mysql_string_literal(text)
    raise ValueError("Unable to decode given value: %r" % (text,))
Attempts to decode given MySQL literal into Python value. :param text: Value to be decoded, as MySQL literal. :type text: str :return: Python version of the given MySQL literal. :rtype: any
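A self-contained sketch of how the function could be exercised; the regex constants below are plausible stand-ins for the module-level patterns the function expects, not taken from the original source:

import re

MYSQL_NULL_PATTERN = re.compile(r"^NULL$", re.IGNORECASE)       # assumption
MYSQL_BOOLEAN_PATTERN = re.compile(r"^(TRUE|FALSE)$", re.IGNORECASE)  # assumption
MYSQL_FLOAT_PATTERN = re.compile(r"^-?\d+\.\d+$")                # assumption
MYSQL_INT_PATTERN = re.compile(r"^-?\d+$")                       # assumption
MYSQL_STRING_PATTERN = re.compile(r"^'.*'$")                     # assumption

print(decode_mysql_literal("NULL"))   # None
print(decode_mysql_literal("true"))   # True
print(decode_mysql_literal("3.14"))   # 3.14
print(decode_mysql_literal("42"))     # 42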
def importPreflibFile(self, fileName):
    elecFileObj = open(fileName, 'r')
    self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
    elecFileObj.close()
    self.numCands = len(self.candMap.keys())
    self.preferences = []
    for i in range(0, len(rankMaps)):
        wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
        self.preferences.append(Preference(wmgMap, wmgMapsCounts[i]))
Imports a preflib format file that contains all the information of a Profile. This function will completely override all members of the current Profile object. Currently, we assume that in an election where incomplete orderings are allowed, if a voter ranks only one candidate, then the voter did not prefer any candidate over another. This may lead to some discrepancies when importing and exporting a .toi preflib file or a .soi preflib file.

:ivar str fileName: The name of the input file to be imported.
def get_locale_choices(locale_dir):
    file_name_s = os.listdir(locale_dir)
    choice_s = []
    for file_name in file_name_s:
        if file_name.endswith(I18n.TT_FILE_EXT_STXT):
            file_name_noext, _ = os.path.splitext(file_name)
            if file_name_noext:
                choice_s.append(file_name_noext)
    choice_s = sorted(choice_s)
    return choice_s
Get a list of locale file names in the given locale dir.
def __prepare_info_from_dicomdir_file(self, writedicomdirfile=True):
    createdcmdir = True
    dicomdirfile = os.path.join(self.dirpath, self.dicomdir_filename)
    ftype = 'pickle'
    if os.path.exists(dicomdirfile):
        try:
            dcmdirplus = misc.obj_from_file(dicomdirfile, ftype)
            if dcmdirplus['version'] == __version__:
                createdcmdir = False
            dcmdir = dcmdirplus['filesinfo']
        except Exception:
            logger.debug('Found dicomdir.pkl with wrong version')
            createdcmdir = True
    if createdcmdir or self.force_create_dicomdir:
        dcmdirplus = self._create_dicomdir_info()
        dcmdir = dcmdirplus['filesinfo']
        if (writedicomdirfile) and len(dcmdir) > 0:
            try:
                misc.obj_to_file(dcmdirplus, dicomdirfile, ftype)
            except:
                logger.warning('Cannot write dcmdir file')
                traceback.print_exc()
            dcmdir = dcmdirplus['filesinfo']
    self.dcmdirplus = dcmdirplus
    self.files_with_info = dcmdir
    return dcmdir
Check whether a dicomdir file exists and load it, or create it.

dcmdir = get_dir(dirpath)
dcmdir: list with filenames, SeriesNumber and SliceLocation
def tilt_residual(params, data, mask):
    bg = tilt_model(params, shape=data.shape)
    res = (data - bg)[mask]
    return res.flatten()
lmfit tilt residuals
def _try_to_compute_deterministic_class_id(cls, depth=5):
    class_id = pickle.dumps(cls)
    for _ in range(depth):
        new_class_id = pickle.dumps(pickle.loads(class_id))
        if new_class_id == class_id:
            return hashlib.sha1(new_class_id).digest()
        class_id = new_class_id
    logger.warning(
        "WARNING: Could not produce a deterministic class ID for class "
        "{}".format(cls))
    return hashlib.sha1(new_class_id).digest()
Attempt to produce a deterministic class ID for a given class. The goal here is for the class ID to be the same when this is run on different worker processes. Pickling, loading, and pickling again seems to produce more consistent results than simply pickling. This is a bit crazy and could cause problems, in which case we should revert it and figure out something better. Args: cls: The class to produce an ID for. depth: The number of times to repeatedly try to load and dump the string while trying to reach a fixed point. Returns: A class ID for this class. We attempt to make the class ID the same when this function is run on different workers, but that is not guaranteed. Raises: Exception: This could raise an exception if cloudpickle raises an exception.
def _get_float(data, position, dummy0, dummy1, dummy2):
    end = position + 8
    return _UNPACK_FLOAT(data[position:end])[0], end
Decode a BSON double to python float.
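The helper relies on a module-level _UNPACK_FLOAT; a plausible equivalent definition and a round-trip check are sketched below (BSON stores doubles as 8-byte little-endian IEEE 754 values; the exact original definition is an assumption):

import struct

# Plausible stand-in for the module-level unpacker used above.
_UNPACK_FLOAT = struct.Struct("<d").unpack

data = struct.pack("<d", 2.5)
value, end = _get_float(data, 0, None, None, None)
print(value, end)  # 2.5 8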
def apply_boundary_conditions(self, **kwargs):
    polarval = kwargs[self._polar_angle]
    azval = kwargs[self._azimuthal_angle]
    polarval = self._polardist._domain.apply_conditions(polarval)
    azval = self._azimuthaldist._domain.apply_conditions(azval)
    polarval = self._bounds[self._polar_angle].apply_conditions(polarval)
    azval = self._bounds[self._azimuthal_angle].apply_conditions(azval)
    return {self._polar_angle: polarval, self._azimuthal_angle: azval}
Maps the given values to be within the domain of the azimuthal and polar angles, before applying any other boundary conditions.

Parameters
----------
\**kwargs :
    The keyword args must include values for both the azimuthal and polar angle, using the names they were initialized with. For example, if ``polar_angle='theta'`` and ``azimuthal_angle='phi'``, then the keyword args must be ``theta={val1}, phi={val2}``.

Returns
-------
dict
    A dictionary of the parameter names and the conditioned values.
def by_median_home_value(self,
                         lower=-1,
                         upper=2 ** 31,
                         zipcode_type=ZipcodeType.Standard,
                         sort_by=SimpleZipcode.median_home_value.name,
                         ascending=False,
                         returns=DEFAULT_LIMIT):
    return self.query(
        median_home_value_lower=lower,
        median_home_value_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
Search zipcode information by median home value.
def pivot_wavelength(self):
    wl = self.registry._pivot_wavelengths.get((self.telescope, self.band))
    if wl is not None:
        return wl
    wl = self.calc_pivot_wavelength()
    self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
    return wl
Get the bandpass' pivot wavelength. Unlike calc_pivot_wavelength(), this function will use a cached value if available.
def get_last(self):
    query = self.table().where('batch', self.get_last_batch_number())
    return query.order_by('migration', 'desc').get()
Get the last migration batch. :rtype: list
def add(self, document_data, document_id=None):
    if document_id is None:
        parent_path, expected_prefix = self._parent_info()
        document_pb = document_pb2.Document()
        created_document_pb = self._client._firestore_api.create_document(
            parent_path,
            collection_id=self.id,
            document_id=None,
            document=document_pb,
            mask=None,
            metadata=self._client._rpc_metadata,
        )
        new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
        document_ref = self.document(new_document_id)
        set_result = document_ref.set(document_data)
        return set_result.update_time, document_ref
    else:
        document_ref = self.document(document_id)
        write_result = document_ref.create(document_data)
        return write_result.update_time, document_ref
Create a document in the Firestore database with the provided data. Args: document_data (dict): Property names and values to use for creating the document. document_id (Optional[str]): The document identifier within the current collection. If not provided, an ID will be automatically assigned by the server (the assigned ID will be a random 20 character string composed of digits, uppercase and lowercase letters). Returns: Tuple[google.protobuf.timestamp_pb2.Timestamp, \ ~.firestore_v1beta1.document.DocumentReference]: Pair of * The ``update_time`` when the document was created (or overwritten). * A document reference for the created document. Raises: ~google.cloud.exceptions.Conflict: If ``document_id`` is provided and the document already exists.
def query_by_student(self, student_id, end_time=None, start_time=None):
    path = {}
    data = {}
    params = {}
    path["student_id"] = student_id
    if start_time is not None:
        params["start_time"] = start_time
    if end_time is not None:
        params["end_time"] = end_time
    self.logger.debug(
        "GET /api/v1/audit/grade_change/students/{student_id} "
        "with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "GET",
        "/api/v1/audit/grade_change/students/{student_id}".format(**path),
        data=data, params=params, all_pages=True)
Query by student. List grade change events for a given student.
def retrieve_customer(self, handle, with_additional_data=False):
    response = self.request(E.retrieveCustomerRequest(
        E.handle(handle),
        E.withAdditionalData(int(with_additional_data)),
    ))
    return response.as_model(Customer)
Retrieve information of an existing customer.
def clear_list_value(self, value):
    if not value:
        return self.empty_value
    if self.clean_empty:
        value = [v for v in value if v]
    return value or self.empty_value
Clean the argument value to eliminate None or Falsy values if needed.
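A minimal sketch of a host object, assuming only the two attributes the method reads (empty_value and clean_empty); the class name and attribute values are illustrative:

class Cleaner:
    empty_value = None
    clean_empty = True
    clear_list_value = clear_list_value  # reuse the function above as a method

c = Cleaner()
print(c.clear_list_value([]))            # None  (empty input -> empty_value)
print(c.clear_list_value([0, '', 'a']))  # ['a'] (falsy entries dropped)
print(c.clear_list_value([0, None]))     # None  (everything dropped)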
def _wire_events(self):
    self._device.on_open += self._on_open
    self._device.on_close += self._on_close
    self._device.on_read += self._on_read
    self._device.on_write += self._on_write
    self._zonetracker.on_fault += self._on_zone_fault
    self._zonetracker.on_restore += self._on_zone_restore
Wires up the internal device events.
def reward_bonus(self, assignment_id, amount, reason):
    try:
        return self.mturkservice.grant_bonus(assignment_id, amount, reason)
    except MTurkServiceException as ex:
        logger.exception(str(ex))
Reward the Turker for a specified assignment with a bonus.
def inspect_select_calculation(self):
    try:
        node = self.ctx.cif_select
        self.ctx.cif = node.outputs.cif
    except exceptions.NotExistent:
        self.report('aborting: CifSelectCalculation<{}> did not return the required cif output'.format(node.uuid))
        return self.exit_codes.ERROR_CIF_SELECT_FAILED
Inspect the result of the CifSelectCalculation, verifying that it produced a CifData output node.
def from_chords(self, chords, duration=1):
    tun = self.get_tuning()

    def add_chord(chord, duration):
        if type(chord) == list:
            for c in chord:
                add_chord(c, duration * 2)
        else:
            chord = NoteContainer().from_chord(chord)
            if tun:
                chord = tun.find_chord_fingering(chord,
                                                 return_best_as_NoteContainer=True)
            if not self.add_notes(chord, duration):
                dur = self.bars[-1].value_left()
                self.add_notes(chord, dur)
                self.add_notes(chord, value.subtract(duration, dur))

    for c in chords:
        if c is not None:
            add_chord(c, duration)
        else:
            self.add_notes(None, duration)
    return self
Add chords to the Track. The given chords should be a list of shorthand strings or list of list of shorthand strings, etc. Each sublist divides the value by 2. If a tuning is set, chords will be expanded so they have a proper fingering. Example: >>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
def extract_coverage(self, container: Container) -> FileLineSet:
    uid = container.uid
    r = self.__api.post('containers/{}/read-coverage'.format(uid))
    if r.status_code == 200:
        return FileLineSet.from_dict(r.json())
    self.__api.handle_erroneous_response(r)
Extracts a report of the lines that have been executed since the last time that a coverage report was extracted.
def size_r_img_inches(width, height):
    aspect_ratio = height / (1.0 * width)
    return R_IMAGE_SIZE, round(aspect_ratio * R_IMAGE_SIZE, 2)
Compute the width and height for an R image for display in IPython. Neither width nor height can be null; both should be integer pixel values > 0. Returns a tuple of (width, height) that should be used by ggsave in R to produce an appropriately sized jpeg/png/pdf image with the right aspect ratio. The returned values are in inches.
def package_releases(self, project_name):
    try:
        return self._connection.package_releases(project_name)
    except Exception as err:
        raise PyPIClientError(err)
Retrieve the versions from PyPI by ``project_name``. Args: project_name (str): The name of the project we wish to retrieve the versions of. Returns: list: Of string versions.
def get_remote_port_id_local(self, tlv_data):
    ret, parsed_val = self._check_common_tlv_format(
        tlv_data, "Local:", "Port ID TLV")
    if not ret:
        return None
    local = parsed_val[1].split('\n')
    return local[0].strip()
Returns Remote Port ID Local from the TLV.
def _generate_badges(self):
    daycount = self._stats.downloads_per_day
    day = self._generate_badge('Downloads', '%d/day' % daycount)
    self._badges['per-day'] = day
    weekcount = self._stats.downloads_per_week
    if weekcount is None:
        return
    week = self._generate_badge('Downloads', '%d/week' % weekcount)
    self._badges['per-week'] = week
    monthcount = self._stats.downloads_per_month
    if monthcount is None:
        return
    month = self._generate_badge('Downloads', '%d/month' % monthcount)
    self._badges['per-month'] = month
Generate download badges. Append them to ``self._badges``.
def _init_add_goid_alt(self):
    goid_alts = set()
    go2cnt_add = {}
    aspect_counts = self.aspect_counts
    gocnts = self.gocnts
    go2obj = self.go2obj
    for go_id, cnt in gocnts.items():
        goobj = go2obj[go_id]
        assert cnt, "NO TERM COUNTS FOR {GO}".format(GO=goobj.item_id)
        if go_id != goobj.item_id:
            go2cnt_add[goobj.item_id] = cnt
        goid_alts |= goobj.alt_ids
        aspect_counts[goobj.namespace] += cnt
    for goid, cnt in go2cnt_add.items():
        gocnts[goid] = cnt
    for alt_goid in goid_alts.difference(gocnts):
        goobj = go2obj[alt_goid]
        cnt = gocnts[goobj.item_id]
        assert cnt, "NO TERM COUNTS FOR ALT_ID({GOa}) ID({GO}): {NAME}".format(
            GOa=alt_goid, GO=goobj.item_id, NAME=goobj.name)
        gocnts[alt_goid] = cnt
Add alternate GO IDs to term counts.
def on_resolve(target, func, *args, **kwargs):
    return _register_hook(ON_RESOLVE, target, func, *args, **kwargs)
Register a resolution hook.
def toggleDrawingSensitive(self, drawing=True):
    self.actions.editMode.setEnabled(not drawing)
    if not drawing and self.beginner():
        print('Cancel creation.')
        self.canvas.setEditing(True)
        self.canvas.restoreCursor()
        self.actions.create.setEnabled(True)
In the middle of drawing, toggling between modes should be disabled.
def angle2vecs(vec1, vec2):
    dot = np.dot(vec1, vec2)
    vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum())
    vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum())
    if (vec1_modulus * vec2_modulus) == 0:
        cos_angle = 1
    else:
        cos_angle = dot / (vec1_modulus * vec2_modulus)
    return math.degrees(acos(cos_angle))
angle between two vectors
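A quick numeric check, assuming the numpy/math imports the function expects at module level:

import math
from math import acos
import numpy as np

print(angle2vecs(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # ~90.0
print(angle2vecs(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # ~0.0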
def _update_prx(self):
    qx = scipy.ones(N_CODON, dtype='float')
    for j in range(3):
        for w in range(N_NT):
            qx[CODON_NT[j][w]] *= self.phi[w]
    frx = self.pi_codon**self.beta
    self.prx = frx * qx
    with scipy.errstate(divide='raise', under='raise', over='raise',
                        invalid='raise'):
        for r in range(self.nsites):
            self.prx[r] /= self.prx[r].sum()
Update `prx` from `phi`, `pi_codon`, and `beta`.
def fmt_transition(t):
    return "Transition({} {} {})".format(
        fmt_mechanism(t.cause_indices, t.node_labels),
        ARROW_RIGHT,
        fmt_mechanism(t.effect_indices, t.node_labels))
Format a |Transition|.
def delete(self):
    response = self._client._request(
        'DELETE', self._client._build_url('service', service_id=self.id))
    if response.status_code != requests.codes.no_content:
        raise APIError("Could not delete service: {} with id {}".format(
            self.name, self.id))
Delete this service.

:raises APIError: if the delete was not successful.
def update(self, **args):
    data = json.dumps(args)
    r = requests.put(
        "https://kippt.com/api/clips/%s" % (self.id),
        headers=self.kippt.header,
        data=data)
    return r.json()
Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md
def _collapse_header(self, header):
    out = []
    for i, h in enumerate(header):
        if h.startswith(self._col_quals):
            out[-1].append(i)
        else:
            out.append([i])
    return out
Combine header columns into related groups.
def set(self, code):
    if self.update:
        self.vertices_substitution_dict, self.edges_substitution_dict, self.match_info = \
            self.match.get_variables_substitution_dictionaries(self.g, self.matching_graph)
    try:
        self.matching_graph = self.__apply_code_to_graph(code, self.matching_graph)
    except:
        pass
    try:
        code = self.__substitute_names_in_code(code)
        self.g = self.__apply_code_to_graph(code, self.g)
    except:
        pass
    return True
Executes the code and apply it to the self.g :param code: the LISP code to execute :return: True/False, depending on the result of the LISP code
def delete_session(self, ticket):
    assert isinstance(self.session_storage_adapter, CASSessionAdapter)
    logging.debug('[CAS] Deleting session for ticket {}'.format(ticket))
    self.session_storage_adapter.delete(ticket)
Delete a session record associated with a service ticket.
def display_arr(screen, arr, video_size, transpose):
    if transpose:
        pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))
    else:
        pyg_img = arr
    pyg_img = pygame.transform.scale(pyg_img, video_size)
    screen.blit(pyg_img, (0, 0))
Display an image to the pygame screen. Args: screen (pygame.Surface): the pygame surface to write frames to arr (np.ndarray): numpy array representing a single frame of gameplay video_size (tuple): the size to render the frame as transpose (bool): whether to transpose the frame before displaying Returns: None
def _caveat_v1_to_dict(c):
    serialized = {}
    if len(c.caveat_id) > 0:
        serialized['cid'] = c.caveat_id
    if c.verification_key_id:
        serialized['vid'] = utils.raw_urlsafe_b64encode(
            c.verification_key_id).decode('utf-8')
    if c.location:
        serialized['cl'] = c.location
    return serialized
Return a caveat as a dictionary for export as the JSON macaroon v1 format.
def _sumDiceRolls(self, rollList):
    if isinstance(rollList, RollList):
        self.rolls.append(rollList)
        return rollList.sum()
    else:
        return rollList
convert from dice roll structure to a single integer result
def _peek(self, *types):
    tok = self._scanner.token(self._pos, types)
    return tok[2]
Returns the token type for lookahead; if there are any args then the list of args is the set of token types to allow
def remove_from_tor(self, protocol):
    r = yield protocol.queue_command('DEL_ONION %s' % self.hostname[:-6])
    if r.strip() != 'OK':
        raise RuntimeError('Failed to remove hidden service: "%s".' % r)
Returns a Deferred which fires with None
def remove_autosave_file(self, fileinfo):
    filename = fileinfo.filename
    if filename not in self.name_mapping:
        return
    autosave_filename = self.name_mapping[filename]
    try:
        os.remove(autosave_filename)
    except EnvironmentError as error:
        action = (_('Error while removing autosave file {}')
                  .format(autosave_filename))
        msgbox = AutosaveErrorDialog(action, error)
        msgbox.exec_if_enabled()
    del self.name_mapping[filename]
    self.stack.sig_option_changed.emit('autosave_mapping', self.name_mapping)
    logger.debug('Removing autosave file %s', autosave_filename)
Remove autosave file for specified file. This function also updates `self.autosave_mapping` and clears the `changed_since_autosave` flag.
def show_instance(name, call=None):
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
Displays details about a particular Linode VM. Either a name or a linode_id must be provided. .. versionadded:: 2015.8.0 name The name of the VM for which to display details. CLI Example: .. code-block:: bash salt-cloud -a show_instance vm_name .. note:: The ``image`` label only displays information about the VM's distribution vendor, such as "Debian" or "RHEL" and does not display the actual image name. This is due to a limitation of the Linode API.
def is_in_current_deployment(server, extra_prefix=""):
    return re.match(r"^%s" % '-'.join([DEFAULT_PREFIX, extra_prefix]),
                    server.name) is not None
Check whether an existing server in the system takes part in the current deployment.
def object_factory(api, api_version, kind):
    resource_list = api.resource_list(api_version)
    resource = next(
        (resource for resource in resource_list["resources"] if resource["kind"] == kind),
        None)
    base = NamespacedAPIObject if resource["namespaced"] else APIObject
    return type(kind, (base,), {
        "version": api_version,
        "endpoint": resource["name"],
        "kind": kind
    })
Dynamically builds a Python class for the given Kubernetes object in an API. For example: api = pykube.HTTPClient(...) NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy") This enables construction of any Kubernetes object kind without explicit support from pykube. Currently, the HTTPClient passed to this function will not be bound to the returned type. It is planned to fix this, but in the mean time pass it as you would normally.
def remove_diagonal(S):
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    if S.shape[0] != S.shape[1]:
        raise ValueError('expected square matrix, shape=%s' % (S.shape,))
    S = coo_matrix(S)
    mask = S.row != S.col
    S.row = S.row[mask]
    S.col = S.col[mask]
    S.data = S.data[mask]
    return S.tocsr()
Remove the diagonal of the matrix S. Parameters ---------- S : csr_matrix Square matrix Returns ------- S : csr_matrix Strength matrix with the diagonal removed Notes ----- This is needed by all the splitting routines which operate on matrix graphs with an assumed zero diagonal Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg.util.utils import remove_diagonal >>> A = poisson( (4,), format='csr' ) >>> C = remove_diagonal(A) >>> C.todense() matrix([[ 0., -1., 0., 0.], [-1., 0., -1., 0.], [ 0., -1., 0., -1.], [ 0., 0., -1., 0.]])
def size(self):
    import tensorflow as tf

    if self._size is None:
        self._size = 0
        options = tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.GZIP)
        for tfexample_file in self.files:
            self._size += sum(1 for x in tf.python_io.tf_record_iterator(
                tfexample_file, options=options))
    return self._size
The number of instances in the data. If the underlying data source changes, it may be outdated.
def insert(self, index, item):
    super(ObservableList, self).insert(index, item)
    length = len(self)
    if index >= length:
        index = length - 1
    elif index < 0:
        index += length - 1
        if index < 0:
            index = 0
    self._notify_add_at(index)
See list.insert.
def expectation(self, operator: Union[PauliTerm, PauliSum]):
    if not isinstance(operator, PauliSum):
        operator = PauliSum([operator])
    return sum(_term_expectation(self.wf, term, n_qubits=self.n_qubits)
               for term in operator)
Compute the expectation of an operator. :param operator: The operator :return: The operator's expectation value
def pp(i, base=1024):
    degree = 0
    pattern = "%4d %s"
    while i > base:
        pattern = "%7.2f %s"
        i = i / float(base)
        degree += 1
    scales = ['B', 'KB', 'MB', 'GB', 'TB', 'EB']
    return pattern % (i, scales[degree])
Pretty-print the integer `i` as a human-readable size representation.
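A few sample calls; the exact padding comes from the printf-style patterns above:

print(pp(512))          # " 512 B"
print(pp(2048))         # "   2.00 KB"
print(pp(5 * 1024**3))  # "   5.00 GB"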
def _original_images(self, **kwargs):

    def test(image):
        if not image.original:
            return False
        for filter, value in kwargs.items():
            if getattr(image, filter) != value:
                return False
        return True

    if Session.object_session(self.instance) is None:
        images = []
        for image, store in self._stored_images:
            if test(image):
                images.append(image)
        state = instance_state(self.instance)
        try:
            added = state.committed_state[self.attr.key].added_items
        except KeyError:
            pass
        else:
            for image in added:
                if test(image):
                    images.append(image)
        if self.session:
            for image in self.session.new:
                if test(image):
                    images.append(image)
    else:
        query = self.filter_by(original=True, **kwargs)
        images = query.all()
    return images
A list of the original images. :returns: A list of the original images. :rtype: :class:`typing.Sequence`\ [:class:`Image`]
def base64_b64decode(instr):
    decoded = base64.b64decode(salt.utils.stringutils.to_bytes(instr))
    try:
        return salt.utils.stringutils.to_unicode(
            decoded,
            encoding='utf8' if salt.utils.platform.is_windows() else None
        )
    except UnicodeDecodeError:
        return decoded
Decode a base64-encoded string using the "modern" Python interface.
async def get_analog_map(self):
    current_time = time.time()
    if self.query_reply_data.get(
            PrivateConstants.ANALOG_MAPPING_RESPONSE) is None:
        await self._send_sysex(PrivateConstants.ANALOG_MAPPING_QUERY)
        while self.query_reply_data.get(
                PrivateConstants.ANALOG_MAPPING_RESPONSE) is None:
            elapsed_time = time.time()
            if elapsed_time - current_time > 4:
                return None
            await asyncio.sleep(self.sleep_tune)
    return self.query_reply_data.get(
        PrivateConstants.ANALOG_MAPPING_RESPONSE)
This method requests a Firmata analog map query and returns the results. :returns: An analog map response or None if a timeout occurs
def list_documents(self, limit=None):
    limit_str = ''
    if limit:
        try:
            limit_str = 'LIMIT {}'.format(int(limit))
        except (TypeError, ValueError):
            pass
    query = 'SELECT identifier FROM identifier_index ' + limit_str
    for row in self.backend.library.database.connection.execute(query).fetchall():
        yield row['identifier']
Generates vids of all indexed identifiers. Args: limit (int, optional): If not empty, the maximum number of results to return Generates: str: vid of the document.
def reset(self):
    "Initialises all needed variables to default values"
    self.metadata = {}
    self.items = []
    self.spine = []
    self.guide = []
    self.pages = []
    self.toc = []
    self.bindings = []
    self.IDENTIFIER_ID = 'id'
    self.FOLDER_NAME = 'EPUB'
    self._id_html = 0
    self._id_image = 0
    self._id_static = 0
    self.title = ''
    self.language = 'en'
    self.direction = None
    self.templates = {
        'ncx': NCX_XML,
        'nav': NAV_XML,
        'chapter': CHAPTER_XML,
        'cover': COVER_XML
    }
    self.add_metadata('OPF', 'generator', '', {
        'name': 'generator',
        'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION])
    })
    self.set_identifier(str(uuid.uuid4()))
    self.prefixes = []
    self.namespaces = {}
Initialises all needed variables to default values
def previous(self, day_of_week=None):
    if day_of_week is None:
        day_of_week = self.day_of_week
    if day_of_week < SUNDAY or day_of_week > SATURDAY:
        raise ValueError("Invalid day of week")
    dt = self.subtract(days=1)
    while dt.day_of_week != day_of_week:
        dt = dt.subtract(days=1)
    return dt
Modify to the previous occurrence of a given day of the week. If no day_of_week is provided, modify to the previous occurrence of the current day of the week. Use the supplied consts to indicate the desired day_of_week, ex. pendulum.MONDAY. :param day_of_week: The previous day of week to reset to. :type day_of_week: int or None :rtype: Date
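This appears to be pendulum's Date.previous; a usage sketch assuming pendulum is installed:

import pendulum

d = pendulum.date(2024, 6, 12)      # a Wednesday
print(d.previous(pendulum.MONDAY))  # 2024-06-10
print(d.previous())                 # previous Wednesday: 2024-06-05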
def AddMapping(self, filename, new_mapping):
    for field in self._REQUIRED_MAPPING_FIELDS:
        if field not in new_mapping:
            raise problems.InvalidMapping(field)
    if filename in self.GetKnownFilenames():
        raise problems.DuplicateMapping(filename)
    self._file_mapping[filename] = new_mapping
Adds an entry to the list of known filenames. Args: filename: The filename whose mapping is being added. new_mapping: A dictionary with the mapping to add. Must contain all fields in _REQUIRED_MAPPING_FIELDS. Raises: DuplicateMapping if the filename already exists in the mapping InvalidMapping if not all required fields are present
def format_file_path(filepath):
    try:
        is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
        filepath = os.path.realpath(os.path.abspath(filepath))
        filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
        is_windows_drive = WINDOWS_DRIVE_PATTERN.match(filepath)
        if is_windows_drive:
            filepath = filepath.capitalize()
        if is_windows_network_mount:
            filepath = '/' + filepath
    except:
        pass
    return filepath
Formats a path as absolute and with the correct platform separator.
def _validate_auth(self, path, obj, _):
    errs = []
    if obj.type == 'apiKey':
        if not obj.passAs:
            errs.append('need "passAs" for apiKey')
        if not obj.keyname:
            errs.append('need "keyname" for apiKey')
    elif obj.type == 'oauth2':
        if not obj.grantTypes:
            errs.append('need "grantTypes" for oauth2')
    return path, obj.__class__.__name__, errs
Validate the apiKey and oauth2 requirements.
def concretize_load_idx(self, idx, strategies=None):
    if isinstance(idx, int):
        return [idx]
    elif not self.state.solver.symbolic(idx):
        return [self.state.solver.eval(idx)]
    strategies = self.load_strategies if strategies is None else strategies
    return self._apply_concretization_strategies(idx, strategies, 'load')
Concretizes a load index. :param idx: An expression for the index. :param strategies: A list of concretization strategies (to override the default). :param min_idx: Minimum value for a concretized index (inclusive). :param max_idx: Maximum value for a concretized index (exclusive). :returns: A list of concrete indexes.
def update_ff(self, ff, mol2=False, force_ff_assign=False):
    aff = False
    if force_ff_assign:
        aff = True
    elif 'assigned_ff' not in self.tags:
        aff = True
    elif not self.tags['assigned_ff']:
        aff = True
    if aff:
        self.assign_force_field(ff, mol2=mol2)
    return
Manages assigning the force field parameters. The aim of this method is to avoid unnecessary assignment of the force field. Parameters ---------- ff: BuffForceField The force field to be used for scoring. mol2: bool, optional If true, mol2 style labels will also be used. force_ff_assign: bool, optional If true, the force field will be completely reassigned, ignoring the cached parameters.
def Fierz_to_Bern_chrom(C, dd, parameters):
    e = sqrt(4 * pi * parameters['alpha_e'])
    gs = sqrt(4 * pi * parameters['alpha_s'])
    if dd == 'sb' or dd == 'db':
        mq = parameters['m_b']
    elif dd == 'ds':
        mq = parameters['m_s']
    else:
        raise KeyError("Not sure what to do with quark mass for flavour {}".format(dd))
    return {
        '7gamma' + dd: gs**2 / e / mq * C['F7gamma' + dd],
        '8g' + dd: gs / mq * C['F8g' + dd],
        '7pgamma' + dd: gs**2 / e / mq * C['F7pgamma' + dd],
        '8pg' + dd: gs / mq * C['F8pg' + dd]
    }
From Fierz to chromomagnetic Bern basis for Class V. dd should be of the form 'sb', 'ds' etc.
def is_cached(self, link):
    if link is None:
        return False
    elif hasattr(link, 'uri'):
        return link.uri in self.id_map
    else:
        return link in self.id_map
Returns whether the current navigator is cached. Intended to be overwritten and customized by subclasses.
def reconnect(connection):
    if isinstance(connection, FflConnection):
        return type(connection)(connection.ffldir)
    kw = {'context': connection._context} if connection.port != 80 else {}
    return connection.__class__(connection.host, port=connection.port, **kw)
Open a new datafind connection based on an existing connection This is required because of https://git.ligo.org/lscsoft/glue/issues/1 Parameters ---------- connection : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection` a connection object (doesn't need to be open) Returns ------- newconn : :class:`~gwdatafind.http.HTTPConnection` or `FflConnection` the new open connection to the same `host:port` server
def write_csv_header(mol, csv_writer):
    line = []
    line.append('id')
    line.append('status')
    queryList = mol.properties.keys()
    for queryLabel in queryList:
        line.append(queryLabel)
    csv_writer.writerow(line)
Write the csv header
def HasColumn(self, table_name, column_name):
    if not self._connection:
        raise IOError('Not opened.')
    if not column_name:
        return False
    table_name = table_name.lower()
    column_names = self._column_names_per_table.get(table_name, None)
    if column_names is None:
        column_names = []
        self._cursor.execute(self._HAS_COLUMN_QUERY.format(table_name))
        for row in self._cursor.fetchall():
            if not row[1]:
                continue
            row_column_name = row[1]
            if isinstance(row_column_name, bytes):
                row_column_name = row_column_name.decode('utf-8')
            column_names.append(row_column_name.lower())
        self._column_names_per_table[table_name] = column_names
    column_name = column_name.lower()
    return column_name in column_names
Determines if a specific column exists. Args: table_name (str): name of the table. column_name (str): name of the column. Returns: bool: True if the column exists. Raises: IOError: if the database file is not opened. OSError: if the database file is not opened.
def show_syspath(self):
    editor = CollectionsEditor(parent=self)
    editor.setup(sys.path, title="sys.path", readonly=True,
                 width=600, icon=ima.icon('syspath'))
    self.dialog_manager.show(editor)
Show sys.path
async def restart_walk(self):
    if not self._restartwalk:
        self._restartwalk = True
        await self.wait_for_send(FlowUpdaterNotification(
            self, FlowUpdaterNotification.STARTWALK))
Force a re-walk
def source(self, source):
    BaseView.source.fset(self, source)
    if self.main_pane:
        self.main_pane.object = self.contents
        self.label_pane.object = self.label
When the source gets updated, update the pane object
def _GetAuthCookie(self, auth_token):
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("https://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
        response = self.opener.open(req)
    except urllib2.HTTPError, e:
        response = e
    if (response.code != 302 or
            response.info()["location"] != continue_location):
        raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                                response.headers, response.fp)
    self.authenticated = True
Fetches authentication cookies for an authentication token. Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies.
def distribution(self, start=None, end=None, normalized=True, mask=None):
    start, end, mask = self._check_boundaries(start, end, mask=mask)
    counter = histogram.Histogram()
    for start, end, _ in mask.iterperiods(value=True):
        for t0, t1, value in self.iterperiods(start, end):
            duration = utils.duration_to_number(
                t1 - t0,
                units='seconds',
            )
            try:
                counter[value] += duration
            except histogram.UnorderableElements as e:
                counter = histogram.Histogram.from_dict(
                    dict(counter), key=hash)
                counter[value] += duration
    if normalized:
        return counter.normalized()
    else:
        return counter
Calculate the distribution of values over the given time range from `start` to `end`. Args: start (orderable, optional): The lower time bound of when to calculate the distribution. By default, the first time point will be used. end (orderable, optional): The upper time bound of when to calculate the distribution. By default, the last time point will be used. normalized (bool): If True, distribution will sum to one. If False and the time values of the TimeSeries are datetimes, the units will be seconds. mask (:obj:`TimeSeries`, optional): A domain on which to calculate the distribution. Returns: :obj:`Histogram` with the results.
def query(self, design, view, use_devmode=False, **kwargs):
    design = self._mk_devmode(design, use_devmode)
    itercls = kwargs.pop('itercls', View)
    return itercls(self, design, view, **kwargs)
Query a pre-defined MapReduce view, passing parameters. This method executes a view on the cluster. It accepts various parameters for the view and returns an iterable object (specifically, a :class:`~.View`). :param string design: The design document :param string view: The view function contained within the design document :param boolean use_devmode: Whether the view name should be transformed into a development-mode view. See documentation on :meth:`~.BucketManager.design_create` for more explanation. :param kwargs: Extra arguments passed to the :class:`~.View` object constructor. :param kwargs: Additional parameters passed to the :class:`~.View` constructor. See that class' documentation for accepted parameters. .. seealso:: :class:`~.View` contains more extensive documentation and examples :class:`couchbase.views.params.Query` contains documentation on the available query options :class:`~.SpatialQuery` contains documentation on the available query options for Geospatial views. .. note:: To query a spatial view, you must explicitly use the :class:`.SpatialQuery`. Passing key-value view parameters in ``kwargs`` is not supported for spatial views.
def get_container_names(self):
    current_containers = self.containers(all=True)
    return set(c_name[1:] for c in current_containers for c_name in c['Names'])
Fetches names of all present containers from Docker. :return: All container names. :rtype: set
def search_index_simple(self, index, key, search_term):
    request = self.session
    url = 'http://%s:%s/%s/_search?q=%s:%s' % (self.host, self.port, index,
                                               key, search_term)
    response = request.get(url)
    return response
Search the index using a simple key and search_term @param index Name of the index @param key Search Key @param search_term The term to be searched for
def cls_get_by_name(cls, name):
    try:
        val = getattr(cls, name)
    except AttributeError:
        for attr in (a for a in dir(cls) if not a.startswith('_')):
            try:
                val = getattr(cls, attr)
            except AttributeError:
                continue
            valname = getattr(val, 'name', None)
            if valname == name:
                return val
        else:
            raise ValueError('No {} with that name: {}'.format(
                cls.__name__,
                name,
            ))
    else:
        return val
Return a class attribute by searching the attributes `name` attribute.
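A self-contained usage sketch with made-up classes; the first lookup hits the attribute directly, the second falls through to the name search and then fails:

class Color:
    class red:
        name = 'red'
    class blue:
        name = 'blue'

print(cls_get_by_name(Color, 'red').name)  # 'red'
try:
    cls_get_by_name(Color, 'green')
except ValueError as exc:
    print(exc)  # No Color with that name: green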
def login_service_description(self):
    label = 'Login to ' + self.name
    if self.auth_type:
        label = label + ' (' + self.auth_type + ')'
    desc = {"@id": self.login_uri,
            "profile": self.profile_base + self.auth_pattern,
            "label": label}
    if self.header:
        desc['header'] = self.header
    if self.description:
        desc['description'] = self.description
    return desc
Login service description. The login service description _MUST_ include the token service description. The authentication pattern is indicated via the profile URI which is built using self.auth_pattern.
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
    with tf.name_scope(None, default_name="compute_nats_per_dim"):
        data_dim = tf.cast(data_dim, average_reconstruction.dtype)
        latent_dim = tf.cast(latent_dim, average_prior.dtype)
        negative_log_likelihood = data_dim * average_reconstruction
        negative_log_prior = latent_dim * average_prior
        negative_elbo = negative_log_likelihood + negative_log_prior
        nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
        bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
        return nats_per_dim, bits_per_dim
Computes negative ELBO, which is an upper bound on the negative likelihood. Args: data_dim: int-like indicating data dimensionality. latent_dim: int-like indicating latent dimensionality. average_reconstruction: Scalar Tensor indicating the reconstruction cost averaged over all data dimensions and any data batches. average_prior: Scalar Tensor indicating the negative log-prior probability averaged over all latent dimensions and any data batches. Returns: Tuple of scalar Tensors, representing the nats and bits per data dimension (e.g., subpixels) respectively.
def sendContact(self, context={}):
    for recipient in self.recipients:
        super(ContactFormMail, self).__init__(recipient, self.async)
        self.sendEmail('contactForm', 'New contact form message', context)
Send contact form message to single or multiple recipients
def _build_matches(matches, uuids, no_filtered, fastmode=False):
    result = []
    for m in matches:
        mk = m[0].uuid if not fastmode else m[0]
        subset = [uuids[mk]]
        for id_ in m[1:]:
            uk = id_.uuid if not fastmode else id_
            u = uuids[uk]
            if u not in subset:
                subset.append(u)
        result.append(subset)
    result += no_filtered
    result.sort(key=len, reverse=True)
    sresult = []
    for r in result:
        r.sort(key=lambda id_: id_.uuid)
        sresult.append(r)
    return sresult
Build a list with matching subsets
def has_adjacent_leaves_only(self):
    leaves = self.leaves()
    for i in range(len(leaves) - 1):
        current_interval = leaves[i].interval
        next_interval = leaves[i + 1].interval
        if not current_interval.is_adjacent_before(next_interval):
            return False
    return True
Return ``True`` if the sync map fragments which are the leaves of the sync map tree are all adjacent. :rtype: bool .. versionadded:: 1.7.0
def define_zip_index_for_species(names_ppn_world, number_names_ppn_world):
    global cl
    cl = {}
    for a, b in zip(names_ppn_world, number_names_ppn_world):
        cl[a] = b
This just gives back cl, that is, the original index as it is read from a data file.
def send_msg_multi(name, profile, recipients=None, rooms=None):
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}
    if recipients is None and rooms is None:
        ret['comment'] = "Recipients and rooms are empty, no need to send"
        return ret
    comment = ''
    if recipients:
        comment += ' users {0}'.format(recipients)
    if rooms:
        comment += ' rooms {0}'.format(rooms)
    comment += ', message: {0}'.format(name)
    if __opts__['test']:
        ret['comment'] = 'Need to send' + comment
        return ret
    __salt__['xmpp.send_msg_multi'](
        message=name,
        recipients=recipients,
        rooms=rooms,
        profile=profile,
    )
    ret['result'] = True
    ret['comment'] = 'Sent message to' + comment
    return ret
Send a message to a list of recipients or rooms.

.. code-block:: yaml

    server-warning-message:
      xmpp.send_msg:
        - name: 'This is a server warning message'
        - profile: my-xmpp-account
        - recipients:
          - [email protected]/salt
        - rooms:
          - [email protected]

name
    The message to send to the XMPP user