Dataset sample — columns: Unnamed: 0 (int64, values 0–389k), code (string, lengths 26–79.6k), docstring (string, lengths 1–46.9k)
18,900
def paint(str, color='r'):
    if color in switcher:
        str = switcher[color] + str + colorama.Style.RESET_ALL
    return str
Utility function for printing colorful logs in the console. @args: -- str : string to be modified. color : color code to apply to the string; default is 'r' = RED. @returns: -- str : final modified string with the foreground color set per the parameters.
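A minimal usage sketch, assuming a module-level `switcher` dict mapping single-letter codes to colorama foreground styles (the actual mapping is not part of this record):

import colorama

# Hypothetical switcher; the real dict lives elsewhere in the module.
switcher = {
    'r': colorama.Fore.RED,
    'g': colorama.Fore.GREEN,
    'b': colorama.Fore.BLUE,
}

print(paint("error: disk full", color='r'))  # printed in red, then reset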
18,901
def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None, center_data=True,
                      fit_offset=True, use_fft=True, freq_oversampling=5,
                      nyquist_factor=2, trig_sum_kwds=None):
    t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
    w = 1. / (dy ** 2)
    w /= w.sum()
    if df is None:
        peak_width = 1. / (t.max() - t.min())
        df = peak_width / freq_oversampling
    if Nf is None:
        avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
        Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
    Nf = int(Nf)
    assert df > 0
    assert Nf > 0
    freq = f0 + df * np.arange(Nf)
    # The weighted trig sums (Ch, Sh, C2, S2, C, S) and tan_2omega_tau are
    # computed here via trig_sum; that portion is missing from this record.
    S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
    Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
    YY = np.dot(w, y ** 2)
    YC = Ch * Cw + Sh * Sw
    YS = Sh * Cw - Ch * Sw
    CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
    SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
    if fit_offset:
        CC -= (C * Cw + S * Sw) ** 2
        SS -= (S * Cw - C * Sw) ** 2
    with warnings.catch_warnings():
        if fit_offset and f0 == 0:
            warnings.simplefilter("ignore")
        power = (YC * YC / CC + YS * YS / SS) / YY
    if np.isnan(power[0]) or np.isinf(power[0]):
        power[0] = 0
    return freq, power
Compute a lomb-scargle periodogram for the given data This implements both an O[N^2] method if use_fft==False, or an O[NlogN] method if use_fft==True. Parameters ---------- t, y, dy : array_like times, values, and errors of the data points. These should be broadcastable to the same shape. If dy is not specified, a constant error will be used. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). Defaults, with T = t.max() - t.min(): - f0 = 0 - df is set such that there are ``freq_oversampling`` points per peak width. ``freq_oversampling`` defaults to 5. - Nf is set such that the highest frequency is ``nyquist_factor`` times the so-called "average Nyquist frequency". ``nyquist_factor`` defaults to 2. Note that for unevenly-spaced data, the periodogram can be sensitive to frequencies far higher than the average Nyquist frequency. center_data : bool (default=True) Specify whether to subtract the mean of the data before the fit fit_offset : bool (default=True) If True, then compute the floating-mean periodogram; i.e. let the mean vary with the fit. use_fft : bool (default=True) If True, then use the Press & Rybicki O[NlogN] algorithm to compute the result. Otherwise, use a slower O[N^2] algorithm Other Parameters ---------------- freq_oversampling : float (default=5) Oversampling factor for the frequency bins. Only referenced if ``df`` is not specified nyquist_factor : float (default=2) Parameter controlling the highest probed frequency. Only referenced if ``Nf`` is not specified. trig_sum_kwds : dict or None (optional) extra keyword arguments to pass to the ``trig_sum`` utility. Options are ``oversampling`` and ``Mfft``. See documentation of ``trig_sum`` for details. Notes ----- Note that the ``use_fft=True`` algorithm is an approximation to the true Lomb-Scargle periodogram, and as the number of points grows this approximation improves. On the other hand, for very small datasets (<~50 points or so) this approximation may not be useful. References ---------- .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis of unevenly sampled data". ApJ 1:338, p277, 1989 .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [3] W. Press et al, Numerical Recipes in C (2002)
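A usage sketch, assuming the missing trig-sum section of the function above is restored (as in the gatspy/astroML implementations this record appears to come from):

import numpy as np

rng = np.random.RandomState(0)
t = 100 * rng.rand(200)                       # unevenly sampled times
y = np.sin(2 * np.pi * t) + 0.1 * rng.randn(200)

freq, power = lomb_scargle_fast(t, y, dy=0.1)
print(freq[np.argmax(power)])                 # expected to be close to 1.0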
18,902
def open_repository(path, spor_dir='.spor'):  # default directory name assumed
    root = _find_root_dir(path, spor_dir)
    return Repository(root, spor_dir)
Open an existing repository. Args: path: Path to any file or directory within the repository. spor_dir: The name of the directory containing spor data. Returns: A `Repository` instance. Raises: ValueError: No repository is found.
18,903
def draw(self, drawDC=None):
    DEBUG_MSG("draw()", 1, self)
    self.renderer = RendererWx(self.bitmap, self.figure.dpi)
    self.figure.draw(self.renderer)
    self._isDrawn = True
    self.gui_repaint(drawDC=drawDC)
Render the figure using RendererWx instance renderer, or using a previously defined renderer if none is specified.
18,904
def get_username(details, backend, response, *args, **kwargs):
    user = details.get('user')
    if not user:
        user_uuid = kwargs.get('new_uuid')  # key name inferred from the docstring
        if not user_uuid:
            return
        username = uuid_to_username(user_uuid)
    else:
        username = user.username
    return {'username': username}
Sets the `username` argument. If the user exists already, use the existing username. Otherwise generate username from the `new_uuid` using the `helusers.utils.uuid_to_username` function.
18,905
def lf_empirical_accuracies(L, Y):
    Y = arraylike_to_numpy(Y)
    L = L.toarray()
    X = np.where(L == 0, 0, np.where(L == np.vstack([Y] * L.shape[1]).T, 1, -1))
    return 0.5 * (X.sum(axis=0) / (L != 0).sum(axis=0) + 1)
Return the **empirical accuracy** against a set of labels Y (e.g. dev set) for each LF. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate Y: an [n] or [n, 1] np.ndarray of gold labels
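A small worked example, assuming the module's `arraylike_to_numpy` helper is importable; a label of 0 denotes an abstain:

import numpy as np
from scipy import sparse

# Two labeling functions over three candidates; 0 means "abstain".
L = sparse.csr_matrix([[1, 2],
                       [0, 1],
                       [2, 2]])
Y = np.array([1, 1, 2])
print(lf_empirical_accuracies(L, Y))  # [1.0, 0.667]: accuracy over non-abstains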
18,906
def vcenter_activate(self, **kwargs):
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    id_key = ET.SubElement(vcenter, "id")
    id_key.text = kwargs.pop('id')
    activate = ET.SubElement(vcenter, "activate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
18,907
def opt_with_frequency_flattener(cls, qchem_command, multimode="openmp",
                                 input_file="mol.qin", output_file="mol.qout",
                                 qclog_file="mol.qclog", max_iterations=10,
                                 max_molecule_perturb_scale=0.3,
                                 check_connectivity=True, **QCJob_kwargs):
    min_molecule_perturb_scale = 0.1
    scale_grid = 10
    perturb_scale_grid = (
        max_molecule_perturb_scale - min_molecule_perturb_scale
    ) / scale_grid
    if not os.path.exists(input_file):
        raise AssertionError()
    orig_opt_input = QCInput.from_file(input_file)
    orig_opt_rem = copy.deepcopy(orig_opt_input.rem)
    orig_freq_rem = copy.deepcopy(orig_opt_input.rem)
    orig_freq_rem["job_type"] = "freq"
    first = True
    reversed_direction = False
    num_neg_freqs = []
    for ii in range(max_iterations):
        yield (QCJob(
            qchem_command=qchem_command, multimode=multimode,
            input_file=input_file, output_file=output_file,
            qclog_file=qclog_file, suffix=".opt_" + str(ii),
            backup=first, **QCJob_kwargs))
        first = False
        opt_outdata = QCOutput(output_file + ".opt_" + str(ii)).data
        if (opt_outdata["structure_change"] == "unconnected_fragments"
                and not opt_outdata["completion"]):
            print("Unstable molecule broke into unconnected fragments which failed to optimize! Exiting...")
            break
        else:
            freq_QCInput = QCInput(
                molecule=opt_outdata.get("molecule_from_optimized_geometry"),
                rem=orig_freq_rem,
                opt=orig_opt_input.opt,
                pcm=orig_opt_input.pcm,
                solvent=orig_opt_input.solvent)
            freq_QCInput.write_file(input_file)
            yield (QCJob(
                qchem_command=qchem_command, multimode=multimode,
                input_file=input_file, output_file=output_file,
                qclog_file=qclog_file, suffix=".freq_" + str(ii),
                backup=first, **QCJob_kwargs))
            outdata = QCOutput(output_file + ".freq_" + str(ii)).data
            errors = outdata.get("errors")
            if len(errors) != 0:
                raise AssertionError()
            if outdata.get("frequencies")[0] > 0.0:
                print("All frequencies positive!")
                break
            else:
                num_neg_freqs += [sum(1 for freq in outdata.get("frequencies") if freq < 0)]
                if len(num_neg_freqs) > 1:
                    if num_neg_freqs[-1] == num_neg_freqs[-2] and not reversed_direction:
                        reversed_direction = True
                    elif num_neg_freqs[-1] == num_neg_freqs[-2] and reversed_direction:
                        if len(num_neg_freqs) < 3:
                            raise AssertionError("ERROR: This should only be possible after at least three frequency flattening iterations! Exiting...")
                        else:
                            raise Exception("ERROR: Reversing the perturbation direction still could not flatten any frequencies. Exiting...")
                    elif num_neg_freqs[-1] != num_neg_freqs[-2] and reversed_direction:
                        reversed_direction = False
                negative_freq_vecs = outdata.get("frequency_mode_vectors")[0]
                structure_successfully_perturbed = False
                for molecule_perturb_scale in np.arange(
                        max_molecule_perturb_scale, min_molecule_perturb_scale,
                        -perturb_scale_grid):
                    new_coords = perturb_coordinates(
                        old_coords=outdata.get("initial_geometry"),
                        negative_freq_vecs=negative_freq_vecs,
                        molecule_perturb_scale=molecule_perturb_scale,
                        reversed_direction=reversed_direction)
                    new_molecule = Molecule(
                        species=outdata.get("species"),
                        coords=new_coords,
                        charge=outdata.get("charge"),
                        spin_multiplicity=outdata.get("multiplicity"))
                    if check_connectivity:
                        old_molgraph = MoleculeGraph.with_local_env_strategy(
                            outdata.get("initial_molecule"), OpenBabelNN(),
                            reorder=False, extend_structure=False)
                        new_molgraph = MoleculeGraph.with_local_env_strategy(
                            new_molecule, OpenBabelNN(),
                            reorder=False, extend_structure=False)
                        if old_molgraph.isomorphic_to(new_molgraph):
                            structure_successfully_perturbed = True
                            break
                if not structure_successfully_perturbed:
                    raise Exception(
                        "ERROR: Unable to perturb coordinates to remove negative frequency without changing the connectivity! Exiting...")
                new_opt_QCInput = QCInput(
                    molecule=new_molecule,
                    rem=orig_opt_rem,
                    opt=orig_opt_input.opt,
                    pcm=orig_opt_input.pcm,
                    solvent=orig_opt_input.solvent)
                new_opt_QCInput.write_file(input_file)
Optimize a structure and calculate vibrational frequencies to check if the structure is at a true minimum. If a frequency is negative, iteratively perturb the geometry, optimize, and recalculate frequencies until all are positive, i.e. a true minimum has been found. Args: qchem_command (str): Command to run QChem. multimode (str): Parallelization scheme, either openmp or mpi. input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. max_iterations (int): Number of perturbation -> optimization -> frequency iterations to perform. Defaults to 10. max_molecule_perturb_scale (float): The maximum scaled perturbation that can be applied to the molecule. Defaults to 0.3. check_connectivity (bool): Whether to check differences in connectivity introduced by structural perturbation. Defaults to True. **QCJob_kwargs: Passthrough kwargs to QCJob. See :class:`custodian.qchem.jobs.QCJob`.
18,908
def _assertIndex(self, index):
    if type(index) is not int:
        raise TypeError()
    if index < 0 or index >= self.nelems:
        raise IndexError()
Raise TypeError or IndexError if index is not an integer or out of range for the number of elements in this array, respectively.
18,909
def main():
    config = Config()
    core = HostSearch()
    hosts = core.get_hosts(tags=['!nessus'], up=True)  # tag filter assumed
    hosts = [host for host in hosts]
    host_ips = ",".join([str(host.address) for host in hosts])
    url = config.get('nessus', 'host')  # config section/key names assumed
    access = config.get('nessus', 'access_key')
    secret = config.get('nessus', 'secret_key')
    template_name = config.get('nessus', 'template_name')
    nessus = Nessus(access, secret, url, template_name)
    scan_id = nessus.create_scan(host_ips)
    nessus.start_scan(scan_id)
    for host in hosts:
        host.add_tag('nessus')
        host.save()
    Logger().log("nessus",
                 "Nessus scan started on {} hosts".format(len(hosts)),
                 {'hosts': len(hosts)})
This function obtains hosts from core and starts a nessus scan on these hosts. The nessus tag is appended to the host tags.
18,910
def get_examples(self, compact=False):
    examples = copy.deepcopy(self._examples)
    if not compact:
        return examples

    def make_compact(d):
        if not isinstance(d, dict):
            return
        for key in d:
            if isinstance(d[key], dict):
                inner_d = d[key]
                if len(inner_d) == 1 and '.tag' in inner_d:
                    d[key] = inner_d['.tag']
                else:
                    make_compact(inner_d)
            if isinstance(d[key], list):
                for item in d[key]:
                    make_compact(item)

    for example in examples.values():
        if (isinstance(example.value, dict)
                and len(example.value) == 1
                and '.tag' in example.value):
            example.value = example.value['.tag']
        else:
            make_compact(example.value)
    return examples
Returns an OrderedDict mapping labels to Example objects. Args: compact (bool): If True, union members of void type are converted to their compact representation: no ".tag" key or containing dict, just the tag as a string.
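For illustration, the compact form collapses a one-key ".tag" dict into its tag string; a hypothetical example value:

# Non-compact form, as stored in self._examples:
value = {"resolution": {".tag": "high"}, "count": 3}
# After make_compact(value):
#   {"resolution": "high", "count": 3}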
18,911
def assign_statement(self):
    left = self.variable()
    op = self.cur_token
    self.eat(TokenTypes.ASSIGN)
    right = self.expression()
    smt = None
    if Features.TYPE_ARRAY in self.features and isinstance(left, GetArrayItem):
        smt = SetArrayItem(left.left, left.right, right)
    else:
        smt = Assign(op, left, right)
    if self.cur_token.type == TokenTypes.SEMI_COLON:
        self.eat(TokenTypes.SEMI_COLON)
    return smt
assign smt : variable ASSIGN expression(;) Feature Type Array adds: | variable SETITEM expression(;)
18,912
def mail_session(self, name, host, username, mail_from, props):
    return MailSession(self.__endpoint, name, host, username, mail_from, props)
Domain mail session. :param str name: Mail session name. :param str host: Mail host. :param str username: Mail username. :param str mail_from: Mail "from" address. :param dict props: Extra properties. :rtype: MailSession
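A hypothetical call with placeholder values (`domain` stands for the enclosing object; none of these names come from the source):

session = domain.mail_session(
    name='notifications',
    host='smtp.example.com',
    username='mailer',
    mail_from='noreply@example.com',
    props={'mail.smtp.port': '587'},
)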
18,913
def help_center_article_translation_update(self, article_id, locale, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation"
    api_path = "/api/v2/help_center/articles/{article_id}/translations/{locale}.json"
    api_path = api_path.format(article_id=article_id, locale=locale)
    return self.call(api_path, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/translations#update-translation
18,914
def _init_data_map(self):
    if self._data_map is None:
        self._data_map = {'_root': None}  # key name assumed; literal lost from this record
        self._data_map.update({}.fromkeys(self._metadata_props))
Default data map initialization: MUST be overridden in children
18,915
def adb_cmd(self, command, **kwargs):
    kwargs['timeout'] = kwargs.get('timeout', self._adb_shell_timeout)
    if isinstance(command, (list, tuple)):
        return self.adb_device.run_cmd(*list(command), **kwargs)
    return self.adb_device.run_cmd(command, **kwargs)
Run adb command, for example: adb(['pull', '/data/local/tmp/a.png']) Args: command: string or list of string Returns: command output
18,916
def dos_plot_data(self, yscale=1, xmin=-6., xmax=6., colours=None,
                  plot_total=True, legend_cutoff=3, subplot=False,
                  zero_to_efermi=True, cache=None):
    if cache is None:
        cache = colour_cache
    dos = self._dos
    pdos = self._pdos
    eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
    mask = (eners >= xmin - 0.05) & (eners <= xmax + 0.05)
    plot_data = {'mask': mask, 'energies': eners}
    spins = dos.densities.keys()
    ymax = 0
    if plot_total:
        if 'text.color' in matplotlib.rcParams:  # rcParams key assumed
            tdos_colour = matplotlib.rcParams['text.color']
            if tdos_colour is None:
                tdos_colour = 'k'
        else:
            tdos_colour = 'k'
        lines = []
        tdos = {'label': 'Total DOS', 'dens': dos.densities,
                'colour': tdos_colour, 'alpha': 0.15}
        lines.append([tdos])
        dmax = max([max(d[mask]) for d in dos.densities.values()])
        ymax = dmax if dmax > ymax else ymax
    elif not subplot:
        lines = [[]]
    else:
        lines = []
    cutoff = (legend_cutoff / 100.) * (ymax / 1.05)
    for el, el_pdos in pdos.items():
        el_lines = []
        for orb in sort_orbitals(el_pdos):
            dmax = max([max(d[mask]) for d in el_pdos[orb].densities.values()])
            ymax = dmax if dmax > ymax else ymax
            label = None if dmax < cutoff else '{} ({})'.format(el, orb)
            colour, cache = get_cached_colour(el, orb, colours, cache=cache)
            el_lines.append({'label': label, 'alpha': 0.25, 'colour': colour,
                             'dens': el_pdos[orb].densities})
        if subplot:
            lines.append(el_lines)
        else:
            lines[0].extend(el_lines)
    ymax = ymax * empty_space / yscale  # empty_space is a module-level constant
    ymin = 0 if len(spins) == 1 else -ymax
    plot_data.update({'lines': lines, 'ymax': ymax, 'ymin': ymin})
    return plot_data
Get the plotting data. Args: yscale (:obj:`float`, optional): Scaling factor for the y-axis. xmin (:obj:`float`, optional): The minimum energy to mask the energy and density of states data (reduces plotting load). xmax (:obj:`float`, optional): The maximum energy to mask the energy and density of states data (reduces plotting load). colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb value, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. cache (:obj:`dict`, optional): Cache object tracking how colours have been assigned to orbitals. The format is the same as the "colours" dict. This defaults to the module-level sumo.plotting.colour_cache object, but an empty dict can be used as a fresh cache. This object will be modified in-place. Returns: dict: The plotting data. Formatted with the following keys: "energies" (:obj:`numpy.ndarray`) The energies. "mask" (:obj:`numpy.ndarray`) A mask used to trim the density of states data and prevent unwanted data being included in the output file. "lines" (:obj:`list`) A :obj:`list` of :obj:`dict` containing the density data and some metadata. Each line :obj:`dict` contains the keys: "label" (:obj:`str`) The label for the legend. "dens" (:obj:`numpy.ndarray`) The density of states data. "colour" (:obj:`str`) The colour of the line. "alpha" (:obj:`float`) The alpha value for line fill. "ymin" (:obj:`float`) The minimum y-axis limit. "ymax" (:obj:`float`) The maximum y-axis limit.
18,917
async def shuffle_participants(self):
    res = await self.connection(
        'POST', 'tournaments/{}/participants/randomize'.format(self._id))
    self._refresh_participants_from_json(res)
Shuffle participants' seeds |methcoro| Note: |from_api| Randomize seeds among participants. Only applicable before a tournament has started. Raises: APIException
18,918
def process_response(self, request, response):
    add_never_cache_headers(response)
    if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
        return response
    if not response.status_code == 200:
        return response
    if request._cache_middleware_key:
        cache_key = request._cache_middleware_key
    else:
        cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)
    cache.set(cache_key, (time.time(), response), self.cache_timeout)
    return response
Sets the cache, if needed.
18,919
def cbpdnmd_xstep(k):
    YU0 = mp_Z_Y0[k] + mp_S[k] - mp_Z_U0[k]
    YU1 = mp_Z_Y1[k] - mp_Z_U1[k]
    if mp_cri.Cd == 1:
        b = np.conj(mp_Df) * sl.rfftn(YU0, None, mp_cri.axisN) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvedbi_sm(mp_Df, 1.0, b, axis=mp_cri.axisM)
    else:
        b = sl.inner(np.conj(mp_Df), sl.rfftn(YU0, None, mp_cri.axisN),
                     axis=mp_cri.axisC) + \
            sl.rfftn(YU1, None, mp_cri.axisN)
        Xf = sl.solvemdbi_ism(mp_Df, 1.0, b, mp_cri.axisM, mp_cri.axisC)
    mp_Z_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
    mp_DX[k] = sl.irfftn(sl.inner(mp_Df, Xf), mp_cri.Nv, mp_cri.axisN)
Do the X step of the cbpdn stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables.
18,920
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
          bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
          byweekno=None, byweekday=None, byhour=None, byminute=None,
          bysecond=None, timezone='UTC', start=None, stop=None):
    if all([(start is None or is_datetime_naive(start)),
            (stop is None or is_datetime_naive(stop))]):
        pass
    else:
        raise DeloreanInvalidDatetime('Provide a naive datetime object')
    if start is None:
        start = datetime_timezone(timezone)
    for dt in rrule(freq, interval=interval, count=count, wkst=wkst,
                    bysetpos=bysetpos, bymonth=bymonth, bymonthday=bymonthday,
                    byyearday=byyearday, byeaster=byeaster, byweekno=byweekno,
                    byweekday=byweekday, byhour=byhour, byminute=byminute,
                    bysecond=bysecond, until=stop, dtstart=start):
        dt = dt.replace(tzinfo=None)
        d = Delorean(datetime=dt, timezone=timezone)
        yield d
This will create a generator of Delorean objects that conform to the settings passed in.
18,921
def ch_duration(self, *channels: List[Channel]) -> int:
    return self.timeslots.ch_duration(*channels)
Return duration of supplied channels. Args: *channels: Supplied channels
18,922
def tracked(self, tag=None, fromdate=None, todate=None):
    return self.call("GET", "/stats/outbound/tracked", tag=tag,
                     fromdate=fromdate, todate=todate)
Gets a total count of emails you’ve sent with open tracking or link tracking enabled.
18,923
def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None):
    newhdxobjects = list()
    for hdxobject in hdxobjects:
        newhdxobjectdata = copy.deepcopy(hdxobject.data)
        newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration)
        if attribute_to_copy:
            value = getattr(hdxobject, attribute_to_copy)
            setattr(newhdxobject, attribute_to_copy, value)
        newhdxobjects.append(newhdxobject)
    return newhdxobjects
Helper function to make a deep copy of a supplied list of HDX objects Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to copy hdxobjectclass (type): Type of the HDX Objects to be copied attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None. Returns: List[T <= HDXObject]: Deep copy of list of HDX objects
18,924
def _gatherLookupIndexes(gpos):
    kernFeatureIndexes = [index for index, featureRecord
                          in enumerate(gpos.FeatureList.FeatureRecord)
                          if featureRecord.FeatureTag == "kern"]
    scriptKernFeatureIndexes = {}
    for scriptRecord in gpos.ScriptList.ScriptRecord:
        script = scriptRecord.ScriptTag
        thisScriptKernFeatureIndexes = []
        defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
        if defaultLangSysRecord is not None:
            f = []
            for featureIndex in defaultLangSysRecord.FeatureIndex:
                if featureIndex not in kernFeatureIndexes:
                    continue
                f.append(featureIndex)
            if f:
                thisScriptKernFeatureIndexes.append((None, f))
        if scriptRecord.Script.LangSysRecord is not None:
            for langSysRecord in scriptRecord.Script.LangSysRecord:
                langSys = langSysRecord.LangSysTag
                f = []
                for featureIndex in langSysRecord.LangSys.FeatureIndex:
                    if featureIndex not in kernFeatureIndexes:
                        continue
                    f.append(featureIndex)
                if f:
                    thisScriptKernFeatureIndexes.append((langSys, f))
        scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
    scriptLookupIndexes = {}
    for script, featureDefinitions in scriptKernFeatureIndexes.items():
        lookupIndexes = scriptLookupIndexes[script] = []
        for language, featureIndexes in featureDefinitions:
            for featureIndex in featureIndexes:
                featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
                for lookupIndex in featureRecord.Feature.LookupListIndex:
                    if lookupIndex not in lookupIndexes:
                        lookupIndexes.append(lookupIndex)
    return scriptLookupIndexes
Gather a mapping of script to lookup indexes referenced by the kern feature for each script. Returns a dictionary of this structure: { "latn" : [0], "DFLT" : [0] }
18,925
def series64bitto32bit(s):
    if s.dtype == np.float64:
        return s.astype('float32')
    elif s.dtype == np.int64:
        return s.astype('int32')
    return s
Convert a Pandas series from 64 bit types to 32 bit types to save memory or disk space. Parameters ---------- s : The series to convert Returns ------- The converted series
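A quick usage check of the conversion:

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])                        # float64 by default
print(series64bitto32bit(s).dtype)                    # float32
print(series64bitto32bit(pd.Series([1, 2])).dtype)    # int32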
18,926
def makestate(im, pos, rad, slab=None, mem_level='hi'):
    if slab is not None:
        o = comp.ComponentCollection(
            [
                objs.PlatonicSpheresCollection(pos, rad, zscale=zscale),
                slab
            ],
            category='obj'
        )
    else:
        o = objs.PlatonicSpheresCollection(pos, rad, zscale=zscale)
    p = exactpsf.FixedSSChebLinePSF()
    npts, iorder = _calc_ilm_order(im.get_image().shape)
    i = ilms.BarnesStreakLegPoly2P1D(npts=npts, zorder=iorder)
    b = ilms.LegendrePoly2P1D(order=(9, 3, 5), category='bkg')
    c = comp.GlobalScalar('offset', 0.0)
    s = states.ImageState(im, [o, i, b, c, p])
    runner.link_zscale(s)
    if mem_level != 'hi':
        s.set_mem_level(mem_level)
    opt.do_levmarq(s, ['ilm-scale'], max_iter=1, run_length=6, max_mem=1e4)
    return s
Workhorse for creating & optimizing states with an initial centroid guess. This is an example function that works for a particular microscope. For your own microscope, you'll need to change particulars such as the psf type and the orders of the background and illumination. Parameters ---------- im : :class:`~peri.util.RawImage` A RawImage of the data. pos : [N,3] element numpy.ndarray. The initial guess for the N particle positions. rad : N element numpy.ndarray. The initial guess for the N particle radii. slab : :class:`peri.comp.objs.Slab` or None, optional If not None, a slab corresponding to that in the image. Default is None. mem_level : {'lo', 'med-lo', 'med', 'med-hi', 'hi'}, optional A valid memory level for the state to control the memory overhead at the expense of accuracy. Default is `'hi'` Returns ------- :class:`~peri.states.ImageState` An ImageState with a linked z-scale, a ConfocalImageModel, and all the necessary components with orders at which are useful for my particular test case.
18,927
def get_personal_module(self):
    return PersonalModule(
        layout='inline',
        draggable=False,
        deletable=False,
        collapsible=False,
    )
Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard.
18,928
def evalSamples(self, x):
    self._N_dv = len(_makeIter(x))
    if self.verbose:
        print()
    if self.surrogate is None:
        def fqoi(u):
            return self.fqoi(x, u)

        def fgrad(u):
            return self.jac(x, u)
        jac = self.jac
    else:
        fqoi, fgrad, surr_jac = self._makeSurrogates(x)
        jac = surr_jac
    u_samples = self._getParameterSamples()
    if self.verbose:
        print()
    q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
    return q_samples, grad_samples
Evaluates the samples of the quantity of interest and its gradient (if supplied) at the given values of the design variables :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :return: (values of the quantity of interest, values of the gradient) :rtype: Tuple
18,929
def user_admin_urlname(action):
    user = get_user_model()
    return 'admin:%s_%s_%s' % (
        user._meta.app_label, user._meta.model_name, action)
Return the admin URLs for the user app used.
18,930
def train_local(self, closest_point, label_vector_description=None, N=None,
                pivot=True, **kwargs):
    lv = self._cannon_label_vector if label_vector_description is None else \
        self._interpret_label_vector(label_vector_description)
    if N is None:
        N = self._configuration.get("settings", {}).get("grid_subset", 0.10)
        if 1 >= N > 0:
            N = int(np.round(N * self.grid_points.size))
    logger.debug("Using {} nearest points for local Cannon model".format(N))
    dtype = [(name, float) for name in self.grid_points.dtype.names]
    grid_points \
        = self.grid_points.astype(dtype).view(float).reshape(-1, len(dtype))
    distance = np.sum(np.abs(grid_points - np.array(closest_point)) /
                      np.ptp(grid_points, axis=0), axis=1)
    grid_indices = np.argsort(distance)[:N]
    lv_array, _, offsets = _build_label_vector_array(
        self.grid_points[grid_indices], lv, pivot=pivot)
    return self._train(lv_array, grid_indices, offsets, lv, **kwargs)
Train the model in a Cannon-like fashion using the grid points as labels and the intensities as normalised rest-frame fluxes within some local regime.
18,931
def loadFromDisk(self, calculation):
    # NOTE: the string literals in this function (the suffix mapping,
    # experiment names, axis labels, broadening keys) were stripped from this
    # record; all quoted values below are assumptions.
    suffixes = {
        'Isotropic': 'iso',
        'Circular Dichroism': 'cd',
        'Linear Dichroism': 'ld',
    }  # the original mapping had seven entries
    self.raw = list()
    for spectrumName in self.toPlot:
        suffix = suffixes[spectrumName]
        path = '{}_{}.spec'.format(calculation.baseName, suffix)
        try:
            data = np.loadtxt(path, skiprows=5)
        except (OSError, IOError) as e:
            raise e
        rows, columns = data.shape
        if calculation.experiment in ('XAS', 'XES', 'XPS'):
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            if calculation.experiment == 'XES':
                x = np.linspace(xMin, xMax, xNPoints + 1)
                x = x[::-1]
                y = data[:, 2]
                y = y / np.abs(y.max())
            else:
                x = np.linspace(xMin, xMax, xNPoints + 1)
                y = data[:, 2::2].flatten()
            spectrum = Spectrum1D(x, y)
            spectrum.name = spectrumName
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            if calculation.experiment in ('XAS',):
                spectrum.xLabel = 'Absorption Energy (eV)'
            elif calculation.experiment in ('XES',):
                spectrum.xLabel = 'Emission Energy (eV)'
            elif calculation.experiment in ('XPS',):
                spectrum.xLabel = 'Binding Energy (eV)'
            spectrum.yLabel = 'Intensity (a.u.)'
            self.broadenings = {'gaussian': (calculation.xGaussian, ), }
        else:
            xMin = calculation.xMin
            xMax = calculation.xMax
            xNPoints = calculation.xNPoints
            yMin = calculation.yMin
            yMax = calculation.yMax
            yNPoints = calculation.yNPoints
            x = np.linspace(xMin, xMax, xNPoints + 1)
            y = np.linspace(yMin, yMax, yNPoints + 1)
            z = data[:, 2::2]
            spectrum = Spectrum2D(x, y, z)
            spectrum.name = spectrumName
            if len(suffix) > 2:
                spectrum.shortName = suffix.title()
            else:
                spectrum.shortName = suffix.upper()
            spectrum.xLabel = 'Incident Energy (eV)'
            spectrum.yLabel = 'Energy Transfer (eV)'
            self.broadenings = {'gaussian': (calculation.xGaussian,
                                             calculation.yGaussian), }
        self.raw.append(spectrum)
    self.process()
Read the spectra from the files generated by Quanty and store them as a list of spectrum objects.
18,932
def get_template(self, R):
    centers, widths = self.init_centers_widths(R)
    template_prior = \
        np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
    template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
    template_widths_var = self._get_max_sigma(R)
    centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
    widths_var_all = np.tile(template_widths_var, self.K)
    self.set_centers(template_prior, centers)
    self.set_widths(template_prior, widths)
    self.set_centers_mean_cov(template_prior, centers_cov_all)
    self.set_widths_mean_var(template_prior, widths_var_all)
    return template_prior, template_centers_cov, template_widths_var
Compute a template on latent factors Parameters ---------- R : 2D array, in format [n_voxel, n_dim] The scanner coordinate matrix of one subject's fMRI data Returns ------- template_prior : 1D array The template prior. template_centers_cov: 2D array, in shape [n_dim, n_dim] The template on centers' covariance. template_widths_var: float The template on widths' variance
18,933
def visdom_send_metrics(vis, metrics, update='append'):
    visited = {}
    sorted_metrics = sorted(metrics.columns, key=_column_original_name)
    for metric_basename, metric_list in it.groupby(sorted_metrics,
                                                   key=_column_original_name):
        metric_list = list(metric_list)
        for metric in metric_list:
            if vis.win_exists(metric_basename) and not visited.get(metric, False):
                update = update
            elif not vis.win_exists(metric_basename):
                update = None
            else:
                update = 'append'  # value assumed; the literal was lost from this record
            vis.line(
                metrics[metric].values,
                metrics.index.values,
                win=metric_basename,
                name=metric,
                opts={'title': metric_basename, 'showlegend': True},
                update=update
            )
            if metric_basename != metric and len(metric_list) > 1:
                if vis.win_exists(metric):
                    update = update
                else:
                    update = None
                vis.line(
                    metrics[metric].values,
                    metrics.index.values,
                    win=metric,
                    name=metric,
                    opts={'title': metric, 'showlegend': True},
                    update=update
                )
Send set of metrics to visdom
18,934
def bookmark(ctx):
    user, project_name, _group = get_project_group_or_local(
        ctx.obj.get('project'), ctx.obj.get('group'))
    try:
        PolyaxonClient().experiment_group.bookmark(user, project_name, _group)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not bookmark experiments group `{}`.'.format(_group))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    Printer.print_success("Experiments group is bookmarked.")
Bookmark group. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon group bookmark ``` \b ```bash $ polyaxon group -g 2 bookmark ```
18,935
def maskedEqual(array, missingValue):
    if array_is_structured(array):
        if not isinstance(array, ma.MaskedArray):
            array = ma.MaskedArray(array)
        for nr, field in enumerate(array.dtype.names):
            if hasattr(missingValue, '__len__'):  # attribute name assumed
                fieldMissingValue = missingValue[nr]
            else:
                fieldMissingValue = missingValue
            array[field] = ma.masked_equal(array[field], fieldMissingValue)
        check_class(array, ma.MaskedArray)
        return array
    else:
        result = ma.masked_equal(array, missingValue, copy=False)
        check_class(result, ma.MaskedArray)
        return result
Mask an array where equal to a given (missing)value. Unfortunately ma.masked_equal does not work with structured arrays. See: https://mail.scipy.org/pipermail/numpy-discussion/2011-July/057669.html If the data is a structured array the mask is applied for every field (i.e. forming a logical-and). Otherwise ma.masked_equal is called.
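A sketch of the structured-array case, assuming the module's `array_is_structured` and `check_class` helpers are available:

import numpy as np

arr = np.array([(1, 2.0), (-99, -99.0)],
               dtype=[('a', 'i4'), ('b', 'f8')])
masked = maskedEqual(arr, -99)
print(masked['a'])   # [1 --]
print(masked['b'])   # [2.0 --]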
18,936
def __decrypt_assertion(self, dom):
    key = self.__settings.get_sp_key()
    debug = self.__settings.is_debug_active()
    if not key:
        raise OneLogin_Saml2_Error(
            'No private key available to decrypt the assertion, check settings',
            OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
        )
    encrypted_assertion_nodes = OneLogin_Saml2_Utils.query(
        dom, '/samlp:Response/saml:EncryptedAssertion')
    if encrypted_assertion_nodes:
        encrypted_data_nodes = OneLogin_Saml2_Utils.query(
            encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData')
        if encrypted_data_nodes:
            keyinfo = OneLogin_Saml2_Utils.query(
                encrypted_assertion_nodes[0],
                '//saml:EncryptedAssertion/xenc:EncryptedData/ds:KeyInfo')
            if not keyinfo:
                raise OneLogin_Saml2_ValidationError(
                    'No KeyInfo present, invalid Assertion',
                    OneLogin_Saml2_ValidationError.KEYINFO_NOT_FOUND_IN_ENCRYPTED_DATA
                )
            keyinfo = keyinfo[0]
            children = keyinfo.getchildren()
            if not children:
                raise OneLogin_Saml2_ValidationError(
                    'KeyInfo has no children nodes, invalid Assertion',
                    OneLogin_Saml2_ValidationError.CHILDREN_NODE_NOT_FOUND_IN_KEYINFO
                )
            for child in children:
                if 'RetrievalMethod' in child.tag:
                    if child.attrib['Type'] != 'http://www.w3.org/2001/04/xmlenc#EncryptedKey':
                        raise OneLogin_Saml2_ValidationError(
                            'Unsupported Retrieval Method found',
                            OneLogin_Saml2_ValidationError.UNSUPPORTED_RETRIEVAL_METHOD
                        )
                    uri = child.attrib['URI']
                    if not uri.startswith('#'):
                        break
                    uri = uri.split('#')[1]
                    encrypted_key = OneLogin_Saml2_Utils.query(
                        encrypted_assertion_nodes[0],
                        './/xenc:EncryptedKey[@Id=$id]', None, uri)
                    if encrypted_key:
                        keyinfo.append(encrypted_key[0])
            encrypted_data = encrypted_data_nodes[0]
            decrypted = OneLogin_Saml2_Utils.decrypt_element(
                encrypted_data, key, debug=debug, inplace=True)
            dom.replace(encrypted_assertion_nodes[0], decrypted)
    return dom
Decrypts the Assertion :raises: Exception if no private key available :param dom: Encrypted Assertion :type dom: Element :returns: Decrypted Assertion :rtype: Element
18,937
def properties_from_mapping(self, bt_addr):
    for addr, properties in self.eddystone_mappings:
        if addr == bt_addr:
            return properties
    return None
Retrieve properties (namespace, instance) for the specified bt address.
18,938
def Decode(self, encoded_data):
    try:
        decoded_data = base64.b64decode(encoded_data)
    except (TypeError, binascii.Error) as exception:
        raise errors.BackEndError(
            'Unable to decode base64 stream with error: {0!s}.'.format(exception))
    return decoded_data, b''
Decode the encoded data. Args: encoded_data (byte): encoded data. Returns: tuple(bytes, bytes): decoded data and remaining encoded data. Raises: BackEndError: if the base64 stream cannot be decoded.
18,939
def fromfuncs(funcs, n_sessions, eqdata, **kwargs):
    _skipatstart = kwargs.get('skipatstart', 0)
    _constfeat = kwargs.get('constfeat', True)
    _outcols = ['Constant'] if _constfeat else []
    _n_allrows = len(eqdata.index)
    _n_featrows = _n_allrows - _skipatstart - n_sessions + 1
    for _func in funcs:
        _outcols += map(partial(_concat, strval=' ' + _func.title),
                        range(-n_sessions + 1, 1))
    _features = pd.DataFrame(index=eqdata.index[_skipatstart + n_sessions - 1:],
                             columns=_outcols, dtype=np.float64)
    _offset = 0
    if _constfeat:
        _features.iloc[:, 0] = 1.
        _offset += 1
    for _func in funcs:
        _values = _func(eqdata).values
        _n_values = len(_values)
        for i in range(n_sessions):
            _val_end = _n_values - n_sessions + i + 1
            _features.iloc[:, _offset + i] = _values[_val_end - _n_featrows:_val_end]
        _offset += n_sessions
    return _features
Generate features using a list of functions to apply to input data Parameters ---------- funcs : list of function Functions to apply to eqdata. Each function is expected to output a dataframe with index identical to a slice of `eqdata`. The slice must include at least `eqdata.index[skipatstart + n_sessions - 1:]`. Each function is also expected to have a function attribute `title`, which is used to generate the column names of the output features. n_sessions : int Number of sessions over which to create features. eqdata : DataFrame Data from which to generate features. The data will often be retrieved using `pn.get()`. constfeat : bool, optional Whether or not the returned features will have the constant feature. skipatstart : int, optional Number of rows to omit at the start of the output DataFrame. This parameter is necessary if any of the functions requires a rampup period before returning valid results, e.g. `sma()` or functions calculating volume relative to a past baseline. Defaults to 0. Returns ---------- features : DataFrame
18,940
def _F(self, X):
    if isinstance(X, int) or isinstance(X, float):
        if X < 1 and X > 0:
            a = 1 / (X**2 - 1) * (1 - 2 / np.sqrt(1 - X**2) * np.arctanh(np.sqrt((1 - X) / (1 + X))))
        elif X == 1:
            a = 1. / 3
        elif X > 1:
            a = 1 / (X**2 - 1) * (1 - 2 / np.sqrt(X**2 - 1) * np.arctan(np.sqrt((X - 1) / (1 + X))))
        else:
            c = 0.0000001
            a = 1 / (-1) * (1 - 2 / np.sqrt(1) * np.arctanh(np.sqrt((1 - c) / (1 + c))))
    else:
        a = np.empty_like(X)
        x = X[(X < 1) & (X > 0)]
        a[(X < 1) & (X > 0)] = 1 / (x**2 - 1) * (1 - 2 / np.sqrt(1 - x**2) * np.arctanh(np.sqrt((1 - x) / (1 + x))))
        a[X == 1] = 1. / 3.
        x = X[X > 1]
        a[X > 1] = 1 / (x**2 - 1) * (1 - 2 / np.sqrt(x**2 - 1) * np.arctan(np.sqrt((x - 1) / (1 + x))))
        c = 0.0000001
        a[X == 0] = 1 / (-1) * (1 - 2 / np.sqrt(1) * np.arctanh(np.sqrt((1 - c) / (1 + c))))
    return a
analytic solution of the projection integral :param x: R/Rs :type x: float >0
18,941
def _is_unpacked_egg(path):
    return (
        _is_egg_path(path) and
        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
    )
Determine if given path appears to be an unpacked egg.
18,942
def remove(self):
    title = '%s.remove' % self.__class__.__name__
    for id, name, mimetype in self._list_directory():
        try:
            self.drive.delete(fileId=id).execute()
        except Exception as err:
            if str(err).find('File not found') > -1:  # match text assumed
                pass
            else:
                raise DriveConnectionError(title)
    insert = 'app folder'  # message wording assumed
    if self.collection_name:
        insert = self.collection_name
    exit_msg = 'Contents of %s deleted.' % insert
    return exit_msg
a method to remove all records in the collection NOTE: this method removes all the files in the collection, but the collection folder itself created by oauth2 cannot be removed. only the user can remove access to the app folder :return: string with confirmation of deletion
18,943
def load_raw(path):
    _import_pil()
    from PIL import Image
    return np.array(Image.open(path))
Load image using PIL/Pillow without any processing. This is particularly useful for palette images, which will be loaded using their palette index values as opposed to `load` which will convert them to RGB. Parameters ---------- path : str Path to image file.
18,944
def _broadcast_arg(U, arg, argtype, name):
    if arg is None or isinstance(arg, argtype):
        return [arg for _ in range(U.ndim)]
    elif np.iterable(arg):
        if len(arg) != U.ndim:
            # error messages reconstructed to match the format arguments
            raise ValueError(
                'Parameter {} must have length {}.'.format(name, U.ndim))
        elif not all([isinstance(a, argtype) for a in arg]):
            raise TypeError(
                'Parameter {} must be a sequence of {}.'.format(name, argtype))
        else:
            return arg
    else:
        raise TypeError(
            'Parameter {} has invalid type {}; expected {}.'.format(name, type(arg), argtype))
Broadcasts plotting option `arg` to all factors. Args: U : KTensor arg : argument provided by the user argtype : expected type for arg name : name of the variable, used for error handling Returns: iterable version of arg of length U.ndim
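A small illustration with a stand-in object; only the `ndim` attribute is consulted, so a real KTensor is not needed here:

import numpy as np

class FakeKTensor:
    ndim = 3

U = FakeKTensor()
print(_broadcast_arg(U, 'r', str, 'color'))              # ['r', 'r', 'r']
print(_broadcast_arg(U, ['r', 'g', 'b'], str, 'color'))  # passed through unchanged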
18,945
def to_decimal(self):
    high = self.__high
    low = self.__low
    sign = 1 if (high & _SIGN) else 0
    if (high & _SNAN) == _SNAN:
        return decimal.Decimal((sign, (), 'N'))
    elif (high & _NAN) == _NAN:
        return decimal.Decimal((sign, (), 'n'))
    elif (high & _INF) == _INF:
        return decimal.Decimal((sign, (), 'F'))
    if (high & _EXPONENT_MASK) == _EXPONENT_MASK:
        exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS
        return decimal.Decimal((sign, (0,), exponent))
    else:
        exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS
        arr = bytearray(15)
        mask = 0x00000000000000ff
        for i in range(14, 6, -1):
            arr[i] = (low & mask) >> ((14 - i) << 3)
            mask = mask << 8
        mask = 0x00000000000000ff
        for i in range(6, 0, -1):
            arr[i] = (high & mask) >> ((6 - i) << 3)
            mask = mask << 8
        mask = 0x0001000000000000
        arr[0] = (high & mask) >> 48
        digits = tuple(
            int(digit) for digit in str(_from_bytes(bytes(arr), 'big')))
        with decimal.localcontext(_DEC128_CTX) as ctx:
            return ctx.create_decimal((sign, digits, exponent))
Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`.
18,946
def text(self):
    return self.template.format(name=self.name, type=self.type)
Formatted param definition Equivalent to ``self.template.format(name=self.name, type=self.type)``.
18,947
def _inputrc_enables_vi_mode():
    for filepath in (os.path.expanduser('~/.inputrc'), '/etc/inputrc'):
        try:
            with open(filepath) as f:
                for line in f:
                    if _setre.fullmatch(line):
                        return True
        except IOError:
            continue
    return False
Emulate a small bit of readline behavior. Returns: (bool) True if current user enabled vi mode ("set editing-mode vi") in .inputrc
18,948
def _get_tcntobj(goids, go2obj, **kws):
    if 'gaf' in kws or 'id2gos' in kws:  # key names assumed; literals lost from this record
        _gosubdag = GoSubDag(goids, go2obj, rcntobj=False, prt=None)
        return get_tcntobj(_gosubdag.go2obj, **kws)
Get a TermCounts object if the user provides an annotation file, otherwise None.
18,949
def find_vasp_calculations():
    dir_list = ['./' + re.sub(r'vasprun\.xml', '', path)
                for path in glob.iglob('**/vasprun.xml', recursive=True)]
    gz_dir_list = ['./' + re.sub(r'vasprun\.xml\.gz', '', path)
                   for path in glob.iglob('**/vasprun.xml.gz', recursive=True)]
    return dir_list + gz_dir_list
Returns a list of all subdirectories that contain either a vasprun.xml file or a compressed vasprun.xml.gz file. Args: None Returns: (List): list of all VASP calculation subdirectories.
18,950
def encodeMotorInput(self, motorInput):
    if not hasattr(motorInput, "__iter__"):
        motorInput = list([motorInput])
    return self.motorEncoder.encode(motorInput)
Encode motor command to bit vector. @param motorInput (1D numpy.array) Motor command to be encoded. @return (1D numpy.array) Encoded motor command.
18,951
def db_dp990(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field `db_dp990`'.format(value))
    self._db_dp990 = value
Corresponds to IDD Field `db_dp990` mean coincident drybulb temperature corresponding to Dew-point temperature corresponding to 90.0% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db_dp990` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
18,952
def get_token_settings(cls, token, default=None):
    setting_dict = {}
    for key, value in iteritems(cls.__dict__):
        # key-prefix format assumed; the literal was lost from this record
        if '%s_' % token in key and not callable(key) and not isinstance(value, tokens.TokenAttr):
            setting_dict[key] = cls.__dict__.get(key, default)
    return setting_dict
Get the value for a specific token as a dictionary or replace with default :param token: str, token to query the nomenclate for :param default: object, substitution if the token is not found :return: (dict, object, None), token setting dictionary or default
18,953
def to_equivalent(self, unit, equivalence, **kwargs):
    conv_unit = Unit(unit, registry=self.units.registry)
    if self.units.same_dimensions_as(conv_unit):
        return self.in_units(conv_unit)
    this_equiv = equivalence_registry[equivalence]()
    if self.has_equivalent(equivalence):
        new_arr = this_equiv.convert(self, conv_unit.dimensions, **kwargs)
        return new_arr.in_units(conv_unit)
    else:
        raise InvalidUnitEquivalence(equivalence, self.units, unit)
Return a copy of the unyt_array in the units specified units, assuming the given equivalency. The dimensions of the specified units and the dimensions of the original array need not match so long as there is an appropriate conversion in the specified equivalency. Parameters ---------- unit : string The unit that you wish to convert to. equivalence : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Examples -------- >>> from unyt import K >>> a = 1.0e7*K >>> print(a.to_equivalent("keV", "thermal")) 0.8617332401096504 keV
18,954
def get_likes(self, likable_type, likable_id):
    return _get_request(_LIKES.format(c_api=_C_API_BEGINNING,
                                      api=_API_VERSION,
                                      lt=likable_type,
                                      li=likable_id,
                                      at=self.access_token))
likable_type: 'Comment', 'Press', 'Review', 'StartupRole', 'StatusUpdate' likable_id: id of the object whose likes you are interested in
18,955
def start(self, interval=None, iterations=None):
    if self.running:
        return
Start the timer. A timeout event will be generated every *interval* seconds. If *interval* is None, then self.interval will be used. If *iterations* is specified, the timer will stop after emitting that number of events. If unspecified, then the previous value of self.iterations will be used. If the value is negative, then the timer will continue running until stop() is called. If the timer is already running when this function is called, nothing happens (timer continues running as it did previously, without changing the interval, number of iterations, or emitting a timer start event).
18,956
def Laliberte_density_i(T, w_w, c0, c1, c2, c3, c4):
    t = T - 273.15
    return ((c0*(1 - w_w) + c1)*exp(1E-6*(t + c4)**2))/((1 - w_w) + c2 + c3*t)
r'''Calculate the density of a solute using the form proposed by Laliberte [1]_. Fit parameters, a temperature, and the water fraction are needed. Units are Kelvin and kg/m^3. .. math:: \rho_{app,i} = \frac{(c_0[1-w_w]+c_1)\exp(10^{-6}[t+c_4]^2)} {(1-w_w) + c_2 + c_3 t} Parameters ---------- T : float Temperature of fluid [K] w_w : float Weight fraction of water in the solution c0-c4 : floats Function fit parameters Returns ------- rho_i : float Solute partial density, [kg/m^3] Notes ----- Temperature range check is TODO Examples -------- >>> d = _Laliberte_Density_ParametersDict['7647-14-5'] >>> Laliberte_density_i(273.15+0, 1-0.0037838838, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"]) 3761.8917585699983 References ---------- .. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of Aqueous Solutions, with Updated Density and Viscosity Data." Journal of Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60. doi:10.1021/je8008123
18,957
def version(self):
    if self._version != 'auto':
        return self._version
    if self._version == 'auto':
        try:
            data = self.execute_get('configuration/about/')  # endpoint assumed
            self._version = data['configuration'][0]['version']  # keys assumed
        except GhostException:
            return self.DEFAULT_VERSION
    return self._version
:return: The version of the server when initialized as 'auto', otherwise the version passed in at initialization
18,958
def instruction_DEC_register(self, opcode, register):
    a = register.value
    r = self.DEC(a)
    register.set(r)
Decrement accumulator
18,959
def create_client_socket(self, config):
    client_socket = WUDPNetworkNativeTransport.create_client_socket(self, config)
    client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    return client_socket
Create client broadcast socket :param config: client configuration :return: socket.socket
18,960
def _drop_oldest_chunk(self):
    chunk_id = min(self.chunked_counts.keys())
    chunk = self.chunked_counts.pop(chunk_id)
    self.n_counts -= len(chunk)
    for k, v in list(chunk.items()):
        self.counts[k] -= v
        self.counts_total -= v
Handles the case where more items arrive than the chunk's maximum capacity by removing the oldest chunk, so that new items can keep flowing in. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 0 >>> s.chunked_counts {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}} >>> data_stream = ['a','b','c','d','a','e','f'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 2 >>> s.chunked_counts {2: {'f': 1}}
18,961
def Place(self, x, flags):
    N.enforce_number(x, flags)
    self.head = self.head - flags.bytewidth
    encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
Place prepends a value specified by `flags` to the Builder, without checking for available space.
18,962
def getParameters(self):
    parameters = lock_and_call(
        lambda: self._impl.getParameters(),
        self._lock
    )
    return EntityMap(parameters, Parameter)
Get all the parameters declared.
18,963
def get_sockaddr(host, port, family):
    if family == af_unix:
        return host.split("://", 1)[1]
    try:
        res = socket.getaddrinfo(
            host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
        )
    except socket.gaierror:
        return host, port
    return res[0][4]
Return a fully qualified socket address that can be passed to :func:`socket.bind`.
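Example behaviour for the TCP branch (the `af_unix` constant used above is defined elsewhere in the module):

import socket

print(get_sockaddr("localhost", 8080, socket.AF_INET))
# e.g. ('127.0.0.1', 8080)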
18,964
def _aggregations(search, definitions):
    if definitions:
        for name, agg in definitions.items():
            search.aggs[name] = agg if not callable(agg) else agg()
    return search
Add aggregations to query.
18,965
def categorical_to_numeric(table):
    def transform(column):
        if is_categorical_dtype(column.dtype):
            return column.cat.codes
        if column.dtype.char == "O":
            try:
                nc = column.astype(numpy.int64)
            except ValueError:
                classes = column.dropna().unique()
                classes.sort(kind="mergesort")
                nc = column.replace(classes, numpy.arange(classes.shape[0]))
            return nc
        elif column.dtype == bool:
            return column.astype(numpy.int64)
        return column

    if isinstance(table, pandas.Series):
        return pandas.Series(transform(table), name=table.name, index=table.index)
    else:
        if _pandas_version_under0p23:
            return table.apply(transform, axis=0, reduce=False)
        else:
            return table.apply(transform, axis=0, result_type='expand')  # value assumed
Encode categorical columns to numeric by converting each category to an integer value. Parameters ---------- table : pandas.DataFrame Table with categorical columns to encode. Returns ------- encoded : pandas.DataFrame Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged.
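A worked example of the encoding:

import pandas as pd

df = pd.DataFrame({
    "stage": pd.Categorical(["I", "II", "I"]),
    "alive": [True, False, True],
    "age": [61, 65, 57],
})
print(categorical_to_numeric(df))
#    stage  alive  age
# 0      0      1   61
# 1      1      0   65
# 2      0      1   57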
18,966
def update_distribution(
    name, config, tags=None, region=None, key=None, keyid=None, profile=None,
):
    distribution_ret = get_distribution(
        name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in distribution_ret:
        return distribution_ret
    dist_with_tags = distribution_ret['result']
    current_distribution = dist_with_tags['distribution']
    current_config = current_distribution['DistributionConfig']
    current_tags = dist_with_tags['tags']
    etag = dist_with_tags['etag']
    config_diff = __utils__['dictdiffer.deep_diff'](current_config, config)
    if tags:
        tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if 'old' in config_diff or 'new' in config_diff:
            conn.update_distribution(
                DistributionConfig=config,
                Id=current_distribution['Id'],
                IfMatch=etag,
            )
        if tags:
            arn = current_distribution['ARN']
            if 'new' in tags_diff:
                tags_to_add = {
                    'Items': [
                        {'Key': k, 'Value': v}
                        for k, v in six.iteritems(tags_diff['new'])
                    ],
                }
                conn.tag_resource(
                    Resource=arn,
                    Tags=tags_to_add,
                )
            if 'old' in tags_diff:
                tags_to_remove = {
                    'Items': list(tags_diff['old'].keys()),
                }
                conn.untag_resource(
                    Resource=arn,
                    TagKeys=tags_to_remove,
                )
    except botocore.exceptions.ClientError as err:
        return {'error': __utils__['boto3.get_error'](err)}
    finally:
        _cache_id(
            'cloudfront',
            sub_resource=name,
            invalidate=True,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
    return {'result': True}
Update the config (and optionally tags) for the CloudFront distribution with the given name. name Name of the CloudFront distribution config Configuration for the distribution tags Tags to associate with the distribution region Region to connect to key Secret key to use keyid Access key to use profile A dict with region, key, and keyid, or a pillar key (string) that contains such a dict. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \ config='{"Comment":"partial configuration","Enabled":true}'
18,967
def write_err(self, text):
    stderr = self.stderr
    if self.stderr.closed:
        stderr = sys.stderr
    stderr.write(decode_output(u"\r", target_stream=stderr))
    stderr.write(decode_output(CLEAR_LINE, target_stream=stderr))
    if text is None:
        text = ""
    text = decode_output(u"{0}\n".format(text), target_stream=stderr)
    self.stderr.write(text)
    self.out_buff.write(decode_output(text, target_stream=self.out_buff))
Write error text in the terminal without breaking the spinner.
18,968
def ris(self):
    if self.aggregationType != 'Journal':
        raise ValueError('Only Journal articles supported.')  # message assumed
    # RIS template reconstructed from the format fields used below
    template = u'TY  - JOUR\nTI  - {title}\nJO  - {journal}\nVL  - {volume}\n' \
               u'DA  - {date}\nSP  - {pages}\nPY  - {year}\nDO  - {doi}\n'
    ris = template.format(
        title=self.title,
        journal=self.publicationName,
        volume=self.volume,
        date=self.coverDate,
        pages=self.pageRange,
        year=self.coverDate[0:4],
        doi=self.doi)
    for au in self.authors:
        ris += 'A1  - {}\n'.format(au.indexed_name)
    if self.issueIdentifier is not None:
        ris += 'IS  - {}\n'.format(self.issueIdentifier)
    ris += 'ER  - \n\n'
    return ris
Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal.
18,969
def _parse_line(self, line):
    msg_info = {'raw_message': line}
    if ': ' in line:
        info, msg = [i.strip() for i in line.split(': ', 1)]
        msg_info['message'] = msg
        info_splits = info.split()
        if len(info_splits) == 5:
            msg_info['timestamp'] = ' '.join(info_splits[:3])
            msg_info['hostname'] = info_splits[3]
            msg_info['procname'] = info_splits[4]
    return msg_info
Parsed result:: {'timestamp':'May 18 14:24:14', 'procname': 'kernel', 'hostname':'lxc-rhel68-sat56', 'message': '...', 'raw_message': '...: ...' }
18,970
def _traverse(summary, function, *args):
    function(summary, *args)
    for row in summary:
        function(row, *args)
        for item in row:
            function(item, *args)
Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row
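A small demonstration of the traversal order:

visited = []
_traverse([[1, 2], [3]], visited.append)
print(visited)   # [[[1, 2], [3]], [1, 2], 1, 2, [3], 3]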
18,971
def create_couchdb_admin(username, password):
    # endpoint path assumed from the CouchDB admin-config API
    curl_couchdb('/_config/admins/{}'.format(username),
                 method='PUT',
                 data='"{}"'.format(password))
Create a CouchDB user
18,972
def destroy(self):
    widget = self.widget
    if widget is not None:
        del self.widget
    super(UiKitToolkitObject, self).destroy()
A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None.
18,973
def search(self, query, category, uid=None, latitude=None, longitude=None,
           city=None, region=None):
    if isinstance(category, (tuple, list)):
        category = ','.join(category)
    data = optionaldict()
    data['query'] = query
    data['category'] = category
    data['uid'] = uid
    data['latitude'] = latitude
    data['longitude'] = longitude
    data['city'] = city
    data['region'] = region
    data['appid'] = self._client.appid
    return self._post(
        url='https://api.weixin.qq.com/semantic/semproxy/search',  # endpoint assumed
        data=data
    )
Send a semantic understanding request. For details see http://mp.weixin.qq.com/wiki/0/0ce78b3c9524811fee34aba3e33f3448.html :param query: input text string :param category: service types to use; multiple types may be passed as a list :param uid: optional, unique user id (not the developer id), used to distinguish different users of the official account (passing the user's openid is recommended) :param latitude: optional, latitude coordinate, passed together with longitude; pass either coordinates or city :param longitude: optional, longitude coordinate, passed together with latitude; pass either coordinates or city :param city: optional, city name; pass either city or coordinates :param region: optional, region name, may be omitted when city is given; pass either this or coordinates :return: the returned JSON data packet Usage example:: from wechatpy import WeChatClient client = WeChatClient('appid', 'secret') res = client.semantic.search( '查一下明天从北京到上海的南航机票', 'flight,hotel', city='北京' )
18,974
def synchelp(f):

    def wrap(*args, **kwargs):
        coro = f(*args, **kwargs)
        if not iAmLoop():
            return sync(coro)
        return coro

    return wrap
The synchelp decorator allows the transparent execution of a coroutine using the global loop from a thread other than the event loop. In both use cases, the actual work is done by the global event loop. Examples: Use as a decorator:: @s_glob.synchelp async def stuff(x, y): await dostuff() Calling the stuff function as regular async code using the standard await syntax:: valu = await stuff(x, y) Calling the stuff function as regular sync code outside of the event loop thread:: valu = stuff(x, y)
18,975
def get_level(level_string):
    levels = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR,
              'critical': logging.CRITICAL}
    try:
        level = levels[level_string.lower()]
    except KeyError:
        sys.exit('{} is not a recognized logging level'.format(level_string))  # message assumed
    else:
        return level
Returns an appropriate logging level integer from a string name
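Usage:

import logging

assert get_level("INFO") == logging.INFO
assert get_level("warning") == logging.WARNING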
18,976
def is_excluded_path(args, filepath):
    for regexp_exclude_path in args.regexp:
        if re.match(regexp_exclude_path, filepath):
            return True
    abspath = os.path.abspath(filepath)
    if args.include:
        out_of_include_dirs = True
        for incl_path in args.include:
            absolute_include_path = os.path.abspath(os.path.join(args.root, incl_path))
            if is_child_dir(absolute_include_path, abspath):
                out_of_include_dirs = False
                break
        if out_of_include_dirs:
            return True
    excl_rules = create_exclude_rules(args)
    for i, rule in enumerate(excl_rules):
        if rule[0] == abspath:
            return rule[1]
        if is_child_dir(rule[0], abspath):
            last_result = rule[1]
            for j in range(i + 1, len(excl_rules)):
                rule_deep = excl_rules[j]
                if not is_child_dir(rule_deep[0], abspath):
                    break
                last_result = rule_deep[1]
            return last_result
    return False
Returns true if the filepath is under one of the exclude paths.
18,977
def add(self, transport, address=None):
    if not address:
        address = str(uuid.uuid1())
    if address in self.recipients:
        self.recipients[address].add(transport)
    else:
        self.recipients[address] = RecipientManager(transport, address)
    return address
add a new recipient to be addressable by this MessageDispatcher; generate a new uuid address if one is not specified
18,978
def set_options(pool_or_cursor, row_instance):
    "for connection-level options that need to be set on Row instances"
    for option in ():  # the option names were lost from this record
        setattr(row_instance, option, getattr(pool_or_cursor, option, None))
    return row_instance
for connection-level options that need to be set on Row instances
18,979
def get_viewer(self, v_id, viewer_class=None, width=512, height=512, force_new=False):
    if not force_new:
        try:
            return self.viewers[v_id]
        except KeyError:
            pass
    window = self.app.make_window("Viewer %s" % v_id, wid=v_id)
    v_info = self.make_viewer(window, viewer_class=viewer_class,
                              width=width, height=height)
    self.viewers[v_id] = v_info
    return v_info
Get an existing viewer by viewer id. If the viewer does not yet exist, make a new one.
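A usage sketch (the surrounding application object and viewer id are hypothetical)::

    v_info = app_server.get_viewer('v1', width=800, height=600)
    same = app_server.get_viewer('v1')                    # cache hit: same viewer returned
    fresh = app_server.get_viewer('v1', force_new=True)   # bypasses the cache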
18,980
def clean_columns(columns, valid_regex=r'\W', lower=True, max_len=32):
    # assumes `import re` and `import numpy as np` at module level; the default
    # regex and the substitution/lowercasing steps were lost in extraction and
    # are reconstructed here to be consistent with the doctest below
    rettype = None
    if isinstance(columns, str):
        rettype = type(columns)
        columns = [columns]
    columns = [c.strip() for c in columns]
    # turn whitespace into underscores, then drop remaining invalid characters;
    # truncation must follow cleaning for the doctest output to match
    columns = [re.sub(r'\s', '_', c) for c in columns]
    columns = [re.sub(valid_regex, '', c) for c in columns]
    if lower:
        columns = [c.lower() for c in columns]
    columns = [c[:max_len] for c in columns]
    columns = np.array(columns) if rettype is None else rettype(columns[0])
    return columns
Ensure all column name strings are valid python variable/attribute names >>> df = pd.DataFrame(np.zeros((2, 3)), columns=['WAT??', "Don't do th!s, way too long. ya-think????", 'ok-this123.456']) >>> df.columns = clean_columns(df.columns, max_len=12) >>> df.head() wat dont_do_ths_ okthis123456 0 0.0 0.0 0.0 1 0.0 0.0 0.0
18,981
def compress_to(self, archive_path=None):
    if archive_path is None:
        archive = tempfile.NamedTemporaryFile(delete=False)
        tar_args = ()
        tar_kwargs = {'fileobj': archive}
        _return = archive.name
    else:
        tar_args = (archive_path,)  # original had a non-tuple here; fixed
        tar_kwargs = {}
        _return = archive_path
    tar_kwargs.update({'mode': 'w:gz'})  # gzip compression, per the docstring
    with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
        tar.add(self.path, arcname=self.file)
    return _return
Compress the directory with gzip using tarfile. :type archive_path: str :param archive_path: Path to the archive, if None, a tempfile is created
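A usage sketch (the class name and directory path are hypothetical)::

    d = Directory('/tmp/mydir')
    tmp_archive = d.compress_to()                 # gzip tarball in a NamedTemporaryFile
    named = d.compress_to('/tmp/mydir.tar.gz')    # explicit archive path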
18,982
def member(self, phlo_id, node_id, member_id, action, node_type='conference_bridge'):
    # the dict keys and the node_type default were lost in extraction; the keys
    # are reconstructed from the parameter names and the default from the docstring
    data = {
        'member_id': member_id,
        'phlo_id': phlo_id,
        'node_id': node_id,
        'node_type': node_type
    }
    member = Member(self.client, data)
    return getattr(member, action)()
:param phlo_id: :param node_id: :param member_id: :param action: :param node_type: default value `conference_bridge` :return:
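A usage sketch (the resource object, ids, and the 'mute' action are placeholders; `action` must name a method on the `Member` resource)::

    resource.member('<phlo_id>', '<node_id>', '<member_id>', 'mute')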
18,983
def iter_list_market_book(self, market_ids, chunk_size, **kwargs): return itertools.chain(*( self.list_market_book(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
Split call to `list_market_book` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_book`
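For example, spreading a large request over several smaller ones (the client object and chunk size are illustrative, not from the source)::

    # 100 placeholder ids fetched 40 at a time -> 3 underlying list_market_book calls
    books = list(trading.betting.iter_list_market_book(market_ids, chunk_size=40))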
18,984
def project(self, project, entity=None):
    # The original GraphQL query text was lost in extraction; this query is an
    # assumption reconstructed from the docstring's list of returned fields.
    query = gql('''
    query Project($entity: String, $project: String!) {
        model(name: $project, entityName: $entity) {
            id
            name
            repo
            dockerImage
            description
        }
    }
    ''')
    return self.gql(query, variable_values={
        'entity': entity, 'project': project})['model']
Retrieve project Args: project (str): The project to get details for entity (str, optional): The entity to scope this project to. Returns: [{"id","name","repo","dockerImage","description"}]
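A usage sketch (object and project names are hypothetical)::

    api = Api()
    info = api.project('my-project', entity='my-team')
    print(info['name'], info['dockerImage'])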
18,985
def _importSNPs_CasavaSNP(setName, species, genomeSource, snpsFile):
    "This function will also create an index on start->chromosomeNumber->setName. Warning: pyGeno positions are 0 based"
    # NOTE: the literal message strings, index field names, and dict keys below
    # were lost in extraction; they are reconstructed from the docstring and
    # should be treated as assumptions.
    printf('Importing SNP set %s for species %s...' % (setName, species))
    snpData = SNPsTxtFile(snpsFile)
    CasavaSNP.dropIndex(('start', 'chromosomeNumber', 'setName'))
    conf.db.beginTransaction()
    pBar = ProgressBar(len(snpData))
    pLabel = ''
    currChrNumber = None
    for snpEntry in snpData:
        tmpChr = snpEntry['chromosomeNumber']
        if tmpChr != currChrNumber:
            currChrNumber = tmpChr
            pLabel = 'Chr %s' % currChrNumber
        snp = CasavaSNP()
        snp.species = species
        snp.setName = setName
        for f in snp.getFields():
            try:
                setattr(snp, f, snpEntry[f])
            except KeyError:
                if f != 'species' and f != 'setName':
                    printf("Warning: filetype has no key %s" % f)
        snp.start -= 1
        snp.end -= 1
        snp.save()
        pBar.update(label=pLabel)
    pBar.close()
    snpMaster = SNPMaster()
    snpMaster.set(setName=setName, SNPType='CasavaSNP', species=species)
    snpMaster.save()
    printf('Committing transaction...')
    conf.db.endTransaction()
    printf('Creating indexes...')
    CasavaSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName'))
    printf('SNP set %s for species %s imported.' % (setName, species))
    return True
This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based
18,986
def __store_record(self, record):
    if isinstance(record, WSimpleTrackerStorage.Record) is False:
        # the original error message was lost in extraction; reconstructed
        raise TypeError('record must be a WSimpleTrackerStorage.Record instance')
    limit = self.record_limit()
    if limit is not None and len(self.__registry) >= limit:
        self.__registry.pop(0)  # drop the oldest record to respect the limit
    self.__registry.append(record)
Save record in an internal storage :param record: record to save :return: None
18,987
def generate_matches(self, nodes): if self.content is None: for count in xrange(self.min, 1 + min(len(nodes), self.max)): r = {} if self.name: r[self.name] = nodes[:count] yield count, r elif self.name == "bare_name": yield self._bare_name_matches(nodes) else: if hasattr(sys, "getrefcount"): save_stderr = sys.stderr sys.stderr = StringIO() try: for count, r in self._recursive_matches(nodes, 0): if self.name: r[self.name] = nodes[:count] yield count, r except RuntimeError: for count, r in self._iterative_matches(nodes): if self.name: r[self.name] = nodes[:count] yield count, r finally: if hasattr(sys, "getrefcount"): sys.stderr = save_stderr
Generator yielding matches for a sequence of nodes. Args: nodes: sequence of nodes Yields: (count, results) tuples where: count: the match comprises nodes[:count]; results: dict containing named submatches.
18,988
def create_alarm(deployment_id, metric_name, data, api_key=None, profile="telemetry"):
    auth = _auth(api_key, profile)
    request_uri = _get_telemetry_base(profile) + "/alerts"
    key = "telemetry.{0}.alerts".format(deployment_id)
    # dict keys and log message text below were lost in extraction and have
    # been reconstructed; treat them as assumptions
    post_body = {
        "deployment": deployment_id,
        "filter": data.get('filter'),
        "notificationChannel": get_notification_channel_id(data.get('escalate_to')).split(),
        "condition": {
            "metric": metric_name,
            "max": data.get('max'),
            "min": data.get('min')
        }
    }
    try:
        response = requests.post(request_uri, data=salt.utils.json.dumps(post_body), headers=auth)
    except requests.exceptions.RequestException as e:
        log.error(six.text_type(e))
        # the original fell through and referenced `response` unbound here;
        # returning the (success, message) tuple from the docstring instead
        return False, six.text_type(e)
    if response.status_code >= 200 and response.status_code < 300:
        log.info('Created alarm on metric: %s in deployment: %s', metric_name, deployment_id)
        log.debug('Created alarm on metric: %s in deployment: %s, response: %s',
                  metric_name, deployment_id, response.json())
        _update_cache(deployment_id, metric_name, response.json())
    else:
        log.error(
            'Failed to create alarm on metric: %s in deployment: %s, payload: %s',
            metric_name, deployment_id, salt.utils.json.dumps(post_body)
        )
    return response.status_code >= 200 and response.status_code < 300, response.json()
Create a telemetry alarm. data is a dict of alert configuration data. Returns (bool success, str message) tuple. CLI Example: salt myminion telemetry.create_alarm rs-ds033197 {} profile=telemetry
18,989
def get_current(self, channel, unit='A'):
    # NOTE: the channel-map/calibration dict keys, the unit strings, and the
    # default unit were lost in extraction; the names below are assumptions
    values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
    raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
    dac_offset = self._ch_cal[channel]['ADCI']['offset']
    dac_gain = self._ch_cal[channel]['ADCI']['gain']
    if 'SRC' in channel:
        current = (raw - dac_offset) / dac_gain  # in mA, judging by the scale factors
        if unit == 'raw':
            return raw
        elif unit == 'A':
            return current / 1000
        elif unit == 'mA':
            return current
        elif unit == 'uA':
            return current * 1000
        else:
            raise TypeError("Invalid unit type.")
    else:
        voltage = values[self._ch_map[channel]['ADCV']['adc_ch']]
        current = ((raw - voltage) - dac_offset) / dac_gain  # in uA, judging by the scale factors
        if unit == 'raw':
            return raw
        elif unit == 'A':
            return current / 1000000
        elif unit == 'mA':
            return current / 1000
        elif unit == 'uA':
            return current
        else:
            raise TypeError("Invalid unit type.")
Reading current
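A usage sketch (channel names depend on the hardware channel map and are placeholders)::

    i_raw = dev.get_current('VSRC0', unit='raw')  # raw ADC value
    i_ma = dev.get_current('VSRC0', unit='mA')    # calibrated current in mA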
18,990
def validate_allowed_to_pay(self):
    ''' Passes cleanly if we're allowed to pay, otherwise raise a ValidationError. '''
    self._refresh()
    if not self.invoice.is_unpaid:
        raise ValidationError("You can only pay for unpaid invoices.")
    if not self.invoice.cart:
        return
    if not self._invoice_matches_cart():
        raise ValidationError("The registration has been amended since "
                              "generating this invoice.")
    CartController(self.invoice.cart).validate_cart()
Passes cleanly if we're allowed to pay, otherwise raise a ValidationError.
18,991
def fromdeltas(cls, deltas): return cls((key, value) for (refkey, key), value in deltas.items())
Construct an offsetvector from a dictionary of offset deltas as returned by the .deltas attribute. Example: >>> x = offsetvector({"H1": 0, "L1": 10, "V1": 20}) >>> y = offsetvector.fromdeltas(x.deltas) >>> y offsetvector({'V1': 20, 'H1': 0, 'L1': 10}) >>> y == x True See also .deltas, .fromkeys()
18,992
def license_is_oa(license): for oal in OA_LICENSES: if re.search(oal, license): return True return False
Return True if license is compatible with Open Access
18,993
def nodal_production_balance(network, snapshot='all', scaling=0.00001, filename=None):
    fig, ax = plt.subplots(1, 1)
    gen = network.generators_t.p.groupby(network.generators.bus, axis=1).sum()
    load = network.loads_t.p.groupby(network.loads.bus, axis=1).sum()
    if snapshot == 'all':
        diff = (gen - load).sum()
    else:
        timestep = network.snapshots[snapshot]
        diff = (gen - load).loc[timestep]
    # the color strings, legend labels, and legend location were lost in
    # extraction; the values below are assumptions
    colors = {s[0]: 'green' if s[1] > 0 else 'red' for s in diff.iteritems()}
    subcolors = {'Net generation': 'green', 'Net consumption': 'red'}
    diff = diff.abs()
    network.plot(
        bus_sizes=diff * scaling,
        bus_colors=colors,
        line_widths=0.2,
        margin=0.01,
        ax=ax)
    patchList = []
    for key in subcolors:
        data_key = mpatches.Patch(color=subcolors[key], label=key)
        patchList.append(data_key)
    ax.legend(handles=patchList, loc='best')
    ax.autoscale()
    if filename:
        plt.savefig(filename)
        plt.close()
    return
Plots the nodal difference between generation and consumption. Parameters ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis snapshot : int or 'all' Snapshot to plot. default 'all' scaling : float Scaling to change plot sizes. default 0.00001 filename : str or None Path to save the figure to; if None, the figure is not saved
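A usage sketch (assumes a solved PyPSA network object is at hand)::

    nodal_production_balance(network, snapshot='all', scaling=0.00001)        # aggregate over all snapshots
    nodal_production_balance(network, snapshot=0, filename='balance_t0.png')  # single timestep, saved to file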
18,994
def can_update_topics_to_sticky_topics(self, forum, user):
    # the permission codenames were lost in extraction; these two are
    # assumptions based on django-machina's permission naming
    return (
        self._perform_basic_permission_check(forum, user, 'can_edit_posts') and
        self._perform_basic_permission_check(forum, user, 'can_post_stickies')
    )
Given a forum, checks whether the user can change its topic types to sticky topics.
18,995
def _check_rules(browser, rules_js, config):
    # The JavaScript template strings were lost in extraction; the scripts
    # below are reconstructions based on the axe-core API and are assumptions.
    audit_run_script = dedent(u'''
        {rules_js}
        {custom_rules}
        axe.run({context}, {options}, function(err, results) {{
            window.a11yAuditResults = JSON.stringify(results);
        }});
    ''').format(
        rules_js=rules_js,
        custom_rules=config.custom_rules,
        context=config.context,
        options=config.rules
    )
    audit_results_script = dedent(u'''
        return window.a11yAuditResults;
    ''')
    browser.execute_script(audit_run_script)

    def audit_results_check_func():
        unicode_results = browser.execute_script(audit_results_script)
        try:
            results = json.loads(unicode_results)
        except (TypeError, ValueError):
            results = None
        if results:
            return True, results
        return False, None

    result = Promise(
        audit_results_check_func,
        "Timed out waiting for a11y audit results.",
        timeout=5,
    ).fulfill()
    return result  # the original returned an undefined name (`audit_results`)
Run an accessibility audit on the page using the axe-core ruleset. Args: browser: a browser instance. rules_js: the ruleset JavaScript as a string. config: an AxsAuditConfig instance. Returns: A list of violations. Related documentation: https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object __Caution__: You probably don't really want to call this method directly! It will be used by `AxeCoreAudit.do_audit`.
18,996
def ahrs_send(self, omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw, force_mavlink1=False): return self.send(self.ahrs_encode(omegaIx, omegaIy, omegaIz, accel_weight, renorm_val, error_rp, error_yaw), force_mavlink1=force_mavlink1)
Status of DCM attitude estimator omegaIx : X gyro drift estimate rad/s (float) omegaIy : Y gyro drift estimate rad/s (float) omegaIz : Z gyro drift estimate rad/s (float) accel_weight : average accel_weight (float) renorm_val : average renormalisation value (float) error_rp : average error_roll_pitch value (float) error_yaw : average error_yaw value (float)
18,997
def is_enum_type(type_): return isinstance(type_, type) and issubclass(type_, tuple(_get_types(Types.ENUM)))
Checks if the given type is an enum type. :param type_: The type to check :return: True if the type is an enum type, otherwise False :rtype: bool
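For example (sketch; `_get_types(Types.ENUM)` is assumed to yield `enum.Enum`)::

    import enum

    class Color(enum.Enum):
        RED = 1

    is_enum_type(Color)      # True: Color is a subclass of enum.Enum
    is_enum_type(Color.RED)  # False: an enum member is not a type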
18,998
def fms(x, y, z, context=None): return _apply_function_in_current_context( BigFloat, mpfr.mpfr_fms, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), BigFloat._implicit_convert(z), ), context, )
Return (x * y) - z, with a single rounding according to the current context.
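A quick numeric check (sketch; assumes the function is exposed by the `bigfloat` package)::

    from bigfloat import fms

    assert fms(2, 3, 1) == 5  # (2 * 3) - 1, rounded only once
    # the single rounding matters when x * y is not exactly representable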
18,999
def actions_delete(): filename = action_listlocal() ONTOSPY_LOCAL_MODELS = get_home_location() if filename: fullpath = ONTOSPY_LOCAL_MODELS + filename if os.path.exists(fullpath): var = input("Are you sure you want to delete this file? (y/n)") if var == "y": os.remove(fullpath) printDebug("Deleted %s" % fullpath, "important") cachepath = ONTOSPY_LOCAL_CACHE + filename + ".pickle" if os.path.exists(cachepath): os.remove(cachepath) printDebug("---------") printDebug("File deleted [%s]" % cachepath, "important") return True else: printDebug("Goodbye") return False
DEPRECATED (v 1.9.4) delete an ontology from the local repo