Columns:
Unnamed: 0: int64 (0 to 389k)
code: string (lengths 26 to 79.6k)
docstring: string (lengths 1 to 46.9k)
384,500
def _on_dynamodb_exception(self, error):
    if isinstance(error, exceptions.ConditionalCheckFailedException):
        raise web.HTTPError(409, reason='Conditional check failed')
    elif isinstance(error, exceptions.NoCredentialsError):
        if _no_creds_should_return_429():
            raise web.HTTPError(429, reason='No AWS credentials available')
    elif isinstance(error, (exceptions.ThroughputExceeded,
                            exceptions.ThrottlingException)):
        raise web.HTTPError(429, reason='Request throttled')
    if hasattr(self, 'logger'):
        self.logger.error('DynamoDB error: %s', error)
    raise web.HTTPError(500, reason=str(error))
Dynamically handle DynamoDB exceptions, returning HTTP error responses. :param exceptions.DynamoDBException error:
384,501
def moveToReplayContext(self, r):
    if not r.sheet:
        return self
    if options.replay_movement:
        while vs.cursorVisibleColIndex != vcolidx:
            vs.cursorVisibleColIndex += 1 if (vcolidx - vs.cursorVisibleColIndex) > 0 else -1
            while not self.delay(0.5):
                pass
        assert vs.cursorVisibleColIndex == vcolidx
    else:
        vs.cursorVisibleColIndex = vcolidx
    return vs
set the sheet/row/col to the values in the replay row. return sheet
384,502
def overdrive(self, gain_db=20.0, colour=20.0):
    if not is_number(gain_db):
        raise ValueError('gain_db must be a number.')
    if not is_number(colour):
        raise ValueError('colour must be a number.')
    effect_args = [
        'overdrive',
        '{:f}'.format(gain_db),
        '{:f}'.format(colour)
    ]
    self.effects.extend(effect_args)
    self.effects_log.append('overdrive')
    return self
Apply non-linear distortion. Parameters ---------- gain_db : float, default=20 Controls the amount of distortion (dB). colour : float, default=20 Controls the amount of even harmonic content in the output (dB).
384,503
def recursive_map(func, data):
    def recurse(item):
        return recursive_map(func, item)
    items_mapped = map_collection(recurse, data)
    return func(items_mapped)
Apply func to data, and any collection items inside data (using map_collection). Define func so that it only applies to the type of value that you want it to apply to.
384,504
async def _heartbeat_callback(self):
    query = {
        "MPRPC": self.VERSION,
        "HEARTBEAT": "ping"
    }
    queryb = self.encoder(query)
    while True:
        await asyncio.sleep(self.heart_beat)
        self.writer.write(queryb)
        if self.debug is True:
            print("ping")
If a heartbeat is configured, this coroutine is invoked.
384,505
def container(
    state, host, name,
    present=True, image='ubuntu:16.04',
):
    container = get_container_named(name, host.fact.lxd_containers)
    if not container and present:
        yield 'lxc launch {image} {name}'.format(name=name, image=image)
Add/remove LXD containers. Note: does not check if an existing container is based on the specified image. + name: name of the container + image: image to base the container on + present: whether the container should be present or absent
384,506
def system_call(cmd, **kwargs):
    proc = Popen(cmd, universal_newlines=True, shell=True,
                 stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    if return_value != 0:
        raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s"
                         % (cmd, stdout, stderr))
    return stdout, stderr, return_value
Call cmd and return (stdout, stderr, return_value). Parameters ---------- cmd: str Can be either a string containing the command to be run, or a sequence of strings that are the tokens of the command. kwargs : dict, optional Ignored. Available so that this function is compatible with _redis_wrap. Notes ----- This function is ported from QIIME (http://www.qiime.org), previously named qiime_system_call. QIIME is a GPL project, but we obtained permission from the authors of this function to port it to pyqi (and keep it under pyqi's BSD license).
384,507
def geom_symm_match(g, atwts, ax, theta, do_refl): import numpy as np from scipy import linalg as spla g = make_nd_vec(g, nd=None, t=np.float64, norm=False) atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False) if not g.shape[0] == 3 * atwts.shape[0]: raise ValueError("Size of is not 3*size of ") gx = symm_op(g, ax, theta, do_refl) g = g.reshape((g.shape[0],1)) ex_wts = atwts.repeat(3,axis=0).T.reshape((atwts.shape[0]*3,1)) * 1.j g = np.add(g, ex_wts) gx = np.add(gx, ex_wts) calc_g = g.reshape((g.shape[0] // 3, 3)) calc_gx = gx.reshape((gx.shape[0] // 3, 3)) calc = [[spla.norm(np.subtract(calc_g[i,:], calc_gx[j,:])) \ for j in range(calc_gx.shape[0])] \ for i in range(calc_g.shape[0])] scale_g = np.array([spla.norm(calc_g[i,:]) for i in \ range(calc_g.shape[0])]).reshape((calc_g.shape[0],1)) \ .repeat(calc_g.shape[0], axis=1) scale_gx = np.array([spla.norm(calc_gx[j,:]) for j in \ range(calc_g.shape[0])]).reshape((1,calc_gx.shape[0])) \ .repeat(calc_gx.shape[0], axis=0) scale = np.maximum(np.maximum(scale_g, scale_gx), np.ones_like(scale_g, dtype=np.float64)) calc = np.divide(calc, scale) mins = np.min(calc, axis=1) fac = np.max(mins) fac = min(fac, 1.0) return fac
[Revised match factor calculation] .. todo:: Complete geom_symm_match docstring
384,508
def query_transactions(self, initial_date, final_date, page=None,
                       max_results=None):
    last_page = False
    results = []
    while last_page is False:
        search_result = self._consume_query_transactions(
            initial_date, final_date, page, max_results)
        results.extend(search_result.transactions)
        if (search_result.current_page is None or
                search_result.total_pages is None or
                search_result.current_page == search_result.total_pages):
            last_page = True
        else:
            page = search_result.current_page + 1
    return results
Query transactions by date range.
384,509
def mouseMoveEvent(self, event):
    if (event.pos() - self.dragStartPosition).manhattanLength() < 10:
        return
    QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
    factory = self.factoryclass()
    mimeData = QtCore.QMimeData()
    try:
        mimeData.setData("application/x-protocol", factory.serialize())
    except:
        mimeData.setData("application/x-protocol", cPickle.dumps(factory))
    drag = QtGui.QDrag(self)
    drag.setMimeData(mimeData)
    pixmap = QtGui.QPixmap()
    pixmap = pixmap.grabWidget(self, self.frameRect())
    drag.setPixmap(pixmap)
    drag.setHotSpot(QtCore.QPoint(pixmap.width()/2, pixmap.height()/2))
    drag.setPixmap(pixmap)
    self.dragActive.emit(True)
    result = drag.exec_(QtCore.Qt.MoveAction)
    QtGui.QApplication.restoreOverrideCursor()
Determines if a drag is taking place, and initiates it
384,510
def get_file_path(self, name, relative_path):
    dist = self.get_distribution(name)
    if dist is None:
        raise LookupError('no distribution named %r found' % name)
    return dist.get_resource_path(relative_path)
Return the path to a resource file.
384,511
def create_model(self): properties = { : self.name, : 1000 } objective = self.get_active_objective() if objective is not None: reactions = dict(objective.reactions) if len(reactions) == 1: reaction, value = next(iteritems(reactions)) if ((value < 0 and objective.type == ) or (value > 0 and objective.type == )): properties[] = reaction model = NativeModel(properties) for compartment in self.compartments: model.compartments.add_entry(compartment) for compound in self.species: model.compounds.add_entry(compound) for reaction in self.reactions: model.reactions.add_entry(reaction) for reaction in model.reactions: model.model[reaction.id] = None for reaction in model.reactions: props = reaction.properties if in props or in props: lower = props.get() upper = props.get() model.limits[reaction.id] = reaction.id, lower, upper limits_lower = {} limits_upper = {} for bounds in self.flux_bounds: reaction = bounds.reaction if reaction in model.limits: continue if bounds.operation == SBMLFluxBoundEntry.LESS_EQUAL: if reaction not in limits_upper: limits_upper[reaction] = bounds.value else: raise ParseError( .format(reaction)) elif bounds.operation == SBMLFluxBoundEntry.GREATER_EQUAL: if reaction not in limits_lower: limits_lower[reaction] = bounds.value else: raise ParseError( .format(reaction)) elif bounds.operation == SBMLFluxBoundEntry.EQUAL: if (reaction not in limits_lower and reaction not in limits_upper): limits_lower[reaction] = bounds.value limits_upper[reaction] = bounds.value else: raise ParseError( .format(reaction)) for reaction in model.reactions: if reaction.id in limits_lower or reaction.id in limits_upper: lower = limits_lower.get(reaction.id, None) upper = limits_upper.get(reaction.id, None) model.limits[reaction.id] = reaction.id, lower, upper return model
Create model from reader. Returns: :class:`psamm.datasource.native.NativeModel`.
384,512
def read_cstring(self, terminator=b'\x00'):
    null_index = self.data.find(terminator, self.offset)
    if null_index == -1:
        raise RuntimeError("Reached end of buffer")
    result = self.data[self.offset:null_index]
    self.offset = null_index + len(terminator)
    return result
Reads a single null terminated string :return: string without the terminator :rtype: :class:`bytes`
384,513
def _calc_recip(self): numsites = self._s.num_sites prefactor = 2 * pi / self._vol erecip = np.zeros((numsites, numsites), dtype=np.float) forces = np.zeros((numsites, 3), dtype=np.float) coords = self._coords rcp_latt = self._s.lattice.reciprocal_lattice recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], self._gmax) frac_coords = [fcoords for (fcoords, dist, i, img) in recip_nn if dist != 0] gs = rcp_latt.get_cartesian_coords(frac_coords) g2s = np.sum(gs ** 2, 1) expvals = np.exp(-g2s / (4 * self._eta)) grs = np.sum(gs[:, None] * coords[None, :], 2) oxistates = np.array(self._oxi_states) qiqj = oxistates[None, :] * oxistates[:, None] sreals = np.sum(oxistates[None, :] * np.cos(grs), 1) simags = np.sum(oxistates[None, :] * np.sin(grs), 1) for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals, sreals, simags): m = (gr[None, :] + pi / 4) - gr[:, None] np.sin(m, m) m *= expval / g2 erecip += m if self._compute_forces: pref = 2 * expval / g2 * oxistates factor = prefactor * pref * ( sreal * np.sin(gr) - simag * np.cos(gr)) forces += factor[:, None] * g[None, :] forces *= EwaldSummation.CONV_FACT erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5 return erecip, forces
Perform the reciprocal space summation. Calculates the quantity E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G) where S(G) = sum_{k=1,N} q_k exp(-i G.r_k) S(G)S(-G) = |S(G)|**2 This method is heavily vectorized to utilize numpy's C backend for speed.
384,514
def run(self):
    with Popen(self.command, stdout=PIPE, shell=True) as process:
        while True:
            current_line = process.stdout.readline().rstrip()
            if not current_line:
                break
            yield self._decode_output(current_line)
Run the given command and yield each line(s) one by one. .. note:: The difference between this method and :code:`self.execute()` is that :code:`self.execute()` wait for the process to end in order to return its output.
384,515
def get_interval(ticker, session) -> Session: if not in session: session = f interval = Intervals(ticker=ticker) ss_info = session.split() return getattr(interval, f)(*ss_info)
Get interval from defined session Args: ticker: ticker session: session Returns: Session of start_time and end_time Examples: >>> get_interval('005490 KS Equity', 'day_open_30') Session(start_time='09:00', end_time='09:30') >>> get_interval('005490 KS Equity', 'day_normal_30_20') Session(start_time='09:31', end_time='15:00') >>> get_interval('005490 KS Equity', 'day_close_20') Session(start_time='15:01', end_time='15:20') >>> get_interval('700 HK Equity', 'am_open_30') Session(start_time='09:30', end_time='10:00') >>> get_interval('700 HK Equity', 'am_normal_30_30') Session(start_time='10:01', end_time='11:30') >>> get_interval('700 HK Equity', 'am_close_30') Session(start_time='11:31', end_time='12:00') >>> get_interval('ES1 Index', 'day_exact_2130_2230') Session(start_time=None, end_time=None) >>> get_interval('ES1 Index', 'allday_exact_2130_2230') Session(start_time='21:30', end_time='22:30') >>> get_interval('ES1 Index', 'allday_exact_2130_0230') Session(start_time='21:30', end_time='02:30') >>> get_interval('AMLP US', 'day_open_30') Session(start_time=None, end_time=None) >>> get_interval('7974 JP Equity', 'day_normal_180_300') is SessNA True >>> get_interval('Z 1 Index', 'allday_normal_30_30') Session(start_time='01:31', end_time='20:30') >>> get_interval('GBP Curncy', 'day') Session(start_time='17:02', end_time='17:00')
384,516
def writes(nb, format, **kwargs):
    format = unicode(format)
    if format == u'json' or format == u'ipynb':
        return writes_json(nb, **kwargs)
    elif format == u'py':
        return writes_py(nb, **kwargs)
    else:
        raise NBFormatError('Unsupported format: %s' % format)
Write a notebook to a string in a given format in the current nbformat version. This function always writes the notebook in the current nbformat version. Parameters ---------- nb : NotebookNode The notebook to write. format : (u'json', u'ipynb', u'py') The format to write the notebook in. Returns ------- s : unicode The notebook string.
384,517
def applet_run(object_id, input_params={}, always_retry=True, **kwargs):
    input_params_cp = Nonce.update_nonce(input_params)
    return DXHTTPRequest('/%s/run' % object_id, input_params_cp,
                         always_retry=always_retry, **kwargs)
Invokes the /applet-xxxx/run API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Frun
384,518
def ensureBones(self, data):
    if "_bones" not in data:
        data["_bones"] = {}
    if self.name not in data["_bones"]:
        data["_bones"][self.name] = {"rot": self.start_rot[:], "length": self.blength}
Helper method ensuring per-entity bone data has been properly initialized. Should be called at the start of every method accessing per-entity data. ``data`` is the entity to check in dictionary form.
384,519
def format_search(q, **kwargs): m = search(q, **kwargs) count = m[] if not count: raise DapiCommError() return for mdap in m[]: mdap = mdap[] return _format_dap_with_description(mdap)
Formats the results of a search
384,520
def update_column(self, header, column):
    index = self.get_column_index(header)
    if not isinstance(header, basestring):
        raise TypeError("header must be of type str")
    for row, new_item in zip(self._table, column):
        row[index] = new_item
Update a column named `header` in the table. If length of column is smaller than number of rows, lets say `k`, only the first `k` values in the column is updated. Parameters ---------- header : str Header of the column column : iterable Any iterable of appropriate length. Raises ------ TypeError: If length of `column` is shorter than number of rows. ValueError: If no column exists with title `header`.
384,521
def insert(self, item, safe=None): warnings.warn(, PendingDeprecationWarning) self.add(item, safe=safe)
[DEPRECATED] Please use save() instead. This actually calls the underlying save function, so the name is confusing. Insert an item into the work queue and flushes.
384,522
def _detect_encoding(data=None):
    import locale
    enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',
                'utf-16', 'cp720']
    code = locale.getpreferredencoding(False)
    if data is None:
        return code
    if code.lower() not in enc_list:
        enc_list.insert(0, code.lower())
    for c in enc_list:
        try:
            for line in data:
                line.decode(c)
        except (UnicodeDecodeError, UnicodeError, AttributeError):
            continue
        return c
    print("Encoding not detected. Please pass encoding value manually")
Return the default system encoding. If data is passed, try to decode the data with the default system encoding or from a short list of encoding types to test. Args: data - list of lists Returns: enc - system encoding
384,523
def hash(self):
    if self._hash is None:
        fsh = [self.path.with_name(self._mid + "_camera.ini"),
               self.path.with_name(self._mid + "_para.ini")]
        tohash = [hashfile(f) for f in fsh]
        tohash.append(self.path.name)
        tohash.append(hashfile(self.path, blocksize=65536, count=20))
        self._hash = hashobj(tohash)
    return self._hash
Hash value based on file name and .ini file content
384,524
def _on_closed(self): self._connected.clear() if not self._closing: if self._on_close_callback: self._on_close_callback() else: raise exceptions.ConnectionError()
Invoked by connections when they are closed.
384,525
def t_STRING(t):
    r"'([^'\\]+|\\'|\\\\)*'"
    t.value = t.value.replace(r"\\", chr(92)).replace(r"\'", "'")[1:-1]
    return t
r"'([^'\\]+|\\'|\\\\)*'"
384,526
def trk50(msg):
    d = hex2bin(data(msg))
    if d[11] == '0':
        return None
    sign = int(d[12])
    value = bin2int(d[13:23])
    if sign:
        value = value - 1024
    trk = value * 90.0 / 512.0
    if trk < 0:
        trk = 360 + trk
    return round(trk, 3)
True track angle, BDS 5,0 message Args: msg (String): 28 bytes hexadecimal message (BDS50) string Returns: float: angle in degrees to true north (from 0 to 360)
384,527
def list_build_configurations_for_project(id=None, name=None, page_size=200,
                                          page_index=0, sort="", q=""):
    data = list_build_configurations_for_project_raw(id, name, page_size,
                                                     page_index, sort, q)
    if data:
        return utils.format_json_list(data)
List all BuildConfigurations associated with the given Project.
384,528
def rgb2rgba(rgb):
    rgba = []
    for i in range(0, len(rgb), 3):
        rgba += rgb[i:i+3]
        rgba.append(255)
    return rgba
Take a row of RGB bytes, and convert to a row of RGBA bytes.
384,529
def _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma,
                 membship=None):
    n_points = Xnum.shape[0]
    Xnum = check_array(Xnum)
    cost = 0.
    labels = np.empty(n_points, dtype=np.uint16)
    for ipoint in range(n_points):
        num_costs = num_dissim(centroids[0], Xnum[ipoint])
        cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat,
                               membship=membship)
        tot_costs = num_costs + gamma * cat_costs
        clust = np.argmin(tot_costs)
        labels[ipoint] = clust
        cost += tot_costs[clust]
    return labels, cost
Calculate labels and cost function given a matrix of points and a list of centroids for the k-prototypes algorithm.
384,530
def set_cookie(self, name: str, value: str, *,
               expires: Optional[str] = None,
               domain: Optional[str] = None,
               max_age: Optional[Union[int, str]] = None,
               path: str = '/',
               secure: Optional[str] = None,
               httponly: Optional[str] = None,
               version: Optional[str] = None) -> None:
    old = self._cookies.get(name)
    if old is not None and old.coded_value == '':
        self._cookies.pop(name, None)
    self._cookies[name] = value
    c = self._cookies[name]
    if expires is not None:
        c['expires'] = expires
    elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
        del c['expires']
    if domain is not None:
        c['domain'] = domain
    if max_age is not None:
        c['max-age'] = str(max_age)
    elif 'max-age' in c:
        del c['max-age']
    c['path'] = path
    if secure is not None:
        c['secure'] = secure
    if httponly is not None:
        c['httponly'] = httponly
    if version is not None:
        c['version'] = version
Set or update response cookie. Sets new cookie or updates existent with new value. Also updates only those params which are not None.
384,531
def getReadNoise(self, exten):
    rn = self._image[exten]._rdnoise
    if self.proc_unit == 'native':
        rn = self._rdnoise / self.getGain(exten)
    return rn
Method for returning the readnoise of a detector (in counts). Returns ------- readnoise : float The readnoise of the detector in **units of counts/electrons**.
384,532
def remove_license_requests(cursor, uuid_, uids): if not isinstance(uids, (list, set, tuple,)): raise TypeError("``uids`` is an invalid type: {}".format(type(uids))) acceptors = list(set(uids)) cursor.execute(, (uuid_, acceptors,))
Given a ``uuid`` and list of ``uids`` (user identifiers) remove the identified users' license acceptance entries.
384,533
def ip_address_delete(session, ifname, ifaddr): def _remove_inet_addr(intf_inet, addr): addr_list = intf_inet.split() if addr not in addr_list: LOG.debug( , intf.ifname, addr) return intf_inet else: addr_list.remove(addr) return .join(addr_list) intf = ip_link_show(session, ifname=ifname) if not intf: LOG.debug(, ifname) return None if ip.valid_ipv4(ifaddr): intf.inet = _remove_inet_addr(intf.inet, ifaddr) elif ip.valid_ipv6(ifaddr): intf.inet6 = _remove_inet_addr(intf.inet6, ifaddr) else: LOG.debug(, ifaddr) return None return intf
Deletes an IP address from interface record identified with the given "ifname". The arguments are similar to "ip address delete" command of iproute2. :param session: Session instance connecting to database. :param ifname: Name of interface. :param ifaddr: IPv4 or IPv6 address. :return: Instance of record or "None" if failed.
384,534
def update(self, iterable):
    if iterable:
        return PBag(reduce(_add_to_counters, iterable, self._counts))
    return self
Update bag with all elements in iterable. >>> s = pbag([1]) >>> s.update([1, 2]) pbag([1, 1, 2])
384,535
def getBlockParams(ws):
    dynBCParams = ws.get_dynamic_global_properties()
    ref_block_num = dynBCParams["head_block_number"] & 0xFFFF
    ref_block_prefix = struct.unpack_from(
        "<I", unhexlify(dynBCParams["head_block_id"]), 4
    )[0]
    return ref_block_num, ref_block_prefix
Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node!
384,536
def instance_default(self, obj): return self.property.themed_default(obj.__class__, self.name, obj.themed_values())
Get the default value that will be used for a specific instance. Args: obj (HasProps) : The instance to get the default value for. Returns: object
384,537
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
    if batch_size or verbose:
        raise Exception("we don't support batch_size or verbose for now")
    if is_distributed:
        if isinstance(x, np.ndarray):
            input = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            input = x
        return self.bmodel.predict(input)
    else:
        if isinstance(x, np.ndarray):
            return self.bmodel.predict_local(x)
    raise Exception("not supported type: %s" % x)
Generates output predictions for the input samples, processing the samples in a batched way. # Arguments x: the input data, as a Numpy array or list of Numpy array for local mode. as RDD[Sample] for distributed mode is_distributed: used to control run in local or cluster. the default value is False # Returns A Numpy array or RDD[Sample] of predictions.
384,538
def parse_alignment_summary_metrics(fn):
    df = pd.read_table(fn, index_col=0, skiprows=range(6) + [10, 11]).T
    return df
Parse the output from Picard's CollectAlignmentSummaryMetrics and return as pandas Dataframe. Parameters ---------- filename : str of filename or file handle Filename of the Picard output you want to parse. Returns ------- df : pandas.DataFrame Data from output file.
384,539
def download_results(self, savedir=None, raw=True, calib=False, index=None): obsids = self.obsids if index is None else [self.obsids[index]] for obsid in obsids: pm = io.PathManager(obsid.img_id, savedir=savedir) pm.basepath.mkdir(exist_ok=True) to_download = [] if raw is True: to_download.extend(obsid.raw_urls) if calib is True: to_download.extend(obsid.calib_urls) for url in to_download: basename = Path(url).name print("Downloading", basename) store_path = str(pm.basepath / basename) try: urlretrieve(url, store_path) except Exception as e: urlretrieve(url.replace("https", "http"), store_path) return str(pm.basepath)
Download the previously found and stored Opus obsids. Parameters ========== savedir: str or pathlib.Path, optional If the database root folder as defined by the config.ini should not be used, provide a different savedir here. It will be handed to PathManager.
384,540
def get_or_create_exh_obj(full_cname=False, exclude=None, callables_fname=None):
    if not hasattr(__builtin__, "_EXH"):
        set_exh_obj(
            ExHandle(
                full_cname=full_cname,
                exclude=exclude,
                callables_fname=callables_fname
            )
        )
    return get_exh_obj()
r""" Return global exception handler if set, otherwise create a new one and return it. :param full_cname: Flag that indicates whether fully qualified function/method/class property names are obtained for functions/methods/class properties that use the exception manager (True) or not (False). There is a performance penalty if the flag is True as the call stack needs to be traced. This argument is only relevant if the global exception handler is not set and a new one is created :type full_cname: boolean :param exclude: Module exclusion list. A particular callable in an otherwise fully qualified name is omitted if it belongs to a module in this list. If None all callables are included :type exclude: list of strings or None :param callables_fname: File name that contains traced modules information. File can be produced by either the :py:meth:`pexdoc.pinspect.Callables.save` or :py:meth:`pexdoc.ExHandle.save_callables` methods :type callables_fname: :ref:`FileNameExists` or None :rtype: :py:class:`pexdoc.ExHandle` :raises: * OSError (File *[callables_fname]* could not be found * RuntimeError (Argument \\`exclude\\` is not valid) * RuntimeError (Argument \\`callables_fname\\` is not valid) * RuntimeError (Argument \\`full_cname\\` is not valid)
384,541
def user_exists(self, username):
    path = "/users/{}".format(username)
    return self._get(path).ok
Returns whether a user with username ``username`` exists. :param str username: username of user :return: whether a user with the specified username exists :rtype: bool :raises NetworkFailure: if there is an error communicating with the server :return:
384,542
def p_configure_sentence(self, t):
    if len(t) == 3:
        t[0] = configure(t[2], reference=True, line=t.lineno(1))
    else:
        t[0] = configure(t[2], t[5], line=t.lineno(1))
configure_sentence : CONFIGURE VAR | CONFIGURE VAR LPAREN RECIPE_BEGIN recipe RECIPE_END RPAREN
384,543
def get_cached(self): def id_in_list(list, id): if id: if [i for i in list if i.id == id]: return True else: raise PyPumpException("id %r not in feed." % self._since) tmp = [] if self._before is not None: if not id_in_list(self.feed._items, self._before): return tmp if isinstance(self._before, six.string_types): found = False for i in self.feed._items: if not found: if i.id == self._before: found = True continue else: tmp.append(i) self._before = False return tmp if self._since is not None: if not id_in_list(self.feed._items, self._since): return tmp if isinstance(self._since, six.string_types): found = False for i in self.feed._items: if i.id == self._since: found = True break else: tmp.append(i) self._since = False return reversed(tmp) if not hasattr(self, ): self.usedcache = True if isinstance(self._offset, int): return self.feed._items[self._offset:] return self.feed._items else: return tmp
Get items from feed cache while trying to emulate how API handles offset/since/before parameters
384,544
def _wrapper(self, q, start): try: func_name = self.fnc.__name__ except AttributeError: func_name = str(self.fnc) logger.debug("Running \"%s\" with parameters: \"%s\":\t%s/%s" % (func_name, str(self.kwargs), round(time.time() - start), self.timeout)) try: result = self.fnc(**self.kwargs) logger.debug("callback result = %s", str(result)[:50]) q.put(result) except self.expected_exceptions as ex: logger.debug("expected exception was caught: %s", ex) q.put(False) except Exception as ex: logger.debug("adding exception %s to queue", ex) q.put(ex)
_wrapper checks return status of Probe.fnc and provides the result for process managing :param q: Queue for function results :param start: Time of function run (used for logging) :return: Return value or Exception
384,545
def draw(self, tree, bar_desc=None, save_cursor=True, flush=True): if save_cursor: self.cursor.save() tree = deepcopy(tree) lines_required = self.lines_required(tree) ensure(lines_required <= self.cursor.term.height, LengthOverflowError, "Terminal is not long ({} rows) enough to fit all bars " "({} rows).".format(self.cursor.term.height, lines_required)) bar_desc = BarDescriptor(type=Bar) if not bar_desc else bar_desc self._calculate_values(tree, bar_desc) self._draw(tree) if flush: self.cursor.flush()
Draw ``tree`` to the terminal :type tree: dict :param tree: ``tree`` should be a tree representing a hierarchy; each key should be a string describing that hierarchy level and value should also be ``dict`` except for leaves which should be ``BarDescriptors``. See ``BarDescriptor`` for a tree example. :type bar_desc: BarDescriptor|NoneType :param bar_desc: For describing non-leaf bars in that will be drawn from ``tree``; certain attributes such as ``value`` and ``kwargs["max_value"]`` will of course be overridden if provided. :type flush: bool :param flush: If this is set, output written will be flushed :type save_cursor: bool :param save_cursor: If this is set, cursor location will be saved before drawing; this will OVERWRITE a previous save, so be sure to set this accordingly (to your needs).
384,546
def get_reports():
    if False:
        pass
    else:
        rows = _Constants._DATABASE.execute("SELECT data FROM energy".format(
            hardware=_Constants._HARDWARE))
        data = [r[0] for r in rows]
        data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
        return _Auxiliary._byteify(data)
Returns energy data from 1960 to 2014 across various factors.
384,547
def _execute(self, workdir, with_mpirun=False, exec_args=None): qadapter = self.manager.qadapter if not with_mpirun: qadapter.name = None if self.verbose: print("Working in:", workdir) script = qadapter.get_script_str( job_name=self.name, launch_dir=workdir, executable=self.executable, qout_path="qout_file.path", qerr_path="qerr_file.path", stdin=self.stdin_fname, stdout=self.stdout_fname, stderr=self.stderr_fname, exec_args=exec_args ) script_file = os.path.join(workdir, "run" + self.name + ".sh") with open(script_file, "w") as fh: fh.write(script) os.chmod(script_file, 0o740) qjob, process = qadapter.submit_to_queue(script_file) self.stdout_data, self.stderr_data = process.communicate() self.returncode = process.returncode return self.returncode
Execute the executable in a subprocess inside workdir. Some executables fail if we try to launch them with mpirun. Use with_mpirun=False to run the binary without it.
384,548
def _handleClassAttr(self):
    if len(self.tag._classNames) > 0:
        dict.__setitem__(self, "class", self.tag.className)
    else:
        try:
            dict.__delitem__(self, "class")
        except:
            pass
    styleAttr = self.tag.style
    if styleAttr.isEmpty() is False:
        dict.__setitem__(self, "style", styleAttr)
    else:
        try:
            dict.__delitem__(self, "style")
        except:
            pass
_handleClassAttr - Hack to ensure "class" and "style" show up in attributes when classes are set, and doesn't when no classes are present on associated tag. TODO: I don't like this hack.
384,549
def commit(self) -> None:
    if len(self._transactions) == 0:
        raise RuntimeError("commit called outside transaction")
    if len(self._transactions) > 1:
        for on_rollback in reversed(self._transactions[-1]):
            self._transactions[-2].insert(0, on_rollback)
    _debug("commit")
    self.reset()
Attempt to commit all changes to LDAP database. i.e. forget all rollbacks. However stay inside transaction management.
384,550
def Many2ManyThroughModel(field): from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField name_model = field.model._meta.name name_relmodel = field.relmodel._meta.name if name_model == name_relmodel: name_relmodel += through = field.through if through is None: name = .format(name_model, name_relmodel) class Meta: app_label = field.model._meta.app_label through = ModelType(name, (StdModel,), {: Meta}) field.through = through field1 = ForeignKey(field.model, related_name=field.name, related_manager_class=makeMany2ManyRelatedManager( field.relmodel, name_model, name_relmodel) ) field1.register_with_model(name_model, through) field2 = ForeignKey(field.relmodel, related_name=field.related_name, related_manager_class=makeMany2ManyRelatedManager( field.model, name_relmodel, name_model) ) field2.register_with_model(name_relmodel, through) pk = CompositeIdField(name_model, name_relmodel) pk.register_with_model(, through)
Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.
384,551
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
    if entry in self.stable_entries:
        return {entry: 1}, 0
    comp = entry.composition
    facet, simplex = self._get_facet_and_simplex(comp)
    decomp_amts = simplex.bary_coords(self.pd_coords(comp))
    decomp = {self.qhull_entries[f]: amt
              for f, amt in zip(facet, decomp_amts)
              if abs(amt) > PhaseDiagram.numerical_tol}
    energies = [self.qhull_entries[i].energy_per_atom for i in facet]
    ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
    if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
        return decomp, ehull
    raise ValueError("No valid decomp found!")
Provides the decomposition and energy above convex hull for an entry. Due to caching, can be much faster if entries with the same composition are processed together. Args: entry: A PDEntry like object allow_negative: Whether to allow negative e_above_hulls. Used to calculate equilibrium reaction energies. Defaults to False. Returns: (decomp, energy above convex hull) Stable entries should have energy above hull of 0. The decomposition is provided as a dict of {Entry: amount}.
384,552
def error(self, message, *args, **kwargs): self.system.error(message, *args, **kwargs)
Log error event. Compatible with logging.error signature.
384,553
def _get_dump_item_context(self, index, name, opts): c = { : index, : name, : name, : .join(opts[]), : , } if opts.get(, False): c[] = c.update(self.get_global_context()) return c
Return a formated dict context
384,554
def link(self):
    if not self.IsLink():
        return ''
    location = getattr(self.path_spec, 'location', None)
    if location is None:
        return ''
    return self._file_system.GetDataByPath(location)
str: full path of the linked file entry.
384,555
def set_motor_position(self, motor_name, position):
    self.call_remote_api('simxSetJointTargetPosition',
                         self.get_object_handle(motor_name),
                         position,
                         sending=True)
Sets the motor target position.
384,556
def _get_possible_circular_ref_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None): writing_log_file = None not in [log_fh, log_outprefix] maybe_circular = {} all_nucmer_hits = [] for l in nucmer_hits.values(): all_nucmer_hits.extend(l) nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits) for ref_name, list_of_hits in nucmer_hits.items(): if writing_log_file: print(log_outprefix, ref_name, + str(len(list_of_hits)) + , sep=, file=log_fh) longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits) longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits) if longest_start_hit == longest_end_hit: second_longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits, hits_to_exclude={longest_start_hit}) second_longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits, hits_to_exclude={longest_end_hit}) if second_longest_start_hit is not None: longest_start_hit = self._get_hit_nearest_ref_start([longest_start_hit, second_longest_start_hit]) if second_longest_end_hit is not None: longest_end_hit = self._get_hit_nearest_ref_end([longest_end_hit, second_longest_end_hit]) if ( longest_start_hit is not None and longest_end_hit is not None and longest_start_hit != longest_end_hit and self._hits_have_same_query(longest_start_hit, longest_end_hit) ): if writing_log_file: print(log_outprefix, ref_name, , sep=, file=log_fh) print(log_outprefix, ref_name, , longest_start_hit, sep=, file=log_fh) print(log_outprefix, ref_name, , longest_end_hit, sep=, file=log_fh) shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit]) has_longer_hit = self._has_qry_hit_longer_than( nucmer_hits_by_qry[longest_start_hit.qry_name], shortest_hit_length, hits_to_exclude={longest_start_hit, longest_end_hit} ) if writing_log_file and has_longer_hit: print(log_outprefix, ref_name, , sep=, file=log_fh) can_circularise = self._can_circularise(longest_start_hit, longest_end_hit) if writing_log_file and not can_circularise: print(log_outprefix, ref_name, , sep=, file=log_fh) if (not has_longer_hit) and can_circularise: print(log_outprefix, ref_name, , sep=, file=log_fh) maybe_circular[ref_name] = (longest_start_hit, longest_end_hit) return maybe_circular
Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits)
384,557
def AD(frame, high_col='high', low_col='low', close_col='close', vol_col='volume'):
    return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.AD)
Chaikin A/D Line
384,558
def parse_navigation_html_to_tree(html, id): def xpath(x): return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES) try: value = xpath()[0] is_translucent = value == except IndexError: is_translucent = False if is_translucent: id = TRANSLUCENT_BINDER_ID tree = {: id, : xpath()[0], : [x for x in _nav_to_tree(xpath()[0])] } return tree
Parse the given ``html`` (an etree object) to a tree. The ``id`` is required in order to assign the top-level tree id value.
384,559
def system_listMethods(self):
    methods = set(self.funcs.keys())
    if self.instance is not None:
        if hasattr(self.instance, '_listMethods'):
            methods |= set(self.instance._listMethods())
        methods |= set(list_public_methods(self.instance))
    return sorted(methods)
system.listMethods() => ['add', 'subtract', 'multiple'] Returns a list of the methods supported by the server.
384,560
def source_title_header_element(feature, parent): _ = feature, parent header = source_title_header[] return header.capitalize()
Retrieve source title header string from definitions.
384,561
def get_cache_item(self): t been set.Caching disabled in DEBUG modetemplate_cache_key'])
Gets the cached item. Raises AttributeError if it hasn't been set.
384,562
def as_obj(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        return render_json_obj_with_requested_structure(response)
    return wrapper
A decorator used to return a JSON response with a dict representation of the model instance. It expects the decorated function to return a Model instance. It then converts the instance to dicts and serializes it into a json response Examples: >>> @app.route('/api/shipments/<id>') ... @as_obj ... def get_shipment(id): ... return Shipment.get(id)
384,563
def AllBalancesZeroOrLess(self):
    for key, fixed8 in self.Balances.items():
        if fixed8.value > 0:
            return False
    return True
Flag indicating if all balances are 0 or less. Returns: bool: True if all balances are <= 0. False, otherwise.
384,564
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False): pid = os.getpid() caller = inspect.stack()[0][3] try: rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal) except AssertionError: if self.logger: self.logger.error(.format(pid, caller, lockname, traceback.format_exc())) return False cont = 1 t0 = time.time() lock = None while not lock: time.sleep(.05) cont += 1 if cont % 20 == 0: if self.logger: self.logger.debug(.format(pid, caller, lockname, time.time() - t0)) try: lock = rl.acquire() except RedisError: pass if self.logger: self.logger.debug(.format(pid, caller, lockname, locktime)) return rl
Gets a lock or waits until it is able to get it
384,565
def _log_sum_sq(x, axis=None): return tf.reduce_logsumexp( input_tensor=2. * tf.math.log(tf.abs(x)), axis=axis)
Computes log(sum(x**2)).
384,566
def send(self, config, log, obs_id, beam_id): log.info() socket = self._ftp.transfercmd(.format(obs_id, beam_id)) socket.send(json.dumps(config).encode()) socket.send(bytearray(1000 * 1000)) config[][] = socket.send(json.dumps(config).encode()) socket.send(bytearray(1000 * 1000)) socket.close() log.info()
Send the pulsar data to the ftp server Args: config (dict): Dictionary of settings log (logging.Logger): Python logging object obs_id: observation id beam_id: beam id
384,567
def _find_plugin_dir(module_type): for install_dir in _get_plugin_install_dirs(): candidate = os.path.join(install_dir, module_type) if os.path.isdir(candidate): return candidate else: raise PluginCandidateError( .format( module_type, .join(_get_plugin_install_dirs())))
Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.
384,568
def read_vensim(mdl_file):
    from .py_backend.vensim.vensim2py import translate_vensim
    from .py_backend import functions
    py_model_file = translate_vensim(mdl_file)
    model = functions.Model(py_model_file)
    model.mdl_file = mdl_file
    return model
Construct a model from Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
384,569
def _update_conda_devel():
    conda_bin = _get_conda_bin()
    channels = _get_conda_channels(conda_bin)
    assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
    subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
                          ["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")])
    return os.path.dirname(os.path.dirname(conda_bin))
Update to the latest development conda package.
384,570
def from_json_str(cls, json_str): return cls.from_json(json.loads(json_str, cls=JsonDecoder))
Convert json string representation into class instance. Args: json_str: json representation as string. Returns: New instance of the class with data loaded from json string.
384,571
def allocate_IPv6( self, name, id_network_type, id_environment, description, id_environment_vip=None): vlan_map = dict() vlan_map[] = name vlan_map[] = id_network_type vlan_map[] = id_environment vlan_map[] = description vlan_map[] = id_environment_vip code, xml = self.submit({: vlan_map}, , ) return self.response(code, xml)
Inserts a new VLAN. :param name: Name of Vlan. String with a maximum of 50 characters. :param id_network_type: Identifier of the Netwok Type. Integer value and greater than zero. :param id_environment: Identifier of the Environment. Integer value and greater than zero. :param description: Description of Vlan. String with a maximum of 200 characters. :param id_environment_vip: Identifier of the Environment Vip. Integer value and greater than zero. :return: Following dictionary: :: {'vlan': {'id': < id_vlan >, 'nome': < nome_vlan >, 'num_vlan': < num_vlan >, 'id_tipo_rede': < id_tipo_rede >, 'id_ambiente': < id_ambiente >, 'bloco1': < bloco1 >, 'bloco2': < bloco2 >, 'bloco3': < bloco3 >, 'bloco4': < bloco4 >, 'bloco5': < bloco5 >, 'bloco6': < bloco6 >, 'bloco7': < bloco7 >, 'bloco8': < bloco8 >, 'bloco': < bloco >, 'mask_bloco1': < mask_bloco1 >, 'mask_bloco2': < mask_bloco2 >, 'mask_bloco3': < mask_bloco3 >, 'mask_bloco4': < mask_bloco4 >, 'mask_bloco5': < mask_bloco5 >, 'mask_bloco6': < mask_bloco6 >, 'mask_bloco7': < mask_bloco7 >, 'mask_bloco8': < mask_bloco8 >, 'descricao': < descricao >, 'acl_file_name': < acl_file_name >, 'acl_valida': < acl_valida >, 'acl_file_name_v6': < acl_file_name_v6 >, 'acl_valida_v6': < acl_valida_v6 >, 'ativada': < ativada >}} :raise VlanError: VLAN name already exists, VLAN name already exists, DC division of the environment invalid or does not exist VLAN number available. :raise VlanNaoExisteError: VLAN not found. :raise TipoRedeNaoExisteError: Network Type not registered. :raise AmbienteNaoExisteError: Environment not registered. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise InvalidParameterError: Name of Vlan and/or the identifier of the Environment is null or invalid. :raise IPNaoDisponivelError: There is no network address is available to create the VLAN. :raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
384,572
def _parse_remote_response(self, response):
    try:
        if response.headers["Content-Type"] != 'application/json':
            logger.warning('Wrong Content_type ({})'.format(
                response.headers["Content-Type"]))
    except KeyError:
        pass
    logger.debug("Loaded JWKS: %s from %s" % (response.text, self.source))
    try:
        return json.loads(response.text)
    except ValueError:
        return None
Parse JWKS from the HTTP response. Should be overriden by subclasses for adding support of e.g. signed JWKS. :param response: HTTP response from the 'jwks_uri' endpoint :return: response parsed as JSON
384,573
def order_events(events, d=False):
    ordered_events = {}
    for event in events:
        try:
            for occ in event.occurrence:
                try:
                    ordered_events[occ].append(event)
                except Exception:
                    ordered_events[occ] = [event]
        except AttributeError:
            pass
    if d:
        return ordered_events
    else:
        return sorted(ordered_events.items())
Group events that occur on the same day, then sort them alphabetically by title, then sort by day. Returns a list of tuples that looks like [(day: [events])], where day is the day of the event(s), and [events] is an alphabetically sorted list of the events for the day.
384,574
def softDeactivate(rh): rh.printSysLog("Enter powerVM.softDeactivate, userid: " + rh.userid) strCmd = "echo " iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd) if iucvResults[] == 0: strCmd = "shutdown -h now" iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd) if iucvResults[] == 0: time.sleep(15) else: rh.printSysLog("powerVM.softDeactivate " + rh.userid + " is unreachable. Treating it as already shutdown.") else: rh.printSysLog("powerVM.softDeactivate " + rh.userid + " is unreachable. Treating it as already shutdown.") parms = ["-T", rh.userid] smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms) if smcliResults[] == 0: pass elif (smcliResults[] == 8 and smcliResults[] == 200 and (smcliResults[] == 12 or + smcliResults[] == 16)): rh.printLn("N", rh.userid + " is already logged off.") else: rh.printLn("ES", smcliResults[]) rh.updateResults(smcliResults) if rh.results[] == 0 and in rh.parms: waitResults = waitForVMState( rh, rh.userid, , maxQueries=rh.parms[], sleepSecs=rh.parms[]) if waitResults[] == 0: rh.printLn("N", "Userid overallRCoverallRC']
Deactivate a virtual machine by first shutting down Linux and then log it off. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'SOFTOFF' userid - userid of the virtual machine parms['maxQueries'] - Maximum number of queries to issue. Optional. parms['maxWait'] - Maximum time to wait in seconds. Optional, unless 'maxQueries' is specified. parms['poll'] - Polling interval in seconds. Optional, unless 'maxQueries' is specified. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error
384,575
def _completion_move(self, p_step, p_size):
    current_position = self.completion_box.focus_position
    try:
        self.completion_box.set_focus(current_position + p_step)
    except IndexError:
        position = 0 if p_step > 0 else len(self.completion_box) - 1
        self.completion_box.set_focus(position)
    maxcols, = p_size
    size = (maxcols, self.completion_box.height)
    self.completion_box.calculate_visible(size)
    candidate = self.completion_box.focus.original_widget.text
    self.insert_completion(candidate)
Visually selects completion specified by p_step (positive numbers forwards, negative numbers backwards) and inserts it into edit_text. If p_step results in value out of range of currently evaluated completion candidates, list is rewinded to the start (if cycling forwards) or to the end (if cycling backwards).
384,576
def optimize(thumbnail_file, jpg_command=None, png_command=None,
             gif_command=None):
    temp_dir = get_or_create_temp_dir()
    thumbnail_filename = os.path.join(temp_dir, "%s" % shortuuid.uuid())
    f = open(thumbnail_filename, 'wb')
    f.write(thumbnail_file.read())
    f.close()
    filetype = imghdr.what(thumbnail_filename)
    command = None
    if filetype == "jpg" or filetype == "jpeg":
        command = jpg_command
    elif filetype == "png":
        command = png_command
    elif filetype == "gif":
        command = gif_command
    if command:
        command = command % {'filename': thumbnail_filename}
        call(command, shell=True)
    optimized_file = File(open(thumbnail_filename, 'rb'))
    os.remove(thumbnail_filename)
    return optimized_file
A post processing function to optimize file size. Accepts commands to optimize JPG, PNG and GIF images as arguments. Example: THUMBNAILS = { # Other options... 'POST_PROCESSORS': [ { 'processor': 'thumbnails.post_processors.optimize', 'png_command': 'optipng -force -o3 "%(filename)s"', 'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"', }, ], } Note: using output redirection in commands may cause unpredictable results. For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause optimize command to fail on some systems.
384,577
def wait_any(futures, timeout=None):
    for fut in futures:
        if fut.complete:
            return fut
    wait = _Wait(futures)
    for fut in futures:
        fut._waits.add(wait)
    if wait.done.wait(timeout):
        raise errors.WaitTimeout()
    return wait.completed_future
Wait for the completion of any (the first) one of multiple futures :param list futures: A list of :class:`Future`\s :param timeout: The maximum time to wait. With ``None``, will block indefinitely. :type timeout: float or None :returns: One of the futures from the provided list -- the first one to become complete (or any of the ones that were already complete). :raises WaitTimeout: if a timeout is provided and hit
384,578
def on_queue_declareok(self, method_frame): logger.info(, exchange=self._exchange, queue=self._queue) self._channel.queue_bind(self.on_bindok, self._queue, self._exchange)
Method invoked by pika when the Queue.Declare RPC call made in setup_queue has completed. In this method we will bind the queue and exchange together with the routing key by issuing the Queue.Bind RPC command. When this command is complete, the on_bindok method will be invoked by pika. :param pika.frame.Method method_frame: The Queue.DeclareOk frame
384,579
def f_add_result_group(self, *args, **kwargs): return self._nn_interface._add_generic(self, type_name=RESULT_GROUP, group_type_name=RESULT_GROUP, args=args, kwargs=kwargs)
Adds an empty result group under the current node. Adds the full name of the current node as prefix to the name of the group. If current node is a single run (root) adds the prefix `'results.runs.run_08%d%'` to the full name where `'08%d'` is replaced by the index of the current run. The `name` can also contain subgroups separated via colons, for example: `name=subgroup1.subgroup2.subgroup3`. These other parent groups will be automatically be created.
384,580
def render_chart_to_file(self, template_name: str, chart: Any, path: str):
    tpl = self.env.get_template(template_name)
    html = tpl.render(chart=self.generate_js_link(chart))
    write_utf8_html_file(path, self._reg_replace(html))
Render a chart or page to local html files. :param chart: A Chart or Page object :param path: The destination file which the html code write to :param template_name: The name of template file.
384,581
def _CreateIndexIfNotExists(self, index_name, mappings):
    try:
        if not self._client.indices.exists(index_name):
            self._client.indices.create(
                body={'mappings': mappings}, index=index_name)
    except elasticsearch.exceptions.ConnectionError as exception:
        raise RuntimeError(
            'Unable to create Elasticsearch index with error: {0!s}'.format(
                exception))
Creates an Elasticsearch index if it does not exist. Args: index_name (str): mame of the index. mappings (dict[str, object]): mappings of the index. Raises: RuntimeError: if the Elasticsearch index cannot be created.
384,582
def get_client_cache_key(request_or_attempt: Union[HttpRequest, Any],
                         credentials: dict = None) -> str:
    if isinstance(request_or_attempt, HttpRequest):
        username = get_client_username(request_or_attempt, credentials)
        ip_address = get_client_ip_address(request_or_attempt)
        user_agent = get_client_user_agent(request_or_attempt)
    else:
        username = request_or_attempt.username
        ip_address = request_or_attempt.ip_address
        user_agent = request_or_attempt.user_agent
    filter_kwargs = get_client_parameters(username, ip_address, user_agent)
    cache_key_components = ''.join(filter_kwargs.values())
    cache_key_digest = md5(cache_key_components.encode()).hexdigest()
    cache_key = f'axes-{cache_key_digest}'
    return cache_key
Build cache key name from request or AccessAttempt object. :param request_or_attempt: HttpRequest or AccessAttempt object :param credentials: credentials containing user information :return cache_key: Hash key that is usable for Django cache backends
384,583
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ): if config_dir is None: config_dir = get_config_dir() tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir ) for key_info in recipient_key_infos: res = gpg_stash_key( "encrypt", key_info[], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {: % key_info[]} try: sender_privkey = gpg_export_key( sender_key_info[], sender_key_info[], include_private=True, config_dir=config_dir ) except Exception, e: log.exception(e) shutil.rmtree(tmpdir) return {: } res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {: } recipient_key_ids = [r[] for r in recipient_key_infos] gpg = gnupg.GPG( homedir=tmpdir ) res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info[], passphrase=passphrase, output=path_out, always_trust=True ) shutil.rmtree(tmpdir) if res.status != : log.debug("encrypt_file error: %s" % res.__dict__) log.debug("recipients: %s" % recipient_key_ids) log.debug("signer: %s" % sender_key_info[]) return {: } return {: True}
Encrypt a stream of data for a set of keys. @sender_key_info should be a dict with { 'key_id': ... 'key_data': ... 'app_name'; ... } Return {'status': True} on success Return {'error': ...} on error
384,584
def _wordAfterCursor(self):
    cursor = self._qpart.textCursor()
    textAfterCursor = cursor.block().text()[cursor.positionInBlock():]
    match = _wordAtStartRegExp.search(textAfterCursor)
    if match:
        return match.group(0)
    else:
        return ''
Get word, which is located before cursor
384,585
def enable_reporting(self):
    if self.status == Stats.ENABLED:
        return
    if not self.enableable:
        logger.critical("Can't enable reporting")
        return
    self.status = Stats.ENABLED
    self.write_config(self.status)
Call this method to explicitly enable reporting. The current report will be uploaded, plus the previously recorded ones, and the configuration will be updated so that future runs also upload automatically.
384,586
def get_content(self, url, params=None, limit=0, place_holder=None, root_field=, thing_field=, after_field=, object_filter=None, **kwargs): _use_oauth = kwargs.get(, self.is_oauth_session()) objects_found = 0 params = params or {} fetch_all = fetch_once = False if limit is None: fetch_all = True params[] = 1024 elif limit > 0: params[] = limit else: fetch_once = True if hasattr(self, ): url = self._url_update(url) while fetch_once or fetch_all or objects_found < limit: if _use_oauth: assert self._use_oauth is False self._use_oauth = _use_oauth try: page_data = self.request_json(url, params=params) if object_filter: page_data = page_data[object_filter] finally: if _use_oauth: self._use_oauth = False fetch_once = False root = page_data.get(root_field, page_data) for thing in root[thing_field]: yield thing objects_found += 1 else: return
A generator method to return reddit content from a URL. Starts at the initial url, and fetches content using the `after` JSON data until `limit` entries have been fetched, or the `place_holder` has been reached. :param url: the url to start fetching content from :param params: dictionary containing extra GET data to put in the url :param limit: the number of content entries to fetch. If limit <= 0, fetch the default for your account (25 for unauthenticated users). If limit is None, then fetch as many entries as possible (reddit returns at most 100 per request, however, PRAW will automatically make additional requests as necessary). :param place_holder: if not None, the method will fetch `limit` content, stopping if it finds content with `id` equal to `place_holder`. The place_holder item is the last item to be yielded from this generator. Note that the use of `place_holder` is not 100% reliable as the place holder item may no longer exist due to being removed or deleted. :param root_field: indicates the field in the json response that holds the data. Most objects use 'data', however some (flairlist) don't have the 'data' object. Use None for the root object. :param thing_field: indicates the field under the root_field which contains the list of things. Most objects use 'children'. :param after_field: indicates the field which holds the after item element :param object_filter: if set to an integer value, fetch content from the corresponding list index in the JSON response. For example the JSON response for submission duplicates is a list of objects, and the object we want to fetch from is at index 1. So we set object_filter=1 to filter out the other useless list elements. :type place_holder: a string corresponding to a reddit base36 id without prefix, e.g. 'asdfasdf' :returns: a list of reddit content, of type Subreddit, Comment, Submission or user flair.
384,587
def delete_split(self, split_name):
    if self.has_split(split_name):
        shutil.rmtree(os.path.join(self.split_dir, split_name))
Delete a split of the dataset. Parameters ---------- split_name : str name of the split to delete
384,588
def readline(self, prompt='', use_raw=None):
    line = self.input.readline()
    if not line:
        raise EOFError
    return line.rstrip("\n")
Read a line of input. Prompt and use_raw exist to be compatible with other input routines and are ignored. EOFError will be raised on EOF.
384,589
def ReadFile(self, filename):
    with io.open(filename, 'r', encoding='utf-8') as file_object:
        for artifact_definition in self.ReadFileObject(file_object):
            yield artifact_definition
Reads artifact definitions from a file. Args: filename (str): name of the file to read from. Yields: ArtifactDefinition: an artifact definition.
384,590
def add_to_message(data, indent_level=0) -> list: message = [] if isinstance(data, str): message.append(indent( dedent(data.strip()).strip(), indent_level * )) return message for line in data: offset = 0 if isinstance(line, str) else 1 message += add_to_message(line, indent_level + offset) return message
Adds data to the message object
384,591
def saml_name_id_format_to_hash_type(name_format):
    msg = "saml_name_id_format_to_hash_type is deprecated and will be removed."
    _warnings.warn(msg, DeprecationWarning)
    name_id_format_to_hash_type = {
        NAMEID_FORMAT_TRANSIENT: UserIdHashType.transient,
        NAMEID_FORMAT_PERSISTENT: UserIdHashType.persistent,
        NAMEID_FORMAT_EMAILADDRESS: UserIdHashType.emailaddress,
        NAMEID_FORMAT_UNSPECIFIED: UserIdHashType.unspecified,
    }
    return name_id_format_to_hash_type.get(
        name_format, UserIdHashType.transient
    )
Translate pySAML2 name format to satosa format :type name_format: str :rtype: satosa.internal_data.UserIdHashType :param name_format: SAML2 name format :return: satosa format
384,592
def grant_user_access(self, user, db_names, strict=True): return self._user_manager.grant_user_access(user, db_names, strict=strict)
Gives access to the databases listed in `db_names` to the user.
384,593
def get_exchanges(self, vhost=None):
    if vhost:
        vhost = quote(vhost, '')
        path = Client.urls['exchanges_by_vhost'] % vhost
    else:
        path = Client.urls['all_exchanges']
    exchanges = self._call(path, 'GET')
    return exchanges
:returns: A list of dicts :param string vhost: A vhost to query for exchanges, or None (default), which triggers a query for all exchanges in all vhosts.
384,594
async def rollback(self):
    if not self._parent._is_active:
        return
    await self._do_rollback()
    self._is_active = False
Roll back this transaction.
384,595
def _get_initial_args(objective_function, initial_population, initial_position, population_size, population_stddev, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob, seed): was_iterable = False if initial_position is not None: initial_position, was_iterable = _ensure_list(initial_position) if initial_population is not None: initial_population, was_iterable = _ensure_list(initial_population) population = _get_starting_population(initial_population, initial_position, population_size, population_stddev, seed=seed) differential_weight = tf.convert_to_tensor( value=differential_weight, dtype=population[0].dtype.base_dtype) crossover_prob = tf.convert_to_tensor(value=crossover_prob) population_values = objective_function(*population) if max_iterations is not None: max_iterations = tf.convert_to_tensor(value=max_iterations) func_tolerance = tf.convert_to_tensor( value=func_tolerance, dtype=population_values.dtype.base_dtype) position_tolerance = tf.convert_to_tensor( value=position_tolerance, dtype=population[0].dtype.base_dtype) return (was_iterable, population, population_values, max_iterations, func_tolerance, position_tolerance, differential_weight, crossover_prob)
Processes initial args.
384,596
def _generate_ffmpeg_cmd(
    self,
    cmd: List[str],
    input_source: Optional[str],
    output: Optional[str],
    extra_cmd: Optional[str] = None,
) -> None:
    self._argv = [self._ffmpeg]
    if input_source is not None:
        self._put_input(input_source)
    self._argv.extend(cmd)
    if extra_cmd is not None:
        self._argv.extend(shlex.split(extra_cmd))
    self._merge_filters()
    self._put_output(output)
Generate ffmpeg command line.
384,597
def sc_cuts_alg(self, viewer, event, msg=True):
    if self.cancut:
        direction = self.get_direction(event.direction)
        self._cycle_cuts_alg(viewer, msg, direction=direction)
    return True
Adjust cuts algorithm interactively.
384,598
def end_experience_collection_timer(self):
    if self.time_start_experience_collection:
        curr_delta = time() - self.time_start_experience_collection
        if self.delta_last_experience_collection is None:
            self.delta_last_experience_collection = curr_delta
        else:
            self.delta_last_experience_collection += curr_delta
    self.time_start_experience_collection = None
Inform Metrics class that experience collection is done.
384,599
def register_hooked(self, hooks, func, args_gen=None):
    if self.hooked is None:
        self.hooked = {}
    if args_gen is None:
        args_gen = getattr(func, "call_types", {}).keys
    if not isinstance(hooks, Sequence):
        hooks = [hooks]
    for hook_cls in hooks:
        self.hooked[hook_cls] = (func, args_gen)
Register func to be run when any of the hooks are run by parent Args: hooks: A Hook class or list of Hook classes of interest func: The callable that should be run on that Hook args_gen: Optionally specify the argument names that should be passed to func. If not given then use func.call_types.keys