Dataset columns (code/docstring pairs):
  Unnamed: 0 : int64 row index, values 0 to 389k
  code       : string, lengths 26 to 79.6k characters
  docstring  : string, lengths 1 to 46.9k characters
def make_data(self, message):
    if not isinstance(message, Message):
        return message
    return message.export(self.transport_content_type)
make data string from message according to transport_content_type Returns: str: message data
15,301
def iyang(imgIn, krnl, imgSeg, Cnt, itr=5):
    dim = imgIn.shape
    m = np.int32(np.max(imgSeg))
    m_a = np.zeros((m + 1, itr), dtype=np.float32)
    for jr in range(0, m + 1):
        m_a[jr, 0] = np.mean(imgIn[imgSeg == jr])
    imgOut = np.copy(imgIn)
    # iterative Yang loop
    for i in range(0, itr):
        # NOTE: dict keys and message strings below were stripped in extraction;
        # 'VERBOSE' and the GPU-path condition are reconstructed guesses.
        if Cnt.get('VERBOSE', False):
            print('PVC Yang iteration:', i)
        imgPWC = imgOut
        imgPWC[imgPWC < 0] = 0
        for jr in range(0, m + 1):
            imgPWC[imgSeg == jr] = np.mean(imgPWC[imgSeg == jr])
        if 'DEVID' in Cnt:  # GPU convolution path (original condition lost)
            imin_d = np.transpose(imgPWC, (1, 2, 0))
            imout_d = np.zeros(imin_d.shape, dtype=np.float32)
            improc.convolve(imout_d, imin_d, krnl, Cnt)
            imgSmo = np.transpose(imout_d, (2, 0, 1))
        else:
            # build a separable 3-D kernel from the three 1-D rows of krnl
            hxy = np.outer(krnl[1, :], krnl[2, :])
            hxyz = np.multiply.outer(krnl[0, :], hxy)
            imgSmo = ndi.convolve(imgPWC, hxyz, mode='constant', cval=0.)
        imgCrr = np.ones(dim, dtype=np.float32)
        imgCrr[imgSmo > 0] = imgPWC[imgSmo > 0] / imgSmo[imgSmo > 0]
        imgOut = imgIn * imgCrr
        for jr in range(0, m + 1):
            m_a[jr, i] = np.mean(imgOut[imgSeg == jr])
    return imgOut, m_a
partial volume correction using iterative Yang method imgIn: input image which is blurred due to the PSF of the scanner krnl: shift invariant kernel of the PSF imgSeg: segmentation into regions starting with 0 (e.g., background) and then next integer numbers itr: number of iteration (default 5)
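A minimal usage sketch for the CPU path of iyang, assuming numpy and scipy.ndimage are available as np/ndi in the function's module; the 'VERBOSE' key follows the reconstruction above and is an assumption, and the phantom data is synthetic:

import numpy as np

img = np.abs(np.random.randn(8, 8, 8)).astype(np.float32)   # blurred PET-like volume
seg = np.zeros((8, 8, 8), dtype=np.int32)                    # region 0 = background
seg[2:6, 2:6, 2:6] = 1                                       # region 1 = object
# separable PSF: one 1-D kernel per axis (z, y, x), stacked as rows
krnl = np.tile(np.array([0.25, 0.5, 0.25], dtype=np.float32), (3, 1))
out, region_means = iyang(img, krnl, seg, {'VERBOSE': False}, itr=3)
print(region_means.shape)   # (2, 3): one mean per region per iteration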
15,302
def convert_references_json(ref_content, soup=None): "Check for references that will not pass schema validation, fix or convert them to unknown" if ( (ref_content.get("type") == "other") or (ref_content.get("type") == "book-chapter" and "editors" not in ref_content) or (ref_content.get("type") == "journal" and "articleTitle" not in ref_content) or (ref_content.get("type") in ["journal", "book-chapter"] and not "pages" in ref_content) or (ref_content.get("type") == "journal" and "journal" not in ref_content) or (ref_content.get("type") in ["book", "book-chapter", "report", "thesis", "software"] and "publisher" not in ref_content) or (ref_content.get("type") == "book" and "bookTitle" not in ref_content) or (ref_content.get("type") == "data" and "source" not in ref_content) or (ref_content.get("type") == "conference-proceeding" and "conference" not in ref_content) ): ref_content = references_json_to_unknown(ref_content, soup) return ref_content
Check for references that will not pass schema validation, fix or convert them to unknown
15,303
def post(self, request, bot_id, format=None): return super(StateList, self).post(request, bot_id, format)
Add a new state --- serializer: StateSerializer responseMessages: - code: 401 message: Not authenticated - code: 400 message: Not valid request
15,304
def _cleanup_pods(namespace, labels):
    api = kubernetes.client.CoreV1Api()
    pods = api.list_namespaced_pod(namespace, label_selector=format_labels(labels))
    for pod in pods.items:
        try:
            api.delete_namespaced_pod(pod.metadata.name, namespace)
            # log message reconstructed; the original string was stripped in extraction
            logger.info('Deleted pod %s', pod.metadata.name)
        except kubernetes.client.rest.ApiException as e:
            # ignore pods that were already deleted
            if e.status != 404:
                raise
Remove all pods with these labels in this namespace
15,305
def update_node_attributes(self, attributes_flags=int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)): self.traced.value = foundations.trace.is_traced(self.__module) self.traced.roles[Qt.DisplayRole] = foundations.strings.to_string(self.traced.value).title()
Updates the Node attributes. :param attributes_flags: Attributes flags. :type attributes_flags: int :return: Method success. :rtype: bool
15,306
def run(self): case = self.case baseMVA = case.base_mva buses = self.case.connected_buses branches = case.online_branches generators = case.online_generators meas = self.measurements self.case.index_buses() self.case.index_branches() pv = [b._i for b in buses if b.type == PV] pq = [b._i for b in buses if b.type == PQ] Ybus, Yf, Yt = case.Y V0 = self.getV0(self.v_mag_guess, buses, generators) t0 = time() converged = False i = 0 V = V0 Va = angle(V0) Vm = abs(V0) nb = Ybus.shape[0] f = [b.from_bus._i for b in branches] t = [b.to_bus._i for b in branches] nonref = pv + pq z = array([m.value for m in meas]) idx_zPf = [m.b_or_l._i for m in meas if m.type == PF] idx_zPt = [m.b_or_l._i for m in meas if m.type == PT] idx_zQf = [m.b_or_l._i for m in meas if m.type == QF] idx_zQt = [m.b_or_l._i for m in meas if m.type == QT] idx_zPg = [m.b_or_l._i for m in meas if m.type == PG] idx_zQg = [m.b_or_l._i for m in meas if m.type == QG] idx_zVm = [m.b_or_l._i for m in meas if m.type == VM] idx_zVa = [m.b_or_l._i for m in meas if m.type == VA] def col(seq): return [[k] for k in seq] sigma_vector = r_[ self.sigma[0] * ones(len(idx_zPf)), self.sigma[1] * ones(len(idx_zPt)), self.sigma[2] * ones(len(idx_zQf)), self.sigma[3] * ones(len(idx_zQt)), self.sigma[4] * ones(len(idx_zPg)), self.sigma[5] * ones(len(idx_zQg)), self.sigma[6] * ones(len(idx_zVm)), self.sigma[7] * ones(len(idx_zVa)) ] sigma_squared = sigma_vector**2 rsig = range(len(sigma_squared)) Rinv = csr_matrix((1.0 / sigma_squared, (rsig, rsig))) while (not converged) and (i < self.max_iter): i += 1 Sfe = V[f] * conj(Yf * V) Ste = V[t] * conj(Yt * V) gbus = [g.bus._i for g in generators] Sgbus = V[gbus] * conj(Ybus[gbus, :] * V) Sd = array([complex(b.p_demand, b.q_demand) for b in buses]) Sgen = (Sgbus * baseMVA + Sd) / baseMVA z_est = r_[ Sfe[idx_zPf].real, Ste[idx_zPt].real, Sfe[idx_zQf].imag, Ste[idx_zQt].imag, Sgen[idx_zPg].real, Sgen[idx_zQg].imag, abs(V[idx_zVm]), angle(V[idx_zVa]) ] dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V) dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, _, _ = case.dSbr_dV(Yf, Yt,V) dPF_dVa = dSf_dVa.real dQF_dVa = dSf_dVa.imag dPF_dVm = dSf_dVm.real dQF_dVm = dSf_dVm.imag dPT_dVa = dSt_dVa.real dQT_dVa = dSt_dVa.imag dPT_dVm = dSt_dVm.real dQT_dVm = dSt_dVm.imag dPG_dVa = dSbus_dVa[gbus, :].real dQG_dVa = dSbus_dVa[gbus, :].imag dPG_dVm = dSbus_dVm[gbus, :].real dQG_dVm = dSbus_dVm[gbus, :].imag dVa_dVa = csr_matrix((ones(nb), (range(nb), range(nb)))) dVa_dVm = csr_matrix((nb, nb)) dVm_dVa = csr_matrix((nb, nb)) dVm_dVm = csr_matrix((ones(nb), (range(nb), range(nb)))) h = [(col(idx_zPf), dPF_dVa, dPF_dVm), (col(idx_zQf), dQF_dVa, dQF_dVm), (col(idx_zPt), dPT_dVa, dPT_dVm), (col(idx_zQt), dQT_dVa, dQT_dVm), (col(idx_zPg), dPG_dVa, dPG_dVm), (col(idx_zQg), dQG_dVa, dQG_dVm), (col(idx_zVm), dVm_dVa, dVm_dVm), (col(idx_zVa), dVa_dVa, dVa_dVm)] H = vstack([hstack([dVa[idx, nonref], dVm[idx, nonref]]) for idx, dVa, dVm in h if len(idx) > 0 ]) J = H.T * Rinv * H F = H.T * Rinv * (z - z_est) dx = spsolve(J, F) normF = linalg.norm(F, Inf) if self.verbose: logger.info("Iteration [%d]: Norm of mismatch: %.3f" % (i, normF)) if normF < self.tolerance: converged = True npvpq = len(nonref) Va[nonref] = Va[nonref] + dx[:npvpq] Vm[nonref] = Vm[nonref] + dx[npvpq:2 * npvpq] V = Vm * exp(1j * Va) Va = angle(V) Vm = abs(V) error_sqrsum = sum((z - z_est)**2 / sigma_squared) case.pf_solution(Ybus, Yf, Yt, V) elapsed = time() - t0 if self.verbose and converged: print "State estimation converged in: %.3fs (%d iterations)" % \ (elapsed, i) solution = {"V": 
V, "converged": converged, "iterations": i, "z": z, "z_est": z_est, "error_sqrsum": error_sqrsum, "elapsed": elapsed} return solution
Solves a state estimation problem.
15,307
def check_error(self, response, status, err_cd):
    """Check an error in the response."""
    # NOTE: the response/message keys below ('status', 'msgs', 'lvl', 'err_cd')
    # were stripped in extraction and are reconstructed guesses.
    if 'status' not in response:
        return False
    if response['status'] != status:
        return False
    if 'msgs' not in response:
        return False
    if not isinstance(response['msgs'], list):
        return False
    for msg in response['msgs']:
        if 'lvl' in msg and msg['lvl'] != 'ERROR':
            continue
        if 'err_cd' in msg and msg['err_cd'] == err_cd:
            return True
    return False
Check an error in the response.
15,308
def link(url, text='', classes='', target='', get='', **kwargs):
    # values that don't look like absolute or anchor URLs are treated as view
    # names; the stripped startswith() arguments are reconstructed as 'http'/'/'
    if not (url.startswith('http') or url.startswith('/')):
        urlargs = {}
        for arg, val in kwargs.items():
            if arg[:4] == 'url_':
                urlargs[arg[4:]] = val
        url = reverse(url, kwargs=urlargs)
    if get:
        url += '?' + get
    return html.tag('a', text or url, {'class': classes, 'target': target, 'href': url})
Output a link tag.
15,309
def newline(self): self.carriage_return() if self._cy + (2 * self._ch) >= self._device.height: copy = self._backing_image.crop((0, self._ch, self._device.width, self._device.height)) self._backing_image.paste(copy, (0, 0)) self._canvas.rectangle((0, copy.height, self._device.width, self._device.height), fill=self.default_bgcolor) else: self._cy += self._ch self.flush() if self.animate: time.sleep(0.2)
Advances the cursor position to the left-hand side and to the next line. If the cursor is on the lowest line, the displayed contents are scrolled, causing the top line to be lost.
15,310
def add_secondary_ip(self, ip_address, interface=1):
    # NOTE: logger suffix and message strings were stripped in extraction and
    # are reconstructed guesses.
    log = logging.getLogger(self.cls_logger + '.add_secondary_ip')
    eni_id = self.get_eni_id(interface)
    if eni_id is None:
        msg = 'Unable to find the ENI ID for interface: {i}'.format(i=interface)
        log.error(msg)
        raise EC2UtilError(msg)
    else:
        log.info('Found ENI ID: {e}'.format(e=eni_id))
    log.info('Attempting to assign the secondary IP address...')
    try:
        self.client.assign_private_ip_addresses(
            NetworkInterfaceId=eni_id,
            PrivateIpAddresses=[ip_address],
            AllowReassignment=True
        )
    except ClientError as ex:
        msg = 'Unable to assign secondary IP address: {e}'.format(e=str(ex))
        log.error(msg)
        # Python-2-only "raise cls, msg, trace" syntax replaced with a
        # py3-compatible raise
        raise AWSAPIError(msg)
    log.info('Successfully added secondary IP {s} to ENI ID {e} on interface {i}'.format(
        s=ip_address, e=eni_id, i=interface))
Adds an IP address as a secondary IP address :param ip_address: String IP address to add as a secondary IP :param interface: Integer associated to the interface/device number :return: None :raises: AWSAPIError, EC2UtilError
15,311
def _systemd_notify_once():
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        if notify_socket.startswith('@'):
            # abstract namespace socket
            notify_socket = '\0%s' % notify_socket[1:]
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with contextlib.closing(sock):
            try:
                sock.connect(notify_socket)
                sock.sendall(b'READY=1')
                del os.environ['NOTIFY_SOCKET']
            except EnvironmentError:
                LOG.debug("Systemd notification failed", exc_info=True)
Send notification once to Systemd that service is ready. Systemd sets NOTIFY_SOCKET environment variable with the name of the socket listening for notifications from services. This method removes the NOTIFY_SOCKET environment variable to ensure notification is sent only once.
15,312
def gateways_info():
    data = netifaces.gateways()
    # key names reconstructed; the original string literals were stripped
    results = {'default': {}}
    with suppress(KeyError):
        results['ipv4'] = data[netifaces.AF_INET]
        results['default']['ipv4'] = data['default'][netifaces.AF_INET]
    with suppress(KeyError):
        results['ipv6'] = data[netifaces.AF_INET6]
        results['default']['ipv6'] = data['default'][netifaces.AF_INET6]
    return results
Returns gateways data.
15,313
def _set_visible(self, visibility, grid_index=None):
    if grid_index is None:
        for ax in self.flat_grid:
            ax.set_visible(visibility)
    else:
        if grid_index < 0 or grid_index >= len(self.grids):
            # error message reconstructed; the original string was stripped
            raise IndexError('grid_index must be between 0 and {}'.format(len(self.grids) - 1))
        for ax in self.grids[grid_index]:
            ax.set_visible(visibility)
Sets the visibility property of all axes.
15,314
def _has_y(self, kwargs):
    # the stripped literal is reconstructed as 'y' from the docstring
    return (('y' in kwargs) or
            (self._element_y in kwargs) or
            (self._type == 3 and self._element_1my in kwargs))
Returns True if y is explicitly defined in kwargs
15,315
def save_image(image, filename=None):
    # default file name per the docstring: <image>.tar.gz
    local_name = filename or '{0}.tar.gz'.format(image)
    cli.save_image(image, local_name)
Saves a Docker image from the remote to a local file. For performance reasons, uses the Docker command line client on the host, generates a gzip-tarball and downloads that. :param image: Image name or id. :type image: unicode :param filename: File name to store the local file. If not provided, will use ``<image>.tar.gz`` in the current working directory. :type filename: unicode
15,316
def body(quantity=2, separator='\n\n', wrap_start='', wrap_end='', html=False,
         sentences_quantity=3, as_list=False):
    # defaults for separator/wrap_start/wrap_end were stripped in extraction
    # and are reconstructed guesses
    return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,
                                  wrap_start=wrap_start, wrap_end=wrap_end,
                                  html=html, sentences_quantity=sentences_quantity,
                                  as_list=as_list)
Return a random email text.
15,317
def select_many_with_index( self, collection_selector=IndexedElement, result_selector=lambda source_element, collection_element: collection_element): if self.closed(): raise ValueError("Attempt to call select_many_with_index() on a " "closed Queryable.") if not is_callable(collection_selector): raise TypeError("select_many_with_index() parameter " "projector={0} is not callable".format(repr(collection_selector))) if not is_callable(result_selector): raise TypeError("select_many_with_index() parameter " "selector={0} is not callable".format(repr(result_selector))) return self._create( self._generate_select_many_with_index(collection_selector, result_selector))
Projects each element of a sequence to an intermediate new sequence, incorporating the index of the element, flattens the resulting sequence into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A binary function mapping each element of the source sequence into an intermediate sequence, by incorporating its index in the source sequence. The two positional arguments to the function are the zero-based index of the source element and the value of the element. The result of the function should be an iterable derived from the index and element value. If no collection_selector is provided, the elements of the intermediate sequence will consist of tuples of (index, element) from the source sequence. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the flattened intermediate sequence are returned untransformed. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence which incorporates both the index and value of the source element, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector [and selector] are not callable.
15,318
def tag_labels(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__ner_tagger is None:
        self.__ner_tagger = load_default_ner_tagger()
    self.__ner_tagger.tag_document(self)
    return self
Tag named entity labels in the ``words`` layer.
15,319
def stats_set_value(self, key, value=1):
    if not self._measurement:
        if not self.IGNORE_OOB_STATS:
            # warning text reconstructed; the original string was stripped
            self.logger.warning('stats_set_value invoked outside of message processing')
        return
    self._measurement.set_value(key, value)
Set the specified key/value in the per-message measurements .. versionadded:: 3.13.0 .. note:: If this method is called when there is not a message being processed, a message will be logged at the ``warning`` level to indicate the value is being dropped. To suppress these warnings, set the :attr:`rejected.consumer.Consumer.IGNORE_OOB_STATS` attribute to :data:`True`. :param key: The key to set the value for :type key: :class:`str` :param value: The value :type value: :class:`int` or :class:`float`
15,320
def save_to_file(self, file_path, labels=None, predict_proba=True,
                 show_predicted_value=True, **kwargs):
    file_ = open(file_path, 'w', encoding='utf8')
    file_.write(self.as_html(labels=labels,
                             predict_proba=predict_proba,
                             show_predicted_value=show_predicted_value,
                             **kwargs))
    file_.close()
Saves html explanation to file. . Params: file_path: file to save explanations to See as_html() for additional parameters.
15,321
def prepare_transaction(*, operation='CREATE', signers=None, recipients=None,
                        asset=None, metadata=None, inputs=None):
    # default operation is 'CREATE' per the docstring
    operation = _normalize_operation(operation)
    return _prepare_transaction(
        operation,
        signers=signers,
        recipients=recipients,
        asset=asset,
        metadata=metadata,
        inputs=inputs,
    )
Prepares a transaction payload, ready to be fulfilled. Depending on the value of ``operation``, simply dispatches to either :func:`~.prepare_create_transaction` or :func:`~.prepare_transfer_transaction`. Args: operation (str): The operation to perform. Must be ``'CREATE'`` or ``'TRANSFER'``. Case insensitive. Defaults to ``'CREATE'``. signers (:obj:`list` | :obj:`tuple` | :obj:`str`, optional): One or more public keys representing the issuer(s) of the asset being created. Only applies for ``'CREATE'`` operations. Defaults to ``None``. recipients (:obj:`list` | :obj:`tuple` | :obj:`str`, optional): One or more public keys representing the new recipients(s) of the asset being created or transferred. Defaults to ``None``. asset (:obj:`dict`, optional): The asset to be created or transferred. MUST be supplied for ``'TRANSFER'`` operations. Defaults to ``None``. metadata (:obj:`dict`, optional): Metadata associated with the transaction. Defaults to ``None``. inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`, optional): One or more inputs holding the condition(s) that this transaction intends to fulfill. Each input is expected to be a :obj:`dict`. Only applies to, and MUST be supplied for, ``'TRANSFER'`` operations. Returns: dict: The prepared transaction. Raises: :class:`~.exceptions.BigchaindbException`: If ``operation`` is not ``'CREATE'`` or ``'TRANSFER'``. .. important:: **CREATE operations** * ``signers`` MUST be set. * ``recipients``, ``asset``, and ``metadata`` MAY be set. * If ``asset`` is set, it MUST be in the form of:: { 'data': { ... } } * The argument ``inputs`` is ignored. * If ``recipients`` is not given, or evaluates to ``False``, it will be set equal to ``signers``:: if not recipients: recipients = signers **TRANSFER operations** * ``recipients``, ``asset``, and ``inputs`` MUST be set. * ``asset`` MUST be in the form of:: { 'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>' } * ``metadata`` MAY be set. * The argument ``signers`` is ignored.
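A minimal CREATE-style call sketched from the docstring; the public key and asset payload below are hypothetical values for illustration only:

tx = prepare_transaction(
    operation='CREATE',
    signers='3kQsGNfVvFhmNeUkRvJwMZsmHVPCFNd1PvoSPMfXkGFe',   # hypothetical public key
    asset={'data': {'serial_number': 'NNM-31X'}},
    metadata={'planet': 'earth'},
)
# `tx` is a dict ready to be fulfilled and submitted to the network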
15,322
def read_line(self, line):
    if self.ignore:
        return
    # quote characters below were garbled in extraction; reconstructed as the
    # two Python string delimiters
    for i, char in enumerate(line):
        if char not in ['"', "'"]:
            continue
        if self.single == char:
            self.single = None
            continue
        if self.single is not None:
            continue
        if not self.python:
            continue
        if self.triple == char:
            if line[i - 2:i + 1] == 3 * char:
                self.triple = None
            continue
        if self.triple is not None:
            continue
        if line[i - 2:i + 1] == 3 * char:
            self.triple = char
            continue
        self.single = char
    if self.python:
        self.single = None
Read a new line
15,323
def translate_wp_comment(self, e):
    # element names and dict keys reconstructed from the docstring's sample
    # <wp:comment> XML; the original string literals were stripped
    comment_dict = {}
    comment_dict['id'] = e.find('wp:comment_id').text
    comment_dict['author'] = e.find('wp:comment_author').text
    comment_dict['author_email'] = e.find('wp:comment_author_email').text
    comment_dict['approved'] = e.find('wp:comment_approved').text
    comment_dict['status'] = "approved" if comment_dict['approved'] == "1" else "rejected"
    comment_dict['content'] = e.find('wp:comment_content').text
    comment_dict['date'] = e.find('wp:comment_date').text
    comment_dict['date'] = time.strptime(comment_dict['date'], '%Y-%m-%d %H:%M:%S')
    comment_dict['date'] = time.strftime('%Y-%m-%d %H:%M:%S', comment_dict['date'])
    return comment_dict
<wp:comment> <wp:comment_id>1234</wp:comment_id> <wp:comment_author><![CDATA[John Doe]]></wp:comment_author> <wp:comment_author_email><![CDATA[[email protected]]]></wp:comment_author_email> <wp:comment_author_url>http://myhomepage.com/</wp:comment_author_url> <wp:comment_author_IP><![CDATA[12.123.123.123]]></wp:comment_author_IP> <wp:comment_date><![CDATA[2008-09-25 14:24:51]]></wp:comment_date> <wp:comment_date_gmt><![CDATA[2008-09-25 13:24:51]]></wp:comment_date_gmt> <wp:comment_content><![CDATA[Hey dude :)]]></wp:comment_content> <wp:comment_approved><![CDATA[1]]></wp:comment_approved> <wp:comment_type><![CDATA[]]></wp:comment_type> <wp:comment_parent>0</wp:comment_parent> <wp:comment_user_id>0</wp:comment_user_id> </wp:comment>
15,324
def charts_slug_get(self, slug, **kwargs):
    # swagger-codegen sync/async dispatch; the stripped kwarg keys are
    # reconstructed as '_return_http_data_only' and 'callback' (per docstring)
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.charts_slug_get_with_http_info(slug, **kwargs)
    else:
        (data) = self.charts_slug_get_with_http_info(slug, **kwargs)
        return data
Chart A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.charts_slug_get(slug, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str slug: Unique identifier for a Chart (required) :return: Chart If the method is called asynchronously, returns the request thread.
15,325
def match(cls, event):
    # event keys reconstructed ('detail', 'eventName', 'ids') following the
    # CloudTrail event structure described in the docstring
    if 'detail' not in event:
        return False
    if 'eventName' not in event['detail']:
        return False
    k = event['detail']['eventName']
    if k in cls.trail_events:
        v = dict(cls.trail_events[k])
        if isinstance(v['ids'], six.string_types):
            v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])
            cls.trail_events[k]['ids'] = e
        return v
    return False
Match a given cwe event as cloudtrail with an api call That has its information filled out.
15,326
def is_rfc822(self) -> bool:
    ct_hdr = self.header.parsed.content_type
    if ct_hdr is None:
        return False
    else:
        return ct_hdr.content_type == 'message/rfc822'
True if the content-type of the message is ``message/rfc822``.
15,327
def register_layouts(layouts, app, url="/api/props/", brand="Pyxley"): def props(name): if name not in layouts: name = list(layouts.keys())[0] return jsonify({"layouts": layouts[name]["layout"]}) def apps(): paths = [] for i, k in enumerate(layouts.keys()): if i == 0: paths.append({ "path": "/", "label": layouts[k].get("title", k) }) paths.append({ "path": "/"+k, "label": layouts[k].get("title", k) }) return jsonify({"brand": brand, "navlinks": paths}) app.add_url_rule(url+"<string:name>/", view_func=props) app.add_url_rule(url, view_func=apps)
register UILayout with the flask app create a function that will send props for each UILayout Args: layouts (dict): dict of UILayout objects by name app (object): flask app url (string): address of props; default is /api/props/
15,328
def register_preset(cls, name, preset):
    if cls._presets is None:
        cls._presets = {}
    cls._presets[name] = preset
Register a preset instance with the class of the hub it corresponds to. This allows individual plugin objects to automatically register themselves with a preset by using a classmethod of their own with only the name of the preset to register with.
15,329
def get_thread_info(self, enforce_re=True, latest_date=None): result = [] my_re = re.compile(self.topic_re) url = % (self.base_url) latest_date = self.parse_date(latest_date) if latest_date else None while url: kwargs = {} if not self.gh_info.user else {: ( self.gh_info.user, self.gh_info.token)} my_req = requests.get(url, params=self.params, **kwargs) my_json = my_req.json() for item in my_json: if (not enforce_re) or my_re.search(item[]): idate = self.parse_date(item[]) if (latest_date is not None and idate > latest_date): logging.debug(, item[], idate, latest_date) continue result.append(item) if self.max_threads is not None and len( result) >= self.max_threads: logging.debug(, len(result)) return result url = None if in my_req.headers: link = my_req.headers[].split() for thing in link: potential_url, part = thing.split() if part == : url = potential_url.lstrip().rstrip() return result
Return a json list with information about threads in the group. :param enforce_re=True: Whether to require titles to match regexp in self.topic_re. :param latest_date=None: Optional datetime.datetime for latest date to consider. Things past this are ignored. ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- :return: List of github items found. ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- PURPOSE: Return a json list with information about threads in the group. Along with latest_date, this can be used to show issues.
15,330
def _logpdf(self, **kwargs):
    for p in self._params:
        if p not in kwargs.keys():
            # error text reconstructed; the original string was stripped
            raise ValueError('Missing parameter {} to construct logpdf.'.format(p))
    if kwargs in self:
        log_pdf = self._lognorm + \
            (self.dim - 1) * \
            numpy.log([kwargs[p] for p in self._params]).sum()
        return log_pdf
    else:
        return -numpy.inf
Returns the log of the pdf at the given values. The keyword arguments must contain all of parameters in self's params. Unrecognized arguments are ignored.
15,331
def dict_factory(self, cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        val = row[idx]
        name = col[0]
        if name == Field.Time_Stamp:
            d[col[0]] = str(val)
            continue
        if name == "Raw_A" or name == "Raw_B":
            continue
        if name not in self.m_all_fields:
            continue
        if (str(val) != "None") and ((val > 0) or (val < 0)):
            d[name] = str(val)
    return d
Sqlite callback accepting the cursor and the original row as a tuple. Simple return of JSON safe types. Args: cursor (sqlite cursor): Original cursory row (sqlite row tuple): Original row. Returns: dict: modified row.
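A sketch of wiring dict_factory into sqlite3, assuming `reader` is an instance of the owning class (with m_all_fields populated); the database path and table name are hypothetical:

import sqlite3

conn = sqlite3.connect('meter_logs.db')          # hypothetical file
conn.row_factory = reader.dict_factory           # the bound method filters columns
rows = conn.execute('SELECT * FROM readings').fetchall()   # hypothetical table
# each row is now a dict of JSON-safe strings, with Raw_A/Raw_B dropped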
15,332
def linestrings_intersect(line1, line2):
    # the GeoJSON key 'coordinates' and the Point result keys were stripped in
    # extraction and are restored here
    intersects = []
    for i in range(0, len(line1['coordinates']) - 1):
        for j in range(0, len(line2['coordinates']) - 1):
            a1_x = line1['coordinates'][i][1]
            a1_y = line1['coordinates'][i][0]
            a2_x = line1['coordinates'][i + 1][1]
            a2_y = line1['coordinates'][i + 1][0]
            b1_x = line2['coordinates'][j][1]
            b1_y = line2['coordinates'][j][0]
            b2_x = line2['coordinates'][j + 1][1]
            b2_y = line2['coordinates'][j + 1][0]
            ua_t = (b2_x - b1_x) * (a1_y - b1_y) - (b2_y - b1_y) * (a1_x - b1_x)
            ub_t = (a2_x - a1_x) * (a1_y - b1_y) - (a2_y - a1_y) * (a1_x - b1_x)
            u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
            if not u_b == 0:
                u_a = ua_t / u_b
                u_b = ub_t / u_b
                if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
                    intersects.append({'type': 'Point', 'coordinates': [
                        a1_x + u_a * (a2_x - a1_x),
                        a1_y + u_a * (a2_y - a1_y)]})
    return intersects
Check whether two GeoJSON LineStrings intersect each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments: line1 -- first line geojson object line2 -- second line geojson object Returns an array of intersection points if line1 intersects line2, else an empty array.
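A quick check with two crossing GeoJSON LineStrings (coordinates as [lat, lon] pairs, matching the index order used in the function above):

line_a = {'type': 'LineString', 'coordinates': [[0.0, 0.0], [2.0, 2.0]]}
line_b = {'type': 'LineString', 'coordinates': [[0.0, 2.0], [2.0, 0.0]]}
print(linestrings_intersect(line_a, line_b))
# -> [{'type': 'Point', 'coordinates': [1.0, 1.0]}]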
15,333
def spawn_missing_classes(self, context=None): if context is None: if self.context is not None: context = self.context else: frame = inspect.currentframe().f_back context = frame.f_locals del frame tables = [ row[0] for row in self.connection.query( % self.database) if lookup_class_name(.format(db=self.database, tab=row[0]), context, 0) is None] master_classes = (Lookup, Manual, Imported, Computed) part_tables = [] for table_name in tables: class_name = to_camel_case(table_name) if class_name not in context: try: cls = next(cls for cls in master_classes if re.fullmatch(cls.tier_regexp, table_name)) except StopIteration: if re.fullmatch(Part.tier_regexp, table_name): part_tables.append(table_name) else: context[class_name] = self(type(class_name, (cls,), dict())) for table_name in part_tables: groups = re.fullmatch(Part.tier_regexp, table_name).groupdict() class_name = to_camel_case(groups[]) try: master_class = context[to_camel_case(groups[])] except KeyError: raise DataJointError( % table_name) part_class = type(class_name, (Part,), dict(definition=...)) part_class._master = master_class self.process_relation_class(part_class, context=context, assert_declared=True) setattr(master_class, class_name, part_class)
Creates the appropriate python user relation classes from tables in the schema and places them in the context. :param context: alternative context to place the missing classes into, e.g. locals()
15,334
def dict_2_mat(data, fill=True):
    if any([type(k) != int for k in list(data.keys())]):
        raise RuntimeError("Dictionary cannot be converted to matrix, "
                           "not all keys are ints")
    base_shape = np.array(list(data.values())[0]).shape
    result_shape = list(base_shape)
    if fill:
        result_shape.insert(0, max(data.keys()) + 1)
    else:
        result_shape.insert(0, len(list(data.keys())))
    result = np.empty(result_shape) + np.nan
    for (i, (k, v)) in enumerate(data.items()):
        v = np.array(v)
        if v.shape != base_shape:
            raise RuntimeError("Dictionary cannot be converted to matrix, "
                               "not all values have same dimensions")
        # use the dict key as index when filling, else the enumeration index
        # (replaces the original and/or idiom, which broke for key 0)
        result[k if fill else i] = v
    return result
Creates a NumPy array from a dictionary with only integers as keys and NumPy arrays as values. Dimension 0 of the resulting array is formed from data.keys(). Missing values in keys can be filled up with np.nan (default) or ignored. Parameters ---------- data : dict a dictionary with integers as keys and array-likes of the same shape as values fill : boolean flag specifying if the resulting matrix will keep a correspondence between dictionary keys and matrix indices by filling up missing keys with matrices of NaNs. Defaults to True Returns ------- numpy array with one more dimension than the values of the input dict
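A small illustration of both fill modes, using the function as reconstructed above:

import numpy as np

data = {0: np.array([1.0, 2.0]), 2: np.array([3.0, 4.0])}
dict_2_mat(data).shape               # (3, 2); row 1 is all NaN (key 1 missing)
dict_2_mat(data, fill=False).shape   # (2, 2); rows packed in iteration order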
15,335
def parse_version(package):
    # path and literals reconstructed; the f-string body was stripped
    init_file = f'{package}/__init__.py'
    with open(init_file, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            if '__version__' in line:
                return line.split('=')[1].strip()[1:-1]
    return ''
Parse versions
15,336
def multigrad_dict(fun):
    """Takes gradients wrt all arguments simultaneously,
    returns a dict mapping 'argname' to 'gradval'"""
    import funcsigs
    sig = funcsigs.signature(fun)

    def select(preds, lst):
        idx = lambda item: next(
            (i for i, pred in enumerate(preds) if pred(item)), len(preds))
        results = [[] for _ in preds] + [[]]
        for item in lst:
            results[idx(item)].append(item)
        return results

    is_var_pos = lambda name: sig.parameters[name].kind == sig.parameters[name].VAR_POSITIONAL
    is_var_kwd = lambda name: sig.parameters[name].kind == sig.parameters[name].VAR_KEYWORD
    var_pos, var_kwd, argnames = select([is_var_pos, is_var_kwd], sig.parameters)
    todict = lambda dct: {key: dct[key] for key in dct}

    def apply_defaults(arguments):
        defaults = {name: param.default for name, param in sig.parameters.items()
                    if param.default is not param.empty}
        return OrderedDict((name, arguments[name] if name in arguments else defaults[name])
                           for name in sig.parameters)

    def gradfun(*args, **kwargs):
        bindings = sig.bind(*args, **kwargs)
        args = lambda dct: tuple(dct[var_pos[0]]) if var_pos else ()
        kwargs = lambda dct: todict(dct[var_kwd[0]]) if var_kwd else {}
        others = lambda dct: tuple(dct[argname] for argname in argnames
                                   if argname not in var_kwd + var_pos)
        newfun = lambda dct: fun(*(others(dct) + args(dct)), **kwargs(dct))
        argdict = apply_defaults(bindings.arguments)
        grad_dict = grad(newfun)(dict(argdict))
        return OrderedDict((argname, grad_dict[argname]) for argname in argdict)

    return gradfun
Takes gradients wrt all arguments simultaneously; returns a dict mapping each argument name to its gradient value.
15,337
def isprintable(string):
    string = string.strip()
    if not string:
        return True
    if sys.version_info[0] == 3:
        try:
            return string.isprintable()
        except Exception:
            pass
        try:
            return string.decode().isprintable()
        except Exception:
            pass
    else:
        if string.isalnum():
            return True
        # the printable-character set was garbled in extraction and is
        # reconstructed from string.printable
        printable = ('0123456789abcdefghijklmnopqrstuvwxyz'
                     'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                     '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c')
        return all(c in printable for c in string)
Return if all characters in string are printable. >>> isprintable('abc') True >>> isprintable(b'\01') False
15,338
def nlp(self, inputString, sourceTime=None, version=None): orig_inputstring = inputString inputString = re.sub(r, r, inputString).lower() inputString = re.sub(r|")(\s|$)\1 \3(\s|^)(\, r, inputString) startpos = 0 matches = [] while startpos < len(inputString): leftmost_match = [0, 0, None, 0, None] m = self.ptc.CRE_MODIFIER.search(inputString[startpos:]) if m is not None: if leftmost_match[1] == 0 or \ leftmost_match[0] > m.start() + startpos: leftmost_match[0] = m.start() + startpos leftmost_match[1] = m.end() + startpos leftmost_match[2] = m.group() leftmost_match[3] = 0 leftmost_match[4] = m = self.ptc.CRE_UNITS.search(inputString[startpos:]) if m is not None: debug and log.debug() if self._UnitsTrapped(inputString[startpos:], m, ): debug and log.debug() else: if leftmost_match[1] == 0 or \ leftmost_match[0] > m.start() + startpos: leftmost_match[0] = m.start() + startpos leftmost_match[1] = m.end() + startpos leftmost_match[2] = m.group() leftmost_match[3] = 3 leftmost_match[4] = if m.start() > 0 and \ inputString[m.start() - 1] == : leftmost_match[0] = leftmost_match[0] - 1 leftmost_match[2] = + leftmost_match[2] m = self.ptc.CRE_QUNITS.search(inputString[startpos:]) if m is not None: debug and log.debug() if self._UnitsTrapped(inputString[startpos:], m, ): debug and log.debug() else: if leftmost_match[1] == 0 or \ leftmost_match[0] > m.start() + startpos: leftmost_match[0] = m.start() + startpos leftmost_match[1] = m.end() + startpos leftmost_match[2] = m.group() leftmost_match[3] = 3 leftmost_match[4] = if m.start() > 0 and \ inputString[m.start() - 1] == : leftmost_match[0] = leftmost_match[0] - 1 leftmost_match[2] = + leftmost_match[2] m = self.ptc.CRE_DATE3.search(inputString[startpos:]) if date or time or units: combined = orig_inputstring[matches[from_match_index] [0]:matches[i - 1][1]] parsed_datetime, flags = self.parse(combined, sourceTime, version) proximity_matches.append(( datetime.datetime(*parsed_datetime[:6]), flags, matches[from_match_index][0], matches[i - 1][1], combined)) from_match_index = i date = matches[i][3] == 1 time = matches[i][3] == 2 units = matches[i][3] == 3 continue else: if matches[i][3] == 1: date = True if matches[i][3] == 2: time = True if matches[i][3] == 3: units = True if date or time or units: combined = orig_inputstring[matches[from_match_index][0]: matches[len(matches) - 1][1]] parsed_datetime, flags = self.parse(combined, sourceTime, version) proximity_matches.append(( datetime.datetime(*parsed_datetime[:6]), flags, matches[from_match_index][0], matches[len(matches) - 1][1], combined)) elif len(matches) == 0: return None else: if matches[0][3] == 0: return None else: combined = orig_inputstring[matches[0][0]:matches[0][1]] parsed_datetime, flags = self.parse(matches[0][2], sourceTime, version) proximity_matches.append(( datetime.datetime(*parsed_datetime[:6]), flags, matches[0][0], matches[0][1], combined)) return tuple(proximity_matches)
Utilizes parse() after making judgements about what datetime information belongs together. It makes logical groupings based on proximity and returns a parsed datetime for each matched grouping of datetime text, along with location info within the given inputString. @type inputString: string @param inputString: natural language text to evaluate @type sourceTime: struct_time @param sourceTime: C{struct_time} value to use as the base @type version: integer @param version: style version, default will use L{Calendar} parameter version value @rtype: tuple or None @return: tuple of tuples in the format (parsed_datetime as datetime.datetime, flags as int, start_pos as int, end_pos as int, matched_text as string) or None if there were no matches
15,339
def wifi_status(self): return self._info_json.get(CONST.STATUS, {}).get(CONST.WIFI_LINK)
Get the wifi status.
15,340
def get_usedby_and_readonly(self, id): uri = self.URI + "/" + id + "/usedby/readonly" return self._client.get(uri)
Gets the build plan details of the selected plan script as per the selected attributes. Args: id: ID of the Plan Script. Returns: array of build plans
15,341
def update_body(app, pagename, templatename, context, doctree): STATIC_URL = context.get(, DEFAULT_STATIC_URL) online_builders = [ , , ] if app.builder.name == : if in context and context[] == : theme_css = else: theme_css = elif app.builder.name in online_builders: if in context and context[] == : theme_css = % STATIC_URL else: theme_css = % STATIC_URL else: return inject_css = True if theme_css.endswith(): try: import sphinx_rtd_theme inject_css = LooseVersion(sphinx_rtd_theme.__version__) < LooseVersion() except ImportError: pass if inject_css and theme_css not in app.builder.css_files: if sphinx.version_info < (1, 8): app.builder.css_files.insert(0, theme_css) else: app.add_css_file(theme_css) return content rtd_render._patched = True app.builder.templates.render = types.MethodType(rtd_render, app.builder.templates)
Add Read the Docs content to Sphinx body content. This is the most reliable way to inject our content into the page.
15,342
async def get_state(self):
    await self._protocol.send_command("getstate", callback=False)
    return await self._protocol.await_event()
Get the latest state change of QTM. If the :func:`~qtm.connect` on_event callback was set the callback will be called as well. :rtype: A :class:`qtm.QRTEvent`
15,343
def get_userinfo(self, access_token, id_token, payload):
    user_response = requests.get(
        self.OIDC_OP_USER_ENDPOINT,
        headers={
            'Authorization': 'Bearer {0}'.format(access_token)
        },
        verify=self.get_settings('OIDC_VERIFY_SSL', True))
    user_response.raise_for_status()
    return user_response.json()
Return user details dictionary. The id_token and payload are not used in the default implementation, but may be used when overriding this method
15,344
def status_message(self):
    if self.is_available:
        return "INSTALLED {0!s}"
    elif self.why and self.package:
        return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package"
    elif self.why:
        return "MISSING {0!s:<20}needed for {0.why}"
    elif self.package:
        return "MISSING {0!s:<20}part of the {0.package} package"
    else:
        return "MISSING {0!s:<20}"
Detailed message about whether the dependency is installed. :rtype: str
15,345
def P(self):
    try:
        return self._diff_op
    except AttributeError:
        # row-normalize the kernel to a row-stochastic matrix; the stripped
        # norm argument is reconstructed as 'l1'
        self._diff_op = normalize(self.kernel, 'l1', axis=1)
        return self._diff_op
Diffusion operator (cached) Return or calculate the diffusion operator Returns ------- P : array-like, shape=[n_samples, n_samples] diffusion operator defined as a row-stochastic form of the kernel matrix
15,346
def _setup_aggregation(self, aggregator=None):
    from nefertari.elasticsearch import ES
    if aggregator is None:
        aggregator = ESAggregator
    # setting name reconstructed from the docstring's
    # `elasticsearch.enable_aggregations`
    aggregations_enabled = (
        ES.settings and ES.settings.asbool('enable_aggregations'))
    if not aggregations_enabled:
        log.debug('Aggregations are not enabled')
        return
    index = getattr(self, 'index', None)
    index_defined = index and index != self.not_allowed_action
    if index_defined:
        self.index = aggregator(self).wrap(self.index)
Wrap `self.index` method with ESAggregator. This makes `self.index` to first try to run aggregation and only on fail original method is run. Method is wrapped only if it is defined and `elasticsearch.enable_aggregations` setting is true.
15,347
def load(robot, container_name, slot, label=None, share=False):
    # NOTE: the row-letter string and message literals below were stripped in
    # extraction; 'ABCDE' and the messages are reconstructed guesses.
    def is_ot_one_slot_name(s):
        return isinstance(s, str) and len(s) == 2 and s[0] in 'ABCDE'

    def convert_ot_one_slot_names(s):
        col = 'ABCDE'.index(slot[0])
        row = int(slot[1]) - 1
        slot_number = col + (row * robot.get_max_robot_cols()) + 1
        log.warning('Converting OT-One slot {} to slot {}'.format(slot, slot_number))
        return slot_number

    if isinstance(slot, str):
        try:
            slot = int(slot)
        except (ValueError, TypeError):
            if is_ot_one_slot_name(slot):
                slot = convert_ot_one_slot_names(slot)
    if helpers.is_number(slot):
        if not (1 <= slot <= len(robot.deck)):
            raise ValueError('Unknown slot: {}'.format(slot))
        slot = str(slot)
    return robot.add_container(container_name, slot, label, share)
Examples -------- >>> from opentrons import containers >>> containers.load('96-flat', '1') <Deck>/<Slot 1>/<Container 96-flat> >>> containers.load('96-flat', '4', 'plate') <Deck>/<Slot 4>/<Container plate> >>> containers.load('non-existent-type', '4') # doctest: +ELLIPSIS Exception: Container type "non-existent-type" not found in file ...
15,348
def put_method(restApiId=None, resourceId=None, httpMethod=None, authorizationType=None, authorizerId=None, apiKeyRequired=None, operationName=None, requestParameters=None, requestModels=None, requestValidatorId=None): pass
Add a method to an existing Resource resource. See also: AWS API Documentation :example: response = client.put_method( restApiId='string', resourceId='string', httpMethod='string', authorizationType='string', authorizerId='string', apiKeyRequired=True|False, operationName='string', requestParameters={ 'string': True|False }, requestModels={ 'string': 'string' }, requestValidatorId='string' ) :type restApiId: string :param restApiId: [REQUIRED] The RestApi identifier for the new Method resource. :type resourceId: string :param resourceId: [REQUIRED] The Resource identifier for the new Method resource. :type httpMethod: string :param httpMethod: [REQUIRED] Specifies the method request's HTTP method type. :type authorizationType: string :param authorizationType: [REQUIRED] The method's authorization type. Valid values are NONE for open access, AWS_IAM for using AWS IAM permissions, CUSTOM for using a custom authorizer, or COGNITO_USER_POOLS for using a Cognito user pool. :type authorizerId: string :param authorizerId: Specifies the identifier of an Authorizer to use on this Method, if the type is CUSTOM. :type apiKeyRequired: boolean :param apiKeyRequired: Specifies whether the method required a valid ApiKey . :type operationName: string :param operationName: A human-friendly operation identifier for the method. For example, you can assign the operationName of ListPets for the GET /pets method in PetStore example. :type requestParameters: dict :param requestParameters: A key-value map defining required or optional method request parameters that can be accepted by Amazon API Gateway. A key defines a method request parameter name matching the pattern of method.request.{location}.{name} , where location is querystring , path , or header and name is a valid and unique parameter name. The value associated with the key is a Boolean flag indicating whether the parameter is required (true ) or optional (false ). The method request parameter names defined here are available in Integration to be mapped to integration request parameters or body-mapping templates. (string) -- (boolean) -- :type requestModels: dict :param requestModels: Specifies the Model resources used for the request's content type. Request models are represented as a key/value map, with a content type as the key and a Model name as the value. (string) -- (string) -- :type requestValidatorId: string :param requestValidatorId: The identifier of a RequestValidator for validating the method request. 
:rtype: dict :return: { 'httpMethod': 'string', 'authorizationType': 'string', 'authorizerId': 'string', 'apiKeyRequired': True|False, 'requestValidatorId': 'string', 'operationName': 'string', 'requestParameters': { 'string': True|False }, 'requestModels': { 'string': 'string' }, 'methodResponses': { 'string': { 'statusCode': 'string', 'responseParameters': { 'string': True|False }, 'responseModels': { 'string': 'string' } } }, 'methodIntegration': { 'type': 'HTTP'|'AWS'|'MOCK'|'HTTP_PROXY'|'AWS_PROXY', 'httpMethod': 'string', 'uri': 'string', 'credentials': 'string', 'requestParameters': { 'string': 'string' }, 'requestTemplates': { 'string': 'string' }, 'passthroughBehavior': 'string', 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT', 'cacheNamespace': 'string', 'cacheKeyParameters': [ 'string', ], 'integrationResponses': { 'string': { 'statusCode': 'string', 'selectionPattern': 'string', 'responseParameters': { 'string': 'string' }, 'responseTemplates': { 'string': 'string' }, 'contentHandling': 'CONVERT_TO_BINARY'|'CONVERT_TO_TEXT' } } } } :returns: (string) -- (boolean) --
15,349
def url(self): if self._url is not None: url = self._url else: url = getattr(self.nb.metadata, , None) if url is not None: return nbviewer_link(url)
The url on jupyter nbviewer for this notebook or None if unknown
15,350
def convert_ages_to_calendar_year(self, er_ages_rec): if "age" not in list(er_ages_rec.keys()): return(er_ages_rec) if "age_unit" not in list(er_ages_rec.keys()): return(er_ages_rec) if er_ages_rec["age_unit"] == "": return(er_ages_rec) if er_ages_rec["age"] == "": if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": er_ages_rec["age"] = scipy.mean( [float(er_ages_rec["age_range_high"]), float(er_ages_rec["age_range_low"])]) if er_ages_rec["age"] == "": return(er_ages_rec) age_unit = er_ages_rec["age_unit"] mutliplier = 1 if age_unit == "Ga": mutliplier = -1e9 if age_unit == "Ma": mutliplier = -1e6 if age_unit == "Ka": mutliplier = -1e3 if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)": mutliplier = 1 if age_unit == "Years BP" or age_unit == "Years Cal BP": mutliplier = 1 age = float(er_ages_rec["age"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age = 1950-age er_ages_rec[] = age age_range_low = age age_range_high = age age_sigma = 0 if "age_sigma" in list(er_ages_rec.keys()) and er_ages_rec["age_sigma"] != "": age_sigma = float(er_ages_rec["age_sigma"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_sigma = 1950-age_sigma age_range_low = age-age_sigma age_range_high = age+age_sigma if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": age_range_high = float( er_ages_rec["age_range_high"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_high = 1950-age_range_high age_range_low = float(er_ages_rec["age_range_low"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_low = 1950-age_range_low er_ages_rec[] = age_range_low er_ages_rec[] = age_range_high return(er_ages_rec)
convert all age units to calendar year Parameters ---------- er_ages_rec : Dict type object containing preferably at least keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : Same dict object input but altered to have new records 'age_cal_year_range_low' and 'age_cal_year_range_high'
15,351
def query(self):
    if self._rawquery:
        return self.__query
    return flatten(self.__query)
The mongo query object which would be executed if this Query object were used
15,352
def _create_raw_data(self):
    # section id key and the 'all' value reconstructed from the docstring's
    # example cookie payload
    result = {}
    for section in self.get_sections():
        result[section.get('id')] = 'all'
    return result
Gathers the different section ids and creates a string as the first cookie data. :return: A dictionary like: {'analyses':'all','analysisrequest':'all','worksheets':'all'}
15,353
def start(self):
    if self._send_greenlet is None:
        self._send_greenlet = gevent.spawn(self._send_loop)
Start the message sending loop.
15,354
def strip_unreferenced_labels(asm_lines):
    # regular expression literals reconstructed; the originals were stripped
    # in extraction
    asm_stripped = []
    for line in asm_lines:
        if re.match(r'^\S+:', line):
            label = line[0:line.find(':')]
            # drop the label line if no other line references the label
            if not any([re.match(r'.*' + re.escape(label) + r'(?!:)', l)
                        for l in asm_lines]):
                line = ''
        asm_stripped.append(line)
    return asm_stripped
Strip all labels, which are never referenced.
15,355
def getattribute(value, arg):
    if hasattr(value, str(arg)):
        return getattr(value, arg)
    elif hasattr(value, 'has_key') and value.has_key(arg):
        return value[arg]
    elif numeric_test.match(str(arg)) and len(value) > int(arg):
        return value[int(arg)]
    else:
        return settings.TEMPLATE_STRING_IF_INVALID
Gets an attribute of an object dynamically from a string name
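A few illustrative calls, assuming the module-level `numeric_test` regex and Django `settings` import that the filter body relies on:

class User:
    email = 'a@example.com'

getattribute(User(), 'email')   # attribute lookup -> 'a@example.com'
getattribute(['x', 'y'], '1')   # numeric index -> 'y'
# the dict branch uses dict.has_key, so it only fires on Python 2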
15,356
def main():
    print("\t\tCalculator\n\n")
    while True:
        user_input = input("expression or exit: ")
        if user_input == "exit":
            break
        try:
            print("The result is {0}".format(evaluate(user_input)))
        except Exception:
            print("invalid syntax!")
    # the duplicate trailing input() call from the flattened original was
    # redundant with the prompt at the top of the loop and has been removed
    print("program end")
simple user-interface
15,357
def pad_equal_whitespace(string, pad=None):
    # split on newlines and left-justify each line to the same width; the
    # stripped separator and format-string literals are reconstructed
    if pad is None:
        pad = max(map(len, string.split('\n'))) + 1
    return '\n'.join(('{:<%d}' % pad).format(line) for line in string.split('\n'))
Given a multiline string, add whitespaces to every line so that every line has the same length.
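A quick check of the padding behavior, using the reconstruction above:

s = 'short\na much longer line\nmid'
padded = pad_equal_whitespace(s)
[len(line) for line in padded.split('\n')]   # all equal: max line length + 1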
15,358
def _rts_smoother_update_step(k, p_m , p_P, p_m_pred, p_P_pred, p_m_prev_step, p_P_prev_step, p_dynamic_callables): A = p_dynamic_callables.Ak(k,p_m,p_P) tmp = np.dot( A, p_P.T) if A.shape[0] == 1: G = tmp.T / p_P_pred else: try: LL,islower = linalg.cho_factor(p_P_pred) G = linalg.cho_solve((LL,islower),tmp).T except: res = sp.linalg.lstsq(p_P_pred, tmp) G = res[0].T m_upd = p_m + G.dot( p_m_prev_step-p_m_pred ) P_upd = p_P + G.dot( p_P_prev_step-p_P_pred).dot(G.T) P_upd = 0.5*(P_upd + P_upd.T) return m_upd, P_upd, G
Rauch–Tung–Striebel(RTS) update step Input: ----------------------------- k: int Iteration No. Starts at 0. Total number of iterations equal to the number of measurements. p_m: matrix of size (state_dim, time_series_no) Filter mean on step k p_P: matrix of size (state_dim,state_dim) Filter Covariance on step k p_m_pred: matrix of size (state_dim, time_series_no) Means from the smoother prediction step. p_P_pred: Covariance from the smoother prediction step. p_m_prev_step Smoother mean from the previous step. p_P_prev_step: Smoother covariance from the previous step. p_f_A: function (k, m, P) return Jacobian of dynamic function, it is passed into p_a. k (iteration number), starts at 0 m: point where Jacobian is evaluated P: parameter for Jacobian, usually covariance matrix.
15,359
def rename_tokens(docgraph_with_old_names, docgraph_with_new_names, verbose=False):
    old2new = create_token_mapping(docgraph_with_old_names,
                                   docgraph_with_new_names, verbose=verbose)
    # the stripped attribute name is reconstructed as 'renamed_nodes'
    if hasattr(docgraph_with_new_names, 'renamed_nodes'):
        docgraph_with_new_names.renamed_nodes.update(old2new)
    else:
        docgraph_with_new_names.renamed_nodes = old2new
    relabel_nodes(docgraph_with_old_names, old2new, copy=False)
    new_token_ids = old2new.values()
    if new_token_ids:
        docgraph_with_old_names.tokens = new_token_ids
Renames the tokens of a graph (``docgraph_with_old_names``) in-place, using the token names of another document graph (``docgraph_with_new_names``). Also updates the ``.tokens`` list of the old graph. This will only work, iff both graphs have the same tokenization.
15,360
def left_complement(clr): left = split_complementary(clr)[1] colors = complementary(clr) colors[3].h = left.h colors[4].h = left.h colors[5].h = left.h colors = colorlist( colors[0], colors[2], colors[1], colors[3], colors[4], colors[5] ) return colors
Returns the left half of the split complement. A list is returned with the same darker and softer colors as in the complementary list, but using the hue of the left split complement instead of the complement itself.
15,361
def create_git_release(self, tag, name, message, draft=False, prerelease=False, target_commitish=github.GithubObject.NotSet): assert isinstance(tag, (str, unicode)), tag assert isinstance(name, (str, unicode)), name assert isinstance(message, (str, unicode)), message assert isinstance(draft, bool), draft assert isinstance(prerelease, bool), prerelease assert target_commitish is github.GithubObject.NotSet or isinstance(target_commitish, (str, unicode, github.Branch.Branch, github.Commit.Commit, github.GitCommit.GitCommit)), target_commitish post_parameters = { "tag_name": tag, "name": name, "body": message, "draft": draft, "prerelease": prerelease, } if isinstance(target_commitish, (str, unicode)): post_parameters["target_commitish"] = target_commitish elif isinstance(target_commitish, github.Branch.Branch): post_parameters["target_commitish"] = target_commitish.name elif isinstance(target_commitish, (github.Commit.Commit, github.GitCommit.GitCommit)): post_parameters["target_commitish"] = target_commitish.sha headers, data = self._requester.requestJsonAndCheck( "POST", self.url + "/releases", input=post_parameters ) return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
:calls: `POST /repos/:owner/:repo/releases <http://developer.github.com/v3/repos/releases>`_ :param tag: string :param name: string :param message: string :param draft: bool :param prerelease: bool :param target_commitish: string or :class:`github.Branch.Branch` or :class:`github.Commit.Commit` or :class:`github.GitCommit.GitCommit` :rtype: :class:`github.GitRelease.GitRelease`
15,362
def get_rendered_toctree(builder, docname, prune=False, collapse=True):
    fulltoc = build_full_toctree(builder, docname, prune=prune, collapse=collapse)
    rendered_toc = builder.render_partial(fulltoc)['fragment']
    return rendered_toc
Build the toctree relative to the named document, with the given parameters, and then return the rendered HTML fragment.
15,363
def tail(collection, filter=None, projection=None, limit=0, timeout=None, aggregate=False):
    if not collection.options().get('capped', False):
        raise TypeError("Can only tail capped collections.")

    # NOTE: the cursor construction was lost in extraction; this is a
    # plausible reconstruction using PyMongo's tailable-await cursor type
    cursor = collection.find(filter, projection, limit=limit,
                             cursor_type=CursorType.TAILABLE_AWAIT)

    if timeout:
        if aggregate:
            cursor = cursor.max_time_ms(int(timeout * 1000)).max_await_time_ms(int(timeout * 1000))
        else:
            cursor = cursor.max_await_time_ms(int(timeout * 1000))

    return cursor
A generator which will block and yield entries as they are added to a capped collection. Only use this on capped collections; behaviour is undefined against non-tailable cursors. Accepts a timeout as an integer or floating point number of seconds, indicating how long to wait for a result. Correct operation requires a modern MongoDB installation, version 3.2 or newer, and the client driver to support it. Use is trivial: for obj in tail(db.collection, timeout=10): print(obj) An optional argument, aggregate, allows you to control how the timeout value is interpreted. By default, False, the timeout is used as the longest period of time to wait for a new record, resetting on each retrieved record. Additional important note: tailing will fail (badly) if the collection is empty. Always prime the collection with an empty or otherwise unimportant record before attempting to use this feature.
15,364
def latex_defs_to_katex_macros(defs):
    r'''Converts LaTeX \def statements to KaTeX macros.'''
    # NOTE: the body was lost in extraction apart from the pattern fragment
    # `\\def[ ]?(` and `return macros`; this is a plausible reconstruction
    # that rewrites each \def line into a KaTeX macro entry
    macros = ''
    for line in defs.splitlines():
        match = re.search(r'\\def[ ]?(\\[a-zA-Z]+)[ ]?(?:#[0-9])*[ ]?\{(.+)\}', line)
        if match is not None:
            macros += '"{}": "{}",\n'.format(match.group(1).replace('\\', '\\\\'),
                                             match.group(2).replace('\\', '\\\\'))
    return macros
Converts LaTeX \def statements to KaTeX macros. This is a helper function that can be used in conf.py to translate your already specified LaTeX definitions, see https://github.com/Khan/KaTeX#rendering-options, e.g. `\def \e #1{\mathrm{e}^{#1}}` => `"\\e": "\\mathrm{e}^{#1}"` Example ------- import sphinxcontrib.katex as katex # Get your LaTeX defs into `latex_defs` and then do latex_macros = katex.latex_defs_to_katex_macros(latex_defs) katex_options = 'macros: {' + latex_macros + '}'
15,365
def validate_swagger_schema(schema_dir, resource_listing):
    schema_filepath = os.path.join(schema_dir, API_DOCS_FILENAME)
    swagger_spec_validator.validator12.validate_spec(
        resource_listing,
        urlparse.urljoin('file:', pathname2url(os.path.abspath(schema_filepath))),
    )
Validate the structure of Swagger schemas against the spec. **Valid only for Swagger v1.2 spec** Note: It is possible that resource_listing is not present in the schema_dir. The path is passed in the call so that ssv can fetch the api-declaration files from the path. :param resource_listing: Swagger Spec v1.2 resource listing :type resource_listing: dict :param schema_dir: A path to Swagger spec directory :type schema_dir: string :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
15,366
def results(self, trial_ids):
    metadata_folder = os.path.join(self.log_dir, constants.METADATA_FOLDER)
    dfs = []
    for trial_id in trial_ids:
        result_file = os.path.join(
            metadata_folder, trial_id + "_" + constants.RESULT_SUFFIX)
        assert os.path.isfile(result_file), result_file
        # the stripped typ argument is reconstructed as 'frame'
        dfs.append(pd.read_json(result_file, typ='frame', lines=True))
    df = pd.concat(dfs, axis=0, ignore_index=True, sort=False)
    return df
Accepts a sequence of trial ids and returns a pandas dataframe with the schema trial_id, iteration?, *metric_schema_union where iteration is an optional column that specifies the iteration when a user logged a metric, if the user supplied one. The iteration column is added if any metric was logged with an iteration. Then, every metric name that was ever logged is a column in the metric_schema_union.
15,367
def calledTwice(cls, spy):
    cls.__is_spy(spy)
    if not (spy.calledTwice):
        raise cls.failException(cls.message)
Checking the inspector is called twice Args: SinonSpy
15,368
def render_table(self, headers, rows, style=None):
    table = self.table(headers, rows, style)
    table.render(self._io)
Format input to textual table.
15,369
def append(self, parent, content):
    # log format reconstructed; the original string was stripped
    log.debug('appending parent=%s, content=%s', parent, content)
    if self.start(content):
        self.appender.append(parent, content)
        self.end(parent, content)
Append the specified L{content} to the I{parent}. @param parent: The parent node to append to. @type parent: L{Element} @param content: The content to append. @type content: L{Object}
15,370
def validate_response(self):
    if self.is_success():
        return
    # the stripped details key is reconstructed as 'error'
    if self.details:
        error = self.details.get('error', None)
        if error == PushResponse.ERROR_DEVICE_NOT_REGISTERED:
            raise DeviceNotRegisteredError(self)
        elif error == PushResponse.ERROR_MESSAGE_TOO_BIG:
            raise MessageTooBigError(self)
        elif error == PushResponse.ERROR_MESSAGE_RATE_EXCEEDED:
            raise MessageRateExceededError(self)
    raise PushResponseError(self)
Raises an exception if there was an error. Otherwise, do nothing. Clients should handle these errors, since these require custom handling to properly resolve.
15,371
def promote_transaction( self, transaction, depth=3, min_weight_magnitude=None, ): if min_weight_magnitude is None: min_weight_magnitude = self.default_min_weight_magnitude return extended.PromoteTransactionCommand(self.adapter)( transaction=transaction, depth=depth, minWeightMagnitude=min_weight_magnitude, )
Promotes a transaction by adding spam on top of it. :return: Dict with the following structure:: { 'bundle': Bundle, The newly-published bundle. }
15,372
def parse_default_property_value(property_name, property_type_id, default_value_string):
    # The '{}' / '[]' empty-collection literals are assumed; the original
    # string constants were elided.
    if property_type_id == PROPERTY_TYPE_EMBEDDED_SET_ID and default_value_string == '{}':
        return set()
    elif property_type_id == PROPERTY_TYPE_EMBEDDED_LIST_ID and default_value_string == '[]':
        return list()
    elif (property_type_id == PROPERTY_TYPE_STRING_ID and
          isinstance(default_value_string, six.string_types)):
        return default_value_string
    elif property_type_id == PROPERTY_TYPE_BOOLEAN_ID:
        return _parse_bool_default_value(property_name, default_value_string)
    elif property_type_id == PROPERTY_TYPE_DATETIME_ID:
        return _parse_datetime_default_value(property_name, default_value_string)
    elif property_type_id == PROPERTY_TYPE_DATE_ID:
        return _parse_date_default_value(property_name, default_value_string)
    else:
        raise AssertionError(u'Unsupported default value for property "{}" with type id {}: '
                             u'{}'.format(property_name, property_type_id, default_value_string))
Parse the default value string into its proper form given the property type ID.

Args:
    property_name: string, the name of the property whose default value is being parsed.
                   Used primarily to construct meaningful error messages, should the
                   default value prove invalid.
    property_type_id: int, one of the property type ID constants defined in this file that
                      OrientDB uses to designate the native type of a given property.
    default_value_string: string, the textual representation of the default value for
                          the property, as returned by OrientDB's schema introspection code.

Returns:
    an object of type matching the property that can be used as the property's default value.
    For example, if the property is of string type, the return type will be a string, and
    if the property is of list type, the return type will be a list.

Raises:
    AssertionError, if the default value is not supported or does not match the
    property's declared type (e.g. if a default of "[]" is set on an integer property).
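A few illustrative calls; the PROPERTY_TYPE_* constants come from the module above, and the '{}' empty-set literal matches the textual default assumed in the code.

parse_default_property_value('tags', PROPERTY_TYPE_EMBEDDED_SET_ID, '{}')   # -> set()
parse_default_property_value('name', PROPERTY_TYPE_STRING_ID, 'unknown')    # -> 'unknown'
parse_default_property_value('active', PROPERTY_TYPE_BOOLEAN_ID, 'true')    # -> delegated to _parse_bool_default_value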
15,373
def keys_values(data, *keys): values = [] if is_mapping(data): for key in keys: if key in data: values.extend(ensure_list(data[key])) return values
Get the values for one or more keys of a dict as a single flat list;
keys that are missing are skipped.
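A short usage sketch; `is_mapping` and `ensure_list` are the module helpers used in the snippet above.

data = {'email': 'a@example.org', 'emails': ['b@example.org', 'c@example.org']}
keys_values(data, 'email', 'emails')  # -> ['a@example.org', 'b@example.org', 'c@example.org']
keys_values(data, 'phone')            # -> [] (missing keys are skipped)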
15,374
def eval_callx(self, exp): "dispatch for CallX" return (self.eval_agg_call if consumes_rows(exp) else self.eval_nonagg_call)(exp)
dispatch for CallX
15,375
def Satisfy_Constraints(U, B, BtBinv): RowsPerBlock = U.blocksize[0] ColsPerBlock = U.blocksize[1] num_block_rows = int(U.shape[0]/RowsPerBlock) UB = np.ravel(U*B) pyamg.amg_core.satisfy_constraints_helper(RowsPerBlock, ColsPerBlock, num_block_rows, B.shape[1], np.conjugate(np.ravel(B)), UB, np.ravel(BtBinv), U.indptr, U.indices, np.ravel(U.data)) return U
U is the prolongator update. Project out components of U such that U*B = 0. Parameters ---------- U : bsr_matrix m x n sparse bsr matrix Update to the prolongator B : array n x k array of the coarse grid near nullspace vectors BtBinv : array Local inv(B_i.H*B_i) matrices for each supernode, i B_i is B restricted to the sparsity pattern of supernode i in U Returns ------- Updated U, so that U*B = 0. Update is computed by orthogonally (in 2-norm) projecting out the components of span(B) in U in a row-wise fashion. See Also -------- The principal calling routine, pyamg.aggregation.smooth.energy_prolongation_smoother
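Concretely, each row u of U is orthogonally projected against its local near-nullspace block: u <- u - (u B_i)(B_i^H B_i)^{-1} B_i^H. A dense NumPy sketch of that projection for a single row follows; the sizes are illustrative, not taken from the solver.

import numpy as np

# Project the components of span(B_i) out of one row u so that u @ B_i == 0.
rng = np.random.default_rng(0)
B_i = rng.standard_normal((5, 2))           # B restricted to one row's sparsity pattern
u = rng.standard_normal(5)                  # one row of the prolongator update U
BtBinv = np.linalg.inv(B_i.conj().T @ B_i)  # the precomputed inv(B_i^H * B_i)
u_proj = u - (u @ B_i) @ BtBinv @ B_i.conj().T
print(np.allclose(u_proj @ B_i, 0))         # True: constraint satisfied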
15,376
def publish_active_scene(self, scene_id): self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id)) return self.sequence_number
publish changed active scene
15,377
def stream_time(self, significant_digits=3):
    try:
        # NOTE: the 'start'/'stop' timestamp keys are assumptions; the
        # original key literals were not recoverable.
        return round(
            self._timestamps['stop'] - self._timestamps['start'],
            significant_digits)
    except Exception:
        return None
:param significant_digits: int of the number of significant digits in the return :return: float of the time in seconds of how long the data took to stream
15,378
def date_time_this_century( self, before_now=True, after_now=False, tzinfo=None): now = datetime.now(tzinfo) this_century_start = datetime( now.year - (now.year % 100), 1, 1, tzinfo=tzinfo) next_century_start = datetime( min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo) if before_now and after_now: return self.date_time_between_dates( this_century_start, next_century_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_century_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_century_start, now, tzinfo) else: return now
Gets a DateTime object for the current century. :param before_now: include days in current century before today :param after_now: include days in current century after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime
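A short usage sketch, assuming this is the Faker provider method of the same name:

from faker import Faker

fake = Faker()
# Default: any datetime from the start of this century up to now.
print(fake.date_time_this_century())
# Future only, within this century.
print(fake.date_time_this_century(before_now=False, after_now=True))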
15,379
def _to_DOM(self):
    # XML tag names and the 'null' placeholder are inferred from the
    # attribute names; the original string literals were elided.
    root_node = ET.Element('station')
    created_at_node = ET.SubElement(root_node, "created_at")
    created_at_node.text = \
        timeformatutils.to_ISO8601(self.created_at) if self.created_at is not None else 'null'
    updated_at_node = ET.SubElement(root_node, "updated_at")
    updated_at_node.text = \
        timeformatutils.to_ISO8601(self.updated_at) if self.updated_at is not None else 'null'
    station_id_node = ET.SubElement(root_node, 'id')
    station_id_node.text = str(self.id)
    station_id_node = ET.SubElement(root_node, 'external_id')
    station_id_node.text = str(self.external_id)
    station_name_node = ET.SubElement(root_node, 'name')
    station_name_node.text = str(self.name) if self.name is not None else 'null'
    lat_node = ET.SubElement(root_node, 'lat')
    lat_node.text = str(self.lat)
    lon_node = ET.SubElement(root_node, 'lon')
    lon_node.text = str(self.lon)
    alt_node = ET.SubElement(root_node, 'alt')
    alt_node.text = str(self.alt) if self.alt is not None else 'null'
    rank_node = ET.SubElement(root_node, 'rank')
    rank_node.text = str(self.rank) if self.rank is not None else 'null'
    return root_node
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
15,380
def Search(pattern, s): if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s)
Searches the string for the pattern, caching the compiled regexp.
15,381
def GetValues(self): if not self._registry_key and self._registry: self._GetKeyFromRegistry() if self._registry_key: return self._registry_key.GetValues() return iter([])
Retrieves all values within the key. Returns: generator[WinRegistryValue]: Windows Registry value generator.
15,382
def numa_nodemask_to_set(mask): result = set() for i in range(0, get_max_node() + 1): if __nodemask_isset(mask, i): result.add(i) return result
Convert NUMA nodemask to Python set.
15,383
def surfaceIntersection(actor1, actor2, tol=1e-06, lw=3): bf = vtk.vtkIntersectionPolyDataFilter() poly1 = actor1.GetMapper().GetInput() poly2 = actor2.GetMapper().GetInput() bf.SetInputData(0, poly1) bf.SetInputData(1, poly2) bf.Update() actor = Actor(bf.GetOutput(), "k", 1) actor.GetProperty().SetLineWidth(lw) return actor
Intersect 2 surfaces and return a line actor. .. hint:: |surfIntersect.py|_
15,384
def resolve_push_to(push_to, default_url, default_namespace):
    protocol = 'http://' if push_to.startswith('http://') else 'https://'
    # Strip any protocol prefix before splitting the value apart.
    url = push_to = push_to.replace('http://', '').replace('https://', '')
    namespace = default_namespace
    parts = url.split('/', 1)
    special_set = {'.', ':'}
    char_set = set(parts[0])

    if len(parts) == 1:
        # A bare word with no '.' or ':' that is not 'localhost' is a
        # namespace on the default registry, not a registry hostname.
        if not special_set.intersection(char_set) and parts[0] != 'localhost':
            registry_url = default_url
            namespace = push_to
        else:
            registry_url = protocol + parts[0]
    else:
        registry_url = protocol + parts[0]
        namespace = parts[1]

    return registry_url, namespace
Given a push-to value, return the registry and namespace. :param push_to: string: User supplied --push-to value. :param default_url: string: Container engine's default_index value (e.g. docker.io). :return: tuple: registry_url, namespace
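A few illustrative calls showing how the rules above resolve different --push-to values; the registry URLs are examples, not defaults taken from any engine.

resolve_push_to('mynamespace', 'https://index.docker.io', None)
# -> ('https://index.docker.io', 'mynamespace')  bare word: namespace on default registry
resolve_push_to('registry.example.com:5000/team', 'https://index.docker.io', None)
# -> ('https://registry.example.com:5000', 'team')
resolve_push_to('localhost/team', 'https://index.docker.io', None)
# -> ('https://localhost', 'team')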
15,385
def backward(self, diff_x, influences, activations, **kwargs):
    # The 'diff_y' key is assumed from the documented parameter of the
    # same name; the original literal was elided.
    diff_y = kwargs['diff_y']
    bmu = self._get_bmu(activations)
    influence = influences[bmu]
    x_update = np.multiply(diff_x, influence)
    y_update = np.multiply(diff_y, influence)
    return x_update, y_update
Backward pass through the network, including update.

Parameters
----------
diff_x : numpy array
    A matrix containing the differences between the input and neurons.
influences : numpy array
    A matrix containing the influence each neuron has on each other
    neuron. This is used to calculate the updates.
activations : numpy array
    The activations each neuron has to each data point. This is used
    to calculate the BMU.
diff_y : numpy array
    The differences between the input and context neurons, passed via
    **kwargs.

Returns
-------
updates : tuple of arrays
    The updates to the weights and context weights, respectively.
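A minimal numeric sketch of this update; the array shapes are assumptions, chosen so the elementwise multiplications above are well defined.

import numpy as np

# Assumed shapes: diff_x (n_neurons, dim), influences (n_neurons, n_neurons, dim).
diff_x = np.array([[0.2, -0.1], [0.5, 0.3]])
diff_y = np.array([[0.0, 0.4], [-0.2, 0.1]])
influences = np.array([[[1.0, 1.0], [0.5, 0.5]],
                       [[0.5, 0.5], [1.0, 1.0]]])
activations = np.array([0.1, 0.7])   # smaller = closer, so the BMU is neuron 0

bmu = int(np.argmin(activations))    # stand-in for self._get_bmu(activations)
influence = influences[bmu]          # (n_neurons, dim): the BMU's influence row
x_update = np.multiply(diff_x, influence)
y_update = np.multiply(diff_y, influence)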
15,386
def remove_editor(self, username, *args, **kwargs): return self.add_editor(username=username, _delete=True, *args, **kwargs)
Remove an editor from this wiki page.

:param username: The name or Redditor object of the user to remove.

This method points to :meth:`add_editor` with _delete=True.

Additional parameters are passed to :meth:`add_editor` and subsequently
into :meth:`~praw.__init__.BaseReddit.request_json`.
15,387
def from_hex(cls, value):
    if len(value) == 8:
        # 32 bits: an IPv4-sized value.
        return cls(int(value, 16))
    elif len(value) == 32:
        # 128 bits: an IPv6-sized value.
        return cls(int(value, 16))
    else:
        # Error message text assumed; the original format string was elided.
        raise ValueError('%r is not a valid hexadecimal network value' % (value,))
Initialize a new network from hexadecimal notation.
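A usage sketch; `Network` stands in for whatever class defines from_hex here, and the addresses are illustrative.

Network.from_hex('c0a80000')
# -> the 32-bit value for 192.168.0.0
Network.from_hex('20010db8000000000000000000000000')
# -> the 128-bit value for 2001:db8::
Network.from_hex('abc')
# raises ValueError: neither 8 nor 32 hex digits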
15,388
def spawn_worker(params): setup_logging(params) log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency, params.report) worker = Worker(params) worker.start() worker.join()
This method has to be a module-level function

:type params: Params
15,389
def load_excel(self, filepath, **kwargs): try: df = pd.read_excel(filepath, **kwargs) if len(df.index) == 0: self.warning("Empty Excel file. Can not set the dataframe.") return self.df = df except Exception as e: self.err(e, "Can not load Excel file")
Set the main dataframe with the content of an Excel file

:param filepath: path of the Excel file to load,
    can be absolute if it starts with ``/``
    or relative if it starts with ``./``
:type filepath: str
:param kwargs: keyword arguments to pass to Pandas
    ``read_excel`` function

:example: ``ds.load_excel("./myfile.xlsx")``
15,390
def _formatter(self, x=None, y=None, z=None, s=None, label=None, **kwargs):
    def is_date(axis):
        fmt = axis.get_major_formatter()
        return (isinstance(fmt, mdates.DateFormatter)
                or isinstance(fmt, mdates.AutoDateFormatter))

    def format_date(num):
        if num is not None:
            return mdates.num2date(num).strftime(self.date_format)

    # The 'event', 'width' and 'height' kwarg keys are assumptions; the
    # original string literals were elided.
    ax = kwargs['event'].artist.axes
    if is_date(ax.xaxis):
        x = format_date(x)
    else:
        limits = ax.get_xlim()
        x = self._format_coord(x, limits)
        kwargs['width'] = self._format_coord(kwargs.get('width'), limits)
    if is_date(ax.yaxis):
        y = format_date(y)
    else:
        limits = ax.get_ylim()
        y = self._format_coord(y, limits)
        kwargs['height'] = self._format_coord(kwargs.get('height'), limits)

    output = []
    for key, val in zip(['x', 'y', 'z', 's'], [x, y, z, s]):
        if val is not None:
            try:
                output.append(u'{key}: {val:0.3g}'.format(key=key, val=val))
            except ValueError:
                # x and y may already be formatted strings at this point.
                output.append(u'{key}: {val}'.format(key=key, val=val))
    if label:
        output.append(u'Label: {}'.format(label))
    return u'\n'.join(output)
Default formatter function, if no `formatter` kwarg is specified. Takes information about the pick event as a series of kwargs and returns the string to be displayed.
15,391
def expandRecs(G, RecCollect, nodeType, weighted):
    for Rec in RecCollect:
        fullCiteList = [makeID(c, nodeType) for c in Rec.createCitation(multiCite=True)]
        if len(fullCiteList) > 1:
            for i, citeID1 in enumerate(fullCiteList):
                if citeID1 in G:
                    for citeID2 in fullCiteList[i + 1:]:
                        if citeID2 not in G:
                            # Copy the known node's attributes to the new alias.
                            G.add_node(citeID2, **G.node[citeID1])
                            if weighted:
                                G.add_edge(citeID1, citeID2, weight=1)
                            else:
                                G.add_edge(citeID1, citeID2)
                        elif weighted:
                            try:
                                G.edges[citeID1, citeID2]['weight'] += 1
                            except KeyError:
                                G.add_edge(citeID1, citeID2, weight=1)
                        for e1, e2, data in G.edges(citeID1, data=True):
                            G.add_edge(citeID2, e2, **data)
Expand all the citations from _RecCollect_
15,392
def get_asset_content(self, asset_content_id): return AssetContent(self._provider_session.get_asset_content(asset_content_id), self._config_map)
Gets the ``AssetContent`` specified by its ``Id``. In plenary mode, the exact ``Id`` is found or a ``NotFound`` results. Otherwise, the returned ``AssetContent`` may have a different ``Id`` than requested, such as the case where a duplicate ``Id`` was assigned to an ``AssetContent`` and retained for compatibility. :param asset_content_id: the ``Id`` of the ``AssetContent`` to retrieve :type asset_content_id: ``osid.id.Id`` :return: the returned ``AssetContent`` :rtype: ``osid.repository.Asset`` :raise: ``NotFound`` -- no ``AssetContent`` found with the given ``Id`` :raise: ``NullArgument`` -- ``asset_content_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
15,393
def handle(self, *args, **options):
    cron_classes = options['cron_classes']
    if cron_classes:
        cron_class_names = cron_classes
    else:
        cron_class_names = getattr(settings, 'CRON_CLASSES', [])

    try:
        crons_to_run = [get_class(x) for x in cron_class_names]
    except Exception:
        error = traceback.format_exc()
        # Error message text assumed; the original format string was elided.
        self.stdout.write('Make sure these are valid cron class names: %s\n\n%s'
                          % (cron_class_names, error))
        return

    for cron_class in crons_to_run:
        run_cron_with_cache_check(
            cron_class,
            force=options['force'],
            silent=options['silent']
        )
    clear_old_log_entries()
    close_old_connections()
Iterates over all the CRON_CLASSES (or those passed in as a command-line
argument) and runs them.
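For context, the command reads its job list from Django settings; a hedged configuration sketch (the dotted paths are hypothetical):

# settings.py
CRON_CLASSES = [
    'myapp.crons.SendReportsCronJob',
    'myapp.crons.CleanupCronJob',
]

The command is then typically invoked as `python manage.py runcrons`, optionally with explicit class names and the --force / --silent flags handled above.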
15,394
def list_path(root_dir): res = [] if os.path.isdir(root_dir): for name in os.listdir(root_dir): res.append(name) return res
List directory if it exists.

:param root_dir: str
:return: list
15,395
def get_valences(self, structure):
    els = [Element(el.symbol) for el in structure.composition.elements]

    if not set(els).issubset(set(BV_PARAMS.keys())):
        raise ValueError(
            "Structure contains elements not in set of BV parameters!"
        )

    # Group sites by symmetry if a symmetry tolerance was given.
    if self.symm_tol:
        finder = SpacegroupAnalyzer(structure, self.symm_tol)
        symm_structure = finder.get_symmetrized_structure()
        equi_sites = symm_structure.equivalent_sites
    else:
        equi_sites = [[site] for site in structure]

    # Sort the symmetrically distinct sites by decreasing electronegativity.
    equi_sites = sorted(equi_sites,
                        key=lambda sites: -sites[0].species.average_electroneg)

    valences = []
    all_prob = []
    if structure.is_ordered:
        for sites in equi_sites:
            test_site = sites[0]
            nn = structure.get_neighbors(test_site, self.max_radius)
            prob = self._calc_site_probabilities(test_site, nn)
            all_prob.append(prob)
            val = list(prob.keys())
            val = sorted(val, key=lambda v: -prob[v])
            # Retain valences with at least 1% of the highest probability.
            valences.append(
                list(filter(lambda v: prob[v] > 0.01 * prob[val[0]], val)))
    else:
        full_all_prob = []
        for sites in equi_sites:
            test_site = sites[0]
            nn = structure.get_neighbors(test_site, self.max_radius)
            prob = self._calc_site_probabilities_unordered(test_site, nn)
            all_prob.append(prob)
            full_all_prob.extend(prob.values())
            vals = []
            for (elsp, occ) in get_z_ordered_elmap(test_site.species):
                val = list(prob[elsp.symbol].keys())
                val = sorted(val, key=lambda v: -prob[elsp.symbol][v])
                vals.append(
                    list(filter(
                        lambda v: prob[elsp.symbol][v] > 0.001 * prob[
                            elsp.symbol][val[0]], val)))
            valences.append(vals)

    if structure.is_ordered:
        nsites = np.array([len(i) for i in equi_sites])
        vmin = np.array([min(i) for i in valences])
        vmax = np.array([max(i) for i in valences])

        self._n = 0
        self._best_score = 0
        self._best_vset = None

        def evaluate_assignment(v_set):
            el_oxi = collections.defaultdict(list)
            for i, sites in enumerate(equi_sites):
                el_oxi[sites[0].specie.symbol].append(v_set[i])
            max_diff = max([max(v) - min(v) for v in el_oxi.values()])
            if max_diff > 1:
                return
            score = functools.reduce(
                operator.mul,
                [all_prob[i][v] for i, v in enumerate(v_set)])
            if score > self._best_score:
                self._best_vset = v_set
                self._best_score = score

        def _recurse(assigned=[]):
            # Stop exploring once the permutation budget is exhausted.
            if self._n > self.max_permutations:
                return
            i = len(assigned)
            highest = vmax.copy()
            highest[:i] = assigned
            highest *= nsites
            highest = np.sum(highest)
            lowest = vmin.copy()
            lowest[:i] = assigned
            lowest *= nsites
            lowest = np.sum(lowest)
            # Prune branches that can no longer reach charge neutrality.
            if highest < 0 or lowest > 0:
                self._n += 1
                return
            if i == len(valences):
                evaluate_assignment(assigned)
                self._n += 1
                return
            else:
                for v in valences[i]:
                    new_assigned = list(assigned)
                    _recurse(new_assigned + [v])

        _recurse()
    else:
        nsites = np.array([len(i) for i in equi_sites])
        tmp = []
        attrib = []
        for insite, nsite in enumerate(nsites):
            for val in valences[insite]:
                tmp.append(nsite)
                attrib.append(insite)
        new_nsites = np.array(tmp)
        fractions = []
        elements = []
        for sites in equi_sites:
            for sp, occu in get_z_ordered_elmap(sites[0].species):
                elements.append(sp.symbol)
                fractions.append(occu)
        fractions = np.array(fractions, np.float)
        new_valences = []
        for vals in valences:
            for val in vals:
                new_valences.append(val)
        vmin = np.array([min(i) for i in new_valences], np.float)
        vmax = np.array([max(i) for i in new_valences], np.float)

        self._n = 0
        self._best_score = 0
        self._best_vset = None

        def evaluate_assignment(v_set):
            el_oxi = collections.defaultdict(list)
            jj = 0
            for i, sites in enumerate(equi_sites):
                for specie, occu in get_z_ordered_elmap(sites[0].species):
                    el_oxi[specie.symbol].append(v_set[jj])
                    jj += 1
            max_diff = max([max(v) - min(v) for v in el_oxi.values()])
            if max_diff > 2:
                return
            score = six.moves.reduce(
                operator.mul,
[all_prob[attrib[iv]][elements[iv]][vv] for iv, vv in enumerate(v_set)]) if score > self._best_score: self._best_vset = v_set self._best_score = score def _recurse(assigned=[]): if self._n > self.max_permutations: return i = len(assigned) highest = vmax.copy() highest[:i] = assigned highest *= new_nsites highest *= fractions highest = np.sum(highest) lowest = vmin.copy() lowest[:i] = assigned lowest *= new_nsites lowest *= fractions lowest = np.sum(lowest) if (highest < -self.charge_neutrality_tolerance or lowest > self.charge_neutrality_tolerance): self._n += 1 return if i == len(new_valences): evaluate_assignment(assigned) self._n += 1 return else: for v in new_valences[i]: new_assigned = list(assigned) _recurse(new_assigned + [v]) _recurse() if self._best_vset: if structure.is_ordered: assigned = {} for val, sites in zip(self._best_vset, equi_sites): for site in sites: assigned[site] = val return [int(assigned[site]) for site in structure] else: assigned = {} new_best_vset = [] for ii in range(len(equi_sites)): new_best_vset.append(list()) for ival, val in enumerate(self._best_vset): new_best_vset[attrib[ival]].append(val) for val, sites in zip(new_best_vset, equi_sites): for site in sites: assigned[site] = val return [[int(frac_site) for frac_site in assigned[site]] for site in structure] else: raise ValueError("Valences cannot be assigned!")
Returns a list of valences for the structure. This currently works only for ordered structures only. Args: structure: Structure to analyze Returns: A list of valences for each site in the structure (for an ordered structure), e.g., [1, 1, -2] or a list of lists with the valences for each fractional element of each site in the structure (for an unordered structure), e.g., [[2, 4], [3], [-2], [-2], [-2]] Raises: A ValueError if the valences cannot be determined.
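A hedged usage sketch, assuming this is pymatgen's BVAnalyzer; the CIF filename is a placeholder.

from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.structure import Structure

structure = Structure.from_file('NaCl.cif')  # hypothetical input file
analyzer = BVAnalyzer()
print(analyzer.get_valences(structure))      # e.g. [1, -1] for an ordered NaCl cell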
15,396
def _get_menu_meta_width(self, max_width, complete_state): if self._show_meta(complete_state): return min(max_width, max(get_cwidth(c.display_meta) for c in complete_state.current_completions) + 2) else: return 0
Return the width of the meta column.
15,397
def _GetEarliestYearFromFileEntry(self):
    file_entry = self.GetFileEntry()
    if not file_entry:
        return None

    stat_object = file_entry.GetStat()
    # Prefer the creation time; fall back to the change time, per the
    # documented behavior ('crtime'/'ctime' attribute names assumed).
    posix_time = getattr(stat_object, 'crtime', None)
    if posix_time is None:
        posix_time = getattr(stat_object, 'ctime', None)

    if posix_time is None:
        return None

    # Simplified reconstruction: derive the year in UTC; the original
    # timezone handling was not recoverable. Assumes `datetime` is
    # imported at module level.
    return datetime.datetime.utcfromtimestamp(posix_time).year
Retrieves the year from the file entry date and time values. This function uses the creation time if available otherwise the change time (metadata last modification time) is used. Returns: int: year of the file entry or None.
15,398
def get_app_dir(app_name, roaming=True, force_posix=False):
    if WIN:
        key = roaming and 'APPDATA' or 'LOCALAPPDATA'
        folder = os.environ.get(key)
        if folder is None:
            folder = os.path.expanduser('~')
        return os.path.join(folder, app_name)
    if force_posix:
        return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
    if sys.platform == 'darwin':
        return os.path.join(os.path.expanduser(
            '~/Library/Application Support'), app_name)
    return os.path.join(
        os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
        _posixify(app_name))
r"""Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. To give you an idea, for an app called ``"Foo Bar"``, something like the following folders could be returned: Mac OS X: ``~/Library/Application Support/Foo Bar`` Mac OS X (POSIX): ``~/.foo-bar`` Unix: ``~/.config/foo-bar`` Unix (POSIX): ``~/.foo-bar`` Win XP (roaming): ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar`` Win XP (not roaming): ``C:\Documents and Settings\<user>\Application Data\Foo Bar`` Win 7 (roaming): ``C:\Users\<user>\AppData\Roaming\Foo Bar`` Win 7 (not roaming): ``C:\Users\<user>\AppData\Local\Foo Bar`` .. versionadded:: 2.0 :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no affect otherwise. :param force_posix: if this is set to `True` then on any POSIX system the folder will be stored in the home folder with a leading dot instead of the XDG config home or darwin's application support folder.
15,399
def get_results(self, job_id):
    # Path segment assumed from the documented jobs results endpoint;
    # the original format string was elided.
    url = self._url('%s/results' % job_id)
    return self.client.get(url)
Get results of a job Args: job_id (str): The ID of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results