Dataset preview header: an unnamed int64 row index (0 to 389k), ``code`` (strings of 26 to 79.6k characters), and ``docstring`` (strings of 1 to 46.9k characters).
2,900
def add_user(name, profile='github'):
    client = _get_client(profile)
    # NOTE: the config key below is a reconstruction; the original literal was stripped.
    organization = client.get_organization(
        _get_config_value(profile, 'org_name')
    )
    try:
        github_named_user = client.get_user(name)
    except UnknownObjectException:
        log.exception("Resource not found")
        return False
    headers, data = organization._requester.requestJsonAndCheck(
        "PUT",
        organization.url + "/memberships/" + github_named_user._identity
    )
    # Membership is reported as 'pending' until the invited user accepts.
    return data.get('state') == 'pending'
Add a GitHub user. name The user for which to obtain information. profile The name of the profile configuration to use. Defaults to ``github``. CLI Example: .. code-block:: bash salt myminion github.add_user github-handle
2,901
def actionAngleTorus_xvFreqs_c(pot,jr,jphi,jz, angler,anglephi,anglez, tol=0.003): from galpy.orbit.integrateFullOrbit import _parse_pot npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True) R= numpy.empty(len(angler)) vR= numpy.empty(len(angler)) vT= numpy.empty(len(angler)) z= numpy.empty(len(angler)) vz= numpy.empty(len(angler)) phi= numpy.empty(len(angler)) Omegar= numpy.empty(1) Omegaphi= numpy.empty(1) Omegaz= numpy.empty(1) flag= ctypes.c_int(0) ndarrayFlags= (,) actionAngleTorus_xvFreqsFunc= _lib.actionAngleTorus_xvFreqs actionAngleTorus_xvFreqsFunc.argtypes=\ [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.c_int, ndpointer(dtype=numpy.int32,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.c_double, ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.POINTER(ctypes.c_int)] f_cont= [angler.flags[], anglephi.flags[], anglez.flags[]] angler= numpy.require(angler,dtype=numpy.float64,requirements=[,]) anglephi= numpy.require(anglephi,dtype=numpy.float64,requirements=[,]) anglez= numpy.require(anglez,dtype=numpy.float64,requirements=[,]) R= numpy.require(R,dtype=numpy.float64,requirements=[,]) vR= numpy.require(vR,dtype=numpy.float64,requirements=[,]) vT= numpy.require(vT,dtype=numpy.float64,requirements=[,]) z= numpy.require(z,dtype=numpy.float64,requirements=[,]) vz= numpy.require(vz,dtype=numpy.float64,requirements=[,]) phi= numpy.require(phi,dtype=numpy.float64,requirements=[,]) Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=[,]) Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=[,]) Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=[,]) actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr), ctypes.c_double(jphi), ctypes.c_double(jz), ctypes.c_int(len(angler)), angler, anglephi, anglez, ctypes.c_int(npot), pot_type, pot_args, ctypes.c_double(tol), R,vR,vT,z,vz,phi, Omegar,Omegaphi,Omegaz, ctypes.byref(flag)) if f_cont[0]: angler= numpy.asfortranarray(angler) if f_cont[1]: anglephi= numpy.asfortranarray(anglephi) if f_cont[2]: anglez= numpy.asfortranarray(anglez) return (R,vR,vT,z,vz,phi,Omegar[0],Omegaphi[0],Omegaz[0],flag.value)
NAME: actionAngleTorus_xvFreqs_c PURPOSE: compute configuration (x,v) and frequencies of a set of angles on a single torus INPUT: pot - Potential object or list thereof jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) angler - radial angle (array [N]) anglephi - azimuthal angle (array [N]) anglez - vertical angle (array [N]) tol= (0.003) goal for |dJ|/|J| along the torus OUTPUT: (R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag) HISTORY: 2015-08-05/07 - Written - Bovy (UofT)
2,902
def to_dict(cls, obj):
    return {
        k: getattr(obj, k)
        for k in dir(obj)
        if cls.serialisable(k, obj)
    }
Serialises the object, by default serialises anything that isn't prefixed with __, isn't in the blacklist, and isn't callable.
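A minimal, self-contained sketch of how such a serialiser might be used; the ``Serialiser`` and ``Point`` names and the ``serialisable`` policy shown here are illustrative assumptions, not part of the original library:

.. code-block:: python

    class Serialiser:
        @classmethod
        def serialisable(cls, name, obj):
            # example policy: skip dunder names and anything callable
            return not name.startswith('__') and not callable(getattr(obj, name))

        @classmethod
        def to_dict(cls, obj):
            return {k: getattr(obj, k) for k in dir(obj) if cls.serialisable(k, obj)}

    class Point:
        def __init__(self):
            self.x, self.y = 1, 2

    print(Serialiser.to_dict(Point()))  # {'x': 1, 'y': 2}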
2,903
def create_folder_structure(self):
    self.info_file, directories = create_folder_structure(self.project, self.name)
    self.project_dir, self.batch_dir, self.raw_dir = directories
    logger.debug("create folders:" + str(directories))
Creates a folder structure based on the project and batch name. Project - Batch-name - Raw-data-dir The info_df JSON-file will be stored in the Project folder. The summary-files will be saved in the Batch-name folder. The raw data (including exported cycles and ica-data) will be saved to the Raw-data-dir.
2,904
@contextmanager  # implied by the 'yield' and the 'with' usage shown in the docstring
def timed_operation(msg, log_start=False):
    assert len(msg)
    if log_start:
        # log format strings are reconstructions; the original literals were stripped
        logger.info("Start {} ...".format(msg))
    start = timer()
    yield
    msg = msg[0].upper() + msg[1:]
    logger.info("{} finished, time:{:.4f} sec.".format(msg, timer() - start))
Surround a context with a timer. Args: msg(str): the log to print. log_start(bool): whether to print also at the beginning. Example: .. code-block:: python with timed_operation('Good Stuff'): time.sleep(1) Will print: .. code-block:: python Good stuff finished, time:1sec.
2,905
def mcast_ip_mask(ip_addr_and_mask, return_tuple=True):
    regex_mcast_ip_and_mask = __re.compile("^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2][0-9])|[3-9]))$")
    if return_tuple:
        while not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
            print("Not a good multicast IP and CIDR mask combo.")
            print("Please try again.")
            ip_addr_and_mask = input("Please enter a multicast IP address and mask in the following format x.x.x.x/x: ")
        ip_cidr_split = ip_addr_and_mask.split("/")
        ip_addr = ip_cidr_split[0]
        cidr = ip_cidr_split[1]
        return ip_addr, cidr
    elif not return_tuple:
        if not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
            return False
        else:
            return True
Function to check if a address is multicast and that the CIDR mask is good Args: ip_addr_and_mask: Multicast IP address and mask in the following format 239.1.1.1/24 return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False Returns: see return_tuple for return options
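A brief usage sketch, assuming the function above is importable; the interactive re-prompt loop only triggers on invalid input when ``return_tuple=True``:

.. code-block:: python

    ip, cidr = mcast_ip_mask("239.1.1.1/24")                        # ("239.1.1.1", "24")
    is_valid = mcast_ip_mask("239.1.1.1/24", return_tuple=False)    # True
    is_valid = mcast_ip_mask("192.168.1.1/24", return_tuple=False)  # False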
2,906
def join_room(self, room_id_or_alias):
    if not room_id_or_alias:
        raise MatrixError("No alias or room ID to join.")
    path = "/join/%s" % quote(room_id_or_alias)
    return self._send("POST", path)
Performs /join/$room_id Args: room_id_or_alias (str): The room ID or room alias to join.
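A short usage sketch; the client construction shown is an assumption about the surrounding Matrix API wrapper, and only ``join_room`` comes from the code above:

.. code-block:: python

    api = MatrixHttpApi("https://matrix.org", token="some_token")  # hypothetical setup
    response = api.join_room("#example:matrix.org")   # join by alias
    response = api.join_room("!abcdefgh:matrix.org")  # or by room ID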
2,907
def split_header_words(header_values): r assert not isinstance(header_values, str) result = [] for text in header_values: orig_text = text pairs = [] while text: m = HEADER_TOKEN_RE.search(text) if m: text = unmatched(m) name = m.group(1) m = HEADER_QUOTED_VALUE_RE.search(text) if m: text = unmatched(m) value = m.group(1) value = HEADER_ESCAPE_RE.sub(r"\1", value) else: m = HEADER_VALUE_RE.search(text) if m: text = unmatched(m) value = m.group(1) value = value.rstrip() else: value = None pairs.append((name, value)) elif text.lstrip().startswith(","): text = text.lstrip()[1:] if pairs: result.append(pairs) pairs = [] else: non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: , , %s" % (orig_text, text, pairs)) text = non_junk if pairs: result.append(pairs) return result
r"""Parse header values into a list of lists containing key,value pairs. The function knows how to deal with ",", ";" and "=" as well as quoted values after "=". A list of space separated tokens are parsed as if they were separated by ";". If the header_values passed as argument contains multiple values, then they are treated as if they were a single value separated by comma ",". This means that this function is useful for parsing header fields that follow this syntax (BNF as from the HTTP/1.1 specification, but we relax the requirement for tokens). headers = #header header = (token | parameter) *( [";"] (token | parameter)) token = 1*<any CHAR except CTLs or separators> separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) qdtext = <any TEXT except <">> quoted-pair = "\" CHAR parameter = attribute "=" value attribute = token value = token | quoted-string Each header is represented by a list of key/value pairs. The value for a simple token (not part of a parameter) is None. Syntactically incorrect headers will not necessarily be parsed as you would want. This is easier to describe with some examples: >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] >>> split_header_words(['text/html; charset="iso-8859-1"']) [[('text/html', None), ('charset', 'iso-8859-1')]] >>> split_header_words([r'Basic realm="\"foo\bar\""']) [[('Basic', None), ('realm', '"foobar"')]]
2,908
def MRA(biomf, sampleIDs=None, transform=None):
    ra = relative_abundance(biomf, sampleIDs)
    if transform is not None:
        ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()}
              for sample in ra.keys()}
    otuIDs = biomf.ids(axis="observation")
    return mean_otu_pct_abundance(ra, otuIDs)
Calculate the mean relative abundance percentage. :type biomf: A BIOM file. :param biomf: OTU table format. :type sampleIDs: list :param sampleIDs: A list of sample id's from BIOM format OTU table. :param transform: Mathematical function which is used to transform smax to another format. By default, the function has been set to None. :rtype: dict :return: A dictionary keyed on OTUID's and their mean relative abundance for a given number of sampleIDs.
2,909
def run_shell_command(commands, **kwargs):
    p = subprocess.Popen(commands, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, **kwargs)
    output, error = p.communicate()
    return p.returncode, output, error
Run a shell command.
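A minimal usage sketch, assuming the helper above is importable:

.. code-block:: python

    returncode, output, error = run_shell_command(["ls", "-l"])
    if returncode == 0:
        print(output.decode())
    else:
        print("command failed:", error.decode())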
2,910
def xml_filter(self, content): r content = utils.strip_whitespace(content, True) if self.__options[] else content.strip() if not self.__options[]: encoding = self.guess_xml_encoding(content) or self.__encoding self.set_options(encoding=encoding) if self.__options[].lower() != self.__encoding: content = self.strip_xml_header(content.decode(self.__options[], errors=self.__options[])) if self.__options[]: content = utils.html_entity_decode(content) return content
r"""Filter and preprocess xml content :param content: xml content :rtype: str
2,911
def load_simple_endpoint(category, name):
    for ep in pkg_resources.iter_entry_points(category):
        if ep.name == name:
            return ep.load()
    raise KeyError(name)
fetches the entry point registered under ``category`` with the given ``name`` and returns the loaded object; raises ``KeyError`` if no matching plugin is found
2,912
def removeTab(self, index):
    view = self.widget(index)
    if isinstance(view, XView):
        try:
            view.windowTitleChanged.disconnect(self.refreshTitles)
            view.sizeConstraintChanged.disconnect(self.adjustSizeConstraint)
        except:
            pass
    return super(XViewPanel, self).removeTab(index)
Removes the view at the inputed index and disconnects it from the \ panel. :param index | <int>
2,913
def readACTIONRECORD(self):
    action = None
    actionCode = self.readUI8()
    if actionCode != 0:
        actionLength = self.readUI16() if actionCode >= 0x80 else 0
        action = SWFActionFactory.create(actionCode, actionLength)
        action.parse(self)
    return action
Read a SWFActionRecord
2,914
def cutR_seq(seq, cutR, max_palindrome):
    # Standard DNA complement map (the original literal was stripped in extraction;
    # reconstructed to match the docstring examples).
    complement_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    if cutR < max_palindrome:
        # Negative effective cut: append a reverse-complement (palindromic) insertion.
        seq = seq + ''.join([complement_dict[nt] for nt in seq[cutR - max_palindrome:]][::-1])
    else:
        seq = seq[:len(seq) - cutR + max_palindrome]
    return seq
Cut genomic sequence from the right. Parameters ---------- seq : str Nucleotide sequence to be cut from the right cutR : int cutR - max_palindrome = how many nucleotides to cut from the right. Negative cutR implies complementary palindromic insertions. max_palindrome : int Length of the maximum palindromic insertion. Returns ------- seq : str Nucleotide sequence after being cut from the right Examples -------- >>> cutR_seq('TGCGCCAGCAGTGAGTC', 0, 4) 'TGCGCCAGCAGTGAGTCGACT' >>> cutR_seq('TGCGCCAGCAGTGAGTC', 8, 4) 'TGCGCCAGCAGTG'
2,915
def to_tgt(self): enc_part = EncryptedData({: 1, : b}) tgt_rep = {} tgt_rep[] = krb5_pvno tgt_rep[] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep[] = self.server.realm.to_string() tgt_rep[] = self.client.to_asn1()[0] tgt_rep[] = Ticket.load(self.ticket.to_asn1()).native tgt_rep[] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
2,916
def from_string(cls, link):
    ma = cls._pattern.search(link)
    if ma is None:
        raise ValueError(link)
    # NOTE: the group name was stripped; 'id' is an assumption based on the docstring.
    id = ma.group('id')
    return cls(id)
Return a new SheetUrl instance from parsed URL string. >>> SheetUrl.from_string('https://docs.google.com/spreadsheets/d/spam') <SheetUrl id='spam' gid=0>
2,917
def run_with(self, inputs, options): self._inputs = inputs self._options = options
Store the run parameters (inputs and options)
2,918
def from_json(cls, data):
    # Key names and the assert message are reconstructed from the docstring's schema;
    # the original literals were stripped.
    required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',
                     'humidity_condition', 'wind_condition', 'sky_condition')
    for key in required_keys:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    return cls(data['name'], data['day_type'],
               Location.from_json(data['location']),
               DryBulbCondition.from_json(data['dry_bulb_condition']),
               HumidityCondition.from_json(data['humidity_condition']),
               WindCondition.from_json(data['wind_condition']),
               SkyCondition.from_json(data['sky_condition']))
Create a Design Day from a dictionary. Args: data = { "name": string, "day_type": string, "location": ladybug Location schema, "dry_bulb_condition": ladybug DryBulbCondition schema, "humidity_condition": ladybug HumidityCondition schema, "wind_condition": ladybug WindCondition schema, "sky_condition": ladybug SkyCondition schema}
2,919
def do_b0(self, line): self.application.apply_update(opendnp3.Binary(False), index=6)
Send the Master a BinaryInput (group 2) value of False at index 6. Command syntax is: b0
2,920
def get_base(vpc, **conn): base_result = describe_vpcs(VpcIds=[vpc["id"]], **conn)[0] vpc_name = None for t in base_result.get("Tags", []): if t["Key"] == "Name": vpc_name = t["Value"] dhcp_opts = None if base_result.get("DhcpOptionsId"): dhcp_opts = describe_dhcp_options(DhcpOptionsIds=[base_result["DhcpOptionsId"]], **conn)[0]["DhcpOptionsId"] attributes = {} attr_vals = [ ("EnableDnsHostnames", "enableDnsHostnames"), ("EnableDnsSupport", "enableDnsSupport") ] for attr, query in attr_vals: attributes[attr] = describe_vpc_attribute(VpcId=vpc["id"], Attribute=query, **conn)[attr] vpc.update({ : vpc_name, : conn["region"], : base_result.get("Tags", []), : base_result["IsDefault"], : base_result["InstanceTenancy"], : dhcp_opts, : base_result["CidrBlock"], : base_result.get("CidrBlockAssociationSet", []), : base_result.get("Ipv6CidrBlockAssociationSet", []), : attributes, : 1 }) return vpc
The base will return: - ARN - Region - Name - Id - Tags - IsDefault - InstanceTenancy - CidrBlock - CidrBlockAssociationSet - Ipv6CidrBlockAssociationSet - DhcpOptionsId - Attributes - _version :param bucket_name: :param conn: :return:
2,921
def attributes(self):
    if self._attributes is None:
        self._filters, self._attributes = self._fetch_configuration()
    return self._attributes
List of attributes available for the dataset (cached).
2,922
def _fw_rule_create(self, drvr_name, data, cache): tenant_id = data.get().get() fw_rule = data.get() rule = self._fw_rule_decode_store(data) fw_pol_id = fw_rule.get() rule_id = fw_rule.get() if tenant_id not in self.fwid_attr: self.fwid_attr[tenant_id] = FwMapAttr(tenant_id) self.fwid_attr[tenant_id].store_rule(rule_id, rule) if not cache: self._check_create_fw(tenant_id, drvr_name) self.tenant_db.store_rule_tenant(rule_id, tenant_id) if fw_pol_id is not None and not ( self.fwid_attr[tenant_id].is_policy_present(fw_pol_id)): pol_data = self.os_helper.get_fw_policy(fw_pol_id) if pol_data is not None: self.fw_policy_create(pol_data, cache=cache)
Firewall Rule create routine. This function updates its local cache with rule parameters. It checks if local cache has information about the Policy associated with the rule. If not, it means a restart has happened. It retrieves the policy associated with the FW by calling Openstack API's and calls t he policy create internal routine.
2,923
def execute_function(function_request): dispatch_table = getattr(settings, , None) if dispatch_table is None: raise BeanstalkDispatchError() for key in (FUNCTION, ARGS, KWARGS): if key not in function_request.keys(): raise BeanstalkDispatchError( .format(key)) function_path = dispatch_table.get( function_request[FUNCTION], ) if function_path: runnable = locate(function_path) if not runnable: raise BeanstalkDispatchError( .format(function_path)) args = function_request[ARGS] kwargs = function_request[KWARGS] if inspect.isclass(runnable): if issubclass(runnable, SafeTask): task = runnable() else: raise BeanstalkDispatchError( .format( function_request[FUNCTION])) else: task = SafeTask() task.run = runnable task.process(*args, **kwargs) else: raise BeanstalkDispatchError( .format( function_request[FUNCTION]))
Given a request created by `beanstalk_dispatch.common.create_request_body`, executes the request. This function is to be run on a beanstalk worker.
2,924
def _create_alignment_button(self): iconnames = ["AlignTop", "AlignCenter", "AlignBottom"] bmplist = [icons[iconname] for iconname in iconnames] self.alignment_tb = _widgets.BitmapToggleButton(self, bmplist) self.alignment_tb.SetToolTipString(_(u"Alignment")) self.Bind(wx.EVT_BUTTON, self.OnAlignment, self.alignment_tb) self.AddControl(self.alignment_tb)
Creates vertical alignment button
2,925
def heartbeat(self): try: with create_session() as session: job = session.query(BaseJob).filter_by(id=self.id).one() make_transient(job) session.commit() if job.state == State.SHUTDOWN: self.kill() is_unit_test = conf.getboolean(, ) if not is_unit_test: sleep_for = 0 if job.latest_heartbeat: seconds_remaining = self.heartrate - \ (timezone.utcnow() - job.latest_heartbeat)\ .total_seconds() sleep_for = max(0, seconds_remaining) sleep(sleep_for) with create_session() as session: job = session.query(BaseJob).filter(BaseJob.id == self.id).first() job.latest_heartbeat = timezone.utcnow() session.merge(job) session.commit() self.heartbeat_callback(session=session) self.log.debug() except OperationalError as e: self.log.error("Scheduler heartbeat got an exception: %s", str(e))
Heartbeats update the job's entry in the database with a timestamp for the latest_heartbeat and allows for the job to be killed externally. This allows at the system level to monitor what is actually active. For instance, an old heartbeat for SchedulerJob would mean something is wrong. This also allows for any job to be killed externally, regardless of who is running it or on which machine it is running. Note that if your heartbeat is set to 60 seconds and you call this method after 10 seconds of processing since the last heartbeat, it will sleep 50 seconds to complete the 60 seconds and keep a steady heart rate. If you go over 60 seconds before calling it, it won't sleep at all.
2,926
def write(self, data):
    proxy.state(self).digest.update(data)
    return proxy.original(self).write(data)
Intercepted method for writing data. :param data: Data to write :returns: Whatever the original method returns :raises: Whatever the original method raises This method updates the internal digest object with with the new data and then proceeds to call the original write method.
2,927
def read(self): writes = [c for c in connections if c.pending()] try: readable, writable, exceptable = select.select( connections, writes, connections, self._timeout) except exceptions.ConnectionClosedException: logger.exception() return [] except select.error: logger.exception() return [] if not (readable or writable or exceptable): logger.debug() return [] responses = [] if (isinstance(res, Response) and res.data == HEARTBEAT): logger.info(, conn) conn.nop() logger.debug() self.last_recv_timestamp = time.time() continue elif isinstance(res, Error): nonfatal = ( exceptions.FinFailedException, exceptions.ReqFailedException, exceptions.TouchFailedException ) if not isinstance(res.exception(), nonfatal): self.close_connection(conn) for conn in exceptable: self.close_connection(conn) return responses
Read from any of the connections that need it
2,928
def remove_tag(self, tag): tags = self.get_tags() tags.remove(tag) post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(), tags=escape(",".join(tags))) self._conn.put(, post_data) self._device_json = None
Remove tag from existing device tags :param tag: the tag to be removed from the list :raises ValueError: If tag does not exist in list
2,929
def write_info(dirs, parallel, config): if parallel["type"] in ["ipython"] and not parallel.get("run_local"): out_file = _get_cache_file(dirs, parallel) if not utils.file_exists(out_file): sys_config = copy.deepcopy(config) minfos = _get_machine_info(parallel, sys_config, dirs, config) with open(out_file, "w") as out_handle: yaml.safe_dump(minfos, out_handle, default_flow_style=False, allow_unicode=False)
Write cluster or local filesystem resources, spinning up cluster if not present.
2,930
def set_up(self): self.path.profile = self.path.gen.joinpath("profile") if not self.path.profile.exists(): self.path.profile.mkdir() self.python = hitchpylibrarytoolkit.project_build( "strictyaml", self.path, self.given["python version"], {"ruamel.yaml": self.given["ruamel version"]}, ).bin.python self.example_py_code = ( ExamplePythonCode(self.python, self.path.gen) .with_code(self.given.get("code", "")) .with_setup_code( self.given.get("setup", "") ) .with_terminal_size(160, 100) .with_strings( yaml_snippet_1=self.given.get("yaml_snippet_1"), yaml_snippet=self.given.get("yaml_snippet"), yaml_snippet_2=self.given.get("yaml_snippet_2"), modified_yaml_snippet=self.given.get("modified_yaml_snippet"), ) )
Set up your applications and the test environment.
2,931
def check_aggregations_privacy(self, aggregations_params): fields = self.get_aggregations_fields(aggregations_params) fields_dict = dictset.fromkeys(fields) fields_dict[] = self.view.Model.__name__ try: validate_data_privacy(self.view.request, fields_dict) except wrappers.ValidationError as ex: raise JHTTPForbidden( .format(ex))
Check per-field privacy rules in aggregations. Privacy is checked by making sure user has access to the fields used in aggregations.
2,932
def Maybe(validator):
    @wraps(Maybe)
    def built(value):
        if value is not None:
            return validator(value)
    return built
Wraps the given validator callable, only using it for the given value if it is not ``None``.
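A small usage sketch, assuming the wrapper above and a trivial validator:

.. code-block:: python

    def positive(value):
        if value <= 0:
            raise ValueError("must be positive")
        return value

    maybe_positive = Maybe(positive)
    maybe_positive(5)     # -> 5
    maybe_positive(None)  # -> None, validator is skipped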
2,933
def pif_list(call=None): if call != : raise SaltCloudSystemExit( ) ret = {} session = _get_session() pifs = session.xenapi.PIF.get_all() for pif in pifs: record = session.xenapi.PIF.get_record(pif) ret[record[]] = record return ret
Get a list of PIFs (physical network interfaces) .. code-block:: bash salt-cloud -f pif_list myxen
2,934
def all(self, *, collection, attribute, word, func=None, operation=None):
    return self.iterable('all', collection=collection, attribute=attribute,
                         word=word, func=func, operation=operation)
Performs a filter with the OData 'all' keyword on the collection For example: q.all(collection='email_addresses', attribute='address', operation='eq', word='[email protected]') will transform to a filter such as: emailAddresses/all(a:a/address eq '[email protected]') :param str collection: the collection to apply the all keyword on :param str attribute: the attribute of the collection to check :param str word: the word to check :param str func: the logical function to apply to the attribute inside the collection :param str operation: the logical operation to apply to the attribute inside the collection :rtype: Query
2,935
def get_new_connection(self, connection_params): name = connection_params.pop() es = connection_params.pop() connection_params[] = OrderedDict if self.client_connection is not None: self.client_connection.close() self.client_connection = Database.connect(**connection_params) database = self.client_connection[name] self.djongo_connection = DjongoClient(database, es) return self.client_connection[name]
Receives a dictionary connection_params to setup a connection to the database. Dictionary correct setup is made through the get_connection_params method. TODO: This needs to be made more generic to accept other MongoClient parameters.
2,936
def is_tp(self, atol=None, rtol=None):
    choi = _to_choi(self.rep, self._data, *self.dim)
    return self._is_tp_helper(choi, atol, rtol)
Test if a channel is trace-preserving (TP)
2,937
def density(self, *args):
    M = self.mass(*args) * MSUN
    V = 4. / 3 * np.pi * (self.radius(*args) * RSUN) ** 3
    return M / V
Mean density in g/cc
2,938
def load_manual_sequence_file(self, ident, seq_file, copy_file=False, outdir=None, set_as_representative=False): if copy_file: if not outdir: outdir = self.sequence_dir if not outdir: raise ValueError() shutil.copy(seq_file, outdir) seq_file = op.join(outdir, seq_file) manual_sequence = SeqProp(id=ident, sequence_path=seq_file, seq=None) self.sequences.append(manual_sequence) if set_as_representative: self.representative_sequence = manual_sequence return self.sequences.get_by_id(ident)
Load a manual sequence, given as a FASTA file and optionally set it as the representative sequence. Also store it in the sequences attribute. Args: ident (str): Sequence ID seq_file (str): Path to sequence FASTA file copy_file (bool): If the FASTA file should be copied to the protein's sequences folder or the ``outdir``, if protein folder has not been set outdir (str): Path to output directory set_as_representative (bool): If this sequence should be set as the representative one Returns: SeqProp: Sequence that was loaded into the ``sequences`` attribute
2,939
def kong_61_2007(): r dlf = DigitalFilter(, ) dlf.base = np.array([ 2.3517745856009100e-02, 2.6649097336355482e-02, 3.0197383422318501e-02, 3.4218118311666032e-02, 3.8774207831722009e-02, 4.3936933623407420e-02, 4.9787068367863938e-02, 5.6416139503777350e-02, 6.3927861206707570e-02, 7.2439757034251456e-02, 8.2084998623898800e-02, 9.3014489210663506e-02, 1.0539922456186430e-01, 1.1943296826671961e-01, 1.3533528323661270e-01, 1.5335496684492850e-01, 1.7377394345044520e-01, 1.9691167520419400e-01, 2.2313016014842979e-01, 2.5283959580474641e-01, 2.8650479686019009e-01, 3.2465246735834979e-01, 3.6787944117144239e-01, 4.1686201967850839e-01, 4.7236655274101469e-01, 5.3526142851899028e-01, 6.0653065971263342e-01, 6.8728927879097224e-01, 7.7880078307140488e-01, 8.8249690258459546e-01, 1.0000000000000000e+00, 1.1331484530668261e+00, 1.2840254166877421e+00, 1.4549914146182010e+00, 1.6487212707001280e+00, 1.8682459574322221e+00, 2.1170000166126748e+00, 2.3988752939670981e+00, 2.7182818284590451e+00, 3.0802168489180310e+00, 3.4903429574618419e+00, 3.9550767229205772e+00, 4.4816890703380636e+00, 5.0784190371800806e+00, 5.7546026760057307e+00, 6.5208191203301116e+00, 7.3890560989306504e+00, 8.3728974881272649e+00, 9.4877358363585262e+00, 1.0751013186076360e+01, 1.2182493960703470e+01, 1.3804574186067100e+01, 1.5642631884188170e+01, 1.7725424121461639e+01, 2.0085536923187671e+01, 2.2759895093526730e+01, 2.5790339917193059e+01, 2.9224283781234941e+01, 3.3115451958692312e+01, 3.7524723159601002e+01, 4.2521082000062783e+01]) dlf.factor = np.array([1.1331484530668261]) dlf.j0 = np.array([ 1.4463210615326699e+02, -1.1066222143752420e+03, 3.7030010025325978e+03, -6.8968188464424520e+03, 7.1663544112656937e+03, -2.4507884783377681e+03, -4.0166567754046082e+03, 6.8623845298546094e+03, -5.0013321011775661e+03, 2.1291291365196648e+03, -1.3845222435542289e+03, 2.1661554291595580e+03, -2.2260393789657141e+03, 8.0317156013986391e+02, 1.0142221718890841e+03, -1.9350455051432630e+03, 1.6601169447226580e+03, -7.5159684285420133e+02, -9.0315984178183285e+01, 5.0705574889546148e+02, -5.1207646422722519e+02, 2.9722959494490038e+02, -5.0248319908072993e+01, -1.2290725861955920e+02, 1.9695244755899429e+02, -1.9175679966946601e+02, 1.4211755630338590e+02, -7.7463216543224149e+01, 1.7638009334931201e+01, 2.8855056499202671e+01, -5.9225643887809561e+01, 7.5987941373668960e+01, -8.1687962781233580e+01, 8.0599209238447102e+01, -7.4895905328771619e+01, 6.7516291538794434e+01, -5.9325033647358048e+01, 5.1617042242841528e+01, -4.4664967446820263e+01, 3.8366152052928278e+01, -3.3308787868993100e+01, 2.8278671651033459e+01, -2.4505863388620480e+01, 2.0469632532079750e+01, -1.7074034940700429e+01, 1.4206119215530070e+01, -1.0904435643084650e+01, 8.7518389425802283e+00, -6.7721665239085622e+00, 4.5096884588095891e+00, -3.2704247166629590e+00, 2.6827195063720430e+00, -1.8406031821386459e+00, 9.1586697140412443e-01, -3.2436011485890798e-01, 8.0675176189581893e-02, -1.2881307195759690e-02, 7.0489137468452920e-04, 2.3846917590855061e-04, -6.9102205995825531e-05, 6.7792635718095777e-06]) dlf.j1 = np.array([ 4.6440396425864918e+01, -4.5034239857914162e+02, 1.7723440076223640e+03, -3.7559735516994660e+03, 4.4736494009764137e+03, -2.2476603569606068e+03, -1.5219842155931799e+03, 3.4904608559273802e+03, -2.4814243247472318e+03, 5.7328164634108396e+02, 5.3132044837659631e-01, 6.8895205008006235e+02, -1.2012013872160269e+03, 7.9679138423597340e+02, 4.9874460187939818e+01, -5.6367338332457007e+02, 4.7971936503711203e+02, -5.8979702298044558e+01, 
-3.1935800954986922e+02, 4.5762551999442371e+02, -3.7239927283248380e+02, 1.8255852885279569e+02, -2.3504740340815669e-01, -1.1588151583545380e+02, 1.5740956677133170e+02, -1.4334746114883359e+02, 9.9857411013284818e+01, -4.8246322019171487e+01, 2.0371404343057380e+00, 3.3003938094974323e+01, -5.5476151884197712e+01, 6.7354852323852583e+01, -7.0735403363284121e+01, 6.8872932663164747e+01, -6.3272750944993042e+01, 5.6501568721817442e+01, -4.8706577819918110e+01, 4.1737211284663481e+01, -3.4776621242200903e+01, 2.9161717578906430e+01, -2.3886749056000909e+01, 1.9554007583544220e+01, -1.5966397353366460e+01, 1.2429310210239199e+01, -1.0139180791868180e+01, 7.4716493393871861e+00, -5.5509479014742613e+00, 4.3380799768234208e+00, -2.5911516181746550e+00, 1.6300524630626780e+00, -1.4041567266387460e+00, 7.5225141726873213e-01, 4.6808777208492733e-02, -3.6630197849601159e-01, 2.8948389902792782e-01, -1.3705521898064801e-01, 4.6292091649913013e-02, -1.1721281347435180e-02, 2.2002397354029149e-03, -2.8146036357227600e-04, 1.8788896009128770e-05]) return dlf
r"""Kong 61 pt Hankel filter, as published in [Kong07]_. Taken from file ``FilterModules.f90`` provided with 1DCSEM_. License: `Apache License, Version 2.0, <http://www.apache.org/licenses/LICENSE-2.0>`_.
2,940
def feed_ssldata(self, data): if self._state == self.S_UNWRAPPED: return ([], [data] if data else []) ssldata = []; appdata = [] self._need_ssldata = False if data: self._incoming.write(data) try: if self._state == self.S_DO_HANDSHAKE: self._sslobj.unwrap() self._sslobj = None self._state = self.S_UNWRAPPED if self._shutdown_cb: self._shutdown_cb() if self._state == self.S_UNWRAPPED: appdata.append(self._incoming.read()) except (ssl.SSLError, sslcompat.CertificateError) as e: if getattr(e, , None) not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL): if self._state == self.S_DO_HANDSHAKE and self._handshake_cb: self._handshake_cb(e) raise self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ if self._outgoing.pending: ssldata.append(self._outgoing.read()) return (ssldata, appdata)
Feed SSL record level data into the pipe. The data must be a bytes instance. It is OK to send an empty bytes instance. This can be used to get ssldata for a handshake initiated by this endpoint. Return a (ssldata, appdata) tuple. The ssldata element is a list of buffers containing SSL data that needs to be sent to the remote SSL. The appdata element is a list of buffers containing plaintext data that needs to be forwarded to the application. The appdata list may contain an empty buffer indicating an SSL "close_notify" alert. This alert must be acknowledged by calling :meth:`shutdown`.
2,941
def build_from_generator(cls, generator, target_size, max_subtoken_length=None, reserved_tokens=None): token_counts = collections.defaultdict(int) for item in generator: for tok in tokenizer.encode(native_to_unicode(item)): token_counts[tok] += 1 encoder = cls.build_to_target_size( target_size, token_counts, 1, 1e3, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens) return encoder
Builds a SubwordTextEncoder from the generated text. Args: generator: yields text. target_size: int, approximate vocabulary size to create. max_subtoken_length: Maximum length of a subtoken. If this is not set, then the runtime and memory use of creating the vocab is quadratic in the length of the longest token. If this is set, then it is instead O(max_subtoken_length * length of longest token). reserved_tokens: List of reserved tokens. The global variable `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this argument is `None`, it will use `RESERVED_TOKENS`. Returns: SubwordTextEncoder with `vocab_size` approximately `target_size`.
2,942
async def _get_difference(self, channel_id, pts_date): self.client._log[__name__].debug() if channel_id: try: where = await self.client.get_input_entity(channel_id) except ValueError: return result = await self.client(functions.updates.GetChannelDifferenceRequest( channel=where, filter=types.ChannelMessagesFilterEmpty(), pts=pts_date, limit=100, force=True )) else: result = await self.client(functions.updates.GetDifferenceRequest( pts=pts_date[0], date=pts_date[1], qts=0 )) if isinstance(result, (types.updates.Difference, types.updates.DifferenceSlice, types.updates.ChannelDifference, types.updates.ChannelDifferenceTooLong)): self.original_update._entities.update({ utils.get_peer_id(x): x for x in itertools.chain(result.users, result.chats) }) if not self._load_entities(): self.client._log[__name__].info( , getattr(self.original_update, , None) )
Get the difference for this `channel_id` if any, then load entities. Calls :tl:`updates.getDifference`, which fills the entities cache (always done by `__call__`) and lets us know about the full entities.
2,943
def benchmark(self, func, gpu_args, instance, times, verbose): logging.debug( + instance.name) logging.debug(, *instance.threads) logging.debug(, *instance.grid) time = None try: time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times) except Exception as e: skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"] if any([skip_str in str(e) for skip_str in skippable_exceptions]): logging.debug() if verbose: print("skipping config", instance.name, "reason: too many resources requested for launch") else: logging.debug( + str(e)) print("Error while benchmarking:", instance.name) raise e return time
benchmark the kernel instance
2,944
def rollback(self, dt):
    if not self.onOffset(dt):
        businesshours = self._get_business_hours_by_sec
        if self.n >= 0:
            dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
        else:
            dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
    return dt
Roll provided date backward to next offset only if not on offset.
2,945
def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):
    algorithm = RsaAlgorithmIdentifier()
    algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID
    pkcs8_key = PKCS8PrivateKey()
    pkcs8_key["version"] = 0
    pkcs8_key["privateKeyAlgorithm"] = algorithm
    pkcs8_key["privateKey"] = pkcs1_key
    return encoder.encode(pkcs8_key)
Convert a PKCS1-encoded RSA private key to PKCS8.
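A brief usage sketch, assuming the converter above; it takes PKCS1 DER bytes and returns re-encoded PKCS8 DER (file names are illustrative):

.. code-block:: python

    with open("key_pkcs1.der", "rb") as fh:
        pkcs1_der = fh.read()

    pkcs8_der = rsa_private_key_pkcs1_to_pkcs8(pkcs1_der)

    with open("key_pkcs8.der", "wb") as fh:
        fh.write(pkcs8_der)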
2,946
def _load_data_alignment(self, chain1, chain2): parser = PDB.PDBParser(QUIET=True) ppb = PDB.PPBuilder() structure1 = parser.get_structure(chain1, self.pdb1) structure2 = parser.get_structure(chain2, self.pdb2) seq1 = str(ppb.build_peptides(structure1)[0].get_sequence()) seq2 = str(ppb.build_peptides(structure2)[0].get_sequence()) align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0] indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1])) if s1 != and s2 != ) coord1 = np.hstack([np.concatenate((r[].get_coord(), (1,)))[:, None] for i, r in enumerate(structure1.get_residues()) if i in indexes and in r]).astype(DTYPE, copy=False) coord2 = np.hstack([np.concatenate((r[].get_coord(), (1,)))[:, None] for i, r in enumerate(structure2.get_residues()) if i in indexes and in r]).astype(DTYPE, copy=False) self.coord1 = coord1 self.coord2 = coord2 self.N = len(seq1)
Extract the sequences from the PDB file, perform the alignment, and load the coordinates of the CA of the common residues.
2,947
def export_coreml(self, filename): import coremltools from coremltools.proto.FeatureTypes_pb2 import ArrayFeatureType from .._mxnet import _mxnet_utils prob_name = self.target + def get_custom_model_spec(): from coremltools.models.neural_network import NeuralNetworkBuilder from coremltools.models.datatypes import Array, Dictionary, String input_name = input_length = self._feature_extractor.output_length builder = NeuralNetworkBuilder([(input_name, Array(input_length,))], [(prob_name, Dictionary(String))], ) ctx = _mxnet_utils.get_mxnet_context()[0] input_name, output_name = input_name, 0 for i, cur_layer in enumerate(self._custom_classifier): W = cur_layer.weight.data(ctx).asnumpy() nC, nB = W.shape Wb = cur_layer.bias.data(ctx).asnumpy() builder.add_inner_product(name="inner_product_"+str(i), W=W, b=Wb, input_channels=nB, output_channels=nC, has_bias=True, input_name=str(input_name), output_name=+str(output_name)) if cur_layer.act: builder.add_activation("activation"+str(i), , +str(output_name), str(output_name)) input_name = i output_name = i + 1 last_output = builder.spec.neuralNetworkClassifier.layers[-1].output[0] builder.add_softmax(, last_output, self.target) builder.set_class_labels(self.classes) builder.set_input([input_name], [(input_length,)]) builder.set_output([self.target], [(self.num_classes,)]) return builder.spec top_level_spec = coremltools.proto.Model_pb2.Model() top_level_spec.specificationVersion = 3 desc = top_level_spec.description input = desc.input.add() input.name = self.feature input.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value() input.type.multiArrayType.shape.append(15600) prob_output = desc.output.add() prob_output.name = prob_name label_output = desc.output.add() label_output.name = desc.predictedFeatureName = desc.predictedProbabilitiesName = prob_name if type(self.classes[0]) == int: prob_output.type.dictionaryType.int64KeyType.MergeFromString(b) label_output.type.int64Type.MergeFromString(b) else: prob_output.type.dictionaryType.stringKeyType.MergeFromString(b) label_output.type.stringType.MergeFromString(b) pipeline = top_level_spec.pipelineClassifier.pipeline preprocessing_model = pipeline.models.add() preprocessing_model.customModel.className = preprocessing_model.specificationVersion = 3 preprocessing_input = preprocessing_model.description.input.add() preprocessing_input.CopyFrom(input) preprocessed_output = preprocessing_model.description.output.add() preprocessed_output.name = preprocessed_output.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value() preprocessed_output.type.multiArrayType.shape.append(1) preprocessed_output.type.multiArrayType.shape.append(96) preprocessed_output.type.multiArrayType.shape.append(64) feature_extractor_spec = self._feature_extractor.get_spec() pipeline.models.add().CopyFrom(feature_extractor_spec) pipeline.models[-1].description.input[0].name = preprocessed_output.name pipeline.models[-1].neuralNetwork.layers[0].input[0] = preprocessed_output.name pipeline.models.add().CopyFrom(get_custom_model_spec()) prob_output_type = pipeline.models[-1].description.output[0].type.dictionaryType if type(self.classes[0]) == int: prob_output_type.int64KeyType.MergeFromString(b) else: prob_output_type.stringKeyType.MergeFromString(b) mlmodel = coremltools.models.MLModel(top_level_spec) mlmodel.save(filename)
Save the model in Core ML format. See Also -------- save Examples -------- >>> model.export_coreml('./myModel.mlmodel')
2,948
def splitSymbol(self, index): row = [0,0,1,1,2,2,1,3,2,3,3][index>>6] col = [0,1,0,1,0,1,2,0,2,1,2][index>>6] insertLengthCode = row<<3 | index>>3&7 if row: insertLengthCode -= 8 copyLengthCode = col<<3 | index&7 return ( Symbol(self.insertLengthAlphabet, insertLengthCode), Symbol(self.copyLengthAlphabet, copyLengthCode), row==0 )
Give relevant values for computations: (insertSymbol, copySymbol, dist0flag)
2,949
def affiliation_history(self):
    # NOTE: the JSON key names below were stripped in extraction; the paths shown
    # ('affiliation-history'/'affiliation' and '@id') are assumptions based on the
    # Scopus author-retrieval response format.
    affs = self._json.get('affiliation-history', {}).get('affiliation')
    try:
        return [d['@id'] for d in affs]
    except TypeError:  # no affiliation history available
        return None
Unordered list of IDs of all affiliations the author was affiliated with according to Scopus.
2,950
def import_crud(app):
    try:
        app_path = import_module(app).__path__
    except (AttributeError, ImportError):
        return None
    try:
        # Look for a 'crud' submodule inside the app package.
        imp.find_module('crud', app_path)
    except ImportError:
        return None
    module = import_module("%s.crud" % app)
    return module
Import crud module and register all model cruds which it contains
2,951
def to_url(self): base_url = urlparse(self.url) if PY3: query = parse_qs(base_url.query) for k, v in self.items(): query.setdefault(k, []).append(to_utf8_optional_iterator(v)) scheme = base_url.scheme netloc = base_url.netloc path = base_url.path params = base_url.params fragment = base_url.fragment else: query = parse_qs(to_utf8(base_url.query)) for k, v in self.items(): query.setdefault(to_utf8(k), []).append(to_utf8_optional_iterator(v)) scheme = to_utf8(base_url.scheme) netloc = to_utf8(base_url.netloc) path = to_utf8(base_url.path) params = to_utf8(base_url.params) fragment = to_utf8(base_url.fragment) url = (scheme, netloc, path, params, urlencode(query, True), fragment) return urlunparse(url)
Serialize as a URL for a GET request.
2,952
def get_activities_for_project(self, module=None, **kwargs):
    # 'module' keyword name reconstructed; the original literal was stripped.
    _module_id = kwargs.get('module', module)
    _activities_url = ACTIVITIES_URL.format(module_id=_module_id)
    return self._request_api(url=_activities_url).json()
Get the related activities of a project. :param str module: Stages of a given module :return: JSON
2,953
def _options_request(self, url, **kwargs):
    # Key names reconstructed from the method's purpose; the originals were stripped.
    request_kwargs = {
        'method': 'OPTIONS',
        'url': url
    }
    for key, value in kwargs.items():
        request_kwargs[key] = value
    return self._request(**request_kwargs)
a method to catch and report http options request connectivity errors
2,954
def pathconf(path, os_name=os.name, isdir_fnc=os.path.isdir,
             pathconf_fnc=getattr(os, 'pathconf', None),
             pathconf_names=getattr(os, 'pathconf_names', ())):
    if pathconf_fnc and pathconf_names:
        return {key: pathconf_fnc(path, key) for key in pathconf_names}
    # Fallback constants; the OS-name and key literals below are reconstructions
    # (the originals were stripped in extraction).
    if os_name == 'nt':
        maxpath = 246 if isdir_fnc(path) else 259
    else:
        maxpath = 255
    return {
        'PC_PATH_MAX': maxpath,
        'PC_NAME_MAX': maxpath - len(path),
    }
Get all pathconf variables for given path. :param path: absolute fs path :type path: str :returns: dictionary containing pathconf keys and their values (both str) :rtype: dict
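A short usage sketch, assuming the function above; on POSIX the keys come from ``os.pathconf_names``, while the fallback branch returns the two reconstructed keys shown:

.. code-block:: python

    limits = pathconf("/tmp")
    print(limits.get("PC_NAME_MAX"), limits.get("PC_PATH_MAX"))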
2,955
def add_instruction(self, specification):
    instruction = self.as_instruction(specification)
    self._type_to_instruction[instruction.type] = instruction
Add an instruction specification :param specification: a specification with a key :data:`knittingpattern.Instruction.TYPE` .. seealso:: :meth:`as_instruction`
2,956
def _gen_success_message(publish_output): application_id = publish_output.get() details = json.dumps(publish_output.get(), indent=2) if CREATE_APPLICATION in publish_output.get(): return "Created new application with the following metadata:\n{}".format(details) return .format(application_id, details)
Generate detailed success message for published applications. Parameters ---------- publish_output : dict Output from serverlessrepo publish_application Returns ------- str Detailed success message
2,957
def dfbool2intervals(df,colbool): df.index=range(len(df)) intervals=bools2intervals(df[colbool]) for intervali,interval in enumerate(intervals): df.loc[interval[0]:interval[1],f]=intervali df.loc[interval[0]:interval[1],f]=interval[0] df.loc[interval[0]:interval[1],f]=interval[1] df.loc[interval[0]:interval[1],f]=interval[1]-interval[0]+1 df.loc[interval[0]:interval[1],f]=range(interval[1]-interval[0]+1) df[f]=df.index return df
df contains bool values in the column ``colbool``
2,958
def get_instance(self, payload):
    # The solution keys are assumed to match the keyword names (literals were stripped).
    return AuthTypeCallsInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        domain_sid=self._solution['domain_sid'],
    )
Build an instance of AuthTypeCallsInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance
2,959
def evaluate(self, global_state: GlobalState, post=False) -> List[GlobalState]: log.debug("Evaluating {}".format(self.op_code)) op = self.op_code.lower() if self.op_code.startswith("PUSH"): op = "push" elif self.op_code.startswith("DUP"): op = "dup" elif self.op_code.startswith("SWAP"): op = "swap" elif self.op_code.startswith("LOG"): op = "log" instruction_mutator = ( getattr(self, op + "_", None) if not post else getattr(self, op + "_" + "post", None) ) if instruction_mutator is None: raise NotImplementedError if self.iprof is None: result = instruction_mutator(global_state) else: start_time = datetime.now() result = instruction_mutator(global_state) end_time = datetime.now() self.iprof.record(op, start_time, end_time) return result
Performs the mutation for this instruction. :param global_state: :param post: :return:
2,960
def safe_version(version):
    try:
        # Normalize per PEP 440 if possible.
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Fallback sanitisation (literals reconstructed from setuptools' safe_version).
        version = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', version)
Convert an arbitrary string to a standard version string
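A brief usage sketch, assuming the function above and the ``packaging`` dependency:

.. code-block:: python

    safe_version("1.0.post1")   # '1.0.post1', already a valid PEP 440 version
    safe_version("1.0 beta$2")  # '1.0.beta-2', sanitised by the fallback branch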
2,961
def dump_to_stream(self, cnf, stream, **opts):
    tree = container_to_etree(cnf, **opts)
    etree_write(tree, stream)
:param cnf: Configuration data to dump :param stream: Config file or file like object write to :param opts: optional keyword parameters
2,962
def save_split_next(self): filenames = [] width = int(math.ceil(math.log(len(self), 10))) + 1 i = 1 blurb = Blurbs() while self: metadata, body = self.pop() metadata[] = str(i).rjust(width, ) if in metadata: del metadata[] blurb.append((metadata, body)) filename = blurb._extract_next_filename() blurb.save(filename) blurb.clear() filenames.append(filename) i += 1 return filenames
Save out blurbs created from "blurb split". They don't have dates, so we have to get creative.
2,963
def destroy(self):
    if self == App._main_app:
        App._main_app = None
    self.tk.destroy()
Destroy and close the App. :return: None. :note: Once destroyed an App can no longer be used.
2,964
def get_electron_number(self, charge=0):
    # Column/key names below are reconstructions (stripped in extraction):
    # 'atomic_number' from the constants table and the molecule's 'atom' column.
    atomic_number = constants.elements['atomic_number'].to_dict()
    return sum([atomic_number[atom] for atom in self['atom']]) - charge
Return the number of electrons. Args: charge (int): Charge of the molecule. Returns: int: The number of electrons.
2,965
def measure_impedance(self, sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, rms, state): state_ = uint8_tVector() for i in range(0, len(state)): state_.append(int(state[i])) buffer = np.array(Base.measure_impedance(self, sampling_window_ms, n_sampling_windows, delay_between_windows_ms, interleave_samples, rms, state_)) return self.measure_impedance_buffer_to_feedback_result(buffer)
Measure voltage across load of each of the following control board feedback circuits: - Reference _(i.e., attenuated high-voltage amplifier output)_. - Load _(i.e., voltage across DMF device)_. The measured voltage _(i.e., ``V2``)_ can be used to compute the impedance of the measured load, the input voltage _(i.e., ``V1``)_, etc. Parameters ---------- sampling_window_ms : float Length of sampling window (in milleseconds) for each RMS/peak-to-peak voltage measurement. n_sampling_windows : int Number of RMS/peak-to-peak voltage measurements to take. delay_between_windows_ms : float Delay (in milleseconds) between RMS/peak-to-peak voltage measurements. interleave_samples : bool If ``True``, interleave RMS/peak-to-peak measurements for analog channels. For example, ``[<i_0>, <j_0>, <i_1>, <j_1>, ..., <i_n>, <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. If ``False``, all measurements for each analog channel are taken together. For example, ``[<i_0>, ..., <i_n>, <j_0>, ..., <j_n>]`` where ``i`` and ``j`` correspond to two different analog channels. rms : bool If ``True``, a RMS voltage measurement is collected for each sampling window. Otherwise, peak-to-peak measurements are collected. state : list State of device channels. Length should be equal to the number of device channels. Returns ------- :class:`FeedbackResults`
2,966
def _make_index_list(num_samples, num_params, num_groups=None):
    if num_groups is None:
        num_groups = num_params
    index_list = []
    for j in range(num_samples):
        index_list.append(np.arange(num_groups + 1) + j * (num_groups + 1))
    return index_list
Identify indices of input sample associated with each trajectory For each trajectory, identifies the indexes of the input sample which is a function of the number of factors/groups and the number of samples Arguments --------- num_samples : int The number of trajectories num_params : int The number of parameters num_groups : int The number of groups Returns ------- list of numpy.ndarray Example ------- >>> BruteForce()._make_index_list(num_samples=4, num_params=3, num_groups=2) [np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]), np.array([9, 10, 11])]
2,967
def write_data(self, buf): result = self.devh.controlMsg( usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE, usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50) if result != len(buf): raise IOError() return True
Send data to the device. If the write fails for any reason, an :obj:`IOError` exception is raised. :param buf: the data to send. :type buf: list(int) :return: success status. :rtype: bool
2,968
def SegmentSum(a, ids, *args):
    func = lambda idxs: reduce(np.add, a[idxs])
    return seg_map(func, a, ids),
Segmented sum op.
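For intuition, a self-contained NumPy sketch of what a segment sum computes, independent of the ``seg_map`` helper assumed by the op above:

.. code-block:: python

    import numpy as np

    a = np.array([1, 2, 3, 4, 5])
    ids = np.array([0, 0, 1, 1, 1])
    sums = [a[ids == s].sum() for s in np.unique(ids)]  # [3, 12]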
2,969
def _readoct(self, length, start): if length % 3: raise InterpretError("Cannot convert to octal unambiguously - " "not multiple of 3 bits.") if not length: return end = oct(self._readuint(length, start))[LEADING_OCT_CHARS:] if end.endswith(): end = end[:-1] middle = * (length // 3 - len(end)) return middle + end
Read bits and interpret as an octal string.
2,970
def add_prefix(self): p = Prefix() if in request.json: try: if request.json[] is None or len(unicode(request.json[])) == 0: p.vrf = None else: p.vrf = VRF.get(int(request.json[])) except ValueError: return json.dumps({: 1, : "Invalid VRF ID " % request.json[]}) except NipapError, e: return json.dumps({: 1, : e.args, : type(e).__name__}) if in request.json: p.description = validate_string(request.json, ) if in request.json: p.expires = validate_string(request.json, ) if in request.json: p.comment = validate_string(request.json, ) if in request.json: p.node = validate_string(request.json, ) if in request.json: p.status = validate_string(request.json, ) if in request.json: p.type = validate_string(request.json, ) if in request.json: if request.json[] is not None: try: p.pool = Pool.get(int(request.json[])) except NipapError, e: return json.dumps({: 1, : e.args, : type(e).__name__}) if in request.json: p.country = validate_string(request.json, ) if in request.json: p.order_id = validate_string(request.json, ) if in request.json: p.customer_id = validate_string(request.json, ) if in request.json: p.alarm_priority = validate_string(request.json, ) if in request.json: p.monitor = request.json[] if in request.json: p.vlan = request.json[] if in request.json: p.tags = request.json[] if in request.json: p.avps = request.json[] args = {} if in request.json: args[] = request.json[] if in request.json: try: args[] = Pool.get(int(request.json[])) except NipapError, e: return json.dumps({: 1, : e.args, : type(e).__name__}) if in request.json: args[] = request.json[] if in request.json: args[] = request.json[] if args == {}: if in request.json: p.prefix = request.json[] try: p.save(args) except NipapError, e: return json.dumps({: 1, : e.args, : type(e).__name__}) return json.dumps(p, cls=NipapJSONEncoder)
Add prefix according to the specification. The following keys can be used: vrf ID of VRF to place the prefix in prefix the prefix to add if already known family address family (4 or 6) description A short description expires Expiry time of assignment comment Longer comment node Hostname of node type Type of prefix; reservation, assignment, host status Status of prefix; assigned, reserved, quarantine pool ID of pool country Country where the prefix is used order_id Order identifier customer_id Customer identifier vlan VLAN ID alarm_priority Alarm priority of prefix monitor If the prefix should be monitored or not from-prefix A prefix the prefix is to be allocated from from-pool A pool (ID) the prefix is to be allocated from prefix_length Prefix length of allocated prefix
2,971
def _select_list_view(self, model, **kwargs): from uliweb import request fields = kwargs.pop(, None) meta = kwargs.pop(, ) if in request.GET: if in kwargs: fields = kwargs.pop(, fields) if in kwargs: meta = kwargs.pop() else: if hasattr(model, ): meta = else: meta = meta view = functions.SelectListView(model, fields=fields, meta=meta, **kwargs) return view
:param model: :param fields_convert_map: it's different from ListView :param kwargs: :return:
2,972
def _validate_row_label(label, column_type_map):
    if not isinstance(label, str):
        raise TypeError("The row label column name must be a string.")
    if label not in column_type_map.keys():
        raise ToolkitError("Row label column not found in the dataset.")
    if column_type_map[label] not in (str, int):
        raise TypeError("Row labels must be integers or strings.")
Validate a row label column. Parameters ---------- label : str Name of the row label column. column_type_map : dict[str, type] Dictionary mapping the name of each column in an SFrame to the type of the values in the column.
2,973
def line(ax, p1, p2, permutation=None, **kwargs):
    pp1 = project_point(p1, permutation=permutation)
    pp2 = project_point(p2, permutation=permutation)
    ax.add_line(Line2D((pp1[0], pp2[0]), (pp1[1], pp2[1]), **kwargs))
Draws a line on `ax` from p1 to p2. Parameters ---------- ax: Matplotlib AxesSubplot, None The subplot to draw on. p1: 2-tuple The (x,y) starting coordinates p2: 2-tuple The (x,y) ending coordinates kwargs: Any kwargs to pass through to Matplotlib.
2,974
def sliced(seq, n): return takewhile(bool, (seq[i: i + n] for i in count(0, n)))
Yield slices of length *n* from the sequence *seq*. >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) [(1, 2, 3), (4, 5, 6)] If the length of the sequence is not divisible by the requested slice length, the last slice will be shorter. >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) [(1, 2, 3), (4, 5, 6), (7, 8)] This function will only work for iterables that support slicing. For non-sliceable iterables, see :func:`chunked`.
2,975
def set_raw_tag_data(filename, data, act=True, verbose=False): "Replace the ID3 tag in FILENAME with DATA." check_tag_data(data) with open(filename, "rb+") as file: try: (cls, offset, length) = stagger.tags.detect_tag(file) except stagger.NoTagError: (offset, length) = (0, 0) if length > 0: verb(verbose, "{0}: replaced tag with {1} bytes of data" .format(filename, len(data))) else: verb(verbose, "{0}: created tag with {1} bytes of data" .format(filename, len(data))) if act: stagger.fileutil.replace_chunk(file, offset, length, data)
Replace the ID3 tag in FILENAME with DATA.
2,976
def _to_dict(self): _dict = {} if hasattr(self, ) and self.from_ is not None: _dict[] = self.from_ if hasattr(self, ) and self.to is not None: _dict[] = self.to if hasattr(self, ) and self.speaker is not None: _dict[] = self.speaker if hasattr(self, ) and self.confidence is not None: _dict[] = self.confidence if hasattr(self, ) and self.final_results is not None: _dict[] = self.final_results return _dict
Return a json dictionary representing this model.
2,977
def get_constantvalue(self): buff = self.get_attribute("ConstantValue") if buff is None: return None with unpack(buff) as up: (cval_ref, ) = up.unpack_struct(_H) return cval_ref
the constant pool index for this field, or None if this is not a constant field. Reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.2
2,978
def find_slack_bus(sub_network): gens = sub_network.generators() if len(gens) == 0: logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name)) sub_network.slack_generator = None sub_network.slack_bus = sub_network.buses_i()[0] else: slacks = gens[gens.control == "Slack"].index if len(slacks) == 0: sub_network.slack_generator = gens.index[0] sub_network.network.generators.loc[sub_network.slack_generator,"control"] = "Slack" logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) elif len(slacks) == 1: sub_network.slack_generator = slacks[0] else: sub_network.slack_generator = slacks[0] sub_network.network.generators.loc[slacks[1:],"control"] = "PV" logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator)) sub_network.slack_bus = gens.bus[sub_network.slack_generator] sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
Find the slack bus in a connected sub-network.
2,979
def writeClient(self, fd, sdClass=None, **kw):
    sdClass = sdClass or ServiceDescription
    assert issubclass(sdClass, ServiceDescription), \
        'parameter sdClass must subclass ServiceDescription'
    # NOTE: the banner string literals written below were lost in extraction;
    # these are plausible reconstructions, not the original text.
    print >>fd, '#' * 50
    print >>fd, '# file: %s.py' % self.getClientModuleName()
    print >>fd, '#'
    print >>fd, '# client stubs generated by "%s"' % self.__class__
    print >>fd, '#   %s' % ' '.join(sys.argv)
    print >>fd, '#'
    print >>fd, '#' * 50

    self.services = []
    for service in self._wsdl.services:
        sd = sdClass(self._addressing, do_extended=self.do_extended,
                     wsdl=self._wsdl)
        if len(self._wsdl.types) > 0:
            sd.setTypesModuleName(self.getTypesModuleName(),
                                  self.getTypesModulePath())
            self.gatherNamespaces()
        sd.fromWsdl(service, **kw)
        sd.write(fd)
        self.services.append(sd)
Write out client module to file descriptor.

Parameters and keyword arguments:
    fd -- file descriptor
    sdClass -- service description class name
    imports -- list of imports
    readerclass -- class name of ParsedSoap reader
    writerclass -- class name of SoapWriter writer
2,980
def cli_help(context, command_name, general_parser, command_parsers):
    # NOTE: the two string literals on the next lines and the ReturnCode
    # message were lost in extraction; the values used here are plausible
    # reconstructions.
    if command_name == 'help':
        command_name = ''
    with context.io_manager.with_stdout() as stdout:
        if not command_name:
            general_parser.print_help(stdout)
        elif command_name in command_parsers:
            command_parsers[command_name].option_parser.print_help(stdout)
        else:
            raise ReturnCode('unknown command %r' % command_name)
Outputs help information.

See :py:mod:`swiftly.cli.help` for context usage information.

See :py:class:`CLIHelp` for more information.

:param context: The :py:class:`swiftly.cli.context.CLIContext` to use.
:param command_name: The command_name to output help information for, or
    set to None or an empty string to output the general help information.
:param general_parser: The :py:class:`swiftly.cli.optionparser.OptionParser`
    for general usage.
:param command_parsers: A dict of (name, :py:class:`CLICommand`) for
    specific command usage.
2,981
def manage(cls, entity, unit_of_work):
    # The attribute name is inferred from the assignment in the else branch;
    # the ValueError message was lost in extraction and is reconstructed
    # from the docstring.
    if hasattr(entity, '__everest__'):
        if unit_of_work is not entity.__everest__.unit_of_work:
            raise ValueError('Trying to manage an entity that is already '
                             'managed by a different Unit Of Work.')
    else:
        entity.__everest__ = cls(entity, unit_of_work)
Manages the given entity under the given Unit Of Work. If `entity` is already managed by the given Unit Of Work, nothing is done. :raises ValueError: If the given entity is already under management by a different Unit Of Work.
2,982
def make_value_from_env(self, param, value_type, function):
    value = os.getenv(param)
    if value is None:
        self.notify_user("Environment variable `%s` undefined" % param)
    return self.value_convert(value, value_type)
get environment variable
2,983
def acquire(self):
    start_time = time.time()
    while True:
        try:
            self.fd = os.open(self.lockfile,
                              os.O_CREAT | os.O_EXCL | os.O_RDWR)
            break
        except (OSError,) as e:
            if e.errno != errno.EEXIST:
                raise
            if (time.time() - start_time) >= self.timeout:
                raise FileLockException("%s: Timeout occured." % self.lockfile)
            time.sleep(self.delay)
    self.is_locked = True
Acquire the lock, if possible. If the lock is in use, it checks again every `delay` seconds. It does this until it either gets the lock or exceeds `timeout` number of seconds, in which case it throws an exception.
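A sketch of how such a lock is typically used. The class name `FileLock`, its constructor arguments, and the `release()` method are assumptions; only `acquire` appears in the snippet, and `do_protected_work` is a placeholder.

# Hypothetical usage; FileLock, its constructor, and release() are assumed.
lock = FileLock("/tmp/myjob.lock", timeout=10, delay=0.5)
try:
    lock.acquire()          # blocks, retrying every `delay` seconds
    do_protected_work()     # placeholder for the critical section
finally:
    if lock.is_locked:
        lock.release()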
2,984
def snapshot_list(self):
    # The sentinel text and the command arguments were stripped during
    # extraction; these literals are plausible reconstructions for
    # `vagrant snapshot list`.
    NO_SNAPSHOTS_TAKEN = 'No snapshots have been taken yet!'
    output = self._run_vagrant_command(['snapshot', 'list'])
    if NO_SNAPSHOTS_TAKEN in output:
        return []
    else:
        return output.splitlines()
This command will list all the snapshots taken.
2,985
def worker_bonus(self, chosen_hit, auto, amount, reason='',
                 assignment_ids=None):
    # The stripped config section/option names, dictionary keys and print
    # strings below are inferred from the surrounding psiTurk code and may
    # not match the originals exactly.
    if self.config.has_option('Shell Parameters', 'bonus_message'):
        reason = self.config.get('Shell Parameters', 'bonus_message')
    while not reason:
        user_input = raw_input("Type the reason for the bonus. Workers "
                               "will see this message: ")
        reason = user_input
    override_status = True
    if chosen_hit:
        override_status = False
        workers = self.amt_services.get_workers("Approved", chosen_hit)
        if not workers:
            print "No approved workers for HIT", chosen_hit
            return
        print 'Giving bonus to workers in HIT', chosen_hit
    elif len(assignment_ids) == 1:
        workers = [self.amt_services.get_worker(assignment_ids[0])]
        if not workers:
            print "No submissions found for requested assignment ID"
            return
    else:
        workers = self.amt_services.get_workers("Approved")
        if not workers:
            print "No approved workers found."
            return
        workers = [worker for worker in workers if
                   worker['assignmentId'] in assignment_ids]
    for worker in workers:
        assignment_id = worker['assignmentId']
        try:
            init_db()
            part = Participant.query.\
                filter(Participant.assignmentid == assignment_id).\
                filter(Participant.workerid == worker['workerId']).\
                filter(Participant.endhit != None).\
                one()
            if auto:
                amount = part.bonus
            status = part.status
            if amount <= 0:
                print "bonus amount <=$0, no bonus given for assignment", assignment_id
            elif status == 7 and not override_status:
                print "bonus already awarded for assignment", assignment_id
            else:
                success = self.amt_services.bonus_worker(assignment_id,
                                                         amount, reason)
                if success:
                    print "gave bonus of $" + str(amount) + " for assignment " + \
                        assignment_id
                    part.status = 7
                    db_session.add(part)
                    db_session.commit()
                    db_session.remove()
                else:
                    print "*** failed to bonus assignment", assignment_id
        except Exception as e:
            print e
            print "*** failed to bonus assignment", assignment_id
Bonus worker
2,986
def get_field_cache(self, cache_type='kibana'):
    # The stripped string literals (default cache type, dictionary keys,
    # cache-type names) are plausible reconstructions and may differ from
    # the originals.
    if cache_type == 'kibana':
        try:
            search_results = urlopen(self.get_url).read().decode()
        except HTTPError:
            return []
        index_pattern = json.loads(search_results)
        fields_str = index_pattern['_source']['fields']
        return json.loads(fields_str)
    elif cache_type == 'es' or cache_type.startswith('elastic'):
        search_results = urlopen(self.es_get_url).read().decode()
        es_mappings = json.loads(search_results)
        field_cache = []
        for (index_name, val) in iteritems(es_mappings):
            if index_name != self.index:
                m_dict = es_mappings[index_name]['mappings']
                mappings = self.get_index_mappings(m_dict)
                field_cache.extend(mappings)
        field_cache = self.dedup_field_cache(field_cache)
        return field_cache
    self.pr_err("Unknown cache type: %s" % cache_type)
    return None
Return a list of fields' mappings
2,987
def delete_message(self, id, remove):
    path = {}
    data = {}
    params = {}

    path["id"] = id
    data["remove"] = remove

    self.logger.debug("POST /api/v1/conversations/{id}/remove_messages with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/conversations/{id}/remove_messages".format(**path), data=data, params=params, no_data=True)
Delete a message. Delete messages from this conversation. Note that this only affects this user's view of the conversation. If all messages are deleted, the conversation will be as well (equivalent to DELETE)
2,988
def rlgt(self, time=None, times=1, disallow_sibling_lgts=False):
    lgt = LGT(self.copy())
    for _ in range(times):
        lgt.rlgt(time, disallow_sibling_lgts)
    return lgt.tree
Uses class LGT to perform random lateral gene transfer on ultrametric tree
2,989
def populate(self, priority, address, rtr, data):
    assert isinstance(data, bytes)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 6)
    self.set_attributes(priority, address, rtr)
    self.cur = (((data[0] << 8) | data[1]) / 32) * 0.0625
    self.min = (((data[2] << 8) | data[3]) / 32) * 0.0625
    self.max = (((data[4] << 8) | data[5]) / 32) * 0.0625
data bytes (high + low)
    1 + 2 = current temp
    3 + 4 = min temp
    5 + 6 = max temp

:return: None
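A worked decoding example using the same arithmetic as `populate`: the temperature is stored in 0.0625 °C steps in the upper bits of the 16-bit value, so the byte pair 0x2B 0x00 decodes to 21.5 °C. The byte values below are illustrative, not taken from the original snippet.

# Standalone check of the byte arithmetic used above.
data = bytes([0x2B, 0x00])                       # high byte, low byte
cur = (((data[0] << 8) | data[1]) / 32) * 0.0625
print(cur)  # 21.5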
2,990
def construct_inlines(self):
    inline_formsets = []
    for inline_class in self.get_inlines():
        inline_instance = inline_class(self.model, self.request, self.object,
                                       self.kwargs, self)
        inline_formset = inline_instance.construct_formset()
        inline_formsets.append(inline_formset)
    return inline_formsets
Returns the inline formset instances
2,991
def _pcca_connected_isa(evec, n_clusters):
    (n, m) = evec.shape
    if n_clusters > m:
        raise ValueError("Cannot cluster the (" + str(n) + " x " + str(m)
                         + " eigenvector matrix to " + str(n_clusters) + " clusters.")

    # sanity checks: first eigenvector must be constant, the rest must not be
    diffs = np.abs(np.max(evec, axis=0) - np.min(evec, axis=0))
    assert diffs[0] < 1e-6, "First eigenvector is not constant. This indicates that the transition matrix " \
                            "is not connected or the eigenvectors are incorrectly sorted. Cannot do PCCA."
    assert diffs[1] > 1e-6, "An eigenvector after the first one is constant. " \
                            "Probably the eigenvectors are incorrectly sorted. Cannot do PCCA."

    c = evec[:, list(range(n_clusters))]
    ortho_sys = np.copy(c)
    max_dist = 0.0

    ind = np.zeros(n_clusters, dtype=np.int32)

    # first representative state: the row with the largest 2-norm
    for (i, row) in enumerate(c):
        if np.linalg.norm(row, 2) > max_dist:
            max_dist = np.linalg.norm(row, 2)
            ind[0] = i

    ortho_sys -= c[ind[0], None]

    # Gram-Schmidt-like selection of the remaining representative states
    for k in range(1, n_clusters):
        max_dist = 0.0
        temp = np.copy(ortho_sys[ind[k - 1]])
        for (i, row) in enumerate(ortho_sys):
            row -= np.dot(np.dot(temp, np.transpose(row)), temp)
            distt = np.linalg.norm(row, 2)
            if distt > max_dist and i not in ind[0:k]:
                max_dist = distt
                ind[k] = i
        ortho_sys /= np.linalg.norm(ortho_sys[ind[k]], 2)

    # the rotation maps eigenvector coordinates of the representative
    # states onto the fuzzy cluster memberships
    rot_mat = np.linalg.inv(c[ind])
    chi = np.dot(c, rot_mat)
    return (chi, rot_mat)
PCCA+ spectral clustering method using the inner simplex algorithm.

Clusters the first n_clusters eigenvectors of a transition matrix in order
to cluster the states. This function assumes that the state space is fully
connected, i.e. the transition matrix whose eigenvectors are used is
supposed to have only one eigenvalue 1, and the corresponding first
eigenvector (evec[:, 0]) must be constant.

Parameters
----------
eigenvectors : ndarray
    A matrix with the sorted eigenvectors in the columns. The stationary
    eigenvector should be first, then the one to the slowest relaxation
    process, etc.
n_clusters : int
    Number of clusters to group to.

Returns
-------
(chi, rot_mat)
chi : ndarray (n x m)
    A matrix containing the probability or membership of each state to be
    assigned to each cluster. The rows sum to 1.
rot_mat : ndarray (m x m)
    A rotation matrix that rotates the dominant eigenvectors to yield the
    PCCA memberships, i.e.: chi = np.dot(evec, rot_mat)

References
----------
[1] P. Deuflhard and M. Weber, Robust Perron cluster analysis in
    conformation dynamics, in: Linear Algebra Appl. 398C,
    M. Dellnitz, S. Kirkland, M. Neumann and C. Schuette (Editors),
    Elsevier, New York, 2005, pp. 161-184
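A minimal usage sketch, assuming numpy is imported as np (as in the snippet). The toy transition matrix below is an illustration: it is connected and row-stochastic, so its first right eigenvector is constant, as the function requires.

# Hypothetical usage on a small, connected row-stochastic matrix.
import numpy as np

T = np.array([[0.9, 0.1, 0.0, 0.0],
              [0.1, 0.8, 0.1, 0.0],
              [0.0, 0.1, 0.8, 0.1],
              [0.0, 0.0, 0.1, 0.9]])

# Right eigenvectors, sorted by descending eigenvalue so that the
# stationary (constant) eigenvector comes first.
eigvals, eigvecs = np.linalg.eig(T)
order = np.argsort(eigvals)[::-1]
evec = np.real(eigvecs[:, order])

chi, rot_mat = _pcca_connected_isa(evec, 2)
print(chi)              # fuzzy memberships, one row per state
print(chi.sum(axis=1))  # each row sums to 1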
2,992
def all_subclasses(cls):
    subclasses = cls.__subclasses__()
    descendants = (descendant for subclass in subclasses
                   for descendant in all_subclasses(subclass))
    return set(subclasses) | set(descendants)
Recursively returns all the subclasses of the provided class.
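A quick illustration: because the function recurses, it collects every descendant, not just direct children. The small class hierarchy below is made up for the example.

# Illustrative hierarchy; any classes would do.
class Base:
    pass

class Child(Base):
    pass

class GrandChild(Child):
    pass

print(all_subclasses(Base))  # {Child, GrandChild}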
2,993
def generate_daily(day_end_hour, use_dst, calib_data, hourly_data, daily_data,
                   process_from):
    start = daily_data.before(datetime.max)
    if start is None:
        start = datetime.min
    start = calib_data.after(start + SECOND)
    if process_from:
        if start:
            start = min(start, process_from)
        else:
            start = process_from
    if start is None:
        return start
    # round to the start of the (local) day
    start = timezone.local_replace(
        start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0)
    del daily_data[start:]
    stop = calib_data.before(datetime.max)

    acc = DayAcc()

    def dailygen(inputdata):
        day_start = start
        count = 0
        while day_start <= stop:
            count += 1
            if count % 30 == 0:
                logger.info("daily: %s", day_start.isoformat())
            else:
                logger.debug("daily: %s", day_start.isoformat())
            day_end = day_start + DAY
            if use_dst:
                # a day may be 23 or 25 hours long around DST changes
                day_end = timezone.local_replace(
                    day_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
            acc.reset()
            for data in inputdata[day_start:day_end]:
                acc.add_raw(data)
            for data in hourly_data[day_start:day_end]:
                acc.add_hourly(data)
            new_data = acc.result()
            if new_data:
                # key name inferred from pywws conventions; the original
                # literal was lost in extraction
                new_data['idx'] = day_start
                yield new_data
            day_start = day_end

    daily_data.update(dailygen(calib_data))
    return start
Generate daily summaries from calibrated and hourly data.
2,994
def double_ell_distance(mjr0, mnr0, pa0, mjr1, mnr1, pa1, dx, dy):
    # rotate the frame so that the separation lies along the x axis,
    # then convolve the two error ellipses' bivariate Gaussians
    theta = -np.arctan2(dy, dx)
    sx0, sy0, cxy0 = ellbiv(mjr0, mnr0, pa0 + theta)
    sx1, sy1, cxy1 = ellbiv(mjr1, mnr1, pa1 + theta)
    sx, sy, cxy = bivconvolve(sx0, sy0, cxy0, sx1, sy1, cxy1)

    d = np.sqrt(dx**2 + dy**2)
    sigma_eff = sx * np.sqrt(1 - (cxy / (sx * sy))**2)
    return d / sigma_eff
Given two ellipses separated by *dx* and *dy*, compute their separation in terms of σ. Based on Pineau et al (2011A&A...527A.126P). The "0" ellipse is taken to be centered at (0, 0), while the "1" ellipse is centered at (dx, dy).
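A hypothetical call, assuming the companion helpers `ellbiv` and `bivconvolve` from the same module are available; the ellipse sizes, angles (radians), and offsets below are made-up values in consistent units.

# Hypothetical usage; ellbiv/bivconvolve must come from the same module.
import numpy as np

nsigma = double_ell_distance(1.0, 0.5, 0.0,        # ellipse 0: major, minor, PA
                             0.8, 0.4, np.pi / 4,  # ellipse 1: major, minor, PA
                             3.0, 1.0)             # separation (dx, dy)
print(nsigma)  # separation expressed in effective sigma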
2,995
def escape(s, quote=True):
    # assert message reconstructed; the original literal was lost in extraction
    assert not isinstance(s, bytes), 'escape() argument must be str, not bytes'
    if quote:
        return s.translate(_escape_map_full)
    return s.translate(_escape_map)
Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true (the default), the quotation mark characters, both double quote (") and single quote (') characters are also translated.
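A minimal sketch of what the two module-level translation maps could look like; their exact contents are not shown in the snippet, so the definitions below are assumptions consistent with the docstring.

# Assumed definitions; the real module may build these differently.
_escape_map = str.maketrans({'&': '&amp;', '<': '&lt;', '>': '&gt;'})
_escape_map_full = str.maketrans({'&': '&amp;', '<': '&lt;', '>': '&gt;',
                                  '"': '&quot;', "'": '&#x27;'})

print(escape('<a href="x">&\'</a>'))
# &lt;a href=&quot;x&quot;&gt;&amp;&#x27;&lt;/a&gt;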
2,996
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    try:
        tarobj.chown = lambda *args: None   # don't do any chowning!
        # The member loop and the stripped path literals are restored here
        # following the usual setuptools implementation of this helper.
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))
                final_dst = progress_filter(name, prelim_dst)
                if final_dst and (member.isfile() or member.isdir()
                                  or member.islnk() or member.issym()):
                    tarobj.extract(member, extract_dir)
                    if final_dst != prelim_dst:
                        shutil.move(prelim_dst, final_dst)
        return True
    finally:
        tarobj.close()
Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined by ``tarfile.open()``). See ``unpack_archive()`` for an explanation of the `progress_filter` argument.
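A hypothetical usage sketch, assuming the module's `default_filter` and `UnrecognizedFormat` are defined as usual; the archive and destination paths below are made up, and the custom filter simply logs each member before returning the destination unchanged.

# Hypothetical usage with a custom progress filter.
def log_filter(src, dst):
    print('extracting', src)
    return dst

unpack_tarfile('dist/mypackage-1.0.tar.gz', 'build/unpacked',
               progress_filter=log_filter)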
2,997
def add_new_data_port(self):
    try:
        # 'INPUT' is an assumption; the original port-type literal was lost
        # in extraction (it could equally be 'OUTPUT').
        new_data_port_ids = gui_helper_state_machine.add_data_port_to_selected_states(
            'INPUT', int, [self.model])
        if new_data_port_ids:
            self.select_entry(new_data_port_ids[self.model.state])
    except ValueError:
        pass
Add a new port with default values and select it
2,998
def predict(self, pairs):
    # fitted-attribute names reconstructed; the original literals were
    # stripped during extraction
    check_is_fitted(self, ['threshold_', 'transformer_'])
    return 2 * (- self.decision_function(pairs) <= self.threshold_) - 1
Predicts the learned metric between input pairs. (For now it just calls
decision function).

Returns the learned metric value between samples in every pair. It should
ideally be low for similar samples and high for dissimilar samples.

Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
    3D Array of pairs to predict, with each row corresponding to two
    points, or 2D array of indices of pairs if the metric learner uses a
    preprocessor.

Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
    The predicted learned metric value between samples in every pair.
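A hedged usage sketch: assuming this method belongs to a pairs classifier in a recent metric-learn release (MMC is used here only as a representative estimator), the toy pairs and labels below are made up for illustration.

# Hypothetical usage with a pairs classifier from metric-learn.
import numpy as np
from metric_learn import MMC

pairs = np.array([[[1.0, 0.0], [1.1, 0.1]],
                  [[0.0, 1.0], [0.1, 1.1]],
                  [[1.0, 0.0], [5.0, 5.0]],
                  [[0.0, 1.0], [5.0, 4.0]]])
y = np.array([1, 1, -1, -1])   # +1 similar, -1 dissimilar

mmc = MMC()
mmc.fit(pairs, y)
print(mmc.predict(pairs))      # array of +1 / -1 predictions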
2,999
def render_latex(latex: str) -> PIL.Image:
    # The temporary file stem, the file-extension literals and the
    # pdftocairo flags below were lost in extraction; these are plausible
    # reconstructions.
    tmpfilename = 'circuit'
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmppath = os.path.join(tmpdirname, tmpfilename)
        with open(tmppath + '.tex', 'w') as latex_file:
            latex_file.write(latex)
        subprocess.run(["pdflatex",
                        "-halt-on-error",
                        "-output-directory={}".format(tmpdirname),
                        "{}".format(tmpfilename + '.tex')],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.DEVNULL,
                       check=True)
        subprocess.run(['pdftocairo', '-singlefile', '-png', '-q',
                        tmppath + '.pdf', tmppath])
        img = PIL.Image.open(tmppath + '.png')
        return img
Convert a single page LaTeX document into an image.

To display the returned image, `img.show()`

Required external dependencies: `pdflatex` (with `qcircuit` package), and
`poppler` (for `pdftocairo`).

Args:
    A LaTeX document as a string.

Returns:
    A PIL Image

Raises:
    OSError: If an external dependency is not installed.
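A small usage sketch: the LaTeX source below is a made-up standalone document, and `pdflatex` plus poppler's `pdftocairo` must be installed for the call to succeed.

# Hypothetical usage; the document must be a complete, standalone LaTeX file.
latex_doc = r"""
\documentclass{standalone}
\begin{document}
$E = mc^2$
\end{document}
"""

img = render_latex(latex_doc)
img.show()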