def get_full_description(self, s, base=None):
    summary = self.get_summary(s)
    extended_summary = self.get_extended_summary(s)
    ret = (summary + '\n\n' + extended_summary).strip()
    if base is not None:
        self.params[base + '.full_desc'] = ret
    return ret
Get the full description from a docstring

This here and the line above is the full description (i.e. the combination of
the :meth:`get_summary` and the :meth:`get_extended_summary`) output

Parameters
----------
s: str
    The docstring to use
base: str or None
    A key under which the description shall be stored in the :attr:`params`
    attribute. If not None, the summary will be stored in ``base + '.full_desc'``.
    Otherwise, it will not be stored at all

Returns
-------
str
    The extracted full description
def get(self, key, default=None):
    try:
        index = self.__keys.index(str(key))
    except ValueError:
        return default
    if 0 <= index < len(self):
        return super(Record, self).__getitem__(index)
    else:
        return default
Obtain a value from the record by key, returning a default value if the key does not exist. :param key: :param default: :return:
def do_print(filename):
    with open(filename) as cmake_file:
        body = ast.parse(cmake_file.read())
        word_print = _print_details(lambda n: "{0} {1}".format(n.type, n.contents))
        ast_visitor.recurse(body,
                            while_stmnt=_print_details(),
                            foreach=_print_details(),
                            function_def=_print_details(),
                            macro_def=_print_details(),
                            if_block=_print_details(),
                            if_stmnt=_print_details(),
                            elseif_stmnt=_print_details(),
                            else_stmnt=_print_details(),
                            function_call=_print_details(lambda n: n.name),
                            word=word_print)
Print the AST of filename.
def _parse_relationship(self): rs_dict = self.data.get('relationships', {}) for rs_name in self.KNOWN_RELATIONSHIPS: if rs_name in rs_dict: setattr( self, rs_name, Relationship(rs_name, rs_dict.get(rs_name))) else: setattr(self, rs_name, NoneRelationshipSingleton)
Nodes have Relationships, and similarly to properties, we set each one as an attribute on the Organization so we can make calls like company.current_team or person.degrees
def retrieve(self, id) : _, _, contact = self.http_client.get("/contacts/{id}".format(id=id)) return contact
Retrieve a single contact

Returns a single contact available to the user, according to the unique contact ID provided.
If the specified contact does not exist, the request will return an error.

:calls: ``get /contacts/{id}``
:param int id: Unique identifier of a Contact.
:return: Dictionary that supports attribute-style access and represents the Contact resource.
:rtype: dict
def get_compositions_by_search(self, composition_query, composition_search): if not self._can('search'): raise PermissionDenied() return self._provider_session.get_compositions_by_search(composition_query, composition_search)
Pass through to provider CompositionSearchSession.get_compositions_by_search
def PopAttributeContainer(self): try: serialized_data = self._list.pop(0) self.data_size -= len(serialized_data) return serialized_data except IndexError: return None
Pops a serialized attribute container from the list. Returns: bytes: serialized attribute container data.
def to_notional(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None): notionals = _instr_conv(instruments, prices, multipliers, True, desired_ccy, instr_fx, fx_rates) return notionals
Convert number of contracts of tradeable instruments to notional value of tradeable instruments in a desired currency. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are number of contracts. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index otherwise NaN returned for instruments without prices multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. Returns ------- pandas.Series of notional amounts of instruments with Index of instruments names Example ------- >>> import pandas as pd >>> import mapping.util as util >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16']) >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16']) >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16']) >>> ntln = util.to_notional(current_contracts, prices, multipliers)
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
                   day=None, hour=None, minute=None, second=None, tzinfo=None):
    if occurrence_id:
        occurrence = get_object_or_404(Occurrence, id=occurrence_id)
        event = occurrence.event
    elif None not in (year, month, day, hour, minute, second):
        event = get_object_or_404(Event, id=event_id)
        date = timezone.make_aware(
            datetime.datetime(int(year), int(month), int(day),
                              int(hour), int(minute), int(second)),
            tzinfo)
        occurrence = event.get_occurrence(date)
        if occurrence is None:
            raise Http404
    else:
        raise Http404
    return event, occurrence
Because occurrences don't have to be persisted, there must be two ways to retrieve them. Both need an event, but if the occurrence is persisted it can be retrieved by id; if it is not persisted, a date is used to retrieve it. This function returns an event and occurrence regardless of which method is used.
def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
                           options=None, ciphers=None):
    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
    if options is None:
        options = 0
        options |= OP_NO_SSLv2
        options |= OP_NO_SSLv3
        options |= OP_NO_COMPRESSION
    context.options |= options
    if getattr(context, 'supports_set_ciphers', True):
        context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
    context.verify_mode = cert_reqs
    if getattr(context, 'check_hostname', None) is not None:
        context.check_hostname = False
    return context
All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. :returns: Constructed SSLContext object with specified options :rtype: SSLContext
def get_function_from_bot_intent_trigger(self, event): intent = event.get('currentIntent') if intent: intent = intent.get('name') if intent: return self.settings.AWS_BOT_EVENT_MAPPING.get( "{}:{}".format(intent, event.get('invocationSource')) )
For the given event build ARN and return the configured function
def _send_command(self, cmd_class, command, payload, timeout=3.0):
    if len(payload) > 60:
        raise ValueError(
            "Attempting to send a BGAPI packet with length > 60 is not allowed: "
            "actual_length={}, command={}, command_class={}".format(
                len(payload), command, cmd_class))
    header = bytearray(4)
    header[0] = 0
    header[1] = len(payload)
    header[2] = cmd_class
    header[3] = command
    packet = header + bytearray(payload)
    self._stream.write(bytes(packet))
    response = self._receive_packet(timeout)
    return response
Send a BGAPI packet to the dongle and return the response
def perform(self): for request in self._cfg[Integrator._CFG_KEY_REQUESTS]: request_type = self._cfg[Integrator._CFG_KEY_REQUESTS][request][Integrator._CFG_KEY_REQUEST_TYPE] request_cfg_file = self._cfg[Integrator._CFG_KEY_REQUESTS][request][Integrator._CFG_KEY_REQUEST_CFG_FILE] self._logger.debug('{}'.format(request_cfg_file)) self._process_request(request, request_type, request_cfg_file)
Performs bulk operation
def one_point_crossover(parents):
    crossover_point = random.randint(1, len(parents[0]) - 1)
    return (_one_parent_crossover(parents[0], parents[1], crossover_point),
            _one_parent_crossover(parents[1], parents[0], crossover_point))
Perform one point crossover on two parent chromosomes. Select a random position in the chromosome. Take genes to the left from one parent and the rest from the other parent. Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
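A minimal usage sketch of the behaviour described above (chromosomes here are plain lists; the crossover point is random, so the split in the comments is just one possible outcome):

parents = (['x'] * 5, ['y'] * 5)
child_a, child_b = one_point_crossover(parents)
# With crossover point 2: child_a == ['x', 'x', 'y', 'y', 'y']
#                         child_b == ['y', 'y', 'x', 'x', 'x']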
def await_paused(self, timeout=None):
    deadline = time.time() + timeout if timeout else None
    with self._lock:
        while self._state != self._PAUSED:
            if self._state != self._PAUSING:
                raise AssertionError(
                    'Cannot wait for {} to reach `{}` while it is in `{}`.'.format(
                        self, self._PAUSED, self._state))
            timeout = deadline - time.time() if deadline else None
            if timeout and timeout <= 0:
                return False
            self._condition.wait(timeout=timeout)
    return True
Blocks until the service is in the Paused state, then returns True. If a timeout is specified, the method may return False to indicate a timeout: with no timeout it will always (eventually) return True. Raises if the service is not currently in the Pausing state.
def _get_url_params(self, shorten=True): cable = True if self.category == 'cable' else False url_date = convert_month(self.date, shorten=shorten, cable=cable) return [ BASE_URL, self.weekday.lower(), self.category + '-ratings', url_date.replace(' ', '-') ]
Returns a list of each parameter to be used for the url format.
def hyperparameters(self): hp_dict = dict(force_dense='True') hp_dict.update(super(KMeans, self).hyperparameters()) return hp_dict
Return the SageMaker hyperparameters for training this KMeans Estimator
def lookup_family(hostname):
    fallback = socket.AF_INET
    try:
        hostnames = socket.getaddrinfo(
            hostname or None, None, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
        if not hostnames:
            return fallback
        h = hostnames[0]
        return h[0]
    except socket.gaierror:
        return fallback
Lookup a hostname and determine its address family. The first address returned will be AF_INET6 if the system is IPv6-enabled, and AF_INET otherwise.
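A short usage sketch (assuming the function is importable as shown; the hostname is hypothetical):

import socket

family = lookup_family('example.com')
# family is socket.AF_INET6 on an IPv6-enabled system and socket.AF_INET otherwise;
# unresolvable names fall back to socket.AF_INET.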
def verify_file(fp, password):
    'Returns whether a scrypt encrypted file is valid.'
    sf = ScryptFile(fp=fp, password=password)
    for line in sf:
        pass
    sf.close()
    return sf.valid
Returns whether a scrypt encrypted file is valid.
def on_epoch_end(self, last_metrics, **kwargs): "Set the final result in `last_metrics`." return add_metrics(last_metrics, self.val/self.count)
Set the final result in `last_metrics`.
def to_list(self, n=None): if n is None: self.cache() return self._base_sequence else: return self.cache().take(n).list()
Converts sequence to list of elements. >>> type(seq([]).to_list()) list >>> type(seq([])) functional.pipeline.Sequence >>> seq([1, 2, 3]).to_list() [1, 2, 3] :param n: Take n elements of sequence if not None :return: list of elements in sequence
def run(self): for msg in self.messages: col = getattr(msg, 'col', 0) yield msg.lineno, col, (msg.tpl % msg.message_args), msg.__class__
Yield the error messages.
def create_rectangular_prism(origin, size):
    from lace.topology import quads_to_tris
    lower_base_plane = np.array([
        origin,
        origin + np.array([size[0], 0, 0]),
        origin + np.array([size[0], 0, size[2]]),
        origin + np.array([0, 0, size[2]]),
    ])
    upper_base_plane = lower_base_plane + np.array([0, size[1], 0])
    vertices = np.vstack([lower_base_plane, upper_base_plane])
    faces = quads_to_tris(np.array([
        [0, 1, 2, 3],
        [7, 6, 5, 4],
        [4, 5, 1, 0],
        [5, 6, 2, 1],
        [6, 7, 3, 2],
        [3, 7, 4, 0],
    ]))
    return Mesh(v=vertices, f=faces)
Return a Mesh which is an axis-aligned rectangular prism. One vertex is `origin`; the diametrically opposite vertex is `origin + size`. size: 3x1 array.
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15): tokens = [ token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore') if min_len <= len(token) <= max_len and not token.startswith('_') ] return tokens
Convert a document into a list of tokens. This lowercases, tokenizes, and optionally de-accents; the output is the final list of tokens as unicode strings, which won't be processed any further.
def unpack_nested_exception(error):
    i = 0
    while True:
        if error.args[i:]:
            if isinstance(error.args[i], Exception):
                error = error.args[i]
                i = 0
            else:
                i += 1
        else:
            break
    return error
If exceptions are stacked, return the innermost one.

:param error: A Python exception that may have another exception embedded within it
:return: A Python exception with no exception embedded within it
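A minimal illustration of the unwrapping (the exceptions here are hypothetical, not from the library):

inner = ValueError('root cause')
outer = RuntimeError('wrapper', inner)
assert unpack_nested_exception(outer) is inner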
def disconnect(self): self._connected = False if self._transport is not None: try: self._transport.disconnect() except Exception: self.logger.error( "Failed to disconnect from %s", self._host, exc_info=True) raise finally: self._transport = None
Disconnect from the current host, but do not update the closed state. After the transport is disconnected, the closed state will be True if this is called after a protocol shutdown, or False if the disconnect was in error. TODO: do we really need closed vs. connected states? this only adds complication and the whole reconnect process has been scrapped anyway.
def calls(self): return WebhookWebhooksCallProxy(self._client, self.sys['space'].id, self.sys['id'])
Provides access to call overview for the given webhook. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls :return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object. :rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy Usage: >>> webhook_webhooks_call_proxy = webhook.calls() <WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
def check_user_token(self, request, user):
    if not app_settings.REST_USER_TOKEN_ENABLED:
        return False
    try:
        token = Token.objects.get(
            user=user, key=request.data.get('password')
        )
    except Token.DoesNotExist:
        token = None
    else:
        if app_settings.DISPOSABLE_USER_TOKEN:
            token.delete()
    finally:
        return token is not None
If the user has no password set and has at least one social account, this is probably a social login; in that case the password field carries the user's personal auth token.
def hash_of_signed_transaction(txn_obj):
    (chain_id, _v) = extract_chain_id(txn_obj.v)
    unsigned_parts = strip_signature(txn_obj)
    if chain_id is None:
        signable_transaction = UnsignedTransaction(*unsigned_parts)
    else:
        extended_transaction = unsigned_parts + [chain_id, 0, 0]
        signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction)
    return signable_transaction.hash()
Regenerate the hash of the signed transaction object. 1. Infer the chain ID from the signature 2. Strip out signature from transaction 3. Annotate the transaction with that ID, if available 4. Take the hash of the serialized, unsigned, chain-aware transaction Chain ID inference and annotation is according to EIP-155 See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md :return: the hash of the provided transaction, to be signed
def get_variable_dtype( master_dtype=tf.bfloat16, slice_dtype=tf.float32, activation_dtype=tf.float32): return mtf.VariableDType( master_dtype=tf.as_dtype(master_dtype), slice_dtype=tf.as_dtype(slice_dtype), activation_dtype=tf.as_dtype(activation_dtype))
Datatypes to use for the run. Args: master_dtype: string, datatype for checkpoints keep this the same between training and eval/inference slice_dtype: string, datatype for variables in memory must be tf.float32 for training activation_dtype: string, datatype for activations less memory usage if tf.bfloat16 but possible numerical issues Returns: a mtf.VariableDtype
def trace(self, n): "Restore the position in the history of individual v's nodes" trace_map = {} self._trace(n, trace_map) s = list(trace_map.keys()) s.sort() return s
Restore the position in the history of individual v's nodes
def parseFeatureRequest(response):
    features = []
    while len(response) > 0:
        tag = response[0]
        control = ((((((response[2] << 8) + response[3]) << 8) +
                     response[4]) << 8) + response[5])
        try:
            features.append([Features[tag], control])
        except KeyError:
            pass
        del response[:6]
    return features
Get the list of Part10 features supported by the reader.

@param response: result of CM_IOCTL_GET_FEATURE_REQUEST command
@rtype: list
@return: a list of lists [[tag1, value1], [tag2, value2]]
def copy(self, datasets=None): new_scn = self.__class__() new_scn.attrs = self.attrs.copy() new_scn.dep_tree = self.dep_tree.copy() for ds_id in (datasets or self.keys()): new_scn.datasets[ds_id] = self[ds_id] if not datasets: new_scn.wishlist = self.wishlist.copy() else: new_scn.wishlist = set([DatasetID.from_dict(ds.attrs) for ds in new_scn]) return new_scn
Create a copy of the Scene including dependency information. Args: datasets (list, tuple): `DatasetID` objects for the datasets to include in the new Scene object.
def copy(self, **replacements): cls = type(self) kwargs = {'org': self.org, 'name': self.name, 'ext': self.ext, 'classifier': self.classifier, 'rev': self.rev} for key, val in replacements.items(): kwargs[key] = val return cls(**kwargs)
Returns a clone of this M2Coordinate with the given replacements kwargs overlaid.
def time_range(self, start, end): self._set_query(self.time_query, time_start=self._format_time(start), time_end=self._format_time(end)) return self
Add a request for a time range to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- start : datetime.datetime The start of the requested time range end : datetime.datetime The end of the requested time range Returns ------- self : DataQuery Returns self for chaining calls
def filter_butter(samples, samplerate, filtertype, freq, order=5): assert filtertype in ('low', 'high', 'band') b, a = filter_butter_coeffs(filtertype, freq, samplerate, order=order) return apply_multichannel(samples, lambda data:signal.lfilter(b, a, data))
Filters the samples with a digital butterworth filter samples : mono samples filtertype: 'low', 'band', 'high' freq : for low or high, the cutoff freq for band, (low, high) samplerate: the sampling-rate order : the order of the butterworth filter Returns --> the filtered samples NB: calls filter_butter_coeffs to calculate the coefficients
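The coefficient helper is not shown in this entry; a plausible sketch of it, assuming it simply wraps scipy.signal.butter with cutoffs normalized to the Nyquist frequency (an assumption, not the package's actual implementation):

from scipy import signal

def filter_butter_coeffs(filtertype, freq, samplerate, order=5):
    # Hypothetical sketch: normalize the cutoff(s) by the Nyquist frequency.
    nyq = 0.5 * samplerate
    if filtertype == 'band':
        wn = [freq[0] / nyq, freq[1] / nyq]
    else:
        wn = freq / nyq
    return signal.butter(order, wn, btype=filtertype)  # returns (b, a)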
def connect_network_gateway(self, gateway_id, body=None): base_uri = self.network_gateway_path % gateway_id return self.put("%s/connect_network" % base_uri, body=body)
Connect a network gateway to the specified network.
def _parse_check(self, rule):
    for check_cls in (checks.FalseCheck, checks.TrueCheck):
        check = check_cls()
        if rule == str(check):
            return check
    try:
        kind, match = rule.split(':', 1)
    except Exception:
        if self.raise_error:
            raise InvalidRuleException(rule)
        else:
            LOG.exception('Failed to understand rule %r', rule)
            return checks.FalseCheck()
    if kind in checks.registered_checks:
        return checks.registered_checks[kind](kind, match)
    elif None in checks.registered_checks:
        return checks.registered_checks[None](kind, match)
    elif self.raise_error:
        raise InvalidRuleException(rule)
    else:
        LOG.error('No handler for matches of kind %r', kind)
        return checks.FalseCheck()
Parse a single base check rule into an appropriate Check object.
def is_file_like(obj): if not (hasattr(obj, 'read') or hasattr(obj, 'write')): return False if not hasattr(obj, "__iter__"): return False return True
Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check Returns ------- is_file_like : bool Whether `obj` has file-like properties. Examples -------- >>> buffer(StringIO("data")) >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False
def n_orifices_per_row(self):
    H = self.b_rows - 0.5*self.orifice_diameter
    flow_per_orifice = pc.flow_orifice_vert(self.orifice_diameter, H,
                                            con.VC_ORIFICE_RATIO)
    n = np.zeros(self.n_rows)
    for i in range(self.n_rows):
        flow_needed = self.flow_ramp[i] - self.flow_actual(i, n)
        n_orifices_real = (flow_needed / flow_per_orifice).to(u.dimensionless)
        n[i] = min((max(0, round(n_orifices_real))), self.n_orifices_per_row_max)
    return n
Calculate number of orifices at each level given an orifice diameter.
def ensure_symlink(src, dst):
    try:
        os.symlink(src, dst)
    except OSError as e:
        if e.errno == 17:
            return True
        raise
    return False
Ensure the existence of a symbolic link pointing to src named dst. Returns a boolean indicating whether the symlink already existed.
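A brief usage sketch (the paths are hypothetical):

already_there = ensure_symlink('/data/releases/v2', '/data/current')
# already_there is True if /data/current already existed, False if the link was just created.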
def _compute_predicates(table_op, predicates, data, scope, **kwargs):
    for predicate in predicates:
        root_tables = predicate.op().root_tables()
        additional_scope = {}
        data_columns = frozenset(data.columns)
        for root_table in root_tables:
            mapping = remap_overlapping_column_names(
                table_op, root_table, data_columns
            )
            if mapping is not None:
                new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
            else:
                new_data = data
            additional_scope[root_table] = new_data
        new_scope = toolz.merge(scope, additional_scope)
        yield execute(predicate, scope=new_scope, **kwargs)
Compute the predicates for a table operation. Parameters ---------- table_op : TableNode predicates : List[ir.ColumnExpr] data : pd.DataFrame scope : dict kwargs : dict Returns ------- computed_predicate : pd.Series[bool] Notes ----- This handles the cases where the predicates are computed columns, in addition to the simple case of named columns coming directly from the input table.
def _check_required(self, value): if value is None and self._required: err_msg = self._errors['required'].format(self.__class__.__name__, self.name) if self.container_model: err_msg += self._errors['required_extra'].format(self.container_model.__name__) raise ValueError(err_msg)
Internal method to check if assigned value is None while it is required. Exception is thrown if ``True``
def handle_log(self, obj): record_dict = json.loads(obj[ExecutorProtocol.LOG_MESSAGE]) record_dict['msg'] = record_dict['msg'] executors_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'executors') record_dict['pathname'] = os.path.join(executors_dir, record_dict['pathname']) logger.handle(logging.makeLogRecord(record_dict))
Handle an incoming log processing request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'log', 'message': [log message] }
def run(self, tag=None, output=None, **kwargs):
    start = datetime.datetime.now()
    count = 0
    if tag:
        tag = Uri(tag)
        xml_generator = etree.iterparse(self.source, tag=tag.etree)
    else:
        xml_generator = etree.iterparse(self.source)
    i = 0
    for event, element in xml_generator:
        type_tags = element.findall(_RDF_TYPE_TAG)
        rdf_types = [el.get(_RES_TAG) for el in type_tags if el.get(_RES_TAG)]
        if str(self.filter_val) in rdf_types:
            count += 1
        i += 1
        element.clear()
    print("Found '{}' items in {}".format(count,
                                          (datetime.datetime.now() - start)))
Runs the extractor

Args:
-----
    output: ['filepath', None]
def LoadFromXml(self, node):
    import os
    self.classId = node.localName
    metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId)
    if metaClassId:
        self.classId = metaClassId
    if node.hasAttribute(NamingPropertyId.DN):
        self.dn = node.getAttribute(NamingPropertyId.DN)
    if self.dn:
        self.rn = os.path.basename(self.dn)
    self.WriteToAttributes(node)
    if node.hasChildNodes():
        childList = node.childNodes
        childCount = len(childList)
        for i in range(childCount):
            childNode = childList.item(i)
            if childNode.nodeType != Node.ELEMENT_NODE:
                continue
            c = _GenericMO()
            self.child.append(c)
            c.LoadFromXml(childNode)
Method updates the object from the xml.
def list_securitygroup_rules(self, group_id): return self.security_group.getRules(id=group_id, iter=True)
List security group rules associated with a security group. :param int group_id: The security group to list rules for
def load(self, model): self._dawg.load(find_data(model)) self._loaded_model = True
Load pickled DAWG from disk.
def extended(self): if self.expires_at: return self.expires_at - self.issued_at > timedelta(days=30) else: return False
Determine whether the OAuth token has been extended.
def generic_ref_formatter(view, context, model, name, lazy=False):
    try:
        if lazy:
            rel_model = getattr(model, name).fetch()
        else:
            rel_model = getattr(model, name)
    except (mongoengine.DoesNotExist, AttributeError) as e:
        return Markup(
            '<span class="label label-danger">Error</span> <small>%s</small>' % e
        )
    if rel_model is None:
        return ''
    try:
        return Markup(
            '<a href="%s">%s</a>' % (
                url_for(
                    '%s.details_view' % rel_model.__class__.__name__.lower(),
                    id=rel_model.id,
                ),
                rel_model,
            )
        )
    except werkzeug.routing.BuildError as e:
        return Markup(
            '<span class="label label-danger">Error</span> <small>%s</small>' % e
        )
For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter
def get_user_columns_list(self): ret_lst = list() for col_name in self.get_columns_list(): if (not self.is_pk(col_name)) and (not self.is_fk(col_name)): ret_lst.append(col_name) return ret_lst
Returns all model's columns except pk or fk
def substitute_array(a, d): a = np.asarray(a, order="C") return np.array([substitute(v, d) for v in a.flat]).reshape(a.shape)
Apply ``substitute`` to all elements of an array ``a`` and return the resulting array. :param Union[np.array,List] a: The expression array to substitute. :param Dict[Parameter,Union[int,float]] d: Numerical substitutions for parameters. :return: An array of partially substituted Expressions or numbers. :rtype: np.array
def read_from_file(path, file_type='text', exception=ScriptWorkerException):
    FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'}
    if file_type not in FILE_TYPE_MAP:
        raise exception("Unknown file_type {} not in {}!".format(file_type, FILE_TYPE_MAP))
    try:
        with open(path, FILE_TYPE_MAP[file_type]) as fh:
            return fh.read()
    except (OSError, FileNotFoundError) as exc:
        raise exception("Can't read_from_file {}: {}".format(path, str(exc)))
Read from ``path``. Small helper function to read from ``file``. Args: path (str): the path to read from. file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary``. Defaults to ``text``. exception (Exception, optional): the exception to raise if unable to read from the file. Defaults to ``ScriptWorkerException``. Returns: None: if unable to read from ``path`` and ``exception`` is ``None`` str or bytes: the contents of ``path`` Raises: Exception: if ``exception`` is set.
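A short usage sketch (the file names are hypothetical):

config_text = read_from_file('config.json')                     # text mode
raw_bytes = read_from_file('payload.bin', file_type='binary')   # binary mode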
def publish(self, synchronous=True, **kwargs): kwargs = kwargs.copy() if 'data' in kwargs and 'id' not in kwargs['data']: kwargs['data']['id'] = self.id kwargs.update(self._server_config.get_client_kwargs()) response = client.post(self.path('publish'), **kwargs) return _handle_response(response, self._server_config, synchronous)
Helper for publishing an existing content view. :param synchronous: What should happen if the server returns an HTTP 202 (accepted) status code? Wait for the task to complete if ``True``. Immediately return the server's response otherwise. :param kwargs: Arguments to pass to requests. :returns: The server's response, with all JSON decoded. :raises: ``requests.exceptions.HTTPError`` If the server responds with an HTTP 4XX or 5XX message.
def bschoi(value, ndim, array, order): value = ctypes.c_int(value) ndim = ctypes.c_int(ndim) array = stypes.toIntVector(array) order = stypes.toIntVector(order) return libspice.bschoi_c(value, ndim, array, order)
Do a binary search for a given value within an integer array, accompanied by an order vector. Return the index of the matching array entry, or -1 if the key value is not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bschoi_c.html :param value: Key value to be found in array. :type value: int :param ndim: Dimension of array. :type ndim: int :param array: Integer array to search. :type array: Array of ints :param order: Order vector. :type order: Array of ints :return: index :rtype: int
def storagehandler(self): if isinstance(self, StorageHandler): return self elif self.parent is not None: return self.parent.storagehandler else: return None
Returns the storage handler available to this actor.

:return: the storage handler, None if not available
def abort(self): if self.resume_queue: self.resume_queue.put(False) self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)
Abort the current operation and block until aborted Normally it will return in Aborted state. If something goes wrong it will return in Fault state. If the user disables then it will return in Disabled state.
def service_checks(self, name): return [ ServiceCheckStub( ensure_unicode(stub.check_id), ensure_unicode(stub.name), stub.status, normalize_tags(stub.tags), ensure_unicode(stub.hostname), ensure_unicode(stub.message), ) for stub in self._service_checks.get(to_string(name), []) ]
Return the service checks received under the given name
def solo(whyrun=False, logfile=None, **kwargs):
    if logfile is None:
        logfile = _default_logfile('chef-solo')
    args = ['chef-solo',
            '--no-color',
            '--logfile "{0}"'.format(logfile),
            '--format doc']
    if whyrun:
        args.append('--why-run')
    return _exec_cmd(*args, **kwargs)
Execute a chef solo run and return a dict with the stderr, stdout, return code, and pid. CLI Example: .. code-block:: bash salt '*' chef.solo override-runlist=test config The configuration file to use environment Set the Chef Environment on the node group Group to set privilege to json-attributes Load attributes from a JSON file or URL log_level Set the log level (debug, info, warn, error, fatal) logfile Set the log file location node-name The node name for this client override-runlist Replace current run list with specified items for a single run recipe-url Pull down a remote gzipped tarball of recipes and untar it to the cookbook cache run-lock-timeout Set maximum duration to wait for another client run to finish, default is indefinitely. user User to set privilege to whyrun Enable whyrun mode when set to True
def minimum_distance2(hull_a, center_a, hull_b, center_b): if hull_a.shape[0] < 3 or hull_b.shape[0] < 3: return slow_minimum_distance2(hull_a, hull_b) else: return faster_minimum_distance2(hull_a, center_a, hull_b, center_b)
Return the minimum distance or 0 if overlap between 2 convex hulls hull_a - list of points in clockwise direction center_a - a point within the hull hull_b - list of points in clockwise direction center_b - a point within the hull
def get_config(ini_path=None, rootdir=None):
    config = Namespace()
    config.default_section = 'pylama'
    if not ini_path:
        path = get_default_config_file(rootdir)
        if path:
            config.read(path)
    else:
        config.read(ini_path)
    return config
Load configuration from INI. :return Namespace:
def get_batch(self, filename=None):
    try:
        history = self.history_model.objects.get(filename=filename)
    except self.history_model.DoesNotExist as e:
        raise TransactionsFileQueueError(
            f"Batch history not found for '{filename}'.") from e
    if history.consumed:
        raise TransactionsFileQueueError(
            f"Batch closed for '{filename}'. Got consumed=True")
    batch = self.batch_cls()
    batch.batch_id = history.batch_id
    batch.filename = history.filename
    return batch
Returns a batch instance given the filename.
def rename_dont_move(self, path, dest): from snakebite.errors import FileAlreadyExistsException try: self.get_bite().rename2(path, dest, overwriteDest=False) except FileAlreadyExistsException: raise luigi.target.FileAlreadyExists()
Use snakebite.rename_dont_move, if available. :param path: source path (single input) :type path: string :param dest: destination path :type dest: string :return: True if succeeded :raises: snakebite.errors.FileAlreadyExistsException
async def remove_all(self, detach=False, eject=False, lock=False): kw = dict(force=True, detach=detach, eject=eject, lock=lock) tasks = [self.auto_remove(device, **kw) for device in self.get_all_handleable_roots()] results = await gather(*tasks) success = all(results) return success
Remove all filesystems handleable by udiskie. :param bool detach: detach the root drive :param bool eject: remove media from the root drive :param bool lock: lock the associated LUKS cleartext slave :returns: whether all attempted operations succeeded
def get(self, name):
    config = self.get_block('interface %s' % name)
    if 'no switchport\n' in config:
        return
    resource = dict(name=name)
    resource.update(self._parse_mode(config))
    resource.update(self._parse_access_vlan(config))
    resource.update(self._parse_trunk_native_vlan(config))
    resource.update(self._parse_trunk_allowed_vlans(config))
    resource.update(self._parse_trunk_groups(config))
    return resource
Returns a dictionary object that represents a switchport The Switchport resource returns the following: * name (str): The name of the interface * mode (str): The switchport mode value * access_vlan (str): The switchport access vlan value * trunk_native_vlan (str): The switchport trunk native vlan vlaue * trunk_allowed_vlans (str): The trunk allowed vlans value * trunk_groups (list): The list of trunk groups configured Args: name (string): The interface identifier to get. Note: Switchports are only supported on Ethernet and Port-Channel interfaces Returns: dict: A Python dictionary object of key/value pairs that represent the switchport configuration for the interface specified If the specified argument is not a switchport then None is returned
def chunks(self, size=32, alignment=1):
    if (size % alignment) != 0:
        raise Error(
            'size {} is not a multiple of alignment {}'.format(
                size, alignment))
    address = self.address
    data = self.data
    chunk_offset = (address % alignment)
    if chunk_offset != 0:
        first_chunk_size = (alignment - chunk_offset)
        yield self._Chunk(address, data[:first_chunk_size])
        address += (first_chunk_size // self._word_size_bytes)
        data = data[first_chunk_size:]
    else:
        first_chunk_size = 0
    for offset in range(0, len(data), size):
        yield self._Chunk(address + offset // self._word_size_bytes,
                          data[offset:offset + size])
Return chunks of the data aligned as given by `alignment`. `size` must be a multiple of `alignment`. Each chunk is returned as a named two-tuple of its address and data.
def container_device_get(name, device_name, remote_addr=None, cert=None, key=None, verify_cert=True): container = container_get( name, remote_addr, cert, key, verify_cert, _raw=True ) return _get_property_dict_item(container, 'devices', device_name)
Get a container device

name : Name of the container

device_name : The device name to retrieve

remote_addr : A URL to a remote server. You also have to give cert and key if you provide remote_addr and it's a TCP address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock

cert : PEM formatted SSL certificate. Examples: ~/.config/lxc/client.crt

key : PEM formatted SSL key. Examples: ~/.config/lxc/client.key

verify_cert : True Whether to verify the cert; this is True by default, but in most cases you want to set it off as LXD normally uses self-signed certificates.
def get_counters(counter_list):
    if not isinstance(counter_list, list):
        raise CommandExecutionError('counter_list must be a list of tuples')
    try:
        query = win32pdh.OpenQuery()
        counters = build_counter_list(counter_list)
        for counter in counters:
            counter.add_to_query(query)
        win32pdh.CollectQueryData(query)
        time.sleep(1)
        win32pdh.CollectQueryData(query)
        ret = {}
        for counter in counters:
            try:
                ret.update({counter.path: counter.value()})
            except pywintypes.error as exc:
                if exc.strerror == 'No data to return.':
                    continue
                else:
                    raise
    finally:
        win32pdh.CloseQuery(query)
    return ret
Get the values for the passed list of counters

Args:
    counter_list (list): A list of counters to lookup

Returns:
    dict: A dictionary of counters and their values
def version(): cmd = ['varnishd', '-V'] out = __salt__['cmd.run'](cmd, python_shell=False) ret = re.search(r'\(varnish-([^\)]+)\)', out).group(1) return ret
Return server version from varnishd -V CLI Example: .. code-block:: bash salt '*' varnish.version
def broken_chains(samples, chains):
    samples = np.asarray(samples)
    if samples.ndim != 2:
        raise ValueError("expected samples to be a numpy 2D array")
    num_samples, num_variables = samples.shape
    num_chains = len(chains)
    broken = np.zeros((num_samples, num_chains), dtype=bool, order='F')
    for cidx, chain in enumerate(chains):
        if isinstance(chain, set):
            chain = list(chain)
        chain = np.asarray(chain)
        if chain.ndim > 1:
            raise ValueError("chains should be 1D array_like objects")
        if len(chain) <= 1:
            continue
        all_ = (samples[:, chain] == 1).all(axis=1)
        any_ = (samples[:, chain] == 1).any(axis=1)
        broken[:, cidx] = np.bitwise_xor(all_, any_)
    return broken
Find the broken chains. Args: samples (array_like): Samples as a nS x nV array_like object where nS is the number of samples and nV is the number of variables. The values should all be 0/1 or -1/+1. chains (list[array_like]): List of chains of length nC where nC is the number of chains. Each chain should be an array_like collection of column indices in samples. Returns: :obj:`numpy.ndarray`: A nS x nC boolean array. If i, j is True, then chain j in sample i is broken. Examples: >>> samples = np.array([[-1, +1, -1, +1], [-1, -1, +1, +1]], dtype=np.int8) >>> chains = [[0, 1], [2, 3]] >>> dwave.embedding.broken_chains(samples, chains) array([[True, True], [ False, False]]) >>> samples = np.array([[-1, +1, -1, +1], [-1, -1, +1, +1]], dtype=np.int8) >>> chains = [[0, 2], [1, 3]] >>> dwave.embedding.broken_chains(samples, chains) array([[False, False], [ True, True]])
def on_message(self, event): metadata = self._parse_metadata(event) message = Message(text=metadata['text'], metadata=metadata).__dict__ if message.get('text'): message['text'] = self.find_and_replace_userids(message['text']) message['text'] = self.find_and_replace_channel_refs( message['text'] ) return message
Runs when a message event is received

Args:
    event: RTM API event.

Returns:
    Legobot.message
def seek(self, offset, whence=Seek.set):
    _whence = int(whence)
    if _whence == Seek.current:
        offset += self._pos
    if _whence == Seek.current or _whence == Seek.set:
        if offset < 0:
            raise ValueError("Negative seek position {}".format(offset))
    elif _whence == Seek.end:
        if offset > 0:
            raise ValueError("Positive seek position {}".format(offset))
        offset += self._end
    else:
        raise ValueError(
            "Invalid whence ({}, should be {}, {} or {})".format(
                _whence, Seek.set, Seek.current, Seek.end
            )
        )
    if offset < self._pos:
        self._f = self._zip.open(self.name)
        self._pos = 0
    self.read(offset - self._pos)
    return self._pos
Change stream position. Change the stream position to the given byte offset. The offset is interpreted relative to the position indicated by ``whence``. Arguments: offset (int): the offset to the new position, in bytes. whence (int): the position reference. Possible values are: * `Seek.set`: start of stream (the default). * `Seek.current`: current position; offset may be negative. * `Seek.end`: end of stream; offset must be negative. Returns: int: the new absolute position. Raises: ValueError: when ``whence`` is not known, or ``offset`` is invalid. Note: Zip compression does not support seeking, so the seeking is emulated. Seeking somewhere else than the current position will need to either: * reopen the file and restart decompression * read and discard data to advance in the file
def converged_ionic(self): nsw = self.parameters.get("NSW", 0) return nsw <= 1 or len(self.ionic_steps) < nsw
Checks that ionic step convergence has been reached, i.e. that vasp exited before reaching the max ionic steps for a relaxation run
def add_edge_bearings(G): for u, v, data in G.edges(keys=False, data=True): if u == v: data['bearing'] = np.nan else: origin_point = (G.nodes[u]['y'], G.nodes[u]['x']) destination_point = (G.nodes[v]['y'], G.nodes[v]['x']) bearing = get_bearing(origin_point, destination_point) data['bearing'] = round(bearing, 3) return G
Calculate the compass bearing from origin node to destination node for each edge in the directed graph then add each bearing as a new edge attribute. Parameters ---------- G : networkx multidigraph Returns ------- G : networkx multidigraph
def get_connection_string(connection=None):
    if not connection:
        config = configparser.ConfigParser()
        cfp = defaults.config_file_path
        if os.path.exists(cfp):
            log.info('fetch database configuration from %s', cfp)
            config.read(cfp)
            connection = config['database']['sqlalchemy_connection_string']
            log.info('load connection string from %s: %s', cfp, connection)
        else:
            with open(cfp, 'w') as config_file:
                connection = defaults.sqlalchemy_connection_string_default
                config['database'] = {'sqlalchemy_connection_string': connection}
                config.write(config_file)
                log.info('create configuration file %s', cfp)
    return connection
return SQLAlchemy connection string if it is set :param connection: get the SQLAlchemy connection string #TODO :rtype: str
def _viewbox_unset(self, viewbox): self._viewbox = None viewbox.events.mouse_press.disconnect(self.viewbox_mouse_event) viewbox.events.mouse_release.disconnect(self.viewbox_mouse_event) viewbox.events.mouse_move.disconnect(self.viewbox_mouse_event) viewbox.events.mouse_wheel.disconnect(self.viewbox_mouse_event) viewbox.events.resize.disconnect(self.viewbox_resize_event)
Friend method of viewbox to unregister itself.
def dump_registers_peek(registers, data, separator = ' ', width = 16): if None in (registers, data): return '' names = compat.keys(data) names.sort() result = '' for reg_name in names: tag = reg_name.lower() dumped = HexDump.hexline(data[reg_name], separator, width) result += '%s -> %s\n' % (tag, dumped) return result
Dump data pointed to by the given registers, if any. @type registers: dict( str S{->} int ) @param registers: Dictionary mapping register names to their values. This value is returned by L{Thread.get_context}. @type data: dict( str S{->} str ) @param data: Dictionary mapping register names to the data they point to. This value is returned by L{Thread.peek_pointers_in_registers}. @rtype: str @return: Text suitable for logging.
def read(self, file_or_filename):
    if isinstance(file_or_filename, basestring):
        fname = os.path.basename(file_or_filename)
        logger.info("Unpickling case file [%s]." % fname)
        file = None
        try:
            file = open(file_or_filename, "rb")
        except:
            logger.error("Error opening %s." % fname)
            return None
        finally:
            if file is not None:
                case = pickle.load(file)
                file.close()
    else:
        file = file_or_filename
        case = pickle.load(file)
    return case
Loads a pickled case.
def bind(self, cube): table, column = self._physical_column(cube, self.column_name) column = column.label(self.matched_ref) column.quote = True return table, column
Map a model reference to a physical column in the database.
def close(self):
    if not (yield from super().close()):
        return False
    for adapter in self._ethernet_adapters.values():
        if adapter is not None:
            for nio in adapter.ports.values():
                if nio and isinstance(nio, NIOUDP):
                    self.manager.port_manager.release_udp_port(nio.lport, self._project)
    try:
        self.acpi_shutdown = False
        yield from self.stop()
    except VMwareError:
        pass
    if self.linked_clone:
        yield from self.manager.remove_from_vmware_inventory(self._vmx_path)
Closes this VMware VM.
def GetVolumeByIndex(self, volume_index): if not self._is_parsed: self._Parse() self._is_parsed = True if volume_index < 0 or volume_index >= len(self._volume_identifiers): return None volume_identifier = self._volume_identifiers[volume_index] return self._volumes[volume_identifier]
Retrieves a specific volume based on the index. Args: volume_index (int): index of the volume. Returns: Volume: a volume or None if not available.
def _validate_translation(self, aligned_prot, aligned_nucl):
    codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
    for codon, aa in zip(codons, str(aligned_prot)):
        if codon == '---' and aa == '-':
            continue
        try:
            trans = self.translation_table.forward_table[codon]
            if not trans == aa:
                raise ValueError("Codon {0} translates to {1}, not {2}".format(
                    codon, trans, aa))
        except (KeyError, CodonTable.TranslationError):
            if aa != 'X':
                if self.unknown_action == 'fail':
                    raise ValueError("Unknown codon: {0} mapped to {1}".format(
                        codon, aa))
                elif self.unknown_action == 'warn':
                    logging.warn('Cannot verify that unknown codon %s '
                                 'maps to %s', codon, aa)
    return True
Given a seq for protein and nucleotide, ensure that the translation holds
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):
    attributed_strings = ui_element.getElementsByTagName('attributedString')
    if attributed_strings.length == 0:
        return False
    attributed_element = attributed_strings[0]
    fragment_index = 1
    for fragment in attributed_element.getElementsByTagName('fragment'):
        try:
            label_entry_key = fragment.attributes['content'].value
        except KeyError:
            label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue
        comment = "%s Part %d" % (comment_prefix, fragment_index)
        results.append((label_entry_key, comment))
        fragment_index += 1
    return fragment_index > 1
Adds string pairs from a UI element with attributed text.

Args:
    results (list): The list to add the results to.
    ui_element (element): The element from the xib to extract the attributed-text fragments from.
    comment_prefix (str): The prefix of the comment to use for each extracted string ("Part X" suffixes will be appended).

Returns:
    bool: Whether or not an attributed string was found.
def _post_process_yaml_data(self,
                            fixture_data: Dict[str, Dict[str, Any]],
                            relationship_columns: Set[str],
                            ) -> Tuple[Dict[str, Dict[str, Any]], List[str]]:
    rv = {}
    relationships = set()
    if not fixture_data:
        return rv, relationships
    for identifier_id, data in fixture_data.items():
        new_data = {}
        for col_name, value in data.items():
            if col_name not in relationship_columns:
                new_data[col_name] = value
                continue
            identifiers = normalize_identifiers(value)
            if identifiers:
                relationships.add(identifiers[0].class_name)
            if isinstance(value, str) and len(identifiers) <= 1:
                new_data[col_name] = identifiers[0] if identifiers else None
            else:
                new_data[col_name] = identifiers
        rv[identifier_id] = new_data
    return rv, list(relationships)
Convert and normalize identifier strings to Identifiers, as well as determine class relationships.
def convert_objects(self, convert_dates=True, convert_numeric=False,
                    convert_timedeltas=True, copy=True):
    msg = ("convert_objects is deprecated. To re-infer data dtypes for "
           "object columns, use {klass}.infer_objects()\nFor all "
           "other conversions use the data-type specific converters "
           "pd.to_datetime, pd.to_timedelta and pd.to_numeric."
           ).format(klass=self.__class__.__name__)
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self._constructor(
        self._data.convert(convert_dates=convert_dates,
                           convert_numeric=convert_numeric,
                           convert_timedeltas=convert_timedeltas,
                           copy=copy)).__finalize__(self)
Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type.
def reserve(self, timeout=None):
    if timeout is not None:
        command = 'reserve-with-timeout %d\r\n' % timeout
    else:
        command = 'reserve\r\n'
    try:
        return self._interact_job(command,
                                  ['RESERVED'],
                                  ['DEADLINE_SOON', 'TIMED_OUT'])
    except CommandFailed:
        exc = sys.exc_info()[1]
        _, status, results = exc.args
        if status == 'TIMED_OUT':
            return None
        elif status == 'DEADLINE_SOON':
            raise DeadlineSoon(results)
Reserve a job from one of the watched tubes, with optional timeout in seconds. Returns a Job object, or None if the request times out.
def filter_short(terms): return [term for i, term in enumerate(terms) if 26**(len(term)) > i]
Only keep a term if the number of brute-force possibilities for a string of its length (26**len(term)) is greater than the word's rank in the dictionary.
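A worked illustration of that criterion (the word list is hypothetical; rank is the index in the list):

terms = ['cat', 'dog', 'a']
kept = filter_short(terms)
# 'a' sits at rank 2 and 26**1 == 26 > 2, so everything is kept here;
# a one-letter word would only be dropped at rank 26 or beyond.
assert kept == terms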
def to_json(self, *, include_keys=None, exclude_keys=None, use_default_excludes=True, pretty=False): return to_json( self.to_dict( include_keys=include_keys, exclude_keys=exclude_keys, use_default_excludes=use_default_excludes), pretty=pretty)
Converts the response from to_dict to a JSON string. If pretty is True then newlines, indentation and key sorting are used.
def adjust(color, attribute, percent): r, g, b, a, type = parse_color(color) r, g, b = hsl_to_rgb(*_adjust(rgb_to_hsl(r, g, b), attribute, percent)) return unparse_color(r, g, b, a, type)
Adjust an attribute of color by a percent
def open_channel(self): if self._closing: raise ConnectionClosed("Closed by application") if self.closed.done(): raise self.closed.exception() channel = yield from self.channel_factory.open() return channel
Open a new channel on this connection. This method is a :ref:`coroutine <coroutine>`. :return: The new :class:`Channel` object.
def _loadOneSource(self, sourceFName):
    sourceLines = open(sourceFName).readlines()
    del sourceLines[0]
    if len(sourceLines[0].split("\t")) == 2:
        self._loadTwoPartSource(sourceFName, sourceLines)
    elif len(sourceLines[0].split("\t")) == 3:
        self._loadThreePartSource(sourceFName, sourceLines)
    else:
        raise Error("%s does not appear to be a source authority file" % sourceFName)
handles one authority file including format auto-detection.
def apply_noise(data, noise):
    if noise >= 1:
        noise = noise/100.
    for i in range(data.nRows()):
        ones = data.rowNonZeros(i)[0]
        replace_indices = numpy.random.choice(ones,
                                              size=int(len(ones)*noise),
                                              replace=False)
        for index in replace_indices:
            data[i, index] = 0
        new_indices = numpy.random.choice(data.nCols(),
                                          size=int(len(ones)*noise),
                                          replace=False)
        for index in new_indices:
            while data[i, index] == 1:
                index = numpy.random.randint(0, data.nCols())
            data[i, index] = 1
Applies noise to a sparse matrix. Noise can be an integer between 0 and 100, indicating the percentage of ones in the original input to move, or a float in [0, 1), indicating the same thing. The input matrix is modified in-place, and nothing is returned. This operation does not affect the sparsity of the matrix, or of any individual datapoint.
def get_data(self, endpoint="privacy"): if endpoint == "privacy": response = self._req('/data/privacy') elif endpoint == "submission": response = self._req('/data/submission') elif endpoint == "tos": response = self._req('/data/tos') else: raise DeviantartError("Unknown endpoint.") return response['text']
Returns policies of DeviantArt
def get_bytearray(self): if isinstance(self._bytearray, DB): return self._bytearray._bytearray return self._bytearray
return bytearray from self or DB parent
async def start_timeout(self): self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.timeout)
Start timeout.
def find_first_file_with_ext(base_paths, prefix, exts):
    for base_path in base_paths:
        for ext in exts:
            filename = os.path.join(base_path, "%s%s" % (prefix, ext))
            if os.path.exists(filename) and os.path.isfile(filename):
                logger.debug("Found first file with relevant extension: %s", filename)
                return base_path, ext
    logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
    return None, None
Runs through the given list of file extensions and returns the first file with the given base path and extension combination that actually exists. Args: base_paths: The base paths in which to search for files. prefix: The filename prefix of the file for which to search. exts: An ordered list of file extensions for which to search. Returns: On success, a 2-tuple containing the base path in which the file was found, and the extension of the file. On failure, returns (None, None).
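A short usage sketch (directories, prefix and extensions are hypothetical):

base_path, ext = find_first_file_with_ext(['./config', '/etc/myapp'], 'settings', ['.yaml', '.json'])
if base_path is None:
    raise FileNotFoundError('no settings file found')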
def runDynTask(task): func = getDynLocal(task[0]) if func is None: raise s_exc.NoSuchFunc(name=task[0]) return func(*task[1], **task[2])
Run a dynamic task and return the result. Example: foo = runDynTask( ('baz.faz.Foo', (), {} ) )
def get_description(self, language): description = self.gettext(language, self._description) if self._description else '' return ParsableText(description, "rst", self._translations.get(language, gettext.NullTranslations()))
Returns the course description
def find_pkg_dist(pkg_name, working_set=None): working_set = working_set or default_working_set req = Requirement.parse(pkg_name) return working_set.find(req)
Locate a package's distribution by its name.
def _es_margin(settings): return {k: settings[k] for k in (ConsoleWidget.SETTING_MARGIN, ConsoleWidget.SETTING_MARGIN_LEFT, ConsoleWidget.SETTING_MARGIN_RIGHT, ConsoleWidget.SETTING_MARGIN_CHAR)}
Extract margin formating related subset of widget settings.