def ExpireObject(self, key):
    node = self._hash.pop(key, None)
    if node:
        self._age.Unlink(node)
        self.KillObject(node.data)
        return node.data
Expire a specific object from cache.
def open_link(self):
    data = self.get_selected_item()
    if data['url_type'] == 'selfpost':
        self.open_submission()
    elif data['url_type'] == 'x-post subreddit':
        self.refresh_content(order='ignore', name=data['xpost_subreddit'])
    elif data['url_type'] == 'x-post submission':
        self.open_submission(url=data['url_full'])
        self.config.history.add(data['url_full'])
    else:
        self.term.open_link(data['url_full'])
        self.config.history.add(data['url_full'])
Open a link with the webbrowser
def set_dimensions(self, variables, unlimited_dims=None):
    if unlimited_dims is None:
        unlimited_dims = set()
    existing_dims = self.get_dimensions()
    dims = OrderedDict()
    for v in unlimited_dims:
        dims[v] = None
    for v in variables.values():
        dims.update(dict(zip(v.dims, v.shape)))
    for dim, length in dims.items():
        if dim in existing_dims and length != existing_dims[dim]:
            # Note: a space was added before %r so the message reads correctly.
            raise ValueError(
                "Unable to update size for existing dimension "
                "%r (%d != %d)" % (dim, length, existing_dims[dim]))
        elif dim not in existing_dims:
            is_unlimited = dim in unlimited_dims
            self.set_dimension(dim, length, is_unlimited)
This provides a centralized method to set the dimensions on the data store.

Parameters
----------
variables : dict-like
    Dictionary of key/value (variable name / xr.Variable) pairs
unlimited_dims : list-like
    List of dimension names that should be treated as unlimited dimensions.
def generate_admin_metadata(name, creator_username=None):
    if not dtoolcore.utils.name_is_valid(name):
        raise DtoolCoreInvalidNameError()
    if creator_username is None:
        creator_username = dtoolcore.utils.getuser()
    datetime_obj = datetime.datetime.utcnow()
    admin_metadata = {
        "uuid": str(uuid.uuid4()),
        "dtoolcore_version": __version__,
        "name": name,
        "type": "protodataset",
        "creator_username": creator_username,
        "created_at": dtoolcore.utils.timestamp(datetime_obj)
    }
    return admin_metadata
Return admin metadata as a dictionary.
def get_encoded_query_params(self):
    get_data = encode_items(self.request.GET.lists())
    return urlencode(get_data)
Return encoded query params to be used in proxied request
def _db_to_python(db_data: dict, table: LdapObjectClass, dn: str) -> LdapObject:
    fields = table.get_fields()
    python_data = table({
        name: field.to_python(db_data[name])
        for name, field in fields.items()
        if field.db_field
    })
    python_data = python_data.merge({
        'dn': dn,
    })
    return python_data
Convert a db_data dict to an LdapObject.
def get_text_for_repeated_menu_item(
    self, request=None, current_site=None, original_menu_tag='', **kwargs
):
    source_field_name = settings.PAGE_FIELD_FOR_MENU_ITEM_TEXT
    return self.repeated_item_text or getattr(
        self, source_field_name, self.title
    )
Return a string to use as 'text' for this page when it is being included as a 'repeated' menu item in a menu. You might want to override this method if you're creating a multilingual site and you have different translations of 'repeated_item_text' that you wish to surface.
async def debug_command_message(self, message, context):
    conn_string = message.get('connection_string')
    command = message.get('command')
    args = message.get('args')
    client_id = context.user_data
    result = await self.debug(client_id, conn_string, command, args)
    return result
Handle a debug message. See :meth:`AbstractDeviceAdapter.debug`.
def getMeta(self, uri):
    action = urlparse(uri).path
    mediaKey = self.cacheKey + '_meta_' + action
    mediaKey = mediaKey.replace(' ', '__')
    meta = cache.get(mediaKey, None)
    if not meta:
        r = self.doQuery('meta/' + uri)
        if r.status_code == 200:
            meta = r.json()
            if 'expire' not in r.headers:
                expire = 5 * 60
            else:
                expire = int((parser.parse(r.headers['expire']) -
                              datetime.datetime.now(tzutc())).total_seconds())
            if expire > 0:
                cache.set(mediaKey, meta, expire)
    return meta
Return meta information about an action. Cache the result as specified by the server
def sensor_id(self):
    if hasattr(self, '_sensor_id'):
        return self._sensor_id
    relationships = self._json_data.get('relationships')
    sensor_id = relationships.get('sensor').get('data').get('id')
    self._sensor_id = sensor_id
    return sensor_id
The id of the sensor of this data point.

Returns:
    The id of the sensor that generated this datapoint. Will throw an
    AttributeError if no sensor id was found in the underlying data.
def set_illuminant(self, illuminant):
    illuminant = illuminant.lower()
    if illuminant not in color_constants.ILLUMINANTS[self.observer]:
        raise InvalidIlluminantError(illuminant)
    self.illuminant = illuminant
Validates and sets the color's illuminant.

.. note:: This only changes the illuminant. It does no conversion of the
    color's coordinates. For this, you'll want to refer to
    :py:meth:`XYZColor.apply_adaptation <colormath.color_objects.XYZColor.apply_adaptation>`.

.. tip:: Call this after setting your observer.

:param str illuminant: One of the various illuminants.
def can_import(self, file_uris, current_doc=None):
    if len(file_uris) <= 0:
        return False
    for file_uri in file_uris:
        file_uri = self.fs.safe(file_uri)
        if not self.check_file_type(file_uri):
            return False
    return True
Check that the specified file looks like an image supported by PIL
def _make_attr_element_from_typeattr(parent, type_attr_i):
    attr = _make_attr_element(parent, type_attr_i.attr)
    if type_attr_i.unit_id is not None:
        attr_unit = etree.SubElement(attr, 'unit')
        attr_unit.text = units.get_unit(type_attr_i.unit_id).abbreviation
    attr_is_var = etree.SubElement(attr, 'is_var')
    attr_is_var.text = type_attr_i.attr_is_var
    if type_attr_i.data_type is not None:
        attr_data_type = etree.SubElement(attr, 'data_type')
        attr_data_type.text = type_attr_i.data_type
    if type_attr_i.data_restriction is not None:
        attr_data_restriction = etree.SubElement(attr, 'restrictions')
        attr_data_restriction.text = type_attr_i.data_restriction
    return attr
General function to add an attribute element to a resource element. resource_attr_i can also be a type_attr if being called from get_template_as_xml.
def format_str(self):
    if self.static:
        return self.route.replace('%', '%%')
    out, i = '', 0
    for token, value in self.tokens():
        if token == 'TXT':
            out += value.replace('%', '%%')
        elif token == 'ANON':
            out += '%%(anon%d)s' % i
            i += 1
        elif token == 'VAR':
            out += '%%(%s)s' % value[1]
    return out
Return a format string with named fields.
def bind_df_model(model_fit, arima_results):
    if not hasattr(arima_results, 'df_model'):
        df_model = model_fit.k_exog + model_fit.k_trend + \
            model_fit.k_ar + model_fit.k_ma + \
            model_fit.k_seasonal_ar + model_fit.k_seasonal_ma
        setattr(arima_results, 'df_model', df_model)
Set model degrees of freedom.

Older versions of statsmodels don't handle this issue. Sets the model
degrees of freedom in place if not already present.

Parameters
----------
model_fit : ARMA, ARIMA or SARIMAX
    The fitted model.
arima_results : ModelResultsWrapper
    The results wrapper.
def update_unit(unit, **kwargs):
    try:
        # The original chained an empty .filter() and mixed attribute access
        # (unit.abbreviation) with dict access; both are normalized here.
        db_unit = db.DBSession.query(Unit).join(Dimension).filter(
            Unit.id == unit["id"]).one()
        db_unit.name = unit["name"]
        db_unit.abbreviation = unit["abbreviation"]
        db_unit.description = unit["description"]
        db_unit.lf = unit["lf"]
        db_unit.cf = unit["cf"]
        if "project_id" in unit and unit['project_id'] is not None \
                and unit['project_id'] != "":
            db_unit.project_id = unit["project_id"]
    except NoResultFound:
        raise ResourceNotFoundError("Unit (ID=%s) does not exist" % (unit["id"]))
    db.DBSession.flush()
    return JSONObject(db_unit)
Update a unit in the DB. Raises an exception if the unit does not exist.
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
    result = tuple((s.value, s.name.title()) for s in source)
    return result
Makes a tuple to use in Django's Fields ``choices`` attribute. Enum member names will be titles for the choices.

:param source: Enum to process.
:return: Tuple to put into ``choices``
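A minimal usage sketch; the Color enum here is hypothetical, not part of the source:

    from enum import Enum

    class Color(Enum):  # hypothetical example enum
        RED = 'r'
        GREEN = 'g'

    choices_from_enum(Color)  # (('r', 'Red'), ('g', 'Green'))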
def el_to_path_vector(el):
    path = []
    while el.parent:
        path.append(el)
        el = el.parent
    return list(reversed(path + [el]))
Convert `el` to a vector of foregoing elements.

Args:
    el (obj): Double-linked HTMLElement instance.

Returns:
    list: HTMLElements which are considered the path from root to `el`.
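A sketch of the traversal using a hypothetical Node class in place of a real HTMLElement (only the parent attribute matters):

    class Node:  # hypothetical stand-in for HTMLElement
        def __init__(self, name, parent=None):
            self.name, self.parent = name, parent

    root = Node('html')
    body = Node('body', parent=root)
    div = Node('div', parent=body)
    [n.name for n in el_to_path_vector(div)]  # ['html', 'body', 'div']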
def get_word_under_cursor(self, WORD=False):
    start, end = self.find_boundaries_of_current_word(WORD=WORD)
    return self.text[self.cursor_position + start: self.cursor_position + end]
Return the word currently below the cursor. This returns an empty string when the cursor is on a whitespace region.
def _get_start_end(parts, index=7):
    start = parts[1]
    end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")]
    if end:
        end = end[0]
        return start, end
    return None, None
Retrieve start and end for a VCF record, skips BNDs without END coords
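For illustration, a hypothetical VCF line split into whitespace-separated fields, where parts[1] is POS and parts[7] is INFO:

    parts = 'chr1 100 . A <DEL> . PASS SVTYPE=DEL;END=500'.split()
    _get_start_end(parts)    # ('100', '500')
    parts[7] = 'SVTYPE=BND'  # a BND record with no END coordinate
    _get_start_end(parts)    # (None, None)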
def update(self):
    for s in self.sensors:
        s.position = self.io.get_object_position(object_name=s.name)
        s.orientation = self.io.get_object_orientation(object_name=s.name)
Updates the position and orientation of the tracked objects.
def generate_transaction_id(stmt_line):
    return str(abs(hash((stmt_line.date, stmt_line.memo, stmt_line.amount))))
Generate pseudo-unique id for given statement line. This function can be used in statement parsers when real transaction id is not available in source statement.
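A usage sketch with a hypothetical stand-in for a statement line. Note that Python 3 randomizes string hashes per process, so the generated ids are only stable within a run unless PYTHONHASHSEED is fixed:

    from collections import namedtuple

    StmtLine = namedtuple('StmtLine', 'date memo amount')  # hypothetical
    line = StmtLine('2020-01-01', 'COFFEE SHOP', -3.50)
    generate_transaction_id(line)  # e.g. '7143899559880507891' (run-dependent)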
def _combine_attribute(attr_1, attr_2, len_1, len_2):
    if isinstance(attr_1, list) or isinstance(attr_2, list):
        attribute = np.concatenate((attr_1, attr_2), axis=0)
        attribute_changes = True
    else:
        # Neither attribute is a list here, so compare the values directly.
        # (The original re-checked isinstance(..., list) in this branch, which
        # could never be true, and had a precedence typo: type(attr_2 != list).)
        if np.allclose(attr_1, attr_2):
            attribute = attr_1
            attribute_changes = False
        else:
            attribute = [attr_1.copy()] * len_1 if type(attr_1) != list else attr_1.copy()
            attribute.extend([attr_2.copy()] * len_2 if type(attr_2) != list else attr_2.copy())
            attribute_changes = True
    return attribute, attribute_changes
Helper function to combine trajectory properties such as site_properties or lattice
def add(self, *args, **kwargs):
    check_uniqueness = kwargs.pop('check_uniqueness', False)
    args = self.prepare_args(args)
    for index in self._indexes:
        index.add(*args, check_uniqueness=check_uniqueness and index.handle_uniqueness,
                  **kwargs)
        if check_uniqueness and index.handle_uniqueness:
            check_uniqueness = False
Add the instance tied to the field to all the indexes. For the parameters, see BaseIndex.add.
def create(cls, jobStore, leaderPath):
    pathHash = cls._pathHash(leaderPath)
    contentHash = hashlib.md5()
    with cls._load(leaderPath) as src:
        with jobStore.writeSharedFileStream(sharedFileName=pathHash,
                                            isProtected=False) as dst:
            userScript = src.read()
            contentHash.update(userScript)
            dst.write(userScript)
    return cls(name=os.path.basename(leaderPath),
               pathHash=pathHash,
               url=jobStore.getSharedPublicUrl(sharedFileName=pathHash),
               contentHash=contentHash.hexdigest())
Saves the content of the file or directory at the given path to the given
job store and returns a resource object representing that content for the
purpose of obtaining it again at a generic, public URL. This method should
be invoked on the leader node.

:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param str leaderPath:
:rtype: Resource
def assets(self) -> List[Asset]:
    return list(filter(is_element(Asset), self.content))
Returns the assets in the transaction.
def construct(cls, attempt_number, value, has_exception):
    fut = cls(attempt_number)
    if has_exception:
        fut.set_exception(value)
    else:
        fut.set_result(value)
    return fut
Construct a new Future object.
def _format_nsn(number, metadata, num_format, carrier_code=None):
    intl_number_formats = metadata.intl_number_format
    if (len(intl_number_formats) == 0 or
            num_format == PhoneNumberFormat.NATIONAL):
        available_formats = metadata.number_format
    else:
        available_formats = metadata.intl_number_format
    formatting_pattern = _choose_formatting_pattern_for_number(available_formats, number)
    if formatting_pattern is None:
        return number
    else:
        return _format_nsn_using_pattern(number, formatting_pattern,
                                         num_format, carrier_code)
Format a national number.
def get_all_parts(self, max_parts=None, part_number_marker=None):
    self._parts = []
    query_args = 'uploadId=%s' % self.id
    if max_parts:
        query_args += '&max-parts=%d' % max_parts
    if part_number_marker:
        query_args += '&part-number-marker=%s' % part_number_marker
    response = self.bucket.connection.make_request('GET', self.bucket.name,
                                                   self.key_name,
                                                   query_args=query_args)
    body = response.read()
    if response.status == 200:
        h = handler.XmlHandler(self, self)
        xml.sax.parseString(body, h)
        return self._parts
Return the uploaded parts of this MultiPart Upload. This is a lower-level method that requires you to manually page through results. To simplify this process, you can just use the object itself as an iterator and it will automatically handle all of the paging with S3.
def _create_trustdb(cls):
    trustdb = os.path.join(cls.homedir, 'trustdb.gpg')
    if not os.path.isfile(trustdb):
        log.info("GnuPG complained that your trustdb file was missing. %s"
                 % "This is likely due to changing to a new homedir.")
        log.info("Creating trustdb.gpg file in your GnuPG homedir.")
        cls.fix_trustdb(trustdb)
Create the trustdb file in our homedir, if it doesn't exist.
def create_channel(
    target: str,
    options: Optional[List[Tuple[str, Any]]] = None,
    interceptors: Optional[List[ClientInterceptor]] = None,
) -> grpc.Channel:
    options = (options or []) + [
        ("grpc.max_send_message_length", grpc_max_msg_size),
        ("grpc.max_receive_message_length", grpc_max_msg_size),
    ]
    interceptors = interceptors or []
    channel = grpc.insecure_channel(target, options)
    return grpc.intercept_channel(channel, *interceptors)
Creates a gRPC channel

The gRPC channel is created with the provided options and intercepts each
invocation via the provided interceptors.

The created channel is configured with the following default options:
- "grpc.max_send_message_length": 100MB,
- "grpc.max_receive_message_length": 100MB.

:param target: the server address.
:param options: optional list of key-value pairs to configure the channel.
:param interceptors: optional list of client interceptors.
:returns: a gRPC channel.
def set_position(self, position):
    if position > self._duration():
        return
    position_ns = position * _NANOSEC_MULT
    self._manager[ATTR_POSITION] = position
    self._player.seek_simple(_FORMAT_TIME, Gst.SeekFlags.FLUSH, position_ns)
Set media position.
def append_pdf(input_pdf: bytes, output_writer: PdfFileWriter):
    append_memory_pdf_to_writer(input_pdf=input_pdf, writer=output_writer)
Appends a PDF to a pyPDF writer. Legacy interface.
def browse_in_qt5_ui(self):
    self._render_type = "browse"
    self._tree.show(tree_style=self._get_tree_style())
Browse and edit the SubjectInfo in a simple Qt5 based UI.
def find_conflicts_within_selection_set(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
) -> List[Conflict]:
    conflicts: List[Conflict] = []
    field_map, fragment_names = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type, selection_set
    )
    collect_conflicts_within(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        field_map,
    )
    if fragment_names:
        compared_fragments: Set[str] = set()
        for i, fragment_name in enumerate(fragment_names):
            collect_conflicts_between_fields_and_fragment(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragments,
                compared_fragment_pairs,
                False,
                field_map,
                fragment_name,
            )
            for other_fragment_name in fragment_names[i + 1:]:
                collect_conflicts_between_fragments(
                    context,
                    conflicts,
                    cached_fields_and_fragment_names,
                    compared_fragment_pairs,
                    False,
                    fragment_name,
                    other_fragment_name,
                )
    return conflicts
Find conflicts within selection set. Find all conflicts found "within" a selection set, including those found via spreading in fragments. Called when visiting each SelectionSet in the GraphQL Document.
def deletesshkey(self, key_id):
    request = requests.delete(
        '{0}/{1}'.format(self.keys_url, key_id),
        headers=self.headers,
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout)
    if request.content == b'null':
        return False
    else:
        return True
Deletes an sshkey for the current user identified by id

:param key_id: the id of the key
:return: False if it didn't delete it, True if it was deleted
def get_correlated_report_ids(self, indicators):
    params = {'indicators': indicators}
    resp = self._client.get("reports/correlate", params=params)
    return resp.json()
DEPRECATED!

Retrieves a list of the IDs of all TruSTAR reports that contain the
searched indicators.

:param indicators: A list of indicator values to retrieve correlated
    reports for.
:return: The list of IDs of reports that correlated.

Example:
>>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"])
>>> print(report_ids)
["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
def read_string(self, key, embedded=True):
    data = None
    if key is not None:
        key_type = self.variable_type(key)
        data = self.db.read(key.strip())
        if data is not None:
            try:
                data = json.loads(data)
                if embedded:
                    data = self.read_embedded(data, key_type)
                if data is not None:
                    data = u'{}'.format(data)
            except ValueError as e:
                err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)
                self.tcex.log.error(err)
    else:
        self.tcex.log.warning(u'The key field was None.')
    return data
Read method of CRUD operation for string data.

Args:
    key (string): The variable to read from the DB.
    embedded (boolean): Resolve embedded variables.

Returns:
    (string): Results retrieved from DB.
def update_energy(self, bypass_check=False):
    for outlet in self.outlets:
        outlet.update_energy(bypass_check)
Fetch updated energy information about devices
def register_observer(self, observer, events=None):
    if events is not None and not isinstance(events, (tuple, list)):
        events = (events,)
    if observer in self._observers:
        LOG.warning("Observer '%r' already registered, overwriting for events"
                    " %r", observer, events)
    self._observers[observer] = events
Register a listener function.

:param observer: external listener function
:param events: tuple or list of relevant events (default=None)
def delete_bigger(self):
    logger.info(
        "Deleting all mails strictly bigger than {} bytes...".format(
            self.smallest_size))
    candidates = [
        mail for mail in self.pool if mail.size > self.smallest_size]
    if len(candidates) == self.size:
        logger.warning(
            "Skip deletion: all {} mails share the same size."
            "".format(self.size))
        # The warning says deletion is skipped, so bail out here; the
        # flattened original fell through and deleted anyway.
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all bigger duplicates. Only keeps the subset sharing the smallest size.
def history(self, **kwargs):
    url_str = self.base_url + '/%s/state-history' % kwargs['alarm_id']
    del kwargs['alarm_id']
    if kwargs:
        url_str = url_str + '?%s' % parse.urlencode(kwargs, True)
    resp = self.client.list(url_str)
    return resp['elements'] if type(resp) is dict else resp
History of a specific alarm.
def ajax_login_required(view_func):
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        if request.is_ajax():
            if request.user.is_authenticated():
                return view_func(request, *args, **kwargs)
            else:
                response = http.HttpResponse()
                response['X-Django-Requires-Auth'] = True
                response['X-Django-Login-Url'] = settings.LOGIN_URL
                return response
        else:
            return login_required(view_func)(request, *args, **kwargs)
    return _wrapped_view
Handle non-authenticated users differently if it is an AJAX request.
def update_stack(self, fqn, template, old_parameters, parameters, tags,
                 force_interactive=False, force_change_set=False,
                 stack_policy=None, **kwargs):
    logger.debug("Attempting to update stack %s:", fqn)
    logger.debug("  parameters: %s", parameters)
    logger.debug("  tags: %s", tags)
    if template.url:
        logger.debug("  template_url: %s", template.url)
    else:
        logger.debug("  no template url, uploading template directly.")
    update_method = self.select_update_method(force_interactive,
                                              force_change_set)
    return update_method(fqn, template, old_parameters, parameters,
                         stack_policy=stack_policy, tags=tags, **kwargs)
Update a Cloudformation stack.

Args:
    fqn (str): The fully qualified name of the Cloudformation stack.
    template (:class:`stacker.providers.base.Template`): A Template object
        to use when updating the stack.
    old_parameters (list): A list of dictionaries that defines the
        parameter list on the existing Cloudformation stack.
    parameters (list): A list of dictionaries that defines the parameter
        list to be applied to the Cloudformation stack.
    tags (list): A list of dictionaries that defines the tags that should
        be applied to the Cloudformation stack.
    force_interactive (bool): A flag that indicates whether the update
        should be interactive. If set to True, interactive mode will be
        used no matter if the provider is in interactive mode or not.
        False will follow the behavior of the provider.
    force_change_set (bool): A flag that indicates whether the update must
        be executed with a change set.
    stack_policy (:class:`stacker.providers.base.Template`): A template
        object representing a stack policy.
def set_cache_url(self):
    self.cache_url = urlutil.urlunsplit(self.urlparts[:4] + [u''])
    if self.cache_url is not None:
        assert isinstance(self.cache_url, unicode), repr(self.cache_url)
Set the URL to be used for caching.
def to_dict(self):
    if not self.url:
        return None
    return {
        'url': self.url,
        'width': self.width,
        'height': self.height
    }
Convert Image into raw dictionary data.
def cartesian_to_homogeneous_vectors(cartesian_vector, matrix_type="numpy"):
    dimension_x = cartesian_vector.shape[0]
    if matrix_type == "numpy":
        homogeneous_vector = np.zeros(dimension_x + 1)
        homogeneous_vector[-1] = 1
        homogeneous_vector[:-1] = cartesian_vector
        return homogeneous_vector
Converts a cartesian vector to a homogeneous vector.
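A quick usage sketch with NumPy, appending the affine coordinate 1:

    import numpy as np

    v = np.array([1.0, 2.0, 3.0])
    cartesian_to_homogeneous_vectors(v)  # array([1., 2., 3., 1.])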
def should_run(self):
    should_run = True
    config = self.target or self.source
    if config.has('systems'):
        should_run = False
        valid_systems = [s.lower() for s in config.get('systems').split(",")]
        for system_type, param in [('is_osx', 'osx'), ('is_debian', 'debian')]:
            if param in valid_systems and getattr(system, system_type)():
                should_run = True
    return should_run
Returns true if the feature should run
def get_by_path(path, first=False):
    api = get_api()
    cur_res = api
    parts = path.split(':')
    for part in parts:
        res = getattr(cur_res, part, None)
        if not res:
            res = find_by_name(cur_res, part)
        cur_res = res
    index = getattr(cur_res, 'index', None)
    if index:
        return index()
    return cur_res
Search for resources using colon-separated path notation. E.g.::

    path = 'deployments:production:servers:haproxy'
    haproxies = get_by_path(path)

:param bool first: Always use the first returned match for all intermediate
    searches along the path. If this is ``False`` and an intermediate search
    returns multiple hits, an exception is raised.
def has_parent(self, router):
    parent = self
    while parent and parent is not router:
        parent = parent._parent
    return parent is not None
Check if ``router`` is ``self`` or a parent of ``self``.
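A minimal sketch of the walk up the _parent chain, with a hypothetical Router class (the method is attached manually here just for the demo):

    class Router:  # hypothetical minimal router
        def __init__(self, parent=None):
            self._parent = parent

    Router.has_parent = has_parent
    root = Router()
    child = Router(parent=root)
    child.has_parent(root)   # True (root is an ancestor)
    child.has_parent(child)  # True (self counts)
    root.has_parent(child)   # False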
def cluster_number(self, data, maxgap):
    data.sort()
    groups = [[data[0]]]
    for x in data[1:]:
        if abs(x - groups[-1][-1]) <= maxgap:
            groups[-1].append(x)
        else:
            groups.append([x])
    return groups
General function that clusters numbers.

Args:
    data (list): list of integers.
    maxgap (int): max gap between numbers in the cluster.
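For example (ignoring the unused self argument), values separated by more than maxgap start a new cluster after the in-place sort:

    data = [11, 1, 3, 2, 25, 10]
    cluster_number(None, data, maxgap=2)  # [[1, 2, 3], [10, 11], [25]]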
def reject(lst, *values):
    lst = List.from_maybe(lst)
    values = frozenset(List.from_maybe_starargs(values))
    ret = []
    for item in lst:
        if item not in values:
            ret.append(item)
    return List(ret, use_comma=lst.use_comma)
Removes the given values from the list
def ttr(self, kloc, acc=10**3, verbose=1):
    kloc = numpy.asarray(kloc, dtype=int)
    shape = kloc.shape
    kloc = kloc.reshape(len(self), -1)
    cache = {}
    out = [evaluation.evaluate_recurrence_coefficients(self, k)
           for k in kloc.T]
    out = numpy.array(out).T
    return out.reshape((2,) + shape)
Three terms relation's coefficient generator

Args:
    k (numpy.ndarray, int): The order of the coefficients.
    acc (int): Accuracy of discretized Stieltjes if analytical methods are
        unavailable.

Returns:
    (Recurrence coefficients): Where out[0] is the first (A) and out[1] is
    the second coefficient, with ``out.shape == (2,) + k.shape``.
def Get(self):
    args = user_management_pb2.ApiGetGrrUserArgs(username=self.username)
    data = self._context.SendRequest("GetGrrUser", args)
    return GrrUser(data=data, context=self._context)
Fetches user's data and returns it wrapped in a GrrUser object.
def get_filename(disposition):
    if disposition:
        params = [param.strip() for param in disposition.split(';')[1:]]
        for param in params:
            if '=' in param:
                name, value = param.split('=', 1)
                if name == 'filename':
                    return value.strip('"')
Parse Content-Disposition header to pull out the filename bit. See: http://tools.ietf.org/html/rfc2616#section-19.5.1
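A short usage sketch parsing a typical header value:

    get_filename('attachment; filename="report.pdf"')  # 'report.pdf'
    get_filename('inline')                             # None (no parameters)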
def jsoned(struct, wrap=True, meta=None, struct_key='result',
           pre_render_callback=None):
    return _json.dumps(
        structured(
            struct,
            wrap=wrap,
            meta=meta,
            struct_key=struct_key,
            pre_render_callback=pre_render_callback),
        default=json_encoder)
Provides a json dump of the struct

Args:
    struct: The data to dump
    wrap (bool, optional): Specify whether to wrap the struct in an
        enclosing dict
    struct_key (str, optional): The string key which will contain the
        struct in the result dict
    meta (dict, optional): An optional dictionary to merge with the output
        dictionary.

Examples:
    >>> jsoned([3,4,5])
    ... '{"status": "success", "result": [3, 4, 5]}'
    >>> jsoned([3,4,5], wrap=False)
    ... '[3, 4, 5]'
def account_info(self):
    response = self._post(self.apiurl + "/v2/account/info",
                          data={'apikey': self.apikey})
    return self._raise_or_extract(response)
Only available on Joe Sandbox Cloud. Show information about the account.
def set_timestamp(self, time: Union[str, datetime.datetime] = None,
                  now: bool = False) -> None:
    if now:
        self.timestamp = str(datetime.datetime.utcnow())
    else:
        self.timestamp = str(time)
Sets the timestamp of the embed.

Parameters
----------
time: str or :class:`datetime.datetime`
    The ``ISO 8601`` timestamp from the embed.
now: bool
    Defaults to :class:`False`. If set to :class:`True` the current time is
    used for the timestamp.
def _init_datastore_v3_stub(self, **stub_kwargs):
    task_args = dict(datastore_file=self._data_path)
    task_args.update(stub_kwargs)
    self.testbed.init_datastore_v3_stub(**task_args)
Initializes the datastore stub using nosegae config magic
def get_cpds(self):
    cpds = self.model.get_cpds()
    tables = {}
    for cpd in cpds:
        tables[cpd.variable] = cpd.values.ravel()
    return tables
Adds tables to BIF

Returns
-------
dict: dict of type {variable: array}

Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_cpds()
{'bowel-problem': array([ 0.01,  0.99]),
 'dog-out': array([ 0.99,  0.97,  0.9 ,  0.3 ,  0.01,  0.03,  0.1 ,  0.7 ]),
 'family-out': array([ 0.15,  0.85]),
 'hear-bark': array([ 0.7 ,  0.01,  0.3 ,  0.99]),
 'light-on': array([ 0.6 ,  0.05,  0.4 ,  0.95])}
def get_internal_angles(self):
    angles = []
    for elx, elz in zip(self.grid['x'], self.grid['z']):
        el_angles = []
        xy = np.vstack((elx, elz))
        for i in range(0, elx.size):
            i1 = (i - 1) % elx.size
            i2 = (i + 1) % elx.size
            a = (xy[:, i] - xy[:, i1])
            b = (xy[:, i2] - xy[:, i])
            angle = np.pi - np.arctan2(
                a[0] * b[1] - a[1] * b[0],
                a[0] * b[0] + a[1] * b[1]
            )
            el_angles.append(angle * 180 / np.pi)
        angles.append(el_angles)
    return np.array(angles)
Compute all internal angles of the grid

Returns
-------
numpy.ndarray
    NxK array with N the number of elements, and K the number of nodes,
    filled with the internal angles in degrees
def create_object(cls, members):
    obj = cls.__new__(cls)
    obj.__dict__ = members
    return obj
Promise an object of class `cls` with content `members`.
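Because cls.__new__ bypasses __init__, this can rehydrate an object straight from a dict. A sketch with a hypothetical Point class; create_object appears to be a classmethod in the source, but is called as a plain function here for brevity:

    class Point:
        def __init__(self, x, y):
            raise RuntimeError('never reached: create_object skips __init__')

    p = create_object(Point, {'x': 1, 'y': 2})
    p.x, p.y  # (1, 2)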
def main():
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        description='Test the SMA webconnect library.')
    parser.add_argument(
        'ip', type=str, help='IP address of the Webconnect module')
    parser.add_argument(
        'user', help='installer/user')
    parser.add_argument(
        'password', help='Installer password')
    args = parser.parse_args()
    loop = asyncio.get_event_loop()

    def _shutdown(*_):
        VAR['running'] = False
    signal.signal(signal.SIGINT, _shutdown)

    loop.run_until_complete(main_loop(
        loop, user=args.user, password=args.password, ip=args.ip))
Main example.
def delete(self, event):
    super(CeleryReceiver, self).delete(event)
    AsyncResult(event.id).revoke(terminate=True)
Abort running task if it exists.
def create(self, repo_name, scm='git', private=True, **kwargs):
    url = self.bitbucket.url('CREATE_REPO')
    return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth,
                                   name=repo_name, scm=scm,
                                   is_private=private, **kwargs)
Creates a new repository on own Bitbucket account and return it.
def unpack_source_dists(self, arguments, use_wheels=False):
    unpack_timer = Timer()
    logger.info("Unpacking distribution(s) ..")
    with PatchedAttribute(pip_install_module, 'PackageFinder', CustomPackageFinder):
        requirements = self.get_pip_requirement_set(arguments,
                                                    use_remote_index=False,
                                                    use_wheels=use_wheels)
        logger.info("Finished unpacking %s in %s.",
                    pluralize(len(requirements), "distribution"),
                    unpack_timer)
        return requirements
Find and unpack local source distributions and discover their metadata.

:param arguments: The command line arguments to ``pip install ...`` (a
    list of strings).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
    (:data:`False` by default for backwards compatibility with callers
    that use pip-accel as a Python API).
:returns: A list of :class:`pip_accel.req.Requirement` objects.
:raises: Any exceptions raised by pip, for example
    :exc:`pip.exceptions.DistributionNotFound` when not all requirements
    can be satisfied.

This function checks whether there are local source distributions
available for all requirements, unpacks the source distribution archives
and finds the names and versions of the requirements. By using the ``pip
install --download`` command we avoid reimplementing the following pip
features:

- Parsing of ``requirements.txt`` (including recursive parsing).
- Resolution of possibly conflicting pinned requirements.
- Unpacking source distributions in multiple formats.
- Finding the name & version of a given source distribution.
def _eval_args(args):
    res = []
    for arg in args:
        if not isinstance(arg, tuple):
            res.append(arg)
        elif is_callable_type(arg[0]):
            callable_args = _eval_args(arg[1:])
            if len(arg) == 2:
                res.append(Callable[[], callable_args[0]])
            elif arg[1] is Ellipsis:
                res.append(Callable[..., callable_args[1]])
            else:
                res.append(Callable[list(callable_args[:-1]), callable_args[-1]])
        else:
            res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:])))
    return tuple(res)
Internal helper for get_args.
def is_item_public(self, permission_name, view_name):
    permissions = self.get_public_permissions()
    if permissions:
        for i in permissions:
            if (view_name == i.view_menu.name) and (
                    permission_name == i.permission.name):
                return True
        return False
    else:
        return False
Check if view has public permissions

:param permission_name: the permission: can_show, can_edit...
:param view_name: the name of the class view (child of BaseView)
def get_slice_nodes(self, time_slice=0):
    if not isinstance(time_slice, int) or time_slice < 0:
        raise ValueError("The timeslice should be a positive value greater "
                         "than or equal to zero")
    return [(node, time_slice) for node in self._nodes()]
Returns the nodes present in a particular timeslice

Parameters
----------
time_slice: int
    The timeslice should be a positive value greater than or equal to zero

Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
...                     (('G', 0), ('L', 0)), (('D', 0), ('D', 1))])
>>> dbn.get_slice_nodes()
def remove_wirevector(self, wirevector):
    self.wirevector_set.remove(wirevector)
    del self.wirevector_by_name[wirevector.name]
Remove a wirevector object from the block.
def refine_cand(candsfile, candloc=[], candnum=-1, threshold=0, scaledm=2.1,
                scalepix=2, scaleuv=1.0, chans=[], returndata=False):
    if candnum >= 0:
        candlocs, candprops, d0 = pc.read_candidates(candsfile, snrmin=threshold,
                                                     returnstate=True)
        candloc = candlocs[candnum]
        candprop = candprops[candnum]
        logger.info('Refining cand {0} with features {1}'.format(candloc, candprop))
        values = rt.pipeline_refine(d0, candloc, scaledm=scaledm, scalepix=scalepix,
                                    scaleuv=scaleuv, chans=chans,
                                    returndata=returndata)
        return values
    elif candloc:
        logger.info('Refining cand {0}'.format(candloc))
        d0 = pickle.load(open(candsfile, 'r'))
        values = rt.pipeline_refine(d0, candloc, scaledm=scaledm, scalepix=scalepix,
                                    scaleuv=scaleuv, chans=chans,
                                    returndata=returndata)
        # The original returned the undefined names "d, cands" here.
        return values
    else:
        return None
Helper function to interact with merged cands file and refine analysis.

candsfile is a merged pkl file. candloc (scan, segment, candint, dmind,
dtind, beamnum) is as above. If no candloc, then it prints out cands above
threshold.
def shared_prefix(args):
    i = 0
    while i < min(map(len, args)):
        if len(set(map(operator.itemgetter(i), args))) != 1:
            break
        i += 1
    return args[0][:i]
Find the shared prefix between the strings. For instance: sharedPrefix(['blahblah', 'blahwhat']) returns 'blah'.
def break_around_binary_operator(logical_line, tokens):
    def is_binary_operator(token_type, text):
        return ((token_type == tokenize.OP or text in ['and', 'or']) and
                text not in "()[]{},:.;@=%~")

    line_break = False
    unary_context = True
    previous_token_type = None
    previous_text = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if ('\n' in text or '\r' in text) and token_type != tokenize.STRING:
            line_break = True
        else:
            if (is_binary_operator(token_type, text) and line_break and
                    not unary_context and
                    not is_binary_operator(previous_token_type, previous_text)):
                yield start, "W503 line break before binary operator"
            unary_context = text in '([{,;'
            line_break = False
            previous_token_type = token_type
            previous_text = text
Avoid breaks before binary operators.

The preferred place to break around a binary operator is after the
operator, not before it.

W503: (width == 0\n + height == 0)
W503: (width == 0\n and height == 0)

Okay: (width == 0 +\n height == 0)
Okay: foo(\n -x)
Okay: foo(x\n [])
Okay: x = '''\n''' + ''
Okay: foo(x,\n -y)
Okay: foo(x,  # comment\n -y)
Okay: var = (1 &\n ~2)
Okay: var = (1 /\n -2)
Okay: var = (1 +\n -1 +\n -2)
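A sketch of driving this checker directly with the tokenize module; the logical_line argument is unused by the body, so an empty string suffices here:

    import io
    import tokenize

    src = 'x = (1\n     + 2)\n'
    tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
    list(break_around_binary_operator('', tokens))
    # [((2, 5), 'W503 line break before binary operator')]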
def keypress(self, size, key):
    key = self.__super.keypress(size, key)
    if key:
        key = self.unhandled_keys(size, key)
    return key
allow subclasses to intercept keystrokes
def __equalize_densities(self, nominal_bounds, nominal_density):
    left, bottom, right, top = nominal_bounds.lbrt()
    width = right - left
    height = top - bottom
    center_y = bottom + height / 2.0
    true_density = int(nominal_density * (width)) / float(width)
    n_cells = round(height * true_density, 0)
    adjusted_half_height = n_cells / true_density / 2.0
    return (BoundingBox(points=((left, center_y - adjusted_half_height),
                                (right, center_y + adjusted_half_height))),
            true_density)
Calculate the true density along x, and adjust the top and bottom bounds so that the density along y will be equal. Returns (adjusted_bounds, true_density)
def v_reference_leaf_leafref(ctx, stmt):
    if (hasattr(stmt, 'i_leafref') and
            stmt.i_leafref is not None and
            stmt.i_leafref_expanded is False):
        path_type_spec = stmt.i_leafref
        not_req_inst = not (path_type_spec.require_instance)
        x = validate_leafref_path(ctx, stmt,
                                  path_type_spec.path_spec,
                                  path_type_spec.path_,
                                  accept_non_config_target=not_req_inst)
        if x is None:
            return
        ptr, expanded_path, path_list = x
        path_type_spec.i_target_node = ptr
        path_type_spec.i_expanded_path = expanded_path
        path_type_spec.i_path_list = path_list
        stmt.i_leafref_expanded = True
        if ptr is not None:
            chk_status(ctx, stmt, ptr)
            stmt.i_leafref_ptr = (ptr, path_type_spec.pos)
Verify that all leafrefs in a leaf or leaf-list have correct path
def _artifact_base(self):
    if '_artifact_base' not in self._memo:
        for artifact in self._artifacts:
            if self.re_target.search(artifact['name']) is not None:
                artifact_base = os.path.splitext(artifact['name'])[0]
                break
        else:
            raise FetcherException('Could not find build info in artifacts')
        self._memo['_artifact_base'] = artifact_base
    return self._memo['_artifact_base']
Build the artifact basename Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip...
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
    assert upper_left_corner.wkt == lower_right_corner.wkt
    geotransform = np.array([
        upper_left_corner.lon,
        -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]),
        0,
        upper_left_corner.lat,
        0,
        -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])
    ])
    return GridCoordinates(geotransform=geotransform,
                           wkt=upper_left_corner.wkt,
                           y_size=size[0], x_size=size[1])
Points are the outer edges of the UL and LR pixels. Size is rows, columns. GC projection type is taken from Points.
def init(self):
    try:
        start_http_server(port=int(self.port), addr=self.host)
    except Exception as e:
        logger.critical("Can not start Prometheus exporter on {}:{} ({})".format(
            self.host, self.port, e))
        sys.exit(2)
    else:
        logger.info("Start Prometheus exporter on {}:{}".format(self.host, self.port))
Init the Prometheus Exporter
def patch_worker_run_task():
    _run_task = luigi.worker.Worker._run_task

    def run_task(self, task_id):
        task = self._scheduled_tasks[task_id]
        task._worker_id = self._id
        task._worker_task = self._first_task
        try:
            _run_task(self, task_id)
        finally:
            task._worker_id = None
            task._worker_task = None
        if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
            self._start_phasing_out()

    luigi.worker.Worker._run_task = run_task
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its first task in the task. This information is required by the sandboxing mechanism.
def resolve_extensions(bot: commands.Bot, name: str) -> list:
    if name.endswith('.*'):
        module_parts = name[:-2].split('.')
        path = pathlib.Path(module_parts.pop(0))
        for part in module_parts:
            path = path / part
        return find_extensions_in(path)
    if name == '~':
        return list(bot.extensions.keys())
    return [name]
Tries to resolve extension queries into a list of extension names.
def _complete_history(self, cmd, args, text):
    if args:
        return
    return [x for x in {'clear', 'clearall'} if x.startswith(text)]
Find candidates for the 'history' command.
def next_free_pos(self, address):
    idx = self._search(address)
    if idx < len(self._list) and self._list[idx].start <= address < self._list[idx].end:
        i = idx
        while i + 1 < len(self._list) and self._list[i].end == self._list[i + 1].start:
            i += 1
        if i == len(self._list):
            return self._list[-1].end
        return self._list[i].end
    return address
Returns the next free position with respect to an address, including that
address itself.

:param address: The address to begin the search with (including itself)
:return: The next free position
def _check_content(self, content_str):
    if self.do_content_check:
        space_ratio = float(content_str.count(' ')) / len(content_str)
        if space_ratio > self.max_space_ratio:
            return "space-ratio: %f > %f" % (space_ratio, self.max_space_ratio)
        if len(content_str) > self.input_character_limit:
            return "too long: %d > %d" % (len(content_str),
                                          self.input_character_limit)
    return None
Check if the content is likely to be successfully read.
def pcapname(dev):
    if isinstance(dev, NetworkInterface):
        if dev.is_invalid():
            return None
        return dev.pcap_name
    try:
        return IFACES.dev_from_name(dev).pcap_name
    except ValueError:
        return IFACES.dev_from_pcapname(dev).pcap_name
Get the device pcap name by device name or Scapy NetworkInterface
def getEyeOutputViewport(self, eEye):
    fn = self.function_table.getEyeOutputViewport
    pnX = c_uint32()
    pnY = c_uint32()
    pnWidth = c_uint32()
    pnHeight = c_uint32()
    fn(eEye, byref(pnX), byref(pnY), byref(pnWidth), byref(pnHeight))
    return pnX.value, pnY.value, pnWidth.value, pnHeight.value
Gets the viewport in the frame buffer to draw the output of the distortion into
def pct_decode(s):
    if s is None:
        return None
    elif not isinstance(s, unicode):
        s = str(s)
    else:
        s = s.encode('utf8')
    return PERCENT_CODE_SUB(lambda mo: chr(int(mo.group(0)[1:], 16)), s)
Return the percent-decoded version of string s.

>>> pct_decode('%43%6F%75%63%6F%75%2C%20%6A%65%20%73%75%69%73%20%63%6F%6E%76%69%76%69%61%6C')
'Coucou, je suis convivial'
>>> pct_decode('')
''
>>> pct_decode('%2525')
'%25'
def get(cls, id_):
    with db.session.no_autoflush:
        query = cls.dbmodel.query.filter_by(id=id_)
        try:
            model = query.one()
        except NoResultFound:
            raise WorkflowsMissingObject("No object for id {0}".format(id_))
        return cls(model)
Return a workflow object from id.
def get(self, key, defaultValue=None):
    if defaultValue is None:
        if self._jconf is not None:
            if not self._jconf.contains(key):
                return None
            return self._jconf.get(key)
        else:
            if key not in self._conf:
                return None
            return self._conf[key]
    else:
        if self._jconf is not None:
            return self._jconf.get(key, defaultValue)
        else:
            return self._conf.get(key, defaultValue)
Get the configured value for some key, or return a default otherwise.
def local_lambda_runner(self):
    layer_downloader = LayerDownloader(self._layer_cache_basedir, self.get_cwd())
    image_builder = LambdaImage(layer_downloader, self._skip_pull_image,
                                self._force_image_build)
    lambda_runtime = LambdaRuntime(self._container_manager, image_builder)
    return LocalLambdaRunner(local_runtime=lambda_runtime,
                             function_provider=self._function_provider,
                             cwd=self.get_cwd(),
                             env_vars_values=self._env_vars_value,
                             debug_context=self._debug_context)
Returns an instance of the runner capable of running Lambda functions locally

:return samcli.commands.local.lib.local_lambda.LocalLambdaRunner: Runner
    configured to run Lambda functions locally
def set_hook(fn, key, **kwargs):
    if fn is None:
        return functools.partial(set_hook, key=key, **kwargs)
    try:
        hook_config = fn.__marshmallow_hook__
    except AttributeError:
        fn.__marshmallow_hook__ = hook_config = {}
    hook_config[key] = kwargs
    return fn
Mark decorated function as a hook to be picked up later.

.. note::
    Currently only works with functions and instance methods. Class and
    static methods are not supported.

:return: Decorated function if supplied, else this decorator with its args
    bound.
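Since passing fn=None returns a partially applied decorator, a minimal usage sketch looks like this (the hook key 'post_load' is just an example value):

    @set_hook(None, key='post_load')
    def clean(self, data):
        return data

    clean.__marshmallow_hook__  # {'post_load': {}}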
def slave_envs(self):
    if self.hostIP == 'dns':
        host = socket.gethostname()
    elif self.hostIP == 'ip':
        host = socket.gethostbyname(socket.getfqdn())
    else:
        host = self.hostIP
    return {'rabit_tracker_uri': host,
            'rabit_tracker_port': self.port}
Get environment variables for slaves; these can be passed in as args or envs.
def boost(self, boost):
    _LOGGER.debug("Setting boost mode: %s", boost)
    value = struct.pack('BB', PROP_BOOST, bool(boost))
    self._conn.make_request(PROP_WRITE_HANDLE, value)
Sets boost mode.
def opcode_list(self, script):
    opcodes = []
    new_pc = 0
    try:
        for opcode, data, pc, new_pc in self.get_opcodes(script):
            opcodes.append(self.disassemble_for_opcode_data(opcode, data))
    except ScriptError:
        opcodes.append(binascii.hexlify(script[new_pc:]).decode("utf8"))
    return opcodes
Disassemble the given script. Returns a list of opcodes.
def add_option(self, section, option, value=None):
    if not self.config.has_section(section):
        message = self.add_section(section)
        if not message[0]:
            return message
    if not self.config.has_option(section, option):
        if value:
            self.config.set(section, option, value)
        else:
            self.config.set(section, option)
        return (True, self.config.options(section))
    return (False, 'Option: {} already exists @ {}'.format(option, section))
Creates an option for a section. If the section does not exist, it will create the section.
def request(self, account, amount):
    return self._keeper.dispenser.request_tokens(amount, account)
Request a number of tokens to be minted and transferred to `account`

:param account: Account instance requesting the tokens
:param amount: int number of tokens to request
:return: bool
def update_image_digest(self, image, platform, digest):
    image_name_tag = self._key(image)
    image_name = image.to_str(tag=False)
    name_digest = '{}@{}'.format(image_name, digest)
    image_digests = self._images_digests.setdefault(image_name_tag, {})
    image_digests[platform] = name_digest
Update parent image digest for specific platform

:param ImageName image: image
:param str platform: name of the platform/arch (x86_64, ppc64le, ...)
:param str digest: digest of the specified image (sha256:...)
def filter_mean(matrix, top):
    assert isinstance(matrix, ExpMatrix)
    assert isinstance(top, int)
    if top >= matrix.p:
        logger.warning('Gene expression filter with `top` parameter that is '
                       '>= the number of genes!')
        top = matrix.p
    a = np.argsort(np.mean(matrix.X, axis=1))
    a = a[::-1]
    sel = np.zeros(matrix.p, dtype=np.bool_)
    sel[a[:top]] = True
    matrix = matrix.loc[sel]
    return matrix
Filter genes in an expression matrix by mean expression.

Parameters
----------
matrix: ExpMatrix
    The expression matrix.
top: int
    The number of genes to retain.

Returns
-------
ExpMatrix
    The filtered expression matrix.
def _set_attachments(self, value):
    if value is None:
        setattr(self, '_PMMail__attachments', [])
    elif isinstance(value, list):
        setattr(self, '_PMMail__attachments', value)
    else:
        raise TypeError('Attachments must be a list')
A special set function to ensure we're setting with a list
def save_to_filename(self, file_name, sep='\n'):
    fp = open(file_name, 'wb')
    n = self.save_to_file(fp, sep)
    fp.close()
    return n
Read all messages from the queue and persist them to local file. Messages are written to the file and the 'sep' string is written in between messages. Messages are deleted from the queue after being written to the file. Returns the number of messages saved.