15,700
def logic(self, data):
    try:
        msg = Message(data, self)
        msg.validate(self.protocol_version)
    except (ValueError, vol.Invalid) as exc:
        # log format string elided in source; placeholder message assumed
        _LOGGER.warning('Not a valid message: %s', exc)
        return None
    message_type = self.const.MessageType(msg.type)
    handler = message_type.get_handler(self.handlers)
    ret = handler(msg)
    ret = self._route_message(ret)
    ret = ret.encode() if ret else None
    return ret
Parse the data and respond to it appropriately. The response is returned to the caller and has to be sent as a mysensors command string.
15,701
def detectBlackBerry10Phone(self):
    return UAgentInfo.deviceBB10 in self.__userAgent \
        and UAgentInfo.mobile in self.__userAgent
Return detection of a BlackBerry 10 OS phone. Detects if the current browser is a BlackBerry 10 OS phone. Excludes the PlayBook.
15,702
def scroll(clicks, x=None, y=None, pause=None, _pause=True):
    _failSafeCheck()
    if type(x) in (tuple, list):
        x, y = x[0], x[1]
    x, y = position(x, y)
    platformModule._scroll(clicks, x, y)
    _autoPause(pause, _pause)
Performs a scroll of the mouse scroll wheel. Whether this is a vertical or horizontal scroll depends on the underlying operating system. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: clicks (int, float): The amount of scrolling to perform. x (int, float, None, tuple, optional): The x position on the screen where the click happens. None by default. If tuple, this is used for x and y. y (int, float, None, optional): The y position on the screen where the click happens. None by default. Returns: None
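A minimal usage sketch (values are made up; assumes a pyautogui-style module exposing this scroll() at top level):

import pyautogui

pyautogui.scroll(10)                  # scroll 10 "clicks" at the current mouse position
pyautogui.scroll(-5, x=100, y=200)    # negative clicks scroll the other way, at (100, 200)
pyautogui.scroll(3, (100, 200))       # tuple form: used for both x and y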
15,703
def rename(self, name):
    # SQL template elided in source; a standard ALTER TABLE ... RENAME is assumed
    sql = 'ALTER TABLE "{s}"."{t}" RENAME TO "{name}"'.format(
        s=self.schema, t=self.name, name=name
    )
    self.engine.execute(sql)
    self.table = SQLATable(name, self.metadata, schema=self.schema,
                           autoload=True)
Rename the table
15,704
def get_next_second(intersection, intersections, to_end=True):
    along_edge = None
    index_second = intersection.index_second
    t = intersection.t
    for other_int in intersections:
        other_t = other_int.t
        if other_int.index_second == index_second and other_t > t:
            if along_edge is None or other_t < along_edge.t:
                along_edge = other_int

    if along_edge is None:
        if to_end:
            # Just return the segment end.
            return _intersection_helpers.Intersection(
                None, None, index_second, 1.0,
                interior_curve=CLASSIFICATION_T.SECOND,
            )
        else:
            return None
    else:
        return along_edge
Gets the next node along the current (second) edge. .. note:: This is a helper used only by :func:`get_next`, which in turn is only used by :func:`basic_interior_combine`, which itself is only used by :func:`combine_intersections`. Along with :func:`get_next_first`, this function does the majority of the heavy lifting in :func:`get_next`. **Very** similar to :func:`get_next_first`, but this works with the second curve while the other function works with the first. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. to_end (Optional[bool]): Indicates if the next node should just be the end of the current (second) edge or :data:`None`. Returns: Optional[.Intersection]: The "next" point along a surface of intersection. This will produce the next intersection along the current (second) edge or the end of the same edge. If ``to_end`` is :data:`False` and there are no other intersections along the current edge, will return :data:`None` (rather than the end of the same edge).
15,705
def BatchLabelVocabulary(self):
    # catalog, type and index names were elided in the source; the values
    # below are the usual bika.lims ones and should be read as assumptions
    bsc = getToolByName(self, 'bika_setup_catalog')
    ret = []
    for p in bsc(portal_type='BatchLabel',
                 is_active=True,
                 sort_on='sortable_title'):
        ret.append((p.UID, p.Title))
    return DisplayList(ret)
Return all batch labels as a display list
15,706
def cross_fade(self, seg1, seg2, duration):
    if seg1.comp_location + seg1.duration - seg2.comp_location < 2:
        dur = int(duration * seg1.track.samplerate)

        if dur % 2 == 1:
            dur -= 1
        if dur / 2 > seg1.duration:
            dur = seg1.duration * 2
        if dur / 2 > seg2.duration:
            dur = seg2.duration * 2

        if seg2.start - (dur / 2) < 0:
            diff = seg2.start
            seg2.start = 0
            seg2.duration -= diff
            seg2.comp_location -= diff
            dur = 2 * diff
        else:
            seg2.start -= (dur / 2)
            seg2.duration += (dur / 2)
            seg2.comp_location -= (dur / 2)
        seg1.duration += (dur / 2)

        out_frames = seg1.get_frames(channels=self.channels)[-dur:]
        seg1.duration -= dur

        in_frames = seg2.get_frames(channels=self.channels)[:dur]
        seg2.start += dur
        seg2.duration -= dur
        seg2.comp_location += dur

        # trim both frame buffers to the same length before fading
        in_frames = in_frames[:min(map(len, [in_frames, out_frames]))]
        out_frames = out_frames[:min(map(len, [in_frames, out_frames]))]

        cf_frames = radiotool.utils.linear(out_frames, in_frames)

        raw_track = RawTrack(cf_frames, name="crossfade",
                             samplerate=seg1.track.samplerate)
        rs_comp_location = (seg1.comp_location + seg1.duration) /\
            float(seg1.track.samplerate)
        rs_duration = raw_track.duration / float(raw_track.samplerate)
        raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration)
        raw_seg.duration = raw_track.duration
        raw_seg.comp_location = seg1.comp_location + seg1.duration

        self.add_track(raw_track)
        self.add_segment(raw_seg)

        return raw_seg
    else:
        print seg1.comp_location + seg1.duration, seg2.comp_location  # Python 2 print, kept from source
        raise Exception("Segments must be adjacent "
                        "to add a crossfade ({}, {})".format(
                            seg1.comp_location + seg1.duration,
                            seg2.comp_location))
Add a linear crossfade to the composition between two segments. :param seg1: First segment (fading out) :type seg1: :py:class:`radiotool.composer.Segment` :param seg2: Second segment (fading in) :type seg2: :py:class:`radiotool.composer.Segment` :param duration: Duration of crossfade (in seconds)
15,707
async def findTask(self, *args, **kwargs):
    return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
Find Indexed Task Find a task by index path, returning the highest-rank task with that path. If no task exists for the given path, this API end-point will respond with a 404 status. This method gives output: ``v1/indexed-task-response.json#`` This method is ``stable``
15,708
def new_clustered_sortind(x, k=10, row_key=None, cluster_key=None):
    try:
        from sklearn.cluster import MiniBatchKMeans
    except ImportError:
        # error message elided in source
        raise ImportError('please install scikit-learn for clustering')

    # NOTE: the clustering step itself was lost in extraction (`labels` was
    # used but never assigned); the block below is a plausible reconstruction
    # based on the docstring: if `k` is a list, pick the k with the lowest
    # mean distance from cluster centers.
    if isinstance(k, int):
        best_k = k
    else:
        mean_dists = {}
        for _k in k:
            mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
            mbk.fit(x)
            mean_dists[_k] = mbk.transform(x).mean()
        best_k = sorted(mean_dists.items(), key=lambda kv: kv[1])[0][0]

    mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
    mbk.fit(x)
    k = best_k
    labels = mbk.labels_

    if cluster_key:
        # operate on all rows of a cluster rather than on the label integer
        def _cluster_key(i):
            return cluster_key(x[labels == i, :])
        sorted_labels = sorted(range(k), key=_cluster_key)
    else:
        sorted_labels = range(k)

    if row_key:
        def _row_key(i):
            return row_key(x[i, :])

    final_ind = []
    breaks = []
    pos = 0
    for label in sorted_labels:
        label_inds = np.nonzero(labels == label)[0]
        if row_key:
            label_sort_ind = sorted(label_inds, key=_row_key)
        else:
            label_sort_ind = label_inds
        for li in label_sort_ind:
            final_ind.append(li)
        pos += len(label_inds)
        breaks.append(pos)

    return np.array(final_ind), np.array(breaks)
Uses MiniBatch k-means clustering to cluster matrix into groups. Each cluster of rows is then sorted by `scorefunc` -- by default, the max peak height when all rows in a cluster are averaged, or cluster.mean(axis=0).max(). Returns the index that will sort the rows of `x` and a list of "breaks". `breaks` is essentially a cumulative row count for each cluster boundary. In other words, after plotting the array you can use axhline on each "break" to plot the cluster boundary. If `k` is a list or tuple, iteratively try each one and select the best with the lowest mean distance from cluster centers. :param x: Matrix whose rows are to be clustered :param k: Number of clusters to create or a list of potential clusters; the optimum will be chosen from the list :param row_key: Optional function to act as a sort key for sorting rows within clusters. Signature should be `scorefunc(a)` where `a` is a 1-D NumPy array. :param cluster_key: Optional function for sorting clusters. Signature is `clusterfunc(a)` where `a` is a NumPy array containing all rows of `x` for cluster `i`. It must return a single value.
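An illustrative usage sketch for the function above (array contents are made up):

import numpy as np

x = np.random.rand(100, 50)                    # 100 rows to cluster
ind, breaks = new_clustered_sortind(
    x, k=5, row_key=lambda a: a.max())         # sort rows within each cluster by peak height
sorted_x = x[ind]                              # rows reordered cluster by cluster
# `breaks` holds cumulative row counts; after plotting sorted_x, each value
# is a y-position for axhline() marking a cluster boundary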
15,709
def _type_single(self, value, _type):
    if value is None or _type in (None, NoneType):
        pass
    elif isinstance(value, _type):
        value = dt2ts(value) if _type in [datetime, date] else value
    else:
        if _type in (datetime, date):
            value = dt2ts(value)
        elif _type in (unicode, str):
            value = to_encoding(value)
        else:
            try:
                value = _type(value)
            except Exception:
                value = to_encoding(value)
                logger.error("typecast failed: %s(value=%s)" % (
                    _type.__name__, value))
                raise
    return value
Apply the given type to the single value.
15,710
def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None,
            tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, text_cols:IntsOrStrs=1,
            label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=60000,
            min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation."
    processor = _get_processor(tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,
                               min_freq=min_freq, mark_fields=mark_fields,
                               include_bos=include_bos, include_eos=include_eos)
    if classes is None and is_listy(label_cols) and len(label_cols) > 1: classes = label_cols
    src = ItemLists(path, TextList.from_df(train_df, path, cols=text_cols, processor=processor),
                    TextList.from_df(valid_df, path, cols=text_cols, processor=processor))
    if cls==TextLMDataBunch: src = src.label_for_lm()
    else:
        if label_delim is not None: src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim)
        else: src = src.label_from_df(cols=label_cols, classes=classes)
    if test_df is not None: src.add_test(TextList.from_df(test_df, path, cols=text_cols))
    return src.databunch(**kwargs)
Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation.
15,711
def pathsplit(pth, dropext=True):
    if dropext:
        pth = os.path.splitext(pth)[0]
    parts = os.path.split(pth)
    if parts[0] == '':  # empty head: nothing left to split
        return parts[1:]
    elif len(parts[0]) == 1:  # reached the filesystem root, e.g. '/'
        return parts
    else:
        return pathsplit(parts[0], dropext=False) + parts[1:]
Split a path into a tuple of all of its components.
15,712
def sitetree_breadcrumbs(parser, token):
    tokens = token.split_contents()
    use_template = detect_clause(parser, 'template', tokens)
    tokens_num = len(tokens)

    if tokens_num == 3:
        tree_alias = parser.compile_filter(tokens[2])
        return sitetree_breadcrumbsNode(tree_alias, use_template)
    else:
        # error message elided in source; placeholder text assumed
        raise template.TemplateSyntaxError(
            '%r tag requires two arguments, e.g. '
            '{%% sitetree_breadcrumbs from "mytree" %%}' % tokens[0])
Parses sitetree_breadcrumbs tag parameters. Two notation types are possible: 1. Two arguments: {% sitetree_breadcrumbs from "mytree" %} Used to render breadcrumb path for "mytree" site tree. 2. Four arguments: {% sitetree_breadcrumbs from "mytree" template "sitetree/mycrumb.html" %} Used to render breadcrumb path for "mytree" site tree using specific template "sitetree/mycrumb.html"
15,713
def name(self, name):
    # attribute/key names elided in source; 'name' is assumed from context
    self._data['name'] = name
    request = self._base_request
    request['name'] = name
    return self._tc_requests.update(request, owner=self.owner)
Updates the security label's name. Args: name:
15,714
def scale_0to1(image_in,
               exclude_outliers_below=False,
               exclude_outliers_above=False):
    min_value = image_in.min()
    max_value = image_in.max()

    image = image_in.copy()

    if exclude_outliers_below:
        perctl = float(exclude_outliers_below)
        image[image < np.percentile(image, perctl)] = min_value

    if exclude_outliers_above:
        perctl = float(exclude_outliers_above)
        image[image > np.percentile(image, 100.0 - perctl)] = max_value

    image = (image - min_value) / (max_value - min_value)

    return image
Scale an image to [0, 1] based on its min/max values, optionally excluding percentile outliers. Parameters ----------- image_in : ndarray Input image exclude_outliers_{below,above} : float Lower/upper limit, a value between 0 and 100. Returns ------- scaled_image : ndarray clipped and/or scaled image
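A small usage sketch, assuming the function above and numpy:

import numpy as np

img = np.array([[0., 50., 100.],
                [25., 75., 1000.]])                  # 1000. is an outlier
scaled = scale_0to1(img, exclude_outliers_above=5)   # values above the 95th percentile are
                                                     # set to the image max before scaling
assert 0.0 <= scaled.min() and scaled.max() <= 1.0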
15,715
def get_similar_entries(context, number=5,
                        template='zinnia/tags/entries_similar.html'):
    # the default template path and the dict keys were elided in the source;
    # the values used here are assumptions
    entry = context.get('entry')
    if not entry:
        return {'template': template, 'entries': []}
    vectors = EntryPublishedVectorBuilder()
    entries = vectors.get_related(entry, number)
    return {'template': template, 'entries': entries}
Return similar entries.
15,716
def main():
    plugin = Register()
    # option name elided in source; assumed from the handler below
    if plugin.args.option == 'sqlserverlocks':
        plugin.sqlserverlocks_handle()
    else:
        plugin.unknown("Unknown actions.")
Register your own mode and handle method here.
15,717
def inbox(self):
    if not self.isme:
        # error message garbled in source; reconstructed wording assumed
        raise PyPumpException("You can't read other people's inboxes")
    if self._inbox is None:
        # link key elided in source; 'activity-inbox' is assumed
        self._inbox = Inbox(self.links['activity-inbox'], pypump=self._pump)
    return self._inbox
:class:`Inbox feed <pypump.models.feed.Inbox>` with all :class:`activities <pypump.models.activity.Activity>` received by the person, can only be read if logged in as the owner. Example: >>> for activity in pump.me.inbox[:2]: ... print(activity.id) ... https://microca.st/api/activity/BvqXQOwXShSey1HxYuJQBQ https://pumpyourself.com/api/activity/iQGdnz5-T-auXnbUUdXh-A
15,718
def calc_sasa(dssp_df):
    # result keys were elided in the source; the descriptive names below
    # are placeholders, not the library's real keys
    infodict = {'total_sasa': dssp_df.exposure_asa.sum(),
                'mean_rel_exposure': dssp_df.exposure_rsa.mean(),
                'num_residues': len(dssp_df)}
    return infodict
Calculation of SASA utilizing the DSSP program. DSSP must be installed for biopython to properly call it. Install using apt-get on Ubuntu or from: http://swift.cmbi.ru.nl/gv/dssp/ Input: PDB or CIF structure file Output: dictionary with total SASA, mean relative exposure and residue count of the structure
15,719
def select_parser(self, request, parsers):
    if not request.content_type:
        return parsers[0], parsers[0].mimetype

    mimetype = MimeType.parse(request.content_type)

    for parser in parsers:
        if mimetype.match(parser.mimetype):
            return parser, mimetype

    return None, None
Selects the appropriate parser which matches the request's content type. :param request: The HTTP request. :param parsers: The list of parsers. :return: The parser selected or ``None``.
15,720
def list_models(
    self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
    if isinstance(dataset, str):
        dataset = DatasetReference.from_string(
            dataset, default_project=self.project
        )

    if not isinstance(dataset, (Dataset, DatasetReference)):
        raise TypeError("dataset must be a Dataset, DatasetReference, or string")

    path = "%s/models" % dataset.path
    result = page_iterator.HTTPIterator(
        client=self,
        api_request=functools.partial(self._call_api, retry),
        path=path,
        item_to_value=_item_to_model,
        items_key="models",
        page_token=page_token,
        max_results=max_results,
    )
    result.dataset = dataset
    return result
[Beta] List models in the dataset. See https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list Args: dataset (Union[ \ :class:`~google.cloud.bigquery.dataset.Dataset`, \ :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ str, \ ]): A reference to the dataset whose models to list from the BigQuery API. If a string is passed in, this method attempts to create a dataset reference from a string using :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`. max_results (int): (Optional) Maximum number of models to return. If not passed, defaults to a value set by the API. page_token (str): (Optional) Token representing a cursor into the models. If not passed, the API will return the first page of models. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. retry (:class:`google.api_core.retry.Retry`): (Optional) How to retry the RPC. Returns: google.api_core.page_iterator.Iterator: Iterator of :class:`~google.cloud.bigquery.model.Model` contained within the requested dataset.
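A hypothetical usage sketch for the client method above (project and dataset ids are made up):

from google.cloud import bigquery

client = bigquery.Client()
for model in client.list_models("my_project.my_dataset"):
    print(model.model_id)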
15,721
def flush(self, timeout=None, callback=None):
    client, scope = self._stack[-1]
    if client is not None:
        return client.flush(timeout=timeout, callback=callback)
Alias for self.client.flush
15,722
def pin_direction(self, pin):
    if type(pin) is list:
        return [self.pin_direction(p) for p in pin]

    pin_id = self._pin_mapping.get(pin, None)
    if pin_id:
        return self._pin_direction(pin_id)
    else:
        # error format string elided in source; placeholder assumed
        raise KeyError('Requested pin is not mapped: %s' % pin)
Gets the `ahio.Direction` this pin was set to. If you're developing a driver, implement _pin_direction(self, pin) @arg pin the pin whose mode you want to read @returns the `ahio.Direction` the pin is set to @throw KeyError if pin isn't mapped.
15,723
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
    return join(script_dir(pyobject, follow_symlinks), filename)
Get current script's directory and then append a filename Args: filename (str): Filename to append to directory path pyobject (Any): Any Python object in the script follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True. Returns: str: Current script's directory and with filename appended
15,724
def _do_spelling_suggestion(database, query, spelling_query):
    if spelling_query:
        if ' ' in spelling_query:
            return ' '.join([database.get_spelling_suggestion(term).decode()
                             for term in spelling_query.split()])
        else:
            return database.get_spelling_suggestion(spelling_query).decode()

    term_set = set()
    for term in query:
        # token regex elided in source; a whitespace-splitting pattern is assumed
        for match in re.findall(r'[^\s]+', term.decode()):
            term_set.add(database.get_spelling_suggestion(match).decode())

    return ' '.join(term_set)
Private method that returns a single spelling suggestion based on `spelling_query` or `query`. Required arguments: `database` -- The database to check spelling against `query` -- The query to check `spelling_query` -- If not None, this will be checked instead of `query` Returns a string with a suggested spelling
15,725
def _to_eng_tuple(number):
    # split mantissa digits around the (shifted) decimal point
    split = lambda x, p: (x.ljust(3 + neg, "0")[:p], x[p:].rstrip("0"))
    mant, exp = to_scientific_tuple(number)
    mant, neg = mant.replace(".", ""), mant.startswith("-")
    new_mant = ".".join(filter(None, split(mant, 1 + (exp % 3) + neg)))
    new_exp = int(3 * math.floor(exp / 3))
    return NumComp(new_mant, new_exp)
Return tuple with mantissa and exponent of number formatted in engineering notation. :param number: Number :type number: integer or float :rtype: tuple
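A standalone illustration of the exponent rule used above: engineering notation snaps the power of ten down to a multiple of 3 and shifts the mantissa to compensate.

import math

def eng_exponent(exp10):
    # e.g. 1.2345e4 -> 12.345e3, 2.5e-2 -> 25e-3
    return int(3 * math.floor(exp10 / 3))

assert eng_exponent(4) == 3
assert eng_exponent(-2) == -3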
15,726
def get_mon_map(service):
    try:
        # command arguments elided in source; the standard ceph invocation
        # is assumed here
        mon_status = check_output(['ceph', '--id', service,
                                   'mon_status', '--format=json'])
        if six.PY3:
            mon_status = mon_status.decode()
        try:
            return json.loads(mon_status)
        except ValueError as v:
            log("Unable to parse mon_status json: {}. Error: {}"
                .format(mon_status, str(v)))
            raise
    except CalledProcessError as e:
        log("mon_status command failed with message: {}"
            .format(str(e)))
        raise
Returns the current monitor map. :param service: six.string_types. The Ceph user name to run the command under :return: json string. :raise: ValueError if the monmap fails to parse. Also raises CalledProcessError if our ceph command fails
15,727
def get_attachment_info(self, attachment):
    attachment_uid = api.get_uid(attachment)
    attachment_file = attachment.getAttachmentFile()
    attachment_type = attachment.getAttachmentType()
    attachment_icon = attachment_file.icon
    if callable(attachment_icon):
        attachment_icon = attachment_icon()
    # the dictionary keys (and two string defaults) were elided in the
    # source; the descriptive names below are assumptions
    return {
        'keywords': attachment.getAttachmentKeys(),
        'size': self.get_attachment_size(attachment),
        'file_name': attachment_file.filename,
        'icon': attachment_icon,
        'type': api.get_uid(attachment_type) if attachment_type else '',
        'absolute_url': attachment.absolute_url(),
        'UID': attachment_uid,
        'report_option': attachment.getReportOption(),
        'analysis': '',
    }
Returns a dictionary of attachment information
15,728
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]:
    return [" " * num + l for l in elems]
Indent an iterable.
15,729
def describe_cache_parameter_groups(name=None, conn=None, region=None, key=None,
                                    keyid=None, profile=None):
    # parameter names elided in source; the values below follow the boto3
    # describe_cache_parameter_groups API and should be read as assumptions
    return _describe_resource(name=name,
                              name_param='CacheParameterGroupName',
                              res_type='cache_parameter_group',
                              info_node='CacheParameterGroups',
                              conn=conn, region=region, key=key,
                              keyid=keyid, profile=profile)
Return details about all (or just one) Elasticache cache parameter groups. Example: .. code-block:: bash salt myminion boto3_elasticache.describe_cache_parameter_groups salt myminion boto3_elasticache.describe_cache_parameter_groups myParameterGroup
15,730
def run_show_val(obj, name):
    val = obj.debugger.settings[obj.name]
    obj.msg("%s is %s." % (obj.name, obj.cmd.proc._saferepr(val),))
    return False
Generic subcommand value display
15,731
def get_bpf_pointer(tcpdump_lines):
    if conf.use_pypy:
        return _legacy_bpf_pointer(tcpdump_lines)

    # Allocate the BPF instruction array
    size = int(tcpdump_lines[0])
    bpf_insn_a = bpf_insn * size
    bip = bpf_insn_a()

    # Fill the BPF instruction structures with the byte code
    tcpdump_lines = tcpdump_lines[1:]
    i = 0
    for line in tcpdump_lines:
        values = [int(v) for v in line.split()]
        bip[i].code = c_ushort(values[0])
        bip[i].jt = c_ubyte(values[1])
        bip[i].jf = c_ubyte(values[2])
        bip[i].k = c_uint(values[3])
        i += 1

    # Create the BPF program
    return bpf_program(size, bip)
Create a BPF Pointer for TCPDump filter
15,732
def join_path_prefix(path, pre_path=None):
    if not path:
        return path

    if pre_path and not os.path.isabs(path):
        return os.path.join(pre_path, path)

    return path
If path set and not absolute, append it to pre path (if used) :param path: path to append :type path: str | None :param pre_path: Base path to append to (default: None) :type pre_path: None | str :return: Path or appended path :rtype: str | None
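An illustrative usage sketch of the helper above (paths are made up; results shown for a POSIX system):

assert join_path_prefix("conf/app.yml", "/etc/myapp") == "/etc/myapp/conf/app.yml"
assert join_path_prefix("/abs/app.yml", "/etc/myapp") == "/abs/app.yml"  # absolute paths pass through
assert join_path_prefix(None) is None                                    # falsy paths pass through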
15,733
def assembleimage(patches, pmasks, gridids):
    for d in range(patches[0].ndim):
        groups = {}
        for patch, pmask, gridid in zip(patches, pmasks, gridids):
            groupid = gridid[1:]
            if not groupid in groups:
                groups[groupid] = []
            groups[groupid].append((patch, pmask, gridid[0]))
        patches = []
        gridids = []
        pmasks = []
        for groupid, group in list(groups.items()):
            patches.append(numpy.concatenate(
                [p for p, _, _ in sorted(group, key=itemgetter(2))], d))
            pmasks.append(numpy.concatenate(
                [m for _, m, _ in sorted(group, key=itemgetter(2))], d))
            gridids.append(groupid)
    objs = find_objects(pmasks[0])
    if not 1 == len(objs):
        # error message elided in source
        raise ValueError('the patch masks do not assemble into a single image')
    return patches[0][objs[0]]
Assemble an image from a number of patches, patch masks and their grid ids. Parameters ---------- patches : sequence Sequence of patches. pmasks : sequence Sequence of associated patch masks. gridids Sequence of associated grid ids. Returns ------- image : ndarray The patches assembled back into an image of the original proportions. Examples -------- Two-dimensional example: >>> import numpy >>> from medpy.iterators import CentredPatchIterator >>> arr = numpy.arange(0, 25).reshape((5,5)) >>> arr array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) >>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, 2)) >>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids) >>> numpy.all(arr == result) True Five-dimensional example: >>> arr = numpy.random.randint(0, 10, range(5, 10)) >>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, range(2, 7))) >>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids) >>> numpy.all(arr == result) True
15,734
def __read_byte_size(decl, attrs):
    size = attrs.get(XML_AN_SIZE, 0)
    decl.byte_size = int(size) / 8
Using duck typing to set the size instead of in constructor
15,735
def cdd(d, k):
    if not isinstance(k, list):
        k = [k]
    for i in k:
        if i in d:
            d.pop(i)
Conditionally delete key (or list of keys) 'k' from dict 'd'
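A tiny usage example of the helper above:

d = {"a": 1, "b": 2, "c": 3}
cdd(d, "a")            # delete a single key
cdd(d, ["b", "zzz"])   # lists work too; missing keys are silently ignored
assert d == {"c": 3}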
15,736
def setup_graph(self):
    graph_default_context = None
    if self.execution_type == "single":
        self.graph = tf.Graph()
        graph_default_context = self.graph.as_default()
        graph_default_context.__enter__()
        self.global_model = None
    elif self.execution_type == "distributed":
        if self.distributed_spec["job"] == "ps":
            return None
        elif self.distributed_spec["job"] == "worker":
            if self.is_local_model:
                graph = tf.Graph()
                graph_default_context = graph.as_default()
                graph_default_context.__enter__()
                self.global_model = deepcopy(self)
                self.global_model.is_local_model = False
                self.global_model.setup()
                self.graph = graph
                self.as_local_model()
                # scope suffix elided in source; '-worker' is assumed
                self.scope += '-worker' + str(self.distributed_spec["task_index"])
            else:
                self.graph = tf.get_default_graph()
                self.global_model = None
            self.device = tf.train.replica_device_setter(
                worker_device=self.device,
                cluster=self.distributed_spec["cluster_spec"]
            )
        else:
            raise TensorForceError("Unsupported job type: {}!".format(self.distributed_spec["job"]))
    else:
        raise TensorForceError("Unsupported distributed type: {}!".format(self.distributed_spec["type"]))
    return graph_default_context
Creates our Graph and figures out which shared/global model to hook up to. If we are in a global-model's setup procedure, we do not create a new graph (return None as the context). We will instead use the already existing local replica graph of the model. Returns: None or the graph's as_default()-context.
15,737
def get_all_subdomains(offset=None, count=None, min_sequence=None, db_path=None, zonefiles_dir=None):
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        return []

    if db_path is None:
        # config key elided in source; assumed name
        db_path = opts['subdomaindb_path']

    if zonefiles_dir is None:
        # config key elided in source; assumed name
        zonefiles_dir = opts['zonefiles']

    db = SubdomainDB(db_path, zonefiles_dir)
    # the source passed min_sequence=None here, almost certainly a typo
    return db.get_all_subdomains(offset=offset, count=count,
                                 min_sequence=min_sequence)
Static method for getting the list of all subdomains
15,738
def check_serial_port(name):
    try:
        cdc = next(serial.tools.list_ports.grep(name))
        return cdc[0]
    except StopIteration:
        msg = "device {} not found. ".format(name)
        msg += "available devices are: "
        ports = list(serial.tools.list_ports.comports())
        for p in ports:
            msg += "{},".format(text_type(p))
        raise ValueError(msg)
Returns a valid COM port.
15,739
def load_glb(self):
    # The struct formats and chunk tags below follow the glTF 2.0 binary
    # spec ('<I' little-endian uint32, b'JSON' and b'BIN\x00' chunk types);
    # the literals were elided in the source
    with open(self.path, 'rb') as fd:
        # Check header
        magic = fd.read(4)
        if magic != GLTF_MAGIC_HEADER:
            raise ValueError("{} has incorrect header {} != {}".format(self.path, magic, GLTF_MAGIC_HEADER))

        version = struct.unpack('<I', fd.read(4))[0]
        if version != 2:
            raise ValueError("{} has unsupported version {}".format(self.path, version))

        # Total file size including headers
        _ = struct.unpack('<I', fd.read(4))[0]

        # Chunk 0 - json
        chunk_0_length = struct.unpack('<I', fd.read(4))[0]
        chunk_0_type = fd.read(4)
        if chunk_0_type != b'JSON':
            raise ValueError("Expected JSON chunk, not {} in file {}".format(chunk_0_type, self.path))

        json_meta = fd.read(chunk_0_length).decode()

        # Chunk 1 - binary buffer
        chunk_1_length = struct.unpack('<I', fd.read(4))[0]
        chunk_1_type = fd.read(4)
        if chunk_1_type != b'BIN\x00':
            raise ValueError("Expected BIN chunk, not {} in file {}".format(chunk_1_type, self.path))

        self.meta = GLTFMeta(self.path, json.loads(json_meta),
                             binary_buffer=fd.read(chunk_1_length))
Loads a binary gltf file
15,740
def load(args):
    from datetime import datetime as dt
    from jcvi.formats.fasta import Seq, SeqRecord

    valid_id_attributes = ["ID", "Name", "Parent", "Alias", "Target"]

    p = OptionParser(load.__doc__)
    # NOTE: several attribute keys, separators and "e.g." fragments inside
    # help strings were elided in the source; the filled-in values below are
    # assumptions reconstructed from context
    p.add_option("--parents", dest="parents", default="mRNA",
                 help="list of features to extract, use comma to separate (e.g. "
                      "'gene,mRNA') [default: %default]")
    p.add_option("--children", dest="children", default="CDS",
                 help="list of features to extract, use comma to separate (e.g. "
                      "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
    p.add_option("--feature", dest="feature",
                 help="feature type to extract. e.g. `--feature=CDS` or "
                      "`--feature=upstream:TSS:500` [default: %default]")
    p.add_option("--id_attribute", choices=valid_id_attributes,
                 help="The attribute field to extract and use as FASTA sequence ID "
                      "[default: %default]")
    p.add_option("--desc_attribute", default="Note",
                 help="The attribute field to extract and use as FASTA sequence "
                      "description [default: %default]")
    p.add_option("--full_header", default=None, choices=["default", "tair"],
                 help="Specify if full FASTA header (with seqid, coordinates and datestamp)"
                      " should be generated [default: %default]")

    g1 = OptionGroup(p, "Optional parameters (if generating full header)")
    g1.add_option("--sep", dest="sep", default=" ",
                  help="Specify separator used to delimiter header elements [default: \"%default\"]")
    g1.add_option("--datestamp", dest="datestamp",
                  help="Specify a datestamp in the format YYYYMMDD or automatically pick `today`"
                       " [default: %default]")
    g1.add_option("--conf_class", dest="conf_class", default=False, action="store_true",
                  help="Specify if `conf_class` attribute should be parsed and placed in the header"
                       " [default: %default]")
    p.add_option_group(g1)
    p.set_outfile()

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(p.print_help())

    gff_file, fasta_file = args

    if opts.feature:
        opts.feature, opts.parent, opts.children, upstream_site, upstream_len, \
            flag, error_msg = parse_feature_param(opts.feature)
        if flag:
            sys.exit(error_msg)

    parents = set(opts.parents.split(","))
    children_list = set(opts.children.split(","))

    skipChildren = True if len(parents.symmetric_difference(children_list)) == 0 \
        else False

    id_attr = opts.id_attribute
    desc_attr = opts.desc_attribute
    sep = opts.sep

    import gffutils
    g = make_index(gff_file)
    f = Fasta(fasta_file, index=False)
    seqlen = {}
    for seqid, size in f.itersizes():
        seqlen[seqid] = size

    fw = must_open(opts.outfile, "w")

    for feat in get_parents(gff_file, parents):
        desc = ""
        if desc_attr:
            fparent = feat.attributes['Parent'][0] \
                if 'Parent' in feat.attributes else None
            if fparent:
                try:
                    g_fparent = g[fparent]
                except gffutils.exceptions.FeatureNotFoundError:
                    logging.error("{} not found in index .. skipped".format(fparent))
                    continue
                if desc_attr in g_fparent.attributes:
                    desc = ",".join(g_fparent.attributes[desc_attr])
            elif desc_attr in feat.attributes:
                desc = ",".join(feat.attributes[desc_attr])

        if opts.full_header:
            desc_parts = []
            desc_parts.append(desc)

            if opts.conf_class and 'conf_class' in feat.attributes:
                desc_parts.append(feat.attributes['conf_class'][0])

            if opts.full_header == "tair":
                orient = "REVERSE" if feat.strand == "-" else "FORWARD"
                feat_coords = "{0}:{1}-{2} {3} LENGTH=[LEN]".format(
                    feat.seqid, feat.start, feat.end, orient)
            else:
                (s, e) = (feat.start, feat.end) if (feat.strand == "+") \
                    else (feat.end, feat.start)
                feat_coords = "{0}:{1}-{2}".format(feat.seqid, s, e)
            desc_parts.append(feat_coords)

            datestamp = opts.datestamp if opts.datestamp else \
                "{0}{1}{2}".format(dt.now().year, dt.now().month, dt.now().day)
            desc_parts.append(datestamp)

            desc = sep.join(str(x) for x in desc_parts)
            desc = "".join(str(x) for x in (sep, desc)).strip()

        if opts.feature == "upstream":
            upstream_start, upstream_stop = get_upstream_coords(
                upstream_site, upstream_len, seqlen[feat.seqid], feat,
                children_list, g)

            if not upstream_start or not upstream_stop:
                continue

            feat_seq = f.sequence(dict(chr=feat.seqid, start=upstream_start,
                                       stop=upstream_stop, strand=feat.strand))

            (s, e) = (upstream_start, upstream_stop) \
                if feat.strand == "+" else \
                (upstream_stop, upstream_start)
            upstream_seq_loc = str(feat.seqid) + ":" + str(s) + "-" + str(e)
            desc = sep.join(str(x) for x in (desc, upstream_seq_loc,
                                             "FLANKLEN=" + str(upstream_len)))
        else:
            children = []
            if not skipChildren:
                for c in g.children(feat.id, 1):
                    if c.featuretype not in children_list:
                        continue
                    child = f.sequence(dict(chr=c.chrom, start=c.start,
                                            stop=c.stop, strand=c.strand))
                    children.append((child, c))

                if not children:
                    print("[warning] %s has no children with type %s"
                          % (feat.id, ','.join(children_list)), file=sys.stderr)
                    continue
            else:
                child = f.sequence(dict(chr=feat.seqid, start=feat.start,
                                        stop=feat.end, strand=feat.strand))
                children.append((child, feat))

            # sort children by position; reverse them on the negative strand
            children.sort(key=lambda x: x[1].start)
            if feat.strand == '-':
                children.reverse()
            feat_seq = ''.join(x[0] for x in children)

        desc = desc.replace("\"", "")

        id = ",".join(feat.attributes[id_attr]) if id_attr \
            and feat.attributes[id_attr] else \
            feat.id

        if opts.full_header == "tair":
            desc = desc.replace("[LEN]", str(len(feat_seq)))

        rec = SeqRecord(Seq(feat_seq), id=id, description=desc)
        SeqIO.write([rec], fw, "fasta")
        fw.flush()
%prog load gff_file fasta_file [--options] Parses the selected features out of GFF, with subfeatures concatenated. For example, to get the CDS sequences, do this: $ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS To get 500bp upstream of a gene's Transcription Start Site (TSS), do this: $ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500 Switch TSS with TrSS for Translation Start Site.
15,741
def call_somatic(tumor_name, normal_name):
    # thresholds are phred scores divided by 10, so 3.5 = phred35
    tumor_thresh, normal_thresh = 3.5, 3.5
    # the two VCF header lines were elided in the source; the strings below
    # are assumed reconstructions that match the format arguments
    new_headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">\n',
                   ('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
                    'or phred likelihoods: tumor: %s, normal %s.">\n')
                   % (int(tumor_thresh * 10), int(normal_thresh * 10))]

    def _output_filter_line(line, indexes):
        parts = line.split("\t")
        if _check_lods(parts, tumor_thresh, normal_thresh, indexes) and _check_freqs(parts, indexes):
            parts[7] = parts[7] + ";SOMATIC"
        else:
            if parts[6] in set([".", "PASS"]):
                parts[6] = "REJECT"
            else:
                parts[6] += ";REJECT"
        line = "\t".join(parts)
        sys.stdout.write(line)

    def _write_header(header):
        for hline in header[:-1] + new_headers + [header[-1]]:
            sys.stdout.write(hline)

    header = []
    indexes = None
    for line in sys.stdin:
        if not indexes:
            if line.startswith("#"):
                header.append(line)
            else:
                parts = header[-1].rstrip().split("\t")
                indexes = {"tumor": parts.index(tumor_name),
                           "normal": parts.index(normal_name)}
                _write_header(header)
                _output_filter_line(line, indexes)
        else:
            _output_filter_line(line, indexes)
    # no calls, only header lines
    if not indexes:
        _write_header(header)
Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag. Works from stdin and writes to stdout, finding positions of tumor and normal samples. Uses MuTect-like somatic filter based on implementation in speedseq: https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62 Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score). For tumors, we retrieve the best likelihood to not be reference (the first GL) and for normal, the best likelihood to be reference. After calculating the likelihoods, we compare these to thresholds to pass variants at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations. We also check that the frequency of the tumor exceeds the frequency of the normal by a threshold to avoid calls that are low frequency in both tumor and normal. This supports both FreeBayes and VarDict output frequencies.
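A minimal sketch of the LOD check described above (hypothetical helper name; the real _check_lods additionally parses the GL field out of the VCF sample columns):

def _check_lods_sketch(tumor_gls, normal_gls, tumor_thresh, normal_thresh):
    # GLs are log10-scaled genotype likelihoods, reference genotype first
    # tumor: best likelihood to *not* be reference
    tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
    # normal: best likelihood to *be* reference
    normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
    return tumor_lod >= tumor_thresh and normal_lod >= normal_thresh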
15,742
def declare_alias(self, name):
    def decorator(f):
        self._auto_register_function(f, name)
        return f
    return decorator
Insert a Python function into this Namespace with an explicitly-given name, but detect its argument count automatically.
15,743
def point_distance_ellipsode(point1, point2):
    a = 6378137  # equatorial radius (m)
    f = 1 / 298.25722  # flattening
    b = a - a * f
    e = math.sqrt((a * a - b * b) / (a * a))  # first eccentricity
    # the geojson member name was elided in the source; 'coordinates' is assumed
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    lon2 = point2['coordinates'][0]  # the source read point1 here, almost certainly a typo
    lat2 = point2['coordinates'][1]
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), 0.5))
    distance_lat = M * number2radius(lat2 - lat1)
    distance_lon = N * math.cos(number2radius(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
calculate the distance between two points on the ellipsoid, based on point1 Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance
15,744
def _get(self, node, key):
    node_type = self._get_node_type(node)

    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE

    if node_type == NODE_TYPE_BRANCH:
        # already reached the expected node
        if not key:
            return node[-1]
        sub_node = self._decode_to_node(node[key[0]])
        return self._get(sub_node, key[1:])

    # key-value node
    curr_key = without_terminator(unpack_to_nibbles(node[0]))

    if node_type == NODE_TYPE_LEAF:
        return node[1] if key == curr_key else BLANK_NODE

    if node_type == NODE_TYPE_EXTENSION:
        # traverse the child node
        if starts_with(key, curr_key):
            sub_node = self._decode_to_node(node[1])
            return self._get(sub_node, key[len(curr_key):])
        else:
            return BLANK_NODE
get value inside a node :param node: node in form of list, or BLANK_NODE :param key: nibble list without terminator :return: BLANK_NODE if the key does not exist, otherwise value or hash
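An illustration of the nibble-list keys assumed above: lookups descend the trie half a byte at a time, so a branch node indexes its 16 children by the first nibble and recurses on the rest.

key = b'dog'
nibbles = []
for byte in key:
    nibbles.extend([byte // 16, byte % 16])
assert nibbles == [6, 4, 6, 15, 6, 7]   # 'd'=0x64, 'o'=0x6f, 'g'=0x67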
15,745
def _handle_codeblock(self, match):
    from pygments.lexers import get_lexer_by_name

    # section header
    yield match.start(1), String, match.group(1)
    yield match.start(2), String, match.group(2)
    yield match.start(3), Text, match.group(3)

    # look up a lexer for the language, if wanted and existing
    lexer = None
    if self.handlecodeblocks:
        try:
            lexer = get_lexer_by_name(match.group(2).strip())
        except ClassNotFound:
            pass
    code = match.group(4)

    # no lexer for this language: emit the code as a plain string
    if lexer is None:
        yield match.start(4), String, code
        return

    for item in do_insertions([], lexer.get_tokens_unprocessed(code)):
        yield item

    yield match.start(5), String, match.group(5)
match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
15,746
def build_props(self):
    props = {}
    if self.filters:
        props["filters"] = {}
        for grp in self.filters:
            props["filters"][grp] = [f.params for f in self.filters[grp]]
    if self.charts:
        props["charts"] = [c.params for c in self.charts]
    props["type"] = self.layout
    return props
Build the props dictionary.
15,747
def update(self):
    if self.hasproxy():
        sonarD = SonarData()
        range = 0
        data = self.proxy.getSonarData()
        sonarD.range = data.range
        sonarD.maxAngle = data.maxAngle
        sonarD.minAngle = data.minAngle
        sonarD.maxRange = data.maxRange
        sonarD.minRange = data.minRange
        self.lock.acquire()
        self.sonar = sonarD
        self.lock.release()
Updates SonarData.
15,748
def _readXput(self, fileCards, directory, session, spatial=False,
              spatialReferenceID=4236, replaceParamFile=None):
    for card in self.projectCards:
        if (card.name in fileCards) and self._noneOrNumValue(card.value) \
                and fileCards[card.name]:
            fileIO = fileCards[card.name]
            filename = card.value.strip()

            # invoke the read method for each file
            self._invokeRead(fileIO=fileIO,
                             directory=directory,
                             filename=filename,
                             session=session,
                             spatial=spatial,
                             spatialReferenceID=spatialReferenceID,
                             replaceParamFile=replaceParamFile)
GSSHAPY Project Read Files from File Method
15,749
def flat(self, obj, mask=0):
    s = self.base
    if self.leng and self.item > 0:  # include the items
        s += self.leng(obj) * self.item
    if _getsizeof:  # prefer the interpreter's own sizeof
        s = _getsizeof(obj, s)
    if mask:  # align
        s = (s + mask) & ~mask
    return s
Return the aligned flat size.
15,750
def _query_helper(self, by=None):
    if by is None:
        primary_keys = self.table.primary_key.columns.keys()

        if len(primary_keys) > 1:
            warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. "
                          "USING THE FIRST KEY %s."
                          % (self.table.name, primary_keys[0]))

        if not primary_keys:
            raise NoPrimaryKeyException("Table %s needs a primary key for "
                                        "the .last() method to work properly. "
                                        "Alternatively, specify an ORDER BY "
                                        "column with the by= argument. "
                                        % self.table.name)
        id_col = primary_keys[0]
    else:
        id_col = by

    if self.column is None:
        col = "*"
    else:
        col = self.column.name

    return col, id_col
Internal helper for preparing queries.
15,751
def copyto(self, other):
    if isinstance(other, Context):
        return super(RowSparseNDArray, self).copyto(other)
    elif isinstance(other, NDArray):
        stype = other.stype
        # stype names and error messages were elided in the source;
        # 'default' and 'row_sparse' are assumed
        if stype in ('default', 'row_sparse'):
            return super(RowSparseNDArray, self).copyto(other)
        else:
            raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
    else:
        raise TypeError('copyto does not support type ' + str(type(other)))
Copies the value of this array to another array. If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or RowSparseNDArray or Context The destination array or context. Returns ------- NDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
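A hypothetical usage sketch for the MXNet method documented above:

import mxnet as mx

a = mx.nd.sparse.zeros('row_sparse', (3, 2))
b = mx.nd.zeros((3, 2))
a.copyto(b)               # copy into an existing dense NDArray of the same shape
c = a.copyto(mx.cpu())    # or materialize a fresh copy on a context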
15,752
def _log_graphql_error(self, query, data):
    if isinstance(query, bytes):
        query = query.decode()
    elif not isinstance(query, str):
        # allow operation objects to be passed and converted to a query string
        query = bytes(query).decode()

    data = self._fixup_graphql_error(data)

    # the dict keys and log format strings below were elided in the source;
    # they are reconstructed from the docstring and should be read as assumptions
    errors = data['errors']
    self.logger.error('GraphQL query failed with %s errors', len(errors))
    for i, error in enumerate(errors):
        paths = error.get('path')
        if paths:
            paths = ' ' + '/'.join(str(path) for path in paths)
        else:
            paths = ''
        self.logger.info('Error #{}{}:'.format(i, paths))
        for ln in error.get('message', '').split('\n'):
            self.logger.info('   | {}'.format(ln))
        s = self.snippet(query, error.get('locations'))
        if s:
            self.logger.info('   | Locations:')
            self.logger.info('   |')
            for ln in s:
                self.logger.info('   | {}'.format(ln))
    return data
Log a ``{"errors": [...]}`` GraphQL return and return itself. :param query: the GraphQL query that triggered the result. :type query: str :param data: the decoded JSON object. :type data: dict :return: the input ``data`` :rtype: dict
15,753
def t_binaryValue(t):
    r'[+-]?[0-9]+[bB]'
    # NOTE: the validation branch was lost in extraction; this is a plausible
    # reconstruction. [0-9] is matched above, so digits other than 0/1 must
    # be rejected here (the original error handling was elided).
    if re.search(r'[2-9]', t.value[:-1]):
        raise ValueError('Invalid binary number %r' % t.value)
    else:
        t.value = int(t.value[0:-1], 2)
    return t
r'[+-]?[0-9]+[bB]'
15,754
def analyze(problem, Y, X, M=10, print_to_console=False, seed=None):
    if seed:
        np.random.seed(seed)
    # dict keys were elided in the source; 'num_vars', 'names' and 'S1' are
    # assumed from the SALib problem spec and the docstring
    D = problem['num_vars']
    N = Y.size

    if print_to_console:
        print("Parameter First")

    Si = ResultDict((k, [None] * D) for k in ['S1'])
    Si['names'] = problem['names']

    for i in range(D):
        S1 = compute_first_order(permute_outputs(Y, X[:, i]), M)
        S1 = unskew_S1(S1, M, N)
        Si['S1'][i] = S1
        if print_to_console:
            print("%s %g" % (problem['names'][i].ljust(9), Si['S1'][i]))
    return Si
Performs the Random Balanced Design - Fourier Amplitude Sensitivity Test (RBD-FAST) on model outputs. Returns a dictionary with keys 'S1', where each entry is a list of size D (the number of parameters) containing the indices in the same order as the parameter file. Parameters ---------- problem : dict The problem definition Y : numpy.array A NumPy array containing the model outputs X : numpy.array A NumPy array containing the model inputs M : int The interference parameter, i.e., the number of harmonics to sum in the Fourier series decomposition (default 10) print_to_console : bool Print results directly to console (default False) References ---------- .. [1] S. Tarantola, D. Gatelli and T. Mara (2006) "Random Balance Designs for the Estimation of First Order Global Sensitivity Indices", Reliability Engineering and System Safety, 91:6, 717-727 .. [2] Elmar Plischke (2010) "An effective algorithm for computing global sensitivity indices (EASI) Reliability Engineering & System Safety", 95:4, 354-360. doi:10.1016/j.ress.2009.11.005 .. [3] Jean-Yves Tissot, Clémentine Prieur (2012) "Bias correction for the estimation of sensitivity indices based on random balance designs.", Reliability Engineering and System Safety, Elsevier, 107, 205-213. doi:10.1016/j.ress.2012.06.010 .. [4] Jeanne Goffart, Mickael Rabouille & Nathan Mendes (2015) "Uncertainty and sensitivity analysis applied to hygrothermal simulation of a brick building in a hot and humid climate", Journal of Building Performance Simulation. doi:10.1080/19401493.2015.1112430 Examples -------- >>> X = latin.sample(problem, 1000) >>> Y = Ishigami.evaluate(X) >>> Si = rbd_fast.analyze(problem, Y, X, print_to_console=False)
15,755
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
    return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size,
                         numPartitions, seed)
Generates an RDD comprised of i.i.d. samples from the Exponential distribution with the input mean. :param sc: SparkContext used to create the RDD. :param mean: Mean, or 1 / lambda, for the Exponential distribution. :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ Exp(mean). >>> mean = 2.0 >>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - mean) < 0.5 True >>> from math import sqrt >>> abs(stats.stdev() - sqrt(mean)) < 0.5 True
15,756
def get_android_resources(self):
    try:
        return self.arsc["resources.arsc"]
    except KeyError:
        self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
        return self.arsc["resources.arsc"]
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file :rtype: :class:`ARSCParser`
15,757
def transpose(a, axes=None):
    if isinstance(a, np.ndarray):
        return np.transpose(a, axes)
    elif isinstance(a, RemoteArray):
        return a.transpose(*axes)
    elif isinstance(a, Remote):
        return _remote_to_array(a).transpose(*axes)
    elif isinstance(a, DistArray):
        if axes is None:
            axes = range(a.ndim - 1, -1, -1)
        axes = list(axes)
        if len(set(axes)) < len(axes):
            raise ValueError("repeated axis in transpose")
        if sorted(axes) != list(range(a.ndim)):
            raise ValueError("axes don't match array")
        distaxis = a._distaxis
        new_distaxis = axes.index(distaxis)
        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
        return DistArray(new_subarrays, new_distaxis)
    else:
        return np.transpose(a, axes)
Returns a view of the array with axes transposed. For a 1-D array, this has no effect. For a 2-D array, this is the usual matrix transpose. For an n-D array, if axes are given, their order indicates how the axes are permuted Args: a (array_like): Input array. axes (list of int, optional): By default, reverse the dimensions, otherwise permute the axes according to the values given.
15,758
def _ignore_sql(self, query):
    # the settings key and query field were elided in the source;
    # the names below are assumptions
    return any([
        re.search(pattern, query.get('sql'))
        for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
    ])
Check to see if we should ignore the sql query.
15,759
def get_minutes_description(self):
    return self.get_segment_description(
        self._expression_parts[1],
        _("every minute"),
        lambda s: s,
        lambda s: _("every {0} minutes").format(s),
        lambda s: _("minutes {0} through {1} past the hour"),
        # the value for the "0" case was elided in source; an empty string is assumed
        lambda s: '' if s == "0" else _("at {0} minutes past the hour")
    )
Generates a description for only the MINUTE portion of the expression Returns: The MINUTE description
15,760
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs):
    config = ET.Element("config")
    logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
    config = logical_chassis_fwdl_status
    output = ET.SubElement(logical_chassis_fwdl_status, "output")
    cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
    fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
    blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
    # kwargs key names elided in source; assumed from the generator pattern
    blade_swbd.text = kwargs.pop('blade_swbd')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
15,761
def _expand_colspan_rowspan(self, rows):
    all_texts = []  # list of rows, each a list of str
    remainder = []  # list of (index, text, nrows)

    for tr in rows:
        texts = []  # the output for this row
        next_remainder = []

        index = 0
        tds = self._parse_td(tr)
        for td in tds:
            # append texts from previous rows with rowspan > 1 that come
            # before this <td>
            while remainder and remainder[0][0] <= index:
                prev_i, prev_text, prev_rowspan = remainder.pop(0)
                texts.append(prev_text)
                if prev_rowspan > 1:
                    next_remainder.append((prev_i, prev_text,
                                           prev_rowspan - 1))
                index += 1

            # append the text from this <td>, colspan times
            text = _remove_whitespace(self._text_getter(td))
            rowspan = int(self._attr_getter(td, 'rowspan') or 1)
            colspan = int(self._attr_getter(td, 'colspan') or 1)

            for _ in range(colspan):
                texts.append(text)
                if rowspan > 1:
                    next_remainder.append((index, text, rowspan - 1))
                index += 1

        # append texts from previous rows at the final position
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text,
                                       prev_rowspan - 1))

        all_texts.append(texts)
        remainder = next_remainder

    # append rows that only exist because a previous row had rowspan > 1
    while remainder:
        next_remainder = []
        texts = []
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text,
                                       prev_rowspan - 1))
        all_texts.append(texts)
        remainder = next_remainder

    return all_texts
Given a list of <tr>s, return a list of text rows. Parameters ---------- rows : list of node-like List of <tr>s Returns ------- list of list Each returned row is a list of str text. Notes ----- Any cell with ``rowspan`` or ``colspan`` will have its contents copied to subsequent cells.
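A worked example of the expansion described above (made-up HTML fragment; pandas.read_html, which uses this helper internally, needs an HTML parser such as lxml installed):

import pandas as pd
from io import StringIO

html = ('<table><tr><td rowspan="2">A</td><td>B</td></tr>'
        '<tr><td>C</td></tr></table>')
# 'A' is copied down into the second row that its rowspan covers,
# yielding the text rows [['A', 'B'], ['A', 'C']]
print(pd.read_html(StringIO(html))[0])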
15,762
def learn(self, bottomUpInput, enableInference=None):
    return self.compute(bottomUpInput, enableLearn=True,
                        enableInference=enableInference)
TODO: document :param bottomUpInput: :param enableInference: :return:
15,763
def delete(self, folder_id):
    self.folder_id = folder_id
    return self._mc_client._delete(url=self._build_path(folder_id))
Delete a specific campaign folder, and mark all the campaigns in the folder as ‘unfiled’. :param folder_id: The unique id for the campaign folder. :type folder_id: :py:class:`str`
15,764
def flipFlopFsm(self, fsmTable, *varBinds, **context):
    # NOTE: the context keys, importSymbols arguments and debug log format
    # strings below were elided in the source; they are reconstructed from
    # the docstring and surrounding usage and should be read as assumptions
    count = [0]

    cbFun = context.get('cbFun')

    def _cbFun(varBind, **context):
        idx = context.pop('idx', None)
        err = context.pop('error', None)
        if err:
            errors = context['errors']
            errors.append(
                {'error': err,
                 'idx': idx,
                 'varbind': varBind,
                 'state': context['state']})
            context['status'] = self.STATUS_ERROR

        if idx is None:
            if cbFun:
                cbFun((), **context)
            return

        _varBinds = context['varBinds']
        _varBinds[idx] = varBind

        count[0] += 1

        debug.logger & debug.FLAG_INS and debug.logger(
            '_cbFun: var-bind %d, processed %d of %d' % (idx, count[0], len(varBinds)))

        if count[0] < len(varBinds):
            return

        debug.logger & debug.FLAG_INS and debug.logger(
            '_cbFun: finished, output var-binds %r' % (_varBinds,))

        self.flipFlopFsm(fsmTable, *varBinds, **dict(context, cbFun=cbFun))

    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: input var-binds %r' % (varBinds,))

    mibTree, = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso')

    try:
        state = context['state']
        status = context['status']
        instances = context['instances']
        errors = context['errors']
        _varBinds = context['varBinds']
    except KeyError:
        state, status = self.STATE_START, self.STATUS_OK
        instances = {}
        errors = []
        _varBinds = list(varBinds)

        self._indexMib()

    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: current state %s, status %s' % (state, status))

    try:
        newState = fsmTable[(state, status)]
    except KeyError:
        try:
            newState = fsmTable[(self.STATE_ANY, status)]
        except KeyError:
            raise error.SmiError(
                'Unresolved FSM state %s, %s' % (state, status))

    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: state %s, status %s -> new state %s' % (state, status, newState))

    state = newState

    if state == self.STATE_STOP:
        context.pop('state', None)
        context.pop('status', None)
        context.pop('instances', None)
        context.pop('varBinds', None)
        if cbFun:
            cbFun(_varBinds, **context)
        return

    # even an empty request runs through the FSM so expected callbacks fire
    if not varBinds:
        _cbFun(None, **context)
        return

    actionFun = getattr(mibTree, state, None)
    if not actionFun:
        raise error.SmiError(
            'Unsupported state handler %s at %s' % (state, self))

    for idx, varBind in enumerate(varBinds):
        actionFun(
            varBind,
            **dict(context, cbFun=_cbFun,
                   state=state, status=status,
                   idx=idx, total=len(varBinds),
                   instances=instances, errors=errors,
                   varBinds=_varBinds, nextName=None))

        debug.logger & debug.FLAG_INS and debug.logger(
            'flipFlopFsm: action %r for var-bind %r' % (actionFun, varBind))
Read, modify, create or remove Managed Objects Instances. Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType`, recursively transitions corresponding Managed Objects Instances through the Finite State Machine (FSM) states till it reaches its final stop state. Parameters ---------- fsmTable: :py:class:`dict` A map of (`state`, `status`) -> `state` representing FSM transition matrix. See :py:class:`RowStatus` for FSM transition logic. varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects representing Managed Objects Instances to work with. Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. * `acFun` (callable) - user-supplied callable that is invoked to authorize access to the requested Managed Object Instance. If not supplied, no access control will be performed. Notes ----- The callback functions (e.g. `cbFun`, `acFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of errors, the `errors` key in the `context` dict will contain a sequence of `dict` objects describing one or more errors that occur. Such error `dict` will have the `error`, `idx` and `state` keys providing the details concerning the error, for which variable-binding and in what state the system has failed.
15,765
def get_arguments(self):
    ApiCli.get_arguments(self)

    if self.args.hostGroupId is not None:
        self.hostGroupId = self.args.hostGroupId

    if self.args.force is not None:
        self.force = self.args.force

    if self.force:
        self.url_parameters = {"forceRemove": True}

    self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
Extracts the specific arguments of this CLI
15,766
def __msgc_step3_discontinuity_localization(self):
    import scipy

    start = self._start_time

    seg = 1 - self.segmentation.astype(np.int8)
    self.stats["low level object voxels"] = np.sum(seg)
    self.stats["low level image voxels"] = np.prod(seg.shape)

    # locate the discontinuity in the low-resolution segmentation
    seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
    seg_border[seg_border != 0] = 1
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))

    # dilate the discontinuity area
    boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
    seg = scipy.ndimage.morphology.binary_dilation(
        seg_border,
        np.ones(
            [
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
            ]
        ),
    )
    if self.keep_temp_properties:
        self.temp_msgc_lowres_discontinuity = seg
    else:
        self.temp_msgc_lowres_discontinuity = None

    if self.debug_images:
        import sed3
        pd = sed3.sed3(seg_border)
        pd.show()
        pd = sed3.sed3(seg)
        pd.show()

    self.stats["t3"] = time.time() - start
    return seg
Estimate discontinuity in basis of low resolution image segmentation. :return: discontinuity in low resolution
15,767
def full_parent_name(self):
    entries = []
    command = self
    while command.parent is not None:
        command = command.parent
        entries.append(command.name)
    return ' '.join(reversed(entries))
Retrieves the fully qualified parent command name. This is the base command name required to execute it. For example, in ``?one two three`` the parent name would be ``one two``.
15,768
def area(self):
    # the lateral area of the extrusion
    area = abs(self.primitive.height * self.primitive.polygon.length)
    # plus the area of the two end caps
    area += self.primitive.polygon.area * 2
    return area
The surface area of the primitive extrusion. Calculated from polygon and height to avoid mesh creation. Returns ---------- area: float, surface area of 3D extrusion
15,769
def colorize(text, messageType=None):
    formattedText = str(text)
    if "ERROR" in messageType:
        formattedText = colorama.Fore.RED + formattedText
    elif "WARNING" in messageType:
        formattedText = colorama.Fore.YELLOW + formattedText
    elif "SUCCESS" in messageType:
        formattedText = colorama.Fore.GREEN + formattedText
    elif "INFO" in messageType:
        formattedText = colorama.Fore.BLUE + formattedText
    if "BOLD" in messageType:
        formattedText = colorama.Style.BRIGHT + formattedText
    return formattedText + colorama.Style.RESET_ALL
Function that colorizes a message. Args: ----- text: The string to be colorized. messageType: Possible options include "ERROR", "WARNING", "SUCCESS", "INFO" or "BOLD". Returns: -------- string: Colorized if the option is correct, including a tag at the end to reset the formatting.
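A small usage sketch, assuming colorama has been initialised by the caller:

import colorama
colorama.init()

print(colorize("Operation completed", "SUCCESS"))
print(colorize("Disk almost full", "WARNING BOLD"))   # flags can be combined in one string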
15,770
def url_for(self, *args, **kwargs):
    return yarl.URL(self.url(parts=kwargs))
Construct url for route with additional params.
15,771
def format_cffi_externs(cls):
    extern_decls = [
        f.extern_signature.pretty_print()
        for _, f in cls._extern_fields.items()
    ]
    # the surrounding literal fragments were elided in the source; a plausible
    # extern "Python" block wrapper is assumed
    return ('extern "Python" {\n'
            + '\n'.join(extern_decls)
            + '\n}')
Generate stubs for the cffi bindings from @_extern_decl methods.
15,772
def undefine(vm_, **kwargs):
    conn = __get_conn(**kwargs)
    dom = _get_domain(conn, vm_)
    # the attribute name was elided in the source but is recoverable from
    # its use on the next line
    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
        ret = dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
    else:
        ret = dom.undefine() == 0
    conn.close()
    return ret
Remove a defined vm, this does not purge the virtual machine image, and this only works if the vm is powered down :param vm_: domain name :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.undefine <domain>
15,773
def std(a, axis=None, ddof=0):
    axes = _normalise_axis(axis, a)
    if axes is None or len(axes) != 1:
        msg = "This operation is currently limited to a single axis"
        raise AxisSupportError(msg)
    dtype = (np.array([0], dtype=a.dtype) / 1.).dtype
    return _Aggregation(a, axes[0],
                        _StdStreamsHandler, _StdMaskedStreamsHandler,
                        dtype, dict(ddof=ddof))
Request the standard deviation of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. :param axis: Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. :type axis: None, or int, or iterable of ints. :param int ddof: Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. By default ddof is zero. :return: The Array representing the requested standard deviation. :rtype: Array
15,774
def lazy_connect(cls, pk):
    instance = cls()
    instance._pk = instance.pk.normalize(pk)
    instance._connected = False
    return instance
Create an object, setting its primary key without testing it. So the instance is not connected
15,775
def __perform_request(self, url, type=GET, params=None):
    if params is None:
        params = {}

    if not self.token:
        raise TokenError("No token provided. Please use a valid token")

    url = urlparse.urljoin(self.end_point, url)

    # lookup table for the appropriate requests method, headers and payload
    # type (json body or query parameters).
    # NOTE: the header names, payload keys, user-agent name, redaction text
    # and the debug format string below were elided in the source and are
    # assumptions
    identity = lambda x: x
    json_dumps = lambda x: json.dumps(x)
    lookup = {
        GET: (self._session.get, {}, 'params', identity),
        POST: (self._session.post, {'Content-type': 'application/json'}, 'data', json_dumps),
        PUT: (self._session.put, {'Content-type': 'application/json'}, 'data', json_dumps),
        DELETE: (self._session.delete, {'Content-type': 'application/json'}, 'data', json_dumps),
    }

    requests_method, headers, payload, transform = lookup[type]
    agent = "{0}/{1} {2}/{3}".format('python-digitalocean', __version__,
                                     requests.__name__, requests.__version__)
    headers.update({'Authorization': 'Bearer ' + self.token,
                    'User-Agent': agent})
    kwargs = {'headers': headers, payload: transform(params)}

    timeout = self.get_timeout()
    if timeout:
        kwargs['timeout'] = timeout

    # redact the token from the headers before logging
    headers_str = str(headers).replace(self.token.strip(), '<redacted>')
    self._log.debug('%s %s %s:%s %s %s'
                    % (type, url, payload, params, headers_str, timeout))

    return requests_method(url, **kwargs)
This method performs the real request; this way, only the "output" of the API call needs to be customized, via the self.__call_api method. This method returns the request object.
15,776
def update_energy(self, bypass_check: bool = False):
    if bypass_check or (not bypass_check and self.update_time_check):
        self.get_weekly_energy()
        # The key literal was lost in extraction; 'week' is assumed.
        if 'week' in self.energy:
            self.get_monthly_energy()
            self.get_yearly_energy()
        if not bypass_check:
            self.update_energy_ts = time.time()
Builds weekly, monthly and yearly dictionaries
15,777
def parse_comet(self):
    import re

    # The original pattern literals were lost in extraction; the regex
    # below is a plausible reconstruction that preserves the group
    # layout the code relies on (el[0]: prefix/number, el[3]:
    # designation, el[5]: name).
    pat = (r'(([1-9][0-9]*)?[PDCXAI](-[A-Z]{1,2})?(?=/|$|\s))'       # el[0]
           r'|((-?[0-9]{3,4} [A-Z]{1,2}[0-9]{0,3}(?:-[A-Z]{1,2})?))' # el[3]
           r'|\(?([A-Za-z][0-9A-Za-z \-]*[0-9A-Za-z])\)?')           # el[5]

    m = re.findall(pat, self.targetname.strip())

    prefixnumber = None
    desig = None
    name = None

    if len(m) > 0:
        for el in m:
            # prefix and number (the replace arguments were also lost;
            # stripping a slash is assumed)
            if len(el[0]) > 0:
                prefixnumber = el[0].replace('/', '')
            # designation (underscore-to-space is assumed)
            if len(el[3]) > 0:
                desig = el[3].replace('_', ' ')
            # name; single characters are rejected
            if len(el[5]) > 0:
                if len(el[5]) > 1:
                    name = el[5]

    return (desig, prefixnumber, name)
Parse `targetname` as if it were a comet.

:return: (string or None, int or None, string or None);
    The designation, number and prefix, and name of the comet as
    derived from `self.targetname` are extracted into a tuple; each
    element that does not exist is set to `None`. Parentheses in
    `self.targetname` will be ignored.

:example: the following table shows the result of the parsing:

    +--------------------------------+--------------------------------+
    |targetname                      |(desig, prefixnumber, name)     |
    +================================+================================+
    |1P/Halley                       |(None, '1P', 'Halley')          |
    +--------------------------------+--------------------------------+
    |3D/Biela                        |(None, '3D', 'Biela')           |
    +--------------------------------+--------------------------------+
    |9P/Tempel 1                     |(None, '9P', 'Tempel 1')        |
    +--------------------------------+--------------------------------+
    |73P/Schwassmann Wachmann 3 C    |(None, '73P',                   |
    |                                |'Schwassmann Wachmann 3 C')     |
    +--------------------------------+--------------------------------+
    |73P-C/Schwassmann Wachmann 3 C  |(None, '73P-C',                 |
    |                                |'Schwassmann Wachmann 3 C')     |
    +--------------------------------+--------------------------------+
    |73P-BB                          |(None, '73P-BB', None)          |
    +--------------------------------+--------------------------------+
    |322P                            |(None, '322P', None)            |
    +--------------------------------+--------------------------------+
    |X/1106 C1                       |('1106 C1', 'X', None)          |
    +--------------------------------+--------------------------------+
    |P/1994 N2 (McNaught-Hartley)    |('1994 N2', 'P',                |
    |                                |'McNaught-Hartley')             |
    +--------------------------------+--------------------------------+
    |P/2001 YX127 (LINEAR)           |('2001 YX127', 'P', 'LINEAR')   |
    +--------------------------------+--------------------------------+
    |C/-146 P1                       |('-146 P1', 'C', None)          |
    +--------------------------------+--------------------------------+
    |C/2001 A2-A (LINEAR)            |('2001 A2-A', 'C', 'LINEAR')    |
    +--------------------------------+--------------------------------+
    |C/2013 US10                     |('2013 US10', 'C', None)        |
    +--------------------------------+--------------------------------+
    |C/2015 V2 (Johnson)             |('2015 V2', 'C', 'Johnson')     |
    +--------------------------------+--------------------------------+
    |C/2016 KA (Catalina)            |('2016 KA', 'C', 'Catalina')    |
    +--------------------------------+--------------------------------+
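A standalone, runnable illustration of the simple periodic-comet form from the table above, using a deliberately simplified pattern (number+prefix, an optional fragment, a slash, then the name):

import re

m = re.match(r"(\d+[PD](?:-[A-Z]{1,2})?)/(.+)", "73P-C/Schwassmann Wachmann 3 C")
print(m.groups())   # ('73P-C', 'Schwassmann Wachmann 3 C')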
15,778
def _process_book(book_url):
    data = DOWNER.download(book_url)
    dom = dhtmlparser.parseString(data)

    details_tags = dom.find("div", {"id": "contentDetail"})
    assert details_tags, "Can't find details of the book."
    details = details_tags[0]

    title = _parse_title(dom, details)
    authors = _parse_authors(details)
    publisher = _parse_publisher(details)
    price = _parse_price(details)
    pages, binding = _parse_pages_binding(details)

    pub = Publication(
        title,
        authors,
        price,
        publisher
    )

    pub.optionals.URL = book_url
    pub.optionals.binding = binding
    pub.optionals.pages = pages
    pub.optionals.ISBN, pub.optionals.EAN = _parse_ISBN_EAN(details)
    pub.optionals.edition = _parse_edition(details)
    pub.optionals.description = _parse_description(details)

    return pub
Parse available information about the book from the book details page.

Args:
    book_url (str): Absolute URL of the book.

Returns:
    obj: :class:`structures.Publication` instance with book details.
15,779
def _get_real_ip(self):
    try:
        # The key literals were lost in extraction; the standard Django
        # proxy headers are assumed here.
        real_ip = self.request.META['HTTP_X_FORWARDED_FOR']
        # The header may hold a comma-separated proxy chain; take the
        # left-most (originating) address.
        return real_ip.split(',')[0]
    except KeyError:
        return self.request.META['REMOTE_ADDR']
    except Exception:
        return None
Get IP from request.

:param request: A usual request object
:type request: HttpRequest
:return: ipv4 string or None
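A runnable illustration of the header handling: X-Forwarded-For may carry a comma-separated proxy chain, and only the left-most entry is the originating client.

forwarded = '203.0.113.7, 10.0.0.1, 10.0.0.2'
print(forwarded.split(',')[0].strip())   # 203.0.113.7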
15,780
def cli(env, limit, closed=False, get_all=False):
    manager = AccountManager(env.client)
    invoices = manager.get_invoices(limit, closed, get_all)

    table = formatting.Table([
        "Id", "Created", "Type", "Status",
        "Starting Balance", "Ending Balance", "Invoice Amount", "Items"
    ])
    # The alignment keys and record-field names below were lost in
    # extraction; plausible SoftLayer invoice fields are assumed.
    table.align['Starting Balance'] = 'l'
    table.align['Ending Balance'] = 'l'
    table.align['Invoice Amount'] = 'l'
    table.align['Items'] = 'l'

    if isinstance(invoices, dict):
        invoices = [invoices]
    for invoice in invoices:
        table.add_row([
            invoice.get('id'),
            utils.clean_time(invoice.get('createDate'), out_format="%Y-%m-%d"),
            invoice.get('typeCode'),
            invoice.get('statusCode'),
            invoice.get('startingBalance'),
            invoice.get('endingBalance'),
            invoice.get('invoiceTotalAmount'),
            invoice.get('itemCount')
        ])
    env.fout(table)
Invoices and all that mess
15,781
def delete(self, records, context):
    # The statement-key literal was lost in extraction; 'DELETE' is assumed.
    DELETE = self.statement('DELETE')
    sql, data = DELETE(records, context)
    if context.dryRun:
        print(sql % data)
        return 0
    else:
        return self.execute(sql, data, writeAccess=True)
Removes the inputted record from the database.

:param records | <orb.Collection>
       context | <orb.Context>

:return <int> number of rows removed
15,782
def add_criterion(self, name, priority, and_or, search_type, value):
    criterion = SearchCriteria(name, priority, and_or, search_type, value)
    self.criteria.append(criterion)
Add a search criteria object to a smart group.

Args:
    name: String Criteria type name (e.g. "Application Title")
    priority: Int or Str number priority of criterion.
    and_or: Str, either "and" or "or".
    search_type: String Criteria search type (e.g. "is", "is not",
        "member of", etc.). Construct a SmartGroup with the criteria
        of interest in the web interface to determine what range of
        values are available.
    value: String value to search for/against.
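A usage sketch mirroring the docstring's example; 'group' and the criterion values are hypothetical:

group.add_criterion('Application Title', 0, 'and', 'is', 'Firefox.app')
group.add_criterion('Computer Group', 1, 'and', 'member of', 'Laptops')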
15,783
def _set_pfiles(dry_run, **kwargs):
    # The key literals were lost in extraction; 'PFILES' and the
    # 'pfiles' keyword follow the docstring below.
    pfiles_orig = os.environ['PFILES']
    pfiles = kwargs.get('pfiles', None)
    if pfiles:
        if dry_run:
            print("mkdir %s" % pfiles)
        else:
            try:
                os.makedirs(pfiles)
            except OSError:
                pass
        pfiles = "%s:%s" % (pfiles, pfiles_orig)
        os.environ['PFILES'] = pfiles
    return pfiles_orig
Set the PFILES env var

Parameters
----------
dry_run : bool
    Don't actually run

Keyword arguments
-----------------
pfiles : str
    Value to set PFILES

Returns
-------
pfiles_orig : str
    Current value of PFILES envar
15,784
def ppf(q, df, loc=0.0, scale=1.0, gamma=1.0):
    result = np.zeros(q.shape[0])
    probzero = Skewt.cdf(x=np.zeros(1), loc=np.zeros(1), df=df, gamma=gamma)
    result[q < probzero] = (1.0 / gamma) * ss.t.ppf(
        ((np.power(gamma, 2) + 1.0) * q[q < probzero]) / 2.0, df)
    result[q >= probzero] = gamma * ss.t.ppf(
        (1.0 + 1.0 / np.power(gamma, 2)) / 2.0 * (q[q >= probzero] - probzero) + 0.5,
        df)
    return result
PPF function for Skew t distribution
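A quick sanity check of the branch formula above: with gamma = 1 (and hence probzero = 0.5, since the distribution is then symmetric) it reduces to the plain Student's t quantile function.

import numpy as np
import scipy.stats as ss

q, df, gamma = 0.9, 5.0, 1.0
skew_ppf = gamma * ss.t.ppf((1.0 + 1.0 / gamma**2) / 2.0 * (q - 0.5) + 0.5, df)
print(np.isclose(skew_ppf, ss.t.ppf(q, df)))   # True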
15,785
def get_profiles(self):
    out = set(x.profile for x in self.requires if x.profile)
    out.update(x.profile for x in self.removes if x.profile)
    return out
Returns set of profile names referenced in this Feature :returns: set of profile names
15,786
def sign_transaction(self, signer: Account):
    tx_hash = self.hash256()
    sig_data = signer.generate_signature(tx_hash)
    sig = [Sig([signer.get_public_key_bytes()], 1, [sig_data])]
    self.sig_list = sig
This interface is used to sign the transaction.

:param signer: an Account object which will sign the transaction.
:return: None; the transaction's sig_list is updated in place with the signature.
15,787
def set_state(self, state):
    self.index_x.set(state[REG_X])
    self.index_y.set(state[REG_Y])

    self.user_stack_pointer.set(state[REG_U])
    self.system_stack_pointer.set(state[REG_S])

    self.program_counter.set(state[REG_PC])

    self.accu_a.set(state[REG_A])
    self.accu_b.set(state[REG_B])

    self.direct_page.set(state[REG_DP])
    self.set_cc(state[REG_CC])

    self.cycles = state["cycles"]
    self.memory.load(address=0x0000, data=state["RAM"])
used in unittests
15,788
def doNew(self, WHAT={}, **params):
    # Several literals were lost in extraction; the attribute name,
    # skipped keys, and error messages below are plausible
    # reconstructions. Python 2 syntax (has_key, raise FMError, ...)
    # is modernized.
    if hasattr(WHAT, '__new2old__'):
        for key in WHAT:
            if key not in ['RECORDID', 'MODID']:
                if key in WHAT.__new2old__:
                    self._addDBParam(WHAT.__new2old__[key].encode(), WHAT[key])
                else:
                    self._addDBParam(key, WHAT[key])
    elif type(WHAT) == dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError('Invalid argument type: %s' % type(WHAT))

    if self._layout == '':
        raise FMError('No layout was selected')

    for key in params:
        self._addDBParam(key, params[key])

    if len(self._dbParams) == 0:
        raise FMError('No data to be added')

    return self._doAction()
This function will perform the command -new.
15,789
def notify(self, subsystem, recipient, subject, body_html, body_text):
    # String literals were lost in extraction; the '#'/'@' prefixes and
    # the target-type names below follow the documented recipient
    # formats.
    if not re.match(self.validation, recipient, re.I):
        raise ValueError('Invalid recipient provided')

    if recipient.startswith('#'):
        target_type = 'channel'
    elif recipient.find('@') != -1:
        target_type = 'user'
    else:
        self.log.error('Unknown recipient type: {}'.format(recipient))
        return

    try:
        self._send_message(
            target_type=target_type,
            target=recipient,
            message=body_text,
            title=subject
        )
    except SlackError as ex:
        self.log.error('Failed sending message to {}: {}'.format(recipient, ex))
You can send messages to either channels or private groups by using the following formats:

    #channel-name
    @username-direct-message

Args:
    subsystem (`str`): Name of the subsystem originating the notification
    recipient (`str`): Recipient
    subject (`str`): Subject / title of the notification, not used for this notifier
    body_html (`str`): HTML formatted version of the message, not used for this notifier
    body_text (`str`): Text formatted version of the message

Returns:
    `None`
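A runnable illustration of the recipient routing described above; the leading character selects the Slack target type:

for recipient in ('#ops-alerts', '@jdoe', 'bad-recipient'):
    if recipient.startswith('#'):
        print(recipient, '-> channel')
    elif '@' in recipient:
        print(recipient, '-> user')
    else:
        print(recipient, '-> rejected')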
15,790
def save(self, file, contents, name=None, overwrite=False):
    if name is None:
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    # The format-name literals were lost in extraction; 'text'/'json'
    # file types and a 'save' key are assumed here.
    if file_format == 'text':
        _write_text(file, contents)
    elif file_format == 'json':
        _write_json(file, contents)
    else:
        write_function = self._formats[name].get('save', None)
        if write_function is None:
            raise IOError("The format must declare a file type or "
                          "load/save functions.")
        if op.exists(file) and not overwrite:
            print("The file already exists, please use overwrite=True.")
            return
        write_function(file, contents)
Save contents into a file. The format name can be specified explicitly or inferred from the file extension.
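A usage sketch; 'formats' stands in for an instance of the class above, and the second call shows the name being inferred from the '.txt' extension:

formats.save('settings.json', {'theme': 'dark'}, overwrite=True)
formats.save('notes.txt', 'remember the milk')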
15,791
def find_one(driver, locator_list, elem_type=CSS, timeout=TIMEOUT):
    def _find_one(driver):
        finders = {
            CLASS_NAME: driver.find_elements_by_class_name,
            CSS: driver.find_elements_by_css_selector,
            ID: driver.find_elements_by_id,
            LINK: driver.find_elements_by_link_text,
            NAME: driver.find_elements_by_name,
            PARTIAL_LINK: driver.find_elements_by_partial_link_text,
            TAG: driver.find_elements_by_tag_name,
            XPATH: driver.find_elements_by_xpath
        }
        elems = [finders[elem_type](loc) for loc in locator_list]
        if any(len(elem_list) > 0 for elem_list in elems):
            return elems
        return False

    raw_results = WebDriverWait(driver, timeout).until(_find_one)
    results = [elem for elem_list in raw_results for elem in elem_list]
    return results.pop() if len(results) == 1 else results
Args:
    driver (selenium webdriver): Selenium webdriver object
    locator_list (:obj: `list` of :obj: `str`): List of CSS selector strings
    elem_type (Selenium By types): Selenium By type (i.e. By.CSS_SELECTOR)
    timeout (int): Number of seconds to wait before timing out

Returns:
    Selenium Element

Raises:
    TimeoutException: Raised if no elements are found within the TIMEOUT
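A usage sketch, assuming a live Selenium session named 'driver' and the module's CSS constant; the selectors are hypothetical:

elem = find_one(driver, ['#login-button', 'button.submit'], elem_type=CSS, timeout=10)
elem.click()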
15,792
def com_google_fonts_check_metadata_match_filename_postscript(font_metadata):
    post_script_name = font_metadata.post_script_name
    filename = os.path.splitext(font_metadata.filename)[0]

    if filename != post_script_name:
        yield FAIL, ("METADATA.pb font filename=\"{}\" does not match"
                     " post_script_name=\"{}\".").format(
                         font_metadata.filename,
                         font_metadata.post_script_name)
    else:
        yield PASS, ("METADATA.pb font fields \"filename\" and"
                     " \"post_script_name\" have equivalent values.")
METADATA.pb font.filename and font.post_script_name fields have equivalent values?
15,793
def images(self, type):
    images = []
    res = yield from self.http_query("GET", "/{}/images".format(type), timeout=None)
    images = res.json
    try:
        # The key literals were lost in extraction; 'filename' and
        # 'image' are assumed for the dedupe and sort keys.
        if type in ["qemu", "dynamips", "iou"]:
            for local_image in list_images(type):
                if local_image['filename'] not in [i['filename'] for i in images]:
                    images.append(local_image)
            images = sorted(images, key=itemgetter('filename'))
        else:
            images = sorted(images, key=itemgetter('image'))
    except OSError as e:
        raise ComputeError("Can't list images: {}".format(str(e)))
    return images
Return the list of images available for this type on controller and on the compute node.
15,794
def read_header(filename):
    header = {}
    in_header = False
    data = nl.universal_read(filename)
    # The split literal was lost in extraction; a newline is assumed.
    lines = [x.strip() for x in data.split('\n')]
    for line in lines:
        if line == "*** Header Start ***":
            in_header = True
            continue
        if line == "*** Header End ***":
            return header
        # The original set the flag but never consulted it; guarding
        # here keeps pre-header lines out of the dictionary.
        if in_header:
            fields = line.split(": ")
            if len(fields) == 2:
                header[fields[0]] = fields[1]
returns a dictionary of values in the header of the given file
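A runnable illustration of the header format the function expects and the "key: value" split it performs:

lines = ['*** Header Start ***', 'Subject: demo', 'Session: 1', '*** Header End ***']
header = {}
for line in lines[1:-1]:
    key, _, value = line.partition(': ')
    header[key] = value
print(header)   # {'Subject': 'demo', 'Session': '1'}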
15,795
def sign_digest_deterministic(self, digest, hashfunc=None,
                              sigencode=sigencode_string):
    secexp = self.privkey.secret_multiplier
    k = rfc6979.generate_k(
        self.curve.generator.order(), secexp, hashfunc, digest)
    return self.sign_digest(digest, sigencode=sigencode, k=k)
Calculates 'k' from data itself, removing the need for a strong random generator and producing deterministic (reproducible) signatures. See RFC 6979 for more details.
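This matches the python-ecdsa API; a minimal check that repeated signing is reproducible (assumes the ecdsa package is installed):

import hashlib
from ecdsa import SigningKey, NIST256p

sk = SigningKey.generate(curve=NIST256p)
digest = hashlib.sha256(b'payload').digest()
sig1 = sk.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
sig2 = sk.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
assert sig1 == sig2   # deterministic per RFC 6979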
15,796
def is_close_to(self, other, tolerance):
    self._validate_close_to_args(self.val, other, tolerance)
    if self.val < (other - tolerance) or self.val > (other + tolerance):
        # The message and strftime format literals were lost in
        # extraction; plausible reconstructions are used below.
        if type(self.val) is datetime.datetime:
            tolerance_seconds = (tolerance.days * 86400 + tolerance.seconds
                                 + tolerance.microseconds / 1000000)
            h, rem = divmod(tolerance_seconds, 3600)
            m, s = divmod(rem, 60)
            self._err('Expected <%s> to be close to <%s> within tolerance <%d:%02d:%02d>, but was not.'
                      % (self.val.strftime('%Y-%m-%d %H:%M:%S'),
                         other.strftime('%Y-%m-%d %H:%M:%S'), h, m, s))
        else:
            self._err('Expected <%s> to be close to <%s> within tolerance <%s>, but was not.'
                      % (self.val, other, tolerance))
    return self
Asserts that val is numeric and is close to other within tolerance.
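This fluent style matches the assertpy library; a short demonstration (assumes assertpy is installed):

import datetime
from assertpy import assert_that

assert_that(3.14).is_close_to(3.1, 0.1)   # passes: within 0.1 of 3.1
now = datetime.datetime.now()
assert_that(now).is_close_to(now, datetime.timedelta(minutes=5))   # passes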
15,797
def read_gtfs(path: Path, dist_units: str) -> "Feed":
    path = Path(path)
    if not path.exists():
        raise ValueError(f"Path {path} does not exist")

    if path.is_file():
        zipped = True
        tmp_dir = tempfile.TemporaryDirectory()
        src_path = Path(tmp_dir.name)
        shutil.unpack_archive(str(path), tmp_dir.name, "zip")
    else:
        zipped = False
        src_path = path

    feed_dict = {table: None for table in cs.GTFS_REF["table"]}
    for p in src_path.iterdir():
        table = p.stem
        if p.is_file() and p.stat().st_size and table in feed_dict:
            df = pd.read_csv(p, dtype=cs.DTYPE, encoding="utf-8-sig")
            if not df.empty:
                feed_dict[table] = cn.clean_column_names(df)

    feed_dict["dist_units"] = dist_units
    if zipped:
        tmp_dir.cleanup()

    return Feed(**feed_dict)
Create a Feed instance from the given path and given distance units.

The path should be a directory containing GTFS text files or a zip file that unzips as a collection of GTFS text files (and not as a directory containing GTFS text files). The distance units given must lie in :const:`constants.dist_units`.

Notes
-----
- Ignore non-GTFS files
- Automatically strip whitespace from the column names in GTFS files
15,798
def AgregarDatoPDF(self, campo, valor, pagina='T'):
    "Add a piece of data to the invoice (internally)"
    # The field-name literal and the pagina default were lost in
    # extraction; 'archivo' (the template file field) and 'T' (all
    # pages) are assumed. Fix a relative path so bundled templates
    # are found in the install directory.
    if campo == 'archivo' and valor.startswith(self.InstallDir):
        if not os.path.exists(valor):
            valor = os.path.join(self.InstallDir, "plantillas",
                                 os.path.basename(valor))
            if DEBUG:
                print("NUEVO PATH:", valor)
    self.datos[campo] = valor
    return True
Add a piece of data to the invoice (internally)
15,799
def exclude_reference_link(self, exclude):
    if not isinstance(exclude, bool):
        # The message literal was lost in extraction; a plausible one
        # is used here.
        raise InvalidUsage('exclude_reference_link must be of type bool')
    self._sysparms['sysparm_exclude_reference_link'] = exclude
Sets `sysparm_exclude_reference_link` to a bool value

:param exclude: bool
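A usage sketch; 'params' stands in for a query-parameters object of this class:

params.exclude_reference_link(True)      # stored as sysparm_exclude_reference_link
# params.exclude_reference_link('yes')   # would raise InvalidUsage: not a bool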