Dataset preview columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
376,100
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    x = audioBasicIO.stereo2mono(x)
    st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs)
    st_energy = st_feats[1, :]
    en = numpy.sort(st_energy)
    l1 = int(len(en) / 10)
    t1 = numpy.mean(en[0:l1]) + 0.000000000000001
    t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001
    class1 = st_feats[:, numpy.where(st_energy <= t1)[0]]
    class2 = st_feats[:, numpy.where(st_energy >= t2)[0]]
    faets_s = [class1.T, class2.T]
    [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s)
    svm = aT.trainSVM(faets_s_norm, 1.0)
    prob_on_set = []
    for i in range(st_feats.shape[1]):
        cur_fv = (st_feats[:, i] - means_s) / stds_s
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = numpy.array(prob_on_set)
    prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step)
    prog_on_set_sort = numpy.sort(prob_on_set)
    Nt = int(prog_on_set_sort.shape[0] / 10)
    T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) +
         weight * numpy.mean(prog_on_set_sort[-Nt::]))
    max_idx = numpy.where(prob_on_set > T)[0]
    i = 0
    time_clusters = []
    seg_limits = []
    while i < len(max_idx):
        cur_cluster = [max_idx[i]]
        if i == len(max_idx) - 1:
            break
        while max_idx[i + 1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_idx[i + 1])
            i += 1
            if i == len(max_idx) - 1:
                break
        i += 1
        time_clusters.append(cur_cluster)
        seg_limits.append([cur_cluster[0] * st_step, cur_cluster[-1] * st_step])
    min_dur = 0.2
    seg_limits_2 = []
    for s in seg_limits:
        if s[1] - s[0] > min_dur:
            seg_limits_2.append(s)
    seg_limits = seg_limits_2
    if plot:
        timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs)
        plt.subplot(2, 1, 1)
        plt.plot(timeX, x)
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.subplot(2, 1, 2)
        plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step), prob_on_set)
        plt.title()
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.title()
        plt.show()
    return seg_limits
Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling frequency - st_win, st_step: window size and step in seconds - smoothWindow: (optional) smooth window (in seconds) - weight: (optional) weight factor (0 < weight < 1) the higher, the more strict - plot: (optional) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
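A minimal usage sketch, assuming this is pyAudioAnalysis-style code where audioBasicIO.readAudioFile exists and "example.wav" is a hypothetical input file:
>>> [fs, x] = audioBasicIO.readAudioFile("example.wav")  # doctest: +SKIP
>>> silenceRemoval(x, fs, 0.020, 0.020, smoothWindow=1.0, weight=0.3)  # doctest: +SKIP
[[0.4, 3.2], [4.1, 7.9]]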
376,101
def load_ldap_config(self):
    try:
        # NOTE: the path and key literals below are assumptions inferred from the
        # assigned attribute names; the original string constants were not preserved.
        with open("{}/ldap_config.yaml".format(self.config_dir), "r") as FILE:
            config = yaml.load(FILE)
            self.host = config["host"]
            self.user_dn = config["user_dn"]
            self.port = config["port"]
            self.basedn = config["basedn"]
            self.mail_domain = config["mail_domain"]
            self.service_ou = config["service_ou"]
    except OSError as err:
        print("{}: could not read LDAP config in {}".format(type(err), self.config_dir))
Configure LDAP Client settings.
376,102
def permute(self, idx):
    if set(idx) != set(range(self.rank)):
        raise ValueError()
    self.factors = [f[:, idx] for f in self.factors]
    return self.factors
Permutes the columns of the factor matrices in place.
376,103
def calc_missingremoterelease_v1(self):
    flu = self.sequences.fluxes.fastaccess
    flu.missingremoterelease = max(
        flu.requiredremoterelease - flu.actualrelease, 0.)
Calculate the portion of the required remote demand that could not be met by the actual discharge release. Required flux sequences: |RequiredRemoteRelease| |ActualRelease| Calculated flux sequence: |MissingRemoteRelease| Basic equation: :math:`MissingRemoteRelease = max( RequiredRemoteRelease-ActualRelease, 0)` Example: >>> from hydpy.models.dam import * >>> parameterstep() >>> fluxes.requiredremoterelease = 2.0 >>> fluxes.actualrelease = 1.0 >>> model.calc_missingremoterelease_v1() >>> fluxes.missingremoterelease missingremoterelease(1.0) >>> fluxes.actualrelease = 3.0 >>> model.calc_missingremoterelease_v1() >>> fluxes.missingremoterelease missingremoterelease(0.0)
376,104
def split_seq(sam_num, n_tile):
    import math
    print(sam_num)
    print(n_tile)
    start_num = sam_num[0::int(math.ceil(len(sam_num) / (n_tile)))]
    end_num = start_num[1::]
    end_num.append(len(sam_num))
    return [[i, j] for i, j in zip(start_num, end_num)]
Split the sequence ``sam_num`` into roughly ``n_tile`` chunks, returned as [start, end] pairs
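A worked example; note the function also echoes its inputs via the debug prints:
>>> split_seq(list(range(10)), 5)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
5
[[0, 2], [2, 4], [4, 6], [6, 8], [8, 10]]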
376,105
def create_entity(self, name, gl_structure, description=None):
    new_entity = Entity(name, gl_structure, description=description)
    self.entities.append(new_entity)
    return new_entity
Create an entity and add it to the model. :param name: The entity name. :param gl_structure: The entity's general ledger structure. :param description: The entity description. :returns: The created entity.
376,106
def _processDML(self, dataset_name, cols, reader):
    sql_template = self._generateInsertStatement(dataset_name, cols)
    c = self.conn.cursor()
    c.executemany(sql_template, reader)
    self.conn.commit()
Overridden version of create DML for SQLite
376,107
def main(args=None):
    for arg in args:
        glyphsLib.dump(load(open(arg, "r", encoding="utf-8")), sys.stdout)
Roundtrip the .glyphs file given as an argument.
376,108
def from_dict(cls, pods):
    frag = cls()
    # NOTE: the dictionary keys below are assumptions inferred from the
    # attribute names; the original string literals were not preserved.
    frag.content = pods["content"]
    frag._resources = [FragmentResource(**d) for d in pods["resources"]]
    frag.js_init_fn = pods["js_init_fn"]
    frag.js_init_version = pods["js_init_version"]
    frag.json_init_args = pods["json_init_args"]
    return frag
Returns a new Fragment from a dictionary representation.
376,109
def ufloatDict_nominal(self, ufloat_dict):
    return OrderedDict(izip(ufloat_dict.keys(),
                            map(lambda x: x.nominal_value, ufloat_dict.values())))
This gives us a dictionary of nominal values from a dictionary of uncertainties
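A short sketch, assuming the values come from the `uncertainties` package and `obj` is a hypothetical instance exposing this method (izip is itertools.izip on Python 2; plain zip behaves the same on Python 3):
>>> from uncertainties import ufloat
>>> d = OrderedDict([("a", ufloat(1.0, 0.1)), ("b", ufloat(2.5, 0.3))])
>>> obj.ufloatDict_nominal(d)  # doctest: +SKIP
OrderedDict([('a', 1.0), ('b', 2.5)])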
376,110
def calcRapRperi(self,**kwargs): if hasattr(self,): return self._rperirap EL= self.calcEL(**kwargs) E, L= EL if self._vR == 0. and m.fabs(self._vT - vcirc(self._pot,self._R,use_physical=False)) < _EPS: rperi= self._R rap = self._R elif self._vR == 0. and self._vT > vcirc(self._pot,self._R,use_physical=False): rperi= self._R if self._gamma != 0.: startsign= _rapRperiAxiEq(self._R+10.**-8.,E,L,self._pot) startsign/= m.fabs(startsign) else: startsign= 1. rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True, startsign=startsign) rap= optimize.brentq(_rapRperiAxiEq,rperi+0.00001,rend, args=(E,L,self._pot)) elif self._vR == 0. and self._vT < vcirc(self._pot,self._R,use_physical=False): rap= self._R if self._gamma != 0.: startsign= _rapRperiAxiEq(self._R-10.**-8.,E,L,self._pot) startsign/= m.fabs(startsign) else: startsign= 1. rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot, startsign=startsign) if rstart == 0.: rperi= 0. else: rperi= optimize.brentq(_rapRperiAxiEq,rstart,rap-0.000001, args=(E,L,self._pot)) else: if self._gamma != 0.: startsign= _rapRperiAxiEq(self._R,E,L,self._pot) startsign/= m.fabs(startsign) else: startsign= 1. rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot, startsign=startsign) if rstart == 0.: rperi= 0. else: try: rperi= optimize.brentq(_rapRperiAxiEq,rstart,self._R, (E,L,self._pot), maxiter=200) except RuntimeError: raise UnboundError("Orbit seems to be unbound") rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True, startsign=startsign) rap= optimize.brentq(_rapRperiAxiEq,self._R,rend, (E,L,self._pot)) self._rperirap= (rperi,rap) return self._rperirap
NAME: calcRapRperi PURPOSE: calculate the apocenter and pericenter radii INPUT: OUTPUT: (rperi,rap) HISTORY: 2010-12-01 - Written - Bovy (NYU)
376,111
def handleError(self, record): if logging.raiseExceptions: t, v, tb = sys.exc_info() if issubclass(t, NotifierException) and self.fallback: msg = f"Could not log msg to provider !\n{v}" self.fallback_defaults["message"] = msg self.fallback.notify(**self.fallback_defaults) else: super().handleError(record)
Handles any errors raised during the :meth:`emit` method. Will only try to pass exceptions to fallback notifier (if defined) in case the exception is a sub-class of :exc:`~notifiers.exceptions.NotifierException` :param record: :class:`logging.LogRecord`
376,112
def resume_training(self, train_data, model_path, valid_data=None): restore_state = self.checkpointer.restore(model_path) loss_fn = self._get_loss_fn() self.train() self._train_model( train_data=train_data, loss_fn=loss_fn, valid_data=valid_data, restore_state=restore_state, )
Resume training of a classifier by reloading the appropriate state_dicts for each model. Args: train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the train split model_path: the path to the saved checkpoint for resuming training valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split
376,113
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
    try:
        r = requests.post(server + "/reports/delete/%d" % report_number,
                          timeout=timeout)
    except Exception as e:
        logging.error(e)
        return False
    return r
Delete a specific crash report from the server. :param report_number: Report Number :return: server response
376,114
def get_vswhere_path():
    if alternate_path and os.path.exists(alternate_path):
        return alternate_path
    if DEFAULT_PATH and os.path.exists(DEFAULT_PATH):
        return DEFAULT_PATH
    if os.path.exists(DOWNLOAD_PATH):
        return DOWNLOAD_PATH
    _download_vswhere()
    return DOWNLOAD_PATH
Get the path to vswhere.exe. If vswhere is not already installed as part of Visual Studio, and no alternate path is given using `set_vswhere_path()`, the latest release will be downloaded and stored alongside this script.
376,115
def add_children_gos(self, gos):
    lst = []
    obo_dag = self.obo_dag
    get_children = lambda go_obj: list(go_obj.get_all_children()) + [go_obj.id]
    for go_id in gos:
        go_obj = obo_dag[go_id]
        lst.extend(get_children(go_obj))
    return set(lst)
Return children of input gos plus input gos.
376,116
def _set_if_type(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 7}, u: {: 2}, u: {: 5}, u: {: 1}, u: {: 6}, u: {: 8}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=, defining_module=, yang_type=, is_config=False) except (TypeError, ValueError): raise ValueError({ : , : "brocade-mpls-operational:mpls-if-type", : , }) self.__if_type = t if hasattr(self, ): self._set()
Setter method for if_type, mapped from YANG variable /mpls_state/dynamic_bypass/dynamic_bypass_interface/if_type (mpls-if-type) If this variable is read-only (config: false) in the source YANG file, then _set_if_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_if_type() directly. YANG Description: Interface type
376,117
def graph_from_labels(label_image, fg_markers, bg_markers, regional_term = False, boundary_term = False, regional_term_args = False, boundary_term_args = False): logger = Logger.getInstance() logger.info() label_image = scipy.asarray(label_image) fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_) bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_) __check_label_image(label_image) if not regional_term: regional_term = __regional_term_label if not boundary_term: boundary_term = __boundary_term_label if not hasattr(regional_term, ) or not 3 == len(inspect.getargspec(regional_term)[0]): raise AttributeError() if not hasattr(boundary_term, ) or not 3 == len(inspect.getargspec(boundary_term)[0]): raise AttributeError() logger.info() nodes = len(scipy.unique(label_image)) edges = 10 * nodes logger.debug(.format(nodes, edges)) graph = GCGraph(nodes, edges) logger.debug(.format(len(scipy.unique(label_image[fg_markers])), len(scipy.unique(label_image[bg_markers])))) logger.info() regional_term(graph, label_image, regional_term_args) logger.info() boundary_term(graph, label_image, boundary_term_args) logger.info() graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1)) return graph.get_graph()
Create a graph-cut ready graph to segment a nD image using the region neighbourhood. Create a `~medpy.graphcut.maxflow.GraphDouble` object for all regions of a nD label image. Every region of the label image is regarded as a node. They are connected to their immediate neighbours by arcs. Whether two regions are neighbours is determined using :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arc weights (n-weights) are computed using the supplied ``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). Implicitly the graph holds two additional nodes: the source and the sink, so called terminal nodes. These are connected with all other nodes through arcs of an initial weight (t-weight) of zero. All regions that are under the foreground markers are considered to be tightly bound to the source: The t-weight of the arc from source to these nodes is set to a maximum value. The same goes for the background markers: The covered regions receive a maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink. All other t-weights are set using the supplied ``regional_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). Parameters ---------- label_image: ndarray The label image as an array where each voxel carries the id of the region it belongs to. Note that the region labels have to start from 1 and be continuous (can be achieved with `~medpy.filter.label.relabel`). fg_markers : ndarray The foreground markers as binary array of the same shape as the original image. bg_markers : ndarray The background markers as binary array of the same shape as the original image. regional_term : function This can be either `False`, in which case all t-weights are set to 0, except for the nodes that are directly connected to the source or sink; or a function, in which case the supplied function is used to compute the t_edges. It has to have the following signature *regional_term(graph, regional_term_args)*, and is supposed to compute (source_t_weight, sink_t_weight) for all regions of the image and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights have only to be computed for nodes where they do not equal zero. Additional parameters can be passed to the function via the ``regional_term_args`` parameter. boundary_term : function This can be either `False`, in which case all n-edges, i.e. between all nodes that are not source or sink, are set to 0; or a function, in which case the supplied function is used to compute the edge weights. It has to have the following signature *boundary_term(graph, boundary_term_args)*, and is supposed to compute the edges between all adjacent regions of the image and to add them to the supplied `~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to the function via the ``boundary_term_args`` parameter. regional_term_args : tuple Use this to pass some additional parameters to the ``regional_term`` function. boundary_term_args : tuple Use this to pass some additional parameters to the ``boundary_term`` function. Returns ------- graph : `~medpy.graphcut.maxflow.GraphDouble` The created graph, ready to execute the graph-cut. Raises ------ AttributeError If an argument is malformed. FunctionError If one of the supplied functions returns unexpected results. Notes ----- If a voxel is marked as both, foreground and background, the background marker is given higher priority.
All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
376,118
async def load_blob(reader, elem_type, params=None, elem=None):
    ivalue = elem_type.SIZE if elem_type.FIX_SIZE else await load_uvarint(reader)
    fvalue = bytearray(ivalue)
    await reader.areadinto(fvalue)
    if elem is None:
        return fvalue
    elif isinstance(elem, BlobType):
        setattr(elem, elem_type.DATA_ATTR, fvalue)
        return elem
    else:
        elem.extend(fvalue)
        return elem
Loads blob from reader to the element. Returns the loaded blob. :param reader: :param elem_type: :param params: :param elem: :return:
376,119
def fold_enrichment(self): return self.k / (self.K*(self.cutoff/float(self.N)))
(property) Returns the fold enrichment at the XL-mHG cutoff.
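A worked instance of the formula with hypothetical counts (k hits among the first `cutoff` of N ranked items, K hits in total):
>>> k, K, N, cutoff = 10, 50, 1000, 100
>>> k / (K * (cutoff / float(N)))
2.0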
376,120
def copy_resource(self, container, resource, local_filename): self.push_log("Receiving tarball for resource and storing as {2}".format(container, resource, local_filename)) super(DockerFabricClient, self).copy_resource(container, resource, local_filename)
Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource` with additional logging.
376,121
def get_proc_dir(cachedir, **kwargs): t exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. procmodemodemodemodechownuidgid', -1) if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_
Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems.
376,122
def get_model_url_name(model_nfo, page, with_namespace=False): prefix = if with_namespace: prefix = return ( % (prefix, % model_nfo, page)).lower()
Returns a URL for a given Tree admin page type.
376,123
def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs): if loge_bounds is None: loge_bounds = (self.energies[0], self.energies[-1]) name = src[].lower().replace(, ) esuffix = % (loge_bounds[0], loge_bounds[1]) p = ExtensionPlotter(src, self.roi, , self.config[][], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(0) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config[][], % ( prefix, name, esuffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config[][], % ( prefix, name, esuffix))) plt.close(fig) for i, c in enumerate(self.components): suffix = % i p = ExtensionPlotter(src, self.roi, suffix, self.config[][], loge_bounds=loge_bounds) fig = plt.figure() p.plot(0) ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.gca().set_xlim(-2, 2) plt.savefig(os.path.join(self.config[][], % ( prefix, name, esuffix, suffix))) plt.close(fig) fig = plt.figure() p.plot(1) plt.gca().set_xlim(-2, 2) ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds) annotate(src=src, loge_bounds=loge_bounds) plt.savefig(os.path.join(self.config[][], % ( prefix, name, esuffix, suffix))) plt.close(fig)
Utility function for generating diagnostic plots for the extension analysis.
376,124
def bz2_decompress_stream(src):
    dec = bz2.BZ2Decompressor()
    for block in src:
        decoded = dec.decompress(block)
        if decoded:
            yield decoded
Decompress data from `src`. Args: src (iterable): iterable that yields blocks of compressed data Yields: blocks of uncompressed data
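A runnable round-trip check using a single compressed block:
>>> import bz2
>>> blocks = [bz2.compress(b"hello world")]
>>> b"".join(bz2_decompress_stream(blocks))
b'hello world'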
376,125
def error(self, error): self._error = RuntimeError(error) if isinstance(error, str) else error
Defines a simulated exception error that will be raised. Arguments: error (str|Exception): error to raise. Returns: self: current Mock instance.
376,126
def intersection(self, *others): return self.copy(super(NGram, self).intersection(*others))
Return the intersection of two or more sets as a new set. >>> from ngram import NGram >>> a = NGram(['spam', 'eggs']) >>> b = NGram(['spam', 'ham']) >>> list(a.intersection(b)) ['spam']
376,127
def resolve_frompath(pkgpath, relpath, level=0):
    # NOTE: the '.' separators and the [''] sentinel below are assumptions;
    # the original string literals were not preserved.
    if level == 0:
        return relpath
    parts = pkgpath.split('.') + ['']
    parts = parts[:-level] + (relpath.split('.') if relpath else [])
    return '.'.join(parts)
Resolves the path of the module referred to by 'from ..x import y'.
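Assuming the '.' separators reconstructed above, a couple of worked examples:
>>> resolve_frompath('a.b.c', 'x', level=1)
'a.b.c.x'
>>> resolve_frompath('a.b.c', 'x.y', level=2)
'a.b.x.y'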
376,128
def set_condition(self, condition=True):
    if condition is None:
        self.__condition = True
    else:
        self.__condition = condition
Sets a new condition callback for the breakpoint. @see: L{__init__} @type condition: function @param condition: (Optional) Condition callback function.
376,129
def load_cash_balances(self): from gnucash_portfolio.accounts import AccountsAggregate, AccountAggregate cfg = self.__get_config() cash_root_name = cfg.get(ConfigKeys.cash_root) gc_db = self.config.get(ConfigKeys.gnucash_book_path) with open_book(gc_db, open_if_lock=True) as book: svc = AccountsAggregate(book) root_account = svc.get_by_fullname(cash_root_name) acct_svc = AccountAggregate(book, root_account) cash_balances = acct_svc.load_cash_balances_with_children(cash_root_name) self.__store_cash_balances_per_currency(cash_balances)
Loads cash balances from GnuCash book and recalculates into the default currency
376,130
def encrypt(self, data): aes_key, hmac_key = self.keys pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE if six.PY2: data = data + pad * chr(pad) else: data = data + salt.utils.stringutils.to_bytes(pad * chr(pad)) iv_bytes = os.urandom(self.AES_BLOCK_SIZE) if HAS_M2: cypher = EVP.Cipher(alg=, key=aes_key, iv=iv_bytes, op=1, padding=False) encr = cypher.update(data) encr += cypher.final() else: cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) encr = cypher.encrypt(data) data = iv_bytes + encr sig = hmac.new(hmac_key, data, hashlib.sha256).digest() return data + sig
encrypt data with AES-CBC and sign it with HMAC-SHA256
376,131
def this(obj, **kwargs): verbose = kwargs.get("verbose", True) if verbose: print(.format(" whatis.this? ")) for func in pipeline: s = func(obj, **kwargs) if s is not None: print(s) if verbose: print(.format(" whatis.this? "))
Prints series of debugging steps to user. Runs through pipeline of functions and print results of each.
376,132
def remove_action(self, action, sub_menu=): if sub_menu: try: mnu = self._sub_menus[sub_menu] except KeyError: pass else: mnu.removeAction(action) else: try: self._actions.remove(action) except ValueError: pass self.removeAction(action)
Removes an action/separator from the editor's context menu. :param action: Action/separator to remove. :param sub_menu: Name of the sub-menu to remove the action from.
376,133
def is_valid_geometry(self): has_sites = (self.sites is not None or in self.inputs or in self.inputs) if not has_sites and not self.ground_motion_fields: return True if ( in self.inputs and not has_sites and not self.inputs[].endswith()): raise ValueError() elif ( in self.calculation_mode or in self.calculation_mode or in self.calculation_mode): return True flags = dict( sites=bool(self.sites), sites_csv=self.inputs.get(, 0), hazard_curves_csv=self.inputs.get(, 0), gmfs_csv=self.inputs.get(, 0), region=bool(self.region and self.region_grid_spacing)) return sum(bool(v) for v in flags.values()) == 1 or self.inputs.get( ) or self.inputs.get()
It is possible to infer the geometry only if exactly one of sites, sites_csv, hazard_curves_csv, gmfs_csv, region is set. You did set more than one, or nothing.
376,134
def update_kwargs(self, kwargs, count, offset): kwargs.update({self.count_key: count, self.offset_key: offset}) return kwargs
Helper to support handy dictionaries merging on all Python versions.
376,135
def filters_query(filters): def _cast_val(filtr): val = filtr.val if filtr.oper in (, ): val = + filtr.val + elif filtr.oper == : val = + filtr.val elif filtr.oper == : val = filtr.val + return val def _filter(filtr): oper = FILTER_TABLE[filtr.oper] prop = .format( field=filtr.field.replace(, ), oper=filtr.oper, ) if isinstance(filtr, FilterRel): stmt = _filter_rel(filtr, oper, prop) else: stmt = .format( field=filtr.field, oper=oper, prop=prop, ) return stmt, {prop: _cast_val(filtr)} def _filter_or(filters): param = {} stmts = [] for filtr in filters: vals = _filter(filtr) param.update(vals[1]) stmts.append(vals[0]) stmt = .join(stmts) stmt = .format(stmt) return stmt, param def _filter_rel(rel, oper, prop): stmt = return stmt.format( field=rel.local_field, foreign_field=rel.foreign_field, foreign_filter=rel.foreign_filter, foreign_rtype=rel.foreign_rtype, oper=oper, prop=prop, ) param = {} stmts = [] for filtr in filters: if isinstance(filtr, FilterOr): vals = _filter_or(filtr) else: vals = _filter(filtr) param.update(vals[1]) stmts.append(vals[0]) if stmts: stmt = .join(stmts) stmt = + stmt return stmt, param
Turn the tuple of filters into SQL WHERE statements The key (column name) & operator have already been vetted so they can be trusted but the value could still be evil so it MUST be a parameterized input! That is done by creating a param dict where the key name & val look like: '{}_{}'.format(key, oper): val The key is constructed the way it is to ensure uniqueness; if we just used the key name then it could get clobbered. Ultimately the WHERE statement will look something like: age >= {age_gte} where age_gte is the key name in the param dict with a value of the evil user input. In the end, a string statement & dict param are returned as a tuple if any filters were provided, otherwise None. :return: tuple (string, dict)
376,136
def get_pdf(article, debug=False): print(.format(article)) identifier = [_ for _ in article.identifier if in _] if identifier: url = .format(identifier[0][9:13], .join(_ for _ in identifier[0][14:] if _.isdigit())) else: params = { : article.bibcode, : , : } url = requests.get(, params=params).url q = requests.get(url) if not q.ok: print(.format( article, q.status_code, url)) if debug: q.raise_for_status() else: return None if q.content.endswith(): print(.format( article, url)) return None return q.content
Download an article PDF from arXiv. :param article: The ADS article to retrieve. :type article: :class:`ads.search.Article` :returns: The binary content of the requested PDF.
376,137
def identify_col_pos(txt): res = [] lines = txt.split() prev_ch = for col_pos, ch in enumerate(lines[0]): if _is_white_space(ch) is False and _is_white_space(prev_ch) is True: res.append(col_pos) prev_ch = ch res.append(col_pos) return res
assume no delimiter in this file, so guess the best fixed column widths to split by
376,138
def overlap_correlation(wnd, hop): return sum(wnd * Stream(wnd).skip(hop)) / sum(el ** 2 for el in wnd)
Overlap correlation percent for the given overlap hop in samples.
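A sketch, assuming AudioLazy's Stream semantics (elementwise operations, finite iteration); for a length-N rectangular window and hop h this works out to (N - h) / N:
>>> overlap_correlation([1., 1., 1., 1.], 2)  # doctest: +SKIP
0.5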
376,139
def is_binary_file(file): file_handle = open(file, "rb") try: chunk_size = 1024 while True: chunk = file_handle.read(chunk_size) if chr(0) in chunk: return True if len(chunk) < chunk_size: break finally: file_handle.close() return False
Returns whether the given file is a binary file. :param file: File path. :type file: unicode :return: Is file binary. :rtype: bool
376,140
def ToDatetime(self): return datetime.utcfromtimestamp( self.seconds + self.nanos / float(_NANOS_PER_SECOND))
Converts Timestamp to datetime.
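An example, assuming this is the protobuf Timestamp message (nanos are truncated to microseconds by the float conversion):
>>> ts = Timestamp(seconds=1, nanos=500000000)  # doctest: +SKIP
>>> ts.ToDatetime()  # doctest: +SKIP
datetime.datetime(1970, 1, 1, 0, 0, 1, 500000)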
376,141
def add_coordinate_condition(self, droppable_id, container_id, coordinate, match=True): if not isinstance(coordinate, BasicCoordinate): raise InvalidArgument() self.my_osid_object_form._my_map[].append( {: droppable_id, : container_id, : coordinate.get_values(), : match}) self.my_osid_object_form._my_map[].sort(key=lambda k: k[])
stub
376,142
def _set_es_workers(self, **kwargs): def make_es_worker(search_conn, es_index, es_doc_type, class_name): new_esbase = copy.copy(search_conn) new_esbase.es_index = es_index new_esbase.doc_type = es_doc_type log.info("Indexing into ES index doctype ", class_name.pyuri, es_index, es_doc_type) return new_esbase def additional_indexers(rdf_class): rtn_list = rdf_class.es_indexers() rtn_list.remove(rdf_class) return rtn_list self.es_worker = make_es_worker(self.search_conn, self.es_index, self.es_doc_type, self.rdf_class.__name__) if not kwargs.get("idx_only_base"): self.other_indexers = {item.__name__: make_es_worker( self.search_conn, item.es_defs.get()[0], item.es_defs.get()[0], item.__name__) for item in additional_indexers(self.rdf_class)} else: self.other_indexers = {}
Creates index worker instances for each class to index kwargs: ------- idx_only_base[bool]: True will only index the base class
376,143
def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
    return p.parse(string, evaluate_result=evaluate_result)
Using "format" attempt to pull values from "string". The format must match the string contents exactly. If the value you're looking for is instead just a part of the string use search(). If ``evaluate_result`` is True the return value will be an Result instance with two attributes: .fixed - tuple of fixed-position values from the string .named - dict of named values from the string If ``evaluate_result`` is False the return value will be a Match instance with one method: .evaluate_result() - This will return a Result instance like you would get with ``evaluate_result`` set to True The default behaviour is to match strings case insensitively. You may match with case by specifying case_sensitive=True. If the format is invalid a ValueError will be raised. See the module documentation for the use of "extra_types". In the case there is no match parse() will return None.
376,144
def past_trades(self, symbol=, limit_trades=50, timestamp=0): request = url = self.base_url + request params = { : request, : self.get_nonce(), : symbol, : limit_trades, : timestamp } return requests.post(url, headers=self.prepare(params))
Send a trade history request, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_trades -- maximum number of trades to return (default 50) timestamp -- only return trades after this unix timestamp (default 0)
376,145
def read_file_header(fd, endian): fields = [ (, , 116), (, , 8), (, , 2), (, , 2) ] hdict = {} for name, fmt, num_bytes in fields: data = fd.read(num_bytes) hdict[name] = unpack(endian, fmt, data) hdict[] = hdict[].strip() v_major = hdict[] >> 8 v_minor = hdict[] & 0xFF hdict[] = % (v_major, v_minor) return hdict
Read mat 5 file header of the file fd. Returns a dict with header values.
376,146
def follow_cf(save, Uspan, target_cf, nup, n_tot=5.0, slsp=None): if slsp == None: slsp = Spinon(slaves=6, orbitals=3, avg_particles=n_tot, hopping=[0.5]*6, populations = np.asarray([n_tot]*6)/6) zet, lam, mu, mean_f = [], [], [], [] for co in Uspan: print(, co, , target_cf) res=root(targetpop, nup[-1],(co,target_cf,slsp, n_tot)) print(res.x) if res.x>nup[-1]: break nup.append(res.x) slsp.param[]=population_distri(nup[-1]) mean_f.append(slsp.mean_field()) zet.append(slsp.quasiparticle_weight()) lam.append(slsp.param[]) mu.append(orbital_energies(slsp.param, zet[-1])) case = save.createGroup(.format(target_cf)) varis = st.setgroup(case) st.storegroup(varis, Uspan[:len(zet)], zet, lam, mu, nup[1:],target_cf,mean_f)
Calculates the quasiparticle weight in a single-site spin hamiltonian with N degenerate half-filled orbitals
376,147
def slugify(cls, s):
    slug = re.sub("[^0-9a-zA-Z-]", "-", s)
    return re.sub("-{2,}", "-", slug).strip()
Return the slug version of the string ``s``
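A worked example (SomeTree is a hypothetical class exposing this classmethod); note that .strip() removes only surrounding whitespace, so a trailing hyphen survives:
>>> SomeTree.slugify('Hello, World!')  # doctest: +SKIP
'Hello-World-'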
376,148
def map_noreturn(targ, argslist): exceptions = [] n_threads = len(argslist) exc_lock = threading.Lock() done_lock = CountDownLatch(n_threads) def eb(wr, el=exc_lock, ex=exceptions, dl=done_lock): el.acquire() ex.append(sys.exc_info()) el.release() dl.countdown() def cb(wr, value, dl=done_lock): dl.countdown() for args in argslist: __PyMCThreadPool__.putRequest( WorkRequest(targ, callback=cb, exc_callback=eb, args=args, requestID=id(args))) done_lock.await_lock() if exceptions: six.reraise(*exceptions[0])
parallel_call_noreturn(targ, argslist) :Parameters: - targ : function - argslist : list of tuples Does [targ(*args) for args in argslist] using the threadpool.
376,149
def p_ctx_coords(self, p): if len(p) == 2: p[0] = [p[1]] else: p[0] = p[1] + [p[3]]
ctx_coords : multiplicative_path | ctx_coords COLON multiplicative_path
376,150
def _GetDirectory(self): if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY: return None return TARDirectory(self._file_system, self.path_spec)
Retrieves a directory. Returns: TARDirectory: a directory or None if not available.
376,151
def from_filename(self, filename): if os.path.exists(filename): with open(filename) as fp: return IntentSchema(json.load(fp, object_pairs_hook=OrderedDict)) else: print () return IntentSchema()
Build an IntentSchema from a file path creates a new intent schema if the file does not exist, throws an error if the file exists but cannot be loaded as a JSON
376,152
def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, dummy=None): return ssl.wrap_socket(sock, keyfile=self._keyfile, certfile=self._certfile, server_side=server_side, cert_reqs=self._verify_mode, ssl_version=self._protocol, ca_certs=self._cafile, do_handshake_on_connect=do_handshake_on_connect, suppress_ragged_eofs=suppress_ragged_eofs)
Wrap an existing Python socket sock and return an ssl.SSLSocket object.
376,153
def UpsertUserDefinedFunction(self, collection_link, udf, options=None): if options is None: options = {} collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf) return self.Upsert(udf, path, , collection_id, None, options)
Upserts a user defined function in a collection. :param str collection_link: The link to the collection. :param str udf: :param dict options: The request options for the request. :return: The upserted UDF. :rtype: dict
376,154
def put_attachment(self, attachmentid, attachment_update): assert type(attachment_update) is DotDict if (not in attachment_update): attachment_update.ids = [attachmentid] return self._put(.format(attachmentid=attachmentid), json.dumps(attachment_update))
http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#update-attachment
376,155
def get_result(self, decorated_function, *args, **kwargs): cache_entry = self.get_cache(decorated_function, *args, **kwargs) if cache_entry.has_value is False: raise WCacheStorage.CacheMissedException() return cache_entry.cached_value
Get result from storage for specified function. Will raise an exception (:class:`.WCacheStorage.CacheMissedException`) if there is no cached result. :param decorated_function: called function (original) :param args: args with which function is called :param kwargs: kwargs with which function is called :return: (any type, even None)
376,156
def get_neighbors(self, site, r, include_index=False, include_image=False): nn = self.get_sites_in_sphere(site.coords, r, include_index=include_index, include_image=include_image) return [d for d in nn if site != d[0]]
Get all neighbors to a site within a sphere of radius r. Excludes the site itself. Args: site (Site): Which is the center of the sphere. r (float): Radius of sphere. include_index (bool): Whether the non-supercell site index is included in the returned data include_image (bool): Whether the supercell image is included in the returned data Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance. If include_index == True, the tuple for each neighbor also includes the index of the neighbor. If include_image == True, the tuple for each neighbor also includes the supercell image.
376,157
def sample_counters(mc, system_info): return { (x, y): mc.get_router_diagnostics(x, y) for (x, y) in system_info }
Sample every router counter in the machine.
376,158
def hasattrs(object, *names):
    for name in names:
        if not hasattr(object, name):
            return False
    return True
Takes in an object and a variable length amount of named attributes, and checks to see if the object has each property. If any of the attributes are missing, this returns false. :param object: an object that may or may not contain the listed attributes :param names: a variable amount of attribute names to check for :return: True if the object contains each named attribute, false otherwise
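A quick doctest-style check:
>>> class P: pass
>>> p = P(); p.x, p.y = 1, 2
>>> hasattrs(p, 'x', 'y')
True
>>> hasattrs(p, 'x', 'z')
False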
376,159
def default_vsan_policy_configured(name, policy): }) return ret
Configures the default VSAN policy on a vCenter. The state assumes there is only one default VSAN policy on a vCenter. policy Dict representation of a policy
376,160
def _add_token_span_to_document(self, span_element): for token in span_element.text.split(): token_id = self._add_token_to_document(token) if span_element.tag == : self._add_spanning_relation(.format(self.act_count), token_id) else: self._add_spanning_relation(span_element.tag, token_id) if span_element.tag == : self.act_count += 1
adds an <intro>, <act> or <conclu> token span to the document.
376,161
def file_size(self, name, force_refresh=False): uname, version = split_name(name) t = time.time() logger.debug(, name) try: if not self.remote_store or (version is not None and not force_refresh): try: if self.local_store and self.local_store.exists(name): return self.local_store.file_size(name) except Exception: if self.remote_store: logger.warning("Error getting from local store", name, exc_info=True) else: raise if self.remote_store: return self.remote_store.file_size(name) raise FiletrackerError("File not available: %s" % name) finally: logger.debug(, name, time.time() - t)
Returns the size of the file. For efficiency this operation does not use locking, so may return inconsistent data. Use it for informational purposes.
376,162
def register(self, schema): result = None uuid = schema.uuid if uuid in self._schbyuuid: result = self._schbyuuid[uuid] if result != schema: self._schbyuuid[uuid] = schema name = schema.name schemas = self._schbyname.setdefault(name, set()) schemas.add(schema) for innername, innerschema in iteritems(schema.getschemas()): if innerschema.uuid not in self._schbyuuid: register(innerschema) return result
Register input schema class. When registering a schema, all inner schemas are registered as well. :param Schema schema: schema to register. :return: old registered schema. :rtype: type
376,163
def wrap_and_format(self, width=None, include_params=False, include_return=False, excluded_params=None): if excluded_params is None: excluded_params = [] out = StringIO() if width is None: width, _height = get_terminal_size() for line in self.maindoc: if isinstance(line, Line): out.write(fill(line.contents, width=width)) out.write() elif isinstance(line, BlankLine): out.write() elif isinstance(line, ListItem): out.write(fill(line.contents, initial_indent=" %s " % line.marker[0], subsequent_indent=" ", width=width)) out.write() if include_params: included_params = set(self.param_info) - set(excluded_params) if len(included_params) > 0: out.write("\nParameters:\n") for param in included_params: info = self.param_info[param] out.write(" - %s (%s):\n" % (param, info.type_name)) out.write(fill(info.desc, initial_indent=" ", subsequent_indent=" ", width=width)) out.write() if include_return: print("Returns:") print(" " + self.return_info.type_name) return out.getvalue()
Wrap, format and print this docstring for a specific width. Args: width (int): The number of characters per line. If set to None this will be inferred from the terminal width and default to 80 if not passed or if passed as None and the terminal width cannot be determined. include_return (bool): Include the return information section in the output. include_params (bool): Include a parameter information section in the output. excluded_params (list): An optional list of parameter names to exclude. Options for excluding things are, for example, 'self' or 'cls'.
376,164
def group_callback(self, iocb): if _debug: IOGroup._debug("group_callback %r", iocb) for iocb in self.ioMembers: if not iocb.ioComplete.isSet(): if _debug: IOGroup._debug(" - waiting for child: %r", iocb) break else: if _debug: IOGroup._debug(" - all children complete") self.ioState = COMPLETED self.trigger()
Callback when a child iocb completes.
376,165
def get_emitter(self, name: str) -> Callable[[Event], Event]: return self._event_manager.get_emitter(name)
Gets an emitter for a named event. Parameters ---------- name : The name of the event the requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle.
376,166
def pipeline_exists(url, pipeline_id, auth, verify_ssl): http://host:port/ try: pipeline_status(url, pipeline_id, auth, verify_ssl)[] return True except requests.HTTPError: return False
:param url: (str): the host url in the form 'http://host:port/'. :param pipeline_id: (string) the pipeline identifier :param auth: (tuple) a tuple of username, password :return: (boolean)
376,167
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50., full_output=False): r lnl = 0 try: self._ll_info except AttributeError: refactor = True if refactor: t = np.delete(self.time, np.concatenate([self.nanmask, self.badmask])) f = np.delete(self.flux, np.concatenate([self.nanmask, self.badmask])) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(self.time == t[i]) for i in pos_inds]) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(self.time == t[i]) for i in neg_inds]) outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) self.outmask = np.concatenate([neg_inds, pos_inds]) self.transitmask = np.array([], dtype=int) self._ll_info = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): m = self.get_masked_chunk(b, pad=False) K = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) A = np.zeros((len(m), len(m))) for n in range(self.pld_order): XM = self.X(n, m) A += self.lam[b][n] * np.dot(XM, XM.T) K += A self._ll_info[b] = [cho_factor(K), m] self.outmask = outmask self.transitmask = transitmask amp = [None for b in self.breakpoints] var = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): CDK = self._ll_info[b][0] m = self._ll_info[b][1] var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m])) amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m])) r = self.fraw[m] - amp[b] * model[m] lnl += -0.5 * np.dot(r, cho_solve(CDK, r)) if full_output: vari = var[0] ampi = amp[0] for v, a in zip(var[1:], amp[1:]): ampi = (ampi * v + a * vari) / (vari + v) vari = vari * v / (vari + v) med = np.nanmedian(self.fraw) return lnl, ampi / med, vari / med ** 2 else: return lnl
r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`.
376,168
def print_prefixed_lines(lines: List[Tuple[str, Optional[str]]]) -> str:
    existing_lines = [line for line in lines if line[1] is not None]
    pad_len = reduce(lambda pad, line: max(pad, len(line[0])), existing_lines, 0)
    return "\n".join(
        map(lambda line: line[0].rjust(pad_len) + line[1], existing_lines)
    )
Print lines specified like this: ["prefix", "string"]
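A worked example (despite its name the function returns the joined string; None entries are dropped and prefixes are right-aligned):
>>> print(print_prefixed_lines([('1: ', 'first'), ('2: ', None), ('10: ', 'second')]))
 1: first
10: second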
376,169
def get_page_content(self, page_id, page_info=0): try: return(self.process.GetPageContent(page_id, "", page_info)) except Exception as e: print(e) print("Could not get Page Content")
PageInfo 0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass. 1 - Returns page content with no selection markup, but with all binary data. 2 - Returns page content with selection markup, but no binary data. 3 - Returns page content with selection markup and all binary data.
376,170
def locally_cache_remote_file(href, dir): scheme, host, remote_path, params, query, fragment = urlparse(href) assert scheme in (,), % (scheme,href) head, ext = posixpath.splitext(posixpath.basename(remote_path)) head = sub(r, , head) hash = md5(href).hexdigest()[:8] local_path = % locals() headers = {} if posixpath.exists(local_path): msg( % local_path ) t = localtime(os.stat(local_path).st_mtime) headers[] = strftime(, t) if scheme == : conn = HTTPSConnection(host, timeout=5) else: conn = HTTPConnection(host, timeout=5) if query: remote_path += % query conn.request(, remote_path, headers=headers) resp = conn.getresponse() if resp.status in range(200, 210): f = open(un_posix(local_path), ) msg( % remote_path) f.write(resp.read()) f.close() elif resp.status in (301, 302, 303) and resp.getheader(, False): redirected_href = urljoin(href, resp.getheader()) redirected_path = locally_cache_remote_file(redirected_href, dir) os.rename(redirected_path, local_path) elif resp.status == 304: pass else: raise Exception("Failed to get remote resource %s: %s" % (href, resp.status)) return local_path
Locally cache a remote resource using a predictable file name and awareness of modification date. Assume that files are "normal" which is to say they have filenames with extensions.
376,171
def seek(self, offset, whence=SEEK_SET): if whence == SEEK_SET: self.__sf.seek(offset) elif whence == SEEK_CUR: self.__sf.seek(self.tell() + offset) elif whence == SEEK_END: self.__sf.seek(self.__sf.filesize - offset)
Reposition the file pointer.
376,172
def get_title(self, properly_capitalized=False):
    if properly_capitalized:
        self.title = _extract(
            self._request(self.ws_prefix + ".getInfo", True), "name"
        )
    return self.title
Returns the artist or track title.
376,173
def ok_rev_reg_id(token: str, issuer_did: str = None) -> bool: rr_id_m = re.match( .format(B58), token or ) return bool(rr_id_m) and ((not issuer_did) or (rr_id_m.group(1) == issuer_did and rr_id_m.group(2) == issuer_did))
Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e., <issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or <issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3. :param token: candidate string :param issuer_did: issuer DID to match, if specified :return: whether input token looks like a valid revocation registry identifier
376,174
def get_commit_bzs(self, from_revision, to_revision=None): rng = self.rev_range(from_revision, to_revision) GIT_COMMIT_FIELDS = [, , ] GIT_LOG_FORMAT = [, , ] GIT_LOG_FORMAT = .join(GIT_LOG_FORMAT) + log_out = self(, % GIT_LOG_FORMAT, rng, log_cmd=False, fatal=False) if not log_out: return [] log = log_out.strip().split("\x1e") log = [row.strip().split("\x1f") for row in log] log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log] result = [] for commit in log: bzs = search_bug_references(commit[]) bzs.extend(search_bug_references(commit[])) result.append((commit[], commit[], bzs)) return result
Return a list of tuples, one per commit. Each tuple is (sha1, subject, bz_list). bz_list is a (possibly zero-length) list of numbers.
376,175
def make_category(self, string, parent=None, order=1): cat = Category( name=string.strip(), slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49], order=order ) cat._tree_manager.insert_node(cat, parent, , True) cat.save() if parent: parent.rght = cat.rght + 1 parent.save() return cat
Make and save a category object from a string
376,176
def createTemplate(data): conn = Qubole.agent() return conn.post(Template.rest_entity_path, data)
Create a new template. Args: `data`: json data required for creating a template Returns: Dictionary containing the details of the template with its ID.
376,177
def convex_hull(self):
    points = util.vstack_empty([m.vertices for m in self.dump()])
    hull = convex.convex_hull(points)
    return hull
The convex hull of the whole scene Returns --------- hull: Trimesh object, convex hull of all meshes in scene
376,178
def _compile_fragment_ast(schema, current_schema_type, ast, location, context): query_metadata_table = context[] is_base_type_of_union = ( isinstance(current_schema_type, GraphQLUnionType) and current_schema_type.is_same_type(equivalent_union_type) ) if not (is_same_type_as_scope or is_base_type_of_union): query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj) basic_blocks.append(blocks.CoerceType({coerces_to_type_name})) inner_basic_blocks = _compile_ast_node_to_ir( schema, coerces_to_type_obj, ast, location, context) basic_blocks.extend(inner_basic_blocks) return basic_blocks
Return a list of basic blocks corresponding to the inline fragment at this AST node. Args: schema: GraphQL schema object, obtained from the graphql library current_schema_type: GraphQLType, the schema type at the current location ast: GraphQL AST node, obtained from the graphql library. location: Location object representing the current location in the query context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! Returns: list of basic blocks, the compiled output of the vertex AST node
376,179
def save_yaml_model(model, filename, sort=False, **kwargs): obj = model_to_dict(model, sort=sort) obj["version"] = YAML_SPEC if isinstance(filename, string_types): with io.open(filename, "w") as file_handle: yaml.dump(obj, file_handle, **kwargs) else: yaml.dump(obj, filename, **kwargs)
Write the cobra model to a file in YAML format. ``kwargs`` are passed on to ``yaml.dump``. Parameters ---------- model : cobra.Model The cobra model to represent. filename : str or file-like File path or descriptor that the YAML representation should be written to. sort : bool, optional Whether to sort the metabolites, reactions, and genes or maintain the order defined in the model. See Also -------- to_yaml : Return a string representation. ruamel.yaml.dump : Base function.
376,180
def multiply(self, matrix): if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") j_model = self._java_matrix_wrapper.call("multiply", matrix) return RowMatrix(j_model)
Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`RowMatrix` >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]])) >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
376,181
def verify(path): valid = False try: zf = zipfile.ZipFile(path) except (zipfile.BadZipfile, IsADirectoryError): pass else: names = sorted(zf.namelist()) names = [nn for nn in names if nn.endswith(".tif")] names = [nn for nn in names if nn.startswith("SID PHA")] for name in names: with zf.open(name) as pt: fd = io.BytesIO(pt.read()) if SingleTifPhasics.verify(fd): valid = True break zf.close() return valid
Verify that `path` is a zip file with Phasics TIFF files
376,182
def get_experiment_kind(root): properties = {} if root.find().text == : properties[] = else: raise NotImplementedError(root.find().text + ) properties[] = {: , : , : } kind = getattr(root.find(), , False) if not kind: raise MissingElementError() elif kind in [, ]: properties[][] = kind else: raise NotImplementedError(kind + ) return properties
Read common properties from root of ReSpecTh XML file. Args: root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file Returns: properties (`dict`): Dictionary with experiment type and apparatus information.
376,183
def cases(self, env, data): for handler in self.handlers: env._push() data._push() try: result = handler(env, data) finally: env._pop() data._pop() if result is not None: return result
Calls each nested handler until one of them returns nonzero result. If any handler returns `None`, it is interpreted as "request does not match, the handler has nothing to do with it and `web.cases` should try to call the next handler".
376,184
def recursive_apply(inval, func):
    if isinstance(inval, dict):
        return {k: recursive_apply(v, func) for k, v in inval.items()}
    elif isinstance(inval, list):
        return [recursive_apply(v, func) for v in inval]
    else:
        return func(inval)
Recursively apply a function to all levels of nested iterables :param inval: the object to run the function on :param func: the function that will be run on the inval
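A quick example over nested containers:
>>> recursive_apply({'a': [1, 2, {'b': 3}]}, lambda v: v * 10)
{'a': [10, 20, {'b': 30}]}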
376,185
def filter_bolts(table, header):
    bolts_info = []
    for row in table:
        # NOTE: 'bolt' is an assumption based on the docstring; the original
        # comparison literal was not preserved.
        if row[0] == 'bolt':
            bolts_info.append(row)
    return bolts_info, header
filter to keep bolts
376,186
def loadUi(self, filename, baseinstance=None): try: xui = ElementTree.parse(filename) except xml.parsers.expat.ExpatError: log.exception( % filename) return None loader = UiLoader(baseinstance) xcustomwidgets = xui.find() if xcustomwidgets is not None: for xcustom in xcustomwidgets: header = xcustom.find().text clsname = xcustom.find().text if not header: continue if clsname in loader.dynamicWidgets: continue if in header: header = + .join(header.split()[:-1]) try: __import__(header) module = sys.modules[header] cls = getattr(module, clsname) except (ImportError, KeyError, AttributeError): log.error( % (header, clsname)) continue loader.dynamicWidgets[clsname] = cls loader.registerCustomWidget(cls) ui = loader.load(filename) QtCore.QMetaObject.connectSlotsByName(ui) return ui
Generate a loader to load the filename. :param filename | <str> baseinstance | <QWidget> :return <QWidget> || None
376,187
def add_subtree(cls, for_node, node, options): if cls.is_loop_safe(for_node, node): options.append( (node.pk, mark_safe(cls.mk_indent(node.get_depth()) + escape(node)))) for subnode in node.get_children(): cls.add_subtree(for_node, subnode, options)
Recursively build options tree.
376,188
def _shuffle(y, labels, random_state):
    if labels is None:
        ind = random_state.permutation(len(y))
    else:
        ind = np.arange(len(labels))
        for label in np.unique(labels):
            this_mask = (labels == label)
            ind[this_mask] = random_state.permutation(ind[this_mask])
    return y[ind]
Return a shuffled copy of y, optionally shuffling only among entries with the same label.
376,189
def validate_pair(ob: Any) -> bool:
    try:
        if len(ob) != 2:
            log.warning("Unexpected result: {!r}", ob)
            raise ValueError()
    except ValueError:
        return False
    return True
Does the object have length 2?
376,190
def reload(self):
    if not self.id:
        return
    reloaded_object = self.__class__.find(self.id)
    self.set_raw(
        reloaded_object.raw,
        reloaded_object.etag
    )
Re-fetches the object from the API, discarding any local changes. Returns without doing anything if the object is new.
376,191
def get_image(self, image, output=): if isinstance(image, string_types): image = nb.load(image) if type(image).__module__.startswith(): if output == : return image image = image.get_data() if not type(image).__module__.startswith(): raise ValueError("Input image must be a string, a NiBabel image, " "or a numpy array.") if image.shape[:3] == self.volume.shape: if output == : return nb.nifti1.Nifti1Image(image, None, self.get_header()) elif output == : return image else: image = image.ravel() if output == : return image.ravel() image = np.reshape(image, self.volume.shape) if output == : return image return nb.nifti1.Nifti1Image(image, None, self.get_header())
A flexible method for transforming between different representations of image data. Args: image: The input image. Can be a string (filename of image), NiBabel image, N-dimensional array (must have same shape as self.volume), or vectorized image data (must have same length as current conjunction mask). output: The format of the returned image representation. Must be one of: 'vector': A 1D vectorized array 'array': An N-dimensional array, with shape = self.volume.shape 'image': A NiBabel image Returns: An object containing image data; see output options above.
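The 'vector'/'array' conversions reduce to NumPy raveling and reshaping; a standalone round-trip sketch with an illustrative volume shape:

import numpy as np

volume_shape = (4, 5, 6)                  # illustrative mask volume shape
arr = np.arange(np.prod(volume_shape)).reshape(volume_shape)

vec = arr.ravel()                         # 'vector': 1-D representation
back = np.reshape(vec, volume_shape)      # 'array': restore the N-D shape
assert (back == arr).all()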
376,192
def rate_limited(max_per_hour: int, *args: Any) -> Callable[..., Any]:
    return util.rate_limited(max_per_hour, *args)
Rate limit a function.
376,193
def patcher(args):
    from jcvi.formats.bed import uniq

    p = OptionParser(patcher.__doc__)
    p.add_option("--backbone", default="OM",
                 help="Prefix of the backbone assembly [default: %default]")
    p.add_option("--object", default="object",
                 help="New object name [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    backbonebed, otherbed = args
    backbonebed = uniq([backbonebed])
    otherbed = uniq([otherbed])

    pf = backbonebed.split(".")[0]
    key = lambda x: (x.seqid, x.start, x.end)

    cmd = "intersectBed -v -wa"
    cmd += " -a {0} -b {1}".format(otherbed, backbonebed)
    outfile = otherbed.rsplit(".", 1)[0] + ".not." + backbonebed
    sh(cmd, outfile=outfile)

    uniqbed = Bed()
    uniqbedfile = pf + ".merged.bed"
    uniqbed.extend(Bed(backbonebed))
    uniqbed.extend(Bed(outfile))
    uniqbed.print_to_file(uniqbedfile, sorted=True)

    bed = uniqbed
    key = lambda x: range_parse(x.accn).seqid

    bed_fn = pf + ".patchers.bed"
    bed_fw = open(bed_fn, "w")

    for k, sb in groupby(bed, key=key):
        sb = list(sb)
        chr, start, end, strand = merge_ranges(sb)
        print("\t".join(str(x) for x in
                        (chr, start, end, opts.object, 1000, strand)),
              file=bed_fw)

    bed_fw.close()
%prog patcher backbone.bed other.bed Given optical map alignment, prepare the patchers. Use --backbone to suggest which assembly is the major one, and the patchers will be extracted from another assembly.
376,194
def RetryUpload(self, job, job_id, error):
    if not self.IsErrorRetryable(error):
        # guard added: the original fell through to an undefined retry_count
        raise BigQueryJobUploadError(
            "Can't retry error code %s. Giving up on job: %s." %
            (error.resp.status, job_id))

    retry_count = 0
    sleep_interval = config.CONFIG["BigQuery.retry_interval"]
    while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]:
        time.sleep(sleep_interval.seconds)
        logging.info("Retrying job_id: %s", job_id)
        retry_count += 1
        try:
            response = job.execute()
            return response
        except errors.HttpError as e:
            if self.IsErrorRetryable(e):
                sleep_interval *= config.CONFIG["BigQuery.retry_multiplier"]
                logging.exception("Error with job: %s, will retry in %s",
                                  job_id, sleep_interval)
            else:
                raise BigQueryJobUploadError(
                    "Can't retry error code %s. Giving up on job: %s." %
                    (error.resp.status, job_id))

    raise BigQueryJobUploadError(
        "Giving up on job: %s after %s retries." % (job_id, retry_count))
Retry the BigQuery upload job. Using the same job id protects us from duplicating data on the server. If we fail all of our retries we raise. Args: job: BigQuery job object job_id: ID string for this upload job error: errors.HttpError object from the first error Returns: API response object on success, None on failure Raises: BigQueryJobUploadError: if we can't get the bigquery job started after retry_max_attempts
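The retry schedule is a plain geometric backoff. A standalone sketch of how the sleep interval grows, with illustrative values standing in for the `config.CONFIG` entries:

# Illustrative values; the real ones come from config.CONFIG.
retry_interval = 2.0     # seconds ("BigQuery.retry_interval")
retry_multiplier = 2.0   # ("BigQuery.retry_multiplier")
retry_max_attempts = 5   # ("BigQuery.retry_max_attempts")

sleep_interval = retry_interval
for attempt in range(1, retry_max_attempts + 1):
    print("attempt %d: sleep %.1fs" % (attempt, sleep_interval))
    sleep_interval *= retry_multiplier  # grows only after a retryable failure
# attempt 1: 2.0s, attempt 2: 4.0s, ..., attempt 5: 32.0s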
376,195
def list_files(start_path):
    # format strings reconstructed; the originals were lost in extraction
    s = u''
    for root, dirs, files in os.walk(start_path):
        level = root.replace(start_path, '').count(os.sep)
        indent = ' ' * 4 * level
        s += u'{0}{1}/\n'.format(indent, os.path.basename(root))
        sub_indent = ' ' * 4 * (level + 1)
        for f in files:
            s += u'{0}{1}\n'.format(sub_indent, f)
    return s
Replacement for the Unix `tree` command: return the directory tree rooted at start_path as a string.
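A usage sketch (assumes the `list_files` above is in scope; the directory layout is created on the fly):

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "pkg"))
open(os.path.join(root, "pkg", "__init__.py"), "w").close()

print(list_files(root))
# <tmpdir>/
#     pkg/
#         __init__.py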
376,196
def translate(args):
    transl_tables = [str(x) for x in xrange(1, 25)]
    p = OptionParser(translate.__doc__)
    p.add_option("--ids", default=False, action="store_true",
                 help="Create .ids file with the complete/partial/gaps "
                      "label [default: %default]")
    p.add_option("--longest", default=False, action="store_true",
                 help="Find the longest ORF from each input CDS [default: %default]")
    p.add_option("--table", default=1, choices=transl_tables,
                 help="Specify translation table to use [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    cdsfasta, = args
    if opts.longest:
        cdsfasta = longestorf([cdsfasta])

    f = Fasta(cdsfasta, lazy=True)
    outfile = opts.outfile
    fw = must_open(outfile, "w")

    if opts.ids:
        idsfile = cdsfasta.rsplit(".", 1)[0] + ".ids"
        ids = open(idsfile, "w")
    else:
        ids = None

    five_prime_missing = three_prime_missing = 0
    contain_ns = complete = cannot_translate = total = 0

    for name, rec in f.iteritems_ordered():
        cds = rec.seq
        cdslen = len(cds)
        peplen = cdslen / 3
        total += 1

        pep = ""
        for i in xrange(3):
            newcds = cds[i: i + peplen * 3]
            newpep = newcds.translate(table=opts.table)
            if len(newpep.split("*")[0]) > len(pep.split("*")[0]):
                pep = newpep

        labels = []
        if "*" in pep.rstrip("*"):
            logging.error("{0} cannot translate".format(name))
            cannot_translate += 1
            labels.append("cannot_translate")

        contains_start = pep.startswith("M")
        contains_stop = pep.endswith("*")
        contains_ns = "X" in pep
        start_ns = pep.startswith("X")
        end_ns = pep.endswith("X")

        if not contains_start:
            five_prime_missing += 1
            labels.append("five_prime_missing")
        if not contains_stop:
            three_prime_missing += 1
            labels.append("three_prime_missing")
        if contains_ns:
            contain_ns += 1
            labels.append("contain_ns")
        if contains_start and contains_stop:
            complete += 1
            labels.append("complete")
        if start_ns:
            labels.append("start_ns")
        if end_ns:
            labels.append("end_ns")

        if ids:
            print("\t".join((name, ",".join(labels))), file=ids)

        peprec = SeqRecord(pep, id=name, description=rec.description)
        SeqIO.write([peprec], fw, "fasta")
        fw.flush()

    print("Complete gene models: {0}".
          format(percentage(complete, total)), file=sys.stderr)
    print("Missing 5`-end: {0}".
          format(percentage(five_prime_missing, total)), file=sys.stderr)
    print("Missing 3`-end: {0}".
          format(percentage(three_prime_missing, total)), file=sys.stderr)
    print("Contain Ns: {0}".
          format(percentage(contain_ns, total)), file=sys.stderr)

    if cannot_translate:
        print("Cannot translate: {0}".
              format(percentage(cannot_translate, total)), file=sys.stderr)

    fw.close()

    return cdsfasta, outfile
%prog translate cdsfasta Translate CDS to proteins. The tricky thing is that sometimes the CDS represents a partial gene, therefore disrupting the frame of the protein. Check all three frames to get a valid translation.
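The three-frame scan at the heart of the loop, as a standalone Biopython sketch (the CDS string is illustrative; the winning frame is the one with the longest run before the first stop codon):

from Bio.Seq import Seq

cds = Seq("ATGGCCATTGTAATGGGCCGCTG")  # illustrative 23 nt CDS
peplen = len(cds) // 3

best = ""
for i in range(3):                    # try all three reading frames
    frame = cds[i: i + peplen * 3]    # each frame is a whole number of codons
    pep = str(frame.translate())
    if len(pep.split("*")[0]) > len(best.split("*")[0]):
        best = pep
print(best)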
376,197
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
                    featureSubsetStrategy="auto", impurity="gini",
                    maxDepth=4, maxBins=32, seed=None):
    return cls._train(data, "classification", numClasses,
                      categoricalFeaturesInfo, numTrees, featureSubsetStrategy,
                      impurity, maxDepth, maxBins, seed)
Train a random forest model for binary or multiclass classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "sqrt". (default: "auto") :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42) >>> model.numTrees() 3 >>> model.totalNumNodes() 7 >>> print(model) TreeEnsembleModel classifier with 3 trees <BLANKLINE> >>> print(model.toDebugString()) TreeEnsembleModel classifier with 3 trees <BLANKLINE> Tree 0: Predict: 1.0 Tree 1: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 Tree 2: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[3.0], [1.0]]) >>> model.predict(rdd).collect() [1.0, 0.0]
376,198
def parents(self):
    assert self.parent is not self
    if self.parent is None:
        return []
    return [self.parent] + self.parent.parents()
Return the list of ancestor nodes, nearest parent first.
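A toy stand-in class reproducing the recursion above, showing the ordering of the returned list:

class Node:
    def __init__(self, parent=None):
        self.parent = parent

    def parents(self):
        assert self.parent is not self
        if self.parent is None:
            return []
        return [self.parent] + self.parent.parents()

root = Node()
mid = Node(root)
leaf = Node(mid)
print(leaf.parents() == [mid, root])  # True: nearest ancestor first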
376,199
def read_remote(self):
    coded_line = self.inout.read_msg()
    if isinstance(coded_line, bytes):
        coded_line = coded_line.decode("utf-8")
    control = coded_line[0]
    remote_line = coded_line[1:]
    return (control, remote_line)
Read a message from the remote connection (in contrast to the local user output channel), returning the control character and the remainder of the line.
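The wire format is a one-character control code followed by the payload; a minimal decoding sketch (the control letter and payload here are illustrative):

coded_line = b"Cquit".decode("utf-8")  # e.g. control 'C', payload 'quit'
control, remote_line = coded_line[0], coded_line[1:]
print((control, remote_line))          # ('C', 'quit')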