Dataset preview. Columns: row index (int64, 0 to 389k); code (string, 26 to 79.6k chars); docstring (string, 1 to 46.9k chars).
def configure_firewall(self, FirewallRules):
    firewall_rule_bodies = [
        FirewallRule.to_dict() for FirewallRule in FirewallRules
    ]
    return self.cloud_manager.configure_firewall(self, firewall_rule_bodies)
Helper function for automatically adding several FirewallRules in series.
17,601
def draw_polygon(
    self, *pts, close_path=True, stroke=None, stroke_width=1,
    stroke_dash=None, fill=None
) -> None:
    c = self.c
    c.saveState()
    if stroke is not None:
        c.setStrokeColorRGB(*stroke)
        c.setLineWidth(stroke_width)
        c.setDash(stroke_dash)
    if fill is not None:
        c.setFillColorRGB(*fill)
    p = c.beginPath()
    fn = p.moveTo
    for x, y in zip(*[iter(pts)] * 2):  # consume pts pairwise as (x, y)
        fn(x, y)
        fn = p.lineTo
    if close_path:
        p.close()
    c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
    c.restoreState()
Draws the given polygon.
17,602
def search_bm25(cls, term, weights=None, with_score=False,
                score_alias='score', explicit_ordering=False):
    # The 'score' default and 'rank' literal were dropped during
    # extraction; they are restored from peewee's FTS5 conventions.
    if not weights:
        rank = SQL('rank')
    elif isinstance(weights, dict):
        weight_args = []
        for field in cls._meta.sorted_fields:
            if isinstance(field, SearchField) and not field.unindexed:
                weight_args.append(
                    weights.get(field, weights.get(field.name, 1.0)))
        rank = fn.bm25(cls._meta.entity, *weight_args)
    else:
        rank = fn.bm25(cls._meta.entity, *weights)
    selection = ()
    order_by = rank
    if with_score:
        selection = (cls, rank.alias(score_alias))
    if with_score and not explicit_ordering:
        order_by = SQL(score_alias)
    return (cls
            .select(*selection)
            .where(cls.match(FTS5Model.clean_query(term)))
            .order_by(order_by))
Full-text search using selected `term`.
17,603
def _GetPluginData(self):
    # Dict keys below were lost during extraction; the labels used here
    # are reconstructions from context and are assumptions.
    return_dict = {}
    return_dict['Versions'] = [
        ('plaso engine', plaso.__version__),
        ('python', sys.version)]
    hashers_information = hashers_manager.HashersManager.GetHashersInformation()
    parsers_information = parsers_manager.ParsersManager.GetParsersInformation()
    plugins_information = (
        parsers_manager.ParsersManager.GetParserPluginsInformation())
    presets_information = parsers_manager.ParsersManager.GetPresetsInformation()
    return_dict['Hashers'] = hashers_information
    return_dict['Parsers'] = parsers_information
    return_dict['Parser Plugins'] = plugins_information
    return_dict['Parser Presets'] = presets_information
    return return_dict
Retrieves the version and various plugin information. Returns: dict[str, list[str]]: available parsers and plugins.
17,604
def load_collection_from_stream(resource, stream, content_type):
    coll = create_staging_collection(resource)
    load_into_collection_from_stream(coll, stream, content_type)
    return coll
Creates a new collection for the registered resource and calls `load_into_collection_from_stream` with it.
17,605
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
    t = start_token
    advance = self.prev_token if reverse else self.next_token
    while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
        t = advance(t, include_extra=True)
    return t
Looks for the first token, starting at start_token, that matches tok_type and, if given, the token string. Searches backwards if reverse is True. Returns the ENDMARKER token if not found (you can check it with `token.ISEOF(t.type)`).
17,606
def restart(self):
    for page in self._pages.values():
        page.hide()
    pageId = self.startId()
    try:
        first_page = self._pages[pageId]
    except KeyError:
        return
    self._currentId = pageId
    self._navigation = [pageId]
    page_size = self.pageSize()
    x = (self.width() - page_size.width()) / 2
    y = (self.height() - page_size.height()) / 2
    first_page.move(self.width() + first_page.width(), y)
    first_page.show()
    anim_out = QtCore.QPropertyAnimation(self)
    anim_out.setTargetObject(first_page)
    anim_out.setPropertyName('pos')  # property name restored; the animation moves the page
    anim_out.setStartValue(first_page.pos())
    anim_out.setEndValue(QtCore.QPoint(x, y))
    anim_out.setDuration(self.animationSpeed())
    anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)
    anim_out.finished.connect(anim_out.deleteLater)
    self._buttons[self.WizardButton.BackButton].setVisible(False)
    self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
    self._buttons[self.WizardButton.CommitButton].setVisible(first_page.isCommitPage())
    self._buttons[self.WizardButton.FinishButton].setVisible(first_page.isFinalPage())
    self.adjustSize()
    first_page.initializePage()
    self.currentIdChanged.emit(pageId)
    anim_out.start()
Restarts the whole wizard from the beginning.
17,607
def bundle(self, bundle_id, channel=None):
    return self.entity(bundle_id, get_files=True, channel=channel)
Get the default data for a bundle. @param bundle_id The bundle's id. @param channel Optional channel name.
17,608
def put_archive(request, pid):
    d1_gmn.app.views.assert_db.is_not_replica(pid)
    d1_gmn.app.views.assert_db.is_not_archived(pid)
    d1_gmn.app.sysmeta.archive_sciobj(pid)
    return pid
MNStorage.archive(session, did) → Identifier.
17,609
def getreferingobjs(referedobj, iddgroups=None, fields=None):
    # String literals ('reference', 'key', 'group', 'object-list', 'Name')
    # were lost during extraction; they are reconstructed from eppy's IDD
    # conventions and are assumptions.
    referringobjs = []
    idf = referedobj.theidf
    referedidd = referedobj.getfieldidd("Name")
    try:
        references = referedidd['reference']
    except KeyError as e:
        return referringobjs
    idfobjs = idf.idfobjects.values()
    idfobjs = list(itertools.chain.from_iterable(idfobjs))
    if iddgroups:
        idfobjs = [anobj for anobj in idfobjs
                   if anobj.getfieldidd('key')['group'] in iddgroups]
    for anobj in idfobjs:
        if not fields:
            thefields = anobj.objls
        else:
            thefields = fields
        for field in thefields:
            try:
                itsidd = anobj.getfieldidd(field)
            except ValueError as e:
                continue
            if 'object-list' in itsidd:
                refname = itsidd['object-list'][0]
                if refname in references:
                    if referedobj.isequal('Name', anobj[field]):
                        referringobjs.append(anobj)
    return referringobjs
Get a list of objects that refer to this object
17,610
def keyPressEvent(self, event):
    key = event.key()
    if key in [Qt.Key_Enter, Qt.Key_Return]:
        self.show_editor()
    elif key in [Qt.Key_Tab]:
        self.finder.setFocus()
    elif key in [Qt.Key_Backtab]:
        self.parent().reset_btn.setFocus()
    elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
        super(ShortcutsTable, self).keyPressEvent(event)
    elif key not in [Qt.Key_Escape, Qt.Key_Space]:
        text = event.text()
        if text:
            if re.search(VALID_FINDER_CHARS, text) is not None:
                self.finder.setFocus()
                self.finder.set_text(text)
    elif key in [Qt.Key_Escape]:
        self.finder.keyPressEvent(event)
Qt Override.
17,611
def rpc_get_docstring(self, filename, source, offset): return self._call_backend("rpc_get_docstring", None, filename, get_source(source), offset)
Get the docstring for the symbol at the offset.
17,612
def get_any_nt_unit_rule(g):
    for rule in g.rules:
        if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT):
            return rule
    return None
Returns a non-terminal unit rule from 'g', or None if there is none.
17,613
def gaussian(x, y, xsigma, ysigma):
    if xsigma == 0.0 or ysigma == 0.0:
        return x * 0.0
    with float_error_ignore():
        x_w = np.divide(x, xsigma)
        y_h = np.divide(y, ysigma)
        return np.exp(-0.5 * x_w * x_w + -0.5 * y_h * y_h)
Two-dimensional oriented Gaussian pattern (i.e., 2D version of a bell curve, like a normal distribution but not necessarily summing to 1.0).
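A quick sanity check of this pattern (a hypothetical standalone usage; float_error_ignore is assumed to be a context manager that silences numpy divide warnings):

import numpy as np
y, x = np.mgrid[-1:1:0.01, -1:1:0.01]       # 2D evaluation grid
g = gaussian(x, y, xsigma=0.3, ysigma=0.15)
print(g.max(), g.shape)                     # peak of 1.0 at the origin; (200, 200)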
17,614
def symmetrise(matrix, tri='upper'):
    # Default 'upper' restored; the literal was dropped during extraction.
    if tri == 'upper':
        tri_fn = np.triu_indices
    else:
        tri_fn = np.tril_indices
    size = matrix.shape[0]
    matrix[tri_fn(size)[::-1]] = matrix[tri_fn(size)]
    return matrix
Will copy the selected (upper or lower) triangle of a square matrix to the opposite side, so that the matrix is symmetrical. Alters in place.
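For illustration, a minimal usage sketch (assumes numpy is imported as np and symmetrise is in scope):

import numpy as np
m = np.array([[1., 2., 3.],
              [0., 1., 4.],
              [0., 0., 1.]])
symmetrise(m, tri='upper')   # mirrors the upper triangle onto the lower, in place
print(np.allclose(m, m.T))   # True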
17,615
def _reset(self, server, **kwargs):
    if server:
        # 'cloud_manager' key restored from the docstring's description of kwargs.
        Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
    for key in server:
        object.__setattr__(self, key, server[key])
    for key in kwargs:
        object.__setattr__(self, key, kwargs[key])
Reset the server object with new values given as params. - server: a dict representing the server. e.g the API response. - kwargs: any meta fields such as cloud_manager and populated. Note: storage_devices and ip_addresses may be given in server as dicts or in kwargs as lists containing Storage and IPAddress objects.
17,616
def method_name(func):
    @wraps(func)
    def _method_name(*args, **kwargs):
        name = to_pascal_case(func.__name__)
        return func(name=name, *args, **kwargs)
    return _method_name
Method wrapper that adds the name of the method being called to its arguments list in Pascal case
17,617
def start(self) -> None:
    self.stop()
    self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
    self._thread.start()
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
17,618
def zip_ll(data, means, M):
    genes, cells = data.shape
    clusters = means.shape[1]
    ll = np.zeros((cells, clusters))
    d0 = (data == 0)
    d1 = (data > 0)
    for i in range(clusters):
        means_i = np.tile(means[:, i], (cells, 1))
        means_i = means_i.transpose()
        L_i = np.tile(M[:, i], (cells, 1))
        L_i = L_i.transpose()
        ll_0 = np.log(L_i + (1 - L_i) * np.exp(-means_i))
        ll_0 = np.where((L_i == 0) & (means_i == 0), -means_i, ll_0)
        ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
        ll_0 = np.where(d0, ll_0, 0.0)
        ll_1 = np.where(d1, ll_1, 0.0)
        ll[:, i] = np.sum(ll_0 + ll_1, 0)
    return ll
Calculates the zero-inflated Poisson log-likelihood. Args: data (array): genes x cells means (array): genes x k M (array): genes x k - this is the zero-inflation parameter. Returns: cells x k array of log-likelihood for each cell/cluster pair.
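A shape-level sanity check (hypothetical inputs; assumes numpy and scipy.special.xlogy, which the function uses):

import numpy as np
from scipy.special import xlogy
data = np.random.poisson(2.0, size=(10, 5))  # 10 genes x 5 cells
means = np.full((10, 2), 2.0)                # k = 2 clusters
M = np.full((10, 2), 0.1)                    # zero-inflation parameters
print(zip_ll(data, means, M).shape)          # (5, 2): one log-likelihood per cell/cluster pair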
17,619
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs):
    # The print() labels below were lost during extraction; the messages
    # here are reconstructions and only the computations are authoritative.
    smat = np.zeros((len(u), len(vt)))
    np.fill_diagonal(smat, s)
    smat = pd.DataFrame(smat, columns=vt.index, index=u.index)
    if verbosity:
        print('Singular value matrix:')
        print(smat.round(2))
        print('Singular values:')
        print(np.diag(smat.round(2)))
    tdm_prime = u.values.dot(smat.values).dot(vt.values)
    if verbosity:
        print('Reconstructed term-document matrix:')
        print(tdm_prime.round(2))
    err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))]
    if verbosity:
        print('RMSE with all dimensions:')
        print(err[-1])
    smat2 = smat.copy()
    for numdim in range(len(s) - 1, 0, -1):
        smat2.iloc[numdim, numdim] = 0
        if verbosity:
            print('Singular values with dimension {} zeroed:'.format(numdim))
            print(np.diag(smat2.round(2)))
        tdm_prime2 = u.values.dot(smat2.values).dot(vt.values)
        err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))]
        if verbosity:
            print('RMSE with {} dimensions:'.format(numdim))
            print(err[-1])
    return err
Reconstruct the term-document matrix and measure error as SVD terms are truncated
17,620
def run(self):
    # The glob pattern and progress messages were dropped during
    # extraction; '*.wav' and the print texts are reconstructions.
    for fn in glob_all(self.args.random_data_folder, '*.wav'):
        if fn in self.trained_fns:
            print('Skipping ' + fn + '...')
            continue
        print('Starting file ' + fn + '...')
        self.train_on_audio(fn)
        print('Done.')
        self.trained_fns.append(fn)
        save_trained_fns(self.trained_fns, self.args.model)
Begin reading through audio files, saving false activations and retraining when necessary
17,621
def get_router_for_floatingip(self, context, internal_port,
                              internal_subnet, external_network_id):
    # Dict keys ('network_id', 'id', 'gateway_ip') restored from the
    # Neutron port/subnet schema; the literals were dropped during extraction.
    gw_port = orm.aliased(models_v2.Port, name="gw_port")
    routerport_qry = context.session.query(
        RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
        models_v2.Port, models_v2.IPAllocation).filter(
        models_v2.Port.network_id == internal_port['network_id'],
        RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
        models_v2.IPAllocation.subnet_id == internal_subnet['id']
    ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
        gw_port.network_id == external_network_id,
        gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
    ).distinct()
    routerport_qry = routerport_qry.outerjoin(
        RouterRedundancyBinding,
        RouterRedundancyBinding.redundancy_router_id == RouterPort.router_id)
    routerport_qry = routerport_qry.filter(
        RouterRedundancyBinding.redundancy_router_id == expr.null())
    first_router_id = None
    for router_id, interface_ip in routerport_qry:
        if interface_ip == internal_subnet['gateway_ip']:
            return router_id
        if not first_router_id:
            first_router_id = router_id
    if first_router_id:
        return first_router_id
    raise l3_exceptions.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet['id'],
        external_network_id=external_network_id,
        port_id=internal_port['id'])
We need to over-load this function so that we only return the user visible router and never its redundancy routers (as they never have floatingips associated with them).
17,622
def holdAcknowledge():
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x19)
    packet = a / b
    return packet
HOLD ACKNOWLEDGE Section 9.3.11
17,623
def _set_cdn_access(self, container, public, ttl=None):
    headers = {"X-Cdn-Enabled": "%s" % public}
    if public and ttl:
        headers["X-Ttl"] = ttl
    self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
                         headers=headers)
Enables or disables CDN access for the specified container, and optionally sets the TTL for the container when enabling access.
17,624
def get_scoped_variable_m(self, data_port_id):
    for scoped_variable_m in self.scoped_variables:
        if scoped_variable_m.scoped_variable.data_port_id == data_port_id:
            return scoped_variable_m
    return None
Returns the scoped variable model for the given data port id :param data_port_id: The data port id to search for :return: The model of the scoped variable with the given id
17,625
def _check_curtailment_target(curtailment, curtailment_target, curtailment_key):
    if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all():
        # The message text was lost during extraction; this wording is a
        # reconstruction.
        message = 'Curtailment target not met in all time steps for {}.'.format(
            curtailment_key)
        logging.error(message)
        raise TypeError(message)
Raises an error if curtailment target was not met in any time step. Parameters ----------- curtailment : :pandas:`pandas:DataFrame<dataframe>` Dataframe containing the curtailment in kW per generator and time step. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the generator representatives. curtailment_target : :pandas:`pandas.Series<series>` The curtailment in kW that was to be distributed amongst the generators. Index of the series is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` The technology and weather cell ID if :obj:`tuple` or only the technology if :obj:`str` the curtailment was specified for.
17,626
def composite(
    background_image,
    foreground_image,
    foreground_width_ratio=0.25,
    foreground_position=(0.0, 0.0),
):
    if foreground_width_ratio <= 0:
        return background_image
    composite = background_image.copy()
    width = int(foreground_width_ratio * background_image.shape[1])
    foreground_resized = resize(foreground_image, width)
    size = foreground_resized.shape
    x = int(foreground_position[1] * (background_image.shape[1] - size[1]))
    y = int(foreground_position[0] * (background_image.shape[0] - size[0]))
    composite[y : y + size[0], x : x + size[1]] = foreground_resized
    return composite
Takes two images and composites them.
17,627
def minion_pub(self, clear_load):
    if not self.__verify_minion_publish(clear_load):
        return {}
    else:
        return self.masterapi.minion_pub(clear_load)
Publish a command initiated from a minion. This method enforces minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions. The config will look like this: .. code-block:: bash peer: .*: - .* This configuration will enable all minions to execute all commands. .. code-block:: bash peer: foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion payload
17,628
def solid_angles(self, permutation=None):
    if permutation is None:
        return self._solid_angles
    else:
        return [self._solid_angles[ii] for ii in permutation]
Returns the list of "perfect" solid angles. Each edge is given as a list of its end vertices coordinates.
17,629
def dump_json_data(page):
    # All dict keys and string literals in this function were lost during
    # extraction; the names used here follow django-page-cms conventions
    # and are reconstructions.
    def content_langs_ordered():
        params = {'page': page}
        if page.freeze_date:
            params['creation_date__lte'] = page.freeze_date
        cqs = Content.objects.filter(**params)
        cqs = cqs.values('language').annotate(latest=Max('creation_date'))
        return [c['language'] for c in cqs.order_by('latest')]
    languages = content_langs_ordered()

    def language_content(ctype):
        return dict(
            (lang, page.get_content(lang, ctype, language_fallback=False))
            for lang in languages)

    def placeholder_content():
        out = {}
        for p in get_placeholders(page.get_template()):
            if p.ctype in ('title', 'slug'):
                continue
            out[p.name] = language_content(p.name)
        return out

    def isoformat(d):
        return None if d is None else d.strftime(ISODATE_FORMAT)

    def custom_email(user):
        return user.email

    tags = []
    if settings.PAGE_TAGGING:
        tags = [tag.name for tag in page.tags.all()]
    return {
        'complete_slug': dict(
            (lang, page.get_complete_slug(lang, hideroot=False))
            for lang in languages),
        'title': language_content('title'),
        'author_email': custom_email(page.author),
        'creation_date': isoformat(page.creation_date),
        'publication_date': isoformat(page.publication_date),
        'publication_end_date': isoformat(page.publication_end_date),
        'last_modification_date': isoformat(page.last_modification_date),
        'status': {
            Page.PUBLISHED: 'published',
            Page.HIDDEN: 'hidden',
            Page.DRAFT: 'draft'}[page.status],
        'template': page.template,
        'sites': (
            [site.domain for site in page.sites.all()]
            if settings.PAGE_USE_SITE_ID else []),
        'redirect_to_url': page.redirect_to_url,
        'redirect_to_complete_slug': dict(
            (lang, page.redirect_to.get_complete_slug(lang, hideroot=False))
            for lang in page.redirect_to.get_languages()
        ) if page.redirect_to is not None else None,
        'content': placeholder_content(),
        'languages': languages,
        'tags': tags,
    }
Return a python dict representation of this page for use as part of a JSON export.
17,630
def getModelPosterior(self, min):
    # The key 'LML' (log marginal likelihood of the minimum) is a
    # reconstruction; the literal was dropped during extraction.
    Sigma = self.getLaplaceCovar(min)
    n_params = self.vd.getNumberScales()
    ModCompl = 0.5 * n_params * SP.log(2 * SP.pi) + 0.5 * SP.log(SP.linalg.det(Sigma))
    RV = min['LML'] + ModCompl
    return RV
USES LAPLACE APPROXIMATION TO CALCULATE THE BAYESIAN MODEL POSTERIOR
17,631
def _animate_bbvi(self, stored_latent_variables, stored_predictive_likelihood):
    from matplotlib.animation import FuncAnimation, writers
    import matplotlib.pyplot as plt
    import seaborn as sns
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ud = BBVINLLMAnimate(ax, self.data, stored_latent_variables, self.index,
                         self.z_no, self.link)
    anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]),
                         init_func=ud.init, interval=10, blit=True)
    plt.plot(self.data)
    plt.xlabel("Time")
    plt.ylabel(self.data_name)
    plt.show()
Produces animated plot of BBVI optimization Returns ---------- None (changes model attributes)
17,632
def distance(self, loc):
    assert type(loc) == type(self)
    lon1, lat1, lon2, lat2 = map(radians, [
        self.lon, self.lat, loc.lon, loc.lat,
    ])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    r = 6371000  # mean Earth radius in metres
    return c * r
Calculate the great circle distance between two points on the earth (specified in decimal degrees)
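A worked example of the haversine formula above (the Location constructor is hypothetical; any object with lon/lat attributes and this method works):

# Paris (2.35 E, 48.86 N) to Berlin (13.40 E, 52.52 N)
paris = Location(lon=2.35, lat=48.86)
berlin = Location(lon=13.40, lat=52.52)
print(round(paris.distance(berlin) / 1000))  # ~878 km great-circle distance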
17,633
def run(command, parser, cl_args, unknown_args):
    Log.debug("Deactivate Args: %s", cl_args)
    return cli_helper.run(command, cl_args, "deactivate topology")
:param command: :param parser: :param cl_args: :param unknown_args: :return:
17,634
def write_unchecked_hmac_data(self, offsets, data):
    # The file mode and chunk-dict keys are reconstructions; the string
    # literals were dropped during extraction.
    fname = None
    with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fd:
        fname = fd.name
        fd.write(data)
    unchecked = UncheckedChunk(
        data_len=len(data),
        fd_start=0,
        file_path=pathlib.Path(fname),
        temp=True,
    )
    with self._meta_lock:
        self._unchecked_chunks[offsets.chunk_num] = {
            'ucc': unchecked,
            'decrypted': False,
        }
    return str(unchecked.file_path)
Write unchecked encrypted data to disk :param Descriptor self: this :param Offsets offsets: download offsets :param bytes data: hmac/encrypted data
17,635
def log_interpolate_1d(x, xp, *args, **kwargs):
    # Keyword names restored from the docstring; the literals were dropped
    # during extraction.
    fill_value = kwargs.pop('fill_value', np.nan)
    axis = kwargs.pop('axis', 0)
    log_x = np.log(x)
    log_xp = np.log(xp)
    return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)
r"""Interpolates data with logarithmic x-scale over a specified axis. Interpolation on a logarithmic x-scale for interpolation values in pressure coordintates. Parameters ---------- x : array-like 1-D array of desired interpolated values. xp : array-like The x-coordinates of the data points. args : array-like The data to be interpolated. Can be multiple arguments, all must be the same shape as xp. axis : int, optional The axis to interpolate over. Defaults to 0. fill_value: float, optional Specify handling of interpolation points out of data bounds. If None, will return ValueError if points are out of bounds. Defaults to nan. Returns ------- array-like Interpolated values for each point with coordinates sorted in ascending order. Examples -------- >>> x_log = np.array([1e3, 1e4, 1e5, 1e6]) >>> y_log = np.log(x_log) * 2 + 3 >>> x_interp = np.array([5e3, 5e4, 5e5]) >>> metpy.calc.log_interp(x_interp, x_log, y_log) array([20.03438638, 24.63955657, 29.24472675]) Notes ----- xp and args must be the same shape.
17,636
def get_pubkey(self):
    pkey = PKey.__new__(PKey)
    pkey._pkey = _lib.NETSCAPE_SPKI_get_pubkey(self._spki)
    _openssl_assert(pkey._pkey != _ffi.NULL)
    pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
Get the public key of this certificate. :return: The public key. :rtype: :py:class:`PKey`
17,637
def parse(self, filepath, dependencies=False, recursive=False, greedy=False):
    abspath = self.tramp.abspath(filepath)
    self._add_current_codedir(abspath)
    fname = filepath.split("/")[-1].lower()
    mtime_check = self._check_parse_modtime(abspath, fname)
    if mtime_check is None:
        return
    if self.verbose:
        start_time = clock()
        msg.okay("WORKING on {0}".format(abspath), 2)
    if fname not in self._modulefiles:
        self._modulefiles[fname] = []
    if fname not in self._programfiles:
        self._programfiles[fname] = []
    pickle_load = False
    pprograms = []
    if len(mtime_check) == 1 and settings.use_filesystem_cache:
        pmodules = self.serialize.load_module(abspath, mtime_check[0], self)
        if pmodules is not None:
            for module in pmodules:
                self.modules[module.name.lower()] = module
                self._modulefiles[fname].append(module.name.lower())
            pickle_load = True
        else:
            pmodules, pprograms = self._parse_from_file(abspath, fname,
                                                        dependencies, recursive, greedy)
    else:
        pmodules, pprograms = self._parse_from_file(abspath, fname,
                                                    dependencies, recursive, greedy)
    self._parsed.append(abspath.lower())
    if not pickle_load and len(pmodules) > 0 and settings.use_filesystem_cache:
        self.serialize.save_module(abspath, pmodules)
    if self.verbose:
        msg.info("PARSED: {} modules and {} ".format(len(pmodules), len(pprograms)) +
                 "programs in {} in {}".format(fname, secondsToStr(clock() - start_time)), 2)
        for module in pmodules:
            msg.gen("\tMODULE {}".format(module.name), 2)
        for program in pprograms:
            msg.gen("\tPROGRAM {}".format(program.name), 2)
        if len(pmodules) > 0 or len(pprograms) > 0:
            msg.blank()
    self._parse_dependencies(pmodules, dependencies, recursive, greedy)
Parses the fortran code in the specified file. :arg dependencies: if true, all folder paths will be searched for modules that have been referenced but aren't loaded in the parser. :arg greedy: if true, when a module cannot be found using a file name of module_name.f90, all modules in all folders are searched.
17,638
def filter_generic(self, content_object=None, **kwargs):
    # 'content_type' and 'object_id' are the standard generic-FK field
    # names; the literals were dropped during extraction.
    if content_object:
        kwargs['content_type'] = ContentType.objects.get_for_model(
            content_object
        )
        kwargs['object_id'] = content_object.id
    return self.filter(**kwargs)
Filter by a generic object. :param content_object: the content object to filter on.
17,639
def save(self, *args, **kwargs):
    self.name = (str(self.parent.name) + " - " + str(self.child.name) +
                 " - " + str(self.ownership_type))
    if self.amount > 100:
        raise ValueError("Ownership amount cannot be more than 100%")
    elif self.amount < 0:
        raise ValueError("Ownership amount cannot be less than 0%")
    else:
        super(Ownership, self).save(*args, **kwargs)
Generate a name, and ensure amount is less than or equal to 100
17,640
def free_index(self, name, free=True, **kwargs):
    # 'SpectrumType' key restored; the literal was dropped during extraction.
    src = self.roi.get_source_by_name(name)
    self.free_source(name, free=free,
                     pars=index_parameters.get(src['SpectrumType'], []),
                     **kwargs)
Free/Fix index of a source. Parameters ---------- name : str Source name. free : bool Choose whether to free (free=True) or fix (free=False).
17,641
def remove_schema(self, database, schema): self.schemas.discard((_lower(database), _lower(schema)))
Remove a schema from the set of known schemas (case-insensitive) If the schema does not exist, it will be ignored - it could just be a temporary table. :param str database: The database name to remove. :param str schema: The schema name to remove.
17,642
def _round_whole_even(i):
    if i % .5 == 0:
        if (i + 0.5) % 2 == 0:
            i = i + 0.5
        else:
            i = i - 0.5
    else:
        i = round(i, 0)
    return int(i)
r'''Round a number to the nearest whole number. If the number is exactly between two numbers, round to the even whole number. Used by `viscosity_index`. Parameters ---------- i : float Number, [-] Returns ------- i : int Rounded number, [-] Notes ----- Should never run with inputs from a practical function, as numbers on computers aren't really normally exactly between two numbers. Examples -------- _round_whole_even(116.5) 116
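The half-to-even behaviour in practice (a minimal sketch using the function above):

for v in (115.5, 116.5, 117.2):
    print(_round_whole_even(v))  # 116, 116, 117: halves round to the even neighbour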
17,643
def get_object_by_name(content, object_type, name, regex=False):
    container = content.viewManager.CreateContainerView(
        content.rootFolder, [object_type], True
    )
    for c in container.view:
        if regex:
            if re.match(name, c.name):
                return c
        elif c.name == name:
            return c
Get the vsphere object associated with a given text name Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py
17,644
def create_invoice_from_albaran(pk, list_lines):
    # The 'error' key and the user-facing error messages are
    # reconstructions; the string literals were dropped during extraction.
    context = {}
    if list_lines:
        new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list().filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(invoiced=True)]
        if new_list_lines:
            lo = SalesLineOrder.objects.values_list().filter(pk__in=new_list_lines)[:1]
            if lo and lo[0] and lo[0][0]:
                new_pk = lo[0][0]
                context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
                if 'error' not in context or not context['error']:
                    SalesLineAlbaran.objects.filter(
                        pk__in=[int(x) for x in list_lines]
                    ).exclude(invoiced=True).update(invoiced=True)
                return context
            else:
                error = _('No matching sales order line was found')
        else:
            error = _('All selected lines are already invoiced')
    else:
        error = _('No lines were selected')
    context['error'] = error
    return context
The pk and list_lines belong to delivery notes (albaranes); we need the information from the order lines.
17,645
def _run(command, quiet=False, timeout=None):
    try:
        with _spawn(command, quiet, timeout) as child:
            command_output = child.read().strip().replace("\r\n", "\n")
    except pexpect.TIMEOUT:
        logger.info(f"command {command} timed out")
        raise Error()
    return command_output
Run a command, returns command output.
17,646
def install_package_requirements(self, psrc, stream_output=None):
    # Path fragments and the script name are reconstructions; the string
    # literals were dropped during extraction.
    package = self.target + '/' + psrc
    assert isdir(package), package
    reqname = '/requirements.txt'
    if not exists(package + reqname):
        reqname = '/pip-requirements.txt'
        if not exists(package + reqname):
            return
    return self.user_run_script(
        script=scripts.get_script_path('install_reqs.sh'),
        args=['/project/' + psrc + reqname],
        rw_venv=True,
        rw_project=True,
        stream_output=stream_output
    )
Install from requirements.txt file found in psrc :param psrc: name of directory in environment directory
17,647
def dinic(graph, capacity, source, target):
    assert source != target
    add_reverse_arcs(graph, capacity)
    Q = deque()
    total = 0
    n = len(graph)
    flow = [[0] * n for u in range(n)]
    while True:
        Q.appendleft(source)
        lev = [None] * n
        lev[source] = 0
        while Q:
            u = Q.pop()
            for v in graph[u]:
                if lev[v] is None and capacity[u][v] > flow[u][v]:
                    lev[v] = lev[u] + 1
                    Q.appendleft(v)
        if lev[target] is None:
            return flow, total
        up_bound = sum(capacity[source][v] for v in graph[source]) - total
        total += _dinic_step(graph, capacity, lev, flow, source, target, up_bound)
Maximum flow by Dinic :param graph: directed graph in listlist or listdict format :param capacity: in matrix format or same listdict graph :param int source: vertex :param int target: vertex :returns: skew symmetric flow matrix, flow value :complexity: :math:`O(|V|^2 |E|)`
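A small usage sketch, assuming the tryalgo-style helpers add_reverse_arcs and _dinic_step are available alongside the function:

from collections import deque
# two disjoint unit-capacity paths 0->1->3 and 0->2->3: max flow is 2
graph = [[1, 2], [0, 3], [0, 3], [1, 2]]
capacity = [[0, 1, 1, 0],
            [0, 0, 0, 1],
            [0, 0, 0, 1],
            [0, 0, 0, 0]]
flow, value = dinic(graph, capacity, source=0, target=3)
print(value)  # 2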
17,648
def new(self, limit=None): return self._reddit.new(self.display_name, limit=limit)
GETs new links from this subreddit. Calls :meth:`narwal.Reddit.new`. :param limit: max number of links to return
17,649
def get_obj(self, vimtype, name, folder=None):
    obj = None
    content = self.service_instance.RetrieveContent()
    if folder is None:
        folder = content.rootFolder
    container = content.viewManager.CreateContainerView(folder, [vimtype], True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    container.Destroy()
    return obj
Return an object by name, if name is None the first found object is returned
17,650
def rewriteFasta(sequence, sequence_name, fasta_in, fasta_out):
    # File modes and the nucleotide codes are reconstructions; the string
    # literals were dropped during extraction. Also fixed: close() was
    # referenced without being called.
    f = open(fasta_in, 'r')
    f2 = open(fasta_out, 'w')
    lines = f.readlines()
    i = 0
    while i < len(lines):
        line = lines[i]
        if line[0] == ">":
            f2.write(line)
            fChr = line.split(" ")[0]
            fChr = fChr[1:]
            if fChr == sequence_name:
                code = ['N', 'A', 'C', 'T', 'G']
                firstbase = lines[i + 1][0]
                while firstbase in code:
                    i = i + 1
                    firstbase = lines[i][0]
                s = 0
                while s <= len(sequence):
                    f2.write(sequence[s:s + 60] + "\n")
                    s = s + 60
            else:
                i = i + 1
        else:
            f2.write(line)
            i = i + 1
    f2.close()
    f.close()
Rewrites a specific sequence in a multifasta file while keeping the sequence header. :param sequence: a string with the sequence to be written :param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2) :param fasta_in: /path/to/original.fa :param fasta_out: /path/to/destination.fa :returns: nothing
17,651
def is_ready(self):
    if not self._thread:
        return False
    if not self._ready.is_set():
        return False
    return True
Is thread & ioloop ready. :returns bool:
17,652
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int:
    for die, count in Counter(dice).most_common(1):
        if count >= min_same_faces:
            return sum(dice)
    return 0
Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise return zero. Only works for 3 or more min_same_faces.
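Example scores (assumes `from collections import Counter` and `from typing import List`, which the snippet relies on):

print(score_x_of_a_kind_yahtzee([3, 3, 3, 4, 5], min_same_faces=3))  # 18: three 3s, score is sum of all dice
print(score_x_of_a_kind_yahtzee([3, 3, 4, 4, 5], min_same_faces=3))  # 0: no face appears three times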
17,653
def digest_file(fname):
    size = 4096
    hval = hashlib.new(HASH_TYPE)
    with open(fname, 'rb') as fd:
        for chunk in iter(lambda: fd.read(size), b''):
            hval.update(chunk)
    return hval.hexdigest()
Digest files using SHA-2 (256-bit) TESTING Produces identical output to `openssl sha256 FILE` for the following: * on all source .py files and some binary pyc files in parent dir * empty files with different names * 3.3GB DNAse Hypersensitive file * empty file, file with one space, file with one return all produce * distinct output PERF takes about 20 seconds to hash 3.3GB file on an empty file and on build.py INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file WARNING: not clear if we need to pad file bytes for proper cryptographic hashing
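A self-contained check of the chunked digest against hashlib on the full payload (HASH_TYPE is assumed to be 'sha256' per the docstring):

import hashlib, os, tempfile
HASH_TYPE = 'sha256'
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello\n')
print(digest_file(f.name) == hashlib.sha256(b'hello\n').hexdigest())  # True
os.unlink(f.name)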
17,654
def parse_scalar_type_definition(lexer: Lexer) -> ScalarTypeDefinitionNode:
    start = lexer.token
    description = parse_description(lexer)
    expect_keyword(lexer, "scalar")
    name = parse_name(lexer)
    directives = parse_directives(lexer, True)
    return ScalarTypeDefinitionNode(
        description=description, name=name, directives=directives,
        loc=loc(lexer, start)
    )
ScalarTypeDefinition: Description? scalar Name Directives[Const]?
17,655
def Server(self):
    # The context-key literals and API-call arguments are reconstructions;
    # the strings were dropped during extraction.
    if self.context_key == 'add_server':
        server_id = clc.v2.API.Call('GET', self.context_val,
                                    session=self.session)['id']
        return(clc.v2.Server(id=server_id, alias=self.alias, session=self.session))
    elif self.context_key == 'server':
        return(clc.v2.Server(id=self.context_val, alias=self.alias,
                             session=self.session))
    else:
        raise(clc.CLCException("%s object not server" % self.context_key))
Return server associated with this request. >>> d = clc.v2.Datacenter() >>> q = clc.v2.Server.Create(name="api2",cpu=1,memory=1,group_id=d.Groups().Get("Default Group").id,template=d.Templates().Search("centos-6-64")[0].id,network_id=d.Networks().networks[0].id,ttl=4000) >>> q.WaitUntilComplete() 0 >>> q.success_requests[0].Server() <clc.APIv2.server.Server object at 0x1095a8390> >>> print _ VA1BTDIAPI214
17,656
def isdir(self, path):
    result = True
    try:
        self.sftp_client.lstat(path)
    except FileNotFoundError:
        result = False
    return result
Return true if the path refers to an existing directory. Parameters ---------- path : str Path of directory on the remote side to check.
17,657
def get_or_create(self, um_from_user, um_to_user, message):
    created = False
    try:
        contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
                           Q(um_from_user=um_to_user, um_to_user=um_from_user))
    except self.model.DoesNotExist:
        created = True
        contact = self.create(um_from_user=um_from_user,
                              um_to_user=um_to_user,
                              latest_message=message)
    return (contact, created)
Get or create a Contact We override Django's :func:`get_or_create` because we want contact to be unique in a bi-directional manner.
17,658
async def step(self, step_id, session, scenario=None):
    # Event names and scenario dict keys are reconstructions; the string
    # literals were dropped during extraction.
    if scenario is None:
        scenario = pick_scenario(self.wid, step_id)
    try:
        await self.send_event('scenario_start', scenario=scenario)
        await scenario['func'](session, *scenario['args'], **scenario['kw'])
        await self.send_event('scenario_success', scenario=scenario)
        if scenario['delay'] > 0.:
            await cancellable_sleep(scenario['delay'])
        return 1
    except Exception as exc:
        await self.send_event('scenario_failure', scenario=scenario,
                              exception=exc)
        if self.args.verbose > 0:
            self.console.print_error(exc)
            await self.console.flush()
        return -1
Single scenario call. Returns 1 when it works, -1 when the script failed, and 0 when the test is stopping or needs to stop.
17,659
def add_statements(self, pmid, stmts):
    if pmid not in self.stmts:
        self.stmts[pmid] = stmts
    else:
        self.stmts[pmid] += stmts
Add INDRA Statements to the incremental model indexed by PMID. Parameters ---------- pmid : str The PMID of the paper from which statements were extracted. stmts : list[indra.statements.Statement] A list of INDRA Statements to be added to the model.
17,660
def recursive_refs(envs, name):
    # 'name' and 'refs' keys restored from the docstring's example data.
    refs_by_name = {
        env['name']: set(env['refs'])
        for env in envs
    }
    refs = refs_by_name[name]
    if refs:
        indirect_refs = set(itertools.chain.from_iterable([
            recursive_refs(envs, ref)
            for ref in refs
        ]))
    else:
        indirect_refs = set()
    return set.union(refs, indirect_refs)
Return set of recursive refs for given env name >>> local_refs = sorted(recursive_refs([ ... {'name': 'base', 'refs': []}, ... {'name': 'test', 'refs': ['base']}, ... {'name': 'local', 'refs': ['test']}, ... ], 'local')) >>> local_refs == ['base', 'test'] True
17,661
def em_schedule(**kwargs):
    # Keyword names and the output-file pattern are restored from the
    # docstring; the string literals were dropped during extraction.
    mdrunner = kwargs.pop('mdrunner', None)
    integrators = kwargs.pop('integrators', ['l-bfgs', 'steep'])
    kwargs.pop('integrator', None)
    nsteps = kwargs.pop('nsteps', [100, 1000])
    outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator)
               for i, integrator in enumerate(integrators)]
    outputs[-1] = kwargs.pop('output', 'em.pdb')
    files = {'struct': kwargs.pop('struct', None)}
    for i, integrator in enumerate(integrators):
        struct = files['struct']
        logger.info("[em %d] energy minimize with %s for maximum %d steps",
                    i, integrator, nsteps[i])
        kwargs.update({'struct': struct, 'output': outputs[i],
                       'integrator': integrator, 'nsteps': nsteps[i]})
        if not integrator == 'l-bfgs':
            kwargs['mdrunner'] = mdrunner
        else:
            kwargs['mdrunner'] = None
            logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot "
                           "do parallel runs.", i)
        files = energy_minimize(**kwargs)
    return files
Run multiple energy minimizations one after each other. :Keywords: *integrators* list of integrators (from 'l-bfgs', 'cg', 'steep') [['bfgs', 'steep']] *nsteps* list of maximum number of steps; one for each integrator in in the *integrators* list [[100,1000]] *kwargs* mostly passed to :func:`gromacs.setup.energy_minimize` :Returns: dictionary with paths to final structure ('struct') and other files :Example: Conduct three minimizations: 1. low memory Broyden-Goldfarb-Fletcher-Shannon (BFGS) for 30 steps 2. steepest descent for 200 steps 3. finish with BFGS for another 30 steps We also do a multi-processor minimization when possible (i.e. for steep (and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see :mod:`gromacs.run` for details):: import gromacs.run gromacs.setup.em_schedule(struct='solvate/ionized.gro', mdrunner=gromacs.run.MDrunnerOpenMP64, integrators=['l-bfgs', 'steep', 'l-bfgs'], nsteps=[50,200, 50]) .. Note:: You might have to prepare the mdp file carefully because at the moment one can only modify the *nsteps* parameter on a per-minimizer basis.
17,662
def loads(s, model=None, parser=None):
    with StringIO(s) as f:
        return load(f, model=model, parser=parser)
Deserialize s (a str) to a Python object.
17,663
def offset_mask(mask):
    def axis_data(axis):
        x = mask.sum(axis)
        trimmed_front = N.trim_zeros(x, "f")
        offset = len(x) - len(trimmed_front)
        size = len(N.trim_zeros(trimmed_front, "b"))
        return offset, size

    xo, xs = axis_data(0)
    yo, ys = axis_data(1)
    array = mask[yo:yo + ys, xo:xo + xs]
    offset = (yo, xo)
    return offset, array
Returns a mask shrunk to the 'minimum bounding rectangle' of the nonzero portion of the previous mask, and its offset from the original. Useful to find the smallest rectangular section of the image that can be extracted to include the entire geometry. Conforms to the y-first expectations of numpy arrays rather than x-first (geodata).
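A minimal sketch of the bounding-rectangle extraction (N is numpy, matching the alias used in the function):

import numpy as N
mask = N.zeros((5, 6), dtype=int)
mask[1:3, 2:5] = 1
offset, sub = offset_mask(mask)
print(offset, sub.shape)  # (1, 2) (2, 3): y-first offset and the trimmed mask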
17,664
def _get_writable_metadata(self):
    object_metadata = {"name": self.name}
    for key in self._changes:
        if key in _WRITABLE_FIELDS:
            object_metadata[key] = self._properties[key]
    return object_metadata
Get the object / blob metadata which is writable. This is intended to be used when creating a new object / blob. See the `API reference docs`_ for more information, the fields marked as writable are: * ``acl`` * ``cacheControl`` * ``contentDisposition`` * ``contentEncoding`` * ``contentLanguage`` * ``contentType`` * ``crc32c`` * ``md5Hash`` * ``metadata`` * ``name`` * ``storageClass`` For now, we don't support ``acl``, access control lists should be managed directly through :class:`ObjectACL` methods.
17,665
def process_module(self, module):
    if module.file_encoding:
        encoding = module.file_encoding
    else:
        encoding = "ascii"
    with module.stream() as stream:
        for lineno, line in enumerate(stream):
            self._check_encoding(lineno + 1, line, encoding)
Inspect the source file to find encoding problems.
17,666
def advance_operation_time(self, operation_time):
    if not isinstance(operation_time, Timestamp):
        raise TypeError("operation_time must be an instance "
                        "of bson.timestamp.Timestamp")
    self._advance_operation_time(operation_time)
Update the operation time for this session. :Parameters: - `operation_time`: The :data:`~pymongo.client_session.ClientSession.operation_time` from another `ClientSession` instance.
17,667
def print_experiments(experiments):
    headers = ["JOB NAME", "CREATED", "STATUS", "DURATION(s)", "INSTANCE",
               "DESCRIPTION", "METRICS"]
    expt_list = []
    for experiment in experiments:
        expt_list.append([normalize_job_name(experiment.name),
                          experiment.created_pretty,
                          experiment.state,
                          experiment.duration_rounded,
                          experiment.instance_type_trimmed,
                          experiment.description,
                          format_metrics(experiment.latest_metrics)])
    floyd_logger.info(tabulate(expt_list, headers=headers))
Prints job details in a table. Includes urls and mode parameters
17,668
def DeleteInstance(self, si, logger, session, vcenter_data_model, vm_uuid, vm_name):
    vm = self.pv_service.find_by_uuid(si, vm_uuid)
    if vm is not None:
        result = self.pv_service.destroy_vm(vm=vm, logger=logger)
    else:
        resource___format = "Could not find the VM {0},will remove the resource.".format(vm_name)
        logger.info(resource___format)
        result = resource___format
    return result
:param logger: :param CloudShellAPISession session: :param str vm_name: This is the resource name :return:
17,669
def to_json(self):
    # JSON field names follow the Contentful content-type schema; the
    # string literals were dropped during extraction.
    result = super(ContentType, self).to_json()
    result.update({
        'name': self.name,
        'description': self.description,
        'displayField': self.display_field,
        'fields': [f.to_json() for f in self.fields]
    })
    return result
Returns the JSON representation of the content type.
17,670
def _close(self):
    if self._state != "closed":
        self.event(DisconnectedEvent(self._dst_addr))
        self._set_state("closed")
    if self._socket is None:
        return
    try:
        self._socket.shutdown(socket.SHUT_RDWR)
    except socket.error:
        pass
    self._socket.close()
    self._socket = None
    self._write_queue.clear()
    self._write_queue_cond.notify()
Same as `close` but expects `lock` acquired.
17,671
def hash(self):
    hashed = super(Group, self).hash()
    return khash(hashed, frozenset(self._values))
:rtype: int :return: hash of the field
17,672
def crc(self):
    result = self._data.fast_hash()
    if hasattr(self.mesh, 'crc'):
        result ^= self.mesh.crc()
    return result
A checksum for the current visual object and its parent mesh. Returns ---------- crc: int, checksum of data in visual object and its parent mesh
17,673
def restore_review_history_for(brain_or_object):
    # The original control flow around setStatusOf was mangled during
    # extraction; the loop below is a reconstruction.
    review_history = get_purged_review_history_for(brain_or_object)
    obj = api.get_object(brain_or_object)
    wf_tool = api.get_tool("portal_workflow")
    wf_ids = get_workflow_ids_for(brain_or_object)
    wfs = map(lambda wf_id: wf_tool.getWorkflowById(wf_id), wf_ids)
    wfs = filter(lambda wf: wf.state_var == "review_state", wfs)
    if not wfs:
        logger.error("No valid workflow found for {}".format(api.get_id(obj)))
        return
    workflow = wfs[0]
    for history in review_history:
        wf_tool.setStatusOf(workflow.id, obj, history)
    indexes = ["review_state", "is_active"]
    obj.reindexObject(idxs=indexes)
Restores the review history for the given brain or object
17,674
def request(self, method, path, contents, headers, decode_json=False,
            stream=False, query=None, cdn=False):
    # Path-splitting literals and the exception messages are
    # reconstructions; the strings were dropped during extraction.
    if cdn:
        raise Exception('CDN is not supported by this client')
    if isinstance(contents, six.string_types):
        contents = StringIO(contents)
    if not headers:
        headers = {}
    if not query:
        query = {}
    rpath = path.lstrip('/')
    if '/' in rpath:
        container_name, object_name = rpath.split('/', 1)
    else:
        container_name = rpath
        object_name = ''
    if not container_name:
        status, reason, hdrs, body = self._account(
            method, contents, headers, stream, query, cdn)
    elif not object_name:
        status, reason, hdrs, body = self._container(
            method, container_name, contents, headers, stream, query, cdn)
    else:
        status, reason, hdrs, body = self._object(
            method, container_name, object_name, contents, headers, stream,
            query, cdn)
    if status and status // 100 != 5:
        if not stream and decode_json and status // 100 == 2:
            if body:
                body = loads(body)
            else:
                body = None
        return (status, reason, hdrs, body)
    raise Exception('%s %s failed: %s %s' % (method, path, status, reason))
See :py:func:`swiftly.client.client.Client.request`
17,675
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
    hparams = trainer_lib.create_hparams(
        hparams_set, data_dir=data_dir, problem_name=problem_name)
    translate_model = registry.model(model_name)(
        hparams, tf.estimator.ModeKeys.EVAL)
    inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
    targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
    translate_model({
        "inputs": inputs,
        "targets": targets,
    })
    att_mats = get_att_mats(translate_model)
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        samples = translate_model.infer({
            "inputs": inputs,
        }, beam_size=beam_size)["outputs"]
    return inputs, targets, samples, att_mats
Build the graph required to fetch the attention weights. Args: hparams_set: HParams set to build the model with. model_name: Name of model. data_dir: Path to directory containing training data. problem_name: Name of problem. beam_size: (Optional) Number of beams to use when decoding a translation. If set to 1 (default) then greedy decoding is used. Returns: Tuple of ( inputs: Input placeholder to feed in ids to be translated. targets: Targets placeholder to feed to translation when fetching attention weights. samples: Tensor representing the ids of the translation. att_mats: Tensors representing the attention weights. )
17,676
def expandService(service_element):
    uris = sortedURIs(service_element)
    if not uris:
        uris = [None]
    expanded = []
    for uri in uris:
        type_uris = getTypeURIs(service_element)
        expanded.append((type_uris, uri, service_element))
    return expanded
Take a service element and expand it into an iterator of: ([type_uri], uri, service_element)
17,677
def filter_iqr(array, lower, upper):
    upper, lower = iqr(array, upper, lower)
    new = list(array)
    for x in new[:]:
        if x < lower or x > upper:
            new.remove(x)
    return new
Return elements which falls within specified interquartile range. Arguments: array (list): Sequence of numbers. lower (float): Lower bound for IQR, in range 0 <= lower <= 1. upper (float): Upper bound for IQR, in range 0 <= upper <= 1. Returns: list: Copy of original list, with elements outside of IQR removed.
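Hypothetical usage, assuming the companion iqr() helper returns the (upper, lower) cutoff values for the given quantile bounds:

data = [1, 2, 3, 4, 5, 100]
print(filter_iqr(data, lower=0.25, upper=0.75))  # outliers such as 100 are removed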
17,678
def stats(self, start, end, fields=None):
    start = self.pickler.dumps(start)
    end = self.pickler.dumps(end)
    backend = self.read_backend
    return backend.execute(
        backend.structure(self).stats(start, end, fields),
        self._stats)
Perform a multivariate statistic calculation of this :class:`ColumnTS` from a *start* date/datetime to an *end* date/datetime. :param start: Start date for analysis. :param end: End date for analysis. :param fields: Optional subset of :meth:`fields` to perform analysis on. If not provided all fields are included in the analysis.
17,679
def _make_walker(self, *args, **kwargs):
    walker = self.walker_class(*args, **kwargs)
    return walker
Create a walker instance.
17,680
def check_label(labels, required, value_regex, target_labels):
    present = target_labels is not None and \
        not set(labels).isdisjoint(set(target_labels))
    if present:
        if required and not value_regex:
            return True
        elif value_regex:
            pattern = re.compile(value_regex)
            present_labels = set(labels) & set(target_labels)
            for l in present_labels:
                if not bool(pattern.search(target_labels[l])):
                    return False
            return True
        else:
            return False
    else:
        return not required
Check if the label is required and match the regex :param labels: [str] :param required: bool (if the presence means pass or not) :param value_regex: str (using search method) :param target_labels: [str] :return: bool (required==True: True if the label is present and match the regex if specified) (required==False: True if the label is not present)
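Illustrative calls (target_labels is treated as a mapping from label name to value, since the regex branch indexes it):

labels = ['maintainer']
target = {'maintainer': '[email protected]'}
print(check_label(labels, required=True, value_regex=None, target_labels=target))   # True: present
print(check_label(labels, required=True, value_regex=r'@example\.com$',
                  target_labels=target))                                            # True: value matches
print(check_label(labels, required=False, value_regex=None, target_labels={}))      # True: absent and not required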
17,681
def show_vcs_output_virtual_ip_address(self, **kwargs):
    config = ET.Element("config")
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    output = ET.SubElement(show_vcs, "output")
    virtual_ip_address = ET.SubElement(output, "virtual-ip-address")
    virtual_ip_address.text = kwargs.pop('virtual_ip_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
17,682
def path_to_resource(project, path, type=None):
    # 'file'/'folder' literals restored from the docstring.
    project_path = path_relative_to_project_root(project, path)
    if project_path is None:
        project_path = rope.base.project._realpath(path)
        project = rope.base.project.get_no_project()
    if type is None:
        return project.get_resource(project_path)
    if type == 'file':
        return project.get_file(project_path)
    if type == 'folder':
        return project.get_folder(project_path)
    return None
Get the resource at path You only need to specify `type` if `path` does not exist. It can be either 'file' or 'folder'. If the type is `None` it is assumed that the resource already exists. Note that this function uses `Project.get_resource()`, `Project.get_file()`, and `Project.get_folder()` methods.
17,683
def getmergerequests(self, project_id, page=1, per_page=20, state=None):
    # Query-parameter names and the URL template follow GitLab API
    # conventions; the literals were dropped during extraction.
    data = {'page': page, 'per_page': per_page, 'state': state}
    request = requests.get(
        '{0}/{1}/merge_requests'.format(self.projects_url, project_id),
        params=data, headers=self.headers, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    if request.status_code == 200:
        return request.json()
    else:
        return False
Get all the merge requests for a project. :param project_id: ID of the project to retrieve merge requests for :param page: Page Number :param per_page: Records per page :param state: Passes merge request state to filter them by it :return: list with all the merge requests
17,684
def connect(db_url=None,
            pooling=hgvs.global_config.uta.pooling,
            application_name=None,
            mode=None,
            cache=None):
    # Log-message fragments and the 'postgresql' scheme literal are
    # reconstructions; the strings were dropped during extraction.
    _logger.debug('connecting to ' + str(db_url) + '...')
    if db_url is None:
        db_url = _get_ncbi_db_url()
    url = _parse_url(db_url)
    if url.scheme == 'postgresql':
        conn = NCBI_postgresql(
            url=url, pooling=pooling, application_name=application_name,
            mode=mode, cache=cache)
    else:
        raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url))
    _logger.info('connected to ' + str(db_url) + '...')
    return conn
Connect to a uta/ncbi database instance. :param db_url: URL for database connection :type db_url: string :param pooling: whether to use connection pooling (postgresql only) :type pooling: bool :param application_name: log application name in connection (useful for debugging; PostgreSQL only) :type application_name: str When called with an explicit db_url argument, that db_url is used for connecting. When called without an explicit argument, the function default is determined by the environment variable UTA_DB_URL if it exists, or hgvs.datainterface.uta.public_db_url otherwise. >>> hdp = connect() >>> hdp.schema_version() '1.1' The format of the db_url is driver://user:pass@host/database (the same as that used by SQLAlchemy). Examples: A remote public postgresql database: postgresql://anonymous:[email protected]/uta A local postgresql database: postgresql://localhost/uta A local SQLite database: sqlite:////tmp/uta-0.0.6.db For postgresql db_urls, pooling=True causes connect to use a psycopg2.pool.ThreadedConnectionPool.
17,685
def crc(self):
    # The cached-attribute name and the numpy flag key are reconstructions;
    # the string literals were dropped during extraction.
    if self._modified_c or not hasattr(self, '_hashed_crc'):
        if self.flags['C_CONTIGUOUS']:
            self._hashed_crc = crc32(self)
        else:
            contiguous = np.ascontiguousarray(self)
            self._hashed_crc = crc32(contiguous)
        self._modified_c = False
    return self._hashed_crc
A zlib.crc32 or zlib.adler32 checksum of the current data. Returns ----------- crc: int, checksum from zlib.crc32 or zlib.adler32
17,686
def reset(self, labels=None):
    if labels is None:
        labels = self.dfltlbl
    if labels == self.alllbl:
        labels = self.t0.keys()
    elif not isinstance(labels, (list, tuple)):
        labels = [labels, ]
    for lbl in labels:
        if lbl not in self.t0:
            # Error-message text reconstructed; the literal was dropped
            # during extraction.
            raise KeyError('Unrecognized timer key %s' % lbl)
        self.t0[lbl] = None
        self.td[lbl] = 0.0
Reset specified timer(s). Parameters ---------- labels : string or list, optional (default None) Specify the label(s) of the timer(s) to be stopped. If it is ``None``, stop the default timer with label specified by the ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to the string specified by the ``alllbl`` parameter of :meth:`__init__`, stop all timers.
17,687
def origin_west_asia(origin):
    return origin_armenia(origin) or origin_azerbaijan(origin) \
        or origin_bahrain(origin) or origin_cyprus(origin) \
        or origin_georgia(origin) \
        or origin_iraq(origin) or origin_israel(origin) \
        or origin_jordan(origin) or origin_kuwait(origin) \
        or origin_lebanon(origin) or origin_oman(origin) \
        or origin_qatar(origin) or origin_saudi_arabia(origin) \
        or origin_syria(origin) or origin_turkey(origin) \
        or origin_united_arab_emirates(origin) or origin_yemen(origin)
Returns if the origin is located in Western Asia. Holds true for the following countries: * Armenia * Azerbaijan * Bahrain * Cyprus * Georgia * Iraq * Israel * Jordan * Kuwait * Lebanon * Oman * Qatar * Saudi Arabia * Syria * Turkey * United Arab Emirates * Yemen `origin` The origin to check.
17,688
def get_file_listing_sha(listing_paths: Iterable) -> str:
    return sha256(''.join(sorted(listing_paths)).encode()).hexdigest()
Return sha256 string for group of FTP listings.
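Because the listings are sorted before hashing, the digest is order-insensitive (assumes `from hashlib import sha256` and `from typing import Iterable`, which the snippet relies on):

a = get_file_listing_sha(['b.txt', 'a.txt'])
b = get_file_listing_sha(['a.txt', 'b.txt'])
print(a == b)  # True: same set of listings, same digest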
17,689
def init(self):
    evclass_shape = [16, 40, 10]
    evtype_shape = [16, 16, 40, 10]
    evclass_psf_shape = [16, 40, 10, 100]
    evtype_psf_shape = [16, 16, 40, 10, 100]
    self._hists_eff = dict()
    self._hists = dict(evclass_on=np.zeros(evclass_shape),
                       evclass_off=np.zeros(evclass_shape),
                       evclass_alpha=np.zeros([16, 40, 1]),
                       evtype_on=np.zeros(evtype_shape),
                       evtype_off=np.zeros(evtype_shape),
                       evtype_alpha=np.zeros([16, 1, 40, 1]),
                       evclass_psf_on=np.zeros(evclass_psf_shape),
                       evclass_psf_off=np.zeros(evclass_psf_shape),
                       evtype_psf_on=np.zeros(evtype_psf_shape),
                       evtype_psf_off=np.zeros(evtype_psf_shape),
                       )
Initialize histograms.
17,690
def bind_bar(self, sender=None, **kwargs):
    bar = kwargs.pop('bar')
    self.bars[bar.name] = bar
Binds a navigation bar into this extension instance.
17,691
def get_ajax_url(self):
    if self.ajax_url:
        return self.ajax_url
    # The URL pattern name passed to reverse() was lost during extraction;
    # 'ajax' here is a placeholder assumption.
    return reverse('ajax', kwargs=self.kwargs)
Get ajax url
17,692
def replace_markdown_cells(src, dst):
    # nbformat keys restored ('cells', 'cell_type', 'markdown', 'source');
    # the error-message texts are reconstructions.
    if len(src['cells']) != len(dst['cells']):
        raise ValueError('notebooks do not have the same number of cells')
    for n in range(len(src['cells'])):
        if src['cells'][n]['cell_type'] != dst['cells'][n]['cell_type']:
            raise ValueError('cell type mismatch')
        if src['cells'][n]['cell_type'] == 'markdown':
            dst['cells'][n]['source'] = src['cells'][n]['source']
Overwrite markdown cells in notebook object `dst` with corresponding cells in notebook object `src`.
17,693
def drop_scored_calls(self, names):
    # The 'scored_calls' column name is restored from the docstring; the
    # literal was dropped during extraction.
    def _remove(calls, names):
        d = dict([(k, v) for k, v in calls.items() if k not in names])
        return d
    if isinstance(names, str):
        names = [names]
    output = self.copy()
    output['scored_calls'] = output['scored_calls'].\
        apply(lambda x: _remove(x, names))
    return output
Take a name or list of scored call names and drop those from the scored calls Args: names (list): list of names to drop or a single string name to drop Returns: CellDataFrame: The CellDataFrame modified.
17,694
def _G(self, x, p):
    prefactor = (p + p ** 3) ** -1 * p
    if isinstance(x, np.ndarray):
        inds0 = np.where(x * p == 1)
        inds1 = np.where(x * p < 1)
        inds2 = np.where(x * p > 1)
        func = np.ones_like(x)
        func[inds0] = np.log(0.25 * x[inds0] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds0]) - 1) + \
            2 * p ** 2 * (self._u(x[inds0]) * np.arctanh(self._u(x[inds0]) ** -1) +
                          np.log(0.5 * x[inds0]))
        func[inds1] = np.log(0.25 * x[inds1] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds1]) - 1) + \
            2 * p ** 2 * (self._u(x[inds1]) * np.arctanh(self._u(x[inds1]) ** -1) +
                          np.log(0.5 * x[inds1])) + \
            2 * self._g(x[inds1], p) * np.arctanh(self._g(x[inds1], p))
        func[inds2] = np.log(0.25 * x[inds2] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds2]) - 1) + \
            2 * p ** 2 * (self._u(x[inds2]) * np.arctanh(self._u(x[inds2]) ** -1) +
                          np.log(0.5 * x[inds2])) - \
            2 * self._f(x[inds2], p) * np.arctan(self._f(x[inds2], p))
    else:
        if x * p == 1:
            func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
                2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x))
        elif x * p < 1:
            func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
                2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x)) + \
                2 * self._g(x, p) * np.arctanh(self._g(x, p))
        else:
            func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
                2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) + np.log(0.5 * x)) - \
                2 * self._f(x, p) * np.arctan(self._f(x, p))
    return func * prefactor
Analytic solution of the 2D projected mass integral: integral of 2 * pi * x * kappa(x) dx. :param x: :param p: :return:
17,695
def with_wrapper(self, wrapper=None, name=None):
    # Default name pattern restored from the docstring.
    name = name or '{}_custom_wrapper'.format(self.name)
    return self.__class__(self.data, name=name, wrapper=wrapper)
Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper.
17,696
def guess_locktime(redeem_script):
    # The body was mangled during extraction; this reconstruction follows
    # the docstring: return the constant preceding OP_CHECKLOCKTIMEVERIFY.
    script_array = redeem_script.split()
    try:
        loc = script_array.index('OP_CHECKLOCKTIMEVERIFY')
        return int(script_array[loc - 1], 16)
    except ValueError:
        return 0
str -> int If OP_CLTV is used, guess an appropriate lock_time Otherwise return 0 (no lock time) Fails if there's not a constant before OP_CLTV
17,697
def _update_states(self,
                   final_states: RnnStateStorage,
                   restoration_indices: torch.LongTensor) -> None:
    new_unsorted_states = [state.index_select(1, restoration_indices)
                           for state in final_states]
    if self._states is None:
        # The body of this branch was lost during extraction; storing the
        # new states directly is the reconstruction here.
        self._states = tuple(state.detach() for state in new_unsorted_states)
    else:
        current_state_batch_size = self._states[0].size(1)
        new_state_batch_size = final_states[0].size(1)
        used_new_rows_mask = [(state[0, :, :].sum(-1) != 0.0).float().view(
            1, new_state_batch_size, 1) for state in new_unsorted_states]
        new_states = []
        if current_state_batch_size > new_state_batch_size:
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
                old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
                new_states.append(old_state.detach())
        else:
            new_states = []
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                masked_old_state = old_state * (1 - used_mask)
                new_state += masked_old_state
                new_states.append(new_state.detach())
        self._states = tuple(new_states)
After the RNN has run forward, the states need to be updated. This method just sets the state to the updated new state, performing several pieces of book-keeping along the way - namely, unsorting the states and ensuring that the states of completely padded sequences are not updated. Finally, it also detaches the state variable from the computational graph, such that the graph can be garbage collected after each batch iteration. Parameters ---------- final_states : ``RnnStateStorage``, required. The hidden states returned as output from the RNN. restoration_indices : ``torch.LongTensor``, required. The indices that invert the sorting used in ``sort_and_run_forward`` to order the states with respect to the lengths of the sequences in the batch.
17,698
def formatted_message(self): return render_template_string( self.body, event=self, meta=self.meta, original=self.original, updated=self.updated, version=self.version, )
Method that will return the formatted message for the event. This formatting is done with Jinja and the template text is stored in the ``body`` attribute. The template is supplied the following variables, as well as the built in Flask ones: - ``event``: This is the event instance that this method belongs to. - ``meta``: This is a dictionary of cached values that have been stored when the event was created based upon the event's DSL. - ``original``: This is a dump of the instance before the instance was updated. - ``updated``: This is a dump of the instance after it was updated. - ``version``: This is the version of the event DSL. This property is cached because Jinja rendering is slower than raw Python string formatting.
17,699
def find_element_by_id(self, id_): return self.find_element(by=By.ID, value=id_)
Finds an element by id. :Args: - id\\_ - The id of the element to be found. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_id('foo')