Dataset columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k).
15,800
def query_pre_approvals(self, initial_date, final_date, page=None, max_results=None):
    last_page = False
    results = []
    while last_page is False:
        search_result = self._consume_query_pre_approvals(
            initial_date, final_date, page, max_results)
        results.extend(search_result.pre_approvals)
        if search_result.current_page is None or \
                search_result.total_pages is None or \
                search_result.current_page == search_result.total_pages:
            last_page = True
        else:
            page = search_result.current_page + 1
    return results
Query pre-approvals by date range.
15,801
import random

def get_found_locations():
    # The empty-string join literals were stripped in the source; '' is
    # restored here (confirmed by the commented code in the docstring).
    random.seed(1704)
    print(''.join([chr(random.randint(32, 122)) for x in range(100000)])[59741:59746])
    random.seed(1572)
    print(''.join([chr(random.randint(32, 122)) for x in range(100000)])[73834:73838])
    random.seed(561240)
    print(''.join([chr(random.randint(32, 122)) for x in range(3)]))
    random.seed(706075)
    print(''.join([chr(random.randint(32, 122)) for x in range(3)]))
INFO:__main__:found HELP in 1572
INFO:__main__:found MATHS in 1704
INFO:__main__:found ROCKS in 1975

#random.seed(1572)
#garbage = ''.join([chr(random.randint(32,122)) for x in range(100000)])
#wrd_location = garbage.find('HELP')
#print(wrd_location)  # 73834
#print(garbage[wrd_location:wrd_location+4])  # HELP
15,802
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2):
    return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
Empirical Guinier-Porod-Guinier scattering

Inputs:
-------
    ``q``: independent variable
    ``G``: factor for the first Guinier-branch
    ``Rg1``: the first radius of gyration
    ``alpha``: the power-law exponent
    ``Rg2``: the second radius of gyration

Formula:
--------
    ``G*exp(-q^2*Rg1^2/3)`` if ``q < q_sep1``.
    ``A*q^alpha`` if ``q_sep1 <= q <= q_sep2``.
    ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2 < q``.

    The parameters ``A``, ``G2``, ``q_sep1``, ``q_sep2`` are determined
    from conditions of smoothness at the cross-overs.

Literature:
-----------
    B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010)
    43, 716-719.
15,803
def upload_to_cache_server(fpath):
    # The URL and field-name literals were stripped in the source; the
    # values below are placeholders, not the original endpoints.
    url_base = 'http://...'  # original base URL elided in the source
    url_upload = '%s/upload' % url_base
    url_download = '%s/download/' % url_base
    file_field = 'file'
    try:
        import requests
        response = requests.post(url_upload,
                                 files={file_field: open(fpath, 'rb')},
                                 timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
        info_cache = response.text
        return url_download + info_cache
    except (ImportError, requests.RequestException) as e:
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
Uploads .torrent file to a cache server. Returns upload file URL. :rtype: str
15,804
from math import acos

def V_horiz_guppy(D, L, a, h, headonly=False):
    R = 0.5*D
    Af = R*R*acos((R-h)/R) - (R-h)*(2.*R*h - h*h)**0.5
    Vf = (2.*a*R*R/3.*acos(1. - h/R)
          + 2.*a/9./R*(2*R*h - h**2)**0.5*(2*h - 3*R)*(h + R))
    if headonly:
        Vf = Vf/2.
    else:
        Vf += Af*L
    return Vf
Calculates volume of a tank with guppy heads, according to [1]_.

.. math::
    V_f = A_fL + \frac{2aR^2}{3}\cos^{-1}\left(1 - \frac{h}{R}\right)
    +\frac{2a}{9R}\sqrt{2Rh - h^2}(2h-3R)(h+R)

.. math::
    A_f = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

Parameters
----------
D : float
    Diameter of the main cylindrical section, [m]
L : float
    Length of the main cylindrical section, [m]
a : float
    Distance the guppy head extends on one side, [m]
h : float
    Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
    Function returns only the volume of a single head side if True

Returns
-------
V : float
    Volume [m^3]

Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.

>>> V_horiz_guppy(D=108., L=156., a=42., h=36)/231.
1931.7208029476762

References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
   2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
15,805
def _apply_record_checks(self, i, r, summarize=False,
                         report_unexpected_exceptions=True, context=None):
    # NB: the dict-key string literals below were stripped in the source
    # and are reconstructed as plausible values.
    for check, modulus in self._record_checks:
        if i % modulus == 0:
            rdict = self._as_dict(r)
            try:
                check(rdict)
            except RecordError as e:
                code = e.code if e.code is not None else RECORD_CHECK_FAILED
                p = {'code': code}
                if not summarize:
                    message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED]
                    p['message'] = message
                    p['row'] = i + 1
                    p['record'] = r
                    if context is not None:
                        p['context'] = context
                    if e.details is not None:
                        p['details'] = e.details
                yield p
            except Exception as e:
                if report_unexpected_exceptions:
                    p = {'code': UNEXPECTED_EXCEPTION}
                    if not summarize:
                        p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)
                        p['row'] = i + 1
                        p['record'] = r
                        p['exception'] = e
                        p['function'] = '%s: %s' % (check.__name__, check.__doc__)
                        if context is not None:
                            p['context'] = context
                    yield p
Apply record checks on `r`.
15,806
def driver(self):
    if not self._driver:
        raise AttributeError(
            "`driver` is not bound on this agent implementation ({}). "
            "Did you forget to call `super().on_bind_driver` when you "
            "override `on_bind_driver` in your subclass?".format(repr(self)))
    return self._driver
Return the driver this agent is related to. Raises ``AttributeError`` if the driver has not been bound yet. Returns: :py:class:`inherit from Poco <poco.pocofw.Poco>`: the driver this agent is related to.
15,807
def summary(dataset_uri, format):
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    creator_username = dataset._admin_metadata["creator_username"]
    frozen_at = dataset._admin_metadata["frozen_at"]
    num_items = len(dataset.identifiers)
    tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
                    for i in dataset.identifiers])
    if format == "json":
        # The JSON template strings were stripped in the source and are
        # reconstructed here as plausible values.
        json_lines = [
            "{",
            '  "name": "{}",'.format(dataset.name),
            '  "uuid": "{}",'.format(dataset.uuid),
            '  "creator_username": "{}",'.format(creator_username),
            '  "number_of_items": {},'.format(num_items),
            '  "size_in_bytes": {},'.format(tot_size),
            '  "frozen_at": {}'.format(frozen_at),
            "}",
        ]
        formatted_json = "\n".join(json_lines)
        colorful_json = pygments.highlight(
            formatted_json,
            pygments.lexers.JsonLexer(),
            pygments.formatters.TerminalFormatter())
        click.secho(colorful_json, nl=False)
    else:
        info = [
            ("name", dataset.name),
            ("uuid", dataset.uuid),
            ("creator_username", creator_username),
            ("number_of_items", str(num_items)),
            ("size", sizeof_fmt(tot_size).strip()),
            ("frozen_at", date_fmt(frozen_at)),
        ]
        for key, value in info:
            click.secho("{}: ".format(key), nl=False)
            click.secho(value, fg="green")
Report summary information about a dataset.
15,808
def __ComputeUploadConfig(self, media_upload_config, method_id):
    # The string literals below were stripped in the source and are
    # reconstructed following apitools conventions.
    config = base_api.ApiUploadInfo()
    if 'maxSize' in media_upload_config:
        config.max_size = self.__MaxSizeToInt(media_upload_config['maxSize'])
    if 'accept' not in media_upload_config:
        logging.warn('No accept types found for method %s', method_id)
    config.accept.extend([
        str(a) for a in media_upload_config.get('accept', '*/*')])
    for accept_pattern in config.accept:
        if not _MIME_PATTERN_RE.match(accept_pattern):
            logging.warn('Unrecognized MIME type: %s', accept_pattern)
    protocols = media_upload_config.get('protocols', {})
    for protocol in ('simple', 'resumable'):
        media = protocols.get(protocol, {})
        for attr in ('multipart', 'path'):
            if attr in media:
                setattr(config, '%s_%s' % (protocol, attr), media[attr])
    return config
Fill out the upload config for this method.
15,809
def faves(self, option):
    params = join_params(self.parameters, {"faves": option})
    return self.__class__(**params)
Set whether to filter by a user's faves list. Options available are user.ONLY, user.NOT, and None; default is None.
15,810
def on_success(self, metadata):
    handler = self.handlers.get("on_success")
    if callable(handler):
        handler(metadata)
    handler = self.handlers.get("on_summary")
    if callable(handler):
        handler()
Called when a SUCCESS message has been received.
15,811
def storeToXML(self, out, comment=None, encoding='UTF-8'):
    # The encoding default was stripped in the source; 'UTF-8' (the usual
    # default for XML properties files) is assumed here.
    dump_xml(self.data, out, comment=comment, encoding=encoding)
Write the `Properties` object's entries (in unspecified order) in XML properties format to ``out``. :param out: a file-like object to write the properties to :type out: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :return: `None`
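A minimal usage sketch of the method above; the `Properties` constructor call is an assumption, not taken from the source:

# Hypothetical usage, assuming a javaproperties-style Properties class.
props = Properties({"db.host": "localhost", "db.port": "5432"})
with open("app.properties.xml", "wb") as fp:
    props.storeToXML(fp, comment="Generated settings", encoding="UTF-8")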
15,812
def remove_choice(self, choice_name):
    self.choice_names.remove(choice_name)
    self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name,
                    "choices", escape.json_encode(self.choice_names))
    self.refresh()
Removes a choice from the experiment
15,813
def explode_dn(dn, notypes=0, flags=0):
    # The separator and empty-string literals were stripped in the source
    # and are restored here following python-ldap conventions.
    if not dn:
        return []
    dn_decomp = str2dn(dn, flags)
    rdn_list = []
    for rdn in dn_decomp:
        if notypes:
            rdn_list.append('+'.join([
                escape_dn_chars(avalue or '')
                for atype, avalue, dummy in rdn
            ]))
        else:
            rdn_list.append('+'.join([
                '='.join((atype, escape_dn_chars(avalue or '')))
                for atype, avalue, dummy in rdn
            ]))
    return rdn_list
explode_dn(dn [, notypes=0]) -> list This function takes a DN and breaks it up into its component parts. The notypes parameter is used to specify that only the component's attribute values be returned and not the attribute types.
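An illustrative call, assuming the python-ldap style helpers used above (the DN is made up):

dn = "cn=John Doe,ou=people,dc=example,dc=com"
explode_dn(dn)             # ['cn=John Doe', 'ou=people', 'dc=example', 'dc=com']
explode_dn(dn, notypes=1)  # ['John Doe', 'people', 'example', 'com']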
15,814
import binascii
import os

def question_image_filepath(instance, filename):
    # The join separator and leading path segment were stripped in the
    # source; '/' and a placeholder prefix are used here.
    return '/'.join(['questions',  # placeholder upload prefix
                     str(instance.question_level),
                     str(instance.question_level_id),
                     binascii.b2a_hex(os.urandom(15)),
                     filename])
Return a randomized upload path for a question image.
15,815
def set_imap_cb(self, w, index):
    name = imap.get_names()[index]
    self.t_.set(intensity_map=name)
This callback is invoked when the user selects a new intensity map from the preferences pane.
15,816
def move_up(self):
    old_index = self.current_index
    self.current_index -= 1
    self.__wrap_index()
    self.__handle_selections(old_index, self.current_index)
Try to select the button above the currently selected one. If a button is not there, wrap down to the bottom of the menu and select the last button.
15,817
def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10,
                    cluster_selection_method='eom',
                    allow_single_cluster=False,
                    match_reference_implementation=False):
    # The default for cluster_selection_method was stripped in the source;
    # 'eom' (the hdbscan default) is assumed here.
    condensed_tree = condense_tree(single_linkage_tree, min_cluster_size)
    stability_dict = compute_stability(condensed_tree)
    labels, probabilities, stabilities = get_clusters(
        condensed_tree, stability_dict, cluster_selection_method,
        allow_single_cluster, match_reference_implementation)
    return (labels, probabilities, stabilities,
            condensed_tree, single_linkage_tree)
Converts a pretrained tree and cluster size into a set of labels and probabilities.
15,818
def run():
    _parser_options()
    set_verbose(args["verbose"])
    if _check_global_settings():
        _load_db()
    else:
        exit(-1)
    _setup_server()
    if args["rollback"]:
        _server_rollback()
        okay("The server rollback appears to have been successful.")
        exit(0)
    _server_enable()
    _list_repos()
    _handle_install()
    _do_cron()
Main script entry to handle the arguments given to the script.
15,819
def unregister(self, bucket, name):
    # The assertion/exception messages were stripped in the source and are
    # reconstructed as plausible values.
    assert bucket in self, 'unknown bucket: %s' % bucket
    if name not in self[bucket]:
        raise NotRegistered('%s is not registered' % name)
    del self[bucket][name]
Remove the function from the registry by name
15,820
def rand_email():
    name = random.choice(string.ascii_letters) + \
        rand_str(string.ascii_letters + string.digits, random.randint(4, 14))
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    kind = random.choice(_all_email_kinds)
    return "%s@%s%s" % (name, domain, kind)
Random email. Usage Example:: >>> rand_email() [email protected]
15,821
def has_in_url_path(url, subs):
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    return any([sub in path for sub in subs])
Test if any of `subs` strings is present in the `url` path.
15,822
def oortC(self, R, t=0., nsigma=None, deg=False, phi=0.,
          epsrel=1.e-02, epsabs=1.e-05,
          grid=None, gridpoints=101, returnGrids=False,
          derivRGrid=None, derivphiGrid=None, derivGridpoints=101,
          derivHierarchgrid=False,
          hierarchgrid=False, nlevels=2, integrate_method='dopr54_c'):
    # The integrate_method default and the deriv= string arguments were
    # stripped in the source; 'dopr54_c', 'R' and 'phi' are restored here
    # following galpy conventions.
    if isinstance(grid, bool) and grid:
        (surfacemass, grid) = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=True, gridpoints=gridpoints, returnGrid=True,
            hierarchgrid=hierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method)
    elif isinstance(grid, evolveddiskdfGrid) or \
            isinstance(grid, evolveddiskdfHierarchicalGrid):
        surfacemass = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=grid, gridpoints=gridpoints, returnGrid=False,
            hierarchgrid=hierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method)
    if isinstance(derivRGrid, bool) and derivRGrid:
        (dsurfacemassdR, derivRGrid) = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=True, gridpoints=derivGridpoints,
            returnGrid=True, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method, deriv='R')
    elif isinstance(derivRGrid, evolveddiskdfGrid) or \
            isinstance(derivRGrid, evolveddiskdfHierarchicalGrid):
        dsurfacemassdR = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=derivRGrid, gridpoints=derivGridpoints,
            returnGrid=False, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method, deriv='R')
    if isinstance(derivphiGrid, bool) and derivphiGrid:
        (dsurfacemassdphi, derivphiGrid) = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=True, gridpoints=derivGridpoints,
            returnGrid=True, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method, deriv='phi')
    elif isinstance(derivphiGrid, evolveddiskdfGrid) or \
            isinstance(derivphiGrid, evolveddiskdfHierarchicalGrid):
        dsurfacemassdphi = self.vmomentsurfacemass(
            R, 0, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=derivphiGrid, gridpoints=derivGridpoints,
            returnGrid=False, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method, deriv='phi')
    meanvR = self.meanvR(R, t=t, nsigma=nsigma, deg=deg, phi=phi,
                         epsrel=epsrel, epsabs=epsabs,
                         grid=grid, gridpoints=gridpoints, returnGrid=False,
                         surfacemass=surfacemass,
                         hierarchgrid=hierarchgrid,
                         nlevels=nlevels, integrate_method=integrate_method,
                         use_physical=False)
    dmeanvTdphi = (self.vmomentsurfacemass(
        R, 0, 1, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
        epsabs=epsabs, grid=derivphiGrid, gridpoints=derivGridpoints,
        returnGrid=False, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
        integrate_method=integrate_method, deriv='phi')
        / surfacemass
        - self.vmomentsurfacemass(
            R, 0, 1, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=grid, gridpoints=gridpoints, returnGrid=False,
            hierarchgrid=hierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method)
        / surfacemass**2. * dsurfacemassdphi)
    dmeanvRdR = (self.vmomentsurfacemass(
        R, 1, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
        epsabs=epsabs, grid=derivRGrid, gridpoints=derivGridpoints,
        returnGrid=False, hierarchgrid=derivHierarchgrid, nlevels=nlevels,
        integrate_method=integrate_method, deriv='R')
        / surfacemass
        - self.vmomentsurfacemass(
            R, 1, 0, deg=deg, t=t, phi=phi, nsigma=nsigma, epsrel=epsrel,
            epsabs=epsabs, grid=grid, gridpoints=gridpoints, returnGrid=False,
            hierarchgrid=hierarchgrid, nlevels=nlevels,
            integrate_method=integrate_method)
        / surfacemass**2. * dsurfacemassdR)
    if returnGrids:
        return (0.5*(-meanvR/R - dmeanvTdphi/R + dmeanvRdR), grid,
                derivRGrid, derivphiGrid)
    else:
        return 0.5*(-meanvR/R - dmeanvTdphi/R + dmeanvRdR)
NAME:
   oortC
PURPOSE:
   calculate the Oort function C at (R,phi,t)
INPUT:
   R - radius at which to calculate C (can be Quantity)
   phi= azimuth (rad unless deg=True; can be Quantity)
   t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
   nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
   deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
   epsrel, epsabs - scipy.integrate keywords
   grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-object (such as returned by this procedure), use this grid
   derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid-object (such as returned by this procedure), use this grid
   gridpoints= number of points to use for the grid in 1D (default=101)
   derivGridpoints= number of points to use for the grid in 1D (default=101)
   returnGrid= if True, return the grid objects (default=False)
   hierarchgrid= if True, use a hierarchical grid (default=False)
   derivHierarchgrid= if True, use a hierarchical grid (default=False)
   nlevels= number of hierarchical levels for the hierarchical grid
   integrate_method= orbit.integrate method argument
OUTPUT:
   Oort C at R,phi,t
HISTORY:
   2011-10-16 - Written - Bovy (NYU)
15,823
def get_trades(self, pair="SWTH_NEO", start_time=None, end_time=None, limit=5000):
    # The parameter keys and endpoint path were stripped in the source;
    # 'from', 'to', 'limit' and '/trades' are plausible reconstructions.
    if limit > 10000 or limit < 1:
        raise ValueError("Attempting to request more trades than allowed by the API.")
    api_params = {
        "blockchain": self.blockchain,
        "pair": pair,
        "contract_hash": self.contract_hash
    }
    if start_time is not None:
        api_params["from"] = start_time
    if end_time is not None:
        api_params["to"] = end_time
    if limit != 5000:
        api_params["limit"] = limit
    return self.request.get(path='/trades', params=api_params)
Function to fetch a list of filled trades for the parameters requested. Execution of this function is as follows:: get_trades(pair="SWTH_NEO", limit=3) The expected return result for this function is as follows:: [{ 'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0', 'fill_amount': 100000000, 'take_amount': 100000000, 'event_time': '2018-08-04T15:00:12.634Z', 'is_buy': True }, { 'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e', 'fill_amount': 47833882, 'take_amount': 97950000000, 'event_time': '2018-08-03T02:44:47.706Z', 'is_buy': True }, { 'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f', 'fill_amount': 1001117, 'take_amount': 2050000000, 'event_time': '2018-08-03T02:32:50.703Z', 'is_buy': True }] :param pair: The trading pair that will be used to request filled trades. :type pair: str :param start_time: Only return trades after this time (in epoch seconds). :type start_time: int :param end_time: Only return trades before this time (in epoch seconds). :type end_time: int :param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000 :type limit: int :return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it.
15,824
def owner_profile(self) -> Profile:
    # The node dict keys were stripped in the source; 'owner'/'id' are
    # restored here following Instagram's GraphQL node layout.
    if not self._owner_profile:
        self._owner_profile = Profile.from_id(
            self._context, self._node['owner']['id'])
    return self._owner_profile
:class:`Profile` instance of the story item's owner.
15,825
def float_range(start=0, stop=None, step=1):
    start = float(start)
    while start < stop:
        yield start
        start += step
Much like the built-in function range, but accepts floats >>> tuple(float_range(0, 9, 1.5)) (0.0, 1.5, 3.0, 4.5, 6.0, 7.5)
15,826
def most_free_pixel(self):
    dist_tf = self.to_distance_im()
    max_px = np.where(dist_tf == np.max(dist_tf))
    free_pixel = np.array([max_px[0][0], max_px[1][0]])
    return free_pixel
Find the black pixel with the largest distance from the white pixels. Returns ------- :obj:`numpy.ndarray` 2-vector containing the most free pixel
15,827
def connect_checkable_button(instance, prop, widget):
    add_callback(instance, prop, widget.setChecked)
    widget.toggled.connect(partial(setattr, instance, prop))
    widget.setChecked(getattr(instance, prop) or False)
Connect a boolean callback property with a Qt button widget. Parameters ---------- instance : object The class instance that the callback property is attached to prop : str The name of the callback property widget : QtWidget The Qt widget to connect. This should implement the ``setChecked`` method and the ``toggled`` signal.
15,828
def get_output(script):
    # The log-message and dict-key string literals were stripped in the
    # source and are reconstructed as plausible values.
    with logs.debug_time(u'Read output from external shell logger'):
        commands = _get_last_n(const.SHELL_LOGGER_LIMIT)
        for command in commands:
            if command['command'] == script:
                lines = _get_output_lines(command['output'])
                output = '\n'.join(lines).strip()
                return output
        else:
            logs.warn("Output isn't available in shell logger")
            return None
Gets command output from shell logger.
15,829
def itershuffle(iterable, bufsize=1000):
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            for i in range(random.randint(1, bufsize - len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        random.shuffle(buf)
        while buf:
            yield buf.pop()
        # NB: the original ended with `raise StopIteration`, which errors
        # inside generators on modern Python (PEP 479); a plain return is
        # the equivalent, safe behaviour.
        return
Shuffle an iterator. This works by holding `bufsize` items back and yielding them sometime later. Obviously, this is not unbiased -- but should be good enough for batching. Larger bufsize means less bias. From https://gist.github.com/andres-erbsen/1307752 iterable (iterable): Iterator to shuffle. bufsize (int): Items to hold back. YIELDS (iterable): The shuffled iterator.
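A usage sketch of the buffer-shuffle pattern above; `handle` is a placeholder consumer, not part of the source:

import random

def handle(item):
    pass  # placeholder for your own per-item processing

# Items arrive approximately shuffled while memory stays bounded by bufsize.
for item in itershuffle(iter(range(10000)), bufsize=500):
    handle(item)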
15,830
def load_file(self, filepath, chname=None, wait=True,
              create_channel=True, display_image=True, image_loader=None):
    # The settings key, kwargs key, and error-message format were stripped
    # in the source and are reconstructed as plausible values.
    if not chname:
        channel = self.get_current_channel()
    else:
        if not self.has_channel(chname) and create_channel:
            self.gui_call(self.add_channel, chname)
        channel = self.get_channel(chname)
        chname = channel.name
    if image_loader is None:
        image_loader = self.load_image
    cache_dir = self.settings.get('download_folder', self.tmpdir)
    info = iohelper.get_fileinfo(filepath, cache_dir=cache_dir)
    if not info.ondisk:
        errmsg = "File must be locally loadable: %s" % (filepath)
        self.gui_do(self.show_error, errmsg)
        return
    filepath = info.filepath
    kwargs = {}
    idx = None
    if info.numhdu is not None:
        kwargs['idx'] = info.numhdu
    try:
        image = image_loader(filepath, **kwargs)
    except Exception as e:
        errmsg = "Failed to load '%s': %s" % (filepath, str(e))
        self.gui_do(self.show_error, errmsg)
        return
    future = Future.Future()
    future.freeze(image_loader, filepath, **kwargs)
    image.set(loader=image_loader, image_future=future)
    if image.get('path', None) is None:
        image.set(path=filepath)
    name = image.get('name', None)
    if name is None:
        name = iohelper.name_image_from_path(filepath, idx=idx)
        image.set(name=name)
    if display_image:
        if wait:
            self.gui_call(self.add_image, name, image, chname=chname)
        else:
            self.gui_do(self.add_image, name, image, chname=chname)
    else:
        self.gui_do(self.bulk_add_image, name, image, chname)
    return image
Load a file and display it. Parameters ---------- filepath : str The path of the file to load (must reference a local file). chname : str, optional The name of the channel in which to display the image. wait : bool, optional If `True`, then wait for the file to be displayed before returning (synchronous behavior). create_channel : bool, optional Create channel. display_image : bool, optional If not `False`, then will load the image. image_loader : func, optional A special image loader, if provided. Returns ------- image The image object that was loaded.
15,831
def sendRequest(self, name, args):
    (respEvt, id) = self.newResponseEvent()
    self.sendMessage({"id": id, "method": name, "params": args})
    return respEvt
sends a request to the peer
15,832
def tseries_between(self, tstart=None, tend=None):
    # The time-column key was stripped in the source; 't' is assumed here.
    if self.tseries is None:
        return None
    ndat = self.tseries.shape[0]
    if tstart is None:
        istart = 0
    else:
        # bisection for the first index with time >= tstart
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            istart = igm + (igp - igm) // 2
            if self.tseries.iloc[istart]['t'] >= tstart:
                igp = istart
            else:
                igm = istart
        istart = igp
    if tend is None:
        iend = None
    else:
        # bisection for the last index with time <= tend
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            iend = igm + (igp - igm) // 2
            if self.tseries.iloc[iend]['t'] > tend:
                igp = iend
            else:
                igm = iend
        iend = igm + 1
    return self.tseries.iloc[istart:iend]
Return time series data between requested times. Args: tstart (float): starting time. Set to None to start at the beginning of available data. tend (float): ending time. Set to None to stop at the end of available data. Returns: :class:`pandas.DataFrame`: slice of :attr:`tseries`.
15,833
def get_by_id_or_404(self, id, **kwargs):
    obj = self.get_by_id(id=id, **kwargs)
    if obj:
        return obj
    raise Http404
Gets an instance by id, or raises a 404 if one isn't found.
15,834
def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    # The struct format string was stripped in the source; '!Q' (KMIP
    # encodes booleans as big-endian 8-byte integers) is assumed here.
    try:
        ostream.write(pack('!Q', self.value))
    except Exception:
        self.logger.error("Error writing boolean value to buffer")
        raise
Write the value of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of the value of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
15,835
def _make_sparse_blocks(self, variable, records, data):
    # The body of this function arrived badly garbled in the source (only
    # error-message fragments survive). Below is a plausible skeleton
    # reconstructed from the docstring and those fragments.
    for key in ('Num_Dims', 'Dim_Sizes', 'Data_Type', 'Num_Elements'):
        if key not in variable:
            print('Unknown dictionary.... Skip')
            return
    if isinstance(data, np.ndarray):
        if len(records) > len(data):
            print('Invalid sparse data... '
                  'Less data than the specified records... Skip')
            return
        return self._make_sparse_blocks_with_physical(variable, records, data)
    elif isinstance(data, list):
        if any(not isinstance(item, str) for item in data):
            print('Can not handle list data.... '
                  'Only support list of str... Skip')
            return
        if len(records) > len(data):
            print('Invalid sparse data... '
                  'Less data than the specified records... Skip')
            return
        return self._make_sparse_blocks_with_physical(variable, records, data)
    else:
        # Embedded virtual data (as returned by varget() for a sparse
        # variable) takes the virtual-record path.
        return self._make_sparse_blocks_with_virtual(variable, records, data)
Handles the data for the variable with sparse records. Organizes the physical record numbers into blocks in a list: [[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...] Place consecutive physical records into a single block If all records are physical, this calls _make_sparse_blocks_with_physical If any records are virtual, this calls _make_sparse_blocks_with_virtual Parameters: variable : dict the variable dictionary, with 'Num_Dims', 'Dim_Sizes', 'Data_Type', 'Num_Elements' key words, typically returned from a call to cdf read's varinq('variable', expand=True) records : list a list of physical records data : varies bytes array, numpy.ndarray or list of str form with all physical data or embedded virtual data (returned from call to varget('variable') for a sparse variable) Returns: sparse_blocks: list A list of sparse records/data in the form [[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
15,836
def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
    if not targets:
        def wrapper(target: Callable):
            return self.spawn(target, count=count, **process_kwargs)
        return wrapper
    if len(targets) * count == 1:
        return self._process(targets[0], **process_kwargs)
    return ProcessList(
        self._process(target, **process_kwargs)
        for _ in range(count)
        for target in targets
    )
r""" Produce one or many child process(s) bound to this context. :param \*targets: Passed on to the :py:class:`Process` constructor, one at a time. :param count: The number of processes to spawn for each item in ``targets``. :param \*\*process_kwargs: .. include:: /api/context/params/process_kwargs.rst :return: A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
15,837
def get_marginal_topic_distrib(doc_topic_distrib, doc_lengths):
    unnorm = (doc_topic_distrib.T * doc_lengths).sum(axis=1)
    return unnorm / unnorm.sum()
Return marginal topic distribution p(T) (topic proportions) given the document-topic distribution (theta) `doc_topic_distrib` and the document lengths `doc_lengths`. The latter can be calculated with `get_doc_lengths()`.
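A worked toy example of the computation above (the numbers are illustrative):

import numpy as np

theta = np.array([[0.9, 0.1],    # document 1: mostly topic 0
                  [0.2, 0.8]])   # document 2: mostly topic 1
doc_lengths = np.array([100, 300])
unnorm = (theta.T * doc_lengths).sum(axis=1)  # [90+60, 10+240] -> [150., 250.]
p_t = unnorm / unnorm.sum()                   # -> [0.375, 0.625]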
15,838
import functools
from typing import Any

def recursive_getattr(obj: Any, attr: str, *args) -> Any:
    # The split separator was stripped in the source; '.' is restored
    # (the docstring says attributes are separated by a ".").
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split('.'))
Recursive ``getattr``. This can be used as a drop-in for the standard ``getattr(...)``. Credit to: https://stackoverflow.com/a/31174427 Args: obj: Object to retrieve the attribute from. attr: Name of the attribute, with each successive attribute separated by a ".". Returns: The requested attribute. (Same as ``getattr``). Raises: AttributeError: If the attribute was not found and no default was provided. (Same as ``getattr``).
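A quick usage sketch of the function above:

import pathlib

p = pathlib.Path("/tmp/data/file.txt")
recursive_getattr(p, "parent.name")          # -> 'data'
recursive_getattr(p, "parent.parent.name")   # -> 'tmp'
recursive_getattr(p, "missing.attr", None)   # -> None (default applied)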
15,839
def draw_confusion_matrix(matrix):
    # The title and label string literals were stripped in the source and
    # are reconstructed as plausible values.
    fig = tfmpl.create_figure(figsize=(7, 7))
    ax = fig.add_subplot(111)
    ax.set_title('Confusion matrix')
    tfmpl.plots.confusion_matrix.draw(
        ax, matrix,
        axis_labels=['Class ' + str(x) for x in range(10)],
        normalize=True
    )
    return fig
Draw confusion matrix for MNIST.
15,840
def import_ecdsakey_from_private_pem(pem, scheme='ecdsa-sha2-nistp256', password=None):
    # The stripped string literals (scheme default, key type, dict keys,
    # debug message) are restored here following securesystemslib conventions.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
    if password is not None:
        securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
    else:
        logger.debug('The password/passphrase is unset.  The PEM is expected'
                     ' to be unencrypted.')
    ecdsakey_dict = {}
    keytype = 'ecdsa-sha2-nistp256'
    public = None
    private = None
    public, private = \
        securesystemslib.ecdsa_keys.create_ecdsa_public_and_private_from_pem(
            pem, password)
    key_value = {'public': public.replace('\r\n', '\n'),
                 'private': ''}
    keyid = _get_keyid(keytype, scheme, key_value)
    key_value['private'] = private
    ecdsakey_dict['keytype'] = keytype
    ecdsakey_dict['scheme'] = scheme
    ecdsakey_dict['keyid'] = keyid
    ecdsakey_dict['keyval'] = key_value
    ecdsakey_dict['keyid_hash_algorithms'] = \
        securesystemslib.settings.HASH_ALGORITHMS
    return ecdsakey_dict
<Purpose> Import the private ECDSA key stored in 'pem', and generate its public key (which will also be included in the returned ECDSA key object). In addition, a keyid identifier for the ECDSA key is generated. The object returned conforms to: {'keytype': 'ecdsa-sha2-nistp256', 'scheme': 'ecdsa-sha2-nistp256', 'keyid': keyid, 'keyval': {'public': '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----', 'private': '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}} The private key is a string in PEM format. >>> ecdsa_key = generate_ecdsa_key() >>> private_pem = ecdsa_key['keyval']['private'] >>> ecdsa_key = import_ecdsakey_from_private_pem(private_pem) >>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key) True <Arguments> pem: A string in PEM format. The private key is extracted and returned in an ecdsakey object. scheme: The signature scheme used by the imported key. password: (optional) The password, or passphrase, to decrypt the private part of the ECDSA key if it is encrypted. 'password' is not used directly as the encryption key, a stronger encryption key is derived from it. <Exceptions> securesystemslib.exceptions.FormatError, if the arguments are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if 'pem' specifies an unsupported key type. <Side Effects> None. <Returns> A dictionary containing the ECDSA keys and other identifying information. Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
15,841
def _deserialize_default(cls, cls_target, obj_raw):
    if cls._is_deserialized(cls_target, obj_raw):
        return obj_raw
    elif type(obj_raw) == dict:
        return cls._deserialize_dict(cls_target, obj_raw)
    else:
        return cls_target(obj_raw)
:type cls_target: T|type :type obj_raw: int|str|bool|float|list|dict|None :rtype: T
15,842
def get_data_view(self, data_view_id):
    url = routes.get_data_view(data_view_id)
    response = self._get(url).json()
    result = response["data"]["data_view"]
    datasets_list = []
    for dataset in result["datasets"]:
        datasets_list.append(Dataset(
            name=dataset["name"],
            id=dataset["id"],
            description=dataset["description"]
        ))
    columns_list = []
    for column in result["columns"]:
        columns_list.append(ColumnFactory.from_dict(column))
    return DataView(
        view_id=data_view_id,
        name=result["name"],
        description=result["description"],
        datasets=datasets_list,
        columns=columns_list,
    )
Retrieves a summary of information for a given data view - view id - name - description - columns :param data_view_id: The ID number of the data view to which the run belongs, as a string :type data_view_id: str
15,843
def get_length(self):
    length = 0
    for i, point in enumerate(self.points):
        if i != 0:
            length += point.distance(self.points[i - 1])
    return length
Calculate and return the length of the line as a sum of lengths of all its segments. :returns: Total length in km.
15,844
def parametrize(self, operator, params):
    return (CaselessKeyword(operator, identChars=alphanums)
            + self.parameter(params))
Return a parser that parses an operator with parameters.
15,845
def create(self, roomId=None, toPersonId=None, toPersonEmail=None,
           text=None, markdown=None, files=None, **request_parameters):
    # The stripped dict-key/header literals ('files', 'Content-type') are
    # reconstructed as plausible values.
    check_type(roomId, basestring)
    check_type(toPersonId, basestring)
    check_type(toPersonEmail, basestring)
    check_type(text, basestring)
    check_type(markdown, basestring)
    check_type(files, list)
    if files:
        if len(files) != 1:
            raise ValueError("The length of the `files` list is greater "
                             "than one (1). The files parameter is a "
                             "list, which accepts multiple values to "
                             "allow for future expansion, but currently "
                             "only one file may be included with the "
                             "message.")
        check_type(files[0], basestring)
    post_data = dict_from_items_with_values(
        request_parameters,
        roomId=roomId,
        toPersonId=toPersonId,
        toPersonEmail=toPersonEmail,
        text=text,
        markdown=markdown,
        files=files,
    )
    if not files or is_web_url(files[0]):
        json_data = self._session.post(API_ENDPOINT, json=post_data)
    elif is_local_file(files[0]):
        try:
            post_data['files'] = open_local_file(files[0])
            multipart_data = MultipartEncoder(post_data)
            headers = {'Content-type': multipart_data.content_type}
            json_data = self._session.post(API_ENDPOINT, headers=headers,
                                           data=multipart_data)
        finally:
            post_data['files'].file_object.close()
    else:
        raise ValueError("The `files` parameter does not contain a valid "
                         "URL or path to a local file.")
    return self._object_factory(OBJECT_TYPE, json_data)
Post a message, and optionally a attachment, to a room. The files parameter is a list, which accepts multiple values to allow for future expansion, but currently only one file may be included with the message. Args: roomId(basestring): The room ID. toPersonId(basestring): The ID of the recipient when sending a private 1:1 message. toPersonEmail(basestring): The email address of the recipient when sending a private 1:1 message. text(basestring): The message, in plain text. If `markdown` is specified this parameter may be optionally used to provide alternate text for UI clients that do not support rich text. markdown(basestring): The message, in markdown format. files(`list`): A list of public URL(s) or local path(s) to files to be posted into the room. Only one file is allowed per message. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: Message: A Message object with the details of the created message. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error. ValueError: If the files parameter is a list of length > 1, or if the string in the list (the only element in the list) does not contain a valid URL or path to a local file.
15,846
def strip_prompt(self, *args, **kwargs):
    a_string = super(FlexvnfSSH, self).strip_prompt(*args, **kwargs)
    return self.strip_context_items(a_string)
Strip the trailing router prompt from the output.
15,847
def generate_mix2pl_dataset(n, m, useDirichlet=True):
    alpha = np.random.rand()
    gamma1 = None
    gamma2 = None
    if useDirichlet:
        gamma1 = np.random.dirichlet(np.ones(m))
        gamma2 = np.random.dirichlet(np.ones(m))
    else:
        gamma1 = np.random.rand(m)
        gamma1 /= np.sum(gamma1)
        gamma2 = np.random.rand(m)
        gamma2 /= np.sum(gamma2)  # bug fix: the original normalized by sum(gamma1)
    votes = []
    for i in range(n):
        vote = None
        draw = np.random.rand()
        if draw <= alpha:
            vote = draw_pl_vote(m, gamma1)
        else:
            vote = draw_pl_vote(m, gamma2)
        votes.append(vote)
    params = np.hstack((alpha, gamma1, gamma2))
    return (params, votes)
Description: Generate a mixture of 2 Plackett-Luce models dataset and return the parameters and votes. Parameters: n: number of votes to generate m: number of alternatives useDirichlet: boolean flag to use the Dirichlet distribution
15,848
def get_data_files(dirname):
    flist = []
    for dirpath, _dirnames, filenames in os.walk(dirname):
        for fname in filenames:
            flist.append(osp.join(dirpath, fname))
    return flist
Return data files in directory *dirname*
15,849
def get_staff_updater(cls):
    # The slugify format string was stripped in the source; '%s %s' is
    # assumed here. Also fixes a "sublass" typo in the error message.
    from django.core.exceptions import ImproperlyConfigured
    if not issubclass(cls, BaseStaffMember):
        raise ImproperlyConfigured("%s is not a subclass of StaffMember" % cls)

    def update_staff_member(sender, instance, created, *args, **kwargs):
        if instance.is_staff and not cls.objects.filter(user=instance).count():
            staffmember = cls(user=instance, is_active=True)
            staffmember.save()
        elif instance.is_staff:
            staffmembers = cls.objects.filter(user=instance)
            if len(staffmembers):
                staffmember = staffmembers[0]
            staffmember.is_active = True
            if instance.first_name != staffmember.first_name:
                staffmember.first_name = instance.first_name
                staffmember.slug = slugify('%s %s' % (
                    instance.first_name, instance.last_name))
            if instance.last_name != staffmember.last_name:
                staffmember.last_name = instance.last_name
                staffmember.slug = slugify('%s %s' % (
                    instance.first_name, instance.last_name))
            if instance.email != staffmember.email:
                staffmember.email = instance.email
            staffmember.save()
        elif not instance.is_staff:
            for staffmember in cls.objects.filter(user=instance):
                staffmember.is_active = False
                staffmember.save()
        from django.db import transaction
        transaction.commit_unless_managed()
    return update_staff_member
This returns a function for passing to a signal.
15,850
def account_unfollow(self, id):
    # The endpoint path and HTTP method literals were stripped in the
    # source; the standard Mastodon API values are restored here.
    id = self.__unpack_id(id)
    url = '/api/v1/accounts/{0}/unfollow'.format(str(id))
    return self.__api_request('POST', url)
Unfollow a user. Returns a `relationship dict`_ containing the updated relationship to the user.
15,851
def find_video_detail_by_id(self, video_id, ext=None):
    # The URL and parameter-key literals were stripped in the source and
    # are reconstructed following the Youku open API conventions.
    url = 'https://openapi.youku.com/v2/videos/show.json'
    params = {
        'client_id': self.client_id,
        'video_id': video_id
    }
    if ext:
        params['ext'] = ext
    r = requests.get(url, params=params)
    check_error(r)
    return r.json()
doc: http://cloud.youku.com/docs?id=46
15,852
def unobserve_all_properties(self, handler):
    for name in self._property_handlers:
        self.unobserve_property(name, handler)
Unregister a property observer from *all* observed properties.
15,853
def snapshots(self, xml_bytes):
    root = XML(xml_bytes)
    result = []
    for snapshot_data in root.find("snapshotSet"):
        snapshot_id = snapshot_data.findtext("snapshotId")
        volume_id = snapshot_data.findtext("volumeId")
        status = snapshot_data.findtext("status")
        start_time = snapshot_data.findtext("startTime")
        start_time = datetime.strptime(start_time[:19], "%Y-%m-%dT%H:%M:%S")
        progress = snapshot_data.findtext("progress")[:-1]
        progress = float(progress or "0") / 100.
        snapshot = model.Snapshot(
            snapshot_id, volume_id, status, start_time, progress)
        result.append(snapshot)
    return result
Parse the XML returned by the C{DescribeSnapshots} function. @param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root element. @return: A list of L{Snapshot} instances. TODO: ownersSet, restorableBySet, ownerId, volumeSize, description, ownerAlias.
15,854
def my_application(api):
    # The print-message literals were stripped in the source and are
    # reconstructed as plausible values.
    device = api.list_connected_devices().first()
    print('Using device:', device.id)
    api.delete_device_subscriptions(device.id)
    try:
        print('Registering webhook at:', ngrok_url)
        api.update_webhook(ngrok_url)
        print('Requesting resource value for:', resource_path)
        deferred = api.get_resource_value_async(device_id=device.id,
                                                resource_path=resource_path)
        print('Waiting for async id:', deferred.async_id)
        result = deferred.wait(15)
        print('Received value:', repr(result))
        return result
    except Exception:
        print(traceback.format_exc())
    finally:
        api.delete_webhook()
        print("Deregistered and unsubscribed from all resources. Exiting.")
        exit(1)
An example application. - Registers a webhook with mbed cloud services - Requests the value of a resource - Prints the value when it arrives
15,855
def convert_from_ik_angles(self, joints):
    # The error-message literal was stripped in the source and is
    # reconstructed here.
    if len(joints) != len(self.motors) + 2:
        raise ValueError('Incompatible data, len(joints) should be {}!'
                         .format(len(self.motors) + 2))
    joints = [rad2deg(j) for j in joints[1:-1]]
    joints *= self._reversed
    return [(j * (1 if m.direct else -1)) - m.offset
            for j, m in zip(joints, self.motors)]
Convert from IKPY internal representation to poppy representation.
15,856
def db_set_indexing(cls, is_indexing, impl, working_dir):
    # The open() mode literal was stripped in the source; 'w' (create the
    # lockfile) is clearly intended.
    indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir)
    if is_indexing:
        with open(indexing_lockfile_path, 'w') as f:
            pass
    else:
        try:
            os.unlink(indexing_lockfile_path)
        except:
            pass
Set lockfile path as to whether or not the system is indexing. NOT THREAD SAFE, USE ONLY FOR CRASH DETECTION.
15,857
def hist(self, dimension=None, num_bins=20, bin_range=None,
         adjoin=True, individually=True, **kwargs):
    # The stripped key/prefix literals ('dimension', 'index', the style
    # prefix) are reconstructed following holoviews conventions.
    if dimension is not None and not isinstance(dimension, list):
        dimension = [dimension]
    histmaps = [self.clone(shared_data=False) for _ in (dimension or [None])]
    if individually:
        map_range = None
    else:
        if dimension is None:
            raise Exception("Please supply the dimension to compute a histogram for.")
        map_range = self.range(kwargs['dimension'])
    bin_range = map_range if bin_range is None else bin_range
    style_prefix = 'Custom[<' + self.name + '>]_'
    if issubclass(self.type, (NdOverlay, Overlay)) and 'index' not in kwargs:
        kwargs['index'] = 0
    for k, v in self.data.items():
        hists = v.hist(adjoin=False, dimension=dimension,
                       bin_range=bin_range, num_bins=num_bins,
                       style_prefix=style_prefix, **kwargs)
        if isinstance(hists, Layout):
            for i, hist in enumerate(hists):
                histmaps[i][k] = hist
        else:
            histmaps[0][k] = hists
    if adjoin:
        layout = self
        for hist in histmaps:
            layout = (layout << hist)
        if issubclass(self.type, (NdOverlay, Overlay)):
            layout.main_layer = kwargs['index']
        return layout
    else:
        if len(histmaps) > 1:
            return Layout(histmaps)
        else:
            return histmaps[0]
Computes and adjoins histogram along specified dimension(s). Defaults to first value dimension if present otherwise falls back to first key dimension. Args: dimension: Dimension(s) to compute histogram on num_bins (int, optional): Number of bins bin_range (tuple optional): Lower and upper bounds of bins adjoin (bool, optional): Whether to adjoin histogram Returns: AdjointLayout of HoloMap and histograms or just the histograms
15,858
def get_extreme(self, target_prop, maximize=True, min_temp=None,
                max_temp=None, min_doping=None, max_doping=None,
                isotropy_tolerance=0.05, use_average=True):
    # The stripped string literals (infinity defaults, 'p'/'n' loop keys,
    # output dict keys) are reconstructed following pymatgen conventions.

    def is_isotropic(x, isotropy_tolerance):
        if len(x) != 3:
            raise ValueError("Invalid input to is_isotropic!")
        st = sorted(x)
        return bool(all([st[0], st[1], st[2]]) and
                    (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and
                    (abs((st[2] - st[0])) / st[2] <= isotropy_tolerance) and
                    (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))

    if target_prop.lower() == "seebeck":
        d = self.get_seebeck(output="eigs", doping_levels=True)
    elif target_prop.lower() == "power factor":
        d = self.get_power_factor(output="eigs", doping_levels=True)
    elif target_prop.lower() == "conductivity":
        d = self.get_conductivity(output="eigs", doping_levels=True)
    elif target_prop.lower() == "kappa":
        d = self.get_thermal_conductivity(output="eigs", doping_levels=True)
    elif target_prop.lower() == "zt":
        d = self.get_zt(output="eigs", doping_levels=True)
    else:
        raise ValueError("Target property: {} not recognized!".format(target_prop))

    absval = True
    x_val = None
    x_temp = None
    x_doping = None
    x_isotropic = None
    output = {}

    min_temp = min_temp or 0
    max_temp = max_temp or float('inf')
    min_doping = min_doping or 0
    max_doping = max_doping or float('inf')

    for pn in ('p', 'n'):
        for t in d[pn]:
            if min_temp <= float(t) <= max_temp:
                for didx, evs in enumerate(d[pn][t]):
                    doping_lvl = self.doping[pn][didx]
                    if min_doping <= doping_lvl <= max_doping:
                        isotropic = is_isotropic(evs, isotropy_tolerance)
                        if absval:
                            evs = [abs(x) for x in evs]
                        if use_average:
                            val = float(sum(evs)) / len(evs)
                        else:
                            val = max(evs)
                        if x_val is None or (val > x_val and maximize) \
                                or (val < x_val and not maximize):
                            x_val = val
                            x_temp = t
                            x_doping = doping_lvl
                            x_isotropic = isotropic
        output[pn] = {'value': x_val, 'temperature': x_temp,
                      'doping': x_doping, 'isotropic': x_isotropic}
        x_val = None

    if maximize:
        max_type = 'p' if output['p']['value'] >= output['n']['value'] else 'n'
    else:
        max_type = 'p' if output['p']['value'] <= output['n']['value'] else 'n'
    output['best'] = output[max_type]
    output['best']['carrier_type'] = max_type
    return output
This method takes in eigenvalues over a range of carriers, temperatures, and doping levels, and tells you what is the "best" value that can be achieved for the given target_property. Note that this method searches the doping dict only, not the full mu dict. Args: target_prop: target property, i.e. "seebeck", "power factor", "conductivity", "kappa", or "zt" maximize: True to maximize, False to minimize (e.g. kappa) min_temp: minimum temperature allowed max_temp: maximum temperature allowed min_doping: minimum doping allowed (e.g., 1E18) max_doping: maximum doping allowed (e.g., 1E20) isotropy_tolerance: tolerance for isotropic (0.05 = 5%) use_average: True for avg of eigenval, False for max eigenval Returns: A dictionary with keys {"p", "n", "best"} with sub-keys: {"value", "temperature", "doping", "isotropic"}
15,859
def addNewLvls(self):
    # The level-name literals were stripped in the source; they clearly
    # mirror the constants and are restored here.
    RAISEMSG = 99
    logging.addLevelName(RAISEMSG, 'RAISEMSG')

    def raisemsg(self, msg, lvl=RAISEMSG, *args, **kws):
        self.log(lvl, msg, *args, **kws)
    logging.Logger.raisemsg = raisemsg

    IMPORTANTINFO = 25
    logging.addLevelName(IMPORTANTINFO, 'IMPORTANTINFO')

    def importantinfo(self, msg, lvl=IMPORTANTINFO, *args, **kws):
        self.log(lvl, msg, *args, **kws)
    logging.Logger.importantinfo = importantinfo

    FILEONLY = 1
    logging.addLevelName(FILEONLY, 'FILEONLY')

    def fileonly(self, msg, lvl=FILEONLY, *args, **kws):
        self.log(lvl, msg, *args, **kws)
    logging.Logger.fileonly = fileonly
+---------------------+----------------------+
|   Standard Levels   |      New Levels      |
+---------------+-----+----------------+-----+
| Name          |Level| Name           |Level|
+===============+=====+================+=====+
|               |     |RAISEMSG        | 99  |
+---------------+-----+----------------+-----+
|CRITICAL       | 50  |                |     |
+---------------+-----+----------------+-----+
|ERROR          | 40  |                |     |
+---------------+-----+----------------+-----+
|WARNING        | 30  |                |     |
+---------------+-----+----------------+-----+
|               |     |IMPORTANTINFO   | 25  |
+---------------+-----+----------------+-----+
|INFO           | 20  |                |     |
+---------------+-----+----------------+-----+
|DEBUG          | 10  |                |     |
+---------------+-----+----------------+-----+
|               |     |FILEONLY        | 1   |
+---------------+-----+----------------+-----+
|NOTSET         | 0   |                |     |
+---------------+-----+----------------+-----+
15,860
def _find_exits(self, src_block, target_block):
    # The initialization of exit_stmt_ids and the jumpkind/edge-attribute
    # literals were lost in the source; a None-defaulting dict and the
    # VEX jumpkinds 'Ijk_Call'/'Ijk_Ret' are plausible reconstructions.
    from collections import defaultdict
    exit_stmt_ids = defaultdict(lambda: None)
    exit_stmt_ids[DEFAULT_STATEMENT] = None

    all_simple_paths = list(networkx.all_simple_paths(
        self._cfg.graph, src_block, target_block, cutoff=3))
    for simple_path in all_simple_paths:
        if len(simple_path) <= 1:
            continue
        if self._same_function:
            for i in range(len(simple_path) - 1):
                jumpkind = self._cfg.graph[simple_path[i]][simple_path[i + 1]]['jumpkind']
                if jumpkind in ('Ijk_Call', 'Ijk_Ret'):
                    return {}
        a, b = simple_path[0], simple_path[1]
        exit_stmt_id = self._cfg.get_exit_stmt_idx(a, b)
        if exit_stmt_id is None:
            continue
        if exit_stmt_ids[exit_stmt_id] is None:
            exit_stmt_ids[exit_stmt_id] = [b.addr]
        else:
            exit_stmt_ids[exit_stmt_id].append(b.addr)
    return exit_stmt_ids
Source block has more than one exit, and through some of those exits, the control flow can eventually go to the target block. This method returns exits that lead to the target block. :param src_block: The block that has multiple exits. :param target_block: The target block to reach. :returns: a dict of statement ID -> a list of target IPs (or None if the exit should not be taken), each corresponds to an exit to take in order to reach the target. For example, it returns the following dict: { 'default': None, # It has a default exit, but shouldn't be taken 15: [ 0x400080 ], # Statement 15 is an exit statement, and should be taken when the target is # 0x400080 28: None # Statement 28 is an exit statement, but shouldn't be taken }
15,861
def make_stream_features(self, stream, features):
    # The settings key was stripped in the source; 'sasl_mechanisms' is
    # assumed here.
    mechs = self.settings['sasl_mechanisms']
    if mechs and not stream.authenticated:
        sub = ElementTree.SubElement(features, MECHANISMS_TAG)
        for mech in mechs:
            if mech in sasl.SERVER_MECHANISMS:
                ElementTree.SubElement(sub, MECHANISM_TAG).text = mech
    return features
Add SASL features to the <features/> element of the stream. [receiving entity only] :returns: update <features/> element.
15,862
def logpdf(x, shape, loc=0.0, scale=1.0, skewness=1.0):
    m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0)) / \
        (np.sqrt(np.pi)*sp.gamma(shape/2.0))
    loc = loc + (skewness - (1.0/skewness))*scale*m1
    result = np.zeros(x.shape[0])
    result[x-loc < 0] = np.log(2.0) - np.log(skewness + 1.0/skewness) + \
        ss.t.logpdf(x=skewness*x[(x-loc) < 0], loc=loc[(x-loc) < 0]*skewness,
                    df=shape, scale=scale[(x-loc) < 0])
    result[x-loc >= 0] = np.log(2.0) - np.log(skewness + 1.0/skewness) + \
        ss.t.logpdf(x=x[(x-loc) >= 0]/skewness, loc=loc[(x-loc) >= 0]/skewness,
                    df=shape, scale=scale[(x-loc) >= 0])
    return result
Log PDF for the Skew-t distribution Parameters ---------- x : np.array random variables shape : float The degrees of freedom for the skew-t distribution loc : np.array The location parameter for the skew-t distribution scale : float The scale of the distribution skewness : float Skewness parameter (if 1, no skewness, if > 1, +ve skew, if < 1, -ve skew)
15,863
def json_schema_to_click_type(schema: dict) -> tuple:
    choices = None
    if isinstance(schema["type"], list):
        if "string" in schema["type"]:
            schema["type"] = "string"
    click_type = SCHEMA_BASE_MAP[schema["type"]]
    description = schema.get("title")
    if schema.get("enum"):
        enum = [value for value in schema["enum"] if isinstance(value, str)]
        choices = click.Choice(enum)
    return click_type, description, choices
A generic handler of a single property JSON schema to :class:`click.ParamType` converter :param schema: JSON schema property to operate on :return: Tuple of :class:`click.ParamType`, `description`` of option and optionally a :class:`click.Choice` if the allowed values are a closed list (JSON schema ``enum``)
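A self-contained usage sketch of the converter above; the SCHEMA_BASE_MAP mapping is an assumption (it is referenced but not defined in the source):

import click

SCHEMA_BASE_MAP = {"string": click.STRING, "integer": click.INT,
                   "number": click.FLOAT, "boolean": click.BOOL}  # assumed mapping

schema = {"type": "string", "title": "Deployment environment",
          "enum": ["dev", "staging", "prod"]}
click_type, description, choices = json_schema_to_click_type(schema)
# -> click.STRING, "Deployment environment", click.Choice(['dev', 'staging', 'prod'])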
15,864
def repolist(status='', media=None):
    # The sed filter expressions were stripped in the source; the filters
    # below are plausible reconstructions that trim the header/footer of
    # `yum repolist` output.
    manager = MANAGER
    with settings(hide('running', 'stdout')):
        if media:
            repos = run_as_root(
                "%(manager)s repolist %(status)s | sed '$d' | "
                "sed -n '/repo id/,$p'" % locals())
        else:
            repos = run_as_root(
                "%(manager)s repolist %(status)s | sed '/Media/d' | "
                "sed '$d' | sed -n '/repo id/,$p'" % locals())
    return [line.split()[0] for line in repos.splitlines()[1:]]
Get the list of ``yum`` repositories. Returns enabled repositories by default. Extra *status* may be passed to list disabled repositories if necessary. Media and debug repositories are kept disabled, except if you pass *media*. :: import burlap # Install a package that may be included in disabled repositories burlap.rpm.install('vim', burlap.rpm.repolist('disabled'))
15,865
def references(self):
    if self._references is None:
        self._references = list()
        self.__pull_combined_properties()
    return self._references
list: External links, or references, listed anywhere on the MediaWiki page Note: Not settable Note: May include external links within the page that are not technically cited anywhere
15,866
def calculate(self, scene, xaxis, yaxis):
    # The _buildData key literals were stripped in the source; the names
    # below ('grid_h_lines', 'axis_rect', ...) are plausible reconstructions.
    sceneRect = scene.sceneRect()

    h_lines = []
    h_alt = []
    h_labels = []
    v_lines = []
    v_alt = []
    v_labels = []

    xlabels = []
    xcount = 1
    xsections = 1
    xdelta = 0
    xdeltamin = 0
    ylabels = []
    ycount = 1
    ysections = 1
    ydeltamin = 0
    ydelta = 0

    axis_lft = 0
    axis_rht = 0
    axis_bot = 0
    axis_top = 0

    if xaxis and self.showXAxis():
        size = sceneRect.width()
        xdeltamin = xaxis.minimumLabelWidth()
        result = self.calculateAxis(xaxis, size, xdeltamin)
        xlabels, xcount, xsections, newWidth, xdelta = result
        if newWidth != size:
            sceneRect.setWidth(newWidth)

    if yaxis and self.showYAxis():
        size = sceneRect.height()
        ydeltamin = yaxis.minimumLabelHeight()
        result = self.calculateAxis(yaxis, size, ydeltamin)
        ylabels, ycount, ysections, newHeight, ydelta = result
        if newHeight != size:
            sceneRect.setHeight(newHeight)

    if xaxis and self.showXAxis():
        x = sceneRect.left() + xdeltamin / 2
        axis_lft = x
        axis_rht = x
        alt = False
        for i in range(xcount):
            v_lines.append(QLineF(x, sceneRect.top(), x, sceneRect.bottom()))
            if alt:
                alt_rect = QRectF(x - xdelta, sceneRect.top(),
                                  xdelta, sceneRect.height())
                v_alt.append(alt_rect)
            v_labels.append((x, xdelta, xlabels[i]))
            axis_rht = x
            x += xdelta
            alt = not alt

    if yaxis and self.showYAxis():
        y = sceneRect.bottom() - ydeltamin / 2
        axis_bot = y
        axis_top = y
        alt = False
        for i in range(ycount):
            h_lines.append(QLineF(sceneRect.left(), y, sceneRect.right(), y))
            if alt:
                alt_rect = QRectF(sceneRect.left(), y,
                                  sceneRect.width(), ydelta)
                h_alt.append(alt_rect)
            h_labels.append((y, ydelta, ylabels[i]))
            axis_top = y
            y -= ydelta
            alt = not alt

    self._buildData['grid_h_lines'] = h_lines
    self._buildData['grid_h_alt'] = h_alt
    self._buildData['grid_h_labels'] = h_labels
    self._buildData['grid_v_lines'] = v_lines
    self._buildData['grid_v_alt'] = v_alt
    self._buildData['grid_v_labels'] = v_labels
    self._buildData['grid_rect'] = sceneRect
    self._buildData['axis_rect'] = QRectF(axis_lft, axis_top,
                                          axis_rht - axis_lft,
                                          axis_bot - axis_top)
    scene.setSceneRect(sceneRect)
    return sceneRect
Calculates the grid data before rendering. :param scene | <XChartScene> xaxis | <XChartAxis> yaxis | <XChartAxis>
15,867
def _generate_main_files_header(notebook_object, notebook_title="Notebook Title",
                                notebook_description="Notebook Description"):
    header_temp = HEADER_MAIN_FILES.replace("Notebook Title", notebook_title)
    notebook_object["cells"].append(nb.v4.new_markdown_cell(header_temp))
    notebook_object["cells"].append(nb.v4.new_markdown_cell(
        notebook_description, **{"metadata": {"tags": ["test"]}}))
Internal function that is used for generation of the 'MainFiles' notebooks header. ---------- Parameters ---------- notebook_object : notebook object Object of "notebook" class where the header will be created. notebook_title : None or str Title of the Notebook. notebook_description : str An introductory text to present the Notebook and involve the reader.
15,868
def get_member_ids():
    pm = get_tool("portal_membership")
    member_ids = pm.listMemberIds()
    return filter(lambda x: x, member_ids)
Return all member ids of the portal.
15,869
def aggregate(name):
    # The query-parameter names and result keys were stripped in the source
    # and are reconstructed following babbage/OLAP API conventions.
    cube = get_cube(name)
    result = cube.aggregate(aggregates=request.args.get('aggregates'),
                            drilldowns=request.args.get('drilldown'),
                            cuts=request.args.get('cut'),
                            order=request.args.get('order'),
                            page=request.args.get('page'),
                            page_size=request.args.get('pagesize'))
    result['status'] = 'ok'
    if request.args.get('format', '').lower() == 'csv':
        return create_csv_response(result['cells'])
    else:
        return jsonify(result)
Perform an aggregation request.
15,870
def appendleft(self, item):
    self._deque.appendleft(item)
    self.notEmpty.set()
Add item to the left side of the GeventDeque. This method does not block. Either the GeventDeque grows to consume available memory, or, if this GeventDeque has a maxlen and is at maxlen, the rightmost item is removed.
15,871
def update_hpx_skymap_allsky(map_in, map_out):
    if map_out is None:
        in_hpx = map_in.hpx
        out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,
                                 None, in_hpx.ebins, None, in_hpx.conv, None)
        data_out = map_in.expanded_counts_map()
        print(data_out.shape, data_out.sum())
        map_out = HpxMap(data_out, out_hpx)
    else:
        map_out.data += map_in.expanded_counts_map()
    return map_out
'Update' a HEALPix skymap This checks map_out exists and creates it from map_in if it does not. If map_out does exist, this adds the data in map_in to map_out
15,872
def Grieves_Thodos(zs, Tcs, Aijs):
    # The exception-message literal was stripped in the source and is
    # reconstructed here.
    if not none_and_length_check([zs, Tcs]):
        raise Exception('Function inputs are incorrect format')
    Tcm = 0
    for i in range(len(zs)):
        Tcm += Tcs[i]/(1. + 1./zs[i]*sum(Aijs[i][j]*zs[j]
                                         for j in range(len(zs))))
    return Tcm
r'''Calculates critical temperature of a mixture according to mixing rules in [1]_. .. math:: T_{cm} = \sum_{i} \frac{T_{ci}}{1 + (1/x_i)\sum_j A_{ij} x_j} For a binary mxiture, this simplifies to: .. math:: T_{cm} = \frac{T_{c1}}{1 + (x_2/x_1)A_{12}} + \frac{T_{c2}} {1 + (x_1/x_2)A_{21}} Parameters ---------- zs : array-like Mole fractions of all components Tcs : array-like Critical temperatures of all components, [K] Aijs : array-like of shape `zs` by `zs` Interaction parameters Returns ------- Tcm : float Critical temperatures of the mixture, [K] Notes ----- All parameters, even if zero, must be given to this function. Giving 0s gives really bad results however. Examples -------- butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K. >>> Grieves_Thodos([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]]) 450.1839618758971 References ---------- .. [1] Grieves, Robert B., and George Thodos. "The Critical Temperatures of Multicomponent Hydrocarbon Systems." AIChE Journal 8, no. 4 (September 1, 1962): 550-53. doi:10.1002/aic.690080426. .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati. "Prediction of True Critical Temperature of Multi-Component Mixtures: Extending Fast Estimation Methods." Fluid Phase Equilibria 392 (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
15,873
def create(self, title, description=None, private=False):
    url = "/collections"
    data = {
        "title": title,
        "description": description,
        "private": private
    }
    result = self._post(url, data=data)
    return CollectionModel.parse(result)
Create a new collection. This requires the 'write_collections' scope. :param title [string]: The title of the collection. (Required.) :param description [string]: The collection’s description. (Optional.) :param private [boolean]: Whether to make this collection private. (Optional; default false). :return: [Collection]: The Unsplash Collection.
15,874
def _srvc_closing_routine(self, closing):
    # The log-message literals were stripped in the source and are
    # reconstructed as plausible values.
    if (not self._keep_open and closing and self.is_open):
        f_fd = self._hdf5file.fileno()
        self._hdf5file.flush()
        try:
            os.fsync(f_fd)
            try:
                self._hdf5store.flush(fsync=True)
            except TypeError:
                f_fd = self._hdf5store._handle.fileno()
                self._hdf5store.flush()
                os.fsync(f_fd)
        except OSError as exc:
            errmsg = ('Encountered error while fsyncing: `%s`' % repr(exc))
            self._logger.debug(errmsg)
        self._hdf5store.close()
        if self._hdf5file.isopen:
            self._logger.error('Could not close HDF5 file!')
        self._hdf5file = None
        self._hdf5store = None
        self._trajectory_group = None
        self._trajectory_name = None
        self._trajectory_index = None
        self._overview_group_ = None
        self._logger.debug('Closed HDF5 file.')
        return True
    else:
        return False
Routine to close an hdf5 file The file is closed only when `closing=True`. `closing=True` means that the file was opened in the current highest recursion level. This prevents re-opening and closing of the file if `store` or `load` are called recursively.
15,875
def ggsave(name, plot, data=None, *args, **kwargs):
    kwdefaults = {
        'width': 10,
        'height': 8,
        'scale': 1
    }
    keys_to_rm = ["prefix", "quiet", "postfix", "libs"]
    varname = 'p'  # name of the R variable holding the plot (reconstructed)
    prefix = kwargs.get('prefix', '')
    postfix = kwargs.get('postfix', '')
    libs = kwargs.get('libs', [])
    libs = '\n'.join(["library(%s)" % lib for lib in libs])
    quiet = kwargs.get("quiet", False)
    kwargs = {k: v for k, v in kwargs.items()  # iteritems() was Python 2 only
              if v is not None and k not in keys_to_rm}
    kwdefaults.update(kwargs)
    kwargs = kwdefaults

    if data is None:
        data = plot.data
    # The program-assembly block was garbled in the source; a plausible
    # reconstruction: load libraries, run the prefix, bind the plot to
    # `varname`, then run the postfix.
    prog = "%s\n%s\n%s = %s\n%s" % (libs, prefix, varname, plot.r, postfix)

    if name:
        stmt = GGStatement("ggsave", esc(name), varname, *args, **kwargs)
        prog = "%s\n%s" % (prog, stmt.r)
    if not quiet:
        print(prog)
        print('')
    if name:
        execute_r(prog, quiet)
    return prog
Save a GGStatements object to destination name @param name output file name. if None, don't run R command @param kwargs keyword args to pass to ggsave. The following are special keywords for the python save method data: a python data object (list, dict, DataFrame) used to populate the `data` variable in R libs: list of library names to load in addition to ggplot2 prefix: string containing R code to run before any ggplot commands (including data loading) postfix: string containing R code to run after data is loaded (e.g., if you want to rename variable names) quiet: if Truthy, don't print out R program string
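A hypothetical call, assuming `p` is a GGStatements plot built elsewhere and `df` a Python data object; width/height land in R's ggsave via kwargs:

prog = ggsave("out.pdf", p, data=df,
              width=8, height=6,
              libs=["scales"],
              prefix="theme_set(theme_bw())",
              quiet=True)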
15,876
def get_max_size(self, commands): as_lengths = ( discord.utils._string_width(c.name) for c in commands ) return max(as_lengths, default=0)
Returns the largest name length of the specified command list. Parameters ------------ commands: Sequence[:class:`Command`] A sequence of commands to check for the largest size. Returns -------- :class:`int` The maximum width of the commands.
15,877
def inferTM(self, bottomUp, externalInput): self.reset() self.tm.compute(bottomUp, basalInput=externalInput, learn=False) return self.tm.getPredictiveCells()
Run inference and return the set of predicted cells
15,878
def reset_query_marks(self):
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            if hasattr(m, 'reset_query_marks'):  # attribute name recovered from the call below
                m.reset_query_marks()
    self.flush_cache()
Set or reset the hyb (hybridization) and neighbors marks on atoms.
15,879
def formatmonth(self, theyear, themonth, withyear=True, net=None, qs=None,
                template='calendar/month.html'):  # default template path elided in source; placeholder
    context = self.get_context()
    context['month'] = date(self.yr, self.mo, 1)  # context keys reconstructed
    context['weeks'] = []
    for week in self.monthdays2calendar(theyear, themonth):
        week_row = []
        for day, weekday in week:
            week_row.append(self.formatday(day, weekday))
        context['weeks'].append(week_row)
    nxt, prev = get_next_and_prev(net)
    extra_qs = ('&' + '&'.join(qs)) if qs else ''
    # Query-string formats below are plausible reconstructions:
    context['prev_qs'] = mark_safe('?cal_prev=%s%s' % (prev, extra_qs))
    context['next_qs'] = mark_safe('?cal_next=%s%s' % (nxt, extra_qs))
    context['withyear'] = withyear
    return render_to_string(template, context)
Return a formatted month as a table.
15,880
def encode_float(encoder, pcm, frame_size, max_data_bytes):
    pcm = ctypes.cast(pcm, c_float_pointer)
    data = (ctypes.c_char * max_data_bytes)()
    result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)
    # The array typecode was elided in the source; 'b' (signed char) is a
    # plausible stand-in for the raw packet bytes.
    return array.array('b', data[:result]).tostring()
Encodes an Opus frame from floating point input
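A hedged sketch encoding one 20 ms stereo frame at 48 kHz; `encoder` is assumed to be a handle created by the same ctypes wrapper this function belongs to:

import ctypes

frame_size = 960                                    # 20 ms at 48 kHz
channels = 2
pcm = (ctypes.c_float * (frame_size * channels))()  # a silent frame
packet = encode_float(encoder, pcm, frame_size, max_data_bytes=4000)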
15,881
def getObj(self): if self.obj is None: if not self.pk: return None self.obj = self.foreignModel.objects.get(self.pk) return self.obj
getObj - Fetch (if not fetched) and return the obj associated with this data.
15,882
def calculate_inner_product_output_shapes(operator):
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    input = operator.inputs[0]
    output = operator.outputs[0]

    input_shape = input.type.shape
    # Error messages below are reconstructions; the originals were elided
    if len(input_shape) == 4 and (input_shape[2] != 1 or input_shape[3] != 1):
        raise RuntimeError('If the input is a 4-D tensor, its shape must be [N, C, 1, 1]')

    params = operator.raw_operator.innerProduct
    if input_shape[1] != params.inputChannels:
        raise RuntimeError('Dimension mismatch: expected %s input channels but got %s'
                           % (params.inputChannels, input_shape[1]))

    if len(input_shape) == 4:
        output.type.shape = [input_shape[0], params.outputChannels, 1, 1]
    elif len(input_shape) == 2:
        output.type.shape = [input_shape[0], params.outputChannels]
    else:
        raise RuntimeError('Input must be a 2-D or 4-D float tensor')
Allowed input/output patterns are 1. [N, C] ---> [N, C'] 2. [N, C, 1, 1] ---> [N, C', 1, 1]
15,883
def modprobe(module, persist=True):
    cmd = ['modprobe', module]
    log('Loading kernel module %s' % module, level=INFO)
    subprocess.check_call(cmd)
    if persist:
        persistent_modprobe(module)
Load a kernel module and configure for auto-load on reboot.
15,884
def get_settings(infile):
    settings = yaml.load(_as_file(infile))
    if not hasattr(settings, 'items'):  # i.e., the YAML did not parse to a mapping
        raise ValueError("Settings not found in {}".format(infile))
    processed_settings = {}
    for k, v in settings.items():
        if k.startswith("PMG_DB_"):
            processed_settings[k[7:].lower()] = v
        else:
            processed_settings[k] = v
    auth_aliases(processed_settings)
    return processed_settings
Read settings from input file. :param infile: Input file for YAML settings (the code parses YAML; JSON, as a YAML subset, also works). :type infile: file or str path :return: Settings parsed from file :rtype: dict
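A minimal round trip showing the PMG_DB_ prefix handling (keys are lowercased and the prefix stripped; other keys pass through), assuming the module's `_as_file` helper accepts an open file object:

import io

raw = io.StringIO("PMG_DB_HOST: localhost\nPMG_DB_PORT: 27017\ncollection: tasks\n")
settings = get_settings(raw)
# -> {'host': 'localhost', 'port': 27017, 'collection': 'tasks'}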
15,885
def _replace(self, data, replacements): for find, repl in replacements: data = data.replace(find, repl) return data
Given a list of 2-tuples (find, repl) this function performs all replacements on the input and returns the result.
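Order matters: later pairs see the output of earlier ones. A standalone copy for illustration:

def replace_all(data, replacements):      # same logic, module-free
    for find, repl in replacements:
        data = data.replace(find, repl)
    return data

print(replace_all("cat dog", [("cat", "dog"), ("dog", "wolf")]))  # "wolf wolf"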
15,886
def combine_sources(sources, chunksize=None):
    from pyemma.coordinates.data.sources_merger import SourcesMerger
    return SourcesMerger(sources, chunk=chunksize)
r""" Combines multiple data sources to stream from. The given source objects (readers and transformers, eg. TICA) are concatenated in dimension axis during iteration. This can be used to couple arbitrary features in order to pass them to an Estimator expecting only one source, which is usually the case. All the parameters for iterator creation are passed to the actual sources, to ensure consistent behaviour. Parameters ---------- sources : list, tuple list of DataSources (Readers, StreamingTransformers etc.) to combine for streaming access. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Notes ----- This is currently only implemented for matching lengths trajectories. Returns ------- merger : :class:`SourcesMerger <pyemma.coordinates.data.sources_merger.SourcesMerger>`
15,887
def battery2_send(self, voltage, current_battery, force_mavlink1=False): return self.send(self.battery2_encode(voltage, current_battery), force_mavlink1=force_mavlink1)
2nd Battery status voltage : voltage in millivolts (uint16_t) current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
15,888
def findLibrary(name):
    assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)"
    lib = None

    # Look in the LD_LIBRARY_PATH first
    lp = compat.getenv('LD_LIBRARY_PATH', '')
    for path in lp.split(os.pathsep):
        libs = glob(os.path.join(path, name + '*'))
        if libs:
            lib = libs[0]
            break

    # Look in the ldconfig cache (regex reconstructed from PyInstaller's algorithm)
    if lib is None:
        expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name)
        m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p'))
        if m:
            lib = m.group(0)

    # Fall back to the standard library directories
    if lib is None:
        paths = ['/lib', '/usr/lib']
        if is_aix:
            paths.append('/opt/freeware/lib')
        for path in paths:
            libs = glob(os.path.join(path, name + '*'))
            if libs:
                lib = libs[0]
                break

    if lib is None:
        return None

    # Resolve the file name to its SONAME
    dir, file = os.path.split(lib)
    return os.path.join(dir, getSoname(lib))
Look for a library in the system. Emulate the algorithm used by dlopen. `name` must include the prefix, e.g. ``libpython2.4.so``.
15,889
def upload(self, photo_file, **kwds):
    with open(photo_file, 'rb') as in_file:  # binary mode; original was elided
        result = self._client.post("/photo/upload.json",
                                   files={'photo': in_file},  # field name reconstructed
                                   **kwds)["result"]
    return Photo(self._client, result)
Endpoint: /photo/upload.json Uploads the specified photo filename.
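A hedged sketch against a trovebox/openphoto-style client; `photos` stands in for the collection object this method is bound to, and the kwargs are illustrative:

photo = photos.upload("/tmp/beach.jpg", title="Beach", tags="vacation")
print(photo.id)  # assumes the returned Photo exposes an `id` attribute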
15,890
def _filter_dates(dates, time_difference): LOGGER.debug("dates=%s", dates) if len(dates) <= 1: return dates sorted_dates = sorted(dates) separate_dates = [sorted_dates[0]] for curr_date in sorted_dates[1:]: if curr_date - separate_dates[-1] > time_difference: separate_dates.append(curr_date) return separate_dates
Filters out dates within time_difference, preserving only the oldest date. :param dates: a list of datetime objects :param time_difference: a ``datetime.timedelta`` representing the time difference threshold :return: an ordered list of datetimes `d1<=d2<=...<=dn` such that `d[i+1]-di > time_difference` :rtype: list(datetime.datetime)
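The near-duplicate at 10:30 falls within the one-hour window and is dropped, keeping only the oldest:

from datetime import datetime, timedelta

dates = [datetime(2020, 1, 1, 10, 30),
         datetime(2020, 1, 1, 10, 0),
         datetime(2020, 1, 2, 9, 0)]
_filter_dates(dates, timedelta(hours=1))
# -> [datetime(2020, 1, 1, 10, 0), datetime(2020, 1, 2, 9, 0)]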
15,891
def convert_embedding(net, node, model, builder):
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    inputs = node['inputs']
    outputs = node['outputs']
    arg_params, aux_params = model.get_params()
    W = arg_params[_get_node_name(net, inputs[1][0])].asnumpy()
    if not ONE_HOT_ENCODE_HACK:
        nC, nB = W.shape
        W = W.T
        builder.add_embedding(name=name, W=W, b=None,
                              input_dim=nC, output_channels=nB,
                              has_bias=False,
                              input_name=input_name, output_name=output_name)
    else:
        W = W.T
        nC, nB = W.shape
        builder.add_inner_product(name=name, W=W, b=None,
                                  input_channels=nB, output_channels=nC,
                                  has_bias=False,
                                  input_name=input_name, output_name=output_name)
Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model An model for MXNet builder: NeuralNetworkBuilder A neural network builder object.
15,892
def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        response = _api_key_patch_replace(conn, apiKey, '/enabled', 'True')
        return {'apiKey': _convert_datetime_str(response)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
enable the given apiKey. CLI Example: .. code-block:: bash salt myminion boto_apigateway.enable_api_key api_key
15,893
def _update_list_store(config_m, list_store, ignore_keys=None): ignore_keys = [] if ignore_keys is None else ignore_keys list_store.clear() for config_key in sorted(config_m.config.keys): if config_key in ignore_keys: continue config_value = config_m.get_current_config_value(config_key) if isinstance(config_value, bool): list_store.append((str(config_key), str(config_value), False, True, True, False, config_value)) else: list_store.append((str(config_key), str(config_value), True, False, False, True, config_value))
Generic method to create list store for a given config model :param ConfigModel config_m: Config model to read into list store :param Gtk.ListStore list_store: List store to be filled :param list ignore_keys: List of keys that should be ignored
15,894
def get_context_data(self, **kwargs):
    context = {'object': self.object}
    # The kwargs key checked here was elided in the source; 'form' is a guess
    if 'form' in kwargs:
        context['confirmation_message'] = self.get_confirmation_message(kwargs['form'])
    context.update(kwargs)
    return context
Hook for adding arguments to the context.
15,895
def get_linenumbers(functions, module, searchstr='def {}():\n'):  # default reconstructed
    lines = inspect.getsourcelines(module)[0]
    line_numbers = {}
    for function in functions:
        try:
            line_numbers[function] = lines.index(
                searchstr.format(function)) + 1
        except ValueError:
            print(r'Can not find `{}`'.format(searchstr.format(function)))  # message reconstructed
            line_numbers[function] = 0
    return line_numbers
Returns a dictionary which maps function names to line numbers. Args: functions: a list of function names module: the module to look the functions up searchstr: the string to search for Returns: A dictionary with functions as keys and their line numbers as values.
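Because `lines.index` does exact whole-line matching, the search string must reproduce the complete definition line, newline included. A self-contained demo (the default `searchstr` in the code above is a reconstruction):

import importlib.util, os, tempfile

# Write a tiny module to disk so inspect can read its source.
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write("def foo():\n    pass\n")
spec = importlib.util.spec_from_file_location("demo_mod", f.name)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print(get_linenumbers(['foo'], mod, searchstr='def {}():\n'))  # {'foo': 1}
os.unlink(f.name)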
15,896
def start(self): with self.synclock: if self.syncthread is not None: logging.warn( "Logger: Start called on a syncer that is already running") return self.sync() self.__setsync()
Start the logger background synchronization service. This means you don't need to worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger will be synced every syncperiod.
15,897
def update_sma(self, step): if self.linear_growth: sma = self.sma + step else: sma = self.sma * (1. + step) return sma
Calculate an updated value for the semimajor axis, given the current value and the step value. The step value must be managed by the caller to support both modes: grow outwards and shrink inwards. Parameters ---------- step : float The step value. Returns ------- sma : float The new semimajor axis length.
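Worked numbers for both modes:

sma, step = 10.0, 0.1
sma + step              # 10.1  (linear_growth=True)
sma * (1.0 + step)      # 11.0  (linear_growth=False, geometric)
# a negative step shrinks inwards in either mode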
15,898
def run(self, quil_program, classical_addresses: List[int] = None, trials=1):
    if classical_addresses is None:
        caddresses = get_classical_addresses_from_program(quil_program)
    else:
        caddresses = {'ro': classical_addresses}

    buffers = self._connection._qvm_run(quil_program, caddresses, trials,
                                        self.measurement_noise, self.gate_noise,
                                        self.random_seed)

    if len(buffers) == 0:
        return []

    if 'ro' in buffers:
        return buffers['ro'].tolist()

    raise ValueError("You are using QVMConnection.run with multiple readout registers not "
                     "named `ro`. Please use the new `QuantumComputer` abstraction.")
Run a Quil program multiple times, accumulating the values deposited in a list of classical addresses. :param Program quil_program: A Quil program. :param classical_addresses: The classical memory to retrieve. Specified as a list of integers that index into a readout register named ``ro``. This function--and particularly this argument--are included for backwards compatibility and will be removed in the future. :param int trials: Number of shots to collect. :return: A list of dictionaries of bits. Each dictionary corresponds to the values in `classical_addresses`. :rtype: list
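A hedged pyquil 2.x sketch; `qvm` is assumed to be a QVMConnection instance created elsewhere:

from pyquil import Program
from pyquil.gates import H, MEASURE

p = Program()
ro = p.declare('ro', 'BIT', 1)    # readout register must be named `ro`
p += H(0)
p += MEASURE(0, ro[0])
bits = qvm.run(p, trials=10)      # e.g. [[0], [1], [1], ...]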
15,899
def detect(agent, fill_none=False):
    result = dict(platform=dict(name=None, version=None))
    _suggested_detectors = []

    for info_type in detectorshub:
        detectors = _suggested_detectors or detectorshub[info_type]
        for detector in detectors:
            try:
                detector.detect(agent, result)
            except Exception as _err:
                pass

    if fill_none:
        for outer_key in ('os', 'browser'):  # keys recovered from httpagentparser
            outer_value = result.setdefault(outer_key, dict())
            for inner_key in ('name', 'version'):
                outer_value.setdefault(inner_key, None)

    return result
fill_none: if a name/version is not detected, the respective key is still added to the result with value None
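A typical call; the exact result keys depend on the registered detectors, so treat this output as illustrative:

ua = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
      "(KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36")
detect(ua, fill_none=True)
# e.g. {'platform': {'name': 'Linux', 'version': None},
#       'os': {'name': 'Linux', 'version': None},
#       'browser': {'name': 'Chrome', 'version': '41.0.2272.101'}}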