Code-docstring pairs dataset (~389k rows).
Columns: an unnamed int64 index (0-389k), code (string, 26-79.6k characters), docstring (string, 1-46.9k characters).
23,700
def putcellslice(self, rownr, value, blc, trc, inc=[]):
    return self._table.putcellslice(self._column, rownr, value, blc, trc, inc)
Put into a slice of a table cell holding an array. (see :func:`table.putcellslice`)
23,701
def is_external_url(self, url, site_url):
    url_splitted = urlsplit(url)
    if not url_splitted.netloc:
        return False
    return url_splitted.netloc != urlsplit(site_url).netloc
Check if the URL is an external URL.
23,702
def insert(self, iterable, index=0, data=None, weight=1.0):
    if index == len(iterable):
        self.is_terminal = True
        self.key = iterable
        self.weight = weight
        if data:
            self.data.add(data)
    else:
        if iterable[index] not in self.children:
            self.children[iterable[index]] = TrieNode()
        self.children[iterable[index]].insert(iterable, index + 1, data)
Insert a new node into the tree. Args: iterable(hashable): key used to find it in the future. data(object): data associated with the key. index(int): index used for insertion. weight(float): the weight given to the item added.
23,703
def wait(
    self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
    if safe:
        _wait = self._wait_or_catch_exc
    else:
        _wait = Process.wait

    if timeout is None:
        return [_wait(process) for process in self]
    else:
        final = time.time() + timeout
        return [_wait(process, final - time.time()) for process in self]
Call :py:meth:`~Process.wait()` on all the Processes in this list. :param timeout: Same as :py:meth:`~Process.wait()`. This parameter controls the timeout for all the Processes combined, not a single :py:meth:`~Process.wait()` call. :param safe: Suppress any errors that occur while waiting for a Process. The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred. :return: A ``list`` containing the values returned by child Processes of this Context.
23,704
def generate_sources_zip(milestone_id=None, output=None):
    if not is_input_valid(milestone_id, output):
        logging.error("invalid input")
        return 1
    create_work_dir(output)
    download_sources_artifacts(milestone_id, output)
    create_zip(output)
Generate a sources archive for given milestone id.
23,705
def get_departures(self, stop_id, route, destination, api_key):
    self.stop_id = stop_id
    self.route = route
    self.destination = destination
    self.api_key = api_key
    # NOTE: the request URL fragments, header strings, and JSON key names
    # were elided in the source; *_ELIDED placeholders stand in for them
    url = URL_BASE_ELIDED + self.stop_id + URL_SUFFIX_ELIDED
    auth = AUTH_PREFIX_ELIDED + self.api_key
    header = {HEADER_KEY_ELIDED: HEADER_VALUE_ELIDED, AUTH_HEADER_ELIDED: auth}
    try:
        response = requests.get(url, headers=header, timeout=10)
    except:
        logger.warning("Network or Timeout error")
        return self.info
    if response.status_code != 200:
        logger.warning("Error with the request sent; check api key")
        return self.info
    result = response.json()
    try:
        result[STOP_EVENTS_KEY_ELIDED]
    except KeyError:
        logger.warning("No stop events for this query")
        return self.info
    maxresults = 1
    monitor = []
    if self.destination != '':
        for i in range(len(result[STOP_EVENTS_KEY_ELIDED])):
            destination = result[STOP_EVENTS_KEY_ELIDED][i][TRANSPORT_KEY_ELIDED][DEST_KEY_ELIDED][NAME_KEY_ELIDED]
            if destination == self.destination:
                event = self.parseEvent(result, i)
                if event is not None:
                    monitor.append(event)
                if len(monitor) >= maxresults:
                    break
    elif self.route != '':
        for i in range(len(result[STOP_EVENTS_KEY_ELIDED])):
            number = result[STOP_EVENTS_KEY_ELIDED][i][TRANSPORT_KEY_ELIDED][NUMBER_KEY_ELIDED]
            if number == self.route:
                event = self.parseEvent(result, i)
                if event is not None:
                    monitor.append(event)
                if len(monitor) >= maxresults:
                    break
    else:
        for i in range(0, maxresults):
            event = self.parseEvent(result, i)
            if event is not None:
                monitor.append(event)
    if monitor:
        self.info = {
            ATTR_STOP_ID: self.stop_id,
            ATTR_ROUTE: monitor[0][0],
            ATTR_DUE_IN: monitor[0][1],
            ATTR_DELAY: monitor[0][2],
            ATTR_REALTIME: monitor[0][5],
            ATTR_DESTINATION: monitor[0][6],
            ATTR_MODE: monitor[0][7],
        }
    return self.info
Get the latest data from Transport NSW.
23,706
def freeze(self):
    self.app.disable()
    self.clear.disable()
    self.nod.disable()
    self.led.disable()
    self.dummy.disable()
    self.readSpeed.disable()
    self.expose.disable()
    self.number.disable()
    self.wframe.disable(everything=True)
    self.nmult.disable()
    self.frozen = True
Freeze all settings so they cannot be altered
23,707
def appliance_device_snmp_v1_trap_destinations(self):
    if not self.__appliance_device_snmp_v1_trap_destinations:
        self.__appliance_device_snmp_v1_trap_destinations = \
            ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
    return self.__appliance_device_snmp_v1_trap_destinations
Gets the ApplianceDeviceSNMPv1TrapDestinations API client. Returns: ApplianceDeviceSNMPv1TrapDestinations:
23,708
def in_config(self, key):
    key = self._real_key(key)
    exists = self._config.get(key)
    return exists
Check to see if the given key (or an alias) is in the config file.
23,709
def _newConsole(cls, console):
    self = cls.__new__(cls)
    _BaseConsole.__init__(self)
    self.console_c = console
    self.console = self
    self.width = _lib.TCOD_console_get_width(console)
    self.height = _lib.TCOD_console_get_height(console)
    return self
Make a Console instance, from a console ctype
23,710
def on_lstCanvasExpLayers_itemSelectionChanged(self):
    self.parent.exposure_layer = self.selected_canvas_explayer()
    # the second (keyword string) argument was elided in the source;
    # 'exposure' is assumed from context
    lblText = self.parent.get_layer_description_from_canvas(
        self.parent.exposure_layer, 'exposure')
    self.lblDescribeCanvasExpLayer.setText(lblText)
    self.parent.pbnNext.setEnabled(True)
Update layer description label .. note:: This is an automatic Qt slot executed when the category selection changes.
23,711
def golden_images(self):
    if not self.__golden_images:
        self.__golden_images = GoldenImages(self.__connection)
    return self.__golden_images
Gets the Golden Images API client. Returns: GoldenImages:
23,712
def delete_servers(self, server_id):
    endpoint = "https://api.newrelic.com"
    uri = "{endpoint}/api/v1/accounts/{account_id}/servers/{server_id}.xml".format(
        endpoint=endpoint, account_id=self.account_id, server_id=server_id)
    response = self._make_delete_request(uri)
    failed_deletions = []
    # the XPath expressions, match string, and dict key below were elided in
    # the source; *_ELIDED placeholders stand in for them
    for server in response.findall(SERVER_XPATH_ELIDED):
        if MARKER_ELIDED not in server.findall(STATUS_XPATH_ELIDED)[0].text:
            failed_deletions.append({KEY_ELIDED: server.get(ATTR_ELIDED)})
    return failed_deletions
Requires: account ID, server ID Input should be server id Returns: list of failed deletions (if any) Endpoint: api.newrelic.com Errors: 403 Invalid API Key Method: Delete
23,713
def to_iter(obj):
    if isinstance(obj, type(None)):
        return None
    elif isinstance(obj, six.string_types):
        return [obj]
    else:
        if isinstance(obj, collections.Iterable):
            return obj
        else:
            return [obj]
Convert an object to a list if it is not already an iterable. Nones are returned unaltered. This is an awful function that proliferates an explosion of types, please do not use anymore.
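A quick illustration of the wrapping behaviour described above (assuming to_iter from this entry is in scope):

to_iter(None)       # -> None (returned unaltered)
to_iter("abc")      # -> ["abc"] (strings are wrapped, not iterated)
to_iter([1, 2, 3])  # -> [1, 2, 3] (already iterable, returned as-is)
to_iter(42)         # -> [42]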
23,714
def get_global_cache_dir(appname=APPNAME_DEFAULT_ELIDED, ensure=False):
    # the default appname string and the sentinel compared below were elided
    # in the source; APPNAME_DEFAULT_ELIDED stands in for both
    if appname is None or appname == APPNAME_DEFAULT_ELIDED:
        appname = get_default_appname()
    global_cache_dir = util_cplat.get_app_resource_dir(
        appname, meta_util_constants.global_cache_dname)
    if ensure:
        util_path.ensuredir(global_cache_dir)
    return global_cache_dir
Returns (usually) writable directory for an application cache
23,715
def cubehelix_pal(start=0, rot=.4, gamma=1.0, hue=0.8,
                  light=.85, dark=.15, reverse=False):
    cdict = mpl._cm.cubehelix(gamma, start, rot, hue)
    # the colormap-name literal was elided in the source; 'cubehelix' is assumed
    cubehelix_cmap = mpl.colors.LinearSegmentedColormap('cubehelix', cdict)

    def cubehelix_palette(n):
        values = np.linspace(light, dark, n)
        return [mcolors.rgb2hex(cubehelix_cmap(x)) for x in values]

    return cubehelix_palette
Utility for creating a continuous palette from the cubehelix system.

This produces a colormap with linearly-decreasing (or increasing)
brightness. That means that information will be preserved if printed to
black and white or viewed by someone who is colorblind.

Parameters
----------
start : float (0 <= start <= 3)
    The hue at the start of the helix.
rot : float
    Rotations around the hue wheel over the range of the palette.
gamma : float (0 <= gamma)
    Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1) colors.
hue : float (0 <= hue <= 1)
    Saturation of the colors.
dark : float (0 <= dark <= 1)
    Intensity of the darkest color in the palette.
light : float (0 <= light <= 1)
    Intensity of the lightest color in the palette.
reverse : bool
    If True, the palette will go from dark to light.

Returns
-------
out : function
    Continuous color palette that takes a single :class:`int` parameter
    ``n`` and returns ``n`` equally spaced colors.

References
----------
Green, D. A. (2011). "A colour scheme for the display of astronomical
intensity images". Bulletin of the Astronomical Society of India,
Vol. 39, p. 289-295.

Examples
--------
>>> palette = cubehelix_pal()
>>> palette(5)
['#edd1cb', '#d499a7', '#aa688f', '#6e4071', '#2d1e3e']
23,716
def evaluate(self, verbose=True, passes=None):
    if self.is_pivot:
        index, pivot, columns = LazyOpResult(
            self.expr, self.weld_type, 0
        ).evaluate(verbose=verbose, passes=passes)
        df_dict = {}
        for i, column_name in enumerate(columns):
            df_dict[column_name] = pivot[i]
        return DataFrameWeld(pd.DataFrame(df_dict, index=index))
    else:
        df = pd.DataFrame(columns=[])
        weldvec_type_list = []
        for type in self.column_types:
            weldvec_type_list.append(WeldVec(type))
        columns = LazyOpResult(
            grizzly_impl.unzip_columns(self.expr, self.column_types),
            WeldStruct(weldvec_type_list),
            0
        ).evaluate(verbose=verbose, passes=passes)
        for i, column_name in enumerate(self.column_names):
            df[column_name] = columns[i]
        return DataFrameWeld(df)
Summary Returns: TYPE: Description
23,717
def list_folder_content(self, folder, name=None, entity_type=None,
                        content_type=None, page_size=DEFAULT_PAGE_SIZE,
                        page=None, ordering=None):
    if not is_valid_uuid(folder):
        # the error-message template was elided in the source
        raise StorageArgumentException(MSG_INVALID_UUID_ELIDED.format(folder))
    params = self._prep_params(locals())
    del params[KEY_ELIDED]  # drops the positional argument; key name elided in source
    return (self._authenticated_request
            .to_endpoint(ENDPOINT_TEMPLATE_ELIDED.format(folder))
            .with_params(params)
            .return_body()
            .get())
List files and folders (not recursively) contained in the folder. This function does not retrieve all results, pages have to be manually retrieved by the caller. Args: folder (str): The UUID of the requested folder. name (str): Optional filter on entity name. entity_type (str): Optional filter on entity type. Admitted values: ['file', 'folder']. content_type (str): Optional filter on entity content type (only files are returned). page_size (int): Number of elements per page. page (int): Number of the page. ordering (str): Indicate on which fields to sort the result. Prepend '-' to invert order. Multiple values can be provided. Ordering is supported on: ['name', 'created_on', 'modified_on']. Example: 'ordering=name,created_on' Returns: A dictionary of the results:: { u'count': 1, u'next': None, u'previous': None, u'results': [{u'content_type': u'plain/text', u'created_by': u'303447', u'created_on': u'2017-03-13T10:17:01.688472Z', u'description': u'', u'entity_type': u'file', u'modified_by': u'303447', u'modified_on': u'2017-03-13T10:17:01.688632Z', u'name': u'file_1', u'parent': u'eac11058-4ae0-4ea9-ada8-d3ea23887509', u'uuid': u'0e17eaac-cb00-4336-b9d7-657026844281'}] } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
23,718
def get_data_and_shared_column_widths(self, data_kwargs, width_kwargs):
    list_of_list, column_widths = self.get_data_and_column_widths(
        data_kwargs, width_kwargs)
    for table, shared_limit in self.shared_tables:
        _, widths = table.get_data_and_column_widths(data_kwargs, width_kwargs)
        for i, width in enumerate(widths[:len(column_widths)]):
            delta = width - column_widths[i]
            if delta > 0 and (not shared_limit or delta <= shared_limit):
                column_widths[i] = width
    return list_of_list, column_widths
:param data_kwargs: kwargs used for converting data to strings :param width_kwargs: kwargs used for determining column widths :return: tuple(list of list of strings, list of int)
23,719
def check(self, cfg, state, peek_blocks):
    node = self._get_cfg_node(cfg, state)
    if node is None:
        # the address being checked is not in the CFG
        return False
    # (the rest of the function body was truncated in the source)
Check if the specified address will be executed :param cfg: :param state: :param int peek_blocks: :return: :rtype: bool
23,720
def close(self):
    for pool in self.__pool.values():
        while not pool.empty():
            conn = pool.get_nowait()
            try:
                self._close(conn)
            except Exception:
                pass
    self.__poolSize.clear()
Closes the connection to the database for this connection. :return <bool> closed
23,721
def trisolve(dl, d, du, b, inplace=False):
    if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1)
            or d.shape[0] != b.shape[0]):
        raise ValueError()  # error message elided in source
    bshape_in = b.shape
    rtype = np.result_type(dl, d, du, b)
    if not inplace:
        # the memory-order flag was elided in the source; ORDER_ELIDED stands in
        dl = np.array(dl, dtype=rtype, copy=True, order=ORDER_ELIDED)
        d = np.array(d, dtype=rtype, copy=True, order=ORDER_ELIDED)
        du = np.array(du, dtype=rtype, copy=True, order=ORDER_ELIDED)
        b = np.array(b, dtype=rtype, copy=True, order=ORDER_ELIDED)
    dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order=ORDER_ELIDED)
                    for v in (dl, d, du, b))
    _lapack_trisolve(dl, d, du, b, rtype)
    return b.reshape(bshape_in)
The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems
of equations:

    a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}

in matrix form: Mx = b

TDMA is O(n), whereas standard Gaussian elimination is O(n^3).

Arguments:
-----------
    dl: (n - 1,) vector
        the lower diagonal of M
    d: (n,) vector
        the main diagonal of M
    du: (n - 1,) vector
        the upper diagonal of M
    b: (n,) vector
        the result of Mx
    inplace:
        if True, and if d and b are both float64 vectors, they will be
        modified in place (may be faster)

Returns:
-----------
    x: (n,) vector
        the solution to Mx = b

References:
-----------
http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
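The same kind of system can also be solved with SciPy's banded solver; a minimal, runnable sketch (not this entry's implementation):

import numpy as np
from scipy.linalg import solve_banded

dl = np.array([1.0, 1.0])        # lower diagonal (n - 1,)
d = np.array([4.0, 4.0, 4.0])    # main diagonal (n,)
du = np.array([2.0, 2.0])        # upper diagonal (n - 1,)
b = np.array([1.0, 2.0, 3.0])    # right-hand side (n,)

# banded storage: row 0 holds du (shifted right), row 2 holds dl (shifted left)
ab = np.array([np.concatenate(([0.0], du)),
               d,
               np.concatenate((dl, [0.0]))])
x = solve_banded((1, 1), ab, b)  # solves Mx = b in O(n)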
23,722
def estimate_bitstring_probs(results):
    nshots, nq = np.shape(results)
    outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
    probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
    return _bitstring_probs_by_qubit(probs)
Given an array of single shot results estimate the probability distribution over all bitstrings. :param np.array results: A 2d array where the outer axis iterates over shots and the inner axis over bits. :return: An array with as many axes as there are qubits and normalized such that it sums to one. ``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``. :rtype: np.array
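The histogram trick above can be checked by hand with plain NumPy (a self-contained sketch, independent of the library this entry comes from):

import numpy as np
results = np.array([[0, 0], [1, 1], [1, 1], [0, 1]])  # 4 shots, 2 qubits
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])  # [0, 3, 3, 1]
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** 2, 1))[0] / 4.0
# probs -> [0.25, 0.25, 0.0, 0.5] for bitstrings 00, 01, 10, 11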
23,723
def apply_mask(img, mask):
    from .mask import apply_mask
    vol, _ = apply_mask(img, mask)
    return vector_to_volume(vol, read_img(mask).get_data().astype(bool))
Return the image with the given `mask` applied.
23,724
def _set_rspan_access(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # the generated string arguments (rest-name, extension info,
        # namespace, defining module) were elided in the source;
        # *_ELIDED placeholders stand in for them
        t = YANGDynClass(v, base=rspan_access.rspan_access,
                         is_container='container', presence=False,
                         yang_name="rspan-access", rest_name=REST_NAME_ELIDED,
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions=EXTENSIONS_ELIDED,
                         namespace=NAMESPACE_ELIDED,
                         defining_module=MODULE_ELIDED,
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # pyangbind-style error dict; the key and message strings were
        # elided in the source and are assumed here
        raise ValueError({
            'error-string': ERROR_MSG_ELIDED,
            'defined-type': "container",
            'generated-type': GENERATED_TYPE_ELIDED,
        })
    self.__rspan_access = t
    if hasattr(self, '_set'):
        self._set()
Setter method for rspan_access, mapped from YANG variable /interface/fortygigabitethernet/switchport/access/rspan_access (container) If this variable is read-only (config: false) in the source YANG file, then _set_rspan_access is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rspan_access() directly. YANG Description: The access layer characteristics of this interface.
23,725
def add_directory(self, *args, **kwargs):
    # the kwarg name was elided in the source; 'exclusions' is taken
    # from the docstring
    exc = kwargs.get('exclusions', None)
    for path in args:
        self.files.append(DirectoryPath(path, self, exclusions=exc))
Add directory or directories list to bundle :param exclusions: List of excluded paths :type path: str|unicode :type exclusions: list
23,726
def add_region_location(self, region, locations=None, use_live=True):
    return self.add_country_locations(
        Country.get_countries_in_region(region, exception=HDXError,
                                        use_live=use_live),
        locations=locations)
Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a region name. If any country is already added, it is ignored. Args: region (str): M49 region, intermediate region or subregion to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries in region added or False if any already present.
23,727
def setup_actions(self):
    self.actionOpen.triggered.connect(self.on_open)
    self.actionNew.triggered.connect(self.on_new)
    self.actionSave.triggered.connect(self.on_save)
    self.actionSave_as.triggered.connect(self.on_save_as)
    self.actionQuit.triggered.connect(QtWidgets.QApplication.instance().quit)
    self.tabWidget.current_changed.connect(self.on_current_tab_changed)
    self.actionAbout.triggered.connect(self.on_about)
Connects slots to signals
23,728
def normalizeToTag(val):
    try:
        val = val.upper()
    except AttributeError:
        raise KeyError("{} is not a tag or name string".format(val))
    if val not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(val))
    else:
        try:
            return fullToTagDictUpper[val]
        except KeyError:
            return val
Converts tags or full names to 2 character tags, case insensitive # Parameters _val_: `str` > A two character string giving the tag or its full name # Returns `str` > The short name of _val_
23,729
def autocorrplot(trace, vars=None, fontmap=None, max_lag=100):
    try:
        traces = trace.traces
    except AttributeError:
        traces = [trace]
    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
    if vars is None:
        vars = traces[0].varnames
    samples = [{v: trace[v] for v in vars} for trace in traces]
    chains = len(traces)
    n = len(samples[0])
    f, ax = subplots(n, chains, squeeze=False)
    max_lag = min(len(samples[0][vars[0]]) - 1, max_lag)
    for i, v in enumerate(vars):
        for j in xrange(chains):
            d = np.squeeze(samples[j][v])
            ax[i, j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)
            if not j:
                ax[i, j].set_ylabel("correlation")
            ax[i, j].set_xlabel("lag")
            if chains > 1:
                ax[i, j].set_title("chain {0}".format(j + 1))
    # the property name passed to setp() was elided in the source;
    # 'fontsize' is assumed
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
Bar plot of the autocorrelation function for a trace
23,730
def fromurl(url):
    ps = PatchSet(urllib_request.urlopen(url))
    if ps.errors == 0:
        return ps
    return False
Parse patch from an URL, return False if an error occurred. Note that this can also throw urlopen() exceptions.
23,731
def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
    # the default values for `sign` and `axis` and the comparison strings
    # below were elided in the source; they are reconstructed from the
    # docstring's stated options
    if threshold_type == 'percent':
        netout = binarize_percent(netin, threshold_level, sign, axis)
    elif threshold_type == 'magnitude':
        netout = binarize_magnitude(netin, threshold_level, sign)
    elif threshold_type == 'rdp':
        netout = binarize_rdp(netin, threshold_level, sign, axis)
    else:
        raise ValueError()  # error message elided in source
    return netout
Binarizes a network, returning the network. General wrapper function for different binarization functions.

Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
threshold_type : str
    What type of thresholds to make binarization. Options: 'rdp',
    'percent', 'magnitude'.
threshold_level : str
    Parameter dependent on threshold type. If 'rdp', it is the delta
    (i.e. error allowed in compression). If 'percent', it is the
    percentage to keep (e.g. 0.1 means keep 10% of signal). If
    'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
    States the sign of the thresholding. Can be 'pos', 'neg' or 'both'.
    If 'neg', only negative values are thresholded and vice versa.
axis : str
    Threshold over specified axis. Valid for percent and rdp. Can be
    'time' or 'graphlet'.

Returns
-------
netout : array or dict (depending on input)
    Binarized network
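A hypothetical call, assuming the binarize function from this entry is importable:

import numpy as np
G = np.random.rand(5, 5, 10)        # graphlet representation: node x node x time
B = binarize(G, 'percent', 0.1)     # keep the strongest 10% of positive edges
B2 = binarize(G, 'magnitude', 0.8)  # keep edges with amplitude >= 0.8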
23,732
def count_rows(self, table_name):
    self.table_must_exist(table_name)
    query = "SELECT COUNT (*) FROM `%s`" % table_name.lower()
    self.own_cursor.execute(query)
    return int(self.own_cursor.fetchone()[0])
Return the number of entries in a table by counting them.
23,733
def _create_window_info(self, window, wm_title: str, wm_class: str):
    if "FocusProxy" in wm_class:
        parent = window.query_tree().parent
        return self._get_window_info(parent, False)
    else:
        return WindowInfo(wm_title=wm_title, wm_class=wm_class)
Creates a WindowInfo object from the window title and WM_CLASS. Also checks for the Java XFocusProxyWindow workaround and applies it if needed: Workaround for Java applications: Java AWT uses a XFocusProxyWindow class, so to get usable information, the parent window needs to be queried. Credits: https://github.com/mooz/xkeysnail/pull/32 https://github.com/JetBrains/jdk8u_jdk/blob/master/src/solaris/classes/sun/awt/X11/XFocusProxyWindow.java#L35
23,734
def retrieve_page(self, method, path, post_params={}, headers={}, status=200,
                  username=None, password=None, *args, **kwargs):
    headers = headers.copy()
    basicauth = self._prepare_basicauth(username, password)
    if basicauth:
        headers.update([basicauth])
    if method in ["PUT", "POST"]:
        datagen, form_hdrs = poster.encode.multipart_encode(post_params)
        body = "".join(datagen)
        headers.update(form_hdrs)
        uri = self._prepare_uri(path)
    else:
        body = ""
        uri = self._prepare_uri(path, post_params)
    response = self._make_request(uri, method, body, headers)
    if status:
        real_status = int(response.status_int)
        assert real_status == int(status), \
            "expected %s, received %s." % (status, real_status)
    return response
Makes the actual request. This will also go through and generate the needed steps to make the request, i.e. basic auth. ``method``: Any supported HTTP methods defined in :rfc:`2616`. ``path``: Absolute or relative path. See :meth:`_prepare_uri` for more detail. ``post_params``: Dictionary of key/value pairs to be added as `POST` parameters. ``headers``: Dictionary of key/value pairs to be added to the HTTP headers. ``status``: Will error out if the HTTP status code does not match this value. Set this to `None` to disable checking. ``username``, ``password``: Username and password for basic auth; see :meth:`_prepare_basicauth` for more detail. An important note is that when ``post_params`` is specified, its behavior depends on the ``method``. That is, for `PUT` and `POST` requests, the dictionary is multipart encoded and put into the body of the request. For everything else, it is added as a query string to the URL.
23,735
def _run(self):
    stop = self._stop_evt
    connected = self._connected_evt
    tws = self._tws
    fd = tws.fd()
    pollfd = [fd]
    while not stop.is_set():
        while (not connected.is_set() or not tws.isConnected()) and not stop.is_set():
            connected.clear()
            backoff = 0
            retries = 0
            while not connected.is_set() and not stop.is_set():
                if tws.reconnect_auto and not tws.reconnect():
                    if backoff < self.MAX_BACKOFF:
                        retries += 1
                        backoff = min(2 ** (retries + 1), self.MAX_BACKOFF)
                    connected.wait(backoff / 1000.)
                else:
                    connected.wait(1)
            fd = tws.fd()
            pollfd = [fd]
        if fd > 0:
            try:
                evtin, _evtout, evterr = select.select(pollfd, [], pollfd, 1)
            except select.error:
                connected.clear()
                continue
            else:
                if fd in evtin:
                    try:
                        if not tws.checkMessages():
                            tws.eDisconnect(stop_polling=False)
                            continue
                    except (SystemExit, SystemError, KeyboardInterrupt):
                        break
                    except:
                        try:
                            self._wrapper.pyError(*sys.exc_info())
                        except:
                            print_exc()
                elif fd in evterr:
                    connected.clear()
                    continue
Continually poll TWS
23,736
def slice_shift(self, periods=1, axis=0):
    if periods == 0:
        return self

    if periods > 0:
        vslicer = slice(None, -periods)
        islicer = slice(periods, None)
    else:
        vslicer = slice(-periods, None)
        islicer = slice(None, periods)

    new_obj = self._slice(vslicer, axis=axis)
    shifted_axis = self._get_axis(axis)[islicer]
    new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
    return new_obj.__finalize__(self)
Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment.
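A small illustration of the semantics (note that slice_shift was deprecated and later removed in newer pandas releases; shift(...).dropna() yields the same values):

import pandas as pd
s = pd.Series([10, 20, 30, 40, 50])
s.slice_shift(2)     # index [2, 3, 4], values [10, 20, 30]; dropped periods are not kept
s.shift(2).dropna()  # same values, produced by copying instead of slicing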
23,737
def sync(self, videoQuality, client=None, clientId=None, limit=None,
         unwatched=False, title=None):
    from plexapi.sync import SyncItem, Policy, MediaSettings
    myplex = self._server.myPlexAccount()
    sync_item = SyncItem(self._server, None)
    sync_item.title = title if title else self._defaultSyncTitle()
    sync_item.rootTitle = self.title
    sync_item.contentType = self.listType
    sync_item.metadataType = self.METADATA_TYPE
    sync_item.machineIdentifier = self._server.machineIdentifier
    section = self._server.library.sectionByID(self.librarySectionID)
    # the location format string was elided in the source; plexapi's
    # 'library://<section-uuid>/item/<key>' style URI is assumed
    sync_item.location = 'library://%s/item/%s' % (section.uuid, quote_plus(self.key))
    sync_item.policy = Policy.create(limit, unwatched)
    sync_item.mediaSettings = MediaSettings.createVideo(videoQuality)
    return myplex.sync(sync_item, client=client, clientId=clientId)
Add current video (movie, tv-show, season or episode) as sync item for specified device. See :func:`plexapi.myplex.MyPlexAccount.sync()` for possible exceptions. Parameters: videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in :mod:`plexapi.sync` module. client (:class:`plexapi.myplex.MyPlexDevice`): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`. clientId (str): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`. limit (int): maximum count of items to sync, unlimited if `None`. unwatched (bool): if `True` watched videos wouldn't be synced. title (str): descriptive title for the new :class:`plexapi.sync.SyncItem`, if empty the value would be generated from metadata of current media. Returns: :class:`plexapi.sync.SyncItem`: an instance of created syncItem.
23,738
def rdf_catalog():
    format = RDF_EXTENSIONS[negociate_content()]
    url = url_for(ENDPOINT_NAME_ELIDED, format=format)  # endpoint name elided in source
    return redirect(url)
Root RDF endpoint with content negotiation handling
23,739
async def _formulate_body(self):
    # NOTE: the MIME-type literals and error messages below were elided in
    # the source; the standard values are assumed and marked as such
    c_type, body = None, ''
    multipart_ctype = 'multipart/form-data; boundary={}'.format(_BOUNDARY)
    if self.data is not None:
        if self.files or self.json is not None:
            raise TypeError(MSG_CONFLICT_ELIDED)
        c_type = 'application/x-www-form-urlencoded'  # assumed
        try:
            body = self._dict_to_query(self.data, params=False)
        except AttributeError:
            body = self.data
            c_type = self.mimetype or 'text/plain'  # fallback assumed
    elif self.files is not None:
        if self.data or self.json is not None:
            raise TypeError(MSG_CONFLICT_ELIDED)
        c_type = multipart_ctype
        body = await self._multipart(self.files)
    elif self.json is not None:
        if self.data or self.files:
            raise TypeError(MSG_CONFLICT_ELIDED)
        c_type = 'application/json'  # assumed
        body = _json.dumps(self.json)
    return c_type, str(len(body)), body
Takes user-supplied data / files and forms it / them appropriately, returning the content type, the length, and the request body itself. Returns: The str mime type for the Content-Type header. The len of the body. The body as a str.
23,740
def pipe_uniq(context=None, _INPUT=None, conf=None, **kwargs):
    funcs = get_splits(None, conf, **cdicts(opts, kwargs))
    pieces, _pass = funcs[0](), funcs[2]()
    _OUTPUT = _INPUT if _pass else unique_items(_INPUT, pieces.field)
    return _OUTPUT
An operator that filters out non-unique items according to the specified field. Not loopable.

Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe-like object (iterable of items)
kwargs : other inputs, e.g. to feed terminals for rule values
conf : {'field': {'type': 'text', 'value': <field to be unique>}}

Returns
-------
_OUTPUT : generator of unique items
23,741
def _set_intf_isis(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # the generated string arguments were elided in the source;
        # *_ELIDED placeholders stand in for them
        t = YANGDynClass(v, base=intf_isis.intf_isis,
                         is_container='container', presence=True,
                         yang_name="intf-isis", rest_name=REST_NAME_ELIDED,
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions=EXTENSIONS_ELIDED,
                         namespace=NAMESPACE_ELIDED,
                         defining_module=MODULE_ELIDED,
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # pyangbind-style error dict; the key and message strings were
        # elided in the source and are assumed here
        raise ValueError({
            'error-string': ERROR_MSG_ELIDED,
            'defined-type': "container",
            'generated-type': GENERATED_TYPE_ELIDED,
        })
    self.__intf_isis = t
    if hasattr(self, '_set'):
        self._set()
Setter method for intf_isis, mapped from YANG variable /routing_system/interface/ve/intf_isis (container) If this variable is read-only (config: false) in the source YANG file, then _set_intf_isis is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_intf_isis() directly.
23,742
def create_post(self, title, body, board, category, username):
    category_url = "%s/categories.json" % board
    response = requests.get(category_url)
    if response.status_code != 200:
        print(MSG_RETRIEVE_ELIDED % category_url)  # message elided in source
        sys.exit(1)
    # the JSON field names below were elided in the source; the standard
    # Discourse API names are assumed
    categories = response.json()['category_list']['categories']
    categories = {c['name']: c['id'] for c in categories}
    if category not in categories:
        bot.warning(MSG_CATEGORY_ELIDED % category)  # message elided in source
    category_id = categories.get(category, None)
    headers = {"Content-Type": "application/json",
               "User-Api-Client-Id": self.client_id,
               "User-Api-Key": self.token}
    data = {'title': title, 'raw': body, 'category': category_id}
    response = requests.post("%s/posts.json" % board,
                             headers=headers,
                             data=json.dumps(data))
    if response.status_code in [200, 201, 202]:
        topic = response.json()
        url = "%s/t/%s/%s" % (board, topic['topic_slug'], topic['topic_id'])
        bot.info(url)
        return url
    elif response.status_code == 404:
        bot.error(MSG_NOT_FOUND_ELIDED)  # message elided in source
        sys.exit(1)
    else:
        bot.error(MSG_FAILED_ELIDED % board)  # message elided in source
        bot.error(response.content)
        sys.exit(1)
create a Discourse post, given a title, body, board, and token. Parameters ========== title: the issue title body: the issue body board: the discourse board to post to
23,743
def head(self, lines=10):
    self.seek(0)
    for i in range(lines):
        if not self.seek_line_forward():
            break
    end_pos = self.file.tell()
    self.seek(0)
    data = self.file.read(end_pos - 1)
    if data:
        return self.splitlines(data)
    else:
        return []
Return the top lines of the file.
23,744
def serialize_to_normalized_compact_json(py_obj):
    # the separator literals were elided in the source; (',', ':') follows
    # from "compact JSON" with no whitespace between elements
    return json.dumps(py_obj, sort_keys=True, separators=(',', ':'),
                      cls=ToJsonCompatibleTypes)
Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string.
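The normalization amounts to standard-library json options; a runnable check:

import json
obj = {"b": 1, "a": [2, 3]}
json.dumps(obj, sort_keys=True, separators=(',', ':'))
# -> '{"a":[2,3],"b":1}'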
23,745
def save_experiment(self, name, variants):
    try:
        model.Experiment(
            name=name,
            started_on=datetime.utcnow(),
            variants=[model.Variant(name=v, order=i)
                      for i, v in enumerate(variants)]
        )
        self.Session.commit()
    finally:
        self.Session.close()
Persist an experiment and its variants (unless they already exist). :param name a unique string name for the experiment :param variants a list of strings, each with a unique variant name
23,746
def clear_dcnm_in_part(self, tenant_id, fw_dict, is_fw_virt=False):
    res = fw_const.DCNM_IN_PART_UPDDEL_SUCCESS
    tenant_name = fw_dict.get('tenant_name')  # key elided in source; assumed
    ret = True
    try:
        self._update_partition_in_delete(tenant_name)
    except Exception as exc:
        # the dict keys are recovered from the %(tenant)s / %(exc)s
        # placeholders in the format string
        LOG.error("Clear of In Partition failed for tenant %(tenant)s"
                  " , Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        res = fw_const.DCNM_IN_PART_UPDDEL_FAIL
        ret = False
    self.update_fw_db_result(tenant_id, dcnm_status=res)
    LOG.info("In partition cleared off service ip addr")
    return ret
Clear the DCNM in partition service information. Clear the In partition service node IP address in DCNM and update the result.
23,747
def get_firmware(self):
    self.get_status()
    try:
        self.firmware = self.data[FIRMWARE_KEY_ELIDED]  # key elided in source
    except TypeError:
        self.firmware = FIRMWARE_FALLBACK_ELIDED  # fallback literal elided in source
    return self.firmware
Get the current firmware version.
23,748
def getPotential(self, columnIndex, potential):
    assert columnIndex < self._numColumns
    potential[:] = self._potentialPools[columnIndex]
:param columnIndex: (int) column index to get potential for. :param potential: (list) will be overwritten with column potentials. Must match the number of inputs.
23,749
def workspace_backup_restore(ctx, choose_first, bak):
    backup_manager = WorkspaceBackupManager(Workspace(
        ctx.resolver, directory=ctx.directory,
        mets_basename=ctx.mets_basename,
        automatic_backup=ctx.automatic_backup))
    backup_manager.restore(bak, choose_first)
Restore backup BAK
23,750
def execute(self):
    if bool(self.files) is False:
        response = requests.post(self.url, json=self.json,
                                 proxies=self.proxies)
    else:
        # the multipart field name was elided in the source; Discord's
        # standard 'payload_json' field is assumed
        self.files['payload_json'] = (None, json.dumps(self.json))
        response = requests.post(self.url, files=self.files,
                                 proxies=self.proxies)
    if response.status_code in [200, 204]:
        logger.debug("Webhook executed")
    else:
        # the log-format string was elided in the source
        logger.error(LOG_FMT_ELIDED % (response.status_code,
                                       response.content.decode("utf-8")))
execute Webhook :return:
23,751
def get_correctness(self, question_id):
    response = self.get_response(question_id)
    if response.is_answered():
        item = self._get_item(response.get_item_id())
        return item.get_correctness_for_response(response)
    raise errors.IllegalState()
get measure of correctness for the question
23,752
def section_term_lengths(neurites, neurite_type=NeuriteType.all):
    return map_sections(_section_length, neurites,
                        neurite_type=neurite_type,
                        iterator_type=Tree.ileaf)
Termination section lengths in a collection of neurites
23,753
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
    for vkey, value in obj.items():
        if vkey == key:
            validation_fun(value)
        elif isinstance(value, dict):
            validate_all_values_for_key_in_obj(value, key, validation_fun)
        elif isinstance(value, list):
            validate_all_values_for_key_in_list(value, key, validation_fun)
Validate value for all (nested) occurrence of `key` in `obj` using `validation_fun`. Args: obj (dict): dictionary object. key (str): key whose value is to be validated. validation_fun (function): function used to validate the value of `key`. Raises: ValidationError: `validation_fun` will raise this error on failure
23,754
def absdeg(deg):
    import numpy
    d = numpy.copy(deg)
    if numpy.max(numpy.abs(deg)) > 90.0:
        d[deg < 0] = 360 + deg[deg < 0]
    else:
        d[deg < 0] = 180 + deg[deg < 0]
    return d
Change from signed degrees to 0-180 or 0-360 ranges

deg: ndarray
    Movement data in pitch, roll, yaw (degrees)

Returns
-------
deg_abs: ndarray
    Movement translated from -180:180/-90:90 degrees to 0:360/0:180 degrees

Example
-------
deg = numpy.array([-170, -120, 0, 90])
absdeg(deg)  # returns array([190, 240, 0, 90])
23,755
def match_classes(self, el, classes):
    current_classes = self.get_classes(el)
    found = True
    for c in classes:
        if c not in current_classes:
            found = False
            break
    return found
Match element's classes.
23,756
def _fit_newton(self, fitcache=None, ebin=None, **kwargs):
    # NOTE: the kwarg names, config paths, dict keys, and log messages below
    # were elided in the source; fermipy's conventional names are assumed
    # throughout, and *_ELIDED placeholders mark unrecoverable strings.
    tol = kwargs.get('tol', self.config['optimizer']['tol'])
    max_iter = kwargs.get('max_iter', self.config['optimizer']['max_iter'])
    init_lambda = kwargs.get('init_lambda', self.config['optimizer']['init_lambda'])
    use_reduced = kwargs.get('use_reduced', True)

    free_params = self.get_params(True)
    free_norm_params = [p for p in free_params if p['is_norm'] is True]

    if len(free_params) != len(free_norm_params):
        msg = MSG_SHAPE_PARAMS_ELIDED
        self.logger.error(msg)
        raise Exception(msg)

    verbosity = kwargs.get('verbosity', 0)
    if fitcache is None:
        fitcache = self._create_fitcache(**kwargs)

    fitcache.update(self.get_params(), tol, max_iter, init_lambda, use_reduced)

    logemin = self.loge_bounds[0]
    logemax = self.loge_bounds[1]
    imin = int(utils.val_to_edge(self.log_energies, logemin)[0])
    imax = int(utils.val_to_edge(self.log_energies, logemax)[0])

    if ebin is not None:
        fitcache.fitcache.setEnergyBin(ebin)
    elif imin == 0 and imax == self.enumbins:
        fitcache.fitcache.setEnergyBin(-1)
    else:
        fitcache.fitcache.setEnergyBins(imin, imax)

    num_free = len(free_norm_params)
    o = {'fit_status': 0,
         'fit_quality': 3,
         'fit_success': True,
         'edm': 0,
         'loglike': None,
         'values': np.ones(num_free) * np.nan,
         'errors': np.ones(num_free) * np.nan,
         'indices': np.zeros(num_free, dtype=int),
         'is_norm': np.empty(num_free, dtype=bool),
         'src_names': num_free * [None],
         'par_names': num_free * [None]}

    if num_free == 0:
        return o

    ref_vals = np.array(fitcache.fitcache.refValues())
    free = np.array(fitcache.fitcache.currentFree())
    norm_vals = ref_vals[free]

    norm_idxs = []
    for i, p in enumerate(free_norm_params):
        norm_idxs += [p['idx']]
        o['indices'][i] = p['idx']
        o['src_names'][i] = p['src_name']
        o['par_names'][i] = p['par_name']
        o['is_norm'][i] = p['is_norm']

    o['fit_status'] = fitcache.fit(verbose=verbosity)
    o['edm'] = fitcache.fitcache.currentEDM()

    pars, errs, cov = fitcache.get_pars()
    pars *= norm_vals
    errs *= norm_vals
    cov = cov * np.outer(norm_vals, norm_vals)

    o['values'] = pars
    o['errors'] = errs
    o['covariance'] = cov
    errinv = np.zeros_like(o['errors'])
    m = o['errors'] > 0
    errinv[m] = 1. / o['errors'][m]
    o['correlation'] = o['covariance'] * np.outer(errinv, errinv)

    if o['fit_status'] in [-2, 0]:
        for idx, val, err in zip(norm_idxs, pars, errs):
            self._set_value_bounded(idx, val)
            self.like[idx].setError(err)
        self.like.syncSrcParams()
        o['fit_success'] = True
    else:
        o['fit_success'] = False

    if o['fit_status']:
        self.logger.error(MSG_FIT_STATUS_ELIDED, o['fit_status'])

    loglike = -self.like()
    o['loglike'] = loglike
    return o
Fast fitting method using newton fitter.
23,757
def update(self):
    with self._lock:
        if self._state == Bundle.ACTIVE:
            self.stop()
            self.start()
Stops and starts the framework, if the framework is active. :raise BundleException: Something wrong occurred while stopping or starting the framework.
23,758
def install_lib(url, replace_existing=False, fix_wprogram=True):
    d = tmpdir(tmpdir())
    f = download(url)
    Archive(f).extractall(d)
    clean_dir(d)
    d, src_dlib = find_lib_dir(d)
    move_examples(d, src_dlib)
    fix_examples_dir(src_dlib)
    if fix_wprogram:
        fix_wprogram_in_files(src_dlib)
    targ_dlib = libraries_dir() / src_dlib.name
    # the log/error message literals below were elided in the source
    if targ_dlib.exists():
        log.debug(MSG_EXISTS_ELIDED, targ_dlib)
        if replace_existing:
            log.debug(MSG_REMOVING_ELIDED, targ_dlib)
            targ_dlib.rmtree()
        else:
            raise ConfduinoError(MSG_ERROR_ELIDED + targ_dlib)
    log.debug(MSG_MOVING_ELIDED, src_dlib, targ_dlib)
    src_dlib.move(targ_dlib)
    libraries_dir().copymode(targ_dlib)
    for x in targ_dlib.walk():
        libraries_dir().copymode(x)
    return targ_dlib.name
install library from web or local files system. :param url: web address or file path :param replace_existing: bool :rtype: None
23,759
def intervals_to_fragment_list(self, text_file, time_values):
    if not isinstance(text_file, TextFile):
        self.log_exc(u"text_file is not an instance of TextFile", None, True, TypeError)
    if not isinstance(time_values, list):
        self.log_exc(u"time_values is not a list", None, True, TypeError)
    if len(time_values) < 4:
        self.log_exc(u"time_values has length < 4", None, True, ValueError)
    self.log(u"Converting time values to fragment list...")
    begin = time_values[0]
    end = time_values[-1]
    self.log([u"  Creating SyncMapFragmentList with begin %.3f and end %.3f", begin, end])
    self.smflist = SyncMapFragmentList(
        begin=begin,
        end=end,
        rconf=self.rconf,
        logger=self.logger
    )
    self.log(u"  Creating HEAD fragment")
    self.smflist.add(SyncMapFragment(
        text_fragment=TextFragment(identifier=u"HEAD", lines=[], filtered_lines=[]),
        begin=time_values[0],
        end=time_values[1],
        fragment_type=SyncMapFragment.HEAD
    ), sort=False)
    self.log(u"  Creating REGULAR fragments")
    fragments = text_file.fragments
    for i in range(1, len(time_values) - 2):
        self.log([u"  Adding fragment %d ...", i])
        self.smflist.add(SyncMapFragment(
            text_fragment=fragments[i - 1],
            begin=time_values[i],
            end=time_values[i + 1],
            fragment_type=SyncMapFragment.REGULAR
        ), sort=False)
        self.log([u"  Adding fragment %d ... done", i])
    self.log(u"  Creating TAIL fragment")
    self.smflist.add(SyncMapFragment(
        text_fragment=TextFragment(identifier=u"TAIL", lines=[], filtered_lines=[]),
        begin=time_values[len(time_values) - 2],
        end=end,
        fragment_type=SyncMapFragment.TAIL
    ), sort=False)
    self.log(u"Converting time values to fragment list... done")
    self.log(u"Sorting fragment list...")
    self.smflist.sort()
    self.log(u"Sorting fragment list... done")
    return self.smflist
Transform a list of at least 4 time values (corresponding to at least 3 intervals) into a sync map fragment list and store it internally. The first interval is a HEAD, the last is a TAIL. For example: time_values=[0.000, 1.000, 2.000, 3.456] => [(0.000, 1.000), (1.000, 2.000), (2.000, 3.456)] :param text_file: the text file containing the text fragments associated :type text_file: :class:`~aeneas.textfile.TextFile` :param time_values: the time values :type time_values: list of :class:`~aeneas.exacttiming.TimeValue` :raises: TypeError: if ``text_file`` is not an instance of :class:`~aeneas.textfile.TextFile` or ``time_values`` is not a list :raises: ValueError: if ``time_values`` has length less than four
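The interval construction itself is just pairing consecutive time values; a minimal sketch of the mapping in the example above:

time_values = [0.000, 1.000, 2.000, 3.456]
intervals = list(zip(time_values, time_values[1:]))
# [(0.0, 1.0), (1.0, 2.0), (2.0, 3.456)] -> HEAD, one REGULAR fragment, TAIL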
23,760
def active(self, include=None):
    return self._get(self._build_url(self.endpoint.active(include=include)))
Return all active views.
23,761
def find_module(module, paths=None):
    # the two elided literals are assumed: '.' as the module-path separator
    # and '__init__' for package directories (as in setuptools)
    parts = module.split('.')
    while parts:
        part = parts.pop(0)
        f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
        if kind == PKG_DIRECTORY:
            parts = parts or ['__init__']
            paths = [path]
        elif parts:
            raise ImportError("Can't find %r in %s" % (parts, module))
    return info
Just like 'imp.find_module()', but with package support
23,762
def db_create(name, character_set=None, collate=None, **connection_args):
    if db_exists(name, **connection_args):
        log.info("DB '%s' already exists", name)
        return False
    # (the database-creation logic between these branches was lost in
    # extraction; only the failure path below survives, and `err` is
    # defined in the missing portion)
    log.error(err)
    return False
Adds a database to the MySQL server.

name
    The name of the database to manage

character_set
    The character set, if left empty the MySQL default will be used

collate
    The collation, if left empty the MySQL default will be used

CLI Example:

.. code-block:: bash

    salt '*' mysql.db_create 'dbname'
    salt '*' mysql.db_create 'dbname' 'utf8' 'utf8_general_ci'
23,763
def getSets(self, **kwargs):
    # the parameter-name strings and defaults were elided in the source;
    # they are reconstructed here from the docstring's parameter list
    params = {
        'apiKey': self.apiKey,
        'userHash': self.userHash,
        'query': kwargs.get('query', ''),
        'theme': kwargs.get('theme', ''),
        'subtheme': kwargs.get('subtheme', ''),
        'setNumber': kwargs.get('setNumber', ''),
        'year': kwargs.get('year', ''),
        'owned': kwargs.get('owned', ''),
        'wanted': kwargs.get('wanted', ''),
        'orderBy': kwargs.get('orderBy', 'Number'),
        'pageSize': kwargs.get('pageSize', 20),
        'pageNumber': kwargs.get('pageNumber', 1),
        'userName': kwargs.get('userName', '')
    }
    url = Client.ENDPOINT.format('getSets')  # endpoint name elided; assumed
    returned = get(url, params=params)
    self.checkResponse(returned)
    root = ET.fromstring(returned.text)
    return [Build(i, self) for i in root]
A way to get different sets from a query. All parameters are optional, but you should probably use some (so that you get results) :param str query: The thing you're searching for. :param str theme: The theme of the set. :param str subtheme: The subtheme of the set. :param str setNumber: The LEGO set number. :param str year: The year in which the set came out. :param int owned: Whether or not you own the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param int wanted: Whether or not you want the set. Only works when logged in with :meth:`login`. Set to `1` to make true. :param str orderBy: How you want the set ordered. Accepts 'Number', 'YearFrom', 'Pieces', 'Minifigs', 'Rating', 'UKRetailPrice', 'USRetailPrice', 'CARetailPrice', 'EURetailPrice', 'Theme', 'Subtheme', 'Name', 'Random'. Add 'DESC' to the end to sort descending, e.g. NameDESC. Case insensitive. Defaults to 'Number'. :param int pageSize: How many results are on a page. Defaults to 20. :param int pageNumber: The number of the page you're looking at. Defaults to 1. :param str userName: The name of a user whose sets you want to search. :returns: A list of :class:`brickfront.build.Build` objects. :rtype: list
23,764
def convert_complexFaultSource(self, node):
    geom = node.complexFaultGeometry
    edges = self.geo_lines(geom)
    mfd = self.convert_mfdist(node)
    msr = valid.SCALEREL[~node.magScaleRel]()
    with context(self.fname, node):
        # the attribute-name strings were elided in the source; the
        # standard NRML names are assumed
        cmplx = source.ComplexFaultSource(
            source_id=node['id'],
            name=node['name'],
            tectonic_region_type=node.attrib.get('tectonicRegion'),
            mfd=mfd,
            rupture_mesh_spacing=self.complex_fault_mesh_spacing,
            magnitude_scaling_relationship=msr,
            rupture_aspect_ratio=~node.ruptAspectRatio,
            edges=edges,
            rake=~node.rake,
            temporal_occurrence_model=self.get_tom(node))
    return cmplx
Convert the given node into a complex fault object. :param node: a node with tag complexFaultSource :returns: a :class:`openquake.hazardlib.source.ComplexFaultSource` instance
23,765
def get_workflow_info(func_list):
    funcs = []
    for item in func_list:
        if item is None:
            continue
        if isinstance(item, list):
            funcs.append(get_workflow_info(item))
        else:
            funcs.append(get_func_info(item))
    return funcs
Return function info, go through lists recursively.
23,766
def instance_from_str(instance_str):
    match = instance_str_re.match(instance_str)
    if not match:
        raise ValueError("Invalid instance string")
    model_string = match.group(1)
    try:
        model = apps.get_model(model_string)
    except (LookupError, ValueError):
        raise ValueError("Invalid instance string")
    pk = match.group(2)
    if pk:
        try:
            return model, model._default_manager.get(pk=pk)
        except model.DoesNotExist:
            raise ValueError("Invalid instance string")
    return model, None
Given an instance string in the form "app.Model:pk", returns a tuple of ``(model, instance)``. If the pk part is empty, ``instance`` will be ``None``. Raises ``ValueError`` on invalid model strings or missing instances.
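A hypothetical round-trip, assuming a Django project with the auth app installed:

model, instance = instance_from_str("auth.user:1")  # -> (User, <User with pk=1>)
model, instance = instance_from_str("auth.user:")   # -> (User, None)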
23,767
def delete_subtree(self, nodes):
    if self._validate_node_name(nodes):
        raise RuntimeError("Argument `nodes` is not valid")
    self._delete_subtree(nodes)
r""" Delete nodes (and their sub-trees) from the tree. :param nodes: Node(s) to delete :type nodes: :ref:`NodeName` or list of :ref:`NodeName` :raises: * RuntimeError (Argument \`nodes\` is not valid) * RuntimeError (Node *[node_name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> tobj.delete_subtree(['root.branch1.leaf1', 'root.branch2']) >>> print(tobj) root └branch1 (*) └leaf2 (*) └subleaf2
23,768
def table_repr(columns, rows, data, padding=2):
    # the elided literals follow from context: space padding and a
    # newline-joined table
    padding = ' ' * padding
    column_lengths = [len(column) for column in columns]
    for row in rows:
        for i, column in enumerate(columns):
            item = str(data[row][column])
            column_lengths[i] = max(len(item), column_lengths[i])
    max_row_length = max(len(row) for row in rows) if len(rows) else 0
    table_row = ' ' * max_row_length
    for i, column in enumerate(columns):
        table_row += padding + column.rjust(column_lengths[i])
    table_rows = [table_row]
    for row in rows:
        table_row = row.rjust(max_row_length)
        for i, column in enumerate(columns):
            item = str(data[row][column])
            table_row += padding + item.rjust(column_lengths[i])
        table_rows.append(table_row)
    return '\n'.join(table_rows)
Generate a table for cli output
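A usage sketch, assuming table_repr from this entry is in scope:

columns = ['qty', 'price']
rows = ['apples', 'pears']
data = {'apples': {'qty': 3, 'price': 0.5},
        'pears': {'qty': 10, 'price': 0.25}}
print(table_repr(columns, rows, data))
# right-aligned columns padded with two spaces, one header line, one line per row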
23,769
def get_object(self, view_name, view_args, view_kwargs):
    lookup_value = view_kwargs.get(self.lookup_url_kwarg)
    parent_lookup_value = view_kwargs.get(self.parent_lookup_field)
    lookup_kwargs = {
        self.lookup_field: lookup_value,
    }
    if parent_lookup_value:
        lookup_kwargs.update({self.parent_lookup_field: parent_lookup_value})
    return self.get_queryset().get(**lookup_kwargs)
Return the object corresponding to a matched URL. Takes the matched URL conf arguments, and should return an object instance, or raise an `ObjectDoesNotExist` exception.
23,770
def BVV(value, size=None, **kwargs):
    if type(value) in (bytes, str):
        if type(value) is str:
            l.warning("BVV value is a unicode string, encoding as utf-8")
            value = value.encode()
        if size is None:
            size = len(value) * 8
        elif type(size) is not int:
            raise TypeError("Bitvector size must be either absent (implicit) or an integer")
        elif size != len(value) * 8:
            raise ClaripyValueError(MSG_SIZE_MISMATCH_ELIDED)  # message elided in source
        value = int(binascii.hexlify(value), 16) if value != b"" else 0
    elif size is None or (type(value) is not int and value is not None):
        raise TypeError(MSG_BAD_ARGS_ELIDED)  # message elided in source
    if value is not None:
        value &= (1 << size) - 1
    if not kwargs:
        try:
            return _bvv_cache[(value, size)]
        except KeyError:
            pass
    # the op-name literal was elided in the source; claripy's 'BVV' is assumed
    result = BV('BVV', (value, size), length=size, **kwargs)
    _bvv_cache[(value, size)] = result
    return result
Creates a bit-vector value (i.e., a concrete value). :param value: The value. Either an integer or a string. If it's a string, it will be interpreted as the bytes of a big-endian constant. :param size: The size (in bits) of the bit-vector. Optional if you provide a string, required for an integer. :returns: A BV object representing this value.
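Claripy exposes this at the top level; a short, runnable example (requires the claripy package):

import claripy
claripy.BVV(0x41, 8)  # 8-bit concrete bit-vector holding 0x41
claripy.BVV(b"AB")    # size inferred from the bytes: 16 bits, big-endian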
23,771
def reset(self):
    if self.__row_number > self.__sample_size:
        self.__parser.reset()
        self.__extract_sample()
        self.__extract_headers()
    self.__row_number = 0
Resets the stream pointer to the beginning of the file.
23,772
def _pre_job_handling(self, job):
    if self._low_priority:
        self._release_gil(len(self._nodes), 20, 0.0001)
    self._deregister_analysis_job(job.func_addr, job)
    if not self._inside_regions(job.addr):
        obj = self.project.loader.find_object_containing(job.addr)
        if obj is not None and isinstance(obj, self._cle_pseudo_objects):
            pass
        else:
            if self._show_progressbar or self._progress_callback:
                max_percentage_stage_1 = 50.0
                percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._regions_size
                if percentage > max_percentage_stage_1:
                    percentage = max_percentage_stage_1
                self._update_progress(percentage, cfg=self)
Some pre job-processing tasks, like update progress bar. :param CFGJob job: The CFGJob instance. :return: None
23,773
def __get_doc_block_parts_wrapper(self):
    self.__get_doc_block_parts_source()
    helper = self._get_data_type_helper()
    parameters = list()
    # all dict-key strings in this method were elided in the source;
    # KEY_*_ELIDED placeholders stand in for them
    for parameter_info in self._parameters:
        parameters.append(
            {KEY_NAME_ELIDED: parameter_info[KEY_NAME_ELIDED],
             KEY_TYPE_ELIDED: helper.column_type_to_python_type(parameter_info),
             KEY_DTYPE_ELIDED: parameter_info[KEY_DTYPE_ELIDED],
             KEY_DESC_ELIDED: self.__get_parameter_doc_description(
                 parameter_info[KEY_NAME_ELIDED])})
    self._doc_block_parts_wrapper[KEY_DOC_ELIDED] = \
        self._doc_block_parts_source[KEY_DOC_ELIDED]
    self._doc_block_parts_wrapper[KEY_PARAMS_ELIDED] = parameters
Generates the DocBlock parts to be used by the wrapper generator.
23,774
def deprecated(function):
    def IssueDeprecationWarning(*args, **kwargs):
        # the simplefilter action and warning template were elided in the
        # source; 'default' and a generic message are assumed
        warnings.simplefilter('default', DeprecationWarning)
        warnings.warn('Call to deprecated function: {0:s}.'.format(
            function.__name__), category=DeprecationWarning, stacklevel=2)
        return function(*args, **kwargs)

    IssueDeprecationWarning.__name__ = function.__name__
    IssueDeprecationWarning.__doc__ = function.__doc__
    IssueDeprecationWarning.__dict__.update(function.__dict__)
    return IssueDeprecationWarning
Decorator to mark functions or methods as deprecated.
23,775
def _proc_sparse(self, tarfile):
    structs, isextended, origsize = self._sparse_structs
    del self._sparse_structs
    while isextended:
        buf = tarfile.fileobj.read(BLOCKSIZE)
        pos = 0
        for i in range(21):
            try:
                offset = nti(buf[pos:pos + 12])
                numbytes = nti(buf[pos + 12:pos + 24])
            except ValueError:
                break
            if offset and numbytes:
                structs.append((offset, numbytes))
            pos += 24
        isextended = bool(buf[504])
    self.sparse = structs
    self.offset_data = tarfile.fileobj.tell()
    tarfile.offset = self.offset_data + self._block(self.size)
    self.size = origsize
    return self
Process a GNU sparse header plus extra headers.
23,776
def get(self, artifact):
    coord = self._key(artifact)
    if coord in self._artifacts_to_versions:
        return self._artifacts_to_versions[coord]
    return artifact
Gets the coordinate with the correct version for the given artifact coordinate. :param M2Coordinate artifact: the coordinate to lookup. :return: a coordinate which is the same as the input, but with the correct pinned version. If this artifact set does not pin a version for the input artifact, this just returns the original coordinate. :rtype: M2Coordinate
23,777
def backdate(res, date=None, as_datetime=False, fmt='%Y-%m-%d'):
    # the default strftime format was elided in the source; '%Y-%m-%d' is assumed
    if res is None:
        return None
    if date is None:
        date = datetime.datetime.now()
    else:
        try:
            date = parse_date(date)
        except Exception:
            pass
    new_date = date
    periods = int("".join([s for s in res if s.isdigit()]))
    if periods > 0:
        if "K" in res:
            new_date = date - datetime.timedelta(microseconds=periods)
        elif "S" in res:
            new_date = date - datetime.timedelta(seconds=periods)
        elif "T" in res:
            new_date = date - datetime.timedelta(minutes=periods)
        elif "H" in res or "V" in res:
            new_date = date - datetime.timedelta(hours=periods)
        elif "W" in res:
            new_date = date - datetime.timedelta(weeks=periods)
        else:
            new_date = date - datetime.timedelta(days=periods)
    # roll back to the previous weekday
    while new_date.weekday() > 4:
        new_date = backdate(res="1D", date=new_date, as_datetime=True)
    if as_datetime:
        return new_date
    return new_date.strftime(fmt)
Get a past date based on the current date.
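Hypothetical calls, assuming backdate from this entry is in scope (the resolution codes follow the code above: K/S/T/H or V/W, otherwise days):

backdate("3D")                        # three days back, rolled to a weekday, as a string
backdate("2H", date="2019-01-10 12:00:00",
         as_datetime=True)            # two hours back, returned as a datetime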
23,778
def pause_and_wait_for_user(self, timeout=None, prompt_text=''):
    # NOTE: many string literals (locator arguments, template names, and
    # replacement tokens) were elided in the source; *_ELIDED placeholders
    # stand in for them
    timeout = timeout if timeout is not None else self.user_wait_timeout
    self.paused = True

    def check_user_ready(driver):
        if driver.paused:
            if driver.is_user_ready():
                driver.paused = False
                return True
            else:
                if not driver.is_present(Locator(BY_ELIDED, VALUE_ELIDED,
                                                 NAME_ELIDED)):
                    pause_html = (staticreader.read_html_file(HTML_FILE_ELIDED)
                                  .replace(TOKEN_A_ELIDED, REPLACEMENT_ELIDED)
                                  .replace(TOKEN_B_ELIDED, prompt_text))
                    webdriver_style = staticreader.read_css_file(
                        CSS_FILE_ELIDED).replace(TOKEN_C_ELIDED, TOKEN_D_ELIDED)
                    driver.js_executor.execute_template_and_return_result(
                        TEMPLATE_A_ELIDED, {KEY_STYLE_ELIDED: webdriver_style})
                    driver.js_executor.execute_template_and_return_result(
                        TEMPLATE_B_ELIDED, {KEY_ID_ELIDED: ID_ELIDED,
                                            KEY_HTML_ELIDED: pause_html})
        return False

    self.wait_until(
        lambda: check_user_ready(self),
        timeout=timeout,
        failure_message=MSG_ELIDED
    )
    self.js_executor.execute_template_and_return_result(
        TEMPLATE_C_ELIDED, {KEY_ELIDED: VALUE_B_ELIDED})
Injects a radio button into the page and waits for the user to click it; will raise an exception if the radio to resume is never checked @return: None
23,779
def verify_order(self, hostname, domain, location, hourly, flavor, router=None):
    create_options = self._generate_create_dict(hostname=hostname,
                                                router=router,
                                                domain=domain,
                                                flavor=flavor,
                                                datacenter=location,
                                                hourly=hourly)
    # the service name was elided in the source; SoftLayer's
    # 'Product_Order' service is assumed
    return self.client['Product_Order'].verifyOrder(create_options)
Verifies an order for a dedicated host. See :func:`place_order` for a list of available options.
23,780
async def pulse(self, *args, **kwargs):
    return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
Publish a Pulse Message Publish a message on pulse with the given `routingKey`. This method takes input: ``v1/pulse-request.json#`` This method is ``experimental``
23,781
def copy(self, name=None, description=None, meta=None):
    cls = self.__class__
    kwargs = self._rel(copy=True)
    kwargs.update(self._data(copy=True))
    # the key strings were elided in the source; the parameter names
    # are assumed
    if name is not None:
        kwargs['name'] = name
    if description is not None:
        kwargs['description'] = description
    if meta is not None:
        kwargs['meta'] = meta
    return cls(**kwargs)
Create a copy of the current object (may alter the container's name, description, and update the metadata if needed).
23,782
def _merge_objects(tref, merged, obj):
    size = None
    for (timestamp, tsize) in obj.snapshots:
        if timestamp == tref:
            size = tsize
    if size:
        _merge_asized(merged, size)
Merge the snapshot size information of multiple tracked objects. The tracked object `obj` is scanned for size information at time `tref`. The sizes are merged into **Asized** instance `merged`.
23,783
def H(self, H):
    self._H = H
    self._T = self._calculate_T(H)
Set the enthalpy of the package to the specified value, and recalculate its temperature. :param H: The new enthalpy value. [kWh]
23,784
def dict(self, **kwargs):
    return dict(
        time=self.timestamp,
        event_data=self.event_data,
        event_type=self.event_type,
        partition=self.partition,
        report_code=self.report_code,
        event_prefix=self.event_prefix,
        event_source=self.event_source,
        event_status=self.event_status,
        event_code=hex(self.event_code),
        event_description=self.event_description,
        **kwargs
    )
Dictionary representation
23,785
def parse_unifrac_v1_8(unifrac, file_data):
    for line in file_data:
        if line == "":
            break
        line = line.split("\t")
        unifrac["pcd"][line[0]] = [float(e) for e in line[1:]]
    unifrac["eigvals"] = [float(entry) for entry in file_data[-2].split("\t")[1:]]
    unifrac["varexp"] = [float(entry) for entry in file_data[-1].split("\t")[1:]]
    return unifrac
Function to parse data from older version of unifrac file obtained from Qiime version 1.8 and earlier. :type unifrac: dict :param unifrac: The dictionary of unifrac results to populate. :type file_data: list :param file_data: Unifrac data lines after stripping whitespace characters.
23,786
def login(team=None):
    _check_team_id(team)
    _check_team_exists(team)
    _check_team_login(team)
    login_url = "%s/login" % get_registry_url(team)
    print("Launching a web browser...")
    print("If that didn't work, please visit the following URL: %s" % login_url)
    _open_url(login_url)
    print()
    refresh_token = input("Enter the code from the webpage: ")
    login_with_token(refresh_token, team)
Authenticate. Launches a web browser and asks the user for a token.
23,787
def normalize_key_phrases(path, ranks, stopwords=None, spacy_nlp=None, skip_ner=True):
    global STOPWORDS, SPACY_NLP
    if (type(stopwords) is list) or (type(stopwords) is set):
        stopwords = set(stopwords)
    else:
        if not STOPWORDS:
            STOPWORDS = load_stopwords(stopwords)
        stopwords = STOPWORDS
    if not spacy_nlp:
        if not SPACY_NLP:
            SPACY_NLP = spacy.load("en")
        spacy_nlp = SPACY_NLP
    single_lex = {}
    phrase_lex = {}
    if isinstance(path, str):
        path = json_iter(path)
    for meta in path:
        sent = [w for w in map(WordNode._make, meta["graf"])]
        for rl in collect_keyword(sent, ranks, stopwords):
            id = str(rl.ids)
            if id not in single_lex:
                single_lex[id] = rl
            else:
                prev_lex = single_lex[id]
                single_lex[id] = rl._replace(count=prev_lex.count + 1)
        if not skip_ner:
            for rl in collect_entities(sent, ranks, stopwords, spacy_nlp):
                id = str(rl.ids)
                if id not in phrase_lex:
                    phrase_lex[id] = rl
                else:
                    prev_lex = phrase_lex[id]
                    phrase_lex[id] = rl._replace(count=prev_lex.count + 1)
        for rl in collect_phrases(sent, ranks, spacy_nlp):
            id = str(rl.ids)
            if id not in phrase_lex:
                phrase_lex[id] = rl
            else:
                prev_lex = phrase_lex[id]
                phrase_lex[id] = rl._replace(count=prev_lex.count + 1)
    rank_list = [rl.rank for rl in single_lex.values()]
    if len(rank_list) < 1:
        max_single_rank = 0
    else:
        max_single_rank = max(rank_list)
    repeated_roots = {}
    for rl in sorted(phrase_lex.values(), key=lambda rl: len(rl), reverse=True):
        rank_list = []
        for i in iter(range(0, len(rl.ids))):
            id = rl.ids[i]
            if not id in repeated_roots:
                repeated_roots[id] = 1.0
                rank_list.append(rl.rank[i])
            else:
                repeated_roots[id] += 1.0
                rank_list.append(rl.rank[i] / repeated_roots[id])
        phrase_rank = calc_rms(rank_list)
        single_lex[str(rl.ids)] = rl._replace(rank=phrase_rank)
    sum_ranks = sum([rl.rank for rl in single_lex.values()])
    for rl in sorted(single_lex.values(), key=lambda rl: rl.rank, reverse=True):
        if sum_ranks > 0.0:
            rl = rl._replace(rank=rl.rank / sum_ranks)
        elif rl.rank == 0.0:
            rl = rl._replace(rank=0.1)
        rl = rl._replace(text=re.sub(r"\s([\.\,\-\+\:\@])\s", r"\1", rl.text))
        yield rl
collect keyphrases, named entities, etc., while removing stop words
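A hedged sketch of the staged pytextrank 1.x pipeline this generator belongs to; the stage file name "o1.json" is a hypothetical intermediate of parsed "graf" records, and the text_rank call reflects that older API rather than anything stated in this entry.

import pytextrank

# stage 2 of the pytextrank 1.x pipeline: rank the parsed doc, then normalize
graph, ranks = pytextrank.text_rank("o1.json")

for rl in pytextrank.normalize_key_phrases("o1.json", ranks):
    print(rl.rank, rl.text)  # normalized rank and the cleaned key phrase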
23,788
def provides_defaults_for(self, rule: 'Rule', **values: Any) -> bool:
    defaults_match = all(
        values[key] == self.defaults[key] for key in self.defaults if key in values
    )
    return self != rule and bool(self.defaults) and defaults_match
Returns true if this rule provides defaults for the argument and values.
23,789
def mutex(func):
    def wrapper(*args, **kwargs):
        # args[0] is the instance the decorated method is bound to
        lock = args[0].lock
        lock.acquire(True)
        try:
            return func(*args, **kwargs)
        finally:
            lock.release()
    return wrapper
Use a thread lock around the decorated method; requires ``self.lock`` to be defined.
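A self-contained sketch of using the decorator; the Counter class is invented for illustration and supplies the ``lock`` attribute the wrapper reads off ``args[0]``.

import threading

class Counter:
    def __init__(self):
        self.lock = threading.Lock()  # the attribute the decorator looks up on args[0]
        self.value = 0

    @mutex
    def increment(self):
        self.value += 1  # runs with self.lock held

c = Counter()
threads = [threading.Thread(target=c.increment) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(c.value)  # 10, with no lost updates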
23,790
def address_exists(name,
                   addressname=None,
                   vsys=1,
                   ipnetmask=None,
                   iprange=None,
                   fqdn=None,
                   description=None,
                   commit=False):
    ret = _default_ret(name)

    if not addressname:
        ret.update({'comment': "The service name field must be provided."})
        return ret

    # check if the address object currently exists
    address = __salt__['panos.get_address'](addressname, vsys)['result']

    if address and 'entry' in address:
        address = address['entry']
    else:
        address = {}

    element = ""

    # build the element for the specified address type
    if ipnetmask:
        element = "<ip-netmask>{0}</ip-netmask>".format(ipnetmask)
    elif iprange:
        element = "<ip-range>{0}</ip-range>".format(iprange)
    elif fqdn:
        element = "<fqdn>{0}</fqdn>".format(fqdn)
    else:
        ret.update({'comment': "A valid address type must be specified."})
        return ret

    if description:
        element += "<description>{0}</description>".format(description)

    full_element = "<entry name='{0}'>{1}</entry>".format(addressname, element)

    new_address = xml.to_dict(ET.fromstring(full_element), True)

    if address == new_address:
        ret.update({
            'comment': 'Address object already exists. No changes required.',
            'result': True
        })
        return ret
    else:
        xpath = "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/address/" \
                "entry[@name='{1}']".format(vsys, addressname)

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({
                'comment': msg
            })
            return ret

    if commit is True:
        ret.update({
            'changes': {'before': address, 'after': new_address},
            'commit': __salt__['panos.commit'](),
            'comment': 'Address object successfully configured.',
            'result': True
        })
    else:
        ret.update({
            'changes': {'before': address, 'after': new_address},
            'comment': 'Address object successfully configured.',
            'result': True
        })

    return ret
Ensures that an address object exists in the configured state. If it does not exist or is not
configured with the specified attributes, it will be adjusted to match the specified values.

This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will
process the specified value in the following order: ip-netmask, ip-range, fqdn. For proper
execution, only specify a single address type.

name: The name of the module function to execute.

addressname(str): The name of the address object. The name is case-sensitive and can have up
    to 31 characters, which can be letters, numbers, spaces, hyphens, and underscores. The name
    must be unique on a firewall and, on Panorama, unique within its device group and any
    ancestor or descendant device groups.

vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask
    or ip_address where the mask is the number of significant binary digits used for the network
    portion of the address. Ideally, for IPv6, you specify only the network portion, not the
    host portion.

iprange(str): A range of addresses using the format ip_address-ip_address where both addresses
    can be IPv4 or both can be IPv6.

fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time.
    Entries are subsequently refreshed when the firewall performs a check every 30 minutes; all
    changes in the IP address for the entries are picked up at the refresh cycle.

description(str): A description for the policy (up to 255 characters).

commit(bool): If true the firewall will commit the changes, if false do not commit changes.

SLS Example:

.. code-block:: yaml

    panos/address/h-10.10.10.10:
        panos.address_exists:
          - addressname: h-10.10.10.10
          - vsys: 1
          - ipnetmask: 10.10.10.10
          - commit: False

    panos/address/10.0.0.1-10.0.0.50:
        panos.address_exists:
          - addressname: r-10.0.0.1-10.0.0.50
          - vsys: 1
          - iprange: 10.0.0.1-10.0.0.50
          - commit: False

    panos/address/foo.bar.com:
        panos.address_exists:
          - addressname: foo.bar.com
          - vsys: 1
          - fqdn: foo.bar.com
          - description: My fqdn object
          - commit: False
23,791
def list_cards(self, *args, **kwargs):
    return payplug.Card.list(self, *args, **kwargs)
List the cards of the customer.

:param page: the page number
:type page: int|None
:param per_page: number of cards per page. It's a good practice to increase this number if
    you know that you will need a lot of cards.
:type per_page: int|None
:return: The cards of the customer
:rtype: APIResourceCollection
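A hedged usage sketch; the secret key and customer id below are placeholders, and the retrieve() call assumes the usual payplug resource API rather than anything shown in this entry.

import payplug

payplug.set_secret_key('sk_test_...')          # placeholder key
customer = payplug.Customer.retrieve('cus_x')  # hypothetical customer id
for card in customer.list_cards(per_page=20, page=0):
    print(card.id)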
23,792
def get_pages(self, include_draft=False):
    def pages_generator(pages_root_path):
        for file_path in traverse_directory(pages_root_path, yield_dir=False):
            rel_path = os.path.relpath(file_path, pages_root_path)
            rel_path, ext = os.path.splitext(rel_path)
            if not ext or ext == '.' or get_standard_format_name(ext[1:]) is None:
                continue  # skip files that are not in a supported page format

            if rel_path.endswith(os.path.sep + 'index'):
                rel_path = rel_path[:-len('index')]  # remove the trailing 'index'
            else:
                rel_path += '.html'
            page = self.get_page(rel_path.replace(os.path.sep, '/'),
                                 include_draft=include_draft)
            if page is not None:
                yield page

    pages_path = os.path.join(current_app.instance_path, 'pages')
    return list(pages_generator(pages_path))
Get all custom pages (supported formats only, excluding other files like '.js', '.css', '.html').

:param include_draft: return draft pages or not
:return: an iterable of Page objects
23,793
def threshold(self, messy_data, recall_weight=1.5):
    blocked_pairs = self._blockData(messy_data)
    return self.thresholdBlocks(blocked_pairs, recall_weight)
Returns the threshold that maximizes the expected F score, a weighted average of precision
and recall for a sample of data.

Arguments:

messy_data -- Dictionary of records from messy dataset, where the keys are record_ids and
              the values are dictionaries with the keys being field names

recall_weight -- Sets the tradeoff between precision and recall. I.e. if you care twice as
                 much about recall as you do precision, set recall_weight to 2.
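A hedged sketch of where this method sits in the dedupe 1.x flow; ``deduper`` is assumed to be an already-trained ``dedupe.Dedupe`` instance and ``messy_data`` a dict of record_id to field dict, per the docstring above.

# Favor recall twice as much as precision when picking the cutoff.
threshold = deduper.threshold(messy_data, recall_weight=2.0)
clustered_dupes = deduper.match(messy_data, threshold)
print('duplicate sets found:', len(clustered_dupes))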
23,794
def get_child_books(self, book_id):
    if self._catalog_session is not None:
        return self._catalog_session.get_child_catalogs(catalog_id=book_id)
    return BookLookupSession(
        self._proxy,
        self._runtime).get_books_by_ids(
            list(self.get_child_book_ids(book_id)))
Gets the child books of the given ``id``.

arg:    book_id (osid.id.Id): the ``Id`` of the ``Book`` to query
return: (osid.commenting.BookList) - the child books of the ``id``
raise:  NotFound - a ``Book`` identified by ``Id`` is not found
raise:  NullArgument - ``book_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure

*compliance: mandatory -- This method must be implemented.*
23,795
def list_extensions():
    import nnabla_ext.cpu
    from os.path import dirname, join, realpath
    from os import listdir
    # the nnabla_ext namespace directory holds one entry per installed extension
    ext_dir = realpath(join(dirname(nnabla_ext.cpu.__file__), '..'))
    return listdir(ext_dir)
List available extensions.

Note:
    It may not work on some platforms/environments since it depends on the directory
    structure of the namespace packages.

Returns:
    list of str: Names of available extensions.
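Straightforward use of the helper above; it requires the nnabla and nnabla-ext-cpu packages to be installed so the namespace directory exists.

for name in list_extensions():
    print(name)  # e.g. 'cpu', plus CUDA entries when those extensions are installed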
23,796
def ReadTriggers(self, collection_link, options=None):
    if options is None:
        options = {}

    return self.QueryTriggers(collection_link, None, options)
Reads all triggers in a collection.

:param str collection_link: The link to the document collection.
:param dict options: The request options for the request.

:return: Query Iterable of Triggers.
:rtype: query_iterable.QueryIterable
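A hedged sketch of consuming the returned query iterable; the collection link follows the usual 'dbs/{database}/colls/{collection}' convention and ``client`` is assumed to be an already-constructed document client.

collection_link = 'dbs/mydb/colls/mycoll'  # placeholder link
for trigger in client.ReadTriggers(collection_link):
    print(trigger['id'])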
23,797
def _handleSmsStatusReport(self, notificationLine):
    self.log.debug('SMS status report received')
    cdsiMatch = self.CDSI_REGEX.match(notificationLine)
    if cdsiMatch:
        # read the stored report, then remove it from the modem's memory
        msgMemory = cdsiMatch.group(1)
        msgIndex = cdsiMatch.group(2)
        report = self.readStoredSms(msgIndex, msgMemory)
        self.deleteStoredSms(msgIndex)
        # update sent SMS status if possible
        if report.reference in self.sentSms:
            self.sentSms[report.reference].report = report
        if self._smsStatusReportEvent:
            # a waiting thread is blocked on this report - notify it
            self._smsStatusReportEvent.set()
        else:
            # nothing is waiting for this report - deliver it via callback
            self.smsStatusReportCallback(report)
Handler for SMS status reports
23,798
def ndim(self):
    try:
        return self.__ndim
    except AttributeError:
        ndim = len(self.coord_vectors)
        self.__ndim = ndim
        return ndim
Number of dimensions of the grid.
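The property caches its answer on the instance after the first access. A minimal standalone illustration of the same compute-once pattern follows; the Grid class here is invented for the example, not the source class.

class Grid:
    def __init__(self, coord_vectors):
        self.coord_vectors = coord_vectors

    @property
    def ndim(self):
        try:
            return self.__ndim  # served from the cached attribute after first access
        except AttributeError:
            self.__ndim = len(self.coord_vectors)
            return self.__ndim

g = Grid(([0.0, 1.0], [0.0, 0.5, 1.0]))
print(g.ndim)  # 2, computed once and cached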
23,799
def remove(args):
    session = c.Session(args)
    if not args["name"] in session.feeds:
        sys.exit("You don't have a feed with that name.")
    session.feeds.remove_section(args["name"])
    with open(session.data_filename, 'w') as configfile:
        session.feeds.write(configfile)
    try:
        os.remove(os.path.join(session.data_dir, args["name"]))
    except FileNotFoundError:
        pass
Remove the feed given in <args>