Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
20,200
def parse(file_contents, file_name):
    try:
        yaml.load(file_contents)
    except Exception:
        _, exc_value, _ = sys.exc_info()
        return ("Cannot Parse: {file_name}: \n {exc_value}"
                .format(file_name=file_name, exc_value=exc_value))
Takes the contents and name of an expected yaml file and tries to parse it, returning an error message if there are any parsing issues. Args: file_contents (str): Contents of a yml file Raises: yaml.parser.ParserError: Raises an error if the file contents cannot be parsed and interpreted as yaml
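A minimal usage sketch of the helper above, assuming parse() and the yaml/sys imports it relies on are in scope; the file name "config.yml" is hypothetical:

bad_yaml = "key: [unclosed"            # invalid YAML: the flow sequence never closes
message = parse(bad_yaml, "config.yml")
if message:
    print(message)                     # "Cannot Parse: config.yml: ..."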
20,201
def validate_owner_repo_package(ctx, param, value):
    form = "OWNER/REPO/PACKAGE"
    return validate_slashes(param, value, minimum=3, maximum=3, form=form)
Ensure that owner/repo/package is formatted correctly.
20,202
def other_object_webhook_handler(event):
    if event.parts[:2] == ["charge", "dispute"]:
        target_cls = models.Dispute
    else:
        target_cls = {
            "charge": models.Charge,
            "coupon": models.Coupon,
            "invoice": models.Invoice,
            "invoiceitem": models.InvoiceItem,
            "plan": models.Plan,
            "product": models.Product,
            "transfer": models.Transfer,
            "source": models.Source,
        }.get(event.category)

    _handle_crud_like_event(target_cls=target_cls, event=event)
Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects. Docs for: - charge: https://stripe.com/docs/api#charges - coupon: https://stripe.com/docs/api#coupons - invoice: https://stripe.com/docs/api#invoices - invoiceitem: https://stripe.com/docs/api#invoiceitems - plan: https://stripe.com/docs/api#plans - product: https://stripe.com/docs/api#products - source: https://stripe.com/docs/api#sources
20,203
def freeze(self, dest_dir):
    for resource in self.resources():
        if resource.present:
            resource.freeze(dest_dir)
Freezes every resource within a context
20,204
def ungeometrize_stops(geo_stops: DataFrame) -> DataFrame:
    f = geo_stops.copy().to_crs(cs.WGS84)
    f["stop_lon"], f["stop_lat"] = zip(
        *f["geometry"].map(lambda p: [p.x, p.y])
    )
    del f["geometry"]
    return f
The inverse of :func:`geometrize_stops`. Parameters ---------- geo_stops : GeoPandas GeoDataFrame Looks like a GTFS stops table, but has a ``'geometry'`` column of Shapely Point objects that replaces the ``'stop_lon'`` and ``'stop_lat'`` columns. Returns ------- DataFrame A GTFS stops table where the ``'stop_lon'`` and ``'stop_lat'`` columns are derived from the points in the given GeoDataFrame and are in WGS84 coordinates regardless of the coordinate reference system of the given GeoDataFrame.
20,205
async def current_position(
        self, mount: top_types.Mount,
        critical_point: CriticalPoint = None) -> Dict[Axis, float]:
    if not self._current_position:
        raise MustHomeError
    async with self._motion_lock:
        if mount == mount.RIGHT:
            offset = top_types.Point(0, 0, 0)
        else:
            offset = top_types.Point(*self.config.mount_offset)
        z_ax = Axis.by_mount(mount)
        plunger_ax = Axis.of_plunger(mount)
        cp = self._critical_point_for(mount, critical_point)
        return {
            Axis.X: self._current_position[Axis.X] + offset[0] + cp.x,
            Axis.Y: self._current_position[Axis.Y] + offset[1] + cp.y,
            z_ax: self._current_position[z_ax] + offset[2] + cp.z,
            plunger_ax: self._current_position[plunger_ax]
        }
Return the position (in deck coords) of the critical point of the specified mount. This returns cached position to avoid hitting the smoothie driver unless ``refresh`` is ``True``. If `critical_point` is specified, that critical point will be applied instead of the default one. For instance, if `critical_point=CriticalPoints.MOUNT` then the position of the mount will be returned. If the critical point specified does not exist, then the next one down is returned - for instance, if there is no tip on the specified mount but `CriticalPoint.TIP` was specified, the position of the nozzle will be returned.
20,206
def update(self, arg, allow_overwrite=False):
    if (inspect.isclass(arg) and issubclass(arg, ICatalog)) \
            or isinstance(arg, ICatalog):
        arg = arg._providers
    if not allow_overwrite:
        for key in arg:
            if key in self._providers:
                raise KeyError("Key %s already exists" % key)
    super(ProviderMapping, self).update(arg)
Update our providers from either an ICatalog subclass/instance or a mapping. If arg is an ICatalog, we update from its ._providers attribute. :param arg: Di/Catalog/Mapping to update from. :type arg: ICatalog or collections.Mapping :param allow_overwrite: If True, allow overwriting existing keys :type allow_overwrite: bool
20,207
def calc_effective_permeability(self, inlets=None, outlets=None,
                                domain_area=None, domain_length=None):
    phase = self.project.phases()[self.settings['phase']]
    d_normal = self._calc_eff_prop(inlets=inlets, outlets=outlets,
                                   domain_area=domain_area,
                                   domain_length=domain_length)
    K = d_normal * sp.mean(phase['pore.viscosity'])
    return K
This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
20,208
def _inspect(self, obj, objtype=None):
    gen = super(Dynamic, self).__get__(obj, objtype)
    if hasattr(gen, '_Dynamic_last'):
        return gen._Dynamic_last
    else:
        return gen
Return the last generated value for this parameter.
20,209
def generate_data_for_edit_page(self):
    if not self.can_edit:
        return {}
    if self.edit_form:
        return self.edit_form.to_dict()
    return self.generate_simple_data_page()
Generate a dictionary representation of the table's fields, using the edit form if one exists, otherwise falling back to the default representation. :return: dict
20,210
def get_relationship_mdata(): return { : { : { : , : str(DEFAULT_LANGUAGE_TYPE), : str(DEFAULT_SCRIPT_TYPE), : str(DEFAULT_FORMAT_TYPE), }, : { : , : str(DEFAULT_LANGUAGE_TYPE), : str(DEFAULT_SCRIPT_TYPE), : str(DEFAULT_FORMAT_TYPE), }, : False, : False, : False, : False, : [], : , : [], }, : { : { : , : str(DEFAULT_LANGUAGE_TYPE), : str(DEFAULT_SCRIPT_TYPE), : str(DEFAULT_FORMAT_TYPE), }, : { : , : str(DEFAULT_LANGUAGE_TYPE), : str(DEFAULT_SCRIPT_TYPE), : str(DEFAULT_FORMAT_TYPE), }, : False, : False, : False, : False, : [], : , : [], }, }
Return default mdata map for Relationship
20,211
def create_tx(network, spendables, payables, fee="standard", lock_time=0,
              version=1):
    Tx = network.tx

    def _fix_spendable(s):
        if isinstance(s, Tx.Spendable):
            return s
        if not hasattr(s, "keys"):
            return Tx.Spendable.from_text(s)
        return Tx.Spendable.from_dict(s)

    spendables = [_fix_spendable(s) for s in spendables]
    txs_in = [spendable.tx_in() for spendable in spendables]

    txs_out = []
    for payable in payables:
        if len(payable) == 2:
            address, coin_value = payable
        else:
            address = payable
            coin_value = 0
        script = network.contract.for_address(address)
        txs_out.append(Tx.TxOut(coin_value, script))

    tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out,
            lock_time=lock_time)
    tx.set_unspents(spendables)

    distribute_from_split_pool(tx, fee)
    return tx
This function provides the easiest way to create an unsigned transaction. All coin values are in satoshis. :param spendables: a list of Spendable objects, which act as inputs. Each item in the list can be a Spendable, or text from Spendable.as_text, or a dictionary from Spendable.as_dict (which might be easier for airgapped transactions, for example). :param payables: a list where each entry is an address, or a tuple of (address, coin_value). If the coin_value is missing or zero, this address is thrown into the "split pool". Funds not explicitly claimed by the fee or an address are shared as equally as possible among the split pool. All coins are consumed: if the amount to be split does not divide evenly, some of the earlier addresses will get an extra satoshi. :param fee: an integer, or the (deprecated) string "standard" for it to be calculated :param version: (optional) the version to use in the transaction. Defaults to 1. :param lock_time: (optional) the lock_time to use in the transaction. Defaults to 0. :return: :class:`Tx <Tx>` object, with unspents populated :rtype: pycoin.tx.Tx.Tx Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> tx = create_tx(network, spendables, ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never).
20,212
def add(config, username, filename):
    try:
        client = Client()
        client.prepare_connection()
        user_api = UserApi(client)
        key_api = API(client)
        key_api.add(username, user_api, filename)
    except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
            ldap_tools.exceptions.InvalidResult,
            ldap3.core.exceptions.LDAPAttributeOrValueExistsResult) as err:
        print('{}: {}'.format(type(err), err.args[0]))
    except Exception as err:
        raise err from None
Add user's SSH public key to their LDAP entry.
20,213
def map_hmms(input_model, mapping):
    output_model = copy.copy(input_model)
    o_hmms = []
    for i_hmm in input_model['hmms']:
        i_hmm_name = i_hmm['name']
        o_hmm_names = mapping.get(i_hmm_name, [i_hmm_name])
        for o_hmm_name in o_hmm_names:
            o_hmm = copy.copy(i_hmm)
            o_hmm['name'] = o_hmm_name
            o_hmms.append(o_hmm)
    output_model['hmms'] = o_hmms
    return output_model
Create a new HTK HMM model given a model and a mapping dictionary. :param input_model: The model to transform of type dict :param mapping: A dictionary from string -> list(string) :return: The transformed model of type dict
20,214
def var_window_closed(self, widget):
    self.action_group.get_action().set_active(False)
    self.show_vars = False
    self.var_window = None
Called when the user clicks the close button on the var window. :param widget: :return:
20,215
def addvlan(self, vlanid, vlan_name):
    create_dev_vlan(vlanid, vlan_name, self.auth, self.url,
                    devid=self.devid)
Function operates on the IMCDev object. Takes input of vlanid (1-4094), str of vlan_name, auth and url to execute the create_dev_vlan method on the IMCDev object. Device must be supported in the HPE IMC Platform VLAN Manager module. :param vlanid: str of VLANId ( valid 1-4094 ) :param vlan_name: str of vlan_name :return:
20,216
def get_regulate_amounts(self):
    p = pb.controlsExpressionWithTemplateReac()
    s = _bpp('Searcher')
    res = s.searchPlain(self.model, p)
    res_array = [_match_to_array(m) for m in res.toArray()]
    stmts = []
    for res in res_array:
        # Pattern elements matched per result: controller ER, generic
        # controller ER, controller simple PE, controller PE, Control,
        # Conversion, input PE, input simple PE, changed generic ER,
        # output PE, output simple PE, changed ER. From these an
        # IncreaseAmount/DecreaseAmount Statement (st_dec) is constructed.
        self.statements.append(st_dec)
Extract INDRA RegulateAmount Statements from the BioPAX model. This method extracts IncreaseAmount/DecreaseAmount Statements from the BioPAX model. It fully reuses BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac pattern to find TemplateReactions which control the expression of a protein.
20,217
def is_full(self):
    capacity = self.get_true_capacity()
    if capacity != -1:
        num_signed_up = self.eighthsignup_set.count()
        return num_signed_up >= capacity
    return False
Return whether the activity is full.
20,218
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,
                                      err_flux, priorbounds):
    u = []
    for ix, key in enumerate(sorted(priorbounds.keys())):
        if key == 'rp':
            params.rp = theta[ix]
        elif key == 't0':
            params.t0 = theta[ix]
        elif key == 'sma':
            params.a = theta[ix]
        elif key == 'incl':
            params.inc = theta[ix]
        elif key == 'period':
            params.per = theta[ix]
        elif key == 'omega':
            params.w = theta[ix]
        elif key == 'u_linear':
            u.append(theta[ix])
        elif key == 'u_quad':
            u.append(theta[ix])
            params.u = u
        elif key == 'poly_order0':
            poly_order0 = theta[ix]
        elif key == 'poly_order1':
            poly_order1 = theta[ix]

    try:
        poly_order0
    except Exception:
        poly_order0 = 0

    transit = model.light_curve(params)
    line = poly_order0 + t*poly_order1
    model = transit + line

    residuals = data_flux - model
    log_likelihood = -0.5*(
        np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
    )
    return log_likelihood
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
20,219
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None,
           noprint=True):
    if isinstance(filename, Curve):
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['autorg', filename, '--format', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = \
        result[0].split(None, 8)
    try:
        curve
    except NameError:
        curve = Curve.new_from_file(filename)
    else:
        os.unlink(filename)
    return (ErrorValue(float(Rg), float(dRg)),
            ErrorValue(float(I0), float(dI0)),
            curve.q[int(idxfirst) - 1], curve.q[int(idxlast) - 1],
            float(quality), float(aggregation))
Execute autorg. Inputs: filename: either a name of an ascii file, or an instance of Curve. mininterval: the minimum number of points in the Guinier range qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0 qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3 noprint: if the output of autorg should be redirected to the null device. Outputs: Rg as an ErrorValue I0 as an ErrorValue qmin: the lower end of the chosen Guinier range qmax: the upper end of the chosen Guinier range quality: the quality parameter, between 0 and 1 aggregation: float, the extent of aggregation
20,220
def try_unbuffered_file(file, _alreadyopen={}):
    try:
        fileno = file.fileno()
    except (AttributeError, UnsupportedOperation):
        # Not backed by a file descriptor; return the original file.
        return file
    if fileno in _alreadyopen:
        return _alreadyopen[fileno]
    filedesc = os.fdopen(fileno, 'wb', 0)
    _alreadyopen[fileno] = filedesc
    return filedesc
Try re-opening a file in an unbuffered mode and return it. If that fails, just return the original file. This function remembers the file descriptors it opens, so it never opens the same one twice. This is meant for files like sys.stdout or sys.stderr.
20,221
def scatter(h1: Histogram1D, ax: Axes, *, errors: bool = False, **kwargs):
    show_stats = kwargs.pop("show_stats", False)
    show_values = kwargs.pop("show_values", False)
    density = kwargs.pop("density", False)
    cumulative = kwargs.pop("cumulative", False)
    value_format = kwargs.pop("value_format", None)
    text_kwargs = pop_kwargs_with_prefix("text_", kwargs)

    data = get_data(h1, cumulative=cumulative, density=density)

    if "cmap" in kwargs:
        cmap = _get_cmap(kwargs)
        _, cmap_data = _get_cmap_data(data, kwargs)
        kwargs["color"] = cmap(cmap_data)
    else:
        kwargs["color"] = kwargs.pop("color", "blue")

    _apply_xy_lims(ax, h1, data, kwargs)
    _add_ticks(ax, h1, kwargs)
    _add_labels(ax, h1, kwargs)

    if errors:
        err_data = get_err_data(h1, cumulative=cumulative, density=density)
        ax.errorbar(h1.bin_centers, data, yerr=err_data,
                    fmt=kwargs.pop("fmt", "o"),
                    ecolor=kwargs.pop("ecolor", "black"), ms=0)
    ax.scatter(h1.bin_centers, data, **kwargs)

    if show_values:
        _add_values(ax, h1, data, value_format=value_format, **text_kwargs)
    if show_stats:
        _add_stats_box(h1, ax, stats=show_stats)
Scatter plot of 1D histogram.
20,222
def _get_serializer(self, _type):
    if _type in _serializers:
        return _serializers[_type]
    elif _type == 'array':
        return self._get_array_serializer()
    elif _type == 'object':
        return self._get_object_serializer()
    raise ValueError('Unknown type: {}'.format(_type))
Gets a serializer for a particular type. For primitives, returns the serializer from the module-level serializers. For arrays and objects, uses the special _get_T_serializer methods to build the encoders and decoders.
20,223
def must_be_same(self, klass):
    if self.__class__ is not klass:
        self.__class__ = klass
        self._morph()
        self.clear()
Called to make sure a Node is a Dir. Since we're an Entry, we can morph into one.
20,224
def from_pandas(cls, index):
    from pandas import Index as PandasIndex
    check_type(index, PandasIndex)
    return Index(index.values, index.dtype, index.name)
Create baloo Index from pandas Index. Parameters ---------- index : pandas.base.Index Returns ------- Index
20,225
def pay(self, transactionAmount, senderTokenId,
        recipientTokenId=None, callerTokenId=None,
        chargeFeeTo="Recipient",
        callerReference=None, senderReference=None,
        recipientReference=None, senderDescription=None,
        recipientDescription=None, callerDescription=None,
        metadata=None, transactionDate=None, reserve=False):
    params = {}
    params['SenderTokenId'] = senderTokenId
    params['TransactionAmount.Value'] = str(transactionAmount)
    params['TransactionAmount.CurrencyCode'] = "USD"
    params['ChargeFeeTo'] = chargeFeeTo
    params['RecipientTokenId'] = (
        recipientTokenId if recipientTokenId is not None
        else boto.config.get("FPS", "recipient_token")
    )
    params['CallerTokenId'] = (
        callerTokenId if callerTokenId is not None
        else boto.config.get("FPS", "caller_token")
    )
    if transactionDate is not None:
        params['TransactionDate'] = transactionDate
    if senderReference is not None:
        params['SenderReference'] = senderReference
    if recipientReference is not None:
        params['RecipientReference'] = recipientReference
    if senderDescription is not None:
        params['SenderDescription'] = senderDescription
    if recipientDescription is not None:
        params['RecipientDescription'] = recipientDescription
    if callerDescription is not None:
        params['CallerDescription'] = callerDescription
    if metadata is not None:
        params['MetaData'] = metadata
    if callerReference is None:
        callerReference = uuid.uuid4()
    params['CallerReference'] = callerReference

    if reserve:
        response = self.make_request("Reserve", params)
    else:
        response = self.make_request("Pay", params)
    body = response.read()
    if response.status == 200:
        rs = ResultSet()
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs
    else:
        raise FPSResponseError(response.status, response.reason, body)
Make a payment transaction. You must specify the amount. This can also perform a Reserve request if 'reserve' is set to True.
20,226
def format_dates(self, data, columns):
    for column in columns:
        if column in data.columns:
            data[column] = pandas.to_datetime(data[column])
    return data
This method translates columns values into datetime objects :param data: original Pandas dataframe :param columns: list of columns to cast the date to a datetime object :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with updated 'columns' with datetime objects :rtype: pandas.DataFrame
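A short sketch of how this might be called, assuming `obj` is a hypothetical instance of the class exposing format_dates; column names are made up:

import pandas

df = pandas.DataFrame({"created": ["2019-01-02", "2019-03-04"], "n": [1, 2]})
df = obj.format_dates(df, ["created", "missing"])  # absent columns are skipped
print(df["created"].dtype)                         # datetime64[ns]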
20,227
def qubits(self):
    return [(v, i) for k, v in self.qregs.items() for i in range(v.size)]
Return a list of qubits as (QuantumRegister, index) pairs.
20,228
def path_valid(self):
    valid = [i is not None for i in self.polygons_closed]
    valid = np.array(valid, dtype=np.bool)
    return valid
Returns ---------- path_valid: (n,) bool, indexes of self.paths self.polygons_closed which are valid polygons
20,229
def bbox(self): if not hasattr(self, ): data = None for key in (, , ): if key in self.tagged_blocks: data = self.tagged_blocks.get_data(key) assert data is not None rect = data.get(b) self._bbox = ( int(rect.get(b)), int(rect.get(b)), int(rect.get(b)), int(rect.get(b)), ) return self._bbox
(left, top, right, bottom) tuple.
20,230
def closest_pair(arr, give="indicies"):
    idxs = [idx for idx in np.ndindex(arr.shape)]
    outs = []
    min_dist = arr.max() - arr.min()
    for idxa in idxs:
        for idxb in idxs:
            if idxa == idxb:
                continue
            dist = abs(arr[idxa] - arr[idxb])
            if dist == min_dist:
                if not [idxb, idxa] in outs:
                    outs.append([idxa, idxb])
            elif dist < min_dist:
                min_dist = dist
                outs = [[idxa, idxb]]
    if give == "indicies":
        return outs
    elif give == "distance":
        return min_dist
    else:
        raise KeyError("give not recognized in closest_pair")
Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]]
20,231
def reflect_table(engine, klass):
    try:
        meta = MetaData()
        meta.reflect(bind=engine)
    except OperationalError as e:
        raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0])

    table = None
    for tb in klass.tables():
        if tb in meta.tables:
            table = meta.tables[tb]
            break

    if table is None:
        raise DatabaseError(error="Invalid schema. Table not found",
                            code="-1")

    mapper(klass, table, column_prefix=klass.column_prefix())
    return table
Inspect and reflect objects
20,232
def read_rows(self, rows, **keys):
    if rows is None:
        return self._read_all()

    if self._info['hdutype'] == ASCII_TBL:
        keys['rows'] = rows
        return self.read(**keys)

    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        return self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        array = numpy.zeros(rows.size, dtype=dtype)
        self._FITS.read_rows_as_rec(self._ext+1, array, rows)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array, name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)
    return array
Read the specified rows. parameters ---------- rows: list,array A list or array of row indices. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. lower: bool, optional If True, force all columns names to lower case in output. Will over ride the lower= keyword from construction. upper: bool, optional If True, force all columns names to upper case in output. Will over ride the lower= keyword from construction.
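A usage sketch, assuming fitsio is installed and that "catalog.fits" (a hypothetical file) holds a binary table in extension 1:

import fitsio

with fitsio.FITS("catalog.fits") as fits:
    subset = fits[1].read_rows([0, 10, 100])           # record array with 3 rows
    lowered = fits[1].read_rows([0, 10], lower=True)   # force lower-case column names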
20,233
def create_logger():
    global logger
    formatter = logging.Formatter()
    handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1)
    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    handler.suffix = "%Y-%m-%d"

    logger = logging.getLogger("sacplus")
    logger.setLevel(log_level)
    logger.addHandler(handler)
Initialize the global logger variable.
20,234
def _parse_message(self, data):
    match = self._regex.match(str(data))
    if match is None:
        raise InvalidMessageError(
            'Received invalid message: {0}'.format(data))

    header, self.bitfield, self.numeric_code, self.panel_data, alpha = \
        match.group(1, 2, 3, 4, 5)

    is_bit_set = lambda bit: not self.bitfield[bit] == "0"

    self.ready = is_bit_set(1)
    self.armed_away = is_bit_set(2)
    self.armed_home = is_bit_set(3)
    self.backlight_on = is_bit_set(4)
    self.programming_mode = is_bit_set(5)
    self.beeps = int(self.bitfield[6], 16)
    self.zone_bypassed = is_bit_set(7)
    self.ac_power = is_bit_set(8)
    self.chime_on = is_bit_set(9)
    self.alarm_event_occurred = is_bit_set(10)
    self.alarm_sounding = is_bit_set(11)
    self.battery_low = is_bit_set(12)
    self.entry_delay_off = is_bit_set(13)
    self.fire_alarm = is_bit_set(14)
    self.check_zone = is_bit_set(15)
    self.perimeter_only = is_bit_set(16)
    self.system_fault = is_bit_set(17)
    if self.bitfield[18] in list(PANEL_TYPES):
        self.panel_type = PANEL_TYPES[self.bitfield[18]]

    self.text = alpha.strip()
    self.mask = int(self.panel_data[3:3+8], 16)

    if self.panel_type in (ADEMCO, DSC):
        if int(self.panel_data[19:21], 16) & 0x01 > 0:
            self.cursor_location = int(self.panel_data[21:23], 16)
Parse the message from the device. :param data: message data :type data: string :raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
20,235
def editor_interfaces(self):
    return ContentTypeEditorInterfacesProxy(
        self._client, self.space.id, self._environment_id, self.id)
Provides access to editor interface management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object. :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy Usage: >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces() <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
20,236
def settimeout(self, timeout):
    self.sock_opt.timeout = timeout
    if self.sock:
        self.sock.settimeout(timeout)
Set the timeout to the websocket. timeout: timeout time (in seconds).
20,237
def is_equal_to_ignoring_case(self, other):
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if not isinstance(other, str_types):
        raise TypeError('given arg must be a string')
    if self.val.lower() != other.lower():
        self._err('Expected <%s> to be case-insensitive equal to <%s>, '
                  'but was not.' % (self.val, other))
    return self
Asserts that val is case-insensitive equal to other.
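Usage follows assertpy's fluent style; a quick sketch:

from assertpy import assert_that

assert_that("Foo").is_equal_to_ignoring_case("FOO")  # passes, returns self for chaining
assert_that("Foo").is_equal_to_ignoring_case("bar")  # raises an assertion error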
20,238
def find_step_impl(self, step):
    result = None
    for si in self.steps[step.step_type]:
        matches = si.match(step.match)
        if matches:
            if result:
                raise AmbiguousStepImpl(step, result[0], si)
            args = [self._apply_transforms(arg, si)
                    for arg in matches.groups()]
            result = si, args
    if not result:
        raise UndefinedStepImpl(step)
    return result
Find the implementation of the step for the given match string. Returns the StepImpl object corresponding to the implementation, and the arguments to the step implementation. If no implementation is found, raises UndefinedStepImpl. If more than one implementation is found, raises AmbiguousStepImpl. Each of the arguments returned will have been transformed by the first matching transform implementation.
20,239
def padDigitalData(self, dig_data, n):
    n = int(n)
    l0 = len(dig_data)
    if l0 % n == 0:
        return dig_data
    else:
        ladd = n - (l0 % n)
        dig_data_add = np.zeros(ladd, dtype="uint32")
        dig_data_add.fill(dig_data[-1])
        return np.concatenate((dig_data, dig_data_add))
Pad dig_data with its last element so that the new array is a multiple of n.
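The padding logic is simple enough to restate standalone; a sketch equivalent to the method above, with no dependency on the surrounding class:

import numpy as np

def pad_to_multiple(dig_data, n):
    # Repeat the final sample until the length is a multiple of n.
    ladd = (-len(dig_data)) % n
    if ladd == 0:
        return dig_data
    tail = np.full(ladd, dig_data[-1], dtype=dig_data.dtype)
    return np.concatenate((dig_data, tail))

print(pad_to_multiple(np.arange(5, dtype="uint32"), 4))
# [0 1 2 3 4 4 4 4]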
20,240
def delete_namespaced_pod_preset(self, name, namespace, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_pod_preset_with_http_info(
            name, namespace, **kwargs)
    else:
        (data) = self.delete_namespaced_pod_preset_with_http_info(
            name, namespace, **kwargs)
        return data
delete_namespaced_pod_preset # noqa: E501 delete a PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_pod_preset(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
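A call sketch for this generated client method, assuming a cluster old enough to still serve the settings.k8s.io/v1alpha1 PodPreset API; the preset and namespace names are hypothetical:

from kubernetes import client, config

config.load_kube_config()
api = client.SettingsV1alpha1Api()

status = api.delete_namespaced_pod_preset("my-preset", "default")  # synchronous
thread = api.delete_namespaced_pod_preset("my-preset", "default", async_req=True)
status = thread.get()                                              # asynchronous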
20,241
def get_output_metadata(self, path, filename):
    checksums = get_checksums(path, ['md5'])
    metadata = {'filename': filename,
                'filesize': os.path.getsize(path),
                'checksum': checksums['md5sum'],
                'checksum_type': 'md5'}
    if self.metadata_only:
        metadata['metadata_only'] = True
    return metadata
Describe a file by its metadata. :return: dict
20,242
def built(name, runas, dest_dir, spec, sources, tgt, template=None,
          deps=None, env=None, results=None, force=False, saltenv='base',
          log_dir='/var/log/salt/pkgbuild'):
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}

    if not results:
        ret['comment'] = '\'results\' argument is required'
        ret['result'] = False
        return ret

    if isinstance(results, six.string_types):
        results = results.split(',')

    needed = _get_missing_results(results, dest_dir)
    if not force and not needed:
        ret['comment'] = 'All needed packages exist'
        return ret

    if __opts__['test']:
        ret['result'] = None
        if force:
            ret['comment'] = 'Packages will be force-built: '
        else:
            ret['comment'] = 'The following packages need to be built: '
        ret['comment'] += ', '.join(needed)
        return ret

    if env is not None and not isinstance(env, dict):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        ret['result'] = False
        return ret

    func = 'pkgbuild.build'
    if __grains__.get('os_family', False) not in ('RedHat', 'Suse'):
        for res in results:
            if res.endswith('.rpm'):
                func = 'rpmbuild.build'
                break

    ret['changes'] = __salt__[func](
        runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv,
        log_dir)

    needed = _get_missing_results(results, dest_dir)
    if needed:
        ret['comment'] = 'The following packages were not built: '
        ret['comment'] += ', '.join(needed)
        ret['result'] = False
    else:
        ret['comment'] = 'All needed packages were built'
    return ret
Ensure that the named package is built and exists in the named directory name The name to track the build, the name value is otherwise unused runas The user to run the build process as dest_dir The directory on the minion to place the built package(s) spec The location of the spec file (used for rpms) sources The list of package sources tgt The target platform to run the build on template Run the spec file through a templating engine .. versionchanged:: 2015.8.2 This argument is now optional, allowing for no templating engine to be used if none is desired. deps Packages required to ensure that the named package is built can be hosted on either the salt master server or on an HTTP or FTP server. Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials env A dictionary of environment variables to be set prior to execution. Example: .. code-block:: yaml - env: DEB_BUILD_OPTIONS: 'nocheck' .. warning:: The above illustrates a common PyYAML pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) PyYAML idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. results The names of the expected rpms that will be built force : False If ``True``, packages will be built even if they already exist in the ``dest_dir``. This is useful when building a package for continuous or nightly package builds. .. versionadded:: 2015.8.2 saltenv The saltenv to use for files downloaded from the salt filesever log_dir : /var/log/salt/rpmbuild Root directory for log files created from the build. Logs will be organized by package name, version, OS release, and CPU architecture under this directory. .. versionadded:: 2015.8.2
20,243
async def create(
        cls, interface: Interface, mode: LinkMode,
        subnet: Union[Subnet, int] = None, ip_address: str = None,
        force: bool = False, default_gateway: bool = False):
    if not isinstance(interface, Interface):
        raise TypeError(
            "interface must be an Interface, not %s"
            % type(interface).__name__)
    if not isinstance(mode, LinkMode):
        raise TypeError(
            "mode must be a LinkMode, not %s" % type(mode).__name__)
    if subnet is not None:
        if isinstance(subnet, Subnet):
            subnet = subnet.id
        elif isinstance(subnet, int):
            pass
        else:
            raise TypeError(
                "subnet must be a Subnet or int, not %s"
                % type(subnet).__name__)
    if mode in [LinkMode.AUTO, LinkMode.STATIC]:
        if subnet is None:
            raise ValueError('subnet is required for mode %s' % mode)
    if default_gateway and mode not in [LinkMode.AUTO, LinkMode.STATIC]:
        raise ValueError(
            'default_gateway cannot be used with mode %s' % mode)

    params = {
        'system_id': interface.node.system_id,
        'id': interface.id,
        'mode': mode.value,
        'force': force,
        'default_gateway': default_gateway,
    }
    if subnet is not None:
        params['subnet'] = subnet
    if ip_address is not None:
        params['ip_address'] = ip_address
    return cls._object(new_link)
Create a link on `Interface` in MAAS. :param interface: Interface to create the link on. :type interface: `Interface` :param mode: Mode of the link. :type mode: `LinkMode` :param subnet: The subnet to create the link on (optional). :type subnet: `Subnet` or `int` :param ip_address: The IP address to assign to the link. :type ip_address: `str` :param force: If True, allows `LinkMode.LINK_UP` to be created even if other links already exist. Also allows the selection of any subnet no matter the VLAN the subnet belongs to. Using this option will cause all other interface links to be deleted (optional). :type force: `bool` :param default_gateway: If True, sets the gateway IP address for the subnet as the default gateway for the node this interface belongs to. Option can only be used with the `LinkMode.AUTO` and `LinkMode.STATIC` modes. :type default_gateway: `bool` :returns: The created InterfaceLink. :rtype: `InterfaceLink`
20,244
def _build(self, x, prev_state):
    x.get_shape().with_rank(2)
    self._batch_size = x.get_shape().as_list()[0]
    self._dtype = x.dtype

    x_zeros = tf.concat(
        [x, tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)], 1)
    x_ones = tf.concat(
        [x, tf.ones(shape=(self._batch_size, 1), dtype=self._dtype)], 1)

    halting_linear = basic.Linear(name="halting_linear", output_size=1)

    body = functools.partial(
        self._body, halting_linear=halting_linear, x_ones=x_ones)
    cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),
                                  dtype=self._dtype)
    iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
    core_output_size = [x.value for x in self._core.output_size]
    out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),
                        dtype=self._dtype)
    cumul_state_init = _nested_zeros_like(prev_state)
    remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
    (unused_final_x, final_out, unused_final_state, final_cumul_state,
     unused_final_halting, final_iteration, final_remainder) = tf.while_loop(
         self._cond, body,
         [x_zeros, out_init, prev_state, cumul_state_init,
          cumul_halting_init, iteration_init, remainder_init])

    act_output = basic.Linear(
        name="act_output_linear", output_size=self._output_size)(final_out)

    return (act_output, (final_iteration, final_remainder)), final_cumul_state
Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2.
20,245
def get_full_xml_representation(entity, private_key):
    from federation.entities.diaspora.mappers import get_outbound_entity
    diaspora_entity = get_outbound_entity(entity, private_key)
    xml = diaspora_entity.to_xml()
    return "<XML><post>%s</post></XML>" % etree.tostring(xml).decode("utf-8")
Get full XML representation of an entity. This contains the <XML><post>..</post></XML> wrapper. Accepts either a Base entity or a Diaspora entity. Author `private_key` must be given so that certain entities can be signed.
20,246
def glob(patterns, parent=None, excludes=None, include_dotfiles=False,
         ignore_false_excludes=False):
    if not glob2:
        raise glob2_ext

    if isinstance(patterns, str):
        patterns = [patterns]

    if not parent:
        parent = os.getcwd()

    result = []
    for pattern in patterns:
        if not os.path.isabs(pattern):
            pattern = os.path.join(parent, pattern)
        result += glob2.glob(canonical(pattern))

    for pattern in (excludes or ()):
        if not os.path.isabs(pattern):
            pattern = os.path.join(parent, pattern)
        pattern = canonical(pattern)
        if not isglob(pattern):
            try:
                result.remove(pattern)
            except ValueError as exc:
                if not ignore_false_excludes:
                    raise ValueError(
                        '{} (exclude: {})'.format(exc, pattern))
        else:
            for item in glob2.glob(pattern):
                try:
                    result.remove(item)
                except ValueError as exc:
                    if not ignore_false_excludes:
                        raise ValueError(
                            '{} (exclude: {})'.format(exc, pattern))
    return result
Wrapper for #glob2.glob() that accepts an arbitrary number of patterns and matches them. The paths are normalized with #norm(). Relative patterns are automatically joined with *parent*. If the parameter is omitted, it defaults to the current working directory. If *excludes* is specified, it must be a string or a list of strings that is/contains glob patterns or filenames to be removed from the result before returning. > Every file listed in *excludes* will only remove **one** match from > the result list that was generated from *patterns*. Thus, if you > want to exclude some files with a pattern except for a specific file > that would also match that pattern, simply list that file another > time in the *patterns*. # Parameters patterns (list of str): A list of glob patterns or filenames. parent (str): The parent directory for relative paths. excludes (list of str): A list of glob patterns or filenames. include_dotfiles (bool): If True, `*` and `**` can also capture file or directory names starting with a dot. ignore_false_excludes (bool): False by default. If True, items listed in *excludes* that have not been globbed will raise an exception. # Returns list of str: A list of filenames.
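A usage sketch with hypothetical paths and patterns, assuming the glob2 package is available and the wrapper above is in scope:

files = glob(
    ["src/**/*.py", "docs/*.md"],        # patterns, joined with parent
    parent="/path/to/project",
    excludes=["src/**/test_*.py"],       # each exclude removes one match
)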
20,247
def convertnumbers(table, strict=False, **kwargs):
    return convertall(table, numparser(strict), **kwargs)
Convenience function to convert all field values to numbers where possible. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz', 'quux'], ... ['1', '3.0', '9+3j', 'aaa'], ... ['2', '1.3', '7+2j', None]] >>> table2 = etl.convertnumbers(table1) >>> table2 +-----+-----+--------+-------+ | foo | bar | baz | quux | +=====+=====+========+=======+ | 1 | 3.0 | (9+3j) | 'aaa' | +-----+-----+--------+-------+ | 2 | 1.3 | (7+2j) | None | +-----+-----+--------+-------+
20,248
def create_image_lists(image_dir, testing_percentage, validation_percentage):
    if not tf.gfile.Exists(image_dir):
        tf.logging.error("Image directory not found.")
        return None
    result = collections.OrderedDict()
    sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir))
    is_root_dir = True
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue
        extensions = sorted(set(os.path.normcase(ext) for ext in
                                ['JPEG', 'JPG', 'jpeg', 'jpg', 'png']))
        file_list = []
        dir_name = os.path.basename(
            sub_dir[:-1] if sub_dir.endswith('/') else sub_dir)
        if dir_name == image_dir:
            continue
        tf.logging.info("Looking for images in '" + dir_name + "'")
        for extension in extensions:
            file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
            file_list.extend(tf.gfile.Glob(file_glob))
        if not file_list:
            tf.logging.warning('No files found')
            continue
        if len(file_list) < 20:
            tf.logging.warning(
                'WARNING: Folder has less than 20 images, which may cause '
                'issues.')
        elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
            tf.logging.warning(
                'WARNING: Folder {} has more than {} images. Some images '
                'will never be selected.'.format(dir_name,
                                                 MAX_NUM_IMAGES_PER_CLASS))
        label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
        training_images = []
        testing_images = []
        validation_images = []
        for file_name in file_list:
            base_name = os.path.basename(file_name)
            hash_name = re.sub(r'_nohash_.*$', '', file_name)
            hash_name_hashed = hashlib.sha1(
                tf.compat.as_bytes(hash_name)).hexdigest()
            percentage_hash = ((int(hash_name_hashed, 16) %
                                (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                               (100.0 / MAX_NUM_IMAGES_PER_CLASS))
            if percentage_hash < validation_percentage:
                validation_images.append(base_name)
            elif percentage_hash < (testing_percentage +
                                    validation_percentage):
                testing_images.append(base_name)
            else:
                training_images.append(base_name)
        result[label_name] = {
            'dir': dir_name,
            'training': training_images,
            'testing': testing_images,
            'validation': validation_images,
        }
    return result
Builds a list of training images from the file system. Analyzes the sub folders in the image directory, splits them into stable training, testing, and validation sets, and returns a data structure describing the lists of images for each label and their paths. Args: image_dir: String path to a folder containing subfolders of images. testing_percentage: Integer percentage of the images to reserve for tests. validation_percentage: Integer percentage of images reserved for validation. Returns: An OrderedDict containing an entry for each label subfolder, with images split into training, testing, and validation sets within each label. The order of items defines the class indices.
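A call sketch, assuming a directory laid out with one sub-folder per label (the path is hypothetical) and the per-label keys reconstructed above ('training', 'testing', 'validation'):

image_lists = create_image_lists("/tmp/flower_photos",
                                 testing_percentage=10,
                                 validation_percentage=10)
for label, sets in image_lists.items():
    print(label, len(sets["training"]), len(sets["testing"]),
          len(sets["validation"]))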
20,249
def textFileStream(self, directory):
    return DStream(self._jssc.textFileStream(directory), self,
                   UTF8Deserializer())
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8.
20,250
def coord(self, offset=(0, 0)):
    (tilex, tiley) = self.tile
    (offsetx, offsety) = offset
    world_tiles = 1 << self.zoom
    x = (tilex + 1.0*offsetx/TILES_WIDTH) / (world_tiles/2.) - 1
    y = (tiley + 1.0*offsety/TILES_HEIGHT) / (world_tiles/2.) - 1
    lon = x * 180.0
    y = math.exp(-y*2*math.pi)
    e = (y-1)/(y+1)
    lat = 180.0/math.pi * math.asin(e)
    return (lat, lon)
return lat,lon within a tile given (offsetx,offsety)
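The same inverse Web Mercator math, restated as a self-contained function for clarity; the tile size 256 stands in for the TILES_WIDTH/TILES_HEIGHT constants assumed above:

import math

def tile_to_lat_lon(tilex, tiley, zoom, offset=(0, 0), tile_size=256):
    world_tiles = 1 << zoom
    x = (tilex + offset[0] / tile_size) / (world_tiles / 2.0) - 1
    y = (tiley + offset[1] / tile_size) / (world_tiles / 2.0) - 1
    lon = x * 180.0
    y = math.exp(-y * 2 * math.pi)
    lat = 180.0 / math.pi * math.asin((y - 1) / (y + 1))
    return (lat, lon)

print(tile_to_lat_lon(0, 0, 1))  # about (85.05, -180.0), the map's top-left corner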
20,251
def dict(self):
    json_list = []
    for step in self.steps:
        json_list.append(step.dict)
    return json_list
the python object for rendering json. It is called dict to be coherent with the other modules but it actually returns a list :return: the python object for rendering json :rtype: list
20,252
def visit_set(self, node):
    return "{%s}" % ", ".join(child.accept(self) for child in node.elts)
return an astroid.Set node as string
20,253
def get_institute_graph_url(start, end):
    filename = get_institute_graph_filename(start, end)
    urls = {
        'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
        'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
    }
    return urls
Pie chart comparing institutes usage.
20,254
def ppo_original_world_model():
    hparams = ppo_original_params()
    hparams.policy_network = "next_frame_basic_deterministic"
    hparams_keys = hparams.values().keys()
    video_hparams = basic_deterministic_params.next_frame_basic_deterministic()
    for (name, value) in six.iteritems(video_hparams.values()):
        if name in hparams_keys:
            hparams.set_hparam(name, value)
        else:
            hparams.add_hparam(name, value)
    hparams.weight_decay = 0
    return hparams
Atari parameters with world model as policy.
20,255
def ConfigureViewTypeChoices(self, event=None):
    self.viewTypeTool.SetItems(getattr(self.loader, 'ROOTS', []))
    if self.loader and self.viewType in self.loader.ROOTS:
        self.viewTypeTool.SetSelection(
            self.loader.ROOTS.index(self.viewType))

    def chooser(typ):
        def Callback(event):
            if typ != self.viewType:
                self.viewType = typ
                self.OnRootView(event)
        return Callback

    for item in self.viewTypeMenu.GetMenuItems():
        self.viewTypeMenu.DeleteItem(item)
    if self.loader and self.loader.ROOTS:
        for root in self.loader.ROOTS:
            item = wx.MenuItem(
                self.viewTypeMenu, -1, root.title(),
                _("View hierarchy by %(name)s") % {'name': root.title()},
                kind=wx.ITEM_RADIO,
            )
            item.SetCheckable(True)
            self.viewTypeMenu.AppendItem(item)
            item.Check(root == self.viewType)
            wx.EVT_MENU(self, item.GetId(), chooser(root))
Configure the set of View types in the toolbar (and menus)
20,256
def _log(self, s):
    sys.stderr.write(s)
    sys.stderr.flush()
Log a string. It flushes but doesn't append \n, so do that yourself.
20,257
def show(parent=None, targets=[], modal=None,
         auto_publish=False, auto_validate=False):
    if modal is None:
        modal = bool(os.environ.get("PYBLISH_QML_MODAL", False))

    install(modal)

    show_settings = settings.to_dict()
    show_settings['autoPublish'] = auto_publish
    show_settings['autoValidate'] = auto_validate

    if _state.get("currentServer"):
        server = _state["currentServer"]
        proxy = ipc.server.Proxy(server)
        try:
            proxy.show(show_settings)
            return server
        except IOError:
            _state.pop("currentServer")

    if not host.is_headless():
        host.splash()

    try:
        service = ipc.service.Service()
        server = ipc.server.Server(service, targets=targets, modal=modal)
    except Exception:
        traceback.print_exc()
        return host.desplash()

    proxy = ipc.server.Proxy(server)
    proxy.show(show_settings)

    _state["currentServer"] = server
    log.info("Success. QML server available as "
             "pyblish_qml.api.current_server()")
    server.listen()
    return server
Attempt to show GUI Requires install() to have been run first, and a live instance of Pyblish QML in the background. Arguments: parent (None, optional): Deprecated targets (list, optional): Publishing targets modal (bool, optional): Block interactions to parent
20,258
def storage_set(self, key, value):
    if not self._module:
        return
    self._storage_init()
    module_name = self._module.module_full_name
    return self._storage.storage_set(module_name, key, value)
Store a value for the module.
20,259
def set_iscsi_volume(self, port_id, initiator_iqn,
                     initiator_dhcp=False, initiator_ip=None,
                     initiator_netmask=None, target_dhcp=False,
                     target_iqn=None, target_ip=None, target_port=3260,
                     target_lun=0, boot_prio=1, chap_user=None,
                     chap_secret=None, mutual_chap_secret=None):
    initiator_netmask = (_convert_netmask(initiator_netmask)
                         if initiator_netmask else None)
    port_handler = _parse_physical_port_id(port_id)
    iscsi_boot = _create_iscsi_boot(
        initiator_iqn,
        initiator_dhcp=initiator_dhcp,
        initiator_ip=initiator_ip,
        initiator_netmask=initiator_netmask,
        target_dhcp=target_dhcp,
        target_iqn=target_iqn,
        target_ip=target_ip,
        target_port=target_port,
        target_lun=target_lun,
        boot_prio=boot_prio,
        chap_user=chap_user,
        chap_secret=chap_secret,
        mutual_chap_secret=mutual_chap_secret)
    port = self._find_port(port_handler)
    if port:
        port_handler.set_iscsi_port(port, iscsi_boot)
    else:
        port = port_handler.create_iscsi_port(iscsi_boot)
    self._add_port(port_handler, port)
Set iSCSI volume information to configuration. :param port_id: Physical port ID. :param initiator_iqn: IQN of initiator. :param initiator_dhcp: True if DHCP is used in the iSCSI network. :param initiator_ip: IP address of initiator. None if DHCP is used. :param initiator_netmask: Netmask of initiator as integer. None if DHCP is used. :param target_dhcp: True if DHCP is used for iSCSI target. :param target_iqn: IQN of target. None if DHCP is used. :param target_ip: IP address of target. None if DHCP is used. :param target_port: Port number of target. None if DHCP is used. :param target_lun: LUN number of target. None if DHCP is used. :param boot_prio: Boot priority of the volume. 1 indicates the highest priority.
20,260
def logical_downlinks(self):
    if not self.__logical_downlinks:
        self.__logical_downlinks = LogicalDownlinks(self.__connection)
    return self.__logical_downlinks
Gets the LogicalDownlinks API client. Returns: LogicalDownlinks:
20,261
def mod_repo(repo, saltenv=, **kwargs): t be updated unless another change is made at the same time. Keys should be properly added on initial configuration. CLI Examples: .. code-block:: bash salt pkg.mod_repo uri=http://new/uri salt pkg.mod_repo comps=main,universe refresh_dbNeonThe \ argument to \ has been renamed to \. Support for using \ will be removed in the Neon release of Salt.refresh_dbrefresht_ be changed on the if repo.startswith(): if __grains__[] in (, , ): if salt.utils.path.which() \ and not in kwargs: repo_info = get_repo(repo) if repo_info: return {repo: repo_info} else: env = None http_proxy_url = _get_http_proxy_url() if http_proxy_url: env = {: http_proxy_url, : http_proxy_url} if float(__grains__[]) < 12.04: cmd = [, repo] else: cmd = [, , repo] out = _call_apt(cmd, env=env, scope=False, **kwargs) if out[]: raise CommandExecutionError( {0}\{1}\ {3}\.format( repo[4:], cmd, out[], out[] ) ) if refresh: refresh_db() return {repo: out} else: if not HAS_SOFTWAREPROPERTIES: _warn_software_properties(repo) else: log.info() try: owner_name, ppa_name = repo[4:].split(, 1) except ValueError: raise CommandExecutionError( {0}\.format(repo[4:]) ) dist = __grains__[] kwargs[] = dist ppa_auth = if not in kwargs: filename = kwargs[] = filename.format(owner_name, ppa_name, dist) try: launchpad_ppa_info = _get_ppa_info_from_launchpad( owner_name, ppa_name) if not in kwargs: kwargs[] = launchpad_ppa_info[ ] else: if not in kwargs: error_str = \ raise CommandExecutionError( error_str.format(owner_name, ppa_name) ) except HTTPError as exc: raise CommandExecutionError( .format( owner_name, ppa_name, exc) ) except IndexError as exc: raise CommandExecutionError( .format(owner_name, ppa_name, exc) ) if not in kwargs: kwargs[] = if in kwargs: if not launchpad_ppa_info[]: raise CommandExecutionError( .format(repo) ) if in kwargs: ppa_auth = .format(kwargs[]) repo = LP_PVT_SRC_FORMAT.format(ppa_auth, owner_name, ppa_name, dist) else: repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) else: raise CommandExecutionError( .format(repo) ) sources = sourceslist.SourcesList() if kwargs.get(, False): sources = _consolidate_repo_sources(sources) repos = [s for s in sources if not s.invalid] mod_source = None try: repo_type, \ repo_architectures, \ repo_uri, \ repo_dist, \ repo_comps = _split_repo_str(repo) except SyntaxError: raise SyntaxError( {0}\.format(repo) ) full_comp_list = set(repo_comps) no_proxy = __salt__[]() if in kwargs: keyid = kwargs.pop(, None) keyserver = kwargs.pop(, None) if not keyid or not keyserver: error_str = raise NameError(error_str) if not isinstance(keyid, list): keyid = [keyid] for key in keyid: if isinstance(key, int): key = hex(key) cmd = [, , key] output = __salt__[](cmd, python_shell=False, **kwargs) imported = output.startswith() if keyserver: if not imported: http_proxy_url = _get_http_proxy_url() if http_proxy_url and keyserver not in no_proxy: cmd = [, , , , .format(http_proxy_url), , keyserver, , , , key] else: cmd = [, , , , keyserver, , , , key] ret = _call_apt(cmd, scope=False, **kwargs) if ret[] != 0: raise CommandExecutionError( .format(ret[]) ) elif in kwargs: key_url = kwargs[] fn_ = __salt__[](key_url, saltenv) if not fn_: raise CommandExecutionError( .format(key_url) ) cmd = [, , fn_] out = __salt__[](cmd, python_shell=False, **kwargs) if not out.upper().startswith(): raise CommandExecutionError( .format(key_url) ) elif in kwargs: key_text = kwargs[] cmd = [, , ] out = __salt__[](cmd, stdin=key_text, python_shell=False, **kwargs) if not 
out.upper().startswith(): raise CommandExecutionError( .format(key_text) ) if in kwargs: kwargs[] = kwargs[].split() full_comp_list |= set(kwargs[]) else: kwargs[] = list(full_comp_list) if in kwargs: kwargs[] = kwargs[].split() else: kwargs[] = repo_architectures if in kwargs: kwargs[] = salt.utils.data.is_true(kwargs[]) kw_type = kwargs.get() kw_dist = kwargs.get() for source in repos: repo_matches = source.type == repo_type and source.uri.rstrip() == repo_uri.rstrip() and source.dist == repo_dist kw_matches = source.dist == kw_dist and source.type == kw_type if repo_matches or kw_matches: for comp in full_comp_list: if comp in getattr(source, , []): mod_source = source if not source.comps: mod_source = source if kwargs[] != source.architectures: mod_source = source if mod_source: break if in kwargs: kwargs[] = \ salt.utils.pkg.deb.combine_comments(kwargs[]) if not mod_source: mod_source = sourceslist.SourceEntry(repo) if in kwargs: mod_source.comment = kwargs[] sources.list.append(mod_source) elif in kwargs: mod_source.comment = kwargs[] for key in kwargs: if key in _MODIFY_OK and hasattr(mod_source, key): setattr(mod_source, key, kwargs[key]) sources.save() if refresh: refresh_db() return { repo: { : getattr(mod_source, , []), : mod_source.comps, : mod_source.disabled, : mod_source.file, : mod_source.type, : mod_source.uri, : mod_source.line } }
Modify one or more values for a repo. If the repo does not exist, it will be created, so long as the definition is well formed. For Ubuntu the ``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be used to create a new repository. The following options are available to modify a repo definition: architectures A comma-separated list of supported architectures, e.g. ``amd64`` If this option is not set, all architectures (configured in the system) will be used. comps A comma separated list of components for the repo, e.g. ``main`` file A file name to be used keyserver Keyserver to get gpg key from keyid Key ID or a list of key IDs to load with the ``keyserver`` argument key_url URL to a GPG key to add to the APT GPG keyring key_text GPG key in string form to add to the APT GPG keyring .. versionadded:: 2018.3.0 consolidate : False If ``True``, will attempt to de-duplicate and consolidate sources comments Sometimes you want to supply additional information, but not as enabled configuration. All comments provided here will be joined into a single string and appended to the repo configuration with a comment marker (#) before it. .. versionadded:: 2015.8.9 .. note:: Due to the way keys are stored for APT, there is a known issue where the key won't be updated unless another change is made at the same time. Keys should be properly added on initial configuration. CLI Examples: .. code-block:: bash salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
20,262
def remove(self, key, where=None, start=None, stop=None):
    where = _ensure_term(where, scope_level=1)
    try:
        s = self.get_storer(key)
    except KeyError:
        raise
    except Exception:
        if where is not None:
            raise ValueError(
                "trying to remove a node with a non-None where clause!")
        s = self.get_node(key)
        if s is not None:
            s._f_remove(recursive=True)
        return None

    if com._all_none(where, start, stop):
        s.group._f_remove(recursive=True)
    else:
        if not s.is_table:
            raise ValueError(
                'can only remove with where on objects written as tables')
        return s.delete(where=where, start=start, stop=stop)
Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store
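A usage sketch, assuming pandas with the PyTables extra installed; the store path is hypothetical:

import pandas as pd

df = pd.DataFrame({"a": range(10)})
with pd.HDFStore("demo.h5") as store:
    store.put("df", df, format="table")     # where-based deletes need table format
    store.remove("df", where="index > 4")   # partial delete, returns rows removed
    store.remove("df")                      # drop the whole node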
20,263
def vtableEqual(a, objectStart, b):
    N.enforce_number(objectStart, N.UOffsetTFlags)

    if len(a) * N.VOffsetTFlags.bytewidth != len(b):
        return False

    for i, elem in enumerate(a):
        x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth)
        if x == 0 and elem == 0:
            pass
        else:
            y = objectStart - elem
            if x != y:
                return False
    return True
vtableEqual compares an unwritten vtable to a written vtable.
20,264
def convert_units(values, source_measure_or_unit_abbreviation,
                  target_measure_or_unit_abbreviation, **kwargs):
    if numpy.isscalar(values):
        values = [values]
    float_values = [float(value) for value in values]
    values_to_return = convert(float_values,
                               source_measure_or_unit_abbreviation,
                               target_measure_or_unit_abbreviation)
    return values_to_return
Convert a value from one unit to another one. Example:: >>> cli = PluginLib.connect() >>> cli.service.convert_units(20.0, 'm', 'km') 0.02 Parameters: values: single measure or an array of measures source_measure_or_unit_abbreviation: A measure in the source unit, or just the abbreviation of the source unit, from which convert the provided measure value/values target_measure_or_unit_abbreviation: A measure in the target unit, or just the abbreviation of the target unit, into which convert the provided measure value/values Returns: Always a list
20,265
def exists(self):
    if self.name in ("", "/") and self.parent is None:
        return True
    else:
        return self in self.parent.directories
Check whether the directory exists on the camera.
20,266
def clean(self, list_article_candidates):
    results = []
    for article_candidate in list_article_candidates:
        article_candidate.title = self.do_cleaning(article_candidate.title)
        article_candidate.description = self.do_cleaning(
            article_candidate.description)
        article_candidate.text = self.do_cleaning(article_candidate.text)
        article_candidate.topimage = self.do_cleaning(
            article_candidate.topimage)
        article_candidate.author = self.do_cleaning(article_candidate.author)
        article_candidate.publish_date = self.do_cleaning(
            article_candidate.publish_date)
        results.append(article_candidate)
    return results
Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects
20,267
def face_subset(self, face_index): if self.defined: result = ColorVisuals( face_colors=self.face_colors[face_index]) else: result = ColorVisuals() return result
Given a mask of face indices, return a sliced version.

Parameters
----------
face_index: (n,) int or (n,) bool
  mask for faces

Returns
----------
visual: ColorVisuals object containing a subset of faces.
20,268
def _register_options(self, api_interface): op_paths = api_interface.op_paths(collate_methods=True) for path, operations in op_paths.items(): if api.Method.OPTIONS not in operations: self._options_operation(api_interface, path, operations.keys())
Register CORS options endpoints.
20,269
def encode_schedule(schedule):
    interpolation, steps, pmfs = schedule
    # The space separator and the '@' step prefix are reconstructed from the
    # matching decode routine, which splits on whitespace and treats
    # '@'-prefixed tokens as step markers.
    return interpolation + ' ' + ' '.join(
        '@' + str(s) + ' ' + ' '.join(map(str, p))
        for s, p in zip(steps, pmfs))
Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple.
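A worked example under the '@'-prefixed step encoding reconstructed above:

schedule = ('linear', [0, 100], [[0.5, 0.5], [0.9, 0.1]])
encode_schedule(schedule)
# -> 'linear @0 0.5 0.5 @100 0.9 0.1'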
20,270
def create(cls, name, servers=None, time_range='yesterday',
           all_logs=False, filter_for_delete=None, comment=None, **kwargs):
    if not servers:
        servers = [svr.href for svr in ManagementServer.objects.all()]
        servers.extend([svr.href for svr in LogServer.objects.all()])
    else:
        servers = [svr.href for svr in servers]
    filter_for_delete = filter_for_delete.href if filter_for_delete else \
        FilterExpression().href
    # NOTE: the JSON key names below are reconstructed to mirror the
    # parameter names and may differ from the smc-python source.
    json = {
        'name': name,
        'servers': servers,
        'time_range': time_range,
        'start_time': 0,
        'duration': 0,
        'file_format': '',  # key name assumed
        'filter_for_delete': filter_for_delete,
        'comment': comment}
    json.update(**log_target_types(all_logs, **kwargs))
    return ElementCreator(cls, json)
Create a new delete log task. Provide True to all_logs to delete all log types. Otherwise provide kwargs to specify each log type of interest.

:param str name: name for this task
:param servers: servers on which to delete logs. Servers must be instances of management servers or log servers. If no value is provided, all servers are used.
:type servers: list(ManagementServer or LogServer)
:param str time_range: specify a time range for the deletion. Valid options are 'yesterday', 'last_full_week_sun_sat', 'last_full_week_mon_sun', 'last_full_month' (default: 'yesterday')
:param FilterExpression filter_for_delete: optional filter for deleting (default: FilterExpression('Match All'))
:param bool all_logs: if True, all log types will be deleted. If this is True, kwargs are ignored (default: False)
:param kwargs: see :func:`~log_target_types` for keyword arguments and default values.
:raises ElementNotFound: specified servers were not found
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: DeleteLogTask
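A minimal usage sketch; the argument values are illustrative only:

task = DeleteLogTask.create(
    name='delete_last_month_logs',
    time_range='last_full_month',
    all_logs=True)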
20,271
def _fill_cropping(self, image_size, view_size): def aspect_ratio(width, height): return width / height ar_view = aspect_ratio(*view_size) ar_image = aspect_ratio(*image_size) if ar_view < ar_image: crop = (1.0 - (ar_view/ar_image)) / 2.0 return (crop, 0.0, crop, 0.0) if ar_view > ar_image: crop = (1.0 - (ar_image/ar_view)) / 2.0 return (0.0, crop, 0.0, crop) return (0.0, 0.0, 0.0, 0.0)
Return a (left, top, right, bottom) 4-tuple containing the cropping values required to display an image of *image_size* in *view_size* when stretched proportionately. Each value is a percentage expressed as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and *view_size* are each (width, height) pairs.
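A quick worked instance of the formula: a 200x100 image (aspect ratio 2.0) shown in a 100x100 view (aspect ratio 1.0) falls into the ar_view < ar_image branch, so crop = (1.0 - 1.0/2.0) / 2.0 = 0.25 and the return value is (0.25, 0.0, 0.25, 0.0), i.e. 25% cropped from each horizontal side.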
20,272
def editpermissions_user_view(self, request, user_id, forum_id=None):
    user_model = get_user_model()
    user = get_object_or_404(user_model, pk=user_id)
    forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None

    context = self.get_forum_perms_base_context(request, forum)
    context['forum'] = forum
    # The title format string is reconstructed; the exact wording may differ.
    context['title'] = '{} - {}'.format(_('Forum permissions'), user)
    context['form'] = self._get_permissions_form(
        request, UserForumPermission, {'forum': forum, 'user': user},
    )

    return render(request, self.editpermissions_user_view_template_name, context)
Allows editing of user permissions for the considered forum. The view displays a form to define which permissions are granted to the given user for the considered forum.
20,273
def article(request, slug):
    tree = request.current_page.get_public_object()

    # The apphook name is reconstructed; verify against the project source.
    if tree.application_urls != 'CMSArticlesApp':
        return page(request, slug)

    # The permission strings below are reconstructed placeholders.
    draft = use_draft(request) and request.user.has_perm('cms_articles.change_article')
    preview = 'preview' in request.GET and request.user.has_perm('cms_articles.change_article')

    site = tree.node.site
    article = get_article_from_slug(tree, slug, preview, draft)

    if not article:
        _handle_no_page(request)

    request.current_article = article

    if hasattr(request, 'user') and request.user.is_staff:
        user_languages = get_language_list(site_id=site.pk)
    else:
        user_languages = get_public_languages(site_id=site.pk)

    request_language = get_language_from_request(request, check_path=True)

    available_languages = [
        language for language in user_languages
        if language in list(article.get_published_languages())
    ]

    own_urls = [
        request.build_absolute_uri(request.path),
        '/%s' % request.path,  # format string reconstructed
        request.path,
    ]

    try:
        redirect_on_fallback = get_redirect_on_fallback(request_language, site_id=site.pk)
    except LanguageError:
        redirect_on_fallback = False

    if request_language not in user_languages:
        default_language = get_default_language_for_site(site.pk)
        fallbacks = get_fallback_languages(default_language, site_id=site.pk)
        fallbacks = [default_language] + fallbacks
    else:
        fallbacks = get_fallback_languages(request_language, site_id=site.pk)

    fallback_languages = [
        language for language in fallbacks
        if language != request_language and language in available_languages
    ]

    language_is_unavailable = request_language not in available_languages

    if language_is_unavailable and not fallback_languages:
        # No article in the requested language and no configured fallbacks;
        # body reconstructed following the equivalent django-cms view.
        return _handle_no_page(request)

    # Reconstructed following the equivalent django-cms view.
    structure_requested = get_cms_setting('TOOLBAR_URL__BUILD') in request.GET

    if article.has_change_permission(request) and structure_requested:
        return render_object_structure(request, article)
    return render_article(request, article, current_language=request_language, slug=slug)
The main view of the Django-CMS Articles! Takes a request and a slug, renders the article.
20,274
def recall(self, label=None): if label is None: return self.call("recall") else: return self.call("recall", float(label))
Returns recall or recall for a given label (category) if specified.
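A usage sketch, assuming a fitted MulticlassMetrics-style wrapper named `metrics` (hypothetical):

metrics.recall()     # overall recall
metrics.recall(1.0)  # recall for label 1.0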
20,275
def estimate_tx_gas_with_safe(self, safe_address: str, to: str, value: int, data: bytes, operation: int,
                              block_identifier='latest') -> int:
    data = data or b''

    def parse_revert_data(result: bytes) -> int:
        # 4 bytes of method id + 32 for the abi-encoded offset + 32 for the length
        gas_estimation_offset = 4 + 32 + 32
        estimated_gas = result[gas_estimation_offset:]

        # The estimated gas must be exactly 32 bytes
        if len(estimated_gas) != 32:
            # Log/exception messages are reconstructed; wording may differ.
            logger.warning('Safe=%s - Cannot estimate gas, received=%s for tx=%s',
                           safe_address, result.hex(), tx)
            raise CannotEstimateGas('Received %s for tx=%s' % (result.hex(), tx))

        return int(estimated_gas.hex(), 16)

    try:
        tx = self.get_contract(safe_address).functions.requiredTxGas(
            to, value, data, operation
        ).buildTransaction({
            'from': safe_address,
            'gas': int(1e7),
            'gasPrice': 0,
        })
        # `requiredTxGas` reverts with the estimation encoded in the revert reason
        result: HexBytes = self.w3.eth.call(tx, block_identifier=block_identifier)
        return parse_revert_data(result)
    except ValueError as exc:  # nodes encode the revert data differently
        error_dict = exc.args[0]
        data = error_dict.get('data')
        if not data:
            raise exc
        elif isinstance(data, str) and 'Reverted ' in data:
            # Parity/OpenEthereum style
            result = HexBytes(data.replace('Reverted ', ''))
            return parse_revert_data(result)

        key = list(data.keys())[0]
        result = data[key]['return']
        if result == '0x':
            raise exc
        else:
            # Ganache-style response; message reconstructed.
            logger.warning('Gas estimation response from node: %s', result)
            estimated_gas_hex = result[138:]
            assert len(estimated_gas_hex) == 64
            estimated_gas = int(estimated_gas_hex, 16)
            return estimated_gas
Estimate tx gas using safe `requiredTxGas` method :return: int: Estimated gas :raises: CannotEstimateGas: If gas cannot be estimated :raises: ValueError: Cannot decode received data
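A hedged usage sketch; the addresses are placeholders and `safe_service` is a hypothetical instance of the class above:

gas = safe_service.estimate_tx_gas_with_safe(
    safe_address='0x' + '0' * 40,
    to='0x' + '1' * 40,
    value=0,
    data=b'',
    operation=0)  # 0 = CALL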
20,276
def decode(self, dataset_split=None, decode_from_file=False, checkpoint_path=None): if decode_from_file: decoding.decode_from_file(self._estimator, self._decode_hparams.decode_from_file, self._hparams, self._decode_hparams, self._decode_hparams.decode_to_file) else: decoding.decode_from_dataset( self._estimator, self._hparams.problem.name, self._hparams, self._decode_hparams, dataset_split=dataset_split, checkpoint_path=checkpoint_path)
Decodes from dataset or file.
20,277
def dispose(json_str):
    result_str = list(json_str)
    escaped = False
    normal = True
    sl_comment = False
    ml_comment = False
    quoted = False

    a_step_from_comment = False
    a_step_from_comment_away = False

    former_index = None

    for index, char in enumerate(json_str):
        if escaped:  # skip the char right after an escape
            escaped = False
            continue

        if a_step_from_comment:  # the previous char was '/'
            if char != '/' and char != '*':
                a_step_from_comment = False
                normal = True
                continue

        if a_step_from_comment_away:  # the previous char was '*'
            if char != '/':
                a_step_from_comment_away = False

        if char == '"':
            if normal and not escaped:
                # start of a string
                quoted = True
                normal = False
            elif quoted and not escaped:
                # end of a string
                quoted = False
                normal = True

        elif char == '\\':
            # escapes only take effect outside comments
            if normal or quoted:
                escaped = True

        elif char == '/':
            if a_step_from_comment:
                # start of a single-line comment
                a_step_from_comment = False
                sl_comment = True
                normal = False
                former_index = index - 1
            elif a_step_from_comment_away:
                # end of a multi-line comment
                a_step_from_comment_away = False
                normal = True
                ml_comment = False
                for i in range(former_index, index + 1):
                    result_str[i] = ""
            elif normal:
                # maybe the start of a comment
                a_step_from_comment = True
                normal = False

        elif char == '*':
            if a_step_from_comment:
                # start of a multi-line comment
                a_step_from_comment = False
                ml_comment = True
                normal = False
                former_index = index - 1
            elif ml_comment:
                a_step_from_comment_away = True

        elif char == '\n':
            if sl_comment:
                sl_comment = False
                normal = True
                for i in range(former_index, index + 1):
                    result_str[i] = ""

        elif char == ']' or char == '}':
            if normal:
                _remove_last_comma(result_str, index)

    return ("" if isinstance(json_str, str) else u"").join(result_str)
Clear all comments in json_str. Clear JS-style comments like // and /**/ in json_str. Accept a str or unicode as input. Args: json_str: A json string of str or unicode to clean up comment Returns: str: The str without comments (or unicode if you pass in unicode)
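An illustrative call; `_remove_last_comma` is assumed to be the module's helper that blanks out a trailing comma in place:

dispose('{"a": 1, /* block */ "b": 2, // trailing\n}')
# both comment styles are blanked out in place, and the comma after
# "b": 2 is removed before the closing brace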
20,278
def get_best_span(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor: if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, passage_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log() valid_span_log_probs = span_log_probs + span_log_mask best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return torch.stack([span_start_indices, span_end_indices], dim=-1)
This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()`` in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can directly import this function without the class. We call the inputs "logits" - they could either be unnormalized logits or normalized log probabilities. A log_softmax operation is a constant shifting of the entire logit vector, so taking an argmax over either one gives the same result.
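A minimal check of the span selection, assuming this function and PyTorch are importable as-is:

import torch

start = torch.log(torch.tensor([[0.1, 0.6, 0.3]]))
end = torch.log(torch.tensor([[0.2, 0.2, 0.6]]))
get_best_span(start, end)
# tensor([[1, 2]]): start=1, end=2 maximizes the joint score with end >= start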
20,279
def validateElement(self, ctxt, elem): if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o) return ret
Try to validate the subtree under an element
20,280
async def create(gc: GroupControl, name, slaves): click.echo("Creating group %s with slaves: %s" % (name, slaves)) click.echo(await gc.create(name, slaves))
Create new group
20,281
def main(argv=sys.argv, stream=sys.stderr): args = parse_args(argv) suite = build_suite(args) runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream) result = runner.run(suite) return get_status(result)
Entry point for ``tappy`` command.
20,282
def find_next_candidate(self): try: return self.candidates.pop() except IndexError: pass try: node = self.top_targets_left.pop() except IndexError: return None self.current_top = node alt, message = node.alter_targets() if alt: self.message = message self.candidates.append(node) self.candidates.extend(self.order(alt)) node = self.candidates.pop() return node
Returns the next candidate Node for (potential) evaluation. The candidate list (really a stack) initially consists of all of the top-level (command line) targets provided when the Taskmaster was initialized. While we walk the DAG, visiting Nodes, all the children that haven't finished processing get pushed on to the candidate list. Each child can then be popped and examined in turn for whether *their* children are all up-to-date, in which case a Task will be created for their actual evaluation and potential building. Here is where we also allow candidate Nodes to alter the list of Nodes that should be examined. This is used, for example, when invoking SCons in a source directory. A source directory Node can return its corresponding build directory Node, essentially saying, "Hey, you really need to build this thing over here instead."
20,283
def reflect_filter(sources, model, cache=None): targets = [reflect(source, model, cache=cache) for source in sources] return [target for target in targets if target is not None]
Returns the list of reflections of the objects in the `sources` list onto the other class. Objects that are not found in the target table are silently discarded.
20,284
def upload_headimg(self, account, media_file):
    # Endpoint and field names follow the WeChat customservice API;
    # verify against the wechatpy source.
    return self._post(
        'customservice/kfaccount/uploadheadimg',
        params={
            'kf_account': account
        },
        files={
            'media': media_file
        }
    )
Upload a customer service account avatar.

For details see
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

:param account: the full customer service account
:param media_file: the avatar file to upload, a File-Object
:return: the returned JSON payload
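A usage sketch, assuming a wechatpy-style client whose customservice API exposes this method; the account name is illustrative:

with open('avatar.jpg', 'rb') as f:
    client.customservice.upload_headimg('test1@kftest', f)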
20,285
def replicate_global_dbs(cloud_url=None, local_url=None): local_url = local_url or config["local_server"]["url"] cloud_url = cloud_url or config["cloud_server"]["url"] server = Server(local_url) for db_name in global_dbs: server.replicate( db_name, urljoin(cloud_url, db_name), db_name, continuous=True, )
Set up replication of the global databases from the cloud server to the local server. :param str cloud_url: Used to override the cloud url from the global configuration in case the calling function is in the process of initializing the cloud server :param str local_url: Used to override the local url from the global configuration in case the calling function is in the process of initializing the local server
20,286
def frozen_stats_from_tree(tree):
    if not tree:
        raise ValueError('tree should not be empty')  # message reconstructed
    stats_index = []
    for parent_offset, members in tree:
        stats = FrozenStatistics(*members)
        stats_index.append(stats)
        if parent_offset is not None:
            stats_index[parent_offset].children.append(stats)
    return stats_index[0]
Restores a statistics from the given flat members tree. :func:`make_frozen_stats_tree` makes a tree for this function.
20,287
def lock(self, session, lock_type, timeout, requested_key=None): try: sess = self.sessions[session] except KeyError: return StatusCode.error_invalid_object return sess.lock(lock_type, timeout, requested_key)
Establishes an access mode to the specified resources. Corresponds to viLock function of the VISA library. :param session: Unique logical identifier to a session. :param lock_type: Specifies the type of lock requested, either Constants.EXCLUSIVE_LOCK or Constants.SHARED_LOCK. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. :param requested_key: This parameter is not used and should be set to VI_NULL when lockType is VI_EXCLUSIVE_LOCK. :return: access_key that can then be passed to other sessions to share the lock, return value of the library call. :rtype: str, :class:`pyvisa.constants.StatusCode`
20,288
def to_phalf_from_pfull(arr, val_toa=0, val_sfc=0): phalf = np.zeros((arr.shape[0] + 1, arr.shape[1], arr.shape[2])) phalf[0] = val_toa phalf[-1] = val_sfc phalf[1:-1] = 0.5*(arr[:-1] + arr[1:]) return phalf
Compute data at half pressure levels from values at full levels. Could be the pressure array itself, but it could also be any other data defined at pressure levels. Requires specification of values at surface and top of atmosphere.
20,289
def list_running_zones(self):
    self.update_controller_info()

    if self.running is None or not self.running:
        return None
    # 'relay' is assumed to be the key reported by the Hydrawise status API
    return int(self.running[0]['relay'])
Returns the currently active relay.

:returns: the running relay number, or None if no relays are active.
:rtype: int or None
20,290
def editors(self, value): warnings.warn( _ASSIGNMENT_DEPRECATED_MSG.format("editors", EDITOR_ROLE), DeprecationWarning, ) self[EDITOR_ROLE] = value
Update editors. DEPRECATED: use ``policy["roles/editors"] = value`` instead.
20,291
def unnest(c, elem, ignore_whitespace=False):
    parent = elem.getparent()
    gparent = parent.getparent()
    index = parent.index(elem)
    # put everything up to elem into a new preceding copy of the parent
    preparent = etree.Element(parent.tag)
    preparent.text, parent.text = (parent.text or ''), ''
    for k in parent.attrib.keys():
        preparent.set(k, parent.get(k))
    if index > 0:
        for ch in parent.getchildren()[:index]:
            preparent.append(ch)
    gparent.insert(gparent.index(parent), preparent)
    XML.remove_if_empty(preparent, leave_tail=True, ignore_whitespace=ignore_whitespace)
    # move elem out of parent, immediately before it
    XML.remove(elem, leave_tail=True)
    gparent.insert(gparent.index(parent), elem)
    elem.tail = ''
    # if the original parent is now empty, remove it
    XML.remove_if_empty(parent, leave_tail=True, ignore_whitespace=ignore_whitespace)
Unnest the element from its parent within the document. MUTABLE CHANGES: the tree is modified in place.
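An illustrative before/after, assuming elem is the <b> inside the paragraph:

# before: <div><p>one<b>two</b>three</p></div>
# after:  <div><p>one</p><b>two</b><p>three</p></div>
# the text before elem moves into a new preceding <p>, elem moves up one
# level, and the original <p> keeps the tail text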
20,292
def create_venv_with_package(packages):
    # A generator; presumably decorated with @contextlib.contextmanager in
    # the original. `create` is assumed to be a project helper that builds
    # the venv and returns its context (stdlib venv.create returns None).
    with tempfile.TemporaryDirectory() as tempdir:
        myenv = create(tempdir, with_pip=True)
        pip_call = [
            myenv.env_exe,
            "-m",
            "pip",
            "install",
        ]
        # upgrade pip first; the exact arguments are reconstructed
        subprocess.check_call(pip_call + ['-U', 'pip'])
        if packages:
            subprocess.check_call(pip_call + packages)
        yield myenv
Create a venv with these packages in a temp dir and yield the env.

packages should be an iterable of pip version instructions (e.g. package~=1.2.3)
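A usage sketch, assuming the generator above is wrapped with contextlib.contextmanager as its yield suggests; the package pin is illustrative:

with create_venv_with_package(['requests~=2.31']) as env:
    print(env.env_exe)  # interpreter inside the temporary venv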
20,293
def _get_parent(root):
    elem = root
    while True:
        elem = elem.getparent()
        # list container tag names reconstructed as 'ul'/'ol'; verify
        # against the source
        if elem.tag in ['ul', 'ol']:
            return elem
Returns root element for a list. :Args: root (Element): lxml element of current location :Returns: lxml element representing list
20,294
def _encrypt(self, dec, password=None): if AES is None: raise ImportError("PyCrypto required") if password is None: password = self.password if password is None: raise ValueError( "Password need to be provided to create encrypted archives") master_key = Random.get_random_bytes(32) master_salt = Random.get_random_bytes(64) user_salt = Random.get_random_bytes(64) master_iv = Random.get_random_bytes(16) user_iv = Random.get_random_bytes(16) rounds = 10000 l = len(dec) pad = 16 - (l % 16) dec += bytes([pad] * pad) cipher = AES.new(master_key, IV=master_iv, mode=AES.MODE_CBC) enc = cipher.encrypt(dec) master_ck = PBKDF2(self.encode_utf8(master_key), master_salt, dkLen=256//8, count=rounds) user_key = PBKDF2(password, user_salt, dkLen=256//8, count=rounds) master_dec = b"\x10" + master_iv + b"\x20" + master_key + b"\x20" + master_ck l = len(master_dec) pad = 16 - (l % 16) master_dec += bytes([pad] * pad) cipher = AES.new(user_key, IV=user_iv, mode=AES.MODE_CBC) master_enc = cipher.encrypt(master_dec) enc = binascii.b2a_hex(user_salt).upper() + b"\n" + \ binascii.b2a_hex(master_salt).upper() + b"\n" + \ str(rounds).encode() + b"\n" + \ binascii.b2a_hex(user_iv).upper() + b"\n" + \ binascii.b2a_hex(master_enc).upper() + b"\n" + enc return enc
Internal encryption function Uses either the password argument for the encryption, or, if not supplied, the password field of the object :param dec: a byte string representing the to be encrypted data :rtype: bytes
20,295
def _rdumpq(q,size,value,encoding=None):
    # Python 2 code: long/unicode/iteritems are intentional.
    write = q.appendleft
    if value is None:
        write("0:~")
        return size + 3
    if value is True:
        write("4:true!")
        return size + 7
    if value is False:
        write("5:false!")
        return size + 8
    if isinstance(value,(int,long)):
        data = str(value)
        ldata = len(data)
        span = str(ldata)
        write("#")  # tnetstring integer type tag
        write(data)
        write(":")
        write(span)
        return size + 2 + len(span) + ldata
    if isinstance(value,(float,)):
        data = repr(value)
        ldata = len(data)
        span = str(ldata)
        write("^")
        write(data)
        write(":")
        write(span)
        return size + 2 + len(span) + ldata
    if isinstance(value,str):
        lvalue = len(value)
        span = str(lvalue)
        write(",")
        write(value)
        write(":")
        write(span)
        return size + 2 + len(span) + lvalue
    if isinstance(value,(list,tuple,)):
        write("]")
        init_size = size = size + 1
        for item in reversed(value):
            size = _rdumpq(q,size,item,encoding)
        span = str(size - init_size)
        write(":")
        write(span)
        return size + 1 + len(span)
    if isinstance(value,dict):
        write("}")
        init_size = size = size + 1
        for (k,v) in value.iteritems():
            size = _rdumpq(q,size,v,encoding)
            size = _rdumpq(q,size,k,encoding)
        span = str(size - init_size)
        write(":")
        write(span)
        return size + 1 + len(span)
    if isinstance(value,unicode):
        if encoding is None:
            raise ValueError("must specify encoding to dump unicode strings")
        value = value.encode(encoding)
        lvalue = len(value)
        span = str(lvalue)
        write(",")
        write(value)
        write(":")
        write(span)
        return size + 2 + len(span) + lvalue
    raise ValueError("unserializable object")
Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures.
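A worked example under the type tags restored above ('#' for integers):

from collections import deque

q = deque()
_rdumpq(q, 0, 12)   # returns 5, the encoded length
''.join(q)          # -> '2:12#': chunks were pushed last-first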
20,296
def get_channel_groups(self, channel_ids):
    groups = []
    for channel_id in channel_ids:
        group = self.get_channel_property(channel_id, 'group')
        groups.append(group)
    return groups
This function returns the group of each channel specified by channel_ids

Parameters
----------
channel_ids: array_like
    The channel ids (ints) for which the groups will be returned

Returns
----------
groups: array_like
    Returns a list of corresponding groups (ints) for the given channel_ids
20,297
def has_descriptor(self, descriptor): for p in self.descriptors: if p in descriptor: return True return False
Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool
20,298
def makeFrequencyPanel(allFreqs, patientName): titles = sorted( iter(allFreqs.keys()), key=lambda title: (allFreqs[title][], title)) origMaxY = 0 cols = 6 rows = len(allFreqs) figure, ax = plt.subplots(rows, cols, squeeze=False) substitutions = [, , , , , ] colors = [, , , , , ] for i, title in enumerate(titles): for index in range(6): for subst in allFreqs[str(title)]: substitution = substitutions[index] print(i, index, title, , substitutions[index]) if substitution[0] == : pattern = else: pattern = maxY = makeFrequencyGraph(allFreqs, title, substitution, pattern, color=colors[index], createFigure=False, showFigure=False, readsAx=ax[i][index]) if maxY > origMaxY: origMaxY = maxY if index == 0: gi = title.split()[1] titles = title.split() try: typeIndex = titles.index() except ValueError: typeNumber = % gi else: typeNumber = titles[typeIndex + 1] ax[i][index].set_ylabel(( % ( typeNumber, allFreqs[title][])), fontsize=10) if i == 0: ax[i][index].set_title(substitution, fontsize=13) if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2: if index < 3: pat = [, , , , , , , , , , , , , , , ] else: pat = [, , , , , , , , , , , , , , , ] ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8) for i, title in enumerate(allFreqs): for index in range(6): a = ax[i][index] a.set_ylim([0, origMaxY]) figure.suptitle( % patientName, fontsize=20) figure.set_size_inches(5 * cols, 3 * rows, forward=True) figure.show() return allFreqs
Make a panel of graphs showing substitution frequencies, one row per title.

@param allFreqs: result from getCompleteFreqs
@param patientName: A C{str}, title for the panel
20,299
def find_service_by_type(self, service_type): for service in self._services: if service_type == service.type: return service return None
Get service for a given service type. :param service_type: Service type, ServiceType :return: Service