Dataset columns:
Unnamed: 0 - int64, row index ranging from 0 to ~389k
code - string, lengths 26 to 79.6k characters
docstring - string, lengths 1 to 46.9k characters
385,200
def proxy_uri(self, value):
    option = Option()
    option.number = defines.OptionRegistry.PROXY_URI.number
    option.value = str(value)
    self.add_option(option)
Set the Proxy-Uri option of a request. :param value: the Proxy-Uri value
385,201
def stereo_bm_preset(self, value):
    if value in (cv2.STEREO_BM_BASIC_PRESET,
                 cv2.STEREO_BM_FISH_EYE_PRESET,
                 cv2.STEREO_BM_NARROW_PRESET):
        self._bm_preset = value
    else:
        raise InvalidBMPresetError("Stereo BM preset must be defined as "
                                   "cv2.STEREO_BM_*_PRESET.")
    self._replace_bm()
Set private ``_stereo_bm_preset`` and reset ``_block_matcher``.
385,202
def add_val(self, subj: Node, pred: URIRef, json_obj: JsonObj, json_key: str, valuetype: Optional[URIRef] = None) -> Optional[BNode]: if json_key not in json_obj: print("Expecting to find object named in JSON:".format(json_key)) print(json_obj._as_json_dumps()) print("entry skipped") return None val = json_obj[json_key] if isinstance(val, List): list_idx = 0 for lv in val: entry_bnode = BNode() if pred == FHIR.Bundle.entry: entry_subj = URIRef(lv.fullUrl) self.add(entry_bnode, FHIR.index, Literal(list_idx)) self.add_val(entry_bnode, FHIR.Bundle.entry.fullUrl, lv, ) self.add(entry_bnode, FHIR.Bundle.entry.resource, entry_subj) self.add(subj, pred, entry_bnode) entry_mv = FHIRMetaVocEntry(self._vocabulary, FHIR.BundleEntryComponent) for k, p in entry_mv.predicates().items(): if k not in [, ] and k in lv: print("---> adding {}".format(k)) self.add_val(subj, p, lv, k) FHIRResource(self._vocabulary, None, self._base_uri, lv.resource, self._g, False, self._replace_narrative_text, False, resource_uri=entry_subj) else: self.add(entry_bnode, FHIR.index, Literal(list_idx)) if isinstance(lv, JsonObj): self.add_value_node(entry_bnode, pred, lv, valuetype) else: vt = self._meta.predicate_type(pred) atom_type = self._meta.primitive_datatype_nostring(vt) if vt else None self.add(entry_bnode, FHIR.value, Literal(lv, datatype=atom_type)) self.add(subj, pred, entry_bnode) list_idx += 1 else: vt = self._meta.predicate_type(pred) if not valuetype else valuetype if self._meta.is_atom(pred): if self._replace_narrative_text and pred == FHIR.Narrative.div and len(val) > 120: val = REPLACED_NARRATIVE_TEXT self.add(subj, pred, Literal(val)) else: v = BNode() if self._meta.is_primitive(vt): self.add(v, FHIR.value, Literal(str(val), datatype=self._meta.primitive_datatype_nostring(vt, val))) else: self.add_value_node(v, pred, val, valuetype) self.add(subj, pred, v) if pred == FHIR.Reference.reference: self.add_reference(subj, val) self.add_extension_val(v, json_obj, json_key) return v return None
Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are represented as a list of BNODE objects with a fhir:index discriminant :param subj: graph subject :param pred: predicate :param json_obj: object containing json_key :param json_key: name of the value in the JSON resource :param valuetype: value type if NOT determinable by predicate :return: value node if target is a BNode else None
385,203
def run_dssp(pdb, path=True, outfile=None): if not path: if type(pdb) == str: pdb = pdb.encode() try: temp_pdb = tempfile.NamedTemporaryFile(delete=False) temp_pdb.write(pdb) temp_pdb.seek(0) dssp_out = subprocess.check_output( [global_settings[][], temp_pdb.name]) temp_pdb.close() finally: os.remove(temp_pdb.name) else: dssp_out = subprocess.check_output( [global_settings[][], pdb]) dssp_out = dssp_out.decode() if outfile: with open(outfile, ) as outf: outf.write(dssp_out) return dssp_out
Uses DSSP to find helices and extracts helices from a pdb file or string. Parameters ---------- pdb : str Path to pdb file or string. path : bool, optional Indicates if pdb is a path or a string. outfile : str, optional Filepath for storing the dssp output. Returns ------- dssp_out : str Std out from DSSP.
385,204
def resolve(self, obj):
    if not isinstance(obj, str):
        return obj
    if ':' in obj:
        return resolve_reference(obj)
    value = self._entrypoints.get(obj)
    if value is None:
        # message reconstructed; the original wording was lost in extraction
        raise LookupError('no such entry point in {}: {}'.format(self.namespace, obj))
    if isinstance(value, EntryPoint):
        value = self._entrypoints[obj] = value.load()
    return value
Resolve a reference to an entry point or a variable in a module. If ``obj`` is a ``module:varname`` reference to an object, :func:`resolve_reference` is used to resolve it. If it is a string of any other kind, the named entry point is loaded from this container's namespace. Otherwise, ``obj`` is returned as is. :param obj: an entry point identifier, an object reference or an arbitrary object :return: the loaded entry point, resolved object or the unchanged input value :raises LookupError: if ``obj`` was a string but the named entry point was not found
385,205
def get_amount_of_tweets(self): if not self.__response: raise TwitterSearchException(1013) return (len(self.__response[][]) if self.__order_is_search else len(self.__response[]))
Returns current amount of tweets available within this instance :returns: The amount of tweets currently available :raises: TwitterSearchException
385,206
def format(self, formatter, subset=None):
    if subset is None:
        row_locs = range(len(self.data))
        col_locs = range(len(self.data.columns))
    else:
        subset = _non_reducing_slice(subset)
        if len(subset) == 1:
            subset = subset, self.data.columns
        sub_df = self.data.loc[subset]
        row_locs = self.data.index.get_indexer_for(sub_df.index)
        col_locs = self.data.columns.get_indexer_for(sub_df.columns)
    if is_dict_like(formatter):
        for col, col_formatter in formatter.items():
            col_formatter = _maybe_wrap_formatter(col_formatter)
            col_num = self.data.columns.get_indexer_for([col])[0]
            for row_num in row_locs:
                self._display_funcs[(row_num, col_num)] = col_formatter
    else:
        locs = product(*(row_locs, col_locs))
        for i, j in locs:
            formatter = _maybe_wrap_formatter(formatter)
            self._display_funcs[(i, j)] = formatter
    return self
Format the text display value of cells. .. versionadded:: 0.18.0 Parameters ---------- formatter : str, callable, or dict subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. Returns ------- self : Styler Notes ----- ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where ``a`` is one of - str: this will be wrapped in: ``a.format(x)`` - callable: called with the value of an individual cell The default display value for numeric values is the "general" (``g``) format with ``pd.options.display.precision`` precision. Examples -------- >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'c': str.upper})
385,207
def archive(self):
    ids = self.rpc_model.search(self.domain, context=self.context)
    if ids:
        # per the docstring, archiving sets the active field to False
        self.rpc_model.write(ids, {'active': False})
Archives (soft delete) all the records matching the query. This assumes that the model allows archiving (not many do - especially transactional documents). Internal implementation sets the active field to False.
385,208
def get_window(window, Nx, fftbins=True):
    if six.callable(window):
        return window(Nx)
    elif (isinstance(window, (six.string_types, tuple)) or
          np.isscalar(window)):
        return scipy.signal.get_window(window, Nx, fftbins=fftbins)
    elif isinstance(window, (np.ndarray, list)):
        if len(window) == Nx:
            return np.asarray(window)
        # error messages reconstructed; original wording was lost in extraction
        raise ParameterError('Window size mismatch: '
                             '{:d} != {:d}'.format(len(window), Nx))
    else:
        raise ParameterError('Invalid window specification: {}'.format(window))
Compute a window function. This is a wrapper for `scipy.signal.get_window` that additionally supports callable or pre-computed windows. Parameters ---------- window : string, tuple, number, callable, or list-like The window specification: - If string, it's the name of the window function (e.g., `'hann'`) - If tuple, it's the name of the window function and any parameters (e.g., `('kaiser', 4.0)`) - If numeric, it is treated as the beta parameter of the `'kaiser'` window, as in `scipy.signal.get_window`. - If callable, it's a function that accepts one integer argument (the window length) - If list-like, it's a pre-computed window of the correct length `Nx` Nx : int > 0 The length of the window fftbins : bool, optional If True (default), create a periodic window for use with FFT If False, create a symmetric window for filter design applications. Returns ------- get_window : np.ndarray A window of length `Nx` and type `window` See Also -------- scipy.signal.get_window Notes ----- This function caches at level 10. Raises ------ ParameterError If `window` is supplied as a vector of length != `n_fft`, or is otherwise mis-specified.
385,209
def _read_opt_ilnp(self, code, *, desc):
    _type = self._read_opt_type(code)
    _size = self._read_unpack(1)
    _nval = self._read_fileng(_size)
    opt = dict(
        desc=desc,
        type=_type,
        length=_size + 2,
        value=_nval,
    )
    return opt
Read HOPOPT ILNP Nonce option.

Structure of HOPOPT ILNP Nonce option [RFC 6744]:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |  Next Header  |  Hdr Ext Len  |  Option Type  | Option Length |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                         Nonce Value                           /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Octets      Bits      Name                        Description
      0           0       hopopt.ilnp.type            Option Type
      0           0       hopopt.ilnp.type.value      Option Number
      0           0       hopopt.ilnp.type.action     Action (10)
      0           2       hopopt.ilnp.type.change     Change Flag (0)
      1           8       hopopt.ilnp.length          Length of Option Data
      2          16       hopopt.ilnp.value           Nonce Value
385,210
def _populate_user(self):
    self._populate_user_from_attributes()
    self._populate_user_from_group_memberships()
    self._populate_user_from_dn_regex()
    self._populate_user_from_dn_regex_negation()
Populates our User object with information from the LDAP directory.
385,211
def etree_write(tree, stream):
    try:
        tree.write(stream, encoding="utf-8", xml_declaration=True)
    except TypeError:
        tree.write(stream, encoding="unicode", xml_declaration=True)
Write XML ElementTree 'tree' content into 'stream'. :param tree: XML ElementTree object :param stream: File or file-like object to write to
385,212
def artist_update(self, artist_id, name=None, urls=None, alias=None, group=None): params = { : artist_id, : name, : urls, : alias, : group } return self._get(, params, method=)
Function to update artists (Requires Login) (UNTESTED). Only the artist_id parameter is required. The other parameters are optional. Parameters: artist_id (int): The id of the artist to update (Type: INT). name (str): The artist's name. urls (str): A list of URLs associated with the artist, whitespace delimited. alias (str): The artist that this artist is an alias for. Simply enter the alias artist's name. group (str): The group or circle that this artist is a member of. Simply enter the group's name.
385,213
def get_connection_info(connection_file=None, unpack=False, profile=None): if connection_file is None: cf = get_connection_file() else: cf = find_connection_file(connection_file, profile=profile) with open(cf) as f: info = f.read() if unpack: info = json.loads(info) info[] = str_to_bytes(info.get(, )) return info
Return the connection information for the current Kernel. Parameters ---------- connection_file : str [optional] The connection file to be used. Can be given by absolute path, or IPython will search in the security directory of a given profile. If unspecified, the connection file for the currently running IPython Kernel will be used, which is only allowed from inside a kernel. unpack : bool [default: False] if True, return the unpacked dict, otherwise just the string contents of the file. profile : str [optional] The name of the profile to use when searching for the connection file, if different from the current IPython session or 'default'. Returns ------- The connection dictionary of the current kernel, as string or dict, depending on `unpack`.
385,214
def process(self, now): if self._pn_connection is None: LOG.error("Connection.process() called on destroyed connection!") return 0 if self._pn_connection.state & proton.Endpoint.LOCAL_UNINIT: return 0 if self._pn_sasl and not self._sasl_done: if (_PROTON_VERSION < (0, 10)): if self._pn_sasl.state not in (proton.SASL.STATE_PASS, proton.SASL.STATE_FAIL): LOG.debug("SASL in progress. State=%s", str(self._pn_sasl.state)) if self._handler: with self._callback_lock: self._handler.sasl_step(self, self._pn_sasl) return self._next_deadline self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) else: if self._pn_sasl.outcome is not None: self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) timer_deadline = self._expire_timers(now) transport_deadline = self._pn_transport.tick(now) if timer_deadline and transport_deadline: self._next_deadline = min(timer_deadline, transport_deadline) else: self._next_deadline = timer_deadline or transport_deadline pn_event = self._pn_collector.peek() while pn_event: if _Link._handle_proton_event(pn_event, self): pass elif self._handle_proton_event(pn_event): pass elif _SessionProxy._handle_proton_event(pn_event, self): pass self._pn_collector.pop() pn_event = self._pn_collector.peek() if self._error: if self._handler: self._next_deadline = now with self._callback_lock: self._handler.connection_failed(self, self._error) elif (self._endpoint_state == self._CLOSED and self._read_done and self._write_done): if self._handler: with self._callback_lock: self._handler.connection_closed(self) return self._next_deadline
Perform connection state processing.
385,215
def lookupAll(data, configFields, lookupType, db, histObj={}):
    # lookup type names below are reconstructed from the docstring
    for field in data.keys():
        if field in configFields.keys() and data[field] != '':
            if lookupType in configFields[field]["lookup"]:
                if lookupType in ['genericLookup', 'fieldSpecificLookup', 'normLookup']:
                    fieldValNew, histObj = DataLookup(fieldVal=data[field], db=db,
                                                      lookupType=lookupType,
                                                      fieldName=field, histObj=histObj)
                elif lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex']:
                    fieldValNew, histObj = RegexLookup(fieldVal=data[field], db=db,
                                                       fieldName=field,
                                                       lookupType=lookupType,
                                                       histObj=histObj)
                elif lookupType == 'normIncludes':
                    fieldValNew, histObj, checkMatch = IncludesLookup(fieldVal=data[field],
                                                                      lookupType='normIncludes',
                                                                      db=db, fieldName=field,
                                                                      histObj=histObj)
                data[field] = fieldValNew
    return data, histObj
Return a record after having cleaning rules of specified type applied to all fields in the config :param dict data: single record (dictionary) to which cleaning rules should be applied :param dict configFields: "fields" object from DWM config (see DataDictionary) :param string lookupType: Type of lookup to perform/MongoDB collection name. One of 'genericLookup', 'fieldSpecificLookup', 'normLookup', 'genericRegex', 'fieldSpecificRegex', 'normRegex', 'normIncludes' :param MongoClient db: MongoClient instance connected to MongoDB :param dict histObj: History object to which changes should be appended
385,216
def hdb_disk_interface(self, hdb_disk_interface): self._hdb_disk_interface = hdb_disk_interface log.info(.format(name=self._name, id=self._id, interface=self._hdb_disk_interface))
Sets the hdb disk interface for this QEMU VM. :param hdb_disk_interface: QEMU hdb disk interface
385,217
def choices(self): if not self._choices: gandi = self.gandi or GandiContextHelper() self._choices = self._get_choices(gandi) if not self._choices: api = gandi.get_api_connector() gandi.echo( "api and that it's running." % (api.host)) sys.exit(1) return self._choices
Retrieve choices from API if possible
385,218
def read(self, file_p):
    if not isinstance(file_p, basestring):
        raise TypeError("file_p can only be an instance of type basestring")
    progress = self._call("read", in_p=[file_p])
    progress = IProgress(progress)
    return progress
Reads an OVF file into the appliance object. This method succeeds if the OVF is syntactically valid and, by itself, without errors. The mere fact that this method returns successfully does not mean that VirtualBox supports all features requested by the appliance; this can only be examined after a call to :py:func:`interpret` . in file_p of type str Name of appliance file to open (either with an .ovf or .ova extension, depending on whether the appliance is distributed as a set of files or as a single file, respectively). return progress of type :class:`IProgress` Progress object to track the operation completion.
385,219
def get_Tsys(calON_obs, calOFF_obs, calflux, calfreq, spec_in, oneflux=False, **kwargs):
    return diode_spec(calON_obs, calOFF_obs, calflux, calfreq, spec_in,
                      average=False, oneflux=False, **kwargs)[1]
Returns frequency dependent system temperature given observations on and off a calibrator source Parameters ---------- (See diode_spec())
385,220
def get_celery_app( name=os.getenv( "CELERY_NAME", "worker"), auth_url=os.getenv( "BROKER_URL", "redis://localhost:6379/9"), backend_url=os.getenv( "BACKEND_URL", "redis://localhost:6379/10"), include_tasks=[], ssl_options=None, transport_options=None, path_to_config_module=os.getenv( "CONFIG_MODULE_PATH", "celery_loaders.work_tasks.celery_config"), worker_log_format=os.getenv( "WORKER_LOG_FORMAT", "%(asctime)s: %(levelname)s %(message)s"), **kwargs): if len(include_tasks) == 0: log.error(("creating celery app={} MISSING tasks={}") .format( name, include_tasks)) else: log.info(("creating celery app={} tasks={}") .format( name, include_tasks)) app = celery.Celery( name, broker_url=auth_url, result_backend=backend_url, include=include_tasks) app.config_from_object( path_to_config_module, namespace="CELERY") app.conf.update(kwargs) if transport_options: log.info(("loading transport_options={}") .format(transport_options)) app.conf.update(**transport_options) if ssl_options: log.info(("loading ssl_options={}") .format(ssl_options)) app.conf.update(**ssl_options) if len(include_tasks) > 0: app.autodiscover_tasks(include_tasks) return app
get_celery_app :param name: name for this app :param auth_url: celery broker :param backend_url: celery backend :param include_tasks: list of modules containing tasks to add :param ssl_options: security options dictionary :param transport_options: transport options dictionary :param path_to_config_module: config module :param worker_log_format: format for logs
385,221
def get_option_lists(self):
    return [self.get_option_list()] + \
           [option_list for name, description, option_list
            in self.get_option_groups()]
A hook to override the option lists used to generate option names and defaults.
385,222
def _get_imports_h(self, data_types):
    if not isinstance(data_types, list):
        data_types = [data_types]
    import_classes = []
    for data_type in data_types:
        if is_user_defined_type(data_type):
            import_classes.append(fmt_class_prefix(data_type))
        for field in data_type.all_fields:
            data_type, _ = unwrap_nullable(field.data_type)
            while is_list_type(data_type) or is_map_type(data_type):
                data_type = (data_type.value_data_type
                             if is_map_type(data_type) else data_type.data_type)
            if is_user_defined_type(data_type):
                import_classes.append(fmt_class_prefix(data_type))
    import_classes = list(set(import_classes))
    import_classes.sort()
    return import_classes
Emits all necessary header file imports for the given Stone data type.
385,223
def p_term_var(self, p):
    _LOGGER.debug("term -> VAR")
    if p[1] not in self._VAR_VALUES:
        if self._autodefine_vars:
            self._VAR_VALUES[p[1]] = TypedClass(None, TypedClass.UNKNOWN)
    if p[1] in self._VAR_VALUES:
        _LOGGER.debug("term -> VAR")
        p[0] = self._VAR_VALUES[p[1]]
    else:
        raise UndefinedVar()
term : VAR
385,224
def split(pattern, string, maxsplit=0, flags=0):
    return _compile(pattern, flags).split(string, maxsplit)
Split the source string by the occurrences of the pattern, returning a list containing the resulting substrings.
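A minimal usage sketch of pattern-based splitting, shown here with the standard library's re.split, which the wrapper above mirrors; the sample strings are illustrative only:

import re

# split on commas with optional surrounding whitespace
print(re.split(r"\s*,\s*", "a, b,c ,  d"))       # ['a', 'b', 'c', 'd']

# maxsplit limits the number of splits performed
print(re.split(r"-", "2024-01-15", maxsplit=1))   # ['2024', '01-15']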
385,225
def align_texts(source_blocks, target_blocks, params=LanguageIndependent):
    if len(source_blocks) != len(target_blocks):
        raise ValueError("Source and target texts do not have the same number of blocks.")
    return [align_blocks(source_block, target_block, params)
            for source_block, target_block in zip(source_blocks, target_blocks)]
Creates the sentence alignment of two texts. Texts can consist of several blocks. Block boundaries cannot be crossed by sentence alignment links. Each block consists of a list that contains the lengths (in characters) of the sentences in this block. @param source_blocks: The list of blocks in the source text. @param target_blocks: The list of blocks in the target text. @param params: the sentence alignment parameters. @returns: A list of sentence alignment lists
385,226
def make_graph_pygraphviz(self, recs, nodecolor, edgecolor, dpi,
                          draw_parents=True, draw_children=True):
    import pygraphviz as pgv

    grph = pgv.AGraph(name="GO tree")
    edgeset = set()
    for rec in recs:
        if draw_parents:
            edgeset.update(rec.get_all_parent_edges())
        if draw_children:
            edgeset.update(rec.get_all_child_edges())

    edgeset = [(self.label_wrap(a), self.label_wrap(b))
               for (a, b) in edgeset]

    for rec in recs:
        grph.add_node(self.label_wrap(rec.item_id))

    for src, target in edgeset:
        grph.add_edge(target, src)

    grph.graph_attr.update(dpi="%d" % dpi)
    grph.node_attr.update(shape="box", style="rounded,filled",
                          fillcolor="beige", color=nodecolor)
    grph.edge_attr.update(shape="normal", color=edgecolor,
                          dir="back", label="is_a")
    for rec in recs:
        try:
            node = grph.get_node(self.label_wrap(rec.item_id))
            node.attr.update(fillcolor="plum")
        except:
            continue

    return grph
Draw AMIGO style network, lineage containing one query record.
385,227
def mean_min_time_distance(item_a, item_b, max_value):
    times_a = item_a.times.reshape((item_a.times.size, 1))
    times_b = item_b.times.reshape((1, item_b.times.size))
    distance_matrix = (times_a - times_b) ** 2
    mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() +
                                 distance_matrix.min(axis=1).mean())
    return np.minimum(mean_min_distances, max_value) / float(max_value)
Calculate the mean time difference among the time steps in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
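A self-contained sketch of the same distance computation on plain NumPy arrays; the STObject inputs are replaced by hypothetical time-step arrays:

import numpy as np

times_a = np.array([0, 1, 2, 3]).reshape((-1, 1))    # hypothetical time steps of object A
times_b = np.array([2, 3, 4]).reshape((1, -1))        # hypothetical time steps of object B
max_value = 10.0

distance_matrix = (times_a - times_b) ** 2
mean_min = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
print(np.minimum(mean_min, max_value) / max_value)    # scaled distance in [0, 1]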
385,228
def do_dir(self, args, unknown): if unknown: self.perror("dir does not take any positional arguments:", traceback_war=False) self.do_help() self._last_result = cmd2.CommandResult(, ) return contents = os.listdir(self.cwd) fmt = if args.long: fmt = for f in contents: self.stdout.write(fmt.format(f)) self.stdout.write() self._last_result = cmd2.CommandResult(data=contents)
List contents of current directory.
385,229
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    if allow_dotted_names:
        attrs = attr.split('.')
    else:
        attrs = [attr]
    for i in attrs:
        if i.startswith('_'):
            # message reconstructed; the original wording was lost in extraction
            raise AttributeError('attempt to access private attribute "%s"' % i)
        else:
            obj = getattr(obj, i)
    return obj
resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d Resolves a dotted attribute name to an object. Raises an AttributeError if any attribute in the chain starts with a '_'. If the optional allow_dotted_names argument is false, dots are not supported and this function operates similar to getattr(obj, attr).
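A short illustration of the behavior, using a small hypothetical object graph; it assumes the function above is in scope:

class _Leaf:
    value = 42

class _Node:
    leaf = _Leaf()

class _Root:
    node = _Node()

print(resolve_dotted_attribute(_Root(), "node.leaf.value"))   # 42
# segments starting with an underscore are rejected:
# resolve_dotted_attribute(_Root(), "node._secret")  -> raises AttributeError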
385,230
def get_rendered_objects(self):
    objects = self.objects
    if isinstance(objects, str):
        objects = getattr(self.object, objects).all()
    return [
        self.get_rendered_object(obj)
        for obj in objects
    ]
Render objects
385,231
def set(self, document_data, merge=False):
    batch = self._client.batch()
    batch.set(self, document_data, merge=merge)
    write_results = batch.commit()
    return _first_write_result(write_results)
Replace the current document in the Firestore database. A write ``option`` can be specified to indicate preconditions of the "set" operation. If no ``option`` is specified and this document doesn't exist yet, this method will create it. Overwrites all content for the document with the fields in ``document_data``. This method performs almost the same functionality as :meth:`create`. The only difference is that this method doesn't make any requirements on the existence of the document (unless ``option`` is used), whereas :meth:`create` will fail if the document already exists. Args: document_data (dict): Property names and values to use for replacing a document. merge (Optional[bool] or Optional[List<apispec>]): If True, apply merging instead of overwriting the state of the document. Returns: google.cloud.firestore_v1beta1.types.WriteResult: The write result corresponding to the committed document. A write result contains an ``update_time`` field.
385,232
def _dict_from_terse_tabular(
        names: List[str],
        inp: str,
        transformers: Dict[str, Callable[[str], Any]] = {})\
        -> List[Dict[str, Any]]:
    # split characters reconstructed from the docstring: one record per line,
    # fields separated by colons
    res = []
    for n in names:
        if n not in transformers:
            transformers[n] = lambda s: s
    for line in inp.split('\n'):
        if len(line) < 3:
            continue
        fields = line.split(':')
        res.append(dict([
            (elem[0], transformers[elem[0]](elem[1]))
            for elem in zip(names, fields)]))
    return res
Parse NMCLI terse tabular output into a list of Python dict. ``names`` is a list of strings of field names to apply to the input data, which is assumed to be colon separated. ``inp`` is the input as a string (i.e. already decode()d) from nmcli ``transformers`` is a dict mapping field names to callables of the form f: str -> any. If a fieldname is in transformers, that callable will be invoked on the field matching the name and the result stored. The return value is a list with one element per valid line of input, where each element is a dict with keys taken from names and values from the input
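A usage sketch with hypothetical nmcli terse output (NAME:DEVICE:STATE); it assumes the parser above is in scope:

sample = "wired:eth0:connected\nwifi:wlan0:disconnected"

rows = _dict_from_terse_tabular(
    ["name", "device", "state"],
    sample,
    transformers={"state": lambda s: s == "connected"})

print(rows)
# [{'name': 'wired', 'device': 'eth0', 'state': True},
#  {'name': 'wifi', 'device': 'wlan0', 'state': False}]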
385,233
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations.
385,234
def help_center_articles_search(self, category=None, label_names=None, locale=None, query=None, section=None, updated_after=None, updated_before=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/search api_path = "/api/v2/help_center/articles/search.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if category: api_query.update({ "category": category, }) if label_names: api_query.update({ "label_names": label_names, }) if locale: api_query.update({ "locale": locale, }) if query: api_query.update({ "query": query, }) if section: api_query.update({ "section": section, }) if updated_after: api_query.update({ "updated_after": updated_after, }) if updated_before: api_query.update({ "updated_before": updated_before, }) return self.call(api_path, query=api_query, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/search#search-articles
385,235
def select_many_with_correspondence(
        self,
        collection_selector=identity,
        result_selector=KeyedElement):
    if self.closed():
        raise ValueError("Attempt to call "
                         "select_many_with_correspondence() on a closed Queryable.")
    if not is_callable(collection_selector):
        raise TypeError("select_many_with_correspondence() parameter "
                        "projector={0} is not callable".format(repr(collection_selector)))
    if not is_callable(result_selector):
        raise TypeError("select_many_with_correspondence() parameter "
                        "selector={0} is not callable".format(repr(result_selector)))
    return self._create(
        self._generate_select_many_with_correspondence(collection_selector,
                                                       result_selector))
Projects each element of a sequence to an intermediate new sequence, and flattens the resulting sequence, into one sequence and uses a selector function to incorporate the corresponding source for each item in the result sequence. Note: This method uses deferred execution. Args: collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the result sequence are KeyedElement namedtuples. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector which incorporates the corresponding source element into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector or selector are not callable.
385,236
def gfuds(udfuns, udqdec, relate, refval, adjust, step, nintvls, cnfine, result):
    relate = stypes.stringToCharP(relate)
    refval = ctypes.c_double(refval)
    adjust = ctypes.c_double(adjust)
    step = ctypes.c_double(step)
    nintvls = ctypes.c_int(nintvls)
    libspice.gfuds_c(udfuns, udqdec, relate, refval, adjust, step, nintvls,
                     ctypes.byref(cnfine), ctypes.byref(result))
    return result
Perform a GF search on a user defined scalar quantity. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfuds_c.html :param udfuns: Name of the routine that computes the scalar quantity of interest at some time. :type udfuns: ctypes.CFunctionType :param udqdec: Name of the routine that computes whether the scalar quantity is decreasing. :type udqdec: ctypes.CFunctionType :param relate: Operator that either looks for an extreme value (max, min, local, absolute) or compares the geometric quantity value and a number. :type relate: str :param refval: Value used as reference for scalar quantity condition. :type refval: float :param adjust: Allowed variation for absolute extremal geometric conditions. :type adjust: float :param step: Step size used for locating extrema and roots. :type step: float :param nintvls: Workspace window interval count. :type nintvls: int :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell :return: result :rtype: spiceypy.utils.support_types.SpiceCell
385,237
def all(self, axis=None, *args, **kwargs):
    nv.validate_all(args, kwargs)
    values = self.sp_values
    if len(values) != len(self) and not np.all(self.fill_value):
        return False
    return values.all()
Tests whether all elements evaluate True Returns ------- all : bool See Also -------- numpy.all
385,238
def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
    nxGHalf = nxG/2.
    nyGHalf = nyG/2.
    nxGQuart = nxGHalf/2.
    nyGQuart = nyGHalf/2.
    for i in range(data.shape[0]):
        iG = iBeg + i
        di = iG - nxG
        for j in range(data.shape[1]):
            jG = jBeg + j
            dj = jG - 0.8*nyG
            data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2))
Set the values of the local data array. @param nxG number of global cells in x @param nyG number of global cells in y @param iBeg global starting index in x @param iEnd global ending index in x @param jBeg global starting index in y @param jEnd global ending index in y @param data local array
385,239
def find_children(self, linespec):
    res = []
    for parent in self.find_objects(linespec):
        res.append(parent.line)
        res.extend([child.line for child in parent.children])
    return res
Find lines and immediate children that match the linespec regex. :param linespec: regular expression of line to match :returns: list of lines. These correspond to the lines that were matched and their immediate children
385,240
def GET_AUTH(self, courseid):
    course, __ = self.get_course_and_check_rights(courseid)
    return self.page(course)
GET request
385,241
def translate_bit_for_bit(data): headers = sorted(data.get("Headers", [])) table = .replace(, data.get("Title", "table")) table += n_cols = "c"*(len(headers)+1) table += .replace("$NCOLS", n_cols) table += " Variable &" for header in headers: table += .replace(, header).replace(, ) table = table[:-1] + for k, v in data.get("Data", []).items(): table += "\n \\textbf{$VAR} & ".replace("$VAR", k) for header in headers: table += .replace("$VAL", str(v[header])) table = table[:-1] + table += return table
Translates data where data["Type"]=="Bit for Bit"
385,242
def ring(surf, xy, r, width, color): r2 = r - width x0, y0 = xy x = r2 y = 0 err = 0 right = {} while x >= y: right[x] = y right[y] = x right[-x] = y right[-y] = x y += 1 if err <= 0: err += 2 * y + 1 if err > 0: x -= 1 err -= 2 * x + 1 def h_fill_the_circle(surf, color, x, y, right): if -r2 <= y <= r2: pygame.draw.line(surf, color, (x0 + right[y], y0 + y), (x0 + x, y0 + y)) pygame.draw.line(surf, color, (x0 - right[y], y0 + y), (x0 - x, y0 + y)) else: pygame.draw.line(surf, color, (x0 - x, y0 + y), (x0 + x, y0 + y)) x = r y = 0 err = 0 while x >= y: h_fill_the_circle(surf, color, x, y, right) h_fill_the_circle(surf, color, x, -y, right) h_fill_the_circle(surf, color, y, x, right) h_fill_the_circle(surf, color, y, -x, right) y += 1 if err < 0: err += 2 * y + 1 if err >= 0: x -= 1 err -= 2 * x + 1 gfxdraw.aacircle(surf, x0, y0, r, color) gfxdraw.aacircle(surf, x0, y0, r2, color)
Draws a ring
385,243
def put_property(elt, key, value, ttl=None, ctx=None):
    return put_properties(elt=elt, properties={key: value}, ttl=ttl, ctx=ctx)
Put properties in elt. :param elt: properties elt to put. Not None methods. :param number ttl: If not None, property time to leave. :param ctx: elt ctx from where put properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :param dict properties: properties to put in elt. elt and ttl are exclude. :return: Timer if ttl is not None. :rtype: Timer
385,244
def _request(self, lat_min, lon_min, lat_max, lon_max, start, end, picture_size=None, set_=None, map_filter=None): if not isinstance(lat_min, float): raise PynoramioException( .format(self.__class__.__name__)) if not isinstance(lon_min, float): raise PynoramioException( .format(self.__class__.__name__)) if not isinstance(lat_max, float): raise PynoramioException( .format(self.__class__.__name__)) if not isinstance(lon_max, float): raise PynoramioException( .format(self.__class__.__name__)) if not isinstance(start, int): raise PynoramioException( .format(self.__class__.__name__)) if not isinstance(end, int): raise PynoramioException( .format(self.__class__.__name__)) url = self.base_url + .format(lon_min, lat_min, lon_max, lat_max, start, end) if picture_size is not None and isinstance(picture_size, basestring) \ and picture_size in [, , , , , ]: url += .format(picture_size) if set_ is not None and (isinstance(set_, basestring) and set_ in [, ]) \ or (isinstance(set_, int)): url += .format(set_) else: url += if map_filter is not None and isinstance(map_filter, bool) and not map_filter: url += r = requests.get(url) try: return r.json() except ValueError: raise PynoramioException( .format(self.__class__.__name__))
Internal method to send requests to the Panoramio data API. :param lat_min: Minimum latitude of the bounding box :type lat_min: float :param lon_min: Minimum longitude of the bounding box :type lon_min: float :param lat_max: Maximum latitude of the bounding box :type lat_max: float :param lon_max: Maximum longitude of the bounding box :type lon_max: float :param start: Start number of the number of photo's to retrieve, where 0 is the most popular picture :type start: int :param end: Last number of the number of photo's to retrieve, where 0 is the most popular picture :type end: int :param picture_size: This can be: original, medium (*default*), small, thumbnail, square, mini_square :type picture_size: basestring :param set_: This can be: public, popular or user-id; where user-id is the specific id of a user (as integer) :type set_: basestring/int :param map_filter: Whether to return photos that look better together; when True, tries to avoid returning photos of the same location :type map_filter: bool :return: JSON response of the request formatted as a dictionary.
385,245
def send_custom_host_notification(self, host, options, author, comment): logger.warning("The external command " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( , ))
DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None
385,246
def delta(self, signature): "Generates delta for remote file via API using local filepath/sync/delta', self.path, signature=signature)
Generates delta for remote file via API using local file's signature.
385,247
def run_gatk(self, params, tmp_dir=None, log_error=True, data=None, region=None, memscale=None, parallel_gc=False, ld_preload=False): needs_java7 = LooseVersion(self.get_gatk_version()) < LooseVersion("3.6") if needs_java7: setpath.remove_bcbiopath() with tx_tmpdir(self._config) as local_tmp_dir: if tmp_dir is None: tmp_dir = local_tmp_dir cl = self.cl_gatk(params, tmp_dir, memscale=memscale, parallel_gc=parallel_gc) atype_index = params.index("-T") if params.count("-T") > 0 \ else params.index("--analysis_type") prog = params[atype_index + 1] cl = fix_missing_spark_user(cl, prog, params) if ld_preload: cl = "export LD_PRELOAD=%s/lib/libopenblas.so && %s" % (os.path.dirname(utils.get_bcbio_bin()), cl) do.run(cl, "GATK: {0}".format(prog), data, region=region, log_error=log_error) if needs_java7: setpath.prepend_bcbiopath()
Top level interface to running a GATK command. ld_preload injects required libraries for Java JNI calls: https://gatkforums.broadinstitute.org/gatk/discussion/8810/something-about-create-pon-workflow
385,248
def reset(self, document, parent, level): self.language = languages.get_language( document.settings.language_code) self.memo.document = document self.memo.reporter = document.reporter self.memo.language = self.language self.memo.section_level = level if self.memo.inliner is None: self.memo.inliner = Inliner() self.memo.inliner.init_customizations(document.settings) inliner = self.memo.inliner inliner.reporter = document.reporter inliner.document = document inliner.language = self.language inliner.parent = parent self.document = document self.reporter = self.memo.reporter self.node = parent self.state.runtime_init() self.input_lines = document[]
Reset the state of state machine. After reset, self and self.state can be used to passed to docutils.parsers.rst.Directive.run Parameters ---------- document: docutils document Current document of the node. parent: parent node Parent node that will be used to interpret role and directives. level: int Current section level.
385,249
def _writeGpoScript(psscript=False): t set, then it is in the GUI WINDIRSystem32GroupPolicyMachineScriptsscripts.iniWINDIRSystem32GroupPolicyMachineScriptspsscripts.iniWINDIRSystem32GroupPolicyUserScriptsscripts.iniWINDIRSystem32GroupPolicyUserScriptspsscripts.ini')
helper function to write local GPO startup/shutdown script scripts are stored in scripts.ini and psscripts.ini files in ``WINDIR\\System32\\GroupPolicy\\Machine|User\\Scripts`` these files have the hidden attribute set files have following format: empty line [Startup] 0CmdLine=<path to script 0> 0Parameters=<script 0 parameters> [Shutdown] 0CmdLine=<path to shutdown script 0> 0Parameters=<shutdown script 0 parameters> Number is incremented for each script added psscript file also has the option of a [ScriptsConfig] section, which has the following two parameters: StartExecutePSFirst EndExecutePSFirst these can be set to True/False to denote if the powershell startup/shutdown scripts execute first (True) or last (False), if the value isn't set, then it is 'Not Configured' in the GUI
385,250
def addResourceFile(self, pid, resource_file, resource_filename=None, progress_callback=None): url = "{url_base}/resource/{pid}/files/".format(url_base=self.url_base, pid=pid) params = {} close_fd = self._prepareFileForUpload(params, resource_file, resource_filename) encoder = MultipartEncoder(params) if progress_callback is None: progress_callback = default_progress_callback monitor = MultipartEncoderMonitor(encoder, progress_callback) r = self._request(, url, data=monitor, headers={: monitor.content_type}) if close_fd: fd = params[][1] fd.close() if r.status_code != 201: if r.status_code == 403: raise HydroShareNotAuthorized((, url)) elif r.status_code == 404: raise HydroShareNotFound((pid,)) else: raise HydroShareHTTPException((url, , r.status_code)) response = r.json() return response
Add a new file to an existing resource :param pid: The HydroShare ID of the resource :param resource_file: a read-only binary file-like object (i.e. opened with the flag 'rb') or a string representing path to file to be uploaded as part of the new resource :param resource_filename: string representing the filename of the resource file. Must be specified if resource_file is a file-like object. If resource_file is a string representing a valid file path, and resource_filename is not specified, resource_filename will be equal to os.path.basename(resource_file). is a string :param progress_callback: user-defined function to provide feedback to the user about the progress of the upload of resource_file. For more information, see: http://toolbelt.readthedocs.org/en/latest/uploading-data.html#monitoring-your-streaming-multipart-upload :return: Dictionary containing 'resource_id' the ID of the resource to which the file was added, and 'file_name' the filename of the file added. :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
385,251
def _repos_checked(self, worker, output, error):
    if worker.repo in self._checking_repos:
        self._checking_repos.remove(worker.repo)
    if output:
        self._valid_repos.append(worker.repo)
    if len(self._checking_repos) == 0:
        self._download_repodata(self._valid_repos)
Callback for _check_repos.
385,252
def do_update(pool, request, models):
    "unlike *_check() below, update doesn't worry about missing children"
    return {k: fkapply(models, pool, process_update, missing_update, k, v)
            for k, v in request.items()}
unlike *_check() below, update doesn't worry about missing children
385,253
def combinations(iterable, r):
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    indices = list(range(r))
    yield list(pool[i] for i in indices)
    while True:
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return
        indices[i] += 1
        for j in range(i + 1, r):
            indices[j] = indices[j - 1] + 1
        yield list(pool[i] for i in indices)
Calculate combinations >>> list(combinations('ABCD',2)) [['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D'], ['C', 'D']] >>> list(combinations(range(4), 3)) [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]] Args: iterable: Any iterable object. r: Size of combination. Yields: list: Combination of size r.
385,254
def get_generated_project_files(self, tool):
    exporter = ToolsSupported().get_tool(tool)
    return exporter(self.generated_files[tool], self.settings).get_generated_project_files()
Get generated project files, the content depends on a tool. Look at tool implementation
385,255
def destroy(self, eip_or_aid, disassociate=False):
    if "." in eip_or_aid:
        return "true" == self.call("ReleaseAddress",
                                   response_data_key="return",
                                   PublicIp=eip_or_aid)
    else:
        if disassociate:
            self.disassociate(eip_or_aid)
        return "true" == self.call("ReleaseAddress",
                                   response_data_key="return",
                                   AllocationId=eip_or_aid)
Release an EIP. If the EIP was allocated for a VPC instance, an AllocationId(aid) must be provided instead of a PublicIp. Setting disassociate to True will attempt to disassociate the IP before releasing it (required for associated nondefault VPC instances).
385,256
def compute_toc_line_indentation_spaces( header_type_curr: int = 1, header_type_prev: int = 0, no_of_indentation_spaces_prev: int = 0, parser: str = , ordered: bool = False, list_marker: str = , list_marker_log: list = build_list_marker_log(, ), index: int = 1) -> int: r assert header_type_curr >= 1 assert header_type_prev >= 0 assert no_of_indentation_spaces_prev >= 0 if (parser == or parser == or parser == or parser == or parser == ): if ordered: assert list_marker in md_parser[parser][][][ ] else: assert list_marker in md_parser[parser][][][ ] if (parser == or parser == or parser == or parser == ): if ordered: assert len( list_marker_log) == md_parser[][][] for e in list_marker_log: assert isinstance(e, str) assert index >= 1 if (parser == or parser == or parser == or parser == ): if header_type_prev == 0: no_of_indentation_spaces_curr = 0 elif header_type_curr == header_type_prev: no_of_indentation_spaces_curr = no_of_indentation_spaces_prev else: if ordered: list_marker_prev = str(list_marker_log[header_type_curr - 1]) else: list_marker_prev = list_marker if header_type_curr > header_type_prev: no_of_indentation_spaces_curr = ( no_of_indentation_spaces_prev + len(list_marker_prev) + len()) elif header_type_curr < header_type_prev: no_of_indentation_spaces_curr = ( no_of_indentation_spaces_prev - (len(list_marker_prev) + len())) if ordered: for i in range((header_type_curr - 1) + 1, md_parser[][][]): list_marker_log[i] = str( md_parser[][][] []) + list_marker if ordered: list_marker_log[header_type_curr - 1] = str(index) + list_marker elif parser == : no_of_indentation_spaces_curr = 4 * (header_type_curr - 1) return no_of_indentation_spaces_curr
r"""Compute the number of indentation spaces for the TOC list element. :parameter header_type_curr: the current type of header (h[1-Inf]). Defaults to ``1``. :parameter header_type_prev: the previous type of header (h[1-Inf]). Defaults to ``0``. :parameter no_of_indentation_spaces_prev: the number of previous indentation spaces. Defaults to ``0``. :parameter parser: decides rules on how compute indentations. Defaults to ``github``. :parameter ordered: if set to ``True``, numbers will be used as list ids or otherwise a dash character, otherwise. Defaults to ``False``. :parameter list_marker: a string that contains some of the first characters of the list element. Defaults to ``-``. :parameter list_marker_log: a data structure that holds list marker information for ordered lists. Defaults to ``build_list_marker_log('github', '.')``. :parameter index: a number that will be used as list id in case of an ordered table of contents. Defaults to ``1``. :type header_type_curr: int :type header_type_prev: int :type no_of_indentation_spaces_prev: int :type parser: str :type ordered: bool :type list_marker: str :type list_marker_log: list :type index: int :returns: no_of_indentation_spaces_curr, the number of indentation spaces for the list element. :rtype: int :raises: a built-in exception. .. note:: Please note that this function assumes that no_of_indentation_spaces_prev contains the correct number of spaces.
385,257
def realimag_files(xscript=0, yscript="d[1]+1j*d[2]", eyscript=None,
                   exscript=None, paths=None, g=None, **kwargs):
    return files(xscript, yscript, eyscript, exscript,
                 plotter=realimag_databoxes, paths=paths, g=g, **kwargs)
This will load a bunch of data files, generate data based on the supplied scripts, and then plot the ydata's real and imaginary parts versus xdata. Parameters ---------- xscript=0 Script for x data yscript='d[1]+1j*d[2]' Script for y data eyscript=None Script for y error exscript=None Script for x error paths=None List of paths to open. g=None Optional dictionary of globals for the scripts See spinmob.plot.realimag.data() for additional optional arguments. See spinmob.data.databox.execute_script() for more information about scripts. Common additional parameters ---------------------------- filters="*.*" Set the file filters for the dialog.
385,258
def create_cloud_integration(self, **kwargs):
    # key names reconstructed: '_return_http_data_only' follows the usual
    # swagger-codegen pattern; 'async_req' is named in the docstring
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_cloud_integration_with_http_info(**kwargs)
    else:
        (data) = self.create_cloud_integration_with_http_info(**kwargs)
        return data
Create a cloud integration # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_cloud_integration(async_req=True) >>> result = thread.get() :param async_req bool :param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::&lt;accountid&gt;:role/&lt;rolename&gt;\", \"externalId\":\"wave123\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre> :return: ResponseContainerCloudIntegration If the method is called asynchronously, returns the request thread.
385,259
def get(self, url, params=None): url = self.escapeUrl(url) content = six.BytesIO(self.raw(url, params=params)) content.seek(0,2) contentlen = content.tell() content.seek(0) MAX_BUFFER_SIZE=1024*1024*200 if contentlen > MAX_BUFFER_SIZE: contentfile = tempfile.NamedTemporaryFile() contentfile.write(content.read()) o = lxml.objectify.parse(contentfile) else: o = lxml.objectify.fromstring(content.getvalue()) if o.tag == : JFSError.raiseError(o, url) return o
Make a GET request for url and return the response content as a generic lxml.objectify object
385,260
def delete(self, r=None, w=None, dw=None, pr=None, pw=None, timeout=None):
    self.client.delete(self, r=r, w=w, dw=dw, pr=pr, pw=pw, timeout=timeout)
    self.clear()
    return self
Delete this object from Riak. :param r: R-value, wait for this many partitions to read object before performing the put :type r: integer :param w: W-value, wait for this many partitions to respond before returning to client. :type w: integer :param dw: DW-value, wait for this many partitions to confirm the write before returning to client. :type dw: integer :param pr: PR-value, require this many primary partitions to be available before performing the read that precedes the put :type pr: integer :param pw: PW-value, require this many primary partitions to be available before performing the put :type pw: integer :param timeout: a timeout value in milliseconds :type timeout: int :rtype: :class:`RiakObject`
385,261
def saveWallet(self, wallet, fpath):
    if not fpath:
        raise ValueError("empty path")

    _fpath = self._normalize(fpath)
    _dpath = _fpath.parent
    try:
        _dpath.relative_to(self._baseDir)
    except ValueError:
        raise ValueError(
            "path {} is not relative to the keyrings {}".format(
                fpath, self._baseDir))

    self._createDirIfNotExists(_dpath)

    while _dpath != self._baseDir:
        self._ensurePermissions(_dpath, self.dmode)
        _dpath = _dpath.parent

    with _fpath.open("w") as wf:
        self._ensurePermissions(_fpath, self.fmode)
        encodedWallet = self.encode(wallet)
        wf.write(encodedWallet)
        logger.debug("stored wallet {} in {}".format(wallet.name, _fpath))

    return str(_fpath)
Save wallet into specified localtion. Returns the canonical path for the ``fpath`` where ``wallet`` has been stored. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - directory part of ``fpath`` exists and it's not a directory - NotADirectoryError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param wallet: wallet to save :param fpath: wallet file path, absolute or relative to keyrings base dir
385,262
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
    if not self._is_running:
        # message reconstructed; the original wording was lost in extraction
        raise SublemonRuntimeError(
            'Attempted to spawn subprocesses from a non-running server')
    subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
    for sp in subprocs:
        asyncio.ensure_future(sp.spawn())
    return subprocs
Coroutine to spawn shell commands. If `max_concurrency` is reached during the attempt to spawn the specified subprocesses, excess subprocesses will block while attempting to acquire this server's semaphore.
385,263
def download_static_assets(doc, destination, base_url, request_fn=make_request, url_blacklist=[], js_middleware=None, css_middleware=None, derive_filename=_derive_filename): if not isinstance(doc, BeautifulSoup): doc = BeautifulSoup(doc, "html.parser") def download_assets(selector, attr, url_middleware=None, content_middleware=None, node_filter=None): nodes = doc.select(selector) for i, node in enumerate(nodes): if node_filter: if not node_filter(node): src = node[attr] node[attr] = print(, src) continue if node[attr].startswith(): continue url = urljoin(base_url, node[attr]) if _is_blacklisted(url, url_blacklist): print(, url) node[attr] = "" continue if url_middleware: url = url_middleware(url) filename = derive_filename(url) node[attr] = filename print(" Downloading", url, "to filename", filename) download_file(url, destination, request_fn=request_fn, filename=filename, middleware_callbacks=content_middleware) def js_content_middleware(content, url, **kwargs): if js_middleware: content = js_middleware(content, url, **kwargs) if src.startswith(): return match.group(0) src_url = urljoin(base_url, os.path.join(file_dir, src)) if _is_blacklisted(src_url, url_blacklist): print(, src_url) return derived_filename = derive_filename(src_url) download_file(src_url, destination, request_fn=request_fn, filename=derived_filename) return % derived_filename return _CSS_URL_RE.sub(repl, content) download_assets("img[src]", "src") download_assets("link[href]", "href", content_middleware=css_content_middleware, node_filter=css_node_filter) download_assets("script[src]", "src", content_middleware=js_content_middleware) download_assets("source[src]", "src") download_assets("source[srcset]", "srcset") for node in doc.select(): node.string = css_content_middleware(node.get_text(), url=) for node in doc.select(): if not node.attrs.get(): node.string = js_content_middleware(node.get_text(), url=) return doc
Download all static assets referenced from an HTML page. The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and audio clips. Args: doc: The HTML page source as a string or BeautifulSoup instance. destination: The folder to download the static assets to! base_url: The base URL where assets will be downloaded from. request_fn: The function to be called to make requests, passed to ricecooker.utils.html.download_file(). Pass in a custom one for custom caching logic. url_blacklist: A list of keywords of files to not include in downloading. Will do substring matching, so e.g. 'acorn.js' will match '/some/path/to/acorn.js'. js_middleware: If specificed, JS content will be passed into this callback which is expected to return JS content with any modifications. css_middleware: If specificed, CSS content will be passed into this callback which is expected to return CSS content with any modifications. Return the modified page HTML with links rewritten to the locations of the downloaded static files, as a BeautifulSoup object. (Call str() on it to extract the raw HTML.)
385,264
def evaluate_ising(linear, quad, state):
    if _numpy and isinstance(state, np.ndarray):
        return evaluate_ising(linear, quad, state.tolist())

    energy = 0.0
    for index, value in uniform_iterator(linear):
        energy += state[index] * value
    for (index_a, index_b), value in six.iteritems(quad):
        energy += value * state[index_a] * state[index_b]
    return energy
Calculate the energy of a state given the Hamiltonian. Args: linear: Linear Hamiltonian terms. quad: Quadratic Hamiltonian terms. state: Vector of spins describing the system state. Returns: Energy of the state evaluated by the given energy function.
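A tiny worked example (values chosen for illustration):

# h = {0: 1.0, 1: -1.0}, J = {(0, 1): 0.5}, state = [-1, +1]
# energy = (-1)(1.0) + (+1)(-1.0) + 0.5(-1)(+1) = -2.5
linear = {0: 1.0, 1: -1.0}
quad = {(0, 1): 0.5}
print(evaluate_ising(linear, quad, [-1, 1]))   # -2.5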
385,265
def _eq(left, right):
    if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
        return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
    else:
        return left == right
Equality comparison that allows for equality between tuple and list types with equivalent elements.
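For example (illustrative values):

assert _eq((1, [2, 3]), [1, (2, 3)])     # tuple/list mix with equal elements
assert not _eq([1, 2], [1, 2, 3])        # different lengths are unequal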
385,266
def _set_base_dn(self):
    # NOTE: the LDAP search base/filter, the rootDSE attribute name and the
    # container DN prefixes were elided in this dump; the '' placeholders
    # below mark where those literals belong.
    results = self._search('', '', [], scope=ldap.SCOPE_BASE)
    if results and type(results) is list:
        dn, attrs = results[0]
        r = attrs[''][0].decode()
    else:
        raise Exception
    self._base_dn = r
    self._active_user_base = '' + self._base_dn
    self._stage_user_base = '' + self._base_dn
    self._preserved_user_base = '' + self._base_dn
    self._groups_base = '' + self._base_dn
    log.debug('base DN: %s' % self._base_dn)  # message reconstructed
Get Base DN from LDAP
385,267
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
    writer_spec = cls.get_params(mr_spec.mapper, allow_old=False)

    key = cls._generate_filename(writer_spec, mr_spec.name,
                                 mr_spec.mapreduce_id,
                                 shard_number, shard_attempt)
    status = _ConsistentStatus()
    status.writer_spec = writer_spec
    status.mainfile = cls._open_file(writer_spec, key)
    status.mapreduce_id = mr_spec.mapreduce_id
    status.shard = shard_number

    return cls(status)
Inherit docs.
385,268
def set_multivar(self, section, option, value=''):  # default reconstructed
    self._string_check(value, allow_list=True)
    if not section or section == self.DEFAULTSECT:
        sectdict = self._defaults
    else:
        try:
            sectdict = self._sections[section]
        except KeyError:
            raise NoSectionError(
                salt.utils.stringutils.to_str(section))
    key = self.optionxform(option)
    self._add_option(sectdict, key, value)
This function is unique to the GitConfigParser. It will add another value for the option if it already exists, converting the option's value to a list if applicable. If "value" is a list, then any existing values for the specified section and option will be replaced with the list being passed.
385,269
def get_context():
    pid = os.getpid()
    if pid not in context:
        context[pid] = zmq.Context()
        logger.debug('new context for pid %s', pid)  # message reconstructed
    return context[pid]
Provide the context to use. This function takes care of creating new contexts in case of forks.
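Sketch of intended use (the zmq import is assumed to be in scope, as in the function itself):

ctx = get_context()              # per-process zmq.Context, created on first use
socket = ctx.socket(zmq.PUB)     # forked children get their own fresh context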
385,270
def undo_sign_in(entry, session=None):
    if session is None:
        session = Session()

    entry_to_delete = (
        session
        .query(Entry)
        .filter(Entry.uuid == entry.uuid)
        .one_or_none()
    )

    if entry_to_delete:
        # log messages reconstructed; originals elided in the dump
        logger.info('Undoing sign in for user {}'.format(entry_to_delete.user_id))
        logger.debug('Deleting {}'.format(entry_to_delete))
        session.delete(entry_to_delete)
        session.commit()
    else:
        error_message = 'Entry not found: {}'.format(entry)  # message reconstructed
        logger.error(error_message)
        raise ValueError(error_message)
Delete a signed in entry. :param entry: `models.Entry` object. The entry to delete. :param session: (optional) SQLAlchemy session through which to access the database.
385,271
def submit(self, command="", blocksize=1, job_name="parsl.auto"):
    # NOTE: a few elided literals (split separator, resource-dict keys and
    # status string) are reconstructed below as plausible placeholders.
    if blocksize < self.nodes_per_block:
        blocksize = self.nodes_per_block
    job_name = "{0}.{1}".format(job_name, time.time())
    script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
    script_path = os.path.abspath(script_path)
    job_config = self.get_configs(command, blocksize)

    logger.debug("Writing submit script")
    self._write_submit_script(template_string, script_path, job_name, job_config)

    channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
    cmd = "qsub -terse {0}".format(channel_script_path)
    retcode, stdout, stderr = super().execute_wait(cmd, 10)

    if retcode == 0:
        for line in stdout.split('\n'):
            job_id = line.strip()
            if not job_id:
                continue
            self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'size': blocksize}
            return job_id
    else:
        print("[WARNING!!] Submission of command to scale_out failed")
        logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
The submit method takes the command string to be executed upon instantiation of a resource most often to start a pilot (such as IPP engine or even Swift-T engines). Args : - command (str) : The bash command string to be executed. - blocksize (int) : Blocksize to be requested KWargs: - job_name (str) : Human friendly name to be assigned to the job request Returns: - A job identifier, this could be an integer, string etc Raises: - ExecutionProviderException or its subclasses
385,272
def set(self, newvalue):
    def setter(state):
        return self._optic.set(state, newvalue)
    return setter
Set the focus to `newvalue`. >>> from lenses import lens >>> set_item_one_to_four = lens[1].set(4) >>> set_item_one_to_four([1, 2, 3]) [1, 4, 3]
385,273
def walk(self, address):
    for step in self._walk_to_address(address):
        node = step
        yield node.address, node.data

    to_process = deque()
    to_process.extendleft(node.children)

    while to_process:
        node = to_process.pop()
        yield node.address, node.data
        if node.children:
            to_process.extendleft(node.children)
Returns a stream of pairs of node addresses and data, raising AddressNotInTree if ADDRESS is not in the tree. First the ancestors of ADDRESS (including itself) are yielded, earliest to latest, and then the descendants of ADDRESS are yielded in an unspecified order. Arguments: address (str): the address to be walked
385,274
def is_same_channel(self, left, right): return self.normalize(left) == self.normalize(right)
Check if given nicknames are equal in the server's case mapping.
385,275
def memoize(func):
    class Memodict(dict):
        def __getitem__(self, *key):
            return dict.__getitem__(self, key)

        def __missing__(self, key):
            ret = self[key] = func(*key)
            return ret

    return Memodict().__getitem__
Memoization decorator for a function taking one or more arguments.
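For instance (illustrative):

@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(80)   # runs in linear time because repeated calls hit the cache dict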
385,276
def get_queryset(qs=None, app=DEFAULT_APP, db_alias=None):
    if isinstance(qs, (djmodels.Manager, djmodels.query.QuerySet)):
        qs = qs.all()
    else:
        qs = get_model(qs, app=app).objects.all()
    if db_alias:
        return qs.using(db_alias)
    else:
        return qs
>>> get_queryset('Permission', app='django.contrib.auth').count() > 0 True
385,277
async def health_check(self) -> Iterator[HealthCheckFail]:
    # NOTE: the settings attribute name, the forbidden default values, the
    # check codes and the f-string error messages were elided in this dump;
    # the placeholders below are assumptions, not the original text.
    ds_class = getattr(settings, 'DEFAULT_STATE', None)  # setting name assumed
    forbidden_defaults = [None, '', '']
    if ds_class in forbidden_defaults:
        yield HealthCheckFail(
            '00001',  # check code assumed
            f'The default state class is not configured (got {ds_class!r}).',
        )

    try:
        import_class(ds_class)
    except (ImportError, KeyError, AttributeError, TypeError):
        yield HealthCheckFail(
            '00002',  # check code assumed
            f'The default state class {ds_class!r} cannot be imported.',
        )

    states = set(t.dest for t in self.transitions)
    for state in states:
        async for check in state.health_check():
            yield check
Perform the checks. So far: - Make a list of the unique destination states from the transitions list, then check the health of each of them.
385,278
def parse_share_url(share_url):
    # separator arguments were elided in the dump; '/' is the natural choice
    *__, group_id, share_token = share_url.rstrip('/').split('/')
    return group_id, share_token
Return the group_id and share_token in a group's share url. :param str share_url: the share url of a group
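Assuming the elided separators are '/', a call looks like this (URL is made up):

group_id, token = parse_share_url('https://groupme.com/join_group/12345678/SHARETOKEN')
# group_id == '12345678', token == 'SHARETOKEN'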
385,279
def telegram(self) -> list:
    telegram_controls = [control.telegram() for control in self.controls]
    return telegram_controls
Returns list of Telegram compatible states of the RichMessage instance nested controls. Returns: telegram_controls: Telegram representation of RichMessage instance nested controls.
385,280
async def peek(self, task_id):
    args = (task_id,)
    res = await self.conn.call(self.__funcs['peek'], args)  # key reconstructed
    return self._create_task(res.body)
Get task without changing its state :param task_id: Task id :return: Task instance
385,281
def rooms_info(self, room_id=None, room_name=None):
    # endpoint name and error message reconstructed; originals elided
    if room_id is not None:
        return self.__call_api_get('rooms.info', roomId=room_id)
    elif room_name is not None:
        return self.__call_api_get('rooms.info', roomName=room_name)
    else:
        raise RocketMissingParamException('roomId or roomName required')
Retrieves the information about the room.
385,282
def avail_images(kwargs=None, call=None):
    # NOTE: several string literals were elided in the dump; the EC2 API
    # parameters and defaults below are reconstructed as the obvious candidates.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with -f or --function.'
        )

    if not isinstance(kwargs, dict):
        kwargs = {}

    if 'owner' in kwargs:
        owner = kwargs['owner']
    else:
        provider = get_configured_provider()
        owner = config.get_cloud_config_value(
            'owner', provider, __opts__, default='amazon'
        )

    ret = {}
    params = {'Action': 'DescribeImages', 'Owner': owner}
    images = aws.query(params,
                       location=get_location(),
                       provider=get_provider(),
                       opts=__opts__,
                       sigver='4')
    for image in images:
        ret[image['imageId']] = image
    return ret
Return a dict of all available VM images on the cloud provider.
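Typically reached through the salt-cloud CLI rather than called directly; a sketch (provider and owner are illustrative):

# salt-cloud --list-images my-ec2-provider
# or, from Python, with an explicit owner:
images = avail_images(kwargs={'owner': 'amazon'})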
385,283
def get_ip_addresses(): LOGGER.debug("IPAddressService.get_ip_addresses") args = {: , : } response = IPAddressService.requester.call(args) ret = None if response.rc == 0: ret = [] for ipAddress in response.response_content[]: ret.append(IPAddress.json_2_ip_address(ipAddress)) elif response.rc != 404: err_msg = \ + str(response.response_content) + + str(response.error_message) + \ " (" + str(response.rc) + ")" LOGGER.warning(err_msg) return ret
:return: all known IP Addresses
385,284
def filter_backends(backends, filters=None, **kwargs):
    def _match_all(obj, criteria):
        return all(getattr(obj, key_, None) == value_ for key_, value_ in criteria.items())

    configuration_filters = {}
    status_filters = {}
    for key, value in kwargs.items():
        if all(key in backend.configuration() for backend in backends):
            configuration_filters[key] = value
        else:
            status_filters[key] = value

    if configuration_filters:
        backends = [b for b in backends if _match_all(b.configuration(), configuration_filters)]

    if status_filters:
        backends = [b for b in backends if _match_all(b.status(), status_filters)]

    backends = list(filter(filters, backends))
    return backends
Return the backends matching the specified filtering. Filter the `backends` list by their `configuration` or `status` attributes, or from a boolean callable. The criteria for filtering can be specified via `**kwargs` or as a callable via `filters`, and the backends must fulfill all specified conditions. Args: backends (list[BaseBackend]): list of backends. filters (callable): filtering conditions as a callable. **kwargs (dict): dict of criteria. Returns: list[BaseBackend]: a list of backend instances matching the conditions.
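A hedged example (criteria are illustrative; `backends` is any list of BaseBackend instances):

# keep only simulator backends that report themselves as operational
good = filter_backends(
    backends,
    filters=lambda b: b.configuration().get('simulator', False),
    operational=True,
)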
385,285
def collapse(self, id_user):
    c = CmtCOLLAPSED(id_bibrec=self.id_bibrec,
                     id_cmtRECORDCOMMENT=self.id,
                     id_user=id_user)
    db.session.add(c)
    db.session.commit()
Collapse comment belonging to user.
385,286
def set_cookie(self, kaka, request): if not kaka: return part = urlparse(request.url) _domain = part.hostname logger.debug("%s: ", _domain, kaka) for cookie_name, morsel in kaka.items(): std_attr = ATTRS.copy() std_attr["name"] = cookie_name _tmp = morsel.coded_value if _tmp.startswith() and _tmp.endswith(): std_attr["value"] = _tmp[1:-1] else: std_attr["value"] = _tmp std_attr["version"] = 0 for attr in morsel.keys(): if attr in ATTRS: if morsel[attr]: if attr == "expires": std_attr[attr] = _since_epoch(morsel[attr]) elif attr == "path": if morsel[attr].endswith(","): std_attr[attr] = morsel[attr][:-1] else: std_attr[attr] = morsel[attr] else: std_attr[attr] = morsel[attr] elif attr == "max-age": if morsel["max-age"]: std_attr["expires"] = time.time() + int(morsel["max-age"]) for att, item in PAIRS.items(): if std_attr[att]: std_attr[item] = True if std_attr["domain"]: if std_attr["domain"].startswith("."): std_attr["domain_initial_dot"] = True else: std_attr["domain"] = _domain std_attr["domain_specified"] = True if morsel["max-age"] is 0: try: self.cookiejar.clear(domain=std_attr["domain"], path=std_attr["path"], name=std_attr["name"]) except ValueError: pass elif std_attr["expires"] and std_attr["expires"] < utc_now(): try: self.cookiejar.clear(domain=std_attr["domain"], path=std_attr["path"], name=std_attr["name"]) except ValueError: pass else: new_cookie = http_cookiejar.Cookie(**std_attr) self.cookiejar.set_cookie(new_cookie)
Store the cookies from a Set-Cookie response header in the client's cookiejar, building an http_cookiejar.Cookie from each morsel of ``kaka``.
385,287
def make_private(self, recursive=False, future=False, client=None): self.acl.all().revoke_read() self.acl.save(client=client) if future: doa = self.default_object_acl if not doa.loaded: doa.reload(client=client) doa.all().revoke_read() doa.save(client=client) if recursive: blobs = list( self.list_blobs( projection="full", max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client, ) ) if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: message = ( "Refusing to make private recursively with more than " "%d objects. If you actually want to make every object " "in this bucket private, iterate through the blobs " "returned by and call " " on each one." ) % (self._MAX_OBJECTS_FOR_ITERATION,) raise ValueError(message) for blob in blobs: blob.acl.all().revoke_read() blob.acl.save(client=client)
Update bucket's ACL, revoking read access for anonymous users. :type recursive: bool :param recursive: If True, this will make all blobs inside the bucket private as well. :type future: bool :param future: If True, this will make all objects created in the future private as well. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :raises ValueError: If ``recursive`` is True, and the bucket contains more than 256 blobs. This is to prevent extremely long runtime of this method. For such buckets, iterate over the blobs returned by :meth:`list_blobs` and call :meth:`~google.cloud.storage.blob.Blob.make_private` for each blob.
385,288
def show(self, dump=False, indent=3, lvl="", label_lvl=""): return self._show_or_dump(dump, indent, lvl, label_lvl)
Prints or returns (when "dump" is true) a hierarchical view of the packet. :param dump: determine if it prints or returns the string value :param int indent: the size of indentation for each layer :param str lvl: additional information about the layer lvl :param str label_lvl: additional information about the layer fields :return: return a hierarchical view if dump, else print it
385,289
def is_declared(self, expression_var):
    if not isinstance(expression_var, Variable):
        # message reconstructed; original f-string elided in the dump
        raise ValueError(f'Expected a Variable, got {type(expression_var)}')
    return any(expression_var is x for x in self.get_declared_variables())
True if expression_var is declared in this constraint set
385,290
def get_lock(lockfile):
    pidfile = open(lockfile, "a+")
    try:
        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
    except IOError, e:
        raise RuntimeError, "failed to lock %s: %s" % (lockfile, e)

    pidfile.seek(0)
    pidfile_pid = pidfile.readline().strip()
    if pidfile_pid.isdigit():
        if pycbc_glue.utils.pid_exists(int(pidfile_pid)):
            raise RuntimeError, ("pidfile %s contains pid (%s) of a running "
                "process" % (lockfile, pidfile_pid))
        else:
            print ("pidfile %s contains stale pid %s; writing new lock" %
                (lockfile, pidfile_pid))

    pidfile.truncate(0)
    pidfile.write("%d\n" % os.getpid())
    pidfile.close()

    confirm_lock(lockfile)
    return True
Tries to write a lockfile containing the current pid. Raises a RuntimeError if the lockfile already contains the pid of a running process. Although this should prevent a lock from being granted twice, it can theoretically deny a lock unjustly in the unlikely event that the original process is gone but another unrelated process has been assigned the same pid by the OS.
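Typical call pattern (path is illustrative):

try:
    get_lock('/var/run/my_pipeline.pid')
except RuntimeError as e:
    sys.exit('could not acquire lock: %s' % e)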
385,291
async def check_permissions(self, action: str, **kwargs):
    for permission in await self.get_permissions(action=action, **kwargs):
        if not await ensure_async(permission.has_permission)(
                scope=self.scope, consumer=self, action=action, **kwargs):
            raise PermissionDenied()
Check if the action should be permitted. Raises an appropriate exception if the request is not permitted.
385,292
def autohide(obj):
    for name, item in six.iteritems(vars(obj)):
        # method names reconstructed from the docstring; originals elided
        if callable(item) and name in ('setup', 'teardown'):
            item = hide(item)

    for name, subclass in class_members(obj):
        autohide(subclass)
Automatically hide setup() and teardown() methods, recursively.
385,293
def migrate_config(self, current_config, config_to_migrate, always_update, update_defaults):
    value = self._search_config_for_possible_names(current_config)
    self._update_config(config_to_migrate, value, always_update, update_defaults)
Migrate config value in current_config, updating config_to_migrate. Given the current_config object, it will attempt to find a value based on all the names given. If no name could be found, then it will simply set the value to the default. If a value is found and is in the list of previous_defaults, it will either update or keep the old value based on if update_defaults is set. If a non-default value is set it will either keep this value or update it based on if ``always_update`` is true. Args: current_config (dict): Current configuration. config_to_migrate (dict): Config to update. always_update (bool): Always update value. update_defaults (bool): Update values found in previous_defaults
385,294
def get_apo(self, symbol, interval='60min', series_type='close',
            fastperiod=None, slowperiod=None, matype=None):
    # defaults taken from the docstring; return-key literals reconstructed
    _FUNCTION_KEY = "APO"
    return _FUNCTION_KEY, 'Technical Analysis: APO', 'Meta Data'
Return the absolute price oscillator values in two json objects as data and meta_data. It raises ValueError when problems arise Keyword Arguments: symbol: the symbol for the equity we want to get its data interval: time interval between two conscutive values, supported values are '1min', '5min', '15min', '30min', '60min', 'daily', 'weekly', 'monthly' (default '60min)' series_type: The desired price type in the time series. Four types are supported: 'close', 'open', 'high', 'low' (default 'close') fastperiod: Positive integers are accepted (default=None) slowperiod: Positive integers are accepted (default=None) matype : Moving average type. By default, fastmatype=0. Integers 0 - 8 are accepted (check down the mappings) or the string containing the math type can also be used. * 0 = Simple Moving Average (SMA), * 1 = Exponential Moving Average (EMA), * 2 = Weighted Moving Average (WMA), * 3 = Double Exponential Moving Average (DEMA), * 4 = Triple Exponential Moving Average (TEMA), * 5 = Triangular Moving Average (TRIMA), * 6 = T3 Moving Average, * 7 = Kaufman Adaptive Moving Average (KAMA), * 8 = MESA Adaptive Moving Average (MAMA)
385,295
def _get_method_kwargs(self):
    # dict keys reconstructed as the conventional generic-relation field names;
    # the original literals were elided in the dump
    method_kwargs = {
        'user': self.user,
        'content_type': self.ctype,
        'object_id': self.content_object.pk,
    }
    return method_kwargs
Helper method. Returns kwargs needed to filter the correct object. Can also be used to create the correct object.
385,296
def set_value(self, selector, new_value, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT): if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT: timeout = self.__get_new_timeout(timeout) if page_utils.is_xpath_selector(selector): by = By.XPATH orginal_selector = selector css_selector = self.convert_to_css_selector(selector, by=by) self.__demo_mode_highlight_if_active(orginal_selector, by) if not self.demo_mode: self.scroll_to(orginal_selector, by=by, timeout=timeout) value = re.escape(new_value) value = self.__escape_quotes_if_needed(value) css_selector = re.escape(css_selector) css_selector = self.__escape_quotes_if_needed(css_selector) script = ( % (css_selector, value)) self.execute_script(script) if new_value.endswith(): element = self.wait_for_element_present( orginal_selector, by=by, timeout=timeout) element.send_keys(Keys.RETURN) if settings.WAIT_FOR_RSC_ON_PAGE_LOADS: self.wait_for_ready_state_complete() self.__demo_mode_pause_if_active()
This method uses JavaScript to update a text field.
385,297
def load_ref_spectra():
    data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
    ff = glob.glob("%s/*.txt" % data_dir)
    nstars = len(ff)
    print("We have %s training objects" % nstars)
    f = ff[0]
    data = Table.read(f, format="ascii.fast_no_header")
    wl = data['col1']   # column names reconstructed (astropy's no-header default)
    npix = len(wl)
    print("We have %s pixels" % npix)
    tr_flux = np.zeros((nstars, npix))
    tr_ivar = np.zeros(tr_flux.shape)
    for i, f in enumerate(ff):
        data = Table.read(f, format="ascii.fast_no_header")
        flux = data['col2']
        tr_flux[i, :] = flux
        sigma = data['col3']
        tr_ivar[i, :] = 1.0 / sigma**2
    return np.array(ff), wl, tr_flux, tr_ivar
Pull out wl, flux, ivar from files of training spectra
385,298
def index_buffer(self, buffer, index_element_size=4):
    if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]:
        raise VAOError("buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance")

    if isinstance(buffer, numpy.ndarray):
        buffer = self.ctx.buffer(buffer.tobytes())

    if isinstance(buffer, bytes):
        buffer = self.ctx.buffer(data=buffer)

    self._index_buffer = buffer
    self._index_element_size = index_element_size
Set the index buffer for this VAO Args: buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes`` Keyword Args: index_element_size (int): Byte size of each element. 1, 2 or 4
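For example (assuming `vao` is an instance of this VAO wrapper with its moderngl context already set up):

import numpy
indices = numpy.array([0, 1, 2, 2, 3, 0], dtype='u4')   # two triangles sharing an edge
vao.index_buffer(indices, index_element_size=4)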
385,299
def DeserializeForImport(self, reader):
    super(Block, self).Deserialize(reader)

    self.Transactions = []
    transaction_length = reader.ReadVarInt()

    for i in range(0, transaction_length):
        tx = Transaction.DeserializeFrom(reader)
        self.Transactions.append(tx)

    if len(self.Transactions) < 1:
        # message reconstructed; original format string elided in the dump
        raise Exception('No transactions found for block %s' % self.Index)
Deserialize full object. Args: reader (neo.IO.BinaryReader):