Dataset columns: Unnamed: 0 — int64 row index (0 to 389k); code — strings of 26 to 79.6k characters; docstring — strings of 1 to 46.9k characters.
10,700
def _create_link(self, act_node, name, instance): act_node._links[name] = instance act_node._children[name] = instance full_name = instance.v_full_name if full_name not in self._root_instance._linked_by: self._root_instance._linked_by[full_name] = {} linking = self._root_instance._linked_by[full_name] if act_node.v_full_name not in linking: linking[act_node.v_full_name] = (act_node, set()) linking[act_node.v_full_name][1].add(name) if name not in self._links_count: self._links_count[name] = 0 self._links_count[name] = self._links_count[name] + 1 self._logger.debug('Added link `%s` under `%s` pointing to `%s`.' % (name, act_node.v_full_name, instance.v_full_name)) return instance
Creates a link and checks if names are appropriate
10,701
def _encode_utf8(self, **kwargs): if is_py3: return kwargs unencoded_pairs = kwargs for i in unencoded_pairs.keys(): if isinstance(unencoded_pairs[i], types.UnicodeType): unencoded_pairs[i] = unencoded_pairs[i].encode() return unencoded_pairs
UTF8 encodes all of the NVP values.
10,702
def predict_survival_function(self, X, times=None): return np.exp(-self.predict_cumulative_hazard(X, times=times))
Predict the survival function for individuals, given their covariates. This assumes that the individual just entered the study (that is, we do not condition on how long they have already lived for.) Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. Returns ------- survival_function : DataFrame the survival probabilities of individuals over the timeline
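Since the snippet above just exponentiates the negative cumulative hazard (S(t|x) = exp(-H(t|x))), here is a quick usage sketch assuming a lifelines-style fitted model; CoxPHFitter and load_rossi are names from the lifelines library, used illustratively:

    from lifelines import CoxPHFitter
    from lifelines.datasets import load_rossi

    rossi = load_rossi()
    cph = CoxPHFitter().fit(rossi, duration_col='week', event_col='arrest')
    # Rows are the timeline, columns are individuals; values are P(T > t | x).
    surv = cph.predict_survival_function(rossi.iloc[:3])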
10,703
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False): if not force_upload: try: md5 = hashlib.md5(file_object.read()).hexdigest() return track_from_md5(md5) except util.EchoNestAPIError: pass file_object.seek(0) return _track_from_data(file_object.read(), filetype, timeout)
Create a track object from a file-like object. NOTE: Does not create the detailed analysis for the Track. Call Track.get_analysis() for that. Args: file_object: a file-like Python object filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au force_upload: skip the MD5 shortcut path, force an upload+analysis Example: >>> f = open("Miaow-01-Tempered-song.mp3") >>> t = track.track_from_file(f, 'mp3') >>> t < Track > >>>
10,704
def guess_labels(self, doc): if doc.nb_pages <= 0: return set() self.label_guesser.total_nb_documents = len(self._docs_by_id.keys()) label_names = self.label_guesser.guess(doc) labels = set() for label_name in label_names: label = self.labels[label_name] labels.add(label) return labels
Return a prediction of label names.
10,705
def do_workers(self, args): workers = self.task_master.workers(alive=not args.all) for k in sorted(workers.iterkeys()): self.stdout.write('{0}: {1}\n'.format(k, workers[k])) if args.details: heartbeat = self.task_master.get_heartbeat(k) for hk, hv in heartbeat.iteritems(): self.stdout.write('  {0}: {1}\n'.format(hk, hv))
list all known workers
10,706
def addOntology(self): self._openRepo() name = self._args.name filePath = self._getFilePath(self._args.filePath, self._args.relativePath) if name is None: name = getNameFromPath(filePath) ontology = ontologies.Ontology(name) ontology.populateFromFile(filePath) self._updateRepo(self._repo.insertOntology, ontology)
Adds a new Ontology to this repo.
10,707
def load_data_file(filename, encoding='utf-8'): data = pkgutil.get_data(PACKAGE_NAME, os.path.join(DATA_DIR, filename)) return data.decode(encoding).splitlines()
Load a data file and return it as a list of lines. Parameters: filename: The name of the file (no directories included). encoding: The file encoding. Defaults to utf-8.
10,708
def _onMessageNotification(self, client, userdata, pahoMessage): try: note = Notification(pahoMessage, self._messageCodecs) except InvalidEventException as e: self.logger.critical(str(e)) else: self.logger.debug("Received Notification") if self.notificationCallback: self.notificationCallback(note)
Internal callback for gateway notification messages, parses source device from topic string and passes the information on to the registered device command callback
10,709
def open_imports(self, imported_definitions): for imp in self.imports: imp.load(self, imported_definitions)
Import the I{imported} WSDLs.
10,710
def _get_all_attributes(network): attrs = network.attributes for n in network.nodes: attrs.extend(n.attributes) for l in network.links: attrs.extend(l.attributes) for g in network.resourcegroups: attrs.extend(g.attributes) return attrs
Get all the complex mode attributes in the network so that they can be used for mapping to resource scenarios later.
10,711
def list(gandi, domain, zone_id, output, format, limit): options = {'items_per_page': limit} output_keys = ['name', 'ttl', 'type', 'value'] if not zone_id: result = gandi.domain.info(domain) zone_id = result['zone_id'] if not zone_id: gandi.echo("This domain doesn't seem to be managed at Gandi.") return records = gandi.record.list(zone_id, options) if output: with open(output, 'w') as f: for record in records: f.write('%s %s IN %s %s\n' % (record['name'], record['ttl'], record['type'], record['value'])) gandi.echo('Your zone file have been writen in %s' % output) elif format == 'text': for record in records: format_record = '%s %s IN %s %s' % (record['name'], record['ttl'], record['type'], record['value']) gandi.echo(format_record) elif format == 'json': gandi.echo(json.dumps(records, sort_keys=True, indent=4, separators=(',', ': '))) return records
List DNS zone records for a domain.
10,712
def add_child_catalog(self, catalog_id, child_id): if self._catalog_session is not None: return self._catalog_session.add_child_catalog(catalog_id=catalog_id, child_id=child_id) return self._hierarchy_session.add_child(id_=catalog_id, child_id=child_id)
Adds a child to a catalog. arg: catalog_id (osid.id.Id): the ``Id`` of a catalog arg: child_id (osid.id.Id): the ``Id`` of the new child raise: AlreadyExists - ``catalog_id`` is already a parent of ``child_id`` raise: NotFound - ``catalog_id`` or ``child_id`` not found raise: NullArgument - ``catalog_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
10,713
def _update_field(self, action, field, value, max_tries, tries=0): self.fetch() action(self, field, value) try: self.save() except requests.HTTPError as ex: if tries < max_tries and ex.response.status_code == 409: self._update_field( action, field, value, max_tries, tries=tries+1) else: raise
Private update_field method. Wrapped by Document.update_field. Tracks a "tries" var to help limit recursion.
10,714
def convert_to_spaces(cls, ops, kwargs): from qnet.algebra.core.hilbert_space_algebra import ( HilbertSpace, LocalSpace) cops = [o if isinstance(o, HilbertSpace) else LocalSpace(o) for o in ops] return cops, kwargs
For all operands that are merely of type str or int, substitute LocalSpace objects with corresponding labels: for a string, the label is the string itself; for an int, the label is the string version of that int.
10,715
def get(self, filename): timer = Timer() self.check_prerequisites() with PatchedBotoConfig(): raw_key = self.get_cache_key(filename) logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key) key = self.s3_bucket.get_key(raw_key) if key is None: logger.debug("Distribution archive is not available in S3 bucket.") else: logger.info("Downloading distribution archive from S3 bucket ..") file_in_cache = os.path.join(self.config.binary_cache, filename) makedirs(os.path.dirname(file_in_cache)) with AtomicReplace(file_in_cache) as temporary_file: key.get_contents_to_filename(temporary_file) logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer) return file_in_cache
Download a distribution archive from the configured Amazon S3 bucket. :param filename: The filename of the distribution archive (a string). :returns: The pathname of a distribution archive on the local file system or :data:`None`. :raises: :exc:`.CacheBackendError` when any underlying method fails.
10,716
def generate_neuroml2_from_network(nl_model, nml_file_name=None, print_summary=True, seed=1234, format='xml', base_dir=None, copy_included_elements=False, target_dir=None, validate=False): print_v("Generating NeuroML2 for %s%s..." % (nl_model.id, ' (base dir: %s, target dir: %s)' % (base_dir, target_dir) if base_dir or target_dir else '')) import neuroml from neuroml.hdf5.NetworkBuilder import NetworkBuilder neuroml_handler = NetworkBuilder() generate_network(nl_model, neuroml_handler, seed=seed, base_dir=base_dir) nml_doc = neuroml_handler.get_nml_doc() for i in nl_model.input_sources: if nml_doc.get_by_id(i.id) == None: if i.neuroml2_source_file: incl = neuroml.IncludeType(_locate_file(i.neuroml2_source_file, base_dir)) if not incl in nml_doc.includes: nml_doc.includes.append(incl) if i.neuroml2_input: input_params = i.parameters if i.parameters else {} if i.neuroml2_input.lower() == 'pulsegenerator': input = neuroml.PulseGenerator(id=i.id) nml_doc.pulse_generators.append(input) elif i.neuroml2_input.lower() == 'pulsegeneratordl': input = neuroml.PulseGeneratorDL(id=i.id) nml_doc.pulse_generator_dls.append(input) elif i.neuroml2_input.lower() == 'poissonfiringsynapse': input = neuroml.PoissonFiringSynapse(id=i.id) nml_doc.poisson_firing_synapses.append(input) for p in input_params: exec('input.%s = "%s"' % (p, evaluate(input_params[p], nl_model.parameters))) for c in nl_model.cells: if c.neuroml2_source_file: incl = neuroml.IncludeType(_locate_file(c.neuroml2_source_file, base_dir)) found_cell = False for cell in nml_doc.cells: if cell.id == c.id: nml_doc.cells.remove(cell) nml_doc.includes.append(incl) found_cell = True if not found_cell: for p in nl_model.populations: if p.component == c.id: pass if not incl in nml_doc.includes: nml_doc.includes.append(incl) if c.neuroml2_cell: cell_params = c.parameters if c.parameters else {} if c.neuroml2_cell.lower() == 'spikegenerator': cell = neuroml.SpikeGenerator(id=c.id) nml_doc.spike_generators.append(cell) elif c.neuroml2_cell.lower() == 'spikegeneratorpoisson': cell = neuroml.SpikeGeneratorPoisson(id=c.id) nml_doc.spike_generator_poissons.append(cell) elif c.neuroml2_cell.lower() == 'spikegeneratorrefpoisson': cell = neuroml.SpikeGeneratorRefPoisson(id=c.id) nml_doc.spike_generator_ref_poissons.append(cell) else: raise Exception('The neuroml2_cell %s is not yet supported' % c.neuroml2_cell) for p in cell_params: exec('cell.%s = "%s"' % (p, evaluate(cell_params[p], nl_model.parameters))) for s in nl_model.synapses: if nml_doc.get_by_id(s.id) == None: if s.neuroml2_source_file: incl = neuroml.IncludeType(_locate_file(s.neuroml2_source_file, base_dir)) if not incl in nml_doc.includes: nml_doc.includes.append(incl) _extract_pynn_components_to_neuroml(nl_model, nml_doc) if print_summary: print_v(nml_doc.summary()) if target_dir == None: target_dir = base_dir if format == 'xml': if not nml_file_name: nml_file_name = _locate_file('%s.net.nml' % nml_doc.id, target_dir) from neuroml.writers import NeuroMLWriter NeuroMLWriter.write(nml_doc, nml_file_name) if format == 'hdf5': if not nml_file_name: nml_file_name = _locate_file('%s.net.nml.h5' % nml_doc.id, target_dir) from neuroml.writers import NeuroMLHdf5Writer NeuroMLHdf5Writer.write(nml_doc, nml_file_name) print_v("Written NeuroML to %s" % nml_file_name) if validate and format == 'xml': from pyneuroml import pynml success = pynml.validate_neuroml2(nml_file_name, verbose_validate=False) if success: print_v('NeuroML file %s validated' % nml_file_name) else: print_v('NeuroML file %s failed validation' % nml_file_name) return nml_file_name, nml_doc
Generate and save NeuroML2 file (in either XML or HDF5 format) from the NeuroMLlite description
10,717
def write_list(path_out, image_list): with open(path_out, 'w') as fout: for i, item in enumerate(image_list): line = '%d\t' % item[0] for j in item[2:]: line += '%f\t' % j line += '%s\n' % item[1] fout.write(line)
Helper function to write an image list into a file. The format is as below, integer_image_index \t float_label_index \t path_to_image Note that the blank between number and tab is only used for readability. Parameters ---------- path_out: string image_list: list
10,718
def tag(self, value): if value is None: value = sys.argv[0] self._tag = value[:self.MAX_TAG_LEN]
The name of the program that generated the log message. The tag can only contain alphanumeric characters. If the tag is longer than {MAX_TAG_LEN} characters it will be truncated automatically.
10,719
def get_record(self, msg_id): r = self._records.find_one({'msg_id': msg_id}) if not r: raise KeyError(msg_id) return r
Get a specific Task Record, by msg_id.
10,720
def _non_framed_body_length(header, plaintext_length): body_length = header.algorithm.iv_len body_length += 8 body_length += plaintext_length body_length += header.algorithm.auth_len return body_length
Calculates the length of a non-framed message body, given a complete header. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :param int plaintext_length: Length of plaintext in bytes :rtype: int
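A worked example of the arithmetic: the body is the IV, an 8-byte encrypted-content-length field, the ciphertext (same length as the plaintext), and the auth tag. Assuming a suite with a 12-byte IV and a 16-byte tag (typical for AES-GCM suites) and 1024 bytes of plaintext:

    iv_len, auth_len, plaintext_length = 12, 16, 1024
    body_length = iv_len + 8 + plaintext_length + auth_len   # = 1060 bytes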
10,721
def hdrval(cls): hdrmap = {: , : , : , u(): , : , : , u(): } return hdrmap
Construct dictionary mapping display column title to IterationStats entries.
10,722
def _merge_nested_if_from_else(self, ifStm: "IfContainer"): self.elIfs.append((ifStm.cond, ifStm.ifTrue)) self.elIfs.extend(ifStm.elIfs) self.ifFalse = ifStm.ifFalse
Merge a nested IfContainer from the else branch into this IfContainer as elif and else branches
10,723
def dskstl(keywrd, dpval): keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(dpval) libspice.dskstl_c(keywrd, dpval)
Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return:
10,724
def remove(self, member): if not self.client.zrem(self.name, member): raise KeyError(member)
Remove member.
10,725
def header_output(self): result = [] for key in self.keys(): result.append(key + '=' + self.get(key).value) return '; '.join(result)
Output only the cookie key-value string, e.g.: HISTORY=21341; PHPSESSION=3289012u39jsdijf28; token=233129
10,726
def as_alias_handler(alias_list): list_ = list() for alias in alias_list: if alias.asname: list_.append(alias.asname) else: list_.append(alias.name) return list_
Returns a list of all the names that will be called.
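A minimal sketch of how such a handler would be fed, assuming the alias list comes from a parsed import statement (ast.alias nodes carry .name and .asname):

    import ast

    tree = ast.parse("import numpy as np, os")
    aliases = tree.body[0].names      # [alias(name='numpy', asname='np'), alias(name='os')]
    print(as_alias_handler(aliases))  # ['np', 'os']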
10,727
def loads(cls, data, store_password, try_decrypt_keys=True): try: pos = 0 version = b4.unpack_from(data, pos)[0]; pos += 4 if version != 1: raise UnsupportedKeystoreVersionException('Unsupported UBER keystore version; only v1 supported, found v'+repr(version)) salt, pos = cls._read_data(data, pos) iteration_count = b4.unpack_from(data, pos)[0]; pos += 4 encrypted_bks_store = data[pos:] try: decrypted = rfc7292.decrypt_PBEWithSHAAndTwofishCBC(encrypted_bks_store, store_password, salt, iteration_count) except BadDataLengthException as e: raise BadKeystoreFormatException("Bad UBER keystore format: %s" % str(e)) except BadPaddingException as e: raise DecryptionFailureException("Failed to decrypt UBER keystore: bad password?") hash_fn = hashlib.sha1 hash_digest_size = hash_fn().digest_size bks_store = decrypted[:-hash_digest_size] bks_hash = decrypted[-hash_digest_size:] if len(bks_hash) != hash_digest_size: raise BadKeystoreFormatException("Insufficient signature bytes; found %d bytes, expected %d bytes" % (len(bks_hash), hash_digest_size)) if hash_fn(bks_store).digest() != bks_hash: raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?") store_type = "uber" entries, size = cls._load_bks_entries(bks_store, store_type, store_password, try_decrypt_keys=try_decrypt_keys) return cls(store_type, entries, version=version) except struct.error as e: raise BadKeystoreFormatException(e)
See :meth:`jks.jks.KeyStore.loads`. :param bytes data: Byte string representation of the keystore to be loaded. :param str store_password: Keystore password string :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password as the keystore password. :returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct. If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made. :raises BadKeystoreFormatException: If the keystore is malformed in some way :raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number :raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password :raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password :raises DuplicateAliasException: If the keystore contains duplicate aliases
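A minimal usage sketch under the assumptions in the docstring (reading a keystore file from disk and iterating its entries; the .entries attribute follows from the cls(store_type, entries, ...) constructor call above):

    with open('keystore.uber', 'rb') as f:
        data = f.read()
    store = UberKeyStore.loads(data, 'store_password')
    for alias, entry in store.entries.items():
        print(alias, entry)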
10,728
def _get_disk_size(self, device): out = __salt__['cmd.run_all']("df {0}".format(device)) if out['retcode']: msg = "Disk size info error: {0}".format(out['stderr']) log.error(msg) raise SIException(msg) devpath, blocks, used, available, used_p, mountpoint = [elm for elm in out['stdout'].split(os.linesep)[-1].split(" ") if elm] return { 'device': devpath, 'blocks': blocks, 'used': used, 'available': available, 'used (%)': used_p, 'mounted': mountpoint, }
Get a size of a disk.
10,729
def folderitem(self, obj, item, index): fullname = obj.getFullname() if fullname: item["Fullname"] = fullname item["replace"]["Fullname"] = get_link( obj.absolute_url(), value=fullname) else: item["Fullname"] = "" default_department = obj.getDefaultDepartment() if default_department: item["replace"]["DefaultDepartment"] = get_link( default_department.absolute_url(), value=default_department.Title()) departments = obj.getDepartments() if departments: links = map( lambda o: get_link(o.absolute_url(), value=o.Title(), css_class="link"), departments) item["replace"]["Departments"] = ", ".join(links) email = obj.getEmailAddress() if email: item["EmailAddress"] = obj.getEmailAddress() item["replace"]["EmailAddress"] = get_email_link( email, value=email) item["BusinessPhone"] = obj.getBusinessPhone() item["Fax"] = obj.getBusinessFax() item["MobilePhone"] = obj.getMobilePhone() return item
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
10,730
def poll_parser(poll): if __is_deleted(poll): return deleted_parser(poll) if poll[] not in poll_types: raise Exception() return Poll( poll[], poll[], __check_key(, poll), __check_key(, poll), poll[], poll[], poll[], poll[], poll[], )
Parses a poll object
10,731
def get_css(self): css = {} print_css = os.path.join(self.theme_dir, 'css', 'print.css') if not os.path.exists(print_css): print_css = os.path.join(THEMES_DIR, 'default', 'css', 'print.css') if not os.path.exists(print_css): raise IOError(u"Cannot find css/print.css in default theme") with codecs.open(print_css, encoding=self.encoding) as css_file: css['print'] = { 'path_url': utils.get_path_url(print_css, self.relative), 'contents': css_file.read(), } screen_css = os.path.join(self.theme_dir, 'css', 'screen.css') if (os.path.exists(screen_css)): with codecs.open(screen_css, encoding=self.encoding) as css_file: css['screen'] = { 'path_url': utils.get_path_url(screen_css, self.relative), 'contents': css_file.read(), } else: self.log(u"No screen stylesheet provided in current theme", 'warning') return css
Fetches and returns stylesheet file path or contents, for both print and screen contexts, depending if we want a standalone presentation or not.
10,732
def load_toml_validator_config(filename): if not os.path.exists(filename): LOGGER.info( "Skipping validator config loading from non-existent config file:" " %s", filename) return ValidatorConfig() LOGGER.info("Loading validator information from config: %s", filename) try: with open(filename) as fd: raw_config = fd.read() except IOError as e: raise LocalConfigurationError( "Unable to load validator configuration file: {}".format(str(e))) toml_config = toml.loads(raw_config) invalid_keys = set(toml_config.keys()).difference( ['bind', 'component_thread_pool_workers', 'endpoint', 'fork_cache_keep_time', 'maximum_peer_connectivity', 'minimum_peer_connectivity', 'network_public_key', 'network_private_key', 'network_thread_pool_workers', 'opentsdb_db', 'opentsdb_password', 'opentsdb_url', 'opentsdb_username', 'peering', 'peers', 'permissions', 'roles', 'scheduler', 'seeds', 'signature_thread_pool_workers', 'state_pruning_block_depth']) if invalid_keys: raise LocalConfigurationError( "Invalid keys in validator config: " "{}".format(", ".join(sorted(list(invalid_keys))))) bind_network = None bind_component = None bind_consensus = None for bind in toml_config.get("bind", []): if "network" in bind: bind_network = bind[bind.find(":") + 1:] if "component" in bind: bind_component = bind[bind.find(":") + 1:] if "consensus" in bind: bind_consensus = bind[bind.find(":") + 1:] network_public_key = None network_private_key = None if toml_config.get("network_public_key") is not None: network_public_key = toml_config.get("network_public_key").encode() if toml_config.get("network_private_key") is not None: network_private_key = toml_config.get("network_private_key").encode() config = ValidatorConfig( bind_network=bind_network, bind_component=bind_component, bind_consensus=bind_consensus, endpoint=toml_config.get("endpoint", None), peering=toml_config.get("peering", None), seeds=toml_config.get("seeds", None), peers=toml_config.get("peers", None), network_public_key=network_public_key, network_private_key=network_private_key, scheduler=toml_config.get("scheduler", None), permissions=parse_permissions(toml_config.get("permissions", None)), roles=toml_config.get("roles", None), opentsdb_url=toml_config.get("opentsdb_url", None), opentsdb_db=toml_config.get("opentsdb_db", None), opentsdb_username=toml_config.get("opentsdb_username", None), opentsdb_password=toml_config.get("opentsdb_password", None), minimum_peer_connectivity=toml_config.get( "minimum_peer_connectivity", None), maximum_peer_connectivity=toml_config.get( "maximum_peer_connectivity", None), state_pruning_block_depth=toml_config.get( "state_pruning_block_depth", None), fork_cache_keep_time=toml_config.get( "fork_cache_keep_time", None), component_thread_pool_workers=toml_config.get( "component_thread_pool_workers", None), network_thread_pool_workers=toml_config.get( "network_thread_pool_workers", None), signature_thread_pool_workers=toml_config.get( "signature_thread_pool_workers", None) ) return config
Returns a ValidatorConfig created by loading a TOML file from the filesystem.
10,733
def open_file(self, fname, external=False): fname = to_text_string(fname) ext = osp.splitext(fname)[1] if encoding.is_text_file(fname): self.editor.load(fname) elif self.variableexplorer is not None and ext in IMPORT_EXT: self.variableexplorer.import_data(fname) elif not external: fname = file_uri(fname) programs.start_file(fname)
Open filename with the appropriate application Redirect to the right widget (txt -> editor, spydata -> workspace, ...) or open file outside Spyder (if extension is not supported)
10,734
def round(self, value_array): min_value = self.domain[0] max_value = self.domain[1] rounded_value = value_array[0] if rounded_value < min_value: rounded_value = min_value elif rounded_value > max_value: rounded_value = max_value return [rounded_value]
If the value falls within bounds, just return it; otherwise return min or max, whichever is closer to the value. Assumes a 1d array with a single element as input.
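In other words, this is a clamp to the domain. An equivalent one-liner, assuming domain is a (min, max) pair:

    import numpy as np
    rounded = [float(np.clip(value_array[0], domain[0], domain[1]))]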
10,735
def get_binfo(self): try: return self.binfo except AttributeError: pass binfo = self.new_binfo() self.binfo = binfo executor = self.get_executor() ignore_set = self.ignore_set if self.has_builder(): binfo.bact = str(executor) binfo.bactsig = SCons.Util.MD5signature(executor.get_contents()) if self._specific_sources: sources = [ s for s in self.sources if not s in ignore_set] else: sources = executor.get_unignored_sources(self, self.ignore) seen = set() binfo.bsources = [s for s in sources if s not in seen and not seen.add(s)] binfo.bsourcesigs = [s.get_ninfo() for s in binfo.bsources] binfo.bdepends = self.depends binfo.bdependsigs = [d.get_ninfo() for d in self.depends if d not in ignore_set] binfo.bimplicit = self.implicit or [] binfo.bimplicitsigs = [i.get_ninfo() for i in binfo.bimplicit if i not in ignore_set] return binfo
Fetch a node's build information. node - the node whose sources will be collected cache - alternate node to use for the signature cache returns - the build signature This no longer handles the recursive descent of the node's children's signatures. We expect that they're already built and updated by someone else, if that's what's wanted.
10,736
def add_equad(psr, equad, flagid=None, flags=None, seed=None): if seed is not None: N.random.seed(seed) equadvec = N.zeros(psr.nobs) if flags is None: if not N.isscalar(equad): raise ValueError('equad must be a scalar if no flags are given.') else: equadvec = N.ones(psr.nobs) * equad if flags is not None and flagid is not None and not N.isscalar(equad): if len(equad) == len(flags): for ct, flag in enumerate(flags): ind = flag == N.array(psr.flagvals(flagid)) equadvec[ind] = equad[ct] psr.stoas[:] += (equadvec / day) * N.random.randn(psr.nobs)
Add quadrature noise of rms `equad` [s]. Optionally take a pseudorandom-number-generator seed.
10,737
def set_is_immediate(self, value): if value is None: self.__is_immediate = value elif not isinstance(value, bool): raise TypeError("IsImmediate must be set to a bool") else: self.__is_immediate = value
Setter for 'is_immediate' field. :param value - a new value of 'is_immediate' field. Must be a boolean type.
10,738
def _from_dict(cls, _dict): args = {} xtra = _dict.copy() if in _dict: args[] = _dict.get() del xtra[] if in _dict: args[] = _dict.get() del xtra[] if in _dict: args[] = _dict.get() del xtra[] if in _dict: args[] = QueryResultMetadata._from_dict( _dict.get()) del xtra[] if in _dict: args[] = _dict.get() del xtra[] args.update(xtra) return cls(**args)
Initialize a QueryResult object from a json dictionary.
10,739
def postinit(self, args, body): self.args = args self.body = body
Do some setup after initialisation. :param args: The arguments that the function takes. :type args: Arguments :param body: The contents of the function body. :type body: list(NodeNG)
10,740
def widgets(self): widgets = [] for i, chart in enumerate(most_visited_pages_charts()): widgets.append(Widget(html_id= % i, content=json.dumps(chart), template=, js_code=[])) return widgets
Get the items.
10,741
def _maketicks_selected(self, plt, branches): ticks = self.get_ticks() distance = [] label = [] rm_elems = [] for i in range(1, len(ticks['distance'])): if ticks['label'][i] == ticks['label'][i - 1]: rm_elems.append(i) for i in range(len(ticks['distance'])): if i not in rm_elems: distance.append(ticks['distance'][i]) label.append(ticks['label'][i]) l_branches = [distance[i] - distance[i - 1] for i in range(1, len(distance))] n_distance = [] n_label = [] for branch in branches: n_distance.append(l_branches[branch]) if ("$\\mid$" not in label[branch]) and ( "$\\mid$" not in label[branch + 1]): n_label.append([label[branch], label[branch + 1]]) elif ("$\\mid$" in label[branch]) and ( "$\\mid$" not in label[branch + 1]): n_label.append( [label[branch].split("$")[-1], label[branch + 1]]) elif ("$\\mid$" not in label[branch]) and ( "$\\mid$" in label[branch + 1]): n_label.append([label[branch], label[branch + 1].split("$")[0]]) else: n_label.append([label[branch].split("$")[-1], label[branch + 1].split("$")[0]]) f_distance = [] rf_distance = [] f_label = [] f_label.append(n_label[0][0]) f_label.append(n_label[0][1]) f_distance.append(0.0) f_distance.append(n_distance[0]) rf_distance.append(0.0) rf_distance.append(n_distance[0]) length = n_distance[0] for i in range(1, len(n_distance)): if n_label[i][0] == n_label[i - 1][1]: f_distance.append(length) f_distance.append(length + n_distance[i]) f_label.append(n_label[i][0]) f_label.append(n_label[i][1]) else: f_distance.append(length + n_distance[i]) f_label[-1] = n_label[i - 1][1] + "$\\mid$" + n_label[i][0] f_label.append(n_label[i][1]) rf_distance.append(length + n_distance[i]) length += n_distance[i] n_ticks = {'distance': f_distance, 'label': f_label} uniq_d = [] uniq_l = [] temp_ticks = list(zip(n_ticks['distance'], n_ticks['label'])) for i in range(len(temp_ticks)): if i == 0: uniq_d.append(temp_ticks[i][0]) uniq_l.append(temp_ticks[i][1]) logger.debug("Adding label {l} at {d}".format( l=temp_ticks[i][1], d=temp_ticks[i][0])) else: if temp_ticks[i][1] == temp_ticks[i - 1][1]: logger.debug("Skipping label {i}".format( i=temp_ticks[i][1])) else: logger.debug("Adding label {l} at {d}".format( l=temp_ticks[i][1], d=temp_ticks[i][0])) uniq_d.append(temp_ticks[i][0]) uniq_l.append(temp_ticks[i][1]) logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l))) plt.gca().set_xticks(uniq_d) plt.gca().set_xticklabels(uniq_l) for i in range(len(n_ticks['label'])): if n_ticks['label'][i] is not None: shift = [] br = -1 for branch in branches: br += 1 shift.append(distance[branch] - rf_distance[br]) return plt, shift
utility private method to add ticks to a band structure with selected branches
10,742
def find_method_params(self): req = self.request args = req.controller_info["method_args"] kwargs = req.controller_info["method_kwargs"] return args, kwargs
Return the method params :returns: tuple (args, kwargs) that will be passed as *args, **kwargs
10,743
def get_active_trips_df(trip_times: DataFrame) -> DataFrame: active_trips = ( pd.concat( [ pd.Series(1, trip_times.start_time), pd.Series(-1, trip_times.end_time), ] ) .groupby(level=0, sort=True) .sum() .cumsum() .ffill() ) return active_trips
Count the number of trips in ``trip_times`` that are active at any given time. Parameters ---------- trip_times : DataFrame Contains columns - start_time: start time of the trip in seconds past midnight - end_time: end time of the trip in seconds past midnight Returns ------- Series index is times from midnight when trips start and end, values are number of active trips for that time
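The trick in the code above is classic event counting: +1 at every start time, -1 at every end time, then a cumulative sum over the sorted event times. A small worked example:

    import pandas as pd

    trip_times = pd.DataFrame({'start_time': [0, 10, 20],
                               'end_time':   [15, 30, 40]})
    active = get_active_trips_df(trip_times)
    # index:  0  10  15  20  30  40
    # value:  1   2   1   2   1   0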
10,744
def connect_job(job_id, deployment_name, token_manager=None, app_url=defaults.APP_URL, persist=False, websocket=None, data_url=None): if data_url == None: data_url = get_data_url_for_job(job_id, deployment_name, token_manager=token_manager, app_url=app_url) if websocket == None: websocket = __wss_connect(data_url, token_manager, job_id=job_id) pong = json.dumps({ : True }) if not persist: job_finished = False while not job_finished: try: data = websocket.recv() if data: payload = json.loads(data) if is_debug_enabled(): printable_payload = dict(payload) if in payload: retry = 1 while retry <= 5: try: debug( % (job_id, retry)) websocket = __wss_connect(data_url, token_manager, job_id=job_id) break except socket.error: if is_debug_enabled(): traceback.print_exc() retry += 1 time.sleep(5) debug( % (job_id, retry)) websocket = __wss_connect(data_url, token_manager, job_id=job_id) websocket.close()
connect to a running Juttle program by job_id
10,745
def get(self, task_id): request = TOPRequest() request[] = task_id self.create(self.execute(request)[]) return self
taobao.topats.result.get — fetch the result of an asynchronous task. Usage guide: http://open.taobao.com/doc/detail.htm?id=30 - 1. This API retrieves the results of asynchronously processed tasks; the task_id passed in must belong to the current appKey. - 2. It only returns results for tasks that have finished executing; for unfinished tasks the response contains no task result, only the task id and execution status. - 3. Each sub-task result of a completed task has the same structure as the result of the corresponding single task. For example, the sub-task results of taobao.topats.trades.fullinfo.get are Trade structures.
10,746
def get(self): self.log.info('starting the ``get`` method') ra = self.ra dec = self.dec if self.covered == False or self.covered == 999 or self.covered == "999": return self.covered self._download_sdss_image() self.log.info('completed the ``get`` method') return self.covered
*download the image*
10,747
def _two_to_one(datadir): _, env_name = _split_path(datadir) print remove_container(.format(env_name)) remove_container(.format(env_name)) remove_container(.format(env_name)) print if exists(path_join(datadir, )): os.remove(path_join(datadir, )) to_move = ([, , , ] + ([] if not is_boot2docker() else [])) web_command( command=[, , ] + to_move, ro={scripts.get_script_path(): }, rw={datadir: } ) pgdata_name = .format(env_name) if is_boot2docker() and inspect_container(pgdata_name): rename_container(pgdata_name, .format(env_name)) print with open(path_join(datadir, )) as pd: datacats_env_location = path_join(pd.read(), ) cp = SafeConfigParser() cp.read(datacats_env_location) cp.set(, , cp.get(, )) cp.remove_section() with open(datacats_env_location, ) as config: cp.write(config) cp = SafeConfigParser() cp.read(path_join(datadir, )) cp.write(config)
After this command, your environment will be converted to format version {} and will not work with Datacats versions beyond and including 1.0.0. This format version doesn't support multiple sites, and after this only your "primary" site will be usable, though other sites will be maintained if you wish to do a migration back to a version which supports multisite. Would you like to continue the migration? (y/n) [n]:
10,748
def run(self): prev_frame_time = time.time() while True: self._win.switch_to() self._win.dispatch_events() now = time.time() self._update(now - prev_frame_time) prev_frame_time = now self._draw() self._win.flip()
Run the interactive window until the user quits
10,749
def parse(cls, line, ns={}): parses = [p for p in cls.opts_spec.scanString(line)] if len(parses) != 1: raise SyntaxError("Invalid specification syntax.") else: e = parses[0][2] processed = line[:e] if (processed.strip() != line.strip()): raise SyntaxError("Failed to parse remainder of string: %r" % line[e:]) grouped_paths = cls._group_paths_without_options(cls.opts_spec.parseString(line)) parse = {} for pathspecs, group in grouped_paths: options = {} normalization = cls.process_normalization(group) if normalization is not None: options['norm'] = normalization if 'plot' in group: plotopts = group['plot'][0] opts = cls.todict(plotopts, 'brackets', ns=ns) options['plot'] = {cls.aliases.get(k,k):v for k,v in opts.items()} if 'style' in group: styleopts = group['style'][0] opts = cls.todict(styleopts, 'parens', ns=ns) options['style'] = {cls.aliases.get(k,k):v for k,v in opts.items()} for pathspec in pathspecs: parse[pathspec] = merge_option_dicts(parse.get(pathspec, {}), options) return { cls.apply_deprecations(path): { option_type: Options(**option_pairs) for option_type, option_pairs in options.items() } for path, options in parse.items() }
Parse an options specification, returning a dictionary with path keys and {'plot':<options>, 'style':<options>} values.
10,750
def unschedule(self, campaign_id): self.campaign_id = campaign_id return self._mc_client._post(url=self._build_path(campaign_id, 'actions/unschedule'))
Unschedule a scheduled campaign that hasn’t started sending. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str`
10,751
def has_storage(func): @wraps(func) def wrapped(*args, **kwargs): me = args[0] if not hasattr(me, '_storage') or \ not me._storage: raise exceptions.ImproperConfigurationError( 'No storage backend attached to schema <{0}>.'.format(me._name.upper()) ) return func(*args, **kwargs) return wrapped
Ensure that self/cls contains a Storage backend.
10,752
def pixels_connectivity_compute(raster, i, j, idx): nrows, ncols = raster.shape value = raster[i][j] for di in [-1, 0, 1]: for dj in [-1, 0, 1]: if 0 <= i + di < nrows and 0 <= j + dj < ncols: if raster[i + di][j + dj] == value and not (di == dj and di == 0): if [i + di, j + dj] not in idx: idx.append([i + di, j + dj]) pixels_connectivity_compute(raster, i + di, j + dj, idx)
Compute whether pixels of the given value are connected. Checks connectivity between the [i,j] pixel of the raster and its 8-neighborhood. If a neighbor is connected (same value), append it to the list idx and recurse from it; if the [i,j] pixel and a neighbor are not connected, do nothing. Args: raster: A raster storing the pixels' initial values. i: The pixel's x coordinate. j: The pixel's y coordinate. idx: A list of pixels sharing the same ID (i.e. pixels that are connected)
10,753
def _update_record_with_name(self, old_record, rtype, new_name, content): new_type = rtype if rtype else old_record['type'] new_ttl = self._get_lexicon_option('ttl') if new_ttl is None and 'ttl' in old_record: new_ttl = old_record['ttl'] new_priority = self._get_lexicon_option('priority') if new_priority is None and 'priority' in old_record: new_priority = old_record['priority'] new_content = content if new_content is None and 'content' in old_record: new_content = old_record['content'] record = self._create_request_record(None, new_type, new_name, new_content, new_ttl, new_priority) self._request_add_dns_record(record) self._request_delete_dns_record_by_id(old_record['id'])
Updates an existing record and changes its sub-domain name
10,754
def _determine_types(start_node, first_name, add_leaf, add_link): if start_node.v_is_root: where = first_name else: where = start_node._branch if where in SUBTREE_MAPPING: type_tuple = SUBTREE_MAPPING[where] else: type_tuple = (GROUP, LEAF) if add_link: return type_tuple[0], LINK if add_leaf: return type_tuple else: return type_tuple[0], type_tuple[0]
Determines types for generic additions
10,755
def p_qualifierType_1(p): dv = None if len(p) == 5: dv = p[4] p[0] = (p[2], True, p[3], dv)
qualifierType_1 : ':' dataType array | ':' dataType array defaultValue
10,756
def result_key_for(self, op_name): ops = self.resource_data.get('operations', {}) op = ops.get(op_name, {}) key = op.get('result_key', None) return key
Checks for the presence of a ``result_key``, which defines what data should make up an instance. Returns ``None`` if there is no ``result_key``. :param op_name: The operation name to look for the ``result_key`` in. :type op_name: string :returns: The expected key to look for data within :rtype: string or None
10,757
def extra_prepare(self, configuration, args_dict): harpoon = self.find_harpoon_options(configuration, args_dict) self.register = self.setup_addon_register(harpoon) if "images" not in self.configuration: self.configuration["images"] = {} self.configuration.update( { "$@": harpoon.get("extra", "") , "bash": args_dict["bash"] or sb.NotSpecified , "harpoon": harpoon , "assume_role": args_dict["assume_role"] or NotSpecified , "command": args_dict["command"] or sb.NotSpecified , "collector": self } , source = "<args_dict>" )
Called before the configuration.converters are activated Here we make sure that we have harpoon options from ``args_dict`` in the configuration. We then load all the harpoon modules as specified by the ``harpoon.addons`` setting. Finally we inject into the configuration: $@ The ``harpoon.extra`` setting bash The ``bash`` setting command The ``command`` setting harpoon The harpoon settings collector This instance
10,758
def connectExec(connection, protocol, commandLine): deferred = connectSession(connection, protocol) @deferred.addCallback def requestSubsystem(session): return session.requestExec(commandLine) return deferred
Connect a Protocol to an SSH exec session
10,759
def dcc(self): if self._dcc is None: self._dcc = DCCManager(self) return self._dcc
return the :class:`~irc3.dcc.DCCManager`
10,760
def corners(self): corners = [] for ind in itertools.product(*((0,1),)*self.dim): ind = np.array(ind) corners.append(self.l + ind*self.r) return np.array(corners)
Iterate the vector of all corners of the hyperrectangles >>> Tile(3, dim=2).corners array([[0, 0], [0, 3], [3, 0], [3, 3]])
10,761
def sargasso_chart (self): config = { : , : , : , : } return bargraph.plot(self.sargasso_data, [name for name in self.sargasso_keys if in name], config)
Make the sargasso plot
10,762
def airplane(self, model_mask: str = '###') -> str: model = self.random.custom_code(mask=model_mask) plane = self.random.choice(AIRPLANES) return '{} {}'.format(plane, model)
Generate a dummy airplane model. :param model_mask: Mask of airplane model. Here '@' is a placeholder of characters and '#' is a placeholder of digits. :return: Airplane model. :Example: Boeing 727.
10,763
def save_beat( self, output_file_name, frequencys, play_time, sample_rate=44100, volume=0.01 ): left_frequency, right_frequency = frequencys left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate) right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate) frame_list = self.read_stream(left_chunk, right_chunk, volume) wf = wave.open(output_file_name, 'wb') wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed')) wf.writeframes(b''.join(frame_list)) wf.close()
Play a beat under the conditions specified by the arguments. Args: frequencys: tuple of (left frequency (Hz), right frequency (Hz)) play_time: playback time (seconds) sample_rate: sample rate volume: volume Returns: void
10,764
def register_postloop_hook(self, func: Callable[[None], None]) -> None: self._validate_prepostloop_callable(func) self._postloop_hooks.append(func)
Register a function to be called at the end of the command loop.
10,765
def build_msg_fmtstr2(lbl, length, invert_rate, backspace): r with_wall = True tzname = time.tzname[0] if util_cplat.WIN32: tzname = tzname.replace(, ) CLEARLINE_EL0 = CLEARLINE_EL2 = CLEAR_BEFORE = + CLEARLINE_EL2 CLEAR_AFTER = CLEARLINE_EL0 msg_head = ProgressIter.build_msg_fmtstr_head_cols(length, lbl) if backspace: msg_head = [CLEAR_BEFORE] + msg_head msg_tail = [ ( if invert_rate else ), ( if length == 0 else ), , ( + tzname if with_wall else ), (), CLEAR_AFTER if backspace else , ] msg_fmtstr_time = .join((msg_head + msg_tail)) return msg_fmtstr_time
r""" Args: lbl (str): invert_rate (bool): backspace (bool): Returns: str: msg_fmtstr_time CommandLine: python -m utool.util_progress --exec-ProgressIter.build_msg_fmtstr2 Setup: >>> from utool.util_progress import * # NOQA >>> lbl = 'foo' >>> invert_rate = True >>> backspace = False >>> length = None Example: >>> # DISABLE_DOCTEST >>> msg_fmtstr_time = ProgressIter.build_msg_fmtstr2(lbl, length, invert_rate, backspace) >>> result = ('%s' % (ut.repr2(msg_fmtstr_time),)) >>> print(result)
10,766
def convert_attribute_name_to_tag(value): if not isinstance(value, six.string_types): raise ValueError("The attribute name must be a string.") for entry in attribute_name_tag_table: if value == entry[0]: return entry[1] raise ValueError("Unrecognized attribute name: {0}".format(value))
A utility function that converts an attribute name string into the corresponding attribute tag. For example: 'State' -> enums.Tags.STATE Args: value (string): The string name of the attribute. Returns: enum: The Tags enumeration value that corresponds to the attribute name string. Raises: ValueError: if the attribute name string is not a string or if it is an unrecognized attribute name
10,767
def _config_convert_to_address_helper(self) -> None: to_address = self._socket_factory.to_address for k, v in self.config.items(): if k == : continue if k.endswith(): self.config[k] = to_address(v)
converts the config from ports to zmq ip addresses Operates on `self.config` using `self._socket_factory.to_address`
10,768
def _WebSafeComponent(c, alt=False): sc = c * 100.0 d = sc % 20 if d==0: return c l = sc - d u = l + 20 if alt: if (sc-l) >= (u-sc): return l/100.0 else: return u/100.0 else: if (sc-l) >= (u-sc): return u/100.0 else: return l/100.0
Convert a color component to its web safe equivalent. Parameters: :c: The component value [0...1] :alt: If True, return the alternative value instead of the nearest one. Returns: The web safe equivalent of the component value.
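Web-safe components sit on 20% steps (multiples of 0x33). A worked pass through the code for c = 0.57: sc = 57 and d = 17, so l = 40 and u = 60; since sc - l = 17 >= u - sc = 3, the nearest variant returns 0.6 and the alternative variant returns the other bound, 0.4 (modulo floating-point noise):

    print(_WebSafeComponent(0.57))            # ~0.6
    print(_WebSafeComponent(0.57, alt=True))  # ~0.4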
10,769
def iter_follower_file(fname): with open(fname, 'rt') as f: for line in f: parts = line.split() if len(parts) > 3: yield parts[1].lower(), set(int(x) for x in parts[2:])
Iterator from a file of follower information and return a tuple of screen_name, follower ids. File format is: <iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
10,770
def generate_additional_properties(self): self.create_variable_is_dict() with self.l(): self.create_variable_keys() add_prop_definition = self._definition["additionalProperties"] if add_prop_definition: properties_keys = list(self._definition.get("properties", {}).keys()) with self.l(): with self.l(, properties_keys): self.l() self.generate_func_code_block( add_prop_definition, .format(self._variable), .format(self._variable_name, self._variable), ) else: with self.l(): self.l()
Means an object with keys whose values are defined by the definition. .. code-block:: python { 'properties': { 'key': {'type': 'number'}, } 'additionalProperties': {'type': 'string'}, } A valid object contains a key called 'key' whose value is any number, plus any other keys with any string values.
10,771
def retarget_with_change_points(song, cp_times, duration): analysis = song.analysis beat_length = analysis[BEAT_DUR_KEY] beats = np.array(analysis["beats"]) cps = np.array(novelty(song, nchangepoints=4)) cp_times = np.array(cp_times) def music_labels(t): closest_beat_idx = np.argmin(np.abs(beats - t)) closest_beat = beats[closest_beat_idx] closest_cp = cps[np.argmin(np.abs(cps - closest_beat))] if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx: return "cp" else: return "noncp" def out_labels(t): if np.min(np.abs(cp_times - t)) < 1.5 * beat_length: return "cp" return "noncp" m_labels = [music_labels(i) for i in np.arange(0, song.duration_in_seconds, beat_length)] o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)] constraints = [ rt_constraints.TimbrePitchConstraint( context=0, timbre_weight=1.0, chroma_weight=1.0), rt_constraints.EnergyConstraint(penalty=.5), rt_constraints.MinimumLoopConstraint(8), rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0) ] comp, info = retarget( [song], duration, constraints=[constraints], fade_in_len=None, fade_out_len=None) final_cp_locations = [beat_length * i for i, label in enumerate(info['labels']) if label == 'cp'] return comp, final_cp_locations
Create a composition of a song of a given duration that reaches music change points at specified times. This is still under construction. It might not work as well with more than 2 ``cp_times`` at the moment. Here's an example of retargeting music to be 40 seconds long and hit a change point at the 10 and 30 second marks:: song = Song("instrumental_music.wav") composition, change_points =\ retarget.retarget_with_change_points(song, [10, 30], 40) composition.export(filename="retargeted_instrumental_music.") :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param cp_times: Times to reach change points (in seconds) :type cp_times: list of floats :param duration: Target length of retargeted music (in seconds) :type duration: float :returns: Composition of retargeted song and list of locations of change points in the retargeted composition :rtype: (:py:class:`radiotool.composer.Composition`, list)
10,772
def _mk_range_bucket(name, n1, n2, r1, r2): d = {} if r1 is not None: d[n1] = r1 if r2 is not None: d[n2] = r2 if not d: raise TypeError('Must specify at least one range boundary') d['name'] = name return d
Create a named range specification for encoding. :param name: The name of the range as it should appear in the result :param n1: The name of the lower bound of the range specifier :param n2: The name of the upper bound of the range specified :param r1: The value of the lower bound (user value) :param r2: The value of the upper bound (user value) :return: A dictionary containing the range bounds. The upper and lower bounds are keyed under ``n1`` and ``n2``. More than just a simple wrapper, this will not include any range bound which has a user value of `None`. Likewise it will raise an exception if both range values are ``None``.
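With the missing literals filled as above, a usage sketch — e.g. a numeric facet range [3, 7) labelled "mid", with hypothetical bound names min/max:

    bucket = _mk_range_bucket('mid', 'min', 'max', 3, 7)
    # -> {'min': 3, 'max': 7, 'name': 'mid'}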
10,773
def get_enclosed_object(self): if self._enclosed_object is None: enclosed_object_id = self.get_enclosed_object_id() package_name = enclosed_object_id.get_identifier_namespace().split('.')[0] obj_name = enclosed_object_id.get_identifier_namespace().split('.')[1] mgr = self.my_osid_object._get_provider_manager(package_name.upper()) try: lookup_session = getattr(mgr, 'get_' + obj_name.lower() + '_lookup_session')(self.my_osid_object._proxy) except TypeError: lookup_session = getattr(mgr, 'get_' + obj_name.lower() + '_lookup_session')() getattr(lookup_session, 'use_federated_' + CATALOG_LOOKUP[package_name] + '_view')() self._enclosed_object = getattr( lookup_session, 'get_' + obj_name.lower())(enclosed_object_id) return self._enclosed_object
Return the enclosed object
10,774
def get_cp2k_structure(atoms): from cp2k_tools.generator import dict2cp2k ...
Convert the atoms structure to a CP2K input file skeleton string
10,775
def create_order(self, oid, price, context=None, expires=None): expires = absdatetime(expires, default=self.EXP_ORDER) orders = self.request( , safeformat(, oid), json.dumps({ : price, : expires.isoformat(), : context })) orderid = self._extract_id_from_batch_response(orders) return { : orderid, : expires, : context, : { : oid, : price }, : { : urljoin(self.usr_frontend, % orderid), : urljoin(self.usr_frontend, % orderid) } }
CREATES a single order for object ``oid``, with price set to ``price`` and validity until ``expires``. :type oid: ``bigint`` :param oid: Object ID. :type price: ``bigint`` :param price: Vingd amount (in cents) the user/buyer shall be charged upon successful purchase. :type context: ``string`` :param context: Purchase (order-related) context. Retrieved upon purchase verification. :type expires: ``datetime``/``dict`` :param expires: Order expiry timestamp, absolute (``datetime``) or relative (``dict``). Valid keys for relative expiry timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). Default: `Vingd.EXP_ORDER`. :rtype: ``dict`` :returns: Order dictionary:: order = { 'id': <order_id>, 'expires': <order_expiry>, 'context': <purchase_context>, 'object': { 'id': <oid>, 'price': <amount_in_cents> }, 'urls': { 'redirect': <url_for_failsafe_redirect_purchase_mode>, 'popup': <url_for_popup_purchase_mode> } } :raises GeneralException: :resource: ``objects/<oid>/orders/`` :access: authorized users
10,776
def new(project_name): try: locale.setlocale(locale.LC_ALL, ) except: print("Warning: Unable to set locale. Expect encoding problems.") config = utils.get_config() config[][] = project_name values = new_project_ui(config) if type(values) is not str: print() pprint.pprint(values) project_dir = render.render_project(**values) git.init_repo(project_dir, **values) else: print(values)
Creates a new project
10,777
def put_on_top(self, request, queryset): queryset.update(publication_date=timezone.now()) self.ping_directories(request, queryset, messages=False) self.message_user(request, _( 'The selected entries are now set at the current date.'))
Put the selected entries on top at the current date.
10,778
def comment_request(self, request_id, body, commit=None, filename=None, row=None): request_url = ("{}pull-request/{}/comment" .format(self.create_basic_url(), request_id)) payload = {'comment': body} if commit is not None: payload['commit'] = commit if filename is not None: payload['filename'] = filename if row is not None: payload['row'] = row return_value = self._call_api(request_url, method='POST', data=payload) LOG.debug(return_value)
Create a comment on the request. :param request_id: the id of the request :param body: the comment body :param commit: which commit to comment on :param filename: which file to comment on :param row: which line of code to comment on :return:
10,779
def calculate_month(birth_date): year = int(birth_date.strftime('%Y')) month = int(birth_date.strftime('%m')) + ((int(year / 100) - 14) % 5) * 20 return month
Calculates and returns a month number basing on PESEL standard.
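Worked example: PESEL encodes the century in the month field (+80 for the 1800s, +0 for the 1900s, +20 for the 2000s, cycling in steps of 20). For a birth in March 2002 the offset is ((2002 // 100) - 14) % 5 * 20 = (20 - 14) % 5 * 20 = 1 * 20 = 20:

    from datetime import date
    print(calculate_month(date(2002, 3, 1)))  # -> 3 + 20 = 23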
10,780
def Connect(self, Username, WaitConnected=False): if WaitConnected: self._Connect_Event = threading.Event() self._Connect_Stream = [None] self._Connect_Username = Username self._Connect_ApplicationStreams(self, self.Streams) self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams) self._Alter('CONNECT', Username) self._Connect_Event.wait() self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams) try: return self._Connect_Stream[0] finally: del self._Connect_Stream, self._Connect_Event, self._Connect_Username else: self._Alter('CONNECT', Username)
Connects application to user. :Parameters: Username : str Name of the user to connect to. WaitConnected : bool If True, causes the method to wait until the connection is established. :return: If ``WaitConnected`` is True, returns the stream which can be used to send the data. Otherwise returns None. :rtype: `ApplicationStream` or None
10,781
def _is_noop_timeperiod(self, process_name, timeperiod): time_grouping = context.process_context[process_name].time_grouping if time_grouping == 1: return False process_hierarchy = self.timetable.get_tree(process_name).process_hierarchy timeperiod_dict = process_hierarchy[process_name].timeperiod_dict return timeperiod_dict._translate_timeperiod(timeperiod) != timeperiod
method verifies if the given timeperiod for given process is valid or falls in-between grouping checkpoints :param process_name: name of the process :param timeperiod: timeperiod to verify :return: False, if given process has no time_grouping set or it is equal to 1. False, if time_grouping is custom but the given timeperiod matches the grouped timeperiod. True, if the timeperiod falls in-between grouping cracks
10,782
def replace(self, new_node): cur_node = self.cur_node nodestack = self.nodestack cur = nodestack.pop() prev = nodestack[-1] index = prev[-1] - 1 oldnode, name = prev[-2][index] assert cur[0] is cur_node is oldnode, (cur[0], cur_node, prev[-2], index) parent = prev[0] if isinstance(parent, list): parent[index] = new_node else: setattr(parent, name, new_node)
Replace a node after first checking integrity of node stack.
10,783
def handle_cmd(self, cmd): cmd = cmd.strip() segments = [] for s in cmd.split(): if s.startswith(): break segments.append(s) args = [] if not len(segments): return while segments: cur_cmd = "_".join(segments) if cur_cmd in self._cmd_methods: argc = self._cmd_argc[cur_cmd] if argc is not None and len(args) != argc: msg(, " ".join(segments), argc, len(args)) return self._cmd_methods[cur_cmd](args) return args.insert(0, segments.pop()) prefix = .join(args) + matches = filter( lambda cmd: cmd.startswith(prefix), self._cmd_methods.keys()) candidates = set([]) for m in matches: if len(m) <= len(prefix): continue m = m[len(prefix):] if in m: m = m[:m.index()] candidates.add(m) if len(candidates): msg(, .join(args)) for c in candidates: msg(, c) else: msg( + , .join(args))
Handles a single server command.
10,784
def mapPartitions(self, f, preservesPartitioning=False): def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7]
10,785
def get_rendition_url(self, width=0, height=0): if width == 0 and height == 0: return self.get_master_url() target_width, target_height = self.get_rendition_size(width, height) key = '%sx%s' % (target_width, target_height) if not self.renditions: self.renditions = {} rendition_name = self.renditions.get(key, False) if not rendition_name: rendition_name = self.make_rendition(target_width, target_height) return default_storage.url(rendition_name)
Get the rendition URL for a specified size; if the rendition does not exist it will be created
10,786
def _read_as_table(self): rows = list() for row in self._rows: rows.append([row[i].get() for i in range(self.num_of_columns)]) return rows
Read the data contained in all entries as a list of lists containing all of the data :return: list of lists containing all tabular data
10,787
def get_cts_metadata(self, key: str, lang: str = None) -> Literal: return self.metadata.get_single(RDF_NAMESPACES.CTS.term(key), lang)
Get easily a metadata from the CTS namespace :param key: CTS property to retrieve :param lang: Language in which it should be :return: Literal value of the CTS graph property
10,788
def reorient_image(image, axis1, axis2=None, doreflection=False, doscale=0, txfn=None): inpixeltype = image.pixeltype if image.pixeltype != 'float': image = image.clone('float') axis_was_none = False if axis2 is None: axis_was_none = True axis2 = [0]*image.dimension axis1 = np.array(axis1) axis2 = np.array(axis2) axis1 = axis1 / np.sqrt(np.sum(axis1*axis1)) * (-1) axis1 = axis1.astype('float32') if not axis_was_none: axis2 = axis2 / np.sqrt(np.sum(axis2*axis2)) * (-1) axis2 = axis2.astype('float32') else: axis2 = np.array([0]*image.dimension).astype('float32') if txfn is None: txfn = mktemp(suffix='.mat') if isinstance(doreflection, tuple): doreflection = list(doreflection) if not isinstance(doreflection, list): doreflection = [doreflection] if isinstance(doscale, tuple): doscale = list(doscale) if not isinstance(doscale, list): doscale = [doscale] if len(doreflection) == 1: doreflection = [doreflection[0]]*image.dimension if len(doscale) == 1: doscale = [doscale[0]]*image.dimension libfn = utils.get_lib_fn('reorientImage%s' % image._libsuffix) libfn(image.pointer, txfn, axis1.tolist(), axis2.tolist(), doreflection, doscale) image2 = apply_transforms(image, image, transformlist=[txfn]) if image.pixeltype != inpixeltype: image2 = image2.clone(inpixeltype) return {'reoimage': image2, 'txfn': txfn}
Align image along a specified axis ANTsR function: `reorientImage` Arguments --------- image : ANTsImage image to reorient axis1 : list/tuple of integers vector of size dim, might need to play w/axis sign axis2 : list/tuple of integers vector of size dim for 3D doreflection : boolean whether to reflect doscale : scalar value 1 allows automated estimate of scaling txfn : string file name for transformation Returns ------- ANTsImage Example ------- >>> import ants >>> image = ants.image_read(ants.get_ants_data('r16')) >>> ants.reorient_image(image, (1,0))
10,789
def probes_used_extract_scores(full_scores, same_probes): if full_scores.shape[1] != same_probes.shape[0]: raise ValueError("Size mismatch") import numpy as np model_scores = np.ndarray((full_scores.shape[0], np.sum(same_probes)), 'float64') c=0 for i in range(0,full_scores.shape[1]): if same_probes[i]: for j in range(0,full_scores.shape[0]): model_scores[j,c] = full_scores[j,i] c+=1 return model_scores
Extracts a matrix of scores for a model, given a boolean row vector same_probes indicating which probes were used
10,790
def _call(self, x, out): if self.domain.is_real: out.real = self.scalar.real * x out.imag = self.scalar.imag * x else: out.lincomb(self.scalar, x)
Return ``self(x)``.
10,791
def acknowledge(self, request, *args, **kwargs): alert = self.get_object() if not alert.acknowledged: alert.acknowledge() return response.Response(status=status.HTTP_200_OK) else: return response.Response({'detail': _('Alert is already acknowledged')}, status=status.HTTP_409_CONFLICT)
To acknowledge alert - run **POST** against */api/alerts/<alert_uuid>/acknowledge/*. No payload is required. All users that can see alerts can also acknowledge it. If alert is already acknowledged endpoint will return error with code 409(conflict).
10,792
def play_song(self, song): if song is not None and song == self.current_song: logger.warning('The song is already the current song.') else: self._playlist.current_song = song
Play the specified song. If the target song differs from the current song, set it as the playlist's current song; the playlist then emits the song_changed signal, and when the player receives it, it calls play — only then does the new song actually start playing. If the song is the same as the currently playing one, it is ignored. .. note:: Callers should not switch songs by assigning playlist.current_song = song directly.
10,793
def set_branding(self, asset_ids): if asset_ids is None: raise NullArgument() if self.get_branding_metadata().is_read_only(): raise NoAccess() if not isinstance(asset_ids, list): raise InvalidArgument() if not self.my_osid_object_form._is_valid_input(asset_ids, self.get_branding_metadata(), array=True): raise InvalidArgument() branding_ids = [] for asset_id in asset_ids: branding_ids.append(str(asset_id)) self.my_osid_object_form._my_map['brandingIds'] = branding_ids
Sets the branding. arg: asset_ids (osid.id.Id[]): the new assets raise: InvalidArgument - ``asset_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``asset_ids`` is ``null`` *compliance: mandatory -- This method must be implemented.*
10,794
def set_connection(host=None, database=None, user=None, password=None): c.CONNECTION['host'] = host c.CONNECTION['database'] = database c.CONNECTION['user'] = user c.CONNECTION['password'] = password
Set connection parameters. Call set_connection with no arguments to clear.
10,795
def _von_mises_cdf_series(x, concentration, num_terms, dtype): num_terms = tf.cast(num_terms, dtype=dtype) def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration): denominator = 2. * n / concentration + rn ddenominator_dk = -2. * n / concentration ** 2 + drn_dconcentration rn = 1. / denominator drn_dconcentration = -ddenominator_dk / denominator ** 2 multiplier = tf.sin(n * x) / n + vn vn = rn * multiplier dvn_dconcentration = (drn_dconcentration * multiplier + rn * dvn_dconcentration) n -= 1. return n, rn, drn_dconcentration, vn, dvn_dconcentration (_, _, _, vn, dvn_dconcentration) = tf.while_loop( cond=lambda n, *_: n > 0., body=loop_body, loop_vars=( num_terms, tf.zeros_like(x, name="rn"), tf.zeros_like(x, name="drn_dconcentration"), tf.zeros_like(x, name="vn"), tf.zeros_like(x, name="dvn_dconcentration"), ), ) cdf = .5 + x / (2. * np.pi) + vn / np.pi dcdf_dconcentration = dvn_dconcentration / np.pi cdf_clipped = tf.clip_by_value(cdf, 0., 1.) dcdf_dconcentration *= tf.cast((cdf >= 0.) & (cdf <= 1.), dtype) return cdf_clipped, dcdf_dconcentration
Computes the von Mises CDF and its derivative via series expansion.
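For reference, the loop above implements the classical Fourier expansion of the von Mises CDF, with the Bessel-function ratio r_n = I_n(κ)/I_{n-1}(κ) evaluated by the backward recurrence visible in loop_body (starting from r_{N+1} = 0):

    F(x;\kappa) \approx \frac{1}{2} + \frac{x}{2\pi}
        + \frac{1}{\pi}\sum_{n=1}^{N}\frac{I_n(\kappa)}{I_0(\kappa)}\,\frac{\sin(nx)}{n},
    \qquad r_n = \frac{I_n(\kappa)}{I_{n-1}(\kappa)} = \frac{1}{2n/\kappa + r_{n+1}}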
10,796
def get_assessment_offered_mdata(): return { # nested dict of per-element metadata maps; each entry holds display-text maps built from DEFAULT_LANGUAGE_TYPE / DEFAULT_SCRIPT_TYPE / DEFAULT_FORMAT_TYPE plus required/read-only/linked/array flags, default values, and a syntax field ... }
Return default mdata map for AssessmentOffered
10,797
def add_arguments(cls, parser, sys_arg_list=None): parser.add_argument('-f', '--file', dest='file', required=True, help="config file for routing groups " "(only in configfile mode)") return ["file"]
Arguments for the configfile mode.
10,798
def extract_words(string): string return re.findall(r % (A, A, A), string, flags=FLAGS)
Extract all alphabetic syllabified forms from 'string'.
10,799
def _underscore_to_camelcase(value): def camelcase(): yield str.lower while True: yield str.capitalize c = camelcase() return "".join(next(c)(x) if x else '' for x in value.split("_"))
Convert Python snake case back to mixed case.
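With the empty-segment fallback filled in above, a quick usage check (the first segment stays lowercase, the rest are capitalized):

    print(_underscore_to_camelcase('max_retry_count'))  # -> 'maxRetryCount'
    print(_underscore_to_camelcase('already'))          # -> 'already'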