Dataset columns: Unnamed: 0 (int64, 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k)
10,800
def is_visit_primitive(obj): from .base import visit if (isinstance(obj, tuple(PRIMITIVE_TYPES)) and not isinstance(obj, STR) and not isinstance(obj, bytes)): return True if (isinstance(obj, CONTAINERS) and not isinstance(obj, STR) and not isinstance(obj, bytes)): return False if isinstance(obj, STR) or isinstance(obj, bytes): if len(obj) == 1: return True return False return list(visit(obj, max_enum=2)) == [obj]
Returns true if properly visiting the object returns only the object itself.
10,801
def _handle_inotify_event(self, wd): b = os.read(wd, 1024) if not b: return self.__buffer += b while 1: length = len(self.__buffer) if length < _STRUCT_HEADER_LENGTH: _LOGGER.debug("Not enough bytes for a header.") return peek_slice = self.__buffer[:_STRUCT_HEADER_LENGTH] header_raw = struct.unpack( _HEADER_STRUCT_FORMAT, peek_slice) header = _INOTIFY_EVENT(*header_raw) type_names = self._get_event_names(header.mask) _LOGGER.debug("Events received in stream: {}".format(type_names)) event_length = (_STRUCT_HEADER_LENGTH + header.len) if length < event_length: return filename = self.__buffer[_STRUCT_HEADER_LENGTH:event_length] filename_bytes = filename.rstrip(b) self.__buffer = self.__buffer[event_length:] path = self.__watches_r.get(header.wd) if path is not None: filename_unicode = filename_bytes.decode() yield (header, type_names, path, filename_unicode) buffer_length = len(self.__buffer) if buffer_length < _STRUCT_HEADER_LENGTH: break
Handle a series of events coming-in from inotify.
10,802
def part(self): part_id = self._json_data[] return self._client.part(pk=part_id, category=self._json_data[])
Retrieve the part that holds this Property. :returns: The :class:`Part` associated to this property :raises APIError: if the `Part` is not found
10,803
def _readResponse(self): traps = [] reply_word = None while reply_word != : reply_word, words = self._readSentence() if reply_word == : traps.append(TrapError(**words)) elif reply_word in (, ) and words: yield words if len(traps) > 1: raise MultiTrapError(*traps) elif len(traps) == 1: raise traps[0]
Yield each row of response until !done is received. :throws TrapError: If one !trap is received. :throws MultiTrapError: If > 1 !trap is received.
10,804
def responds(self): contacted_text = self._contacted_xpb.\ get_text_(self.profile_tree).lower() if not in contacted_text: return contacted_text.strip().replace(, )
:returns: The frequency with which the user associated with this profile responds to messages.
10,805
def save(self, save_json=True, save_xml=True): if self.layer_is_file_based: if save_json: self.write_to_file(self.json_uri) if save_xml: self.write_to_file(self.xml_uri) else: self.write_to_db(save_json, save_xml)
Saves the metadata json and/or xml to a file or DB. :param save_json: flag to save json :type save_json: bool :param save_xml: flag to save xml :type save_xml: bool
10,806
def reset_env(exclude=[]): if os.getenv(env.INITED): wandb_keys = [key for key in os.environ.keys() if key.startswith( ) and key not in exclude] for key in wandb_keys: del os.environ[key] return True else: return False
Remove environment variables, used in Jupyter notebooks
10,807
def str_replace(x, pat, repl, n=-1, flags=0, regex=False): sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
Replace occurrences of a pattern/regex in a column with some other string. :param str pattern: string or a regex pattern :param str replace: a replacement string :param int n: number of replacements to be made from the start. If -1 make all replacements. :param int flags: ?? :param bool regex: If True, ...? :returns: an expression containing the string replacements. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.replace(pat='et', repl='__') Expression = str_replace(text, pat='et', repl='__') Length: 5 dtype: str (expression) --------------------------------- 0 Som__hing 1 very pr__ty 2 is coming 3 our 4 way.
10,808
def updatePassword(self, user, currentPassword, newPassword): return self.__post(, data={ : user, : currentPassword, : newPassword })
Change the password of a user.
10,809
def _loadData(self, data): self._data = data for elem in data: id = utils.lowerFirst(elem.attrib[]) if id in self._settings: self._settings[id]._loadData(elem) continue self._settings[id] = Setting(self._server, elem, self._initpath)
Load attribute values from Plex XML response.
10,810
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): app = inliner.document.settings.env.app ref = + text node = nodes.reference(rawtext, text, refuri=ref, **options) return [node], []
Link to a GitHub user. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
10,811
def OnShowFindReplace(self, event): data = wx.FindReplaceData(wx.FR_DOWN) dlg = wx.FindReplaceDialog(self.grid, data, "Find & Replace", wx.FR_REPLACEDIALOG) dlg.data = data dlg.Show(True)
Calls the find-replace dialog
10,812
def set_content(self, data): content = self._get_content(data, ) if content == : content = self._get_content(data, ) if content == : if data.get(): content = data.get() return content
handle the content from the data :param data: contains the data from the provider :type data: dict :rtype: string
10,813
def SetSerializersProfiler(self, serializers_profiler): self._serializers_profiler = serializers_profiler if self._storage_file: self._storage_file.SetSerializersProfiler(serializers_profiler)
Sets the serializers profiler. Args: serializers_profiler (SerializersProfiler): serializers profiler.
10,814
def _filtdim(items, shape, dim, nsl): normshape = tuple(stop - start for start, stop in shape) nsl_type = type(nsl) newitems = list() num = reduce(operator.mul, normshape[:dim+1]) size = len(items) // num n = normshape[dim] if nsl_type is int: for i in range(num): if i % n == nsl: newitems += items[size*i:size*(i+1)] newshape = shape[:dim] + shape[dim+1:] elif nsl_type is slice: for i in range(num): if nsl.start <= (i % n) < nsl.stop: newitems += items[size*i:size*(i+1)] offset = shape[dim][0] redim = (offset + nsl.start, offset + nsl.stop) newshape = shape[:dim] + (redim, ) + shape[dim+1:] else: if nsl.size < clog2(n): fstr = "expected dim {} select to have >= {} bits, got {}" raise ValueError(fstr.format(dim, clog2(n), nsl.size)) groups = [list() for _ in range(n)] for i in range(num): groups[i % n] += items[size*i:size*(i+1)] for muxins in zip(*groups): it = boolfunc.iter_terms(nsl._items) xs = [reduce(operator.and_, (muxin, ) + next(it)) for muxin in muxins] newitems.append(reduce(operator.or_, xs)) newshape = shape[:dim] + shape[dim+1:] return newitems, newshape
Return items, shape filtered by a dimension slice.
10,815
def put_encryption_materials(self, cache_key, encryption_materials, plaintext_length, entry_hints=None): return CryptoMaterialsCacheEntry(cache_key=cache_key, value=encryption_materials)
Does not add encryption materials to the cache since there is no cache to which to add them. :param bytes cache_key: Identifier for entries in cache :param encryption_materials: Encryption materials to add to cache :type encryption_materials: aws_encryption_sdk.materials_managers.EncryptionMaterials :param int plaintext_length: Length of plaintext associated with this request to the cache :param entry_hints: Metadata to associate with entry (optional) :type entry_hints: aws_encryption_sdk.caches.CryptoCacheEntryHints :rtype: aws_encryption_sdk.caches.CryptoMaterialsCacheEntry
10,816
def _process_genes(self, limit=None): LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = .join((self.rawdir, self.files[][])) geno = Genotype(graph) with open(raw, , encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter=, quotechar=) for row in filereader: line_counter += 1 (gene_id, gene_so_id, gene_symbol, ncbi_gene_id ) = row if self.test_mode and gene_id not in self.test_ids[]: continue gene_id = + gene_id.strip() ncbi_gene_id = + ncbi_gene_id.strip() self.id_label_map[gene_id] = gene_symbol if not self.test_mode and limit is not None and line_counter > limit: pass else: geno.addGene(gene_id, gene_symbol) model.addEquivalentClass(gene_id, ncbi_gene_id) LOG.info("Done with genes") return
This table provides the ZFIN gene id, the SO type of the gene, the gene symbol, and the NCBI Gene ID. Triples created: <gene id> a class <gene id> rdfs:label gene_symbol <gene id> equivalent class <ncbi_gene_id> :param limit: :return:
10,817
def setup(app): global _is_sphinx _is_sphinx = True app.add_config_value(, False, ) app.add_source_parser(, M2RParser) app.add_directive(, MdInclude)
When used as a Sphinx extension.
10,818
def add_handler( self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int ) -> None: raise NotImplementedError()
Registers the given handler to receive the given events for ``fd``. The ``fd`` argument may either be an integer file descriptor or a file-like object with a ``fileno()`` and ``close()`` method. The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors.
10,819
def get_self_host(request_data): if in request_data: current_host = request_data[] elif in request_data: current_host = request_data[] else: raise Exception() if in current_host: current_host_data = current_host.split() possible_port = current_host_data[-1] try: possible_port = float(possible_port) current_host = current_host_data[0] except ValueError: current_host = .join(current_host_data) return current_host
Returns the current host. :param request_data: The request as a dict :type: dict :return: The current host :rtype: string
10,820
def list_provincies(self, gewest=2): try: gewest_id = gewest.id except AttributeError: gewest_id = gewest def creator(): return [Provincie(p[0], p[1], Gewest(p[2])) for p in self.provincies if p[2] == gewest_id] if self.caches[].is_configured: key = % gewest_id provincies = self.caches[].get_or_create(key, creator) else: provincies = creator() for p in provincies: p.set_gateway(self) return provincies
List all `provincies` in a `gewest`. :param gewest: The :class:`Gewest` for which the \ `provincies` are wanted. :param integer sort: What field to sort on. :rtype: A :class:`list` of :class:`Provincie`.
10,821
def match(self, objects: List[Any]) -> bool: s = self._make_string(objects) m = self._compiled_expression.match(s) return m is not None
Return True if the list of objects matches the expression.
10,822
def rmdir_p(self): try: self.rmdir() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: raise return self
Like :meth:`rmdir`, but does not raise an exception if the directory is not empty or does not exist.
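A minimal standalone sketch of the same "ignore expected rmdir failures" pattern, assuming a plain path string rather than the path object used above; the errno set (including ENOENT for a missing directory) is an illustrative choice, not the library's exact behaviour.

import errno
import os

def rmdir_quiet(path):
    # Remove the directory, ignoring "not empty" / "already exists" / "missing" errors.
    try:
        os.rmdir(path)
    except OSError as exc:
        if exc.errno not in (errno.ENOTEMPTY, errno.EEXIST, errno.ENOENT):
            raise
    return path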
10,823
def make_back_notes(self, body): for notes in self.article.root.xpath(): notes_sec = deepcopy(notes.find()) notes_sec.tag = notes_sec.attrib[] = body.append(notes_sec)
The notes element in PLoS articles can be employed for posting notices of corrections or adjustments in proof. The <notes> element has a very diverse content model, but PLoS practice appears to be fairly consistent: a single <sec> containing a <title> and a <p>
10,824
def detect(self): if PY3: import subprocess else: import commands as subprocess try: theip = subprocess.getoutput(self.opts_command) except Exception: theip = None self.set_current_value(theip) return theip
Detect and return the IP address.
10,825
def error_msg_wx(msg, parent=None): dialog =wx.MessageDialog(parent = parent, message = msg, caption = , style=wx.OK | wx.CENTRE) dialog.ShowModal() dialog.Destroy() return None
Signal an error condition -- in a GUI, pop up an error dialog
10,826
def hsepd_pdf(sigma1, sigma2, xi, beta, sim=None, obs=None, node=None, skip_nan=False): sim, obs = prepare_arrays(sim, obs, node, skip_nan) sigmas = _pars_h(sigma1, sigma2, sim) mu_xi, sigma_xi, w_beta, c_beta = _pars_sepd(xi, beta) x, mu = obs, sim a = (x-mu)/sigmas a_xi = numpy.empty(a.shape) idxs = mu_xi+sigma_xi*a < 0. a_xi[idxs] = numpy.absolute(xi*(mu_xi+sigma_xi*a[idxs])) a_xi[~idxs] = numpy.absolute(1./xi*(mu_xi+sigma_xi*a[~idxs])) ps = (2.*sigma_xi/(xi+1./xi)*w_beta * numpy.exp(-c_beta*a_xi**(2./(1.+beta))))/sigmas return ps
Calculate the probability densities based on the heteroskedastic skewed exponential power distribution. For convenience, the required parameters of the probability density function as well as the simulated and observed values are stored in a dictionary: >>> import numpy >>> from hydpy import round_ >>> from hydpy import hsepd_pdf >>> general = {'sigma1': 0.2, ... 'sigma2': 0.0, ... 'xi': 1.0, ... 'beta': 0.0, ... 'sim': numpy.arange(10.0, 41.0), ... 'obs': numpy.full(31, 25.0)} The following test function allows the variation of one parameter and prints some of and plots all of the probability density values corresponding to different simulated values: >>> def test(**kwargs): ... from matplotlib import pyplot ... special = general.copy() ... name, values = list(kwargs.items())[0] ... results = numpy.zeros((len(general['sim']), len(values)+1)) ... results[:, 0] = general['sim'] ... for jdx, value in enumerate(values): ... special[name] = value ... results[:, jdx+1] = hsepd_pdf(**special) ... pyplot.plot(results[:, 0], results[:, jdx+1], ... label='%s=%.1f' % (name, value)) ... pyplot.legend() ... for idx, result in enumerate(results): ... if not (idx % 5): ... round_(result) When varying parameter `beta`, the resulting probabilities correspond to the Laplace distribution (1.0), normal distribution (0.0), and the uniform distribution (-1.0), respectively. Note that we use -0.99 instead of -1.0 for approximating the uniform distribution to prevent running into numerical problems, which are not solved yet: >>> test(beta=[1.0, 0.0, -0.99]) 10.0, 0.002032, 0.000886, 0.0 15.0, 0.008359, 0.010798, 0.0 20.0, 0.034382, 0.048394, 0.057739 25.0, 0.141421, 0.079788, 0.057739 30.0, 0.034382, 0.048394, 0.057739 35.0, 0.008359, 0.010798, 0.0 40.0, 0.002032, 0.000886, 0.0 .. testsetup:: >>> from matplotlib import pyplot >>> pyplot.close() When varying parameter `xi`, the resulting density is negatively skewed (0.2), symmetric (1.0), and positively skewed (5.0), respectively: >>> test(xi=[0.2, 1.0, 5.0]) 10.0, 0.0, 0.000886, 0.003175 15.0, 0.0, 0.010798, 0.012957 20.0, 0.092845, 0.048394, 0.036341 25.0, 0.070063, 0.079788, 0.070063 30.0, 0.036341, 0.048394, 0.092845 35.0, 0.012957, 0.010798, 0.0 40.0, 0.003175, 0.000886, 0.0 .. testsetup:: >>> from matplotlib import pyplot >>> pyplot.close() In the above examples, the actual `sigma` (5.0) is calculated by multiplying `sigma1` (0.2) with the mean simulated value (25.0), internally. This can be done for modelling homoscedastic errors. Instead, `sigma2` is multiplied with the individual simulated values to account for heteroscedastic errors. With increasing values of `sigma2`, the resulting densities are modified as follows: >>> test(sigma2=[0.0, 0.1, 0.2]) 10.0, 0.000886, 0.002921, 0.005737 15.0, 0.010798, 0.018795, 0.022831 20.0, 0.048394, 0.044159, 0.037988 25.0, 0.079788, 0.053192, 0.039894 30.0, 0.048394, 0.04102, 0.032708 35.0, 0.010798, 0.023493, 0.023493 40.0, 0.000886, 0.011053, 0.015771 .. testsetup:: >>> from matplotlib import pyplot >>> pyplot.close()
10,827
def from_country(cls, country): result = cls.list({: }) dc_countries = {} for dc in result: if dc[] not in dc_countries: dc_countries[dc[]] = dc[] return dc_countries.get(country)
Retrieve the first datacenter id associated to a country.
10,828
def get_info(self): if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { : ci[0], : ci[1], : ci[2], : ci[3], : ci[4], : ci[5], : ci[6], : ci[7][0], : ci[7][1], : ci[7][2], : ci[7][3], : ci[7][4], : ci[7][5], : ci[8] } return self.info
Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data.
10,829
def analyze(problem, Y, M=4, print_to_console=False, seed=None): if seed: np.random.seed(seed) D = problem[] if Y.size % (D) == 0: N = int(Y.size / D) else: print() exit() omega = np.zeros([D]) omega[0] = math.floor((N - 1) / (2 * M)) m = math.floor(omega[0] / (2 * M)) if m >= (D - 1): omega[1:] = np.floor(np.linspace(1, m, D - 1)) else: omega[1:] = np.arange(D - 1) % m + 1 if print_to_console: print("Parameter First Total") Si = ResultDict((k, [None] * D) for k in [, ]) Si[] = problem[] for i in range(D): l = np.arange(i * N, (i + 1) * N) Si[][i] = compute_first_order(Y[l], N, M, omega[0]) Si[][i] = compute_total_order(Y[l], N, omega[0]) if print_to_console: print("%s %f %f" % (problem[][i], Si[][i], Si[][i])) return Si
Performs the Fourier Amplitude Sensitivity Test (FAST) on model outputs. Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of size D (the number of parameters) containing the indices in the same order as the parameter file. Parameters ---------- problem : dict The problem definition Y : numpy.array A NumPy array containing the model outputs M : int The interference parameter, i.e., the number of harmonics to sum in the Fourier series decomposition (default 4) print_to_console : bool Print results directly to console (default False) References ---------- .. [1] Cukier, R. I., C. M. Fortuin, K. E. Shuler, A. G. Petschek, and J. H. Schaibly (1973). "Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients." J. Chem. Phys., 59(8):3873-3878, doi:10.1063/1.1680571. .. [2] Saltelli, A., S. Tarantola, and K. P.-S. Chan (1999). "A Quantitative Model-Independent Method for Global Sensitivity Analysis of Model Output." Technometrics, 41(1):39-56, doi:10.1080/00401706.1999.10485594. Examples -------- >>> X = fast_sampler.sample(problem, 1000) >>> Y = Ishigami.evaluate(X) >>> Si = fast.analyze(problem, Y, print_to_console=False)
10,830
def global_matches(self, text): matches = [] match_append = matches.append n = len(text) for lst in [keyword.kwlist, __builtin__.__dict__.keys(), self.namespace.keys(), self.global_namespace.keys()]: for word in lst: if word[:n] == text and word != "__builtins__": match_append(word) return matches
Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.
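A self-contained sketch of the same completion idea, assuming the namespaces are passed in as plain dicts (this standalone global_matches is not the class method above):

import builtins
import keyword

def global_matches(text, namespaces):
    # Gather every keyword, builtin and namespace name that starts with `text`.
    matches = []
    sources = [keyword.kwlist, dir(builtins)] + [list(ns) for ns in namespaces]
    for words in sources:
        for word in words:
            if word.startswith(text) and word != "__builtins__":
                matches.append(word)
    return matches

print(global_matches("pri", [{}]))   # ['print'] on CPython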
10,831
def compute(self, runner_results, setup=False, poll=False, ignore_errors=False): for (host, value) in runner_results.get(, {}).iteritems(): if not ignore_errors and (( in value and bool(value[])) or ( in value and value[] != 0)): self._increment(, host) elif in value and bool(value[]): self._increment(, host) elif in value and bool(value[]): if not setup and not poll: self._increment(, host) self._increment(, host) else: if not poll or ( in value and bool(value[])): self._increment(, host) for (host, value) in runner_results.get(, {}).iteritems(): self._increment(, host)
walk through all results and increment stats
10,832
def debug_variable_node_render(self, context): try: output = self.filter_expression.resolve(context) output = template_localtime(output, use_tz=context.use_tz) output = localize(output, use_l10n=context.use_l10n) output = force_text(output) except Exception as e: if not hasattr(e, ): e.django_template_source = self.source raise if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData): return escape(output) else: return output
Like DebugVariableNode.render, but doesn't catch UnicodeDecodeError.
10,833
def fan_speed(self, speed: int = None) -> bool: body = helpers.req_body(self.manager, ) body[] = self.uuid head = helpers.req_headers(self.manager) if self.details.get() != : self.mode_toggle() else: if speed is not None: level = int(self.details.get()) if speed == level: return False elif speed in [1, 2, 3]: body[] = speed else: if (level + 1) > 3: body[] = 1 else: body[] = int(level + 1) r, _ = helpers.call_api(, , json=body, headers=head) if r is not None and helpers.check_response(r, ): self.details[] = body[] return True else: return False
Adjust fan speed by specifying 1, 2, or 3 as an argument, or cycle through the speeds, increasing by one.
10,834
def get_connection(self, command_name, *keys, **options): "Get a connection from the pool" self._checkpid() try: connection = self._available_connections.pop() except IndexError: connection = self.make_connection() self._in_use_connections.add(connection) try: connection.connect() if not connection.is_ready_for_command(): connection.disconnect() connection.connect() if not connection.is_ready_for_command(): raise ConnectionError() except: self.release(connection) raise return connection
Get a connection from the pool
10,835
def headerData(self, section, orientation, role): if role == QtCore.Qt.DisplayRole: if orientation == QtCore.Qt.Horizontal: return self.headers[section]
Get the Header for the columns in the table Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>` :param section: column of header to return :type section: int
10,836
def get_total_size_trans(self, entries): size = 0 for entry in entries: if entry[][] > 0: size += entry[][] return size
Returns the total size of a collection of entries - transferred. NOTE: use with har file generated with chrome-har-capturer :param entries: ``list`` of entries to calculate the total size of.
10,837
def version_router(self, request, response, api_version=None, versions={}, not_found=None, **kwargs): request_version = self.determine_version(request, api_version) if request_version: request_version = int(request_version) versions.get(request_version or False, versions.get(None, not_found))(request, response, api_version=api_version, **kwargs)
Intelligently routes a request to the correct handler based on the version being requested
10,838
def is_valid(self, request_data, request_id=None, raise_exceptions=False): self.__error = None try: if self.document.get(, None) != : raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.UNSUPPORTED_SAML_VERSION ) if self.document.get(, None) is None: raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.MISSING_ID ) self.check_status() if not self.validate_num_assertions(): raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_ASSERTIONS ) idp_data = self.__settings.get_idp_data() idp_entity_id = idp_data.get(, ) sp_data = self.__settings.get_sp_data() sp_entity_id = sp_data.get(, ) signed_elements = self.process_signed_elements() has_signed_response = % OneLogin_Saml2_Constants.NS_SAMLP in signed_elements has_signed_assertion = % OneLogin_Saml2_Constants.NS_SAML in signed_elements if self.__settings.is_strict(): no_valid_xml_msg = res = OneLogin_Saml2_Utils.validate_xml( tostring(self.document), , self.__settings.is_debug_active() ) if not isinstance(res, Document): raise OneLogin_Saml2_ValidationError( no_valid_xml_msg, OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT ) if self.encrypted: res = OneLogin_Saml2_Utils.validate_xml( tostring(self.decrypted_document), , self.__settings.is_debug_active() ) if not isinstance(res, Document): raise OneLogin_Saml2_ValidationError( no_valid_xml_msg, OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT ) security = self.__settings.get_security_data() current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data) in_response_to = self.document.get(, None) if request_id is None and in_response_to is not None and security.get(, False): raise OneLogin_Saml2_ValidationError( % in_response_to, OneLogin_Saml2_ValidationError.WRONG_INRESPONSETO ) if request_id is not None and in_response_to != request_id: raise OneLogin_Saml2_ValidationError( % (in_response_to, request_id), OneLogin_Saml2_ValidationError.WRONG_INRESPONSETO ) if not self.encrypted and security.get(, False): raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.NO_ENCRYPTED_ASSERTION ) if security.get(, False): encrypted_nameid_nodes = self.__query_assertion() if len(encrypted_nameid_nodes) != 1: raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.NO_ENCRYPTED_NAMEID ) if not self.check_one_condition(): raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.MISSING_CONDITIONS ) self.validate_timestamps(raise_exceptions=True) if not self.check_one_authnstatement(): raise OneLogin_Saml2_ValidationError( , OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_AUTHSTATEMENTS ) requested_authn_contexts = security.get(, True) if security.get(, False) and requested_authn_contexts and requested_authn_contexts is not True: authn_contexts = self.get_authn_contexts() unmatched_contexts = set(requested_authn_contexts).difference(authn_contexts) if unmatched_contexts: raise OneLogin_Saml2_ValidationError( t include requested context "%s", , /saml:AttributeStatementwantAttributeStatementThere is no AttributeStatement on the Response/saml:AttributeStatement/saml:EncryptedAttributeThere is an EncryptedAttribute in the Response and this SP not support themDestinationThe response was received at %s instead of %sThe response has an empty Destination value%s is not a valid audience for this ResponseInvalid issuer in the Assertion/Response (expected %(idpEntityId)s, got %(issuer)s)idpEntityIdissuerThe attributes have expired, based on the SessionNotOnOrAfter of the AttributeStatement of this 
Response/saml:Subject/saml:SubjectConfirmationMethodsaml:SubjectConfirmationDataInResponseTorejectUnsolicitedResponsesWithInResponseToRecipientNotOnOrAfterNotBeforeA valid SubjectConfirmation was not found on this ResponsewantAssertionsSignedThe Assertion of the Response is not signed and the SP require itwantMessagesSignedThe Message of the Response is not signed and the SP require itNo Signature found. SAML Response rejectedx509certcertFingerprintcertFingerprintAlgorithmx509certMultisigningx509certMultix509certMultisigningx509certMultisigningSignature validation failed. SAML Response rejectedSignature validation failed. SAML Response rejected', OneLogin_Saml2_ValidationError.INVALID_SIGNATURE ) return True except Exception as err: self.__error = err.__str__() debug = self.__settings.is_debug_active() if debug: print(err.__str__()) if raise_exceptions: raise err return False
Validates the response object. :param request_data: Request Data :type request_data: dict :param request_id: Optional argument. The ID of the AuthNRequest sent by this SP to the IdP :type request_id: string :param raise_exceptions: Whether to return false on failure or raise an exception :type raise_exceptions: Boolean :returns: True if the SAML Response is valid, False if not :rtype: bool
10,839
def mk_set_headers(self, data, columns): columns = tuple(columns) lens = [] for key in columns: value_len = max(len(str(each.get(key, ))) for each in data) lens.append(max(value_len, len(self._get_name(key)))) fmt = self.mk_fmt(*lens) return fmt
figure out sizes and create header fmt
10,840
def _root(path, root): if root: return os.path.join(root, os.path.relpath(path, os.path.sep)) else: return path
Relocate an absolute path to a new root directory.
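A quick, runnable illustration of what the relocation does on a POSIX path; the function name is only for the example:

import os

def relocate(path, root):
    # Drop the leading separator from `path` and graft the remainder onto `root`.
    return os.path.join(root, os.path.relpath(path, os.path.sep)) if root else path

print(relocate("/etc/hosts", "/tmp/chroot"))   # /tmp/chroot/etc/hosts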
10,841
def __generate_cluster_centers(self, width): centers = [] default_offset = max(width) * 4.0 for i in range(self.__amount_clusters): center = [ random.gauss(i * default_offset, width[i] / 2.0) for _ in range(self.__dimension) ] centers.append(center) return centers
! @brief Generates centers (means in statistical term) for clusters. @param[in] width (list): Width of generated clusters. @return (list) Generated centers in line with normal distribution.
10,842
def generate_id(self): if self.use_repeatable_ids: self.repeatable_id_counter += 1 return .format(self.repeatable_id_counter) else: return str(uuid4())
Generate a fresh id
10,843
def _find_keep_files(root, keep): real_keep = set() real_keep.add(root) if isinstance(keep, list): for fn_ in keep: if not os.path.isabs(fn_): continue fn_ = os.path.normcase(os.path.abspath(fn_)) real_keep.add(fn_) while True: fn_ = os.path.abspath(os.path.dirname(fn_)) real_keep.add(fn_) drive, path = os.path.splitdrive(fn_) if not path.lstrip(os.sep): break return real_keep
Compile a list of valid keep files (and directories). Used by _clean_dir()
10,844
def value(self): val = (round(i / 1000, 3) for i in self.__serial_price(1)) return list(val)
Trading volume series (in lots). :rtype: list
10,845
def either(self): if not hasattr(self, ): return Either(Required(self)) else: ret = [] groups = [[self]] while groups: children = groups.pop(0) types = [type(c) for c in children] if Either in types: either = [c for c in children if type(c) is Either][0] children.pop(children.index(either)) for c in either.children: groups.append([c] + children) elif Required in types: required = [c for c in children if type(c) is Required][0] children.pop(children.index(required)) groups.append(list(required.children) + children) elif Optional in types: optional = [c for c in children if type(c) is Optional][0] children.pop(children.index(optional)) groups.append(list(optional.children) + children) elif OneOrMore in types: oneormore = [c for c in children if type(c) is OneOrMore][0] children.pop(children.index(oneormore)) groups.append(list(oneormore.children) * 2 + children) else: ret.append(children) return Either(*[Required(*e) for e in ret])
Transform pattern into an equivalent, with only top-level Either.
10,846
def f_delete_links(self, iterator_of_links, remove_from_trajectory=False): to_delete_links = [] group_link_pairs = [] for elem in iterator_of_links: if isinstance(elem, str): split_names = elem.split() parent_name = .join(split_names[:-1]) link = split_names[-1] parent_node = self.f_get(parent_name) if parent_name != else self link_name = parent_node.v_full_name + + link if parent_name != else link to_delete_links.append((pypetconstants.DELETE_LINK, link_name)) group_link_pairs.append((parent_node, link)) else: link_name = elem[0].v_full_name + + elem[1] to_delete_links.append((pypetconstants.DELETE_LINK, link_name)) group_link_pairs.append(elem) try: self._storage_service.store(pypetconstants.LIST, to_delete_links, trajectory_name=self.v_name) except: self._logger.error( % str(to_delete_links)) raise if remove_from_trajectory: for group, link in group_link_pairs: group.f_remove_link(link)
Deletes several links from the hard disk. Links can be passed as a string ``'groupA.groupB.linkA'`` or as a tuple containing the node from which the link should be removed and the name of the link ``(groupWithLink, 'linkA')``.
10,847
def check_labels(self): for entry in self.labels: self.check_is_declared(entry.name, entry.lineno, CLASS.label)
Checks if all the labels have been declared
10,848
def supports(cls, template_file=None): if anytemplate.compat.IS_PYTHON_3: cls._priority = 99 return False return super(Engine, cls).supports(template_file=template_file)
:return: Whether the engine can process the given template file or not.
10,849
def threshold_monitor_hidden_threshold_monitor_sfp_policy_area_threshold_high_threshold(self, **kwargs): config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") sfp = ET.SubElement(threshold_monitor, "sfp") policy = ET.SubElement(sfp, "policy") policy_name_key = ET.SubElement(policy, "policy_name") policy_name_key.text = kwargs.pop() area = ET.SubElement(policy, "area") type_key = ET.SubElement(area, "type") type_key.text = kwargs.pop() area_value_key = ET.SubElement(area, "area_value") area_value_key.text = kwargs.pop() threshold = ET.SubElement(area, "threshold") high_threshold = ET.SubElement(threshold, "high-threshold") high_threshold.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
10,850
def log_normalize(a, axis=None): with np.errstate(under="ignore"): a_lse = logsumexp(a, axis) a -= a_lse[:, np.newaxis]
Normalizes the input array so that the exponent of the sum is 1. Parameters ---------- a : array Non-normalized input data. axis : int Dimension along which normalization is performed. Notes ----- Modifies the input **inplace**.
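A NumPy-only sketch of the same in-place log-normalization, using the standard max-shift trick instead of scipy's logsumexp; the keepdims-based axis handling is an assumption that differs slightly from the snippet above:

import numpy as np

def log_normalize(a, axis=None):
    # Subtract log(sum(exp(a))) so that exp(a) sums to 1 along `axis`, in place.
    a_max = np.max(a, axis=axis, keepdims=True)
    lse = a_max + np.log(np.sum(np.exp(a - a_max), axis=axis, keepdims=True))
    a -= lse

log_probs = np.log(np.array([[0.2, 0.2, 0.6], [1.0, 2.0, 7.0]]))
log_normalize(log_probs, axis=-1)
print(np.exp(log_probs).sum(axis=-1))   # [1. 1.]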
10,851
def _save_model(self, steps=0): for brain_name in self.trainers.keys(): self.trainers[brain_name].save_model() self.logger.info()
Saves current model to checkpoint folder. :param steps: Current number of steps in training process. :param saver: Tensorflow saver for session.
10,852
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None, create=, **kwargs): if axis is not None and axis != 0: raise NotImplementedError() if out is not None: raise NotImplementedError() storage = _util.get_storage(storage) names, columns = _util.check_table_like(tbl) blen = _util.get_blen_table(tbl, blen) _util.check_equal_length(columns[0], condition) length = len(columns[0]) nnz = count_nonzero(condition) out = None for i in range(0, length, blen): j = min(i+blen, length) bcond = condition[i:j] if np.any(bcond): bcolumns = [c[i:j] for c in columns] res = [np.compress(bcond, c, axis=0) for c in bcolumns] if out is None: out = getattr(storage, create)(res, names=names, expectedlen=nnz, **kwargs) else: out.append(res) return out
Return selected rows of a table.
10,853
def read_plain_int64(file_obj, count): return struct.unpack("<{}q".format(count).encode("utf-8"), file_obj.read(8 * count))
Read `count` 64-bit ints using the plain encoding.
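The decoder can be exercised end-to-end with an in-memory buffer; "<{}q" means `count` little-endian signed 64-bit integers:

import io
import struct

def read_plain_int64(file_obj, count):
    return struct.unpack("<{}q".format(count), file_obj.read(8 * count))

buf = io.BytesIO(struct.pack("<3q", 1, -2, 3))
print(read_plain_int64(buf, 3))   # (1, -2, 3)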
10,854
def _is_duplicate_record(self, rtype, name, content): records = self._list_records(rtype, name, content) is_duplicate = len(records) >= 1 if is_duplicate: LOGGER.info(, rtype, name, content) return is_duplicate
Check if DNS entry already exists.
10,855
def estimate_parameters(self, max_dist_kb, size_bin_kb, display_graph): logger.info("estimation of the parameters of the model") self.bins = np.arange( size_bin_kb, max_dist_kb + size_bin_kb, size_bin_kb ) self.mean_contacts = np.zeros_like(self.bins, dtype=np.float32) self.dict_collect = dict() self.gpu_vect_frags.copy_from_gpu() epsi = self.mean_value_trans for k in self.bins: self.dict_collect[k] = [] for i in range(0, 2000): start = self.sparse_matrix.indptr[i] end = self.sparse_matrix.indptr[i + 1] id_j = self.sparse_matrix.indices[start:end] data = self.sparse_matrix.data[start:end] info_i = self.np_sub_frags_2_frags[i] init_id_fi = info_i[0] id_c_i = self.S_o_A_frags["id_c"][init_id_fi] s_i = ( self.S_o_A_frags["start_bp"][init_id_fi] / 1000.0 + self.np_sub_frags_2_frags[i][1] ) len_kb_c_i = self.S_o_A_frags["l_cont_bp"][init_id_fi] / 1000 local_bins = np.arange( size_bin_kb, min(len_kb_c_i, max_dist_kb) + size_bin_kb, size_bin_kb, ) local_storage = np.zeros_like(local_bins, dtype=np.int32) for fj, dj in zip(id_j, data): info_j = self.np_sub_frags_2_frags[fj] init_id_fj = info_j[0] id_c_j = self.S_o_A_frags["id_c"][init_id_fj] if id_c_i == id_c_j: s_j = ( self.S_o_A_frags["start_bp"][init_id_fj] / 1000.0 + self.np_sub_frags_2_frags[fj][1] ) d = np.abs(s_i - s_j) if d < max_dist_kb: id_bin = d / size_bin_kb local_storage[id_bin] += dj for my_bin, val in zip(local_bins, local_storage): self.dict_collect[my_bin].append(val) for id_bin in range(0, len(self.bins)): k = self.bins[id_bin] self.mean_contacts[id_bin] = np.mean(self.dict_collect[k]) for id_bin in range(0, len(self.bins)): k = self.bins[id_bin] tmp = np.mean(self.dict_collect[k]) if np.isnan(tmp) or tmp == 0: self.mean_contacts[id_bin] = np.nan else: self.mean_contacts[id_bin] = tmp + epsi self.mean_contacts_upd = [] self.bins_upd = [] for count, ele in zip(self.mean_contacts, self.bins): if not np.isnan(count): self.bins_upd.append(ele) self.mean_contacts_upd.append(count) self.bins_upd = np.array(self.bins_upd) self.mean_contacts_upd = np.array(self.mean_contacts_upd) p, self.y_estim = nuis.estimate_param_hic( self.mean_contacts_upd, self.bins_upd ) fit_param = p.x logger.info("mean value trans = {}".format(self.mean_value_trans)) estim_max_dist = nuis.estimate_max_dist_intra( fit_param, self.mean_value_trans ) logger.info("max distance cis/trans = {}".format(estim_max_dist)) self.param_simu = self.setup_model_parameters( fit_param, estim_max_dist ) self.gpu_param_simu = cuda.mem_alloc(self.param_simu.nbytes) self.gpu_param_simu_test = cuda.mem_alloc(self.param_simu.nbytes) cuda.memcpy_htod(self.gpu_param_simu, self.param_simu) if display_graph: plt.loglog(self.bins_upd, self.mean_contacts_upd, "-*b") plt.loglog(self.bins_upd, self.y_estim, "-*r") plt.xlabel("genomic distance (kb)") plt.ylabel("frequency of contact") plt.legend(["obs", "fit"]) plt.show()
estimation by least square optimization of Rippe parameters on the experimental data :param max_dist_kb: :param size_bin_kb:
10,856
def reverse_mapping(mapping): keys, values = zip(*mapping.items()) return dict(zip(values, keys))
For every key, value pair, return the mapping for the equivalent value, key pair >>> reverse_mapping({'a': 'b'}) == {'b': 'a'} True
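An equivalent one-line dict comprehension, plus a reminder that values must be hashable and that duplicate values collapse (the last key wins):

def reverse_mapping(mapping):
    return {value: key for key, value in mapping.items()}

print(reverse_mapping({'a': 'b'}))        # {'b': 'a'}
print(reverse_mapping({'a': 1, 'c': 1}))  # {1: 'c'} -- duplicate values collapse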
10,857
def set_itunes_element(self): self.set_itunes_author_name() self.set_itunes_block() self.set_itunes_closed_captioned() self.set_itunes_duration() self.set_itunes_explicit() self.set_itune_image() self.set_itunes_order() self.set_itunes_subtitle() self.set_itunes_summary()
Set each of the itunes elements.
10,858
def install_plugin(self, dir, entry_script=None): self.runtimepath.append(dir) if entry_script is not None: self.command(.format(entry_script), False)
Install *Vim* plugin. :param string dir: the root directory contains *Vim* script :param string entry_script: path to the initializing script
10,859
def active_brokers(self): return { broker for broker in six.itervalues(self.brokers) if not broker.inactive and not broker.decommissioned }
Set of brokers that are not inactive or decommissioned.
10,860
def is_equal(self, other): other = IntervalCell.coerce(other) return other.low == self.low and other.high == self.high
Return True if the two intervals are the same (equal low and high bounds).
10,861
def read_data(self, **kwargs): trigger_id = kwargs.get() data = list() kwargs[] = kwargs[] = super(ServiceTumblr, self).read_data(**kwargs) cache.set( + str(trigger_id), data) return data
Get the data from the service. As the pocket service does not have any date in its API linked to the note, add the triggered date to the data dict, so the service will be triggered when data is found. :param kwargs: contain keyword args : trigger_id at least :type kwargs: dict :rtype: list
10,862
def honeypot_exempt(view_func): def wrapped(*args, **kwargs): return view_func(*args, **kwargs) wrapped.honeypot_exempt = True return wraps(view_func, assigned=available_attrs(view_func))(wrapped)
Mark view as exempt from honeypot validation
10,863
def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value): route_values = {} if feature_id is not None: route_values[] = self._serialize.url(, feature_id, ) if user_scope is not None: route_values[] = self._serialize.url(, user_scope, ) if scope_name is not None: route_values[] = self._serialize.url(, scope_name, ) if scope_value is not None: route_values[] = self._serialize.url(, scope_value, ) response = self._send(http_method=, location_id=, version=, route_values=route_values) return self._deserialize(, response)
GetFeatureStateForScope. [Preview API] Get the state of the specified feature for the given named scope :param str feature_id: Contribution id of the feature :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
10,864
def is_child_of_bin(self, id_, bin_id): if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=bin_id) return self._hierarchy_session.is_child(id_=bin_id, child_id=id_)
Tests if a bin is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if the ``id`` is a child of ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
10,865
def create_translation_field(translated_field, language): cls_name = translated_field.__class__.__name__ if not isinstance(translated_field, tuple(SUPPORTED_FIELDS.keys())): raise ImproperlyConfigured("%s is not supported by Linguist." % cls_name) translation_class = field_factory(translated_field.__class__) kwargs = get_translation_class_kwargs(translated_field.__class__) return translation_class( translated_field=translated_field, language=language, **kwargs )
Takes the original field, a given language, a decider model and returns a Field class for the model.
10,866
def deepish_copy(org): out = dict().fromkeys(org) for k, v in org.items(): if isinstance(v, dict): out[k] = deepish_copy(v) else: try: out[k] = v.copy() except AttributeError: try: out[k] = v[:] except TypeError: out[k] = v return out
Improved speed deep copy for dictionaries of simple python types. Thanks to Gregg Lind: http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
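A runnable restatement of the routine with the copying rules spelled out as comments, followed by a small check of the behaviour:

def deepish_copy(org):
    out = dict.fromkeys(org)
    for k, v in org.items():
        if isinstance(v, dict):
            out[k] = deepish_copy(v)       # recurse only into nested dicts
        else:
            try:
                out[k] = v.copy()          # lists, sets, ... (shallow copy)
            except AttributeError:
                try:
                    out[k] = v[:]          # sequences without .copy()
                except TypeError:
                    out[k] = v             # ints, None, other scalars
    return out

src = {"nums": [1, 2], "meta": {"tag": "x"}}
dst = deepish_copy(src)
dst["nums"].append(3)
print(src["nums"])   # [1, 2] -- the list object itself was copied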
10,867
def _generate_token(self, length=32): return .join(choice(ascii_letters + digits) for x in range(length))
_generate_token - internal function for generating randomized alphanumeric strings of a given length
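A standalone version with the join delimiter restored; for security-sensitive tokens, secrets.choice would be a better source of randomness than random.choice:

from random import choice
from string import ascii_letters, digits

def generate_token(length=32):
    # `length` random characters drawn from [A-Za-z0-9].
    return ''.join(choice(ascii_letters + digits) for _ in range(length))

print(generate_token(8))   # e.g. 'aZ3kQ9xB'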
10,868
def bulk_exports(self): if self._bulk_exports is None: self._bulk_exports = BulkExports(self) return self._bulk_exports
:returns: Version bulk_exports of preview :rtype: twilio.rest.preview.bulk_exports.BulkExports
10,869
def unmajority(p, a, b, c): p.ccx(a, b, c) p.cx(c, a) p.cx(a, b)
Unmajority gate.
10,870
def to_xml(self, opts = defaultdict(lambda: None)): if not self.launch_url or not self.secure_launch_url: raise InvalidLTIConfigError() root = etree.Element(, attrib = { %(NSMAP[], ): , : }, nsmap = NSMAP) for key in [, , , ]: option = etree.SubElement(root, %(NSMAP[], key)) option.text = getattr(self, key) vendor_keys = [, , , ] if any( + key for key in vendor_keys) or\ self.vendor_contact_email: vendor_node = etree.SubElement(root, %(NSMAP[], )) for key in vendor_keys: if getattr(self, + key) != None: v_node = etree.SubElement(vendor_node, %(NSMAP[], key)) v_node.text = getattr(self, + key) if getattr(self, ): v_node = etree.SubElement(vendor_node, %(NSMAP[], )) c_name = etree.SubElement(v_node, %(NSMAP[], )) c_name.text = self.vendor_contact_name c_email = etree.SubElement(v_node, %(NSMAP[], )) c_email.text = self.vendor_contact_email if len(self.custom_params) != 0: custom_node = etree.SubElement(root, %(NSMAP[], )) for (key, val) in sorted(self.custom_params.items()): c_node = etree.SubElement(custom_node, %(NSMAP[], )) c_node.set(, key) c_node.text = val if len(self.extensions) != 0: for (key, params) in sorted(self.extensions.items()): extension_node = etree.SubElement(root, %(NSMAP[], ), platform = key) self.recursive_options(extension_node,params) if getattr(self, ): identifierref = etree.SubElement(root, , identifierref = self.cartridge_bundle) if getattr(self, ): identifierref = etree.SubElement(root, , identifierref = self.cartridge_icon) return + etree.tostring(root)
Generate XML from the current settings.
10,871
def git_list_refs(repo_dir): command = [, , , ] raw = execute_git_command(command, repo_dir=repo_dir).splitlines() output = [l.strip() for l in raw if l.strip()] return {ref: commit_hash for commit_hash, ref in [l.split(None, 1) for l in output]}
List references available in the local repo with commit ids. This is similar to ls-remote, but shows the *local* refs. Return format: .. code-block:: python {<ref1>: <commit_hash1>, <ref2>: <commit_hash2>, ..., <refN>: <commit_hashN>, }
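A hedged sketch of the same listing using plain subprocess; `git show-ref` prints "<commit_hash> <ref>" per line, though the exact flags the helper above passes are not preserved in the snippet:

import subprocess

def git_list_refs(repo_dir):
    raw = subprocess.check_output(["git", "show-ref"], cwd=repo_dir, text=True)
    lines = [line.strip() for line in raw.splitlines() if line.strip()]
    return {ref: commit for commit, ref in (line.split(None, 1) for line in lines)}

# git_list_refs(".") -> {'refs/heads/master': '0123abcd...', ...}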
10,872
def iter(self, count=0, func=sum): while True: yield self.roll(count, func)
Iterator of infinite dice rolls. :param count: [0] Return list of ``count`` sums :param func: [sum] Apply func to list of individual die rolls func([])
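A minimal self-contained dice class showing how the infinite generator is meant to be consumed (take a finite slice with itertools.islice); the constructor and roll signature here are assumptions for the example:

import random
from itertools import islice

class Dice:
    def __init__(self, sides=6, count=1):
        self.sides, self.count = sides, count

    def roll(self, count=0, func=sum):
        # Roll `count` (or the default number of) dice and reduce with `func`.
        n = count or self.count
        return func(random.randint(1, self.sides) for _ in range(n))

    def iter(self, count=0, func=sum):
        while True:
            yield self.roll(count, func)

print(list(islice(Dice(sides=6, count=2).iter(), 5)))   # e.g. [7, 5, 9, 4, 11]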
10,873
def add(self, field, data_type=None, nullable=True, metadata=None): if isinstance(field, StructField): self.fields.append(field) self.names.append(field.name) else: if isinstance(field, str) and data_type is None: raise ValueError("Must specify DataType if passing name of struct_field to create.") if isinstance(data_type, str): data_type_f = _parse_datatype_json_value(data_type) else: data_type_f = data_type self.fields.append(StructField(field, data_type_f, nullable, metadata)) self.names.append(field) self._needConversion = [f.needConversion() for f in self] self._needSerializeAnyField = any(self._needConversion) return self
Construct a StructType by adding new elements to it to define the schema. The method accepts either: a) A single parameter which is a StructField object. b) Between 2 and 4 parameters as (name, data_type, nullable (optional), metadata(optional). The data_type parameter may be either a String or a DataType object. >>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) >>> struct2 = StructType([StructField("f1", StringType(), True), \\ ... StructField("f2", StringType(), True, None)]) >>> struct1 == struct2 True >>> struct1 = StructType().add(StructField("f1", StringType(), True)) >>> struct2 = StructType([StructField("f1", StringType(), True)]) >>> struct1 == struct2 True >>> struct1 = StructType().add("f1", "string", True) >>> struct2 = StructType([StructField("f1", StringType(), True)]) >>> struct1 == struct2 True :param field: Either the name of the field or a StructField object :param data_type: If present, the DataType of the StructField to create :param nullable: Whether the field to add should be nullable (default True) :param metadata: Any additional metadata (default None) :return: a new updated StructType
10,874
def _scheduleUpgrade(self, ev_data: UpgradeLogData, failTimeout) -> None: logger.info( "{}{}' to version {} " "has been scheduled on {}" .format(ev_data.pkg_name, self.nodeName, ev_data.version, ev_data.when)) self._actionLog.append_scheduled(ev_data) callAgent = partial(self._callUpgradeAgent, ev_data, failTimeout) delay = 0 if now < ev_data.when: delay = (ev_data.when - now).total_seconds() self.scheduledAction = ev_data self._schedule(callAgent, delay)
Schedules node upgrade to a newer version :param ev_data: upgrade event parameters
10,875
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None, featuretypes_to_ignore=None): logger.info("Cleaning GFF") chroms_to_ignore = chroms_to_ignore or [] featuretypes_to_ignore = featuretypes_to_ignore or [] with open(cleaned, ) as fout: for i in gffutils.iterators.DataIterator(gff): if add_chr: i.chrom = "chr" + i.chrom if i.chrom in chroms_to_ignore: continue if i.featuretype in featuretypes_to_ignore: continue fout.write(str(i) + ) return cleaned
Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes. Optionally adds "chr" to chrom names.
10,876
def get_params(url, ignore_empty=False): try: params_start_index = url.index() except ValueError: params_start_index = 0 params_string = url[params_start_index + 1:] params_dict = {} for pair in params_string.split(): if not pair: continue splitted = pair.split() param, value = splitted if not value and ignore_empty: continue value = int(value) if value.isdigit() else value params_dict[param] = value return params_dict
Static method that parses a given `url` and retrieves `url`'s parameters. Could also ignore empty value parameters. Handles parameters-only urls as `q=banana&peel=false`. :param str url: url to parse :param bool ignore_empty: ignore empty value parameter or not :return: dictionary of params and their values :rtype: dict
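For comparison, the standard library covers the same ground; this sketch mirrors the described behaviour (bare query strings, optional skipping of empty values, digit coercion) with urllib.parse:

from urllib.parse import parse_qsl, urlsplit

def get_params(url, ignore_empty=False):
    # Fall back to treating the whole string as a query when there is no '?'.
    query = urlsplit(url).query or url
    params = {}
    for key, value in parse_qsl(query, keep_blank_values=not ignore_empty):
        params[key] = int(value) if value.isdigit() else value
    return params

print(get_params("https://example.com/?q=banana&n=3"))   # {'q': 'banana', 'n': 3}
print(get_params("q=banana&peel="))                      # {'q': 'banana', 'peel': ''}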
10,877
def add_term_facet(self, *args, **kwargs): self.facets.append(TermFacet(*args, **kwargs))
Add a term facet
10,878
def wait_for_servers(session, servers): nclient = nova.Client(NOVA_VERSION, session=session, region_name=os.environ[]) while True: deployed = [] undeployed = [] for server in servers: c = nclient.servers.get(server.id) if c.addresses != {} and c.status == : deployed.append(server) if c.status == : undeployed.append(server) logger.info("[nova]: Polling the Deployment") logger.info("[nova]: %s deployed servers" % len(deployed)) logger.info("[nova]: %s undeployed servers" % len(undeployed)) if len(deployed) + len(undeployed) >= len(servers): break time.sleep(3) return deployed, undeployed
Wait for the servers to be ready. Note(msimonin): we don't guarantee the SSH connection to be ready.
10,879
def to_html(self): if self.text is None: return else: return % ( self.html_attributes(), self.html_icon(), self.text.to_html())
Render a Paragraph MessageElement as html :returns: The html representation of the Paragraph MessageElement
10,880
def _migrate_db_pre010(self, dbname, newslab): dbnamedbname donekey = f if self.metadict.get(donekey, False): return if not self.layrslab.dbexists(dbname): self.metadict.set(donekey, True) return False oldslab = self.layrslab olddb = oldslab.initdb(dbname) entries = oldslab.stat(olddb)[] if not entries: self.metadict.set(donekey, True) return False if newslab.dbexists(dbname): logger.warning() newslab.dropdb(dbname) logger.info() logger.info(, dbname, entries) def progfunc(count): logger.info(, count, entries, count / entries * 100) oldslab.copydb(olddb, newslab, destdbname=dbname, progresscb=progfunc) logger.info(, dbname) oldslab.dropdb(dbname) logger.info(, dbname) self.metadict.set(donekey, True) return True
Check for any pre-010 entries in 'dbname' in my slab and migrate those to the new slab. Once complete, drop the database from me with the name 'dbname' Returns (bool): True if a migration occurred, else False
10,881
def get_version_info(): from astropy import __version__ astropy_version = __version__ from photutils import __version__ photutils_version = __version__ return .format(astropy_version, photutils_version)
Return astropy and photutils versions. Returns ------- result : str The astropy and photutils versions.
10,882
def datetime_to_ns(then): if then == datetime(1970, 1, 1, 0, 0): return now = datetime.utcnow() delta = now - then seconds = delta.total_seconds()
Transform a :any:`datetime.datetime` into a NationStates-style string. For example "6 days ago", "105 minutes ago", etc.
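The body above is cut off after computing `seconds`; a hedged sketch of how such an "N units ago" formatter could be finished is shown below (the 'Antiquity' label for the epoch sentinel and the exact wording are assumptions, not the library's verified output):

from datetime import datetime

def datetime_to_ago(then, now=None):
    if then == datetime(1970, 1, 1):
        return 'Antiquity'                      # assumed label for the epoch sentinel
    now = now or datetime.utcnow()
    seconds = (now - then).total_seconds()
    for unit, size in (('day', 86400), ('hour', 3600), ('minute', 60)):
        if seconds >= size:
            n = int(seconds // size)
            return '{} {}{} ago'.format(n, unit, 's' if n != 1 else '')
    return 'seconds ago'

print(datetime_to_ago(datetime(2020, 1, 1), now=datetime(2020, 1, 7, 12)))   # 6 days ago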
10,883
def _apply_dvs_config(config_spec, config_dict): if config_dict.get(): config_spec.name = config_dict[] if config_dict.get() or config_dict.get(): if not config_spec.contact: config_spec.contact = vim.DVSContactInfo() config_spec.contact.contact = config_dict.get() config_spec.contact.name = config_dict.get() if config_dict.get(): config_spec.description = config_dict.get() if config_dict.get(): config_spec.maxMtu = config_dict.get() if config_dict.get(): config_spec.lacpApiVersion = config_dict.get() if config_dict.get(): config_spec.networkResourceControlVersion = \ config_dict.get() if config_dict.get(): if not config_spec.uplinkPortPolicy or \ not isinstance(config_spec.uplinkPortPolicy, vim.DVSNameArrayUplinkPortPolicy): config_spec.uplinkPortPolicy = \ vim.DVSNameArrayUplinkPortPolicy() config_spec.uplinkPortPolicy.uplinkPortName = \ config_dict[]
Applies the values of the config dict dictionary to a config spec (vim.VMwareDVSConfigSpec)
10,884
def validate_enum_attribute(fully_qualified_name: str, spec: Dict[str, Any], attribute: str, candidates: Set[Union[str, int, float]]) -> Optional[InvalidValueError]: if attribute not in spec: return if spec[attribute] not in candidates: return InvalidValueError(fully_qualified_name, spec, attribute, candidates)
Validates to ensure that the value of an attribute lies within an allowed set of candidates
10,885
def _qmed_from_pot_records(self): pot_dataset = self.catchment.pot_dataset if not pot_dataset: raise InsufficientDataError("POT dataset must be set for catchment {} to estimate QMED from POT data." .format(self.catchment.id)) complete_year_records, length = self._complete_pot_years(pot_dataset) if length < 1: raise InsufficientDataError("Insufficient POT flow records available for catchment {}." .format(self.catchment.id)) position = 0.790715789 * length + 0.539684211 i = floor(position) w = 1 + i - position flows = [record.flow for record in complete_year_records] flows.sort(reverse=True) return w * flows[i - 1] + (1 - w) * flows[i]
Return QMED estimate based on peaks-over-threshold (POT) records. Methodology source: FEH, Vol. 3, pp. 77-78 :return: QMED in m³/s :rtype: float
10,886
def _vowelinstem(self, stem): for i in range(len(stem)): if not self._cons(stem, i): return True return False
vowelinstem(stem) is TRUE <=> stem contains a vowel
10,887
def _write_adminfile(kwargs): email = kwargs.get(, ) instance = kwargs.get(, ) partial = kwargs.get(, ) runlevel = kwargs.get(, ) idepend = kwargs.get(, ) rdepend = kwargs.get(, ) space = kwargs.get(, ) setuid = kwargs.get(, ) conflict = kwargs.get(, ) action = kwargs.get(, ) basedir = kwargs.get(, ) adminfile = salt.utils.files.mkstemp(prefix="salt-") def _write_line(fp_, line): fp_.write(salt.utils.stringutils.to_str(line)) with salt.utils.files.fopen(adminfile, ) as fp_: _write_line(fp_, .format(email)) _write_line(fp_, .format(instance)) _write_line(fp_, .format(partial)) _write_line(fp_, .format(runlevel)) _write_line(fp_, .format(idepend)) _write_line(fp_, .format(rdepend)) _write_line(fp_, .format(space)) _write_line(fp_, .format(setuid)) _write_line(fp_, .format(conflict)) _write_line(fp_, .format(action)) _write_line(fp_, .format(basedir)) return adminfile
Create a temporary adminfile based on the keyword arguments passed to pkg.install.
10,888
def job_get_log(object_id, input_params={}, always_retry=False, **kwargs): return DXHTTPRequest( % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /job-xxxx/getLog API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2FgetLog
10,889
def visit_Dict(self, node: AST, dfltChaining: bool = True) -> str: items = (.join((self.visit(key), self.visit(value))) for key, value in zip(node.keys, node.values)) return f"{{{.join(items)}}}"
Return dict representation of `node`s elements.
10,890
def locality_preserving_projections(self, coordinates, num_dims=None): X = np.atleast_2d(coordinates) L = self.laplacian(normed=True) u,s,_ = np.linalg.svd(X.T.dot(X)) Fplus = np.linalg.pinv(u * np.sqrt(s)) n, d = X.shape if n >= d: T = Fplus.dot(X.T.dot(L.dot(X))).dot(Fplus.T) else: T = Fplus.dot(X.T).dot(L.dot(X.dot(Fplus.T))) L = 0.5*(T+T.T) return _null_space(L, num_vecs=num_dims, overwrite=True)
Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).
10,891
def _init_go2ntpresent(go_ntsets, go_all, gosubdag): go2ntpresent = {} ntobj = namedtuple(, " ".join(nt.hdr for nt in go_ntsets)) for goid_all in go_all: present_true = [goid_all in nt.go_set for nt in go_ntsets] present_str = [ if tf else for tf in present_true] go2ntpresent[goid_all] = ntobj._make(present_str) goids_ancestors = set(gosubdag.go2obj).difference(go2ntpresent) assert not goids_ancestors.intersection(go_all) strmark = [ for _ in range(len(go_ntsets))] for goid in goids_ancestors: go2ntpresent[goid] = ntobj._make(strmark) return go2ntpresent
Mark all GO IDs with an X if present in the user GO list.
10,892
def _create_data_files_directory(symlink=False): current_directory = os.path.abspath(os.path.dirname(__file__)) etc_kytos = os.path.join(BASE_ENV, ETC_KYTOS) if not os.path.exists(etc_kytos): os.makedirs(etc_kytos) src = os.path.join(current_directory, KYTOS_SKEL_PATH) dst = os.path.join(BASE_ENV, KYTOS_SKEL_PATH) if os.path.exists(dst): if not os.listdir(dst): os.rmdir(dst) shutil.copytree(src, dst) else: if symlink: os.symlink(src, dst) else: shutil.copytree(src, dst)
Install data_files in the /etc directory.
10,893
def get_jids(): with _get_serv(ret=None, commit=True) as cur: sql = cur.execute(sql) data = cur.fetchall() ret = {} for jid, load in data: ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load)) return ret
Return a list of all job ids
10,894
def AddBlob(self, blob_hash, length, chunk_number): if len(blob_hash.AsBytes()) != self._HASH_SIZE: raise ValueError("Hash does not match expected size.") if not self.ChunkExists(chunk_number): if chunk_number > self.last_chunk: self.last_chunk = chunk_number self._dirty = True index_urn = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number) with aff4.FACTORY.Create( index_urn, aff4.AFF4MemoryStream, token=self.token) as fd: fd.write(blob_hash.AsBytes()) if chunk_number in self.chunk_cache: self.chunk_cache.Pop(chunk_number)
Add another blob to this image using its hash.
10,895
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc): for vevent in getattr(cal, , []): start = getattr(vevent, , None) end = getattr(vevent, , None) for node in (start, end): if node: dt = node.value if (isinstance(dt, datetime) and (not utc_only or dt.tzinfo == utc_tz)): if dt.tzinfo is None: dt = dt.replace(tzinfo = default) node.value = dt.astimezone(new_timezone)
Change the timezone of the specified component. Args: cal (Component): the component to change new_timezone (tzinfo): the timezone to change to default (tzinfo): a timezone to assume if the dtstart or dtend in cal doesn't have an existing timezone utc_only (bool): only convert dates that are in utc utc_tz (tzinfo): the tzinfo to compare to for UTC when processing utc_only=True
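The core replace/astimezone step, independent of the vobject plumbing above, can be sketched with zoneinfo (Python 3.9+); the names here are illustrative:

from datetime import datetime
from zoneinfo import ZoneInfo

def convert(dt, new_tz, default_tz):
    # Naive datetimes are assumed to be in `default_tz` before conversion.
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=default_tz)
    return dt.astimezone(new_tz)

utc = ZoneInfo("UTC")
print(convert(datetime(2021, 6, 1, 12, 0), ZoneInfo("America/New_York"), utc))
# 2021-06-01 08:00:00-04:00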
10,896
def determine_band_channel(kal_out): band = "" channel = "" tgt_freq = "" while band == "": for line in kal_out.splitlines(): if "Using " in line and " channel " in line: band = str(line.split()[1]) channel = str(line.split()[3]) tgt_freq = str(line.split()[4]).replace( "(", "").replace(")", "") if band == "": band = None return(band, channel, tgt_freq)
Return band, channel, target frequency from kal output.
10,897
def rolling_window(array, axis, window, center, fill_value): if isinstance(array, dask_array_type): return dask_array_ops.rolling_window( array, axis, window, center, fill_value) else: return nputils.rolling_window( array, axis, window, center, fill_value)
Make an ndarray with a rolling window of axis-th dimension. The rolling dimension will be placed at the last dimension.
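For plain NumPy inputs the same effect (a window placed on a trailing axis) is available via sliding_window_view (NumPy 1.20+); the wrapper above additionally handles dask arrays, centering and fill values:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

a = np.arange(6)                  # [0 1 2 3 4 5]
print(sliding_window_view(a, 3))
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]
#  [3 4 5]]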
10,898
def _ensure_programmer_executable(): updater_executable = shutil.which(, mode=os.F_OK) os.chmod(updater_executable, 0o777)
Find the lpc21isp executable and ensure it is executable
10,899
def tarball_files(work_dir, tar_name, uuid=None, files=None): with tarfile.open(os.path.join(work_dir, tar_name), ) as f_out: for fname in files: if uuid: f_out.add(os.path.join(work_dir, fname), arcname=uuid + + fname) else: f_out.add(os.path.join(work_dir, fname), arcname=fname)
Tars a group of files together into a tarball work_dir: str Current Working Directory tar_name: str Name of tarball uuid: str UUID to stamp files with files: str(s) List of filenames to place in the tarball from working directory