Dataset columns: Unnamed: 0 — int64 (values 0 to 389k) · code — string (lengths 26 to 79.6k) · docstring — string (lengths 1 to 46.9k)
23,800
def rgb_to_hsl(r, g, b):
    r = r or 0
    g = g or 0
    b = b or 0
    r /= 255
    g /= 255
    b /= 255
    max_ = max((r, g, b))
    min_ = min((r, g, b))
    d = max_ - min_
    if not d:
        h = 0
    elif r == max_:
        h = 60 * (g - b) / d
    elif g == max_:
        h = 60 * (b - r) / d + 120
    else:
        h = 60 * (r - g) / d + 240
    l = .5 * (max_ + min_)
    if not d:
        s = 0
    elif l < 0.5:
        s = .5 * d / l
    else:
        s = .5 * d / (1 - l)
    return tuple(map(normalize_float, (h % 360, s * 100, l * 100)))
Convert a color in r, g, b to a color in h, s, l
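A quick sanity check of the conversion above (runs under Python 3 division; normalize_float is assumed to be a simple rounding helper, which is not shown in the sample):

def normalize_float(f):
    return round(f, 10)

print(rgb_to_hsl(255, 0, 0))      # (0, 100.0, 50.0)    pure red
print(rgb_to_hsl(0, 255, 0))      # (120.0, 100.0, 50.0) pure green
print(rgb_to_hsl(128, 128, 128))  # hue 0, saturation 0: grey has no chroma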
23,801
def str_digit_to_int(chr):
    if chr in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
        n = int(chr)
    else:
        n = ord(chr)
        if n < 91:
            n -= 55
        else:
            n -= 61
    return n
Converts a string character to a decimal number. Where "A"->10, "B"->11, "C"->12, ...etc Args: chr(str): A single character in the form of a string. Returns: The integer value of the input string digit.
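Usage sketch: the helper composes naturally into a positional base converter (base_to_int below is a hypothetical wrapper, not part of the source):

def base_to_int(s, base):
    n = 0
    for ch in s:
        n = n * base + str_digit_to_int(ch)
    return n

print(str_digit_to_int("7"))   # 7
print(str_digit_to_int("A"))   # 10
print(str_digit_to_int("z"))   # 61
print(base_to_int("FF", 16))   # 255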
23,802
def _reshape_m_vecs(self):
    lst = []
    for n in xrange(0, self.nmax + 1):
        mlst = []
        if n <= self.mmax:
            nn = n
        else:
            nn = self.mmax
        for m in xrange(-nn, nn + 1):
            mlst.append(self.__getitem__((n, m)))
        lst.append(mlst)
    return lst
return list of arrays, each array represents a different n mode
23,803
def parse_args(args):
    parser = argparse.ArgumentParser(
        description="Build html reveal.js slides from markdown in docs/ dir")
    parser.add_argument(..., ..., help=..., action=...)
    parser.add_argument(..., help=..., action=...,
                        version=....format(ver=__version__))
    parser.add_argument(..., ..., help=..., default=BLOG_PATH)
    parser.add_argument(..., ..., help=..., default=DOCS_PATH)
    parser.add_argument(..., ..., help=..., default=...)
    return parser.parse_args(args)
Parse command line parameters :param args: command line parameters as list of strings :return: command line parameters as :obj:`argparse.Namespace`
23,804
def __parse_precipfc_data(data, timeframe):
    result = {AVERAGE: None, TOTAL: None, TIMEFRAME: None}
    log.debug("Precipitation data: %s", data)
    lines = data.splitlines()
    index = 1
    totalrain = 0
    numberoflines = 0
    nrlines = min(len(lines), round(float(timeframe) / 5) + 1)
    while index < nrlines:
        line = lines[index]
        log.debug("__parse_precipfc_data: line: %s", line)
        (val, key) = line.split("|")
        mmu = 10 ** (float((int(val) - 109)) / 32)
        totalrain = totalrain + float(mmu)
        numberoflines = numberoflines + 1
        index += 1
    if numberoflines > 0:
        result[AVERAGE] = round((totalrain / numberoflines), 2)
    else:
        result[AVERAGE] = 0
    result[TOTAL] = round(totalrain / 12, 2)
    result[TIMEFRAME] = timeframe
    return result
Parse the forecasted precipitation data.
23,805
def check_api_error(api_response):
    print(api_response)
    if type(api_response) == dict and ... in api_response and api_response[...] != 200:
        print("Server response code: %s" % api_response[...])
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError(..., response=api_response)
    if type(api_response) == dict and (api_response.get(...) == ...):
        if ... in api_response.get(...):
            raise DatabaseError(message=..., error=api_response)
        if (... in api_response.get(...)
                and ... in api_response.get(...)):
            msg = ...
            raise ProjectAlreadyExists(message=msg, error=api_response)
        if ... in api_response.get(...):
            raise ProjectNotFound(message=..., error=api_response)
        if ... in api_response.get(...):
            raise TaskNotFound(message=..., error=api_response)
    else:
        print("Server response: %s" % api_response)
        raise exceptions.HTTPError(..., response=api_response)
Check if returned API response contains an error.
23,806
def checkout(self):
    tgt_ref = self.get_checkout_target()
    try:
        head_sha = self.repo.rev_parse(...).hexsha
    except Exception:
        head_sha = None
    # Loop over candidate refs; the bare "continue" and trailing error log
    # in the sample imply this loop, whose ref list was lost in extraction.
    for checkout_ref in (tgt_ref,):
        try:
            with self.gen_lock(lock_type=...):
                self.repo.git.checkout(checkout_ref)
                log.debug(..., self.role, self.id, checkout_ref)
        except GitLockError as exc:
            if exc.errno == errno.EEXIST:
                raise GitLockError(
                    exc.errno,
                    ....format(self.role, self.id)
                )
            else:
                log.error(..., exc.errno, self.role, self.id)
                return None
        except Exception:
            continue
        return self.check_root()
    log.error(..., tgt_ref, self.role, self.id)
    return None
Checkout the configured branch/tag. We catch an "Exception" class here instead of a specific exception class because the exceptions raised by GitPython when running these functions vary in different versions of GitPython.
23,807
def _sub16(ins):
    op1, op2 = tuple(ins.quad[2:4])
    if is_int(op2):
        op = int16(op2)
        output = _16bit_oper(op1)
        if op == 0:
            output.append(...)
            return output
        if op < 4:
            output.extend([...] * op)
            output.append(...)
            return output
        if op > 65531:  # -4 .. -1
            output.extend([...] * (0x10000 - op))
            output.append(...)
            return output
        output.append(... % op)
        output.append(...)
        output.append(...)
        return output
    if op2[0] == ...:
        rev = True
        op1, op2 = op2, op1
    else:
        rev = False
    output = _16bit_oper(op1, op2, rev)
    output.append(...)
    output.append(...)
    output.append(...)
    return output
Pops last 2 words from the stack and subtract them. Then push the result onto the stack. Top of the stack is subtracted Top -1 Optimizations: * If 2nd op is ZERO, then do NOTHING: A - 0 = A * If any of the operands is < 4, then DEC is used * If any of the operands is > 65531 (-4..-1), then INC is used
23,808
def send(self, content=None, *, wait=False, username=None, avatar_url=None,
         tts=False, file=None, files=None, embed=None, embeds=None):
    # Payload keys below were elided in the sample and restored from the
    # keyword names they mirror.
    payload = {}
    if files is not None and file is not None:
        raise InvalidArgument('Cannot mix file and files keyword arguments.')
    if embeds is not None and embed is not None:
        raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')
    if embeds is not None:
        if len(embeds) > 10:
            raise InvalidArgument('embeds has a maximum of 10 elements.')
        payload['embeds'] = [e.to_dict() for e in embeds]
    if embed is not None:
        payload['embeds'] = [embed.to_dict()]
    if content is not None:
        payload['content'] = str(content)
    payload['tts'] = tts
    if avatar_url:
        payload['avatar_url'] = str(avatar_url)
    if username:
        payload['username'] = username
    return self._adapter.execute_webhook(wait=wait, file=file, files=files,
                                         payload=payload)
|maybecoro| Sends a message using the webhook.

If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is not a coroutine.

The content must be a type that can convert to a string through ``str(content)``.

To upload a single file, the ``file`` parameter should be used with a single :class:`File` object.

If the ``embed`` parameter is provided, it must be of type :class:`Embed` and it must be a rich embed type. You cannot mix the ``embed`` parameter with the ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.

Parameters
------------
content: :class:`str`
    The content of the message to send.
wait: :class:`bool`
    Whether the server should wait before sending a response. This essentially means that the return type of this function changes from ``None`` to a :class:`Message` if set to ``True``.
username: :class:`str`
    The username to send with this message. If no username is provided then the default username for the webhook is used.
avatar_url: Union[:class:`str`, :class:`Asset`]
    The avatar URL to send with this message. If no avatar URL is provided then the default avatar for the webhook is used.
tts: :class:`bool`
    Indicates if the message should be sent using text-to-speech.
file: :class:`File`
    The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
    A list of files to send with the content. This cannot be mixed with the ``file`` parameter.
embed: :class:`Embed`
    The rich embed for the content to send. This cannot be mixed with ``embeds`` parameter.
embeds: List[:class:`Embed`]
    A list of embeds to send with the content. Maximum of 10. This cannot be mixed with the ``embed`` parameter.

Raises
--------
HTTPException
    Sending the message failed.
NotFound
    This webhook was not found.
Forbidden
    The authorization token for the webhook is incorrect.
InvalidArgument
    You specified both ``embed`` and ``embeds`` or the length of ``embeds`` was invalid.

Returns
---------
Optional[:class:`Message`]
    The message that was sent.
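A minimal usage sketch for the synchronous adapter (discord.py 1.x style; the webhook id and token below are placeholders):

from discord import Webhook, RequestsWebhookAdapter, Embed

webhook = Webhook.partial(123456789, 'webhook-token',
                          adapter=RequestsWebhookAdapter())
webhook.send('Hello World', username='Example Bot')
# With wait=True the call returns the created Message.
msg = webhook.send('Status update', wait=True,
                   embed=Embed(title='Status', description='All good'))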
23,809
def build(ctx, less=False, docs=False, js=False, force=False):
    specified = any([less, docs, js])
    buildall = not specified
    if buildall or less:
        less_fname = ctx.pkg.source_less / ctx.pkg.name + ...
        if less_fname.exists():
            lessc.LessRule(
                ctx,
                src=...,
                dst=...,
                force=force
            )
        elif less:
            print("WARNING: build --less specified, but no file at:", less_fname)
    if buildall or docs:
        if WARN_ABOUT_SETTINGS:
            warnings.warn(
                "autodoc might need a dummy settings file in the root of "
                "your package. Since it runs in a separate process you cannot"
                "use settings.configure()"
            )
        doctools.build(ctx, force=force)
    if buildall or js:
        build_js(ctx, force)
    if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
        collectstatic(ctx, DJANGO_SETTINGS_MODULE)
Build everything and collectstatic.
23,810
def parse_string(self):
    word = ''
    if self.prior_delim:
        delim = self.prior_delim
        self.prior_delim = None
    else:
        delim = self.char
        word += self.char
        self.update_chars()
    while True:
        if self.char == delim:
            # A doubled delimiter is an escaped quote inside the string.
            self.update_chars()
            if self.char == delim:
                word += 2 * delim
                self.update_chars()
            else:
                word += delim
                break
        elif self.char == '\n':  # string continues on the next line
            self.prior_delim = delim
            break
        else:
            word += self.char
            self.update_chars()
    return word
Tokenize a Fortran string.
23,811
def query(params):
    r = requests_get(QUERY_URL, verify=True)
    return HospitalCollection(r.json(), params)
`params` is a city name or a city name + hospital name.

CLI:

1. query all putian hospitals in a city:

   $ iquery -p 南京
   +------+
   | 南京 |
   +------+
   | ...  |
   +------+
   | ...  |
   +------+
   ...

2. query if the hospital in the city is putian series, you can only input hospital's short name:

   $ iquery -p 南京 曙光
   +------------+
   |南京曙光医院|
   +------------+
   |    True    |
   +------------+
23,812
def get_summary_and_description(self):
    summary = self.get_summary()
    _, description = super().get_summary_and_description()
    return summary, description
Compat: drf-yasg 1.12+
23,813
def simplify_recursive(typ):
    if isinstance(typ, UnionType):
        return combine_types(typ.items)
    elif isinstance(typ, ClassType):
        simplified = ClassType(typ.name,
                               [simplify_recursive(arg) for arg in typ.args])
        args = simplified.args
        if (simplified.name == ... and len(args) == 2
                and isinstance(args[0], ClassType)
                and args[0].name in (..., ...)
                and isinstance(args[1], UnionType)
                and not is_optional(args[1])):
            return simplified
    elif isinstance(typ, TupleType):
        return TupleType([simplify_recursive(item) for item in typ.items])
    return typ
Simplify all components of a type.
23,814
def get_port_channel_detail_output_lacp_aggr_member_interface_name(self, **kwargs):
    config = ET.Element("config")
    get_port_channel_detail = ET.Element("get_port_channel_detail")
    config = get_port_channel_detail
    output = ET.SubElement(get_port_channel_detail, "output")
    lacp = ET.SubElement(output, "lacp")
    aggr_member = ET.SubElement(lacp, "aggr-member")
    interface_name = ET.SubElement(aggr_member, "interface-name")
    interface_name.text = kwargs.pop('interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
23,815
def _load_ini(path):
    cfg = RawConfigParser()
    with codecs.open(path, mode="r", encoding="utf-8") as f:
        try:
            cfg.read_file(f)
        except AttributeError:
            cfg.readfp(f)
    return cfg
Load an INI file from *path*.
23,816
def particle_clusters(
        particle_locations, particle_weights=None,
        eps=0.5, min_particles=5, metric='euclidean',
        weighted=False, w_pow=0.5, quiet=True):
    if weighted and particle_weights is None:
        raise ValueError("Weights must be specified for weighted clustering.")
    new_weights = np.empty(particle_weights.shape)
    new_locs = np.empty(particle_locations.shape)
    if weighted:
        M = sklearn.metrics.pairwise.pairwise_distances(particle_locations,
                                                        metric=metric)
        M = metrics.weighted_pairwise_distances(M, particle_weights, w_pow=w_pow)
        # A precomputed distance matrix is passed to DBSCAN here; the
        # elided metric argument can only sensibly be 'precomputed'.
        clusterer = sklearn.cluster.DBSCAN(min_samples=min_particles, eps=eps,
                                           metric='precomputed')
        cluster_labels = clusterer.fit_predict(M)
    else:
        clusterer = sklearn.cluster.DBSCAN(min_samples=min_particles, eps=eps,
                                           metric=metric)
        cluster_labels = clusterer.fit_predict(particle_locations)
    is_noise = -1 in cluster_labels
    n_clusters = len(set(cluster_labels)) - (1 if is_noise else 0)
    n_noise = np.sum(cluster_labels == -1)
    if n_noise / particle_weights.shape[0] >= 0.1:
        warnings.warn("More than 10% of the particles were classified as "
                      "NOISE. Consider increasing the neighborhood size "
                      "``eps``.", ResamplerWarning)
    if not quiet:
        print("[Clustering] DBSCAN identified {} cluster{}. "
              "{} particles identified as NOISE.".format(
                  n_clusters, "s" if n_clusters > 1 else "", n_noise))
    for idx_cluster in range(-1 if is_noise else 0, n_clusters):
        this_cluster = cluster_labels == idx_cluster
        yield idx_cluster, this_cluster
Yields an iterator onto tuples ``(cluster_label, cluster_particles)``, where ``cluster_label`` is an `int` identifying the cluster (or ``NOISE`` for the particles lying outside of all clusters), and where ``cluster_particles`` is an array of ``dtype`` `bool` specifying the indices of all particles in that cluster. That is, particle ``i`` is in the cluster if ``cluster_particles[i] == True``.
23,817
def _run_introspection(self, runtime=..., whitelist=[], verbose=False):
    found_objects = set()
    try:
        strace = subprocess.Popen([..., runtime],
                                  stderr=subprocess.PIPE,
                                  stdout=subprocess.PIPE)
        (_, stderr) = strace.communicate()
        opened_objects = set()
        for line in stderr.split(...):
            if ... in line and ... not in line:
                start = line.index(...)
                end = line.index(..., start + 1)
                opened_objects.add(line[start + 1:end])
        for obj in opened_objects:
            for wl in whitelist:
                m = re.match(... + wl + ..., obj)
                if m:
                    found_objects.add(obj)
                    if verbose:
                        print(....format(wl, obj))
                    continue
    except Exception as e:
        print(e)
    return found_objects
Figure out which objects are opened by a test binary and are matched by the white list. :param runtime: The binary to run. :type runtime: str :param whitelist: A list of regular expressions describing acceptable library names :type whitelist: [str]
23,818
def save_translations(self, *args, **kwargs):
    local_caches = self._translations_cache.copy()
    for meta in self._parler_meta:
        local_cache = local_caches[meta.model]
        translations = list(local_cache.values())
        for translation in translations:
            if is_missing(translation):
                continue
            self.save_translation(translation, *args, **kwargs)
The method to save all translations. This can be overwritten to implement any custom additions. This method calls :func:`save_translation` for every fetched language. :param args: Any custom arguments to pass to :func:`save`. :param kwargs: Any custom arguments to pass to :func:`save`.
23,819
def room_members(self, stream_id):
    req_hook = ... + str(stream_id) + ...
    req_args = None
    status_code, response = self.__rest__.GET_query(req_hook, req_args)
    self.logger.debug(... % (status_code, response))
    return status_code, response
get list of room members
23,820
def get_timespan(name):
    if name not in data_quants.keys():
        print("That name is currently not in pytplot")
        return
    print("Start Time: " + tplot_utilities.int_to_str(data_quants[name].trange[0]))
    print("End Time: " + tplot_utilities.int_to_str(data_quants[name].trange[1]))
    return (data_quants[name].trange[0], data_quants[name].trange[1])
This function extracts the time span from the Tplot Variables stored in memory.

Parameters:
    name : str
        Name of the tplot variable

Returns:
    time_begin : float
        The beginning of the time series
    time_end : float
        The end of the time series

Examples:
    >>> # Retrieve the time span from Variable 1
    >>> import pytplot
    >>> x_data = [1, 2, 3, 4, 5]
    >>> y_data = [1, 2, 3, 4, 5]
    >>> pytplot.store_data("Variable1", data={'x': x_data, 'y': y_data})
    >>> time1, time2 = pytplot.get_timespan("Variable1")
23,821
def _parse_date_time_time_zone(self, date_time_time_zone):
    if date_time_time_zone is None:
        return None
    local_tz = self.protocol.timezone
    if isinstance(date_time_time_zone, dict):
        # The 'timeZone' and 'dateTime' keys are inferred from the
        # dateTimeTimeZone resource described in the docstring.
        try:
            timezone = pytz.timezone(
                get_iana_tz(date_time_time_zone.get(self._cc('timeZone'), 'UTC')))
        except pytz.UnknownTimeZoneError:
            timezone = local_tz
        date_time = date_time_time_zone.get(self._cc('dateTime'), None)
        try:
            date_time = timezone.localize(parse(date_time)) if date_time else None
        except OverflowError as e:
            log.debug(....format(date_time_time_zone, str(e)))
            date_time = None
        if date_time and timezone != local_tz:
            date_time = date_time.astimezone(local_tz)
    else:
        try:
            date_time = (local_tz.localize(parse(date_time_time_zone))
                         if date_time_time_zone else None)
        except Exception as e:
            log.debug(....format(date_time_time_zone, str(e)))
            date_time = None
    return date_time
Parses and convert to protocol timezone a dateTimeTimeZone resource This resource is a dict with a date time and a windows timezone This is a common structure on Microsoft apis so it's included here.
23,822
def try_get_department(department_or_code):
    try:
        value = take_first_department_code(department_or_code)
    except AssertionError:
        value = department_or_code
    if value in DEPARTMENT_MAPPING:
        value = DEPARTMENT_MAPPING[value]
    return value
Try to take the first department code, or fall back to string as passed
23,823
def add_subcomponent(self, name):
    if self.is_unknown() and is_base_datatype(self.datatype):
        raise ChildNotValid(name, self)
    return self.children.create_element(name)
Create an instance of :class:`SubComponent <hl7apy.core.SubComponent>` having the given name

:param name: the name of the subcomponent to be created (e.g. CE_1)
:return: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`

>>> c = Component(datatype='CE')
>>> ce_1 = c.add_subcomponent('CE_1')
>>> print(ce_1)
<SubComponent CE_1>
>>> print(ce_1 in c.children)
True
23,824
def execute(tgt, fun, arg=(), timeout=None, tgt_type='glob', ret=..., jid=..., kwarg=None, **kwargs):
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        ret = client.cmd(tgt, fun,
                         arg=arg,
                         timeout=timeout or __opts__['timeout'],
                         tgt_type=tgt_type,
                         ret=ret,
                         jid=jid,
                         kwarg=kwarg,
                         **kwargs)
    except SaltClientError as client_error:
        log.error(..., fun, tgt, tgt_type)
        log.error(client_error)
        return {}
    return ret
.. versionadded:: 2017.7.0

Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``. Parameter ``fun`` is the name of execution module function to call.

This function should mainly be used as a helper for runner modules, in order to avoid redundant code. For example, when inside a runner one needs to execute a certain function on arbitrary groups of minions, only has to:

.. code-block:: python

    ret1 = __salt__['salt.execute']('*', 'mod.fun')
    ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup')

It can also be used to schedule jobs directly on the master, for example:

.. code-block:: yaml

    schedule:
        collect_bgp_stats:
            function: salt.execute
            args:
                - edge-routers
                - bgp.neighbors
            kwargs:
                tgt_type: nodegroup
            days: 1
            returner: redis
23,825
def generate_component_annotation_miriam_match(elements, component, db):
    def is_faulty(annotation, key, pattern):
        if key not in annotation:
            return False
        test = annotation[key]
        if isinstance(test, native_str):
            return pattern.match(test) is None
        else:
            return any(pattern.match(elem) is None for elem in test)

    pattern = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }[component][db]
    return [elem for elem in elements
            if is_faulty(elem.annotation, db, pattern)]
Tabulate which MIRIAM databases the element's annotation match. If the relevant MIRIAM identifier is not in an element's annotation it is ignored. Parameters ---------- elements : list Elements of a model, either metabolites or reactions. component : {"metabolites", "reactions"} A string denoting a type of ``cobra.Model`` component. db : str One of the MIRIAM database identifiers. Returns ------- list The components whose annotation does not match the pattern for the MIRIAM database.
23,826
def add_overlay_to_slice_file(
        self,
        filename,
        overlay,
        i_overlay,
        filename_out=None
):
    if filename_out is None:
        filename_out = filename
    filename = op.expanduser(filename)
    data = dicom.read_file(filename)
    data = self.encode_overlay_slice(data, overlay, i_overlay)
    data.save_as(filename_out)
Function adds overlay to existing file.
23,827
def fetch_twitter_lists_for_user_ids_generator(twitter_app_key, twitter_app_secret, user_id_list):
    twitter = login(twitter_app_key, twitter_app_secret)
    get_list_memberships_counter = 0
    get_list_memberships_time_window_start = time.perf_counter()
    for user_twitter_id in user_id_list:
        try:
            twitter_lists_list, get_list_memberships_counter, get_list_memberships_time_window_start \
                = safe_twitter_request_handler(twitter_api_func=twitter.get_list_memberships,
                                               call_rate_limit=15,
                                               call_counter=get_list_memberships_counter,
                                               time_window_start=get_list_memberships_time_window_start,
                                               max_retries=5,
                                               wait_period=2,
                                               user_id=user_twitter_id,
                                               count=500,
                                               cursor=-1)
            yield user_twitter_id, twitter_lists_list
        except twython.TwythonError:
            yield user_twitter_id, None
        except URLError:
            yield user_twitter_id, None
        except BadStatusLine:
            yield user_twitter_id, None
Collects at most 500 Twitter lists for each user from an input list of Twitter user ids. Inputs: - twitter_app_key: What is says on the tin. - twitter_app_secret: Ditto. - user_id_list: A python list of Twitter user ids. Yields: - user_twitter_id: A Twitter user id. - twitter_lists_list: A python list containing Twitter lists in dictionary (json) format.
23,828
def search(self, keyword):
    params = {
        "source": "map",
        "description": keyword
    }
    data = self._request(ENDPOINTS[...], params)
    data[...] = [res for res in data[...] if isinstance(res, dict)]
    return data
Return all buildings related to the provided query. :param keyword: The keyword for your map search >>> results = n.search('Harrison')
23,829
def send_static_message(sender, message):
    dispatcher.send(
        signal=STATIC_MESSAGE_SIGNAL,
        sender=sender,
        message=message)
Send a static message to the listeners. Static messages represents a whole new message. Usually it will replace the previous message. .. versionadded:: 3.3 :param sender: The sender. :type sender: object :param message: An instance of our rich message class. :type message: safe.messaging.Message
23,830
def _parse_cod_segment(cls, fptr):
    offset = fptr.tell() - 2
    read_buffer = fptr.read(2)
    # Big-endian unsigned short, per the JPEG 2000 codestream layout;
    # the unpack format strings were elided in the sample.
    length, = struct.unpack('>H', read_buffer)
    read_buffer = fptr.read(length - 2)
    lst = struct.unpack_from('>BBHBBBBBB', read_buffer, offset=0)
    scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform = lst
    if len(read_buffer) > 10:
        precinct_size = _parse_precinct_size(read_buffer[10:])
    else:
        precinct_size = None
    sop = (scod & 2) > 0
    eph = (scod & 4) > 0
    if sop or eph:
        cls._parse_tpart_flag = True
    else:
        cls._parse_tpart_flag = False
    pargs = (scod, prog, nlayers, mct, nr, xcb, ycb, cstyle, xform,
             precinct_size)
    return CODsegment(*pargs, length=length, offset=offset)
Parse the COD segment. Parameters ---------- fptr : file Open file object. Returns ------- CODSegment The current COD segment.
23,831
def get(self, option, default=undefined, cast=undefined):
    if option in self.repository:
        value = self.repository.get(option)
    else:
        value = default
    if isinstance(value, Undefined):
        raise UndefinedValueError(... % option)
    if isinstance(cast, Undefined):
        cast = lambda v: v
    elif cast is bool:
        cast = self._cast_boolean
    return cast(value)
Return the value for option or default if defined.
23,832
def record(self, frame_parameters: dict=None,
           channels_enabled: typing.List[bool]=None,
           timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    if frame_parameters:
        self.__hardware_source.set_record_frame_parameters(
            self.__hardware_source.get_frame_parameters_from_dict(frame_parameters))
    if channels_enabled is not None:
        for channel_index, channel_enabled in enumerate(channels_enabled):
            self.__hardware_source.set_channel_enabled(channel_index, channel_enabled)
    self.__hardware_source.start_recording()
    return self.__hardware_source.get_next_xdatas_to_finish(timeout)
Record data and return a list of data_and_metadata objects. .. versionadded:: 1.0 :param frame_parameters: The frame parameters for the record. Pass None for defaults. :type frame_parameters: :py:class:`FrameParameters` :param channels_enabled: The enabled channels for the record. Pass None for defaults. :type channels_enabled: List of booleans. :param timeout: The timeout in seconds. Pass None to use default. :return: The list of data and metadata items that were read. :rtype: list of :py:class:`DataAndMetadata`
23,833
def do(self, params):
    N = self.repetitions()
    e = self.experiment()
    results = []
    for i in range(N):
        res = e.run()
        if not isinstance(res, list):
            res = [res]
        for r in res:
            r[Experiment.METADATA][self.I] = i
            r[Experiment.METADATA][self.REPETITIONS] = N
        results.extend(res)
    return results
Perform the number of repetitions we want. The results returned will be a list of the results dicts generated by the repeated experiments. The metedata for each experiment will include an entry :attr:`RepeatedExperiment.REPETITIONS` for the number of repetitions that occurred (which will be the length of this list) and an entry :attr:`RepeatedExperiment.I` for the index of the result in that sequence. :param params: the parameters to the experiment :returns: a list of result dicts
23,834
def zsum(s, *args, **kwargs):
    return 0 if s.empty else s.sum(*args, **kwargs)
pandas 0.21.0 changes sum() behavior so that the result of applying sum over an empty DataFrame is NaN. Meant to be set as pd.Series.zsum = zsum.
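A minimal sketch of the monkey-patch the note above describes:

import pandas as pd

pd.Series.zsum = zsum

print(pd.Series([1.0, 2.0]).zsum())       # 3.0
print(pd.Series([], dtype=float).zsum())  # 0, never NaN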
23,835
def dict_contents(self, use_dict=None, as_class=dict):
    if _debug:
        APDU._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
    if use_dict is None:
        use_dict = as_class()
    self.apci_contents(use_dict=use_dict, as_class=as_class)
    self.apdu_contents(use_dict=use_dict, as_class=as_class)
    return use_dict
Return the contents of an object as a dict.
23,836
def backup(self, id=None, src=None, timestamp=None):
    logging.basicConfig()
    log = logger.get_logger()
    log.logger.setLevel(logging.DEBUG)
    conf = LunrConfig.from_storage_conf()
    timestamp = timestamp or time()
    volume = VolumeHelper(conf)
    backup = BackupHelper(conf)
    try:
        snapshot = volume.create_snapshot(src, id, timestamp)
        print("Created snap-shot: ", pprint(snapshot))
        with self.timeit(snapshot[...]):
            print("Starting Backup")
            backup.save(snapshot, id)
    finally:
        if 'snapshot' in locals():
            self._remove_volume(snapshot[...])
This runs a backup job outside of the storage api, which is useful for performance testing backups
23,837
def set_split_extents_by_tile_shape(self):
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
        self.split_ends[i] = _np.zeros_like(self.split_begs[i])
        self.split_ends[i][0:-1] = self.split_begs[i][1:]
        self.split_ends[i][-1] = self.array_shape[i]
Sets split shape :attr:`split_shape` and split extents (:attr:`split_begs` and :attr:`split_ends`) from value of :attr:`tile_shape`.
23,838
def index_table(self, axis=None, baseline=None, prune=False):
    proportions = self.proportions(axis=axis)
    baseline = (
        baseline if baseline is not None else self._prepare_index_baseline(axis)
    )
    if (
        axis == 0
        and len(baseline.shape) <= 1
        and self.ndim == len(self.get_shape())
    ):
        baseline = baseline[:, None]
    indexes = proportions / baseline * 100
    return self._apply_pruning_mask(indexes) if prune else indexes
Return index percentages for a given axis and baseline. The index values represent the difference of the percentages to the corresponding baseline values. The baseline values are the univariate percentages of the corresponding variable.
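A toy illustration of the computation (values invented): each cell proportion is divided by its baseline and scaled to 100.

import numpy as np

proportions = np.array([[0.4, 0.6],
                        [0.2, 0.8]])
baseline = np.array([0.3, 0.7])   # univariate percentages per column
print(proportions / baseline * 100)
# [[133.33  85.71]
#  [ 66.67 114.29]]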
23,839
def login(self):
    pg = self.getPage("http://www.neopets.com")
    form = pg.form(action="/login.phtml")
    # Form field names inferred from the parameters they carry.
    form.update({'username': self.username, 'password': self.password})
    pg = form.submit()
    logging.getLogger("neolib.user").info("Login check", {...: pg})
    return self.username in pg.content
Logs the user in, returns the result Returns bool - Whether or not the user logged in successfully
23,840
def _check_vpcs_version(self):
    try:
        output = yield from subprocess_check_output(self._vpcs_path(), "-v", cwd=self.working_dir)
        match = re.search("Welcome to Virtual PC Simulator, version ([0-9a-z\.]+)", output)
        if match:
            version = match.group(1)
            self._vpcs_version = parse_version(version)
            if self._vpcs_version < parse_version("0.6.1"):
                raise VPCSError("VPCS executable version must be >= 0.6.1 but not a 0.8")
        else:
            raise VPCSError("Could not determine the VPCS version for {}".format(self._vpcs_path()))
    except (OSError, subprocess.SubprocessError) as e:
        raise VPCSError("Error while looking for the VPCS version: {}".format(e))
Checks if the VPCS executable version is >= 0.8b or == 0.6.1.
23,841
def get_accessibles(request, roles=None):
    results = []
    # The session key was elided in the sample; 'roles' matches the
    # role-name mapping being iterated.
    for role_name, organizations in six.iteritems(
            request.session.get('roles', {})):
        if roles is None or role_name in roles:
            results += organizations
    return results
Returns the list of *dictionnaries* for which the accounts are accessibles by ``request.user`` filtered by ``roles`` if present.
23,842
def plot_dop(bands, int_max, dop, hund_cu, name):
    data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name)
    ssplt.plot_curves_z(data, name)
Plot of Quasiparticle weight for N degenerate bands under selected doping shows transition only at half-fill the rest are metallic states
23,843
def collapse_pair(graph, survivor: BaseEntity, victim: BaseEntity) -> None:
    graph.add_edges_from(
        (survivor, successor, key, data)
        for _, successor, key, data in graph.out_edges(victim, keys=True, data=True)
        if successor != survivor
    )
    graph.add_edges_from(
        (predecessor, survivor, key, data)
        for predecessor, _, key, data in graph.in_edges(victim, keys=True, data=True)
        if predecessor != survivor
    )
    graph.remove_node(victim)
Rewire all edges from the synonymous node to the survivor node, then deletes the synonymous node. Does not keep edges between the two nodes. :param pybel.BELGraph graph: A BEL graph :param survivor: The BEL node to collapse all edges on the synonym to :param victim: The BEL node to collapse into the surviving node
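A sketch of the rewiring behaviour, using a plain networkx MultiDiGraph as a stand-in for pybel.BELGraph (node names invented):

import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('A', 'synonym', relation='increases')
g.add_edge('upstream', 'synonym', relation='decreases')

collapse_pair(g, survivor='A', victim='synonym')
print(list(g.edges()))   # [('upstream', 'A')] -- in-edge rewired; the A->synonym edge is dropped with the victim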
23,844
def change_jira_status(test_key, test_status, test_comment, test_attachments):
    logger = logging.getLogger(__name__)
    if not execution_url:
        logger.warning("Test Case %s can not be updated: execution_url is not configured", test_key)
        return
    logger.info("Updating Test Case %s in Jira with status %s", test_key, test_status)
    composed_comments = comments
    if test_comment:
        composed_comments = '{}\n{}'.format(comments, test_comment) if comments else test_comment
    payload = {...: test_key, ...: test_status, ...: summary_prefix,
               ...: labels, ...: composed_comments, ...: fix_version,
               ...: build}
    if only_if_changes:
        payload[...] = ...
    try:
        if test_attachments and len(test_attachments) > 0:
            files = dict()
            for index in range(len(test_attachments)):
                files[....format(index)] = open(test_attachments[index], 'rb')
        else:
            files = None
        response = requests.post(execution_url, data=payload, files=files)
    except Exception as e:
        logger.warning("Error updating Test Case %s: %s", test_key, e)
        return
    if response.status_code >= 400:
        logger.warning("Error updating Test Case %s: [%s] %s", test_key,
                       response.status_code, get_error_message(response.content))
    else:
        logger.debug("%s", response.content.decode().splitlines()[0])
Update test status in Jira :param test_key: test case key in Jira :param test_status: test case status :param test_comment: test case comments :param test_attachments: test case attachments
23,845
def get_score(self, terms):
    assert isinstance(terms, list) or isinstance(terms, tuple)
    score_li = np.asarray([self._get_score(t) for t in terms])
    s_pos = np.sum(score_li[score_li > 0])
    s_neg = -np.sum(score_li[score_li < 0])
    s_pol = (s_pos - s_neg) * 1.0 / ((s_pos + s_neg) + self.EPSILON)
    s_sub = (s_pos + s_neg) * 1.0 / (len(score_li) + self.EPSILON)
    return {self.TAG_POS: s_pos,
            self.TAG_NEG: s_neg,
            self.TAG_POL: s_pol,
            self.TAG_SUB: s_sub}
Get score for a list of terms. :type terms: list :param terms: A list of terms to be analyzed. :returns: dict
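Hypothetical call, assuming _get_score returns a signed score per term and the TAG_* constants are plain strings:

scores = analyzer.get_score(['good', 'solid', 'dull'])
# e.g. {'pos': 1.5, 'neg': 0.5, 'pol': 0.5, 'sub': 0.67}
# pol = (pos - neg) / (pos + neg); sub = (pos + neg) / n_terms (both epsilon-smoothed)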
23,846
def count_matches(self):
    try:
        self.fn = self.fo.name
        rows = self.file_rows(self.fo)
        self.fo.seek(0)
    except AttributeError:
        with open(self.fn) as fo:
            rows = self.file_rows(fo)
    matches_p = []
    matches_c = []
    for line in rows:
        cnt = len(re.findall(DATPRX, line))
        matches_p.append(cnt)
        cnt = len(re.findall(DATCRX, line))
        matches_c.append(cnt)
    self.rows = rows
    self.matches_p = matches_p
    self.matches_c = matches_c
Set the matches_p, matches_c and rows attributes.
23,847
def get_hook(hook_name):
    if not pkg_resources.resource_exists(__name__, hook_name):
        raise HookNotFoundError
    return pkg_resources.resource_string(__name__, hook_name)
Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError
23,848
def init_parser():
    usage = "usage: %prog -u user -s secret -n name [-l label] " \
            "[-t title] [-c callback] [TEXT]"
    parser = OptionParser(usage, version="%prog " + notifo.__version__)
    parser.add_option("-u", "--user", action="store", dest="user",
                      help="your notifo username")
    parser.add_option("-s", "--secret", action="store", dest="secret",
                      help="your notifo API secret")
    parser.add_option("-n", "--name", action="store", dest="name",
                      help="recipient for the notification")
    parser.add_option("-l", "--label", action="store", dest="label",
                      help="label for the notification")
    parser.add_option("-t", "--title", action="store", dest="title",
                      help="title of the notification")
    parser.add_option("-c", "--callback", action="store", dest="callback",
                      help="callback URL to call")
    parser.add_option("-m", "--message", action="store_true", dest="message",
                      default=False, help="send message instead of notification")
    (options, args) = parser.parse_args()
    return (parser, options, args)
function to init option parser
23,849
def calculate_ellipse_description(covariance, scale=2.0):
    eigh_values, eigh_vectors = numpy.linalg.eigh(covariance)
    order = eigh_values.argsort()[::-1]
    values, vectors = eigh_values[order], eigh_vectors[order]
    angle = numpy.degrees(numpy.arctan2(*vectors[:, 0][::-1]))
    if 0.0 in values:
        return 0, 0, 0
    width, height = 2.0 * scale * numpy.sqrt(values)
    return angle, width, height
! @brief Calculates description of ellipse using covariance matrix. @param[in] covariance (numpy.array): Covariance matrix for which ellipse area should be calculated. @param[in] scale (float): Scale of the ellipse. @return (float, float, float) Return ellipse description: angle, width, height.
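Worked example: a diagonal covariance yields an axis-aligned ellipse whose half-axes are scale*sqrt(eigenvalue):

import numpy

covariance = numpy.array([[4.0, 0.0],
                          [0.0, 1.0]])
angle, width, height = calculate_ellipse_description(covariance)
print(angle, width, height)   # 0.0, 8.0, 4.0  (width = 2*2*sqrt(4), height = 2*2*sqrt(1))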
23,850
def one(prompt, *args, **kwargs):
    indicator = ...
    if sys.version_info < (3, 0):
        indicator = ...

    def go_back(picker):
        return None, -1

    options, verbose_options = prepare_options(args)
    idx = kwargs.get(..., 0)
    picker = Picker(verbose_options, title=prompt, indicator=indicator,
                    default_index=idx)
    picker.register_custom_handler(ord(...), go_back)
    picker.register_custom_handler(curses.KEY_LEFT, go_back)
    with stdout_redirected(sys.stderr):
        option, index = picker.start()
    if index == -1:
        raise QuestionnaireGoBack
    if kwargs.get(..., False):
        return index
    return options[index]
Instantiates a picker, registers custom handlers for going back, and starts the picker.
23,851
def average_gradients(tower_gradients):
    # The per-variable loop below is reconstructed from the surviving body
    # and the docstring; this is the standard multi-GPU gradient-averaging
    # pattern, with each tower's gradient stacked along a new 'tower'
    # dimension before being reduced.
    average_grads = []
    for grad_and_vars in zip(*tower_gradients):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        grad_and_var = (grad, grad_and_vars[0][1])
        average_grads.append(grad_and_var)
    return average_grads
r''' A routine for computing each variable's average of the gradients obtained from the GPUs. Note also that this code acts as a synchronization point as it requires all GPUs to be finished with their mini-batch before it can run to completion.
23,852
def get_attributes(var):
    is_valid = partial(is_valid_in_template, var)
    return list(filter(is_valid, dir(var)))
Given a variable, return the list of attributes that are available inside of a template
23,853
def list_dataset_uris(cls, base_uri, config_path):
    parsed_uri = generous_parse_uri(base_uri)
    uri_list = []
    path = parsed_uri.path
    if IS_WINDOWS:
        path = unix_to_windows_path(parsed_uri.path, parsed_uri.netloc)
    for d in os.listdir(path):
        dir_path = os.path.join(path, d)
        if not os.path.isdir(dir_path):
            continue
        storage_broker = cls(dir_path, config_path)
        if not storage_broker.has_admin_metadata():
            continue
        uri = storage_broker.generate_uri(
            name=d,
            uuid=None,
            base_uri=base_uri
        )
        uri_list.append(uri)
    return uri_list
Return list containing URIs in location given by base_uri.
23,854
def transp(I, J, c, d, M):
    model = Model("transportation")
    x = {}
    for i in I:
        for j in J:
            x[i, j] = model.addVar(vtype="C", name="x(%s,%s)" % (i, j))
    for i in I:
        model.addCons(quicksum(x[i, j] for j in J if (i, j) in x) == d[i],
                      name="Demand(%s)" % i)
    for j in J:
        model.addCons(quicksum(x[i, j] for i in I if (i, j) in x) <= M[j],
                      name="Capacity(%s)" % j)
    model.setObjective(quicksum(c[i, j] * x[i, j] for (i, j) in x), "minimize")
    model.optimize()
    model.data = x
    return model
transp -- model for solving the transportation problem Parameters: I - set of customers J - set of facilities c[i,j] - unit transportation cost on arc (i,j) d[i] - demand at node i M[j] - capacity Returns a model, ready to be solved.
23,855
def cancel_order(self, order_id, private_key):
    create_cancellation = self.create_cancellation(order_id=order_id,
                                                   private_key=private_key)
    return self.execute_cancellation(cancellation_params=create_cancellation,
                                     private_key=private_key)
This function is a wrapper function around the create and execute cancellation functions to help make this processes simpler for the end user by combining these requests in 1 step. Execution of this function is as follows:: cancel_order(order_id=order['id'], private_key=kp) cancel_order(order_id=order['id'], private_key=eth_private_key) The expected return result for this function is the same as the execute_cancellation function:: { 'id': 'b8e617d5-f5ed-4600-b8f2-7d370d837750', 'blockchain': 'neo', 'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82', 'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59', 'side': 'buy', 'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', 'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132', 'offer_amount': '2000000', 'want_amount': '10000000000', 'transfer_amount': '0', 'priority_gas_amount': '0', 'use_native_token': True, 'native_fee_transfer_amount': 0, 'deposit_txn': None, 'created_at': '2018-08-05T11:16:47.021Z', 'status': 'processed', 'fills': [], 'makes': [ { 'id': '6b9f40de-f9bb-46b6-9434-d281f8c06b74', 'offer_hash': '6830d82dbdda566ab32e9a8d9d9d94d3297f67c10374d69bb35d6c5a86bd3e92', 'available_amount': '0', 'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', 'offer_amount': '2000000', 'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132', 'want_amount': '10000000000', 'filled_amount': '0.0', 'txn': None, 'cancel_txn': None, 'price': '0.0002', 'status': 'cancelling', 'created_at': '2018-08-05T11:16:47.036Z', 'transaction_hash': 'e5b08c4a55c7494f1ec7dd93ac2bb2b4e84e77dec9e00e91be1d520cb818c415', 'trades': [] } ] } :param order_id: The order ID of the open transaction on the order book that you want to cancel. :type order_id: str :param private_key: The KeyPair that will be used to sign the transaction sent to the blockchain. :type private_key: KeyPair :return: Dictionary of the transaction details and state after sending the signed transaction to the blockchain.
23,856
def context_chunks(self, context):
    N_chunks = len(self.contexts[context])
    chunks = []
    for j in xrange(N_chunks):
        chunks.append(self.context_chunk(context, j))
    return chunks
Retrieves all tokens, divided into the chunks in context ``context``. Parameters ---------- context : str Context name. Returns ------- chunks : list Each item in ``chunks`` is a list of tokens.
23,857
def media_url(self, with_ssl=False):
    if self.serve_remote:
        url = self.remote_media_url(with_ssl)
    else:
        url = self.local_media_url
    # The character argument was elided in the sample; stripping a
    # trailing '/' is the natural reading for a base URL.
    return url.rstrip('/')
Used to return a base media URL. Depending on whether we're serving media remotely or locally, this either hands the decision off to the backend, or just uses the value in settings.STATIC_URL. args: with_ssl: (bool) If True, return an HTTPS url (depending on how the backend handles it).
23,858
def download(url, dir, filename=None, expect_size=None):
    mkdir_p(dir)
    if filename is None:
        filename = url.split('/')[-1]
    fpath = os.path.join(dir, filename)
    if os.path.isfile(fpath):
        if expect_size is not None and os.stat(fpath).st_size == expect_size:
            logger.info("File {} exists! Skip download.".format(filename))
            return fpath
        else:
            logger.warn("File {} exists. Will overwrite with a new download!".format(filename))

    def hook(t):
        last_b = [0]

        def inner(b, bsize, tsize=None):
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner
    try:
        with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
        statinfo = os.stat(fpath)
        size = statinfo.st_size
    except IOError:
        logger.error("Failed to download {}".format(url))
        raise
    assert size > 0, "Downloaded an empty file from {}!".format(url)
    if expect_size is not None and size != expect_size:
        logger.error("File downloaded from {} does not match the expected size!".format(url))
        logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
    logger.info("Successfully downloaded " + filename + ". " + str(size) + " bytes.")
    return fpath
Download URL to a directory. Will figure out the filename automatically from URL, if not given.
23,859
def stop(self):
    self.publish.setsockopt(zmq.LINGER, 1)
    self.publish.close()
    return self
Stop the publisher.
23,860
def date_map(doc, datemap_list, time_format=None):
    if datemap_list:
        for i in datemap_list:
            if isinstance(i, datetime):
                doc = CursorFormatter.date_map_field(doc, i, time_format=time_format)
    return doc
For all the datetime fields in "datemap" find that key in doc and map the datetime object to a strftime string. This pprint and others will print out readable datetimes.
23,861
def remove_send_last_message(self, connection):
    if connection in self._send_last_message:
        del self._send_last_message[connection]
        LOGGER.debug("Removed send_last_message function "
                     "for connection %s", connection)
    else:
        LOGGER.warning("Attempted to remove send_last_message "
                       "function for connection %s, but no "
                       "send_last_message function was registered",
                       connection)
Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages.
23,862
def _expand_data(self, old_data, new_data, group):
    for file in old_data:
        if file:
            extension = file.split(".")[-1].lower()
            if extension in self.file_types.keys():
                new_data[...][group].append(
                    self._expand_one_file(normpath(file), new_data, extension))
            else:
                logger.debug("Filetype for file %s not recognized" % file)
    if hasattr(self, '_expand_sort_key'):
        new_data[...][group] = sorted(new_data[...][group],
                                      key=self._expand_sort_key)
data expansion - uvision needs filename and path separately.
23,863
def install_package_command(package_name):
    if sys.platform == "win32":
        cmds = ....format(package_name)
    else:
        cmds = ....format(package_name)
    call(cmds, shell=True)
install python package from pip
23,864
def add_ssh_scheme_to_git_uri(uri):
    if isinstance(uri, six.string_types):
        if uri.startswith("git+") and "://" not in uri:
            uri = uri.replace("git+", "git+ssh://", 1)
        parsed = urlparse(uri)
        if ":" in parsed.netloc:
            netloc, _, path_start = parsed.netloc.rpartition(":")
            path = "/{0}{1}".format(path_start, parsed.path)
            uri = urlunparse(parsed._replace(netloc=netloc, path=path))
    return uri
Cleans VCS uris from pipenv.patched.notpip format
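Example of the transformation (illustrative input):

uri = add_ssh_scheme_to_git_uri("git+git@github.com:owner/repo.git")
print(uri)   # git+ssh://git@github.com/owner/repo.git -- scheme added, ':' path separator normalized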
23,865
def is_base_form(self, univ_pos, morphology=None):
    # The elided string keys are restored to match the spaCy lemmatizer
    # this sample appears to come from.
    morphology = {} if morphology is None else morphology
    others = [key for key in morphology
              if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')]
    if univ_pos == 'noun' and morphology.get('Number') == 'sing':
        return True
    elif univ_pos == 'verb' and morphology.get('VerbForm') == 'inf':
        return True
    elif univ_pos == 'verb' and (morphology.get('VerbForm') == 'fin' and
                                 morphology.get('Tense') == 'pres' and
                                 morphology.get('Number') is None and
                                 not others):
        return True
    elif univ_pos == 'adj' and morphology.get('Degree') == 'pos':
        return True
    elif VerbForm_inf in morphology:
        return True
    elif VerbForm_none in morphology:
        return True
    elif Number_sing in morphology:
        return True
    elif Degree_pos in morphology:
        return True
    else:
        return False
Check whether we're dealing with an uninflected paradigm, so we can avoid lemmatization entirely.
23,866
def alarm_on_log(self, alarm, matcher, skip=False):
    self.register_alarm(alarm)
    value = ... % (
        ....join(map(attrgetter(...), listify(alarm))),
        matcher)
    self._set(... if skip else ..., value)
    return self._section
Raise (or skip) the specified alarm when a log line matches the specified regexp. :param AlarmType|list[AlarmType] alarm: Alarm. :param str|unicode matcher: Regular expression to match log line. :param bool skip:
23,867
def autoconf(self):
    fpminfo = PHPfpmInfo(self._host, self._port, self._user,
                         self._password, self._monpath, self._ssl)
    return fpminfo is not None
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
23,868
def list_loadbalancers(call=None):
    if call == ...:
        raise SaltCloudSystemExit(...)
    ret = {}
    conn = get_conn()
    datacenter = get_datacenter(conn)
    for item in conn.list_loadbalancers(datacenter[...])[...]:
        lb = {...: item[...]}
        lb.update(item[...])
        ret[lb[...]] = lb
    return ret
Return a list of the loadbalancers that are on the provider
23,869
def add_circle(self, center_lat=None, center_lng=None, radius=None, **kwargs):
    # The 'center'/'lat'/'lng'/'radius' keys are inferred from the
    # parameters and the docstring; the default colors remain elided.
    kwargs.setdefault('center', {})
    if center_lat:
        kwargs['center']['lat'] = center_lat
    if center_lng:
        kwargs['center']['lng'] = center_lng
    if radius:
        kwargs['radius'] = radius
    if set(('lat', 'lng')) != set(kwargs['center'].keys()):
        raise AttributeError(...)
    if 'radius' not in kwargs:
        raise AttributeError(...)
    kwargs.setdefault('stroke_color', ...)
    kwargs.setdefault('stroke_opacity', .8)
    kwargs.setdefault('stroke_weight', 2)
    kwargs.setdefault('fill_color', ...)
    kwargs.setdefault('fill_opacity', .3)
    self.circles.append(kwargs)
Adds a circle dict to the Map.circles attribute The circle in a sphere is called "spherical cap" and is defined in the Google Maps API by at least the center coordinates and its radius, in meters. A circle has color and opacity both for the border line and the inside area. It accepts a circle dict representation as well. Args: center_lat (float): The circle center latitude center_lng (float): The circle center longitude radius (float): The circle radius, in meters .. _Circle: https://developers.google.com/maps/documen tation/javascript/reference#Circle
23,870
def normalize_attachment(attachment):
    res = dict()
    res[...] = ...
    res[...] = attachment[...]
    del (attachment[...])
    res[...] = attachment[...]
    del (attachment[...])
    res[...] = attachment
    return res
Convert attachment metadata from es to archivant format This function makes side effect on input attachment
23,871
def parse_headers(self, use_cookies, raw):
    if not raw:
        packet = helper.to_str(helper.read_file(self.fpth))
    else:
        packet = raw
    dat = {}
    pks = [x for x in packet.split(...) if x.replace(..., ...)]
    url = pks[0].split(...)[1]
    for i, cnt in enumerate(pks[1:]):
        arr = cnt.split(...)
        if len(arr) < 2:
            continue
        arr = [x.replace(..., ...) for x in arr]
        _k, v = arr[0], ....join(arr[1:])
        dat[_k] = v
    if use_cookies:
        try:
            self.fmt_cookies(dat.pop(...))
        except:
            pass
    self.headers = dat
    self.url = ....format(self.headers.get(...), url)
    return url, dat
analyze headers from file or raw messages :return: (url, dat) :rtype:
23,872
def get_variables(self, sort=None, collapse_same_ident=False):
    # The elided sort values are restored from the variable classes
    # they guard ('stack' and 'reg').
    variables = []
    if collapse_same_ident:
        raise NotImplementedError()
    for var in self._variables:
        if sort == 'stack' and not isinstance(var, SimStackVariable):
            continue
        if sort == 'reg' and not isinstance(var, SimRegisterVariable):
            continue
        variables.append(var)
    return variables
Get a list of variables. :param str or None sort: Sort of the variable to get. :param collapse_same_ident: Whether variables of the same identifier should be collapsed or not. :return: A list of variables. :rtype: list
23,873
def _dataframe_to_edge_list(df):
    cols = df.columns
    if len(cols):
        assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN
        assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN
        df = df[cols].T
        ret = [Edge(None, None, _series=df[col]) for col in df]
        return ret
    else:
        return []
Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.
23,874
def _get_solarflux(self):
    solar_spectrum = \
        SolarIrradianceSpectrum(TOTAL_IRRADIANCE_SPECTRUM_2000ASTM,
                                dlambda=0.0005,
                                wavespace=self.wavespace)
    self.solar_flux = solar_spectrum.inband_solarflux(self.rsr[self.bandname])
Derive the in-band solar flux from rsr over the Near IR band (3.7 or 3.9 microns)
23,875
def _connect(host=None, port=None, db=None, password=None):
    # Config lookups follow Salt's usual pattern; the exact option keys
    # were elided in the sample and are assumed here.
    if not host:
        host = __salt__['config.option']('redis.host')
    if not port:
        port = __salt__['config.option']('redis.port')
    if not db:
        db = __salt__['config.option']('redis.db')
    if not password:
        password = __salt__['config.option']('redis.password')
    return redis.StrictRedis(host, port, db, password, decode_responses=True)
Returns an instance of the redis client
23,876
def bootstrap_results(self, init_state):
    # Name-scope parts follow the TFP MetropolisHastings convention;
    # the error message remains elided.
    with tf.compat.v1.name_scope(
            name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'),
            values=[init_state]):
        pkr = self.inner_kernel.bootstrap_results(init_state)
        if not has_target_log_prob(pkr):
            raise ValueError(...)
        x = pkr.target_log_prob
        return MetropolisHastingsKernelResults(
            accepted_results=pkr,
            is_accepted=tf.ones_like(x, dtype=tf.bool),
            log_accept_ratio=tf.zeros_like(x),
            proposed_state=init_state,
            proposed_results=pkr,
            extra=[],
        )
Returns an object with the same type as returned by `one_step`. Args: init_state: `Tensor` or Python `list` of `Tensor`s representing the initial state(s) of the Markov chain(s). Returns: kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of `Tensor`s representing internal calculations made within this function. Raises: ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob".
23,877
def serialize(self):
    return {
        ...: ...,
        ...: self.uid,
        ...: {
            ...: self.start,
            ...: self.end,
            ...: self.uid,
            ...: self.title,
            ...: self.get_data(),
            ...: Status.str(self.status)
        }
    }
Serialize this object as dictionary usable for conversion to JSON. :return: Dictionary representing this object.
23,878
def isInside(self, point, tol=0.0001):
    poly = self.polydata(True)
    points = vtk.vtkPoints()
    points.InsertNextPoint(point)
    pointsPolydata = vtk.vtkPolyData()
    pointsPolydata.SetPoints(points)
    sep = vtk.vtkSelectEnclosedPoints()
    sep.SetTolerance(tol)
    sep.CheckSurfaceOff()
    sep.SetInputData(pointsPolydata)
    sep.SetSurfaceData(poly)
    sep.Update()
    return sep.IsInside(0)
Return True if point is inside a polydata closed surface.
23,879
def retrieve_list(self, session, filters, *args, **kwargs):
    query = self.queryset(session)
    translator = IntegerField()
    pagination_count = translator.translate(
        filters.pop(self.pagination_count_query_arg, self.paginate_by)
    )
    pagination_pk = translator.translate(
        filters.pop(self.pagination_pk_query_arg, 1)
    )
    pagination_pk -= 1
    query = query.filter_by(**filters)
    if pagination_pk:
        query = query.offset(pagination_pk * pagination_count)
    if pagination_count:
        query = query.limit(pagination_count + 1)
    count = query.count()
    next_link = None
    previous_link = None
    if count > pagination_count:
        next_link = {self.pagination_pk_query_arg: pagination_pk + 2,
                     self.pagination_count_query_arg: pagination_count}
    if pagination_pk > 0:
        previous_link = {self.pagination_pk_query_arg: pagination_pk,
                         self.pagination_count_query_arg: pagination_count}
    field_dict = self.dot_field_list_to_dict(self.list_fields)
    props = self.serialize_model(query[:pagination_count], field_dict=field_dict)
    meta = dict(links=dict(next=next_link, previous=previous_link))
    return props, meta
Retrieves a list of the model for this manager. It is restricted by the filters provided. :param Session session: The SQLAlchemy session to use :param dict filters: The filters to restrict the returned models on :return: A tuple of the list of dictionary representation of the models and the dictionary of meta data :rtype: list, dict
23,880
def import_image(self, imported_image_name, image_name):
    c = self._oc_command(["import-image", imported_image_name,
                          "--from=%s" % image_name, "--confirm"])
    logger.info("Importing image from: %s, as: %s", image_name, imported_image_name)
    try:
        o = run_cmd(c, return_output=True, ignore_status=True)
        logger.debug(o)
    except subprocess.CalledProcessError as ex:
        raise ConuException("oc import-image failed: %s" % ex)
    return imported_image_name
Import image using `oc import-image` command. :param imported_image_name: str, short name of an image in internal registry, example: - hello-openshift:latest :param image_name: full repository name, example: - docker.io/openshift/hello-openshift:latest :return: str, short name in internal registry
23,881
def to_long_time_string(self) -> str:
    hour = self.time.hour
    minute = self.time.minute
    second = self.time.second
    return f"{hour:02}:{minute:02}:{second:02}"
Return the iso time string only
23,882
def init_device(self):
    Device.init_device(self)
    self._set_master_state()
    self._devProxy = DeviceProxy(self.get_name())
Device constructor.
23,883
def is_attr_protected(attrname: str) -> bool:
    return (
        attrname[0] == "_"
        and attrname != "_"
        and not (attrname.startswith("__") and attrname.endswith("__"))
    )
return True if attribute name is protected (start with _ and some other details), False otherwise.
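A quick check of the naming rules above:

for name in ("_foo", "__bar", "__dunder__", "public", "_"):
    print(name, is_attr_protected(name))
# _foo True, __bar True, __dunder__ False, public False, _ False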
23,884
def union(self, other):
    return Interval(min(self.low, other.low), max(self.high, other.high))
Return the union of the current range and other: the smallest interval covering both.
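For example:

c = Interval(1, 5).union(Interval(3, 9))
print(c.low, c.high)   # 1 9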
23,885
def Close(self):
    if not self._append:
        for field_name in self._fields:
            query = ....format(field_name)
            self._cursor.execute(query)
            if self._set_status:
                self._set_status(....format(field_name))
    if self._set_status:
        self._set_status(...)
    for field in self._META_FIELDS:
        values = self._GetDistinctValues(field)
        self._cursor.execute(....format(field))
        for name, frequency in iter(values.items()):
            self._cursor.execute((...).format(field, name, frequency))
    self._cursor.execute(...)
    for tag in self._ListTags():
        self._cursor.execute(..., [tag])
    if self._set_status:
        self._set_status(...)
    self._connection.commit()
    self._cursor.close()
    self._connection.close()
    self._cursor = None
    self._connection = None
Disconnects from the database. This method will create the necessary indices and commit outstanding transactions before disconnecting.
23,886
def register(id, url=None):
    bucket = registration_s3_bucket()
    key = registration_key(id)
    obj = bucket.Object(key)
    obj.put(Body=url or "missing")
    return _generate_s3_url(bucket, key)
Register a UUID key in the global S3 bucket.
23,887
def set_thumbnail(self, thumbnail):
    headers = {
        "Authorization": "token {}".format(self._client.token),
        "Content-type": "image/png",
    }
    # The upload logic itself was elided in this sample.
    return True
Sets the thumbnail for this OAuth Client. If thumbnail is bytes, uploads it as a png. Otherwise, assumes thumbnail is a path to the thumbnail and reads it in as bytes before uploading.
23,888
def websocket_safe_read(self):
    data = []
    while True:
        try:
            data.append(self.websocket.recv())
        except (SSLError, SSLWantReadError) as err:
            if err.errno == 2:
                return data
            raise
Returns data if available, otherwise ''. Newlines indicate multiple messages
23,889
async def _parse_lines(lines, regex):
    results = []
    if inspect.iscoroutinefunction(lines):
        lines = await lines
    for line in lines:
        if line:
            match = regex.search(line)
            if not match:
                _LOGGER.debug("Could not parse row: %s", line)
                continue
            results.append(match.groupdict())
    return results
Parse the lines using the given regular expression. If a line can't be parsed it is logged and skipped in the output.
23,890
def attach_ip(self, server, family='IPv4'):
    # Body keys and endpoint follow the UpCloud IP-address API; the
    # literals were elided in the sample and are assumed here.
    body = {
        'ip_address': {
            'server': str(server),
            'family': family
        }
    }
    res = self.request('POST', '/ip_address', body)
    return IPAddress(cloud_manager=self, **res['ip_address'])
Attach a new (random) IPAddress to the given server (object or UUID).
23,891
def answer_challenge(authzr, client, responders):
    responder, challb = _find_supported_challenge(authzr, responders)
    response = challb.response(client.key)

    def _stop_responding():
        return maybeDeferred(
            responder.stop_responding,
            authzr.body.identifier.value,
            challb.chall,
            response)
    return (
        maybeDeferred(
            responder.start_responding,
            authzr.body.identifier.value,
            challb.chall,
            response)
        .addCallback(lambda _: client.answer_challenge(challb, response))
        .addCallback(lambda _: _stop_responding)
    )
Complete an authorization using a responder. :param ~acme.messages.AuthorizationResource auth: The authorization to complete. :param .Client client: The ACME client. :type responders: List[`~txacme.interfaces.IResponder`] :param responders: A list of responders that can be used to complete the challenge with. :return: A deferred firing when the authorization is verified.
23,892
def editHook(self, repo_user, repo_name, hook_id, name, config,
             events=None, add_events=None, remove_events=None, active=None):
    # Post keys mirror the keyword names; the path segments and method
    # follow the "PATCH /repos/:owner/:repo/hooks/:id" route in the docstring.
    post = dict(
        name=name,
        config=config,
    )
    if events is not None:
        post['events'] = events
    if add_events is not None:
        post['add_events'] = add_events
    if remove_events is not None:
        post['remove_events'] = remove_events
    if active is not None:
        post['active'] = active
    return self.api.makeRequest(
        ['repos', repo_user, repo_name, 'hooks', str(hook_id)],
        method='PATCH',
        post=post,
    )
PATCH /repos/:owner/:repo/hooks/:id :param hook_id: Id of the hook. :param name: The name of the service that is being called. :param config: A Hash containing key/value pairs to provide settings for this hook. :param events: Replaces the complete list of events the hook triggers for. :param add_events: Events to add to the hook's current list. :param remove_events: Events to remove from the hook's current list. :param active: Whether the hook is enabled.
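A hypothetical call updating a webhook; `repos` is assumed to be the endpoint object exposing editHook, and the repository and hook id are illustrative:

    d = repos.editHook('octocat', 'hello-world', 42,
                       name='web',
                       config={'url': 'https://ci.example.com/hook'},
                       add_events=['pull_request'])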
23,893
def handle_trunks(self, trunks, event_type):
    # Module-level dependencies assumed: LOG = logging.getLogger(__name__)
    # and an `events` constants module providing DELETED.
    LOG.debug("Trunks event received: %(event_type)s. Trunks: %(trunks)s",
              {'event_type': event_type, 'trunks': trunks})
    if event_type == events.DELETED:
        for trunk in trunks:
            self._trunks.pop(trunk.id, None)
    else:
        for trunk in trunks:
            self._trunks[trunk.id] = trunk
            self._setup_trunk(trunk)
Handle trunk data model changes pushed from the server.
23,894
# Requires module-level imports:
#   from itertools import product
#   from numpy import arange, array, delete, logical_not, ravel, sum
def select_by_index(self, val, level=0, squeeze=False, filter=False,
                    return_mask=False):
    # Normalize scalar arguments into lists. The source used bare
    # excepts; TypeError covers the scalar case.
    try:
        level[0]
    except TypeError:
        level = [level]
    try:
        val[0]
    except TypeError:
        val = [val]

    remove = []
    if len(level) == 1:
        try:
            val[0][0]
        except TypeError:
            val = [val]
        if squeeze and not filter and len(val) == 1:
            remove.append(level[0])
    else:
        for i in range(len(val)):
            try:
                val[i][0]
            except TypeError:
                val[i] = [val[i]]
            if squeeze and not filter and len(val[i]) == 1:
                remove.append(level[i])

    if len(level) != len(val):
        raise ValueError("List of levels must be same length as list "
                         "of corresponding values")

    # Enumerate every combination of selected values across levels.
    p = product(*val)
    selected = set(p)

    masks, ind = self._makemasks(index=self.index, level=level)
    nmasks = len(masks)
    masks = array([masks[x] for x in range(nmasks)
                   if tuple(ind[x]) in selected])
    final_mask = masks.any(axis=0)
    if filter:
        final_mask = logical_not(final_mask)

    indFinal = array(self.index)
    if len(indFinal.shape) == 1:
        indFinal = array(indFinal, ndmin=2).T
    indFinal = indFinal[final_mask]

    if squeeze:
        indFinal = delete(indFinal, remove, axis=1)

    if len(indFinal[0]) == 1:
        indFinal = ravel(indFinal)
    elif len(indFinal[1]) == 0:
        indFinal = arange(sum(final_mask))

    result = self.map(lambda v: v[final_mask], index=indFinal)

    if return_mask:
        return result, final_mask
    return result
Select or filter elements of the Series by index values (across levels, if multi-index). The index is a property of a Series object that assigns a value to each position within the arrays stored in the records of the Series. This function returns a new Series where, within each record, only the elements indexed by a given value(s) are retained. An index where each value is a list of a fixed length is referred to as a 'multi-index', as it provides multiple labels for each index location. Each of the dimensions in these sublists is a 'level' of the multi-index. If the index of the Series is a multi-index, then the selection can proceed by first selecting one or more levels, and then selecting one or more values at each level. Parameters ---------- val : list of lists Specifies the selected index values. List must contain one list for each level of the multi-index used in the selection. For any singleton lists, the list may be replaced with just the integer. level : list of ints, optional, default=0 Specifies which levels in the multi-index to use when performing selection. If a single level is selected, the list can be replaced with an integer. Must be the same length as val. squeeze : bool, optional, default=False If True, the multi-index of the resulting Series will drop any levels that contain only a single value because of the selection. Useful if indices are used as unique identifiers. filter : bool, optional, default=False If True, selection process is reversed and all index values EXCEPT those specified are selected. return_mask : bool, optional, default=False If True, return the mask used to implement the selection.
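A hedged usage sketch on a Series with a two-level index; the `series` object and index values are illustrative:

    # Suppose each record's values carry a two-level [day, channel] index:
    #   index = [[0, 0], [0, 1], [1, 0], [1, 1]]
    subset = series.select_by_index(0, level=1)              # keep channel 0
    rest = series.select_by_index(0, level=0, filter=True)   # drop day 0
    both = series.select_by_index([[0, 1], [0]], level=[0, 1])
    # `both` keeps days 0-1 at channel 0 only.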
23,895
def get_unused_color(self):
    if not self.unused_colors:
        # All colors exhausted; make the palette available again.
        self.reset()
    used_colors = [c for c in self.xlwt_colors
                   if c not in self.unused_colors]
    if not used_colors:
        # Defensive guard (an assumption, not in the source): with no
        # used colors yet, min() below would fail on an empty sequence,
        # so seed the comparison set with the first palette color.
        used_colors = [self.xlwt_colors[0]]
    # Pick the unused color whose nearest used color is farthest away.
    result_color = max(self.unused_colors,
                       key=lambda c: min(self.color_distance(c, c2)
                                         for c2 in used_colors))
    result_index = self.xlwt_colors.index(result_color)
    self.unused_colors.discard(result_color)
    return result_index
Returns an xlwt color index that has not been previously returned by this instance. Attempts to maximize the distance between the color and all previously used colors.
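A small usage sketch; the class name is hypothetical, since only get_unused_color appears in the source:

    registry = ColorRegistry()
    first = registry.get_unused_color()
    second = registry.get_unused_color()  # chosen to be far from `first`
    assert first != second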
23,896
def resolve_group_names(self, r, target_group_ids, groups):
    names = self.get_group_names(target_group_ids)
    if not names:
        return target_group_ids

    target_group_ids = list(target_group_ids)
    vpc_id = self.vpc_expr.search(r)
    if not vpc_id:
        raise PolicyExecutionError(self._format_error(
            "policy:{policy} non vpc attached resource used "
            "with modify-security-group: {resource_id}",
            resource_id=r[self.manager.resource_type.id]))

    for n in names:
        # Reset per name so a stale match is not reused (the flattened
        # source set `found` once, outside the loop).
        found = False
        for g in groups:
            # Standard describe_security_groups keys; the literals were
            # stripped from the source.
            if g['GroupName'] == n and g['VpcId'] == vpc_id:
                found = g['GroupId']
        if not found:
            raise PolicyExecutionError(self._format_error((
                "policy:{policy} could not resolve sg:{name} for "
                "resource:{resource_id} in vpc:{vpc}"),
                name=n,
                resource_id=r[self.manager.resource_type.id],
                vpc=vpc_id))
        target_group_ids.remove(n)
        target_group_ids.append(found)
    return target_group_ids
Resolve any security group names to the corresponding group ids, with the context of a given network-attached resource.
23,897
def fill(self, paths):
    # `empty` is a module-level sentinel and TreeItem a node type; both
    # are defined elsewhere in the source module.
    for path in paths:
        tree = self.tree
        # Separator assumed to be '/'; the literal was stripped from
        # the source, and the docstring says paths are relative.
        parts = tuple(path.split('/'))
        dir_parts = parts[:-1]
        built = ()
        for part in dir_parts:
            self.cache[built] = tree
            built += (part,)
            parent = tree
            tree = parent.folders.get(part, empty)
            if tree is empty:
                tree = parent.folders[part] = TreeItem(
                    name=built, folders={}, files=set(), parent=parent)
        self.cache[dir_parts] = tree
        tree.files.add(parts[-1])
Initialise the tree. paths is a list of strings where each string is the relative path to some file.
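A usage sketch; `tree` is assumed to be an instance of the class owning fill(), and the paths are illustrative:

    tree.fill(['a/b/one.txt', 'a/two.txt'])
    # -> folders 'a' and 'a/b' are created; 'two.txt' and 'one.txt'
    #    end up in their respective files sets.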
23,898
def installed(name, features=None, recurse=False, restart=False, source=None,
              exclude=None):
    # NOTE: the dict keys, execution-module names and user-facing message
    # strings below were missing from the source and are reconstructions.
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Fall back to parsing the state name as a comma-delimited list.
    if features is None:
        features = name.split(',')

    if not isinstance(features, list):
        features = features.split(',')

    # Determine which features are already installed.
    old = __salt__['win_servermanager.list_installed']()

    cur_feat = []
    for feature in features:
        if feature not in old:
            ret['changes'][feature] = \
                'Will be installed recurse={0}'.format(recurse)
        elif recurse:
            ret['changes'][feature] = \
                'Already installed but might install sub-features'
        else:
            cur_feat.append(feature)

    if cur_feat:
        cur_feat.insert(0, 'The following features are already installed:')
        ret['comment'] = '\n- '.join(cur_feat)

    if not ret['changes']:
        return ret

    if __opts__['test']:
        ret['result'] = None
        return ret

    # Install the features.
    status = __salt__['win_servermanager.install'](
        features,
        recurse=recurse,
        restart=restart,
        source=source,
        exclude=exclude)

    ret['result'] = status['Success']

    # Sort the results into failed, newly installed and removed (excluded).
    fail_feat = []
    new_feat = []
    rem_feat = []
    for feature in status['Features']:
        if not status['Features'][feature].get('Success', True):
            fail_feat.append('- {0}'.format(feature))
        elif '(exclude)' not in status['Features'][feature]['Message']:
            new_feat.append('- {0}'.format(feature))
        elif '(exclude)' in status['Features'][feature]['Message']:
            rem_feat.append('- {0}'.format(feature))

    if fail_feat:
        fail_feat.insert(0, 'Failed to install the following:')
    if new_feat:
        new_feat.insert(0, 'Installed the following:')
    if rem_feat:
        rem_feat.insert(0, 'Removed the following:')

    ret['comment'] = '\n'.join(fail_feat + new_feat + rem_feat)

    # Report the delta between before and after.
    new = __salt__['win_servermanager.list_installed']()
    ret['changes'] = salt.utils.data.compare_dicts(old, new)

    return ret
Install the windows feature. To install a single feature, use the ``name`` parameter. To install multiple features, use the ``features`` parameter. .. note:: Some features require a reboot after installation or removal. If so, other features cannot be installed until the server is restarted! Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma-delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to install. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 recurse (Optional[bool]): Install all sub-features as well. If the feature is installed but some of its sub-features are not, setting this to True will install the additional sub-features source (Optional[str]): Path to the source files if missing from the target system. None means that the system will use Windows Update services to find the required files. Default is None restart (Optional[bool]): Restarts the computer when installation is complete, if required by the role/feature installed. Default is False exclude (Optional[str]): The name of the feature to exclude when installing the named feature. This can be a single feature, a string of features in a comma-delimited list (no spaces), or a list of features. .. warning:: As there is no exclude option for the ``Add-WindowsFeature`` or ``Install-WindowsFeature`` PowerShell commands, the features named in ``exclude`` will be installed with the other sub-features and will then be removed. **If the feature named in ``exclude`` is not a sub-feature of one of the installed items it will still be removed.** Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Installs the IIS Web Server Role (Web-Server) IIS-WebServerRole: win_servermanager.installed: - recurse: True - name: Web-Server # Install multiple features, exclude the Web-Service install_multiple_features: win_servermanager.installed: - recurse: True - features: - RemoteAccess - XPS-Viewer - SNMP-Service - exclude: - Web-Server
23,899
def _results(self, scheduler_instance_id):
    with self.app.lock:
        res = self.app.get_results_from_passive(scheduler_instance_id)
    return serialize(res, True)
Get the results of the executed actions for the scheduler whose instance id is provided Calling this method for daemons that are not configured as passive does not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str