Dataset columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
380,900
def ret(f, *args, **kwargs): kwargs.update({: True}) return _stump(f, *args, **kwargs)
Automatically log progress on function entry and exit. Default logging value: info. The function's return value will be included in the logs. *Logging with values contained in the parameters of the decorated function* Message (args[0]) may be a string to be formatted with parameters passed to the decorated function. Each '{varname}' will be replaced by the value of the parameter of the same name. *Keyword parameters* - log :: integer - Specifies a custom level of logging to pass to the active logger. - Default: INFO *Exceptions:* - IndexError and ValueError - will be returned if *args contains a string that does not correspond to a parameter name of the decorated function, or if there are more '{}'s than there are *args.
380,901
def get_yaml_parser_roundtrip_for_context(): yaml_writer = get_yaml_parser_roundtrip() yaml_writer.Representer.add_representer( Context, yamler.representer.RoundTripRepresenter.represent_dict) return yaml_writer
Create a yaml parser that can serialize the pypyr Context. Create yaml parser with get_yaml_parser_roundtrip, adding Context. This allows the yaml parser to serialize the pypyr Context.
380,902
def read_from_bpch(filename, file_position, shape, dtype, endian, use_mmap=False):
    offset = file_position + 4
    if use_mmap:
        d = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
                      offset=offset, order='F')
    else:
        with FortranFile(filename, 'rb', endian) as ff:
            ff.seek(file_position)
            d = np.array(ff.readline())
            d = d.reshape(shape, order='F')
    if d.shape != shape:
        raise IOError("Data chunk read from {} does not have the right shape,"
                      " (expected {} but got {})"
                      .format(filename, shape, d.shape))
    return d
Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`.
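A hedged usage sketch; the file name, offset and grid shape below are placeholders, not values from the source:
>>> import numpy as np
>>> chunk = read_from_bpch('ND49_output.bpch', file_position=136,   # doctest: +SKIP
...                        shape=(72, 46, 47), dtype=np.dtype('>f4'),
...                        endian='>', use_mmap=True)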
380,903
def consolidate_output(job, config, mutect, pindel, muse): work_dir = job.fileStore.getLocalTempDir() mutect_tar, pindel_tar, muse_tar = None, None, None if mutect: mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, )) if pindel: pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, )) if muse: muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, )) out_tar = os.path.join(work_dir, config.uuid + ) tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None] with tarfile.open(os.path.join(work_dir, out_tar), ) as f_out: for tar in tar_list: with tarfile.open(tar, ) as f_in: for tarinfo in f_in: with closing(f_in.extractfile(tarinfo)) as f_in_file: if tar is mutect_tar: tarinfo.name = os.path.join(config.uuid, , os.path.basename(tarinfo.name)) elif tar is pindel_tar: tarinfo.name = os.path.join(config.uuid, , os.path.basename(tarinfo.name)) else: tarinfo.name = os.path.join(config.uuid, , os.path.basename(tarinfo.name)) f_out.addfile(tarinfo, fileobj=f_in_file) if urlparse(config.output_dir).scheme == : job.fileStore.logToMaster(.format(config.uuid, config.output_dir)) s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores) else: job.fileStore.logToMaster(.format(config.uuid, config.output_dir)) mkdir_p(config.output_dir) copy_files(file_paths=[out_tar], output_dir=config.output_dir)
Combine the contents of separate tarball outputs into one via streaming :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param str mutect: MuTect tarball FileStoreID :param str pindel: Pindel tarball FileStoreID :param str muse: MuSe tarball FileStoreID
380,904
def display(self, image): assert(image.size == self.size) self._last_image = image self._count += 1 filename = self._file_template.format(self._count) image = self.preprocess(image) surface = self.to_surface(image, alpha=self._contrast) logger.debug("Writing: {0}".format(filename)) self._pygame.image.save(surface, filename)
Takes a :py:mod:`PIL.Image` and dumps it to a numbered PNG file.
380,905
def crypto_sign_open(signed, pk): message = ffi.new("unsigned char[]", len(signed)) message_len = ffi.new("unsigned long long *") if lib.crypto_sign_open( message, message_len, signed, len(signed), pk) != 0: raise exc.BadSignatureError("Signature was forged or corrupt") return ffi.buffer(message, message_len[0])[:]
Verifies the signature of the signed message ``signed`` using the public key ``pk`` and returns the unsigned message. :param signed: bytes :param pk: bytes :rtype: bytes
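A short sketch of the verify side, assuming `signed` was produced by the matching low-level signing call and `pk` holds the raw public-key bytes:
>>> try:                                        # hypothetical inputs
...     message = crypto_sign_open(signed, pk)  # strips and checks the signature
... except exc.BadSignatureError:
...     message = None                          # forged or corrupt input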
380,906
def _prepare_load_balancers(self): stack = { A.NAME: self[A.NAME], A.VERSION: self[A.VERSION], } for load_balancer in self.get(R.LOAD_BALANCERS, []): svars = {A.STACK: stack} load_balancer[A.loadbalancer.VARS] = svars
Prepare load balancer variables
380,907
def get_best_ip_by_real_data_fetch(_type=): from QUANTAXIS.QAUtil.QADate import QA_util_today_str import time pre_trade_date=QA_util_get_real_date(QA_util_today_str()) pre_trade_date=QA_util_get_real_date(pre_trade_date) def get_stock_data_by_ip(ips): start=time.time() try: QA_fetch_get_stock_transaction(,pre_trade_date,pre_trade_date,2,ips[],ips[]) end=time.time() return end-start except: return 9999 def get_future_data_by_ip(ips): start=time.time() try: QA_fetch_get_future_transaction(,pre_trade_date,pre_trade_date,2,ips[],ips[]) end=time.time() return end-start except: return 9999 func,ip_list=0,0 if _type==: func,ip_list=get_stock_data_by_ip,stock_ip_list else: func,ip_list=get_future_data_by_ip,future_ip_list from pathos.multiprocessing import Pool def multiMap(func,sequence): res=[] pool=Pool(4) for i in sequence: res.append(pool.apply_async(func,(i,))) pool.close() pool.join() return list(map(lambda x:x.get(),res)) res=multiMap(func,ip_list) index=res.index(min(res)) return ip_list[index]
Time data retrieval with a specific fetch function and pick the server IP that downloads data fastest. By default the test fetches 1-minute data for a specific instrument.
380,908
def cublasStbmv(handle, uplo, trans, diag, n, k, A, lda, x, incx): status = _libcublas.cublasStbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], _CUBLAS_OP[trans], _CUBLAS_DIAG[diag], n, k, int(A), lda, int(x), incx) cublasCheckStatus(status)
Matrix-vector product for real triangular-banded matrix.
380,909
def matches_all_rules(self, target_filename): for rule in self.match_rules: if rule.test(target_filename) is False: return False self.logger.debug(.format(self.name, os.path.basename(target_filename), )) return True
Returns true if the given file matches all the rules in this ruleset. :param target_filename: :return: boolean
380,910
def get_command(self, ctx, cmd_name): path = "%s.%s" % (__name__, cmd_name) path = path.replace("-", "_") module = importlib.import_module(path) return getattr(module, )
Get command for click.
380,911
def get(obj: JsonObj, item: str, default: JsonObjTypes=None) -> JsonObjTypes: return obj._get(item, default)
Dictionary get routine
380,912
def SURFstar_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN_near, NN_far, headers, class_type, X, y, labels_std, data_type): scores = np.zeros(num_attributes) for feature_num in range(num_attributes): if len(NN_near) > 0: scores[feature_num] += compute_score(attr, mcmap, NN_near, feature_num, inst, nan_entries, headers, class_type, X, y, labels_std, data_type) if len(NN_far) > 0: scores[feature_num] -= compute_score(attr, mcmap, NN_far, feature_num, inst, nan_entries, headers, class_type, X, y, labels_std, data_type) return scores
Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as 'anti-scoring' of far instances outside of radius of current target instance
380,913
def init_group(self, group, chunk_size, compression=None, compression_opts=None): create_index(group, chunk_size) self._entries[].create_dataset( group, chunk_size, compression=compression, compression_opts=compression_opts) self._entries[].create_dataset( group, chunk_size, compression=compression, compression_opts=compression_opts) self._entries[].create_dataset( group, self._entries[].nb_per_chunk, compression=compression, compression_opts=compression_opts) if self.has_properties(): self._entries[].create_dataset( group, compression=compression, compression_opts=compression_opts)
Initializes an HDF5 group compliant with the stored data. This method creates the datasets 'items', 'labels', 'features' and 'index' and leaves them empty. :param h5py.Group group: The group to initialize. :param float chunk_size: The size of a chunk in the file (in MB). :param str compression: Optional compression, see :class:`h5features.writer` for details :param str compression_opts: Optional compression options, see :class:`h5features.writer` for details
380,914
def pilatus_description_metadata(description): result = {} if not description.startswith(): return result for c in : description = description.replace(c, ) for line in description.split(): if line[:2] != : continue line = line.split() name = line[0] if line[0] not in TIFF.PILATUS_HEADER: try: result[] = datetime.datetime.strptime( .join(line), ) except Exception: result[name] = .join(line[1:]) continue indices, dtype = TIFF.PILATUS_HEADER[line[0]] if isinstance(indices[0], slice): values = line[indices[0]] else: values = [line[i] for i in indices] if dtype is float and values[0] == : values = [] values = tuple(dtype(v) for v in values) if dtype == str: values = .join(values) elif len(values) == 1: values = values[0] result[name] = values return result
Return metadata from Pilatus image description as dict. Return metadata from Pilatus pixel array detectors by Dectris, created by camserver or TVX software. >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m') {'Pixel_size': (0.000172, 0.000172)}
380,915
def _get_public_room(self, room_name, invitees: List[User]): room_name_full = f invitees_uids = [user.user_id for user in invitees] for _ in range(JOIN_RETRIES): try: room = self._client.join_room(room_name_full) except MatrixRequestError as error: if error.code == 404: self.log.debug( f, room_name=room_name_full, error=error, ) else: self.log.debug( f, room_name=room_name, error=error.content, error_code=error.code, ) else: member_ids = {user.user_id for user in room.get_joined_members(force_resync=True)} users_to_invite = set(invitees_uids) - member_ids self.log.debug(, room=room, invitee_ids=users_to_invite) for invitee_id in users_to_invite: room.invite_user(invitee_id) self.log.debug(, room=room) break room = self._client.create_room( None, invitees=invitees_uids, is_public=True, ) self.log.warning( , room=room, invitees=invitees, ) return room
Obtain a public, canonically named (if possible) room and invite peers
380,916
def import_module(module_fqname, superclasses=None): module_name = module_fqname.rpartition(".")[-1] module = __import__(module_fqname, globals(), locals(), [module_name]) modules = [class_ for cname, class_ in inspect.getmembers(module, inspect.isclass) if class_.__module__ == module_fqname] if superclasses: modules = [m for m in modules if issubclass(m, superclasses)] return modules
Imports the module module_fqname and returns a list of defined classes from that module. If superclasses is defined then the classes returned will be subclasses of the specified superclass or superclasses. If superclasses is plural it must be a tuple of classes.
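A usage sketch with a hypothetical module path and base class (neither appears in the source):
>>> plugins = import_module('myapp.plugins.reports',        # doctest: +SKIP
...                         superclasses=(BasePlugin,))     # only BasePlugin subclasses returned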
380,917
def queryset(self, request, queryset): form = self.get_form(request) self.form = form start_date = form.start_date() end_date = form.end_date() if form.is_valid() and (start_date or end_date): args = self.__get_filterargs( start=start_date, end=end_date, ) return queryset.filter(**args)
That's the trick - we create self.form when Django tries to get our queryset. This allows creating the unbound and bound form in a single place.
380,918
def crop_to_extents(img1, img2, padding): beg_coords1, end_coords1 = crop_coords(img1, padding) beg_coords2, end_coords2 = crop_coords(img2, padding) beg_coords = np.fmin(beg_coords1, beg_coords2) end_coords = np.fmax(end_coords1, end_coords2) img1 = crop_3dimage(img1, beg_coords, end_coords) img2 = crop_3dimage(img2, beg_coords, end_coords) return img1, img2
Crop the images to ensure both fit within the bounding box
380,919
def sign_url_path(url, secret_key, expire_in=None, digest=None): result = urlparse(url) query_args = MultiValueDict(parse_qs(result.query)) query_args[] = token() if expire_in is not None: query_args[] = int(time() + expire_in) query_args[] = _generate_signature(result.path, secret_key, query_args, digest) return "%s?%s" % (result.path, urlencode(list(query_args.sorteditems(True))))
Sign a URL (excluding the domain and scheme). :param url: URL to sign :param secret_key: Secret key :param expire_in: Expiry time. :param digest: Specify the digest function to use; default is sha256 from hashlib :return: Signed URL
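A hedged example; the secret and URL are placeholders, and the exact query-string parameter names added by the function depend on the surrounding module:
>>> signed = sign_url_path('/api/report?id=42', 's3cr3t', expire_in=3600)   # doctest: +SKIP
>>> # -> '/api/report?...' with signature and expiry parameters appended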
380,920
def prepare_actions(self, obs): now = time.time() while self._past_actions and self._past_actions[0].deadline < now: self._past_actions.pop(0) def add_act(ability_id, color, pos, timeout=1): if ability_id: ability = self._static_data.abilities[ability_id] if ability.remaps_to_ability_id: ability_id = ability.remaps_to_ability_id self._past_actions.append( PastAction(ability_id, color, pos, now, now + timeout)) for act in obs.actions: if (act.HasField("action_raw") and act.action_raw.HasField("unit_command") and act.action_raw.unit_command.HasField("target_world_space_pos")): pos = point.Point.build( act.action_raw.unit_command.target_world_space_pos) add_act(act.action_raw.unit_command.ability_id, colors.yellow, pos) if act.HasField("action_feature_layer"): act_fl = act.action_feature_layer if act_fl.HasField("unit_command"): if act_fl.unit_command.HasField("target_screen_coord"): pos = self._world_to_feature_screen_px.back_pt( point.Point.build(act_fl.unit_command.target_screen_coord)) add_act(act_fl.unit_command.ability_id, colors.cyan, pos) elif act_fl.unit_command.HasField("target_minimap_coord"): pos = self._world_to_feature_minimap_px.back_pt( point.Point.build(act_fl.unit_command.target_minimap_coord)) add_act(act_fl.unit_command.ability_id, colors.cyan, pos) else: add_act(act_fl.unit_command.ability_id, None, None) if (act_fl.HasField("unit_selection_point") and act_fl.unit_selection_point.HasField("selection_screen_coord")): pos = self._world_to_feature_screen_px.back_pt(point.Point.build( act_fl.unit_selection_point.selection_screen_coord)) add_act(None, colors.cyan, pos) if act_fl.HasField("unit_selection_rect"): for r in act_fl.unit_selection_rect.selection_screen_coord: rect = point.Rect( self._world_to_feature_screen_px.back_pt( point.Point.build(r.p0)), self._world_to_feature_screen_px.back_pt( point.Point.build(r.p1))) add_act(None, colors.cyan, rect, 0.3) if act.HasField("action_render"): act_rgb = act.action_render if act_rgb.HasField("unit_command"): if act_rgb.unit_command.HasField("target_screen_coord"): pos = self._world_to_rgb_screen_px.back_pt( point.Point.build(act_rgb.unit_command.target_screen_coord)) add_act(act_rgb.unit_command.ability_id, colors.red, pos) elif act_rgb.unit_command.HasField("target_minimap_coord"): pos = self._world_to_rgb_minimap_px.back_pt( point.Point.build(act_rgb.unit_command.target_minimap_coord)) add_act(act_rgb.unit_command.ability_id, colors.red, pos) else: add_act(act_rgb.unit_command.ability_id, None, None) if (act_rgb.HasField("unit_selection_point") and act_rgb.unit_selection_point.HasField("selection_screen_coord")): pos = self._world_to_rgb_screen_px.back_pt(point.Point.build( act_rgb.unit_selection_point.selection_screen_coord)) add_act(None, colors.red, pos) if act_rgb.HasField("unit_selection_rect"): for r in act_rgb.unit_selection_rect.selection_screen_coord: rect = point.Rect( self._world_to_rgb_screen_px.back_pt( point.Point.build(r.p0)), self._world_to_rgb_screen_px.back_pt( point.Point.build(r.p1))) add_act(None, colors.red, rect, 0.3)
Keep a list of the past actions so they can be drawn.
380,921
def _read_n_samples(channel_file): n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE) n_samples = n_blocks * BLK_LENGTH return n_blocks, n_samples
Calculate the number of samples based on the file size Parameters ---------- channel_file : Path path to single filename with the header Returns ------- int number of blocks (i.e. records, in which the data is cut) int number of samples
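The calculation is plain integer arithmetic on the file size; a sketch with placeholder values for the module constants HDR_LENGTH, BLK_SIZE and BLK_LENGTH (their real values are not in the source):
>>> HDR_LENGTH, BLK_SIZE, BLK_LENGTH = 2048, 4096, 1024   # placeholder constants
>>> file_size = HDR_LENGTH + 10 * BLK_SIZE
>>> n_blocks = int((file_size - HDR_LENGTH) / BLK_SIZE)
>>> n_samples = n_blocks * BLK_LENGTH
>>> n_blocks, n_samples
(10, 10240)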
380,922
def pattern_to_str(pattern): if isinstance(pattern, str): return repr(pattern) else: return repr(pattern.pattern) if pattern else None
Convert a regex pattern to its string representation. If pattern is a string its repr is returned; if pattern is a compiled SRE_Pattern the repr of its pattern attribute is returned; if pattern is falsy, None is returned. :param pattern: pattern object or string :return: str: pattern string
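A couple of illustrative calls:
>>> import re
>>> pattern_to_str(re.compile(r'[a-z]+'))   # compiled pattern -> repr of its .pattern
"'[a-z]+'"
>>> pattern_to_str('literal')               # plain string -> its repr
"'literal'"
>>> pattern_to_str(None) is None
True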
380,923
def dense_to_deeper_block(dense_layer, weighted=True): units = dense_layer.units weight = np.eye(units) bias = np.zeros(units) new_dense_layer = StubDense(units, units) if weighted: new_dense_layer.set_weights( (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) ) return [StubReLU(), new_dense_layer]
deeper dense layer.
380,924
def phone_numbers(self): if self._phone_numbers is None: self._phone_numbers = PhoneNumberList(self) return self._phone_numbers
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList
380,925
def format(self): values = {} title = "Description" description = self.command.description + "\n\n" + self.get_ending_note() if not self.is_cog() else inspect.getdoc(self.command) sections = [] if isinstance(self.command, Command): description = self.command.short_doc sections = [{"name": "Usage", "value": self.get_command_signature()}, {"name": "More Info", "value": self.command.help.replace(self.command.short_doc, "").format(prefix=self.clean_prefix), "inline": False}] def category(tup): cog = tup[1].cog_name return cog + if cog is not None else if self.is_bot(): title = self.bot.user.display_name + " Help" data = sorted(self.filter_command_list(), key=category) for category, commands in itertools.groupby(data, key=category): section = {} commands = list(commands) if len(commands) > 0: section[] = category section[] = self.add_commands(commands) section[] = False sections.append(section) elif not sections or self.has_subcommands(): section = {"name": "Commands:", "inline": False, "value": self.add_commands(self.filter_command_list())} sections.append(section) values[] = title values[] = description values[] = sections return values
Handles the actual behaviour involved with formatting. To change the behaviour, this method should be overridden. Returns -------- list A paginated output of the help command.
380,926
def crypto_aead_chacha20poly1305_ietf_encrypt(message, aad, nonce, key): ensure(isinstance(message, bytes), , raising=exc.TypeError) mlen = len(message) ensure(mlen <= crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX, .format( crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX), raising=exc.ValueError) ensure(isinstance(aad, bytes) or (aad is None), , raising=exc.TypeError) ensure(isinstance(nonce, bytes) and len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES, .format( crypto_aead_chacha20poly1305_ietf_NPUBBYTES), raising=exc.TypeError) ensure(isinstance(key, bytes) and len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES, .format( crypto_aead_chacha20poly1305_ietf_KEYBYTES), raising=exc.TypeError) if aad: _aad = aad aalen = len(aad) else: _aad = ffi.NULL aalen = 0 mxout = mlen + crypto_aead_chacha20poly1305_ietf_ABYTES clen = ffi.new("unsigned long long *") ciphertext = ffi.new("unsigned char[]", mxout) res = lib.crypto_aead_chacha20poly1305_ietf_encrypt(ciphertext, clen, message, mlen, _aad, aalen, ffi.NULL, nonce, key) ensure(res == 0, "Encryption failed.", raising=exc.CryptoError) return ffi.buffer(ciphertext, clen[0])[:]
Encrypt the given ``message`` using the IETF ratified chacha20poly1305 construction described in RFC7539. :param message: :type message: bytes :param aad: :type aad: bytes :param nonce: :type nonce: bytes :param key: :type key: bytes :return: authenticated ciphertext :rtype: bytes
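A usage sketch; key and nonce sizes follow the IETF construction (32-byte key, 12-byte nonce), and the ciphertext carries a 16-byte authentication tag:
>>> import os
>>> key, nonce = os.urandom(32), os.urandom(12)
>>> ct = crypto_aead_chacha20poly1305_ietf_encrypt(b'attack at dawn', b'header', nonce, key)
>>> len(ct) - len(b'attack at dawn')   # crypto_aead_chacha20poly1305_ietf_ABYTES
16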
380,927
def _build_tree(self): groups = self._groups or self.get_children_paths(self.root_path) for group in groups: node = Node(name=group, parent=self.root) self.root.children.append(node) self._init_sub_groups(node)
Build a full or a partial tree, depending on the groups/sub-groups specified.
380,928
def execute_notebook(npth, dpth, timeout=1200, kernel='python3'):
    ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel)
    nb = nbformat.read(npth, as_version=4)
    t0 = timer()
    ep.preprocess(nb, {'metadata': {'path': dpth}})
    t1 = timer()
    with open(npth, 'wt') as f:
        nbformat.write(nb, f)
    return t1 - t0
Execute the notebook at `npth` using `dpth` as the execution directory. The execution timeout and kernel are `timeout` and `kernel` respectively.
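A hedged usage sketch with placeholder paths; the notebook is executed in place and the elapsed wall-clock time in seconds is returned:
>>> elapsed = execute_notebook('examples/demo.ipynb', 'examples', timeout=600)   # doctest: +SKIP
>>> print('ran in {:.1f}s'.format(elapsed))                                      # doctest: +SKIP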
380,929
def clear(self): for track in self._tracks: self._tracks[track].setall(False)
Clear tracks in memory - all zero
380,930
def _finish_futures(self, responses): exception_args = None if len(self._target_objects) != len(responses): raise ValueError("Expected a response for every request.") for target_object, subresponse in zip(self._target_objects, responses): if not 200 <= subresponse.status_code < 300: exception_args = exception_args or subresponse elif target_object is not None: try: target_object._properties = subresponse.json() except ValueError: target_object._properties = subresponse.content if exception_args is not None: raise exceptions.from_http_response(exception_args)
Apply all the batch responses to the futures created. :type responses: list of (headers, payload) tuples. :param responses: List of headers and payloads from each response in the batch. :raises: :class:`ValueError` if no requests have been deferred.
380,931
def record(self, pipeline_name, from_study): try: return self._records[(pipeline_name, from_study)] except KeyError: found = [] for sname, pnames in groupby(sorted(self._records, key=itemgetter(1)), key=itemgetter(1)): found.append( " for ".format("".join(p for p, _ in pnames), sname)) raise ArcanaNameError( (pipeline_name, from_study), ("{} doesn{}{}; '.join(found))))
Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline
380,932
def rpm(self, vol_per_rev): return Stock.rpm(self, vol_per_rev, self.Q_stock()).to(u.rev/u.min)
Return the pump speed required for the reactor's stock of material given the volume of fluid output per revolution by the stock's pump. :param vol_per_rev: Volume of fluid pumped per revolution (dependent on pump and tubing) :type vol_per_rev: float :return: Pump speed for the material stock, in revolutions per minute :rtype: float
380,933
def parse(yaml, validate=True): data = read_yaml(yaml) if validate: from .validation import validate validate(data, raise_exc=True) return Config.parse(data)
Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config
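A usage sketch reading a YAML file from disk (the file name is a placeholder):
>>> with open('valohai.yaml') as f:   # doctest: +SKIP
...     config = parse(f, validate=True)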
380,934
def build_ellipse_model(shape, isolist, fill=0., high_harmonics=False): from scipy.interpolate import LSQUnivariateSpline finely_spaced_sma = np.arange(isolist[0].sma, isolist[-1].sma, 0.1) nodes = isolist.sma[2:-2] intens_array = LSQUnivariateSpline( isolist.sma, isolist.intens, nodes)(finely_spaced_sma) eps_array = LSQUnivariateSpline( isolist.sma, isolist.eps, nodes)(finely_spaced_sma) pa_array = LSQUnivariateSpline( isolist.sma, isolist.pa, nodes)(finely_spaced_sma) x0_array = LSQUnivariateSpline( isolist.sma, isolist.x0, nodes)(finely_spaced_sma) y0_array = LSQUnivariateSpline( isolist.sma, isolist.y0, nodes)(finely_spaced_sma) grad_array = LSQUnivariateSpline( isolist.sma, isolist.grad, nodes)(finely_spaced_sma) a3_array = LSQUnivariateSpline( isolist.sma, isolist.a3, nodes)(finely_spaced_sma) b3_array = LSQUnivariateSpline( isolist.sma, isolist.b3, nodes)(finely_spaced_sma) a4_array = LSQUnivariateSpline( isolist.sma, isolist.a4, nodes)(finely_spaced_sma) b4_array = LSQUnivariateSpline( isolist.sma, isolist.b4, nodes)(finely_spaced_sma) a3_array = -a3_array * grad_array * finely_spaced_sma b3_array = -b3_array * grad_array * finely_spaced_sma a4_array = -a4_array * grad_array * finely_spaced_sma b4_array = -b4_array * grad_array * finely_spaced_sma eps_array[np.where(eps_array < 0.)] = 0. result = np.zeros(shape=shape) weight = np.zeros(shape=shape) eps_array[np.where(eps_array < 0.)] = 0.05 for index in range(1, len(finely_spaced_sma)): sma0 = finely_spaced_sma[index] eps = eps_array[index] pa = pa_array[index] x0 = x0_array[index] y0 = y0_array[index] geometry = EllipseGeometry(x0, y0, sma0, eps, pa) intens = intens_array[index] r = sma0 phi = 0. while (phi <= 2*np.pi + geometry._phi_min): harm = 0. if high_harmonics: harm = (a3_array[index] * np.sin(3.*phi) + b3_array[index] * np.cos(3.*phi) + a4_array[index] * np.sin(4.*phi) + b4_array[index] * np.cos(4.*phi) / 4.) x = r * np.cos(phi + pa) + x0 y = r * np.sin(phi + pa) + y0 i = int(x) j = int(y) if (i > 0 and i < shape[1] - 1 and j > 0 and j < shape[0] - 1): fx = x - float(i) fy = y - float(j) result[j, i] += (intens + harm) * (1. - fy) * (1. - fx) result[j, i + 1] += (intens + harm) * (1. - fy) * fx result[j + 1, i] += (intens + harm) * fy * (1. - fx) result[j + 1, i + 1] += (intens + harm) * fy * fx weight[j, i] += (1. - fy) * (1. - fx) weight[j, i + 1] += (1. - fy) * fx weight[j + 1, i] += fy * (1. - fx) weight[j + 1, i + 1] += fy * fx phi = max((phi + 0.75 / r), geometry._phi_min) r = geometry.radius(phi) weight[np.where(weight <= 0.)] = 1. result /= weight result[np.where(result == 0.)] = fill return result
Build an elliptical model galaxy image from a list of isophotes. For each ellipse in the input isophote list the algorithm fills the output image array with the corresponding isophotal intensity. Pixels in the output array are in general only partially covered by the isophote "pixel". The algorithm takes care of this partial pixel coverage by keeping track of how much intensity was added to each pixel by storing the partial area information in an auxiliary array. The information in this array is then used to normalize the pixel intensities. Parameters ---------- shape : 2-tuple The (ny, nx) shape of the array used to generate the input ``isolist``. isolist : `~photutils.isophote.IsophoteList` instance The isophote list created by the `~photutils.isophote.Ellipse` class. fill : float, optional The constant value to fill empty pixels. If an output pixel has no contribution from any isophote, it will be assigned this value. The default is 0. high_harmonics : bool, optional Whether to add the higher-order harmonics (i.e. ``a3``, ``b3``, ``a4``, and ``b4``; see `~photutils.isophote.Isophote` for details) to the result. Returns ------- result : 2D `~numpy.ndarray` The image with the model galaxy.
380,935
def run_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options, split=True): if strelka_options[]: chromosomes = strelka_options[] else: chromosomes = sample_chromosomes(job, strelka_options[]) num_cores = min(len(chromosomes), univ_options[]) strelka = job.wrapJobFn(run_strelka_full, tumor_bam, normal_bam, univ_options, strelka_options, disk=PromisedRequirement(strelka_disk, tumor_bam[], normal_bam[], strelka_options[]), memory=, cores=num_cores) job.addChild(strelka) if split: unmerge_strelka = job.wrapJobFn(wrap_unmerge, strelka.rv(), chromosomes, strelka_options, univ_options).encapsulate() strelka.addChild(unmerge_strelka) return unmerge_strelka.rv() else: return strelka.rv()
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome vcfs. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict strelka_options: Options specific to strelka :param bool split: Should the results be split into perchrom vcfs? :return: Either the fsID to the genome-level vcf or a dict of results from running strelka on every chromosome perchrom_strelka: |- 'chr1': | |-'snvs': fsID | +-'indels': fsID |- 'chr2': | |-'snvs': fsID | +-'indels': fsID |-... | +- 'chrM': |-'snvs': fsID +-'indels': fsID :rtype: toil.fileStore.FileID|dict
380,936
def type_stmt(self, stmt, p_elem, pset): typedef = stmt.i_typedef if typedef and not stmt.i_is_derived: uname, dic = self.unique_def_name(typedef) if uname not in dic: self.install_def(uname, typedef, dic) SchemaNode("ref", p_elem).set_attr("name", uname) defst = typedef.search_one("default") if defst: dic[uname].default = defst.arg occur = 1 else: occur = dic[uname].occur if occur > 0: self.propagate_occur(p_elem, occur) return chain = [stmt] tdefault = None while typedef: type_ = typedef.search_one("type") chain.insert(0, type_) if tdefault is None: tdef = typedef.search_one("default") if tdef: tdefault = tdef.arg typedef = type_.i_typedef if tdefault and p_elem.occur == 0: p_elem.default = tdefault self.propagate_occur(p_elem, 1) self.type_handler[chain[0].arg](chain, p_elem)
Handle ``type`` statement. Built-in types are handled by one of the specific type callback methods defined below.
380,937
def daily_from_hourly(df): df_daily = pd.DataFrame() if in df: df_daily[] = df.temp.resample().mean() df_daily[] = df.temp.groupby(df.temp.index.date).min() df_daily[] = df.temp.groupby(df.temp.index.date).max() if in df: df_daily[] = df.precip.resample().sum() if in df: df_daily[] = df.glob.resample().mean() if in df: df_daily[] = df.hum.resample().mean() if in df: df_daily[] = df.hum.groupby(df.hum.index.date).min() if in df: df_daily[] = df.hum.groupby(df.hum.index.date).max() if in df: df_daily[] = df.wind.resample().mean() if in df: df_daily[] = df.ssd.resample().sum() / 60 df_daily.index.name = None return df_daily
Aggregates data (hourly to daily values) according to the characteristics of each variable (e.g., average for temperature, sum for precipitation) Args: df: dataframe including time series with one hour time steps Returns: dataframe (daily)
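A small sketch of the hourly-to-daily aggregation with synthetic data; the column names 'temp' and 'precip' are assumptions based on the aggregation rules described above:
>>> import numpy as np, pandas as pd
>>> idx = pd.date_range('2020-01-01', periods=48, freq='H')
>>> hourly = pd.DataFrame({'temp': np.linspace(0., 10., 48),
...                        'precip': np.ones(48)}, index=idx)
>>> daily = daily_from_hourly(hourly)   # doctest: +SKIP
>>> # -> two daily rows: mean/min/max temperature, summed precipitation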
380,938
def _create_create_tracking_event(instance): event = _create_event(instance, CREATE) for field in instance._tracked_fields: if not isinstance(instance._meta.get_field(field), ManyToManyField): _create_tracked_field(event, instance, field)
Create a TrackingEvent and TrackedFieldModification for a CREATE event.
380,939
def token_generator(self, texts, **kwargs): for text_idx, text in enumerate(texts): if self.lower: text = text.lower() for char in text: yield text_idx, char
Yields tokens from texts as `(text_idx, character)`
380,940
def stringize( self, rnf_profile, ): coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
380,941
def _initial_guess(self, countsmat): if self.theta_ is not None: return self.theta_ if self.guess == : transmat, pi = _transmat_mle_prinz(countsmat) K = np.real(scipy.linalg.logm(transmat)) / self.lag_time elif self.guess == : transmat, pi = _transmat_mle_prinz(countsmat) K = (transmat - np.eye(self.n_states_)) / self.lag_time elif isinstance(self.guess, np.ndarray): pi = _solve_ratemat_eigensystem(self.guess)[1][:, 0] K = self.guess S = np.multiply(np.sqrt(np.outer(pi, 1/pi)), K) sflat = np.maximum(S[np.triu_indices_from(countsmat, k=1)], 0) theta0 = np.concatenate((sflat, np.log(pi))) return theta0
Generate an initial guess for \theta.
380,942
def get_stp_mst_detail_output_msti_port_transmitted_stp_type(self, **kwargs):
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    transmitted_stp_type = ET.SubElement(port, "transmitted-stp-type")
    transmitted_stp_type.text = kwargs.pop('transmitted_stp_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
380,943
def number_peaks(self, x, n=None):
    if n is None:
        n = 5
    peaks = feature_calculators.number_peaks(x, n)
    logging.debug("number peaks by tsfresh calculated")
    return peaks
As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L1003>`_ Calculates the number of peaks of at least support n in the time series x. A peak of support n is defined \ as a subsequence of x where a value occurs, which is bigger than its n neighbours to the left and to the right. Hence in the sequence >>> x = [3, 0, 0, 4, 0, 0, 13] 4 is a peak of support 1 and 2 because in the subsequences >>> [0, 4, 0] >>> [0, 0, 4, 0, 0] 4 is still the highest value. Here, 4 is not a peak of support 3 because 13 is the 3rd neighbour to the \ right of 4 and it is bigger than 4. :param x: the time series to calculate the feature of :type x: pandas.Series :param n: the support of the peak :type n: int :return: the value of this feature :rtype: float
380,944
def file(cls, path, encoding=None, parser=None): cls.__hierarchy.append(file.File(path, encoding, parser))
Set a file as a source. Files are parsed as literal python dicts by default; this behaviour can be configured. Args: path: The path to the file to be parsed encoding: The encoding of the file. Defaults to 'raw'. Available built-in values: 'ini', 'json', 'yaml'. Custom value can be used in conjunction with parser. parser: A parser function for a custom encoder. It is expected to return a dict containing the parsed values when called with the contents of the file as an argument.
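A hedged example registering a file source with a custom parser (TOML via the standard-library tomllib, Python 3.11+); the class name Config and the file path are assumptions:
>>> import tomllib
>>> Config.file('settings.toml',
...             parser=lambda text: tomllib.loads(text))   # doctest: +SKIP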
380,945
def make_sshable(c): user = c.travis.sudo.user home = "~{0}".format(user) c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir))
Set up passwordless SSH keypair & authorized_hosts access to localhost.
380,946
def flush(self, objects=None, batch_size=None, **kwargs): batch_size = batch_size or self.config.get() if objects: from_store = False else: from_store = True objects = self.itervalues() objects = sorted(objects, key=lambda x: x[]) batch, _ids = [], [] try: self.store.pop(_id) except KeyError: logger.warn( "failed to pop {} from self.store!".format(_id)) return sorted(_ids)
flush objects stored in self.container or those passed in
380,947
def registration_options(self): registration_options = {} rs = self.registration_model() for k, v in self.__dict__.items(): if k not in DEFAULT_BASE_FIELDS + []: try: getattr(rs, k) registration_options.update({k: v}) except AttributeError: pass registration_identifier = registration_options.get( ) if registration_identifier: registration_options[] = self.to_string( registration_identifier) return registration_options
Gathers values for common attributes between the registration model and this instance.
380,948
def point_dist(pt1, pt2): from scipy import linalg as spla dist = spla.norm(point_displ(pt1, pt2)) return dist
Calculate the Euclidean distance between two n-D points. |pt1 - pt2| .. todo:: Complete point_dist docstring
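A quick numerical check, assuming `point_displ` returns the elementwise displacement between the two points:
>>> point_dist([0.0, 0.0, 0.0], [3.0, 4.0, 0.0])   # 3-4-5 triangle
5.0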
380,949
def teardown_logical_port_connectivity(self, context, port_db, hosting_device_id):
    if port_db is None or port_db.get('id') is None:
        LOG.warning("Port id is None! Cannot remove port "
                    "from hosting_device:%s", hosting_device_id)
        return
    hosting_port_id = port_db.hosting_info.hosting_port.id
    try:
        self._dev_mgr.svc_vm_mgr.interface_detach(hosting_device_id, hosting_port_id)
        self._gt_pool.spawn_n(self._cleanup_hosting_port, context, hosting_port_id)
        LOG.debug("Teardown logicalport completed for port:%s", port_db.id)
    except Exception as e:
        LOG.error("Failed to detach interface corresponding to port:"
                  "%(p_id)s on hosting device:%(hd_id)s due to "
                  "error %(error)s", {'p_id': hosting_port_id,
                                      'hd_id': hosting_device_id,
                                      'error': str(e)})
Removes connectivity for a logical port. Unplugs the corresponding data interface from the VM.
380,950
def _gcs_get_key_names(bucket, pattern): return [obj.metadata.name for obj in _gcs_get_keys(bucket, pattern)]
Get names of all Google Cloud Storage keys in a specified bucket that match a pattern.
380,951
def close(self): if self._con is not None: self._pool.cache(self._con) self._con = None
Close the pooled connection.
380,952
def getratio(self, code):
    if len(code) == 0:
        return 0
    code_replaced = self.prog.sub('', code)
    return (len(code) - len(code_replaced)) / len(code)
Get ratio of code and pattern matched
380,953
def convert(cls, **kwargsql):
    filters = []
    for k, v in kwargsql.items():
        terms = k.split('__')
        if terms[-1] in cls.KWARGQL_SUPPORTED_MONGO_OPS:
            v = {'$' + terms[-1]: v}
            if terms[-1] == 'exists':
                v['$exists'] = bool(v['$exists'])
            terms = terms[:-1]
        elif terms[-1] in cls.KWARGSQL_REGEX_OPS:
            config = cls.KWARGSQL_REGEX_OPS[terms[-1]]
            pattern = '{prefix}{pattern}{suffix}'.format(
                prefix=config.get('prefix', ''),
                pattern=re.escape(v),
                suffix=config.get('suffix', '')
            )
            v = {
                '$regex': pattern,
                '$options': config.get('options', ''),
            }
            terms = terms[:-1]
        k = '.'.join(terms)
        filters.append({k: v})
    if len(filters) == 0:
        return {}
    if len(filters) == 1:
        return filters[0]
    else:
        return {'$and': filters}
:param dict kwargsql: Kwargsql expression to convert :return: filter to be used in :py:method:`pymongo.collection.find` :rtype: dict
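A hedged illustration of the intended translation; the separator and operator names follow common kwargs-to-MongoDB conventions and are assumptions, not values taken from the source:
>>> cls.convert(age__gte=21)   # doctest: +SKIP
>>> # -> {'age': {'$gte': 21}}; multiple kwargs are combined under '$and'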
380,954
def load(cls, config: Optional[Config] = None): if cls._dfk is not None: raise RuntimeError() if config is None: cls._dfk = DataFlowKernel(Config()) else: cls._dfk = DataFlowKernel(config) return cls._dfk
Load a DataFlowKernel. Args: - config (Config) : Configuration to load. This config will be passed to a new DataFlowKernel instantiation which will be set as the active DataFlowKernel. Returns: - DataFlowKernel : The loaded DataFlowKernel object.
380,955
def switch_training(self, flag): if self._is_training == flag: return self._is_training = flag if flag: self._training_flag.set_value(1) else: self._training_flag.set_value(0)
Switch training mode. :param flag: switch on training mode when flag is True.
380,956
def export_coreml(self, filename): import coremltools def _create_vision_feature_print_scene(): prob_name = self.target + top_spec = coremltools.proto.Model_pb2.Model() top_spec.specificationVersion = 3 desc = top_spec.description desc.output.add().name = prob_name desc.output.add().name = self.target desc.predictedFeatureName = self.target desc.predictedProbabilitiesName = prob_name input = desc.input.add() input.name = self.feature input.type.imageType.width = 299 input.type.imageType.height = 299 BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value() input.type.imageType.colorSpace = BGR_VALUE pipelineClassifier = top_spec.pipelineClassifier scene_print = pipelineClassifier.pipeline.models.add() scene_print.specificationVersion = 3 scene_print.visionFeaturePrint.scene.version = 1 input = scene_print.description.input.add() input.name = self.feature input.type.imageType.width = 299 input.type.imageType.height = 299 input.type.imageType.colorSpace = BGR_VALUE output = scene_print.description.output.add() output.name = "output_name" DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value() output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE output.type.multiArrayType.shape.append(2048) temp = top_spec.pipelineClassifier.pipeline.models.add() temp.specificationVersion = 3 nn_spec = temp.neuralNetworkClassifier feature_layer = nn_spec.layers.add() feature_layer.name = "feature_layer" feature_layer.input.append("output_name") feature_layer.output.append("softmax_input") fc_layer_params = feature_layer.innerProduct fc_layer_params.inputChannels = 2048 softmax = nn_spec.layers.add() softmax.name = "softmax" softmax.softmax.MergeFromString(b) softmax.input.append("softmax_input") softmax.output.append(prob_name) input = temp.description.input.add() input.name = "output_name" input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE input.type.multiArrayType.shape.append(2048) desc = temp.description prob_output = desc.output.add() prob_output.name = prob_name label_output = desc.output.add() label_output.name = self.target if type(self.classifier.classes[0]) == int: prob_output.type.dictionaryType.int64KeyType.MergeFromString(b) label_output.type.int64Type.MergeFromString(b) else: prob_output.type.dictionaryType.stringKeyType.MergeFromString(b) label_output.type.stringType.MergeFromString(b) temp.description.predictedFeatureName = self.target temp.description.predictedProbabilitiesName = prob_name return top_spec def _update_last_two_layers(nn_spec): num_classes = self.num_classes fc_layer = nn_spec.layers[-2] fc_layer_params = fc_layer.innerProduct fc_layer_params.outputChannels = self.classifier.num_classes inputChannels = fc_layer_params.inputChannels fc_layer_params.hasBias = True coefs = self.classifier.coefficients weights = fc_layer_params.weights bias = fc_layer_params.bias del weights.floatValue[:] del bias.floatValue[:] import numpy as np W = np.array(coefs[coefs[] != None][], ndmin = 2).reshape( inputChannels, num_classes - 1, order = ) b = coefs[coefs[] == None][] Wa = np.hstack((np.zeros((inputChannels, 1)), W)) weights.floatValue.extend(Wa.flatten(order = )) bias.floatValue.extend([0.0] + list(b)) def _set_inputs_outputs_and_metadata(spec, nn_spec): class_labels = self.classifier.classes probOutput = spec.description.output[0] classLabel = spec.description.output[1] probOutput.type.dictionaryType.MergeFromString(b) if type(class_labels[0]) == int: nn_spec.ClearField() 
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b) classLabel.type.int64Type.MergeFromString(b) del nn_spec.int64ClassLabels.vector[:] for c in class_labels: nn_spec.int64ClassLabels.vector.append(c) else: nn_spec.ClearField() probOutput.type.dictionaryType.stringKeyType.MergeFromString(b) classLabel.type.stringType.MergeFromString(b) del nn_spec.stringClassLabels.vector[:] for c in class_labels: nn_spec.stringClassLabels.vector.append(c) prob_name = self.target + label_name = self.target old_output_name = nn_spec.layers[-1].name coremltools.models.utils.rename_feature(spec, , label_name) coremltools.models.utils.rename_feature(spec, old_output_name, prob_name) if nn_spec.layers[-1].name == old_output_name: nn_spec.layers[-1].name = prob_name if nn_spec.labelProbabilityLayerName == old_output_name: nn_spec.labelProbabilityLayerName = prob_name coremltools.models.utils.rename_feature(spec, , self.feature) if len(nn_spec.preprocessing) > 0: nn_spec.preprocessing[0].featureName = self.feature mlmodel = coremltools.models.MLModel(spec) model_type = % self.model mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type) mlmodel.input_description[self.feature] = u mlmodel.output_description[prob_name] = mlmodel.output_description[label_name] = _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, { : self.model, : self.target, : self.feature, : str(self.max_iterations), }, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION) return mlmodel if self.model in _pre_trained_models.MODELS: ptModel = _pre_trained_models.MODELS[self.model]() feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel) coreml_model = feature_extractor.get_coreml_model() spec = coreml_model.get_spec() nn_spec = spec.neuralNetworkClassifier else: spec = _create_vision_feature_print_scene() nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier _update_last_two_layers(nn_spec) mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec) mlmodel.save(filename)
Save the model in Core ML format. See Also -------- save Examples -------- >>> model.export_coreml('myModel.mlmodel')
380,957
def _check_unpack_options(extensions, function, extra_args):
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name
    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension, existing_extensions[extension]))
    if not isinstance(function, collections.Callable):
        raise TypeError('The registered function must be a callable')
Checks what gets registered as an unpacker.
380,958
def get_sites(self, filter_func=lambda x: True): response = self._session.get(BASE_URL_TSQUARE + ) response.raise_for_status() site_list = response.json()[] if not site_list: if filter_func(t_site): result_list.append(t_site) return result_list
Returns a list of TSquareSite objects that represent the sites available to a user. @param filter_func - A function taking in a Site object as a parameter that returns a True or False, depending on whether or not that site should be returned by this function. Filter_func should be used to create filters on the list of sites (i.e. user's preferences on what sites to display by default). If not specified, no filter is applied. @returns - A list of TSquareSite objects encapsulating t-square's JSON response.
380,959
def _handle_struct_ref(self, node, scope, ctxt, stream): self._dlog("handling struct ref") struct = self._handle_node(node.name, scope, ctxt, stream) try: sub_field = getattr(struct, node.field.name) except AttributeError as e: if isinstance(struct, fields.Array) and struct.implicit: last_item = struct[-1] sub_field = getattr(last_item, node.field.name) else: raise return sub_field
TODO: Docstring for _handle_struct_ref. :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
380,960
def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_model(restApiId=restApiId, modelName=modelName)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
Delete a model identified by name in a given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.delete_api_model restApiId modelName
380,961
def quote_value(value): try: if value == None: return SQL_NULL elif isinstance(value, SQL): return quote_sql(value.template, value.param) elif is_text(value): return SQL("") elif is_data(value): return quote_value(json_encode(value)) elif is_number(value): return SQL(text_type(value)) elif isinstance(value, datetime): return SQL("str_to_date(, )") elif isinstance(value, Date): return SQL("str_to_date(, )") elif hasattr(value, ): return quote_value(json_encode(value)) else: return quote_value(text_type(value)) except Exception as e: Log.error("problem quoting SQL {{value}}", value=repr(value), cause=e)
convert values to mysql code for the same mostly delegate directly to the mysql lib, but some exceptions exist
380,962
def _watch(inotify, watchers, watch_flags, s3_uploader): executor = futures.ThreadPoolExecutor(max_workers=1) last_pass_done = False stop_file_exists = False while not last_pass_done: for event in inotify.read(timeout=1000): for flag in inotify_simple.flags.from_mask(event.mask): if flag is inotify_simple.flags.ISDIR and inotify_simple.flags.CREATE & event.mask: path = os.path.join(intermediate_path, watchers[event.wd], event.name) for folder, dirs, files in os.walk(path): wd = inotify.add_watch(folder, watch_flags) relative_path = os.path.relpath(folder, intermediate_path) watchers[wd] = relative_path tmp_sub_folder = os.path.join(tmp_dir_path, relative_path) if not os.path.exists(tmp_sub_folder): os.makedirs(tmp_sub_folder) for file in files: _copy_file(executor, s3_uploader, relative_path, file) elif flag is inotify_simple.flags.CLOSE_WRITE: _copy_file(executor, s3_uploader, watchers[event.wd], event.name) last_pass_done = stop_file_exists stop_file_exists = os.path.exists(success_file_path) or os.path.exists(failure_file_path) executor.shutdown(wait=True)
As soon as a user is done with a file under `/opt/ml/output/intermediate` we would get notified by using inotify. We would copy this file under `/opt/ml/output/intermediate/.tmp.sagemaker_s3_sync` folder preserving the same folder structure to prevent it from being further modified. As we copy the file we would add timestamp with microseconds precision to avoid modification during s3 upload. After that we copy the file to s3 in a separate Thread. We keep the queue of the files we need to move as FIFO.
380,963
def ssh_invite(ctx, code_length, user, **kwargs): for name, value in kwargs.items(): setattr(ctx.obj, name, value) from . import cmd_ssh ctx.obj.code_length = code_length ctx.obj.ssh_user = user return go(cmd_ssh.invite, ctx.obj)
Add a public-key to a ~/.ssh/authorized_keys file
380,964
def identity_to_string(identity_dict):
    result = []
    if identity_dict.get('proto'):
        result.append(identity_dict['proto'] + '://')
    if identity_dict.get('user'):
        result.append(identity_dict['user'] + '@')
    result.append(identity_dict['host'])
    if identity_dict.get('port'):
        result.append(':' + identity_dict['port'])
    if identity_dict.get('path'):
        result.append(identity_dict['path'])
    log.debug('identity parts: %s', result)
    return ''.join(result)
Dump Identity dictionary into its string representation.
380,965
def add_item(self, key, value, cache_name=None): cache_name = cache_name or value = % (cache_name, key, value) self._set(, value.strip(), multi=True) return self._section
Add an item into the given cache. This is a commodity option (mainly useful for testing) allowing you to store an item in a uWSGI cache during startup. :param str|unicode key: :param value: :param str|unicode cache_name: If not set, default will be used.
380,966
def contract_multiplier(self):
    try:
        return self.__dict__["contract_multiplier"]
    except (KeyError, ValueError):
        raise AttributeError(
            "Instrument(order_book_id={}) has no attribute 'contract_multiplier'".format(self.order_book_id)
        )
[float] Contract multiplier, e.g. the multiplier of CSI 300 index futures is 300.0 (futures only)
380,967
def get_params(self, deep=False):
    params = super(XGBModel, self).get_params(deep=deep)
    if isinstance(self.kwargs, dict):
        params.update(self.kwargs)
    if params['missing'] is np.nan:
        params['missing'] = None
    return params
Get parameters.
380,968
def create(self, name, script, params=None):
    params = update_params(params, {
        'name': name,
        'script': script
    })
    return self.request('/v1/startupscript/create', params, 'POST')
/v1/startupscript/create POST - account Create a startup script Link: https://www.vultr.com/api/#startupscript_create
380,969
def thaw_from_args(parser): parser.add_argument(, dest=, help=) parser.add_argument(, dest=, help=)
Adds command line options for things related to inline thawing of icefiles
380,970
def get_language(): global sensor_graph, statement if sensor_graph is not None: return sensor_graph _create_primitives() _create_simple_statements() _create_block_bnf() sensor_graph = ZeroOrMore(statement) + StringEnd() sensor_graph.ignore(comment) return sensor_graph
Create or retrieve the parse tree for defining a sensor graph.
380,971
def invertible_1x1_conv(name, x, reverse=False): _, height, width, channels = common_layers.shape_list(x) w_shape = [channels, channels] random_matrix = np.random.rand(channels, channels) np_w = scipy.linalg.qr(random_matrix)[0].astype("float32") np_p, np_l, np_u = scipy.linalg.lu(np_w) np_s = np.diag(np_u) np_sign_s = np.sign(np_s) np_log_s = np.log(np.abs(np_s)) np_u = np.triu(np_u, k=1) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): p = tf.get_variable("P", initializer=np_p, trainable=False) l = tf.get_variable("L", initializer=np_l) sign_s = tf.get_variable( "sign_S", initializer=np_sign_s, trainable=False) log_s = tf.get_variable("log_S", initializer=np_log_s) u = tf.get_variable("U", initializer=np_u) l_mask = np.tril(np.ones([channels, channels], dtype=np.float32), -1) l = l * l_mask + tf.eye(channels, channels) u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s)) w = tf.matmul(p, tf.matmul(l, u)) objective = tf.reduce_sum(log_s) * tf.cast(height * width, log_s.dtype) if not reverse: w = tf.reshape(w, [1, 1] + w_shape) x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC") else: def tpu_inv(m): q, r = tf.linalg.qr(m) return tf.linalg.triangular_solve(r, tf.transpose(q), lower=False) w_inv = tf.reshape(tpu_inv(w), [1, 1]+w_shape) x = tf.nn.conv2d( x, w_inv, [1, 1, 1, 1], "SAME", data_format="NHWC") objective *= -1 return x, objective
1X1 convolution on x. The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where 1. P is a permutation matrix. 2. L is a lower triangular matrix with diagonal entries unity. 3. U is a upper triangular matrix where the diagonal entries zero. 4. s is a vector. sign(s) and P are fixed and the remaining are optimized. P, L, U and s are initialized by the PLU decomposition of a random rotation matrix. Args: name: scope x: Input Tensor. reverse: whether the pass is from z -> x or x -> z. Returns: x_conv: x after a 1X1 convolution is applied on x. objective: sum(log(s))
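The PLU parametrization described in the docstring can be sanity-checked with a few lines of NumPy, independently of the TensorFlow code above:
>>> import numpy as np, scipy.linalg
>>> w = scipy.linalg.qr(np.random.rand(4, 4))[0]        # random rotation matrix
>>> p, l, u = scipy.linalg.lu(w)
>>> s = np.diag(u)
>>> rebuilt = p @ l @ (np.triu(u, k=1) + np.diag(np.sign(s) * np.exp(np.log(np.abs(s)))))
>>> np.allclose(w, rebuilt)
True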
380,972
def _handle_start_center(self, attrs): center_lat = attrs.get("lat") center_lon = attrs.get("lon") if center_lat is None or center_lon is None: raise ValueError("Unable to get lat or lon of way center.") self._curr["center_lat"] = Decimal(center_lat) self._curr["center_lon"] = Decimal(center_lon)
Handle opening center element :param attrs: Attributes of the element :type attrs: Dict
380,973
def consolidate_tarballs_job(job, fname_to_id): work_dir = job.fileStore.getLocalTempDir() tar_paths = [] for fname, file_store_id in fname_to_id.iteritems(): p = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, fname + )) tar_paths.append((p, fname)) output_name = out_tar = os.path.join(work_dir, output_name) with tarfile.open(os.path.join(work_dir, out_tar), ) as f_out: for tar, fname in tar_paths: with tarfile.open(tar, ) as f_in: for tarinfo in f_in: with closing(f_in.extractfile(tarinfo)) as f_in_file: tarinfo.name = os.path.join(output_name, fname, os.path.basename(tarinfo.name)) f_out.addfile(tarinfo, fileobj=f_in_file) return job.fileStore.writeGlobalFile(out_tar)
Combine the contents of separate tarballs into one. Subdirs within the tarball will be named the keys in **fname_to_id :param JobFunctionWrappingJob job: passed automatically by Toil :param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID :return: The file store ID of the generated tarball :rtype: str
380,974
def colless(self, normalize='leaves'):
    t_res = copy(self); t_res.resolve_polytomies(); leaves_below = dict(); n = 0; I = 0
    for node in t_res.traverse_postorder():
        if node.is_leaf():
            leaves_below[node] = 1; n += 1
        else:
            cl,cr = node.children; nl = leaves_below[cl]; nr = leaves_below[cr]
            leaves_below[node] = nl+nr; I += abs(nl-nr)
    if normalize is None or normalize is False:
        return I
    elif not isinstance(normalize,str):
        raise TypeError("normalize must be None or a string")
    normalize = normalize.lower()
    if normalize == 'leaves':
        return (2.*I)/((n-1)*(n-2))
    elif normalize == 'yule':
        return (I - n*log(n) - n*(EULER_GAMMA-1-log(2)))/n
    elif normalize == 'pda':
        return I/(n**1.5)
    else:
        raise RuntimeError("normalize must be None, 'leaves', 'yule', or 'pda'")
Compute the Colless balance index of this ``Tree``. If the tree has polytomies, they will be randomly resolved Args: ``normalize`` (``str``): How to normalize the Colless index (if at all) * ``None`` to not normalize * ``"leaves"`` to normalize by the number of leaves * ``"yule"`` to normalize to the Yule model * ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model Returns: ``float``: Colless index (either normalized or not)
380,975
def __check_success(resp): if "success" not in resp.keys(): try: raise APIError(, , resp["error"]) except KeyError: raise APIError(, , str(resp)) return resp["success"]
Check a JSON server response to see if it was successful :type resp: Dictionary (parsed JSON from response) :param resp: the response string :rtype: String :returns: the success message, if it exists :raises: APIError if the success message is not present
380,976
def vm_netstats(vm_=None, **kwargs):
    def _info(dom):
        nics = _get_nics(dom)
        ret = {
            'rx_bytes': 0,
            'rx_packets': 0,
            'rx_errs': 0,
            'rx_drop': 0,
            'tx_bytes': 0,
            'tx_packets': 0,
            'tx_errs': 0,
            'tx_drop': 0
        }
        for attrs in six.itervalues(nics):
            if 'target' in attrs:
                dev = attrs['target']
                stats = dom.interfaceStats(dev)
                ret['rx_bytes'] += stats[0]
                ret['rx_packets'] += stats[1]
                ret['rx_errs'] += stats[2]
                ret['rx_drop'] += stats[3]
                ret['tx_bytes'] += stats[4]
                ret['tx_packets'] += stats[5]
                ret['tx_errs'] += stats[6]
                ret['tx_drop'] += stats[7]
        return ret
    info = {}
    conn = __get_conn(**kwargs)
    if vm_:
        info[vm_] = _info(_get_domain(conn, vm_))
    else:
        for domain in _get_domain(conn, iterable=True):
            info[domain.name()] = _info(domain)
    conn.close()
    return info
Return combined network counters used by the vms on this hyper in a dict of dicts:

:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults

    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults

    .. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

.. code-block:: python

    {
        'your-vm': {
            'rx_bytes'   : 0,
            'rx_packets' : 0,
            'rx_errs'    : 0,
            'rx_drop'    : 0,
            'tx_bytes'   : 0,
            'tx_packets' : 0,
            'tx_errs'    : 0,
            'tx_drop'    : 0
        },
        ...
    }

If you pass a VM name in as an argument then it will return info for just the named VM, otherwise it will return all VMs.

CLI Example:

.. code-block:: bash

    salt '*' virt.vm_netstats
380,977
def reMutualReceptions(self, idA, idB):
    mr = self.mutualReceptions(idA, idB)
    # Dignity labels assumed to be 'ruler' and 'exalt', per the docstring
    # (ruler and exaltation receptions)
    filter_ = ['ruler', 'exalt']
    return [(a, b) for (a, b) in mr if (a in filter_ and b in filter_)]
Returns ruler and exaltation mutual receptions.
380,978
def getTargetNamespace(self):
    parent = self
    targetNamespace = 'targetNamespace'
    tns = self.attributes.get(targetNamespace)
    while not tns and parent and parent._parent is not None:
        parent = parent._parent()
        tns = parent.attributes.get(targetNamespace)
    return tns or ''
return targetNamespace
380,979
def obspy_3d_plot(inventory, catalog, size=(10.5, 7.5), **kwargs):
    nodes = []
    for ev in catalog:
        nodes.append((ev.preferred_origin().latitude,
                      ev.preferred_origin().longitude,
                      ev.preferred_origin().depth / 1000))
    all_stas = []
    for net in inventory:
        for sta in net:
            if len(sta.channels) > 0:
                all_stas.append((sta.latitude, sta.longitude,
                                 sta.elevation / 1000 -
                                 sta.channels[0].depth / 1000))
            else:
                # original warning text was lost; message reconstructed
                warnings.warn('No channel information attached, setting '
                              'elevation without depth')
                all_stas.append((sta.latitude, sta.longitude,
                                 sta.elevation / 1000))
    fig = threeD_seismplot(
        stations=all_stas, nodes=nodes, size=size, **kwargs)
    return fig
Plot obspy Inventory and obspy Catalog classes in three dimensions. :type inventory: obspy.core.inventory.inventory.Inventory :param inventory: Obspy inventory class containing station metadata :type catalog: obspy.core.event.catalog.Catalog :param catalog: Obspy catalog class containing event metadata :type save: bool :param save: False will plot to screen, true will save plot and not show \ to screen. :type savefile: str :param savefile: Filename to save to, required for save=True :type size: tuple :param size: Size of figure in inches. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example: >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> from eqcorrscan.utils.plotting import obspy_3d_plot >>> client = Client('IRIS') >>> t1 = UTCDateTime(2012, 3, 26) >>> t2 = t1 + 86400 >>> catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43, ... longitude=170, maxradius=5) >>> inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43, ... longitude=170, maxradius=10) >>> obspy_3d_plot(inventory=inventory, catalog=catalog) # doctest: +SKIP .. plot:: from obspy.clients.fdsn import Client from obspy import UTCDateTime from eqcorrscan.utils.plotting import obspy_3d_plot client = Client('IRIS') t1 = UTCDateTime(2012, 3, 26) t2 = t1 + 86400 catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43, longitude=170, maxradius=5) inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43, longitude=170, maxradius=10) obspy_3d_plot(inventory=inventory, catalog=catalog)
380,980
def save_svg(string, parent=None):
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    # Dialog title, default suffix and name filter literals were lost;
    # typical values are restored here.
    dialog = QtGui.QFileDialog(parent, 'Save SVG Document')
    dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
    dialog.setDefaultSuffix('svg')
    dialog.setNameFilter('SVG document (*.svg)')
    if dialog.exec_():
        filename = dialog.selectedFiles()[0]
        f = open(filename, 'w')
        try:
            f.write(string)
        finally:
            f.close()
        return filename
    return None
Prompts the user to save an SVG document to disk. Parameters: ----------- string : basestring A Python string containing a SVG document. parent : QWidget, optional The parent to use for the file dialog. Returns: -------- The name of the file to which the document was saved, or None if the save was cancelled.
380,981
def dict_to_numpy_dict(obj_dict): return {key: np.asarray(value) if value is not None else None for key, value in obj_dict.items()}
Convert a dictionary of lists into a dictionary of numpy arrays
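A quick illustration of the conversion (values chosen arbitrarily):

obj_dict = {'weights': [0.2, 0.5, 0.3], 'labels': None, 'counts': [1, 2, 3]}
np_dict = dict_to_numpy_dict(obj_dict)
# np_dict['weights'] -> array([0.2, 0.5, 0.3]); np_dict['labels'] stays None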
380,982
def all_files_exist(file_list): all_exist = True for filename in file_list: all_exist = all_exist and os.path.isfile(filename) return all_exist
Check if all files exist. :param file_list: the names of files to check. :type file_list: list :returns: ``True`` if all files exist, ``False`` otherwise.
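A small usage example (the paths are illustrative):

required = ['config.yaml', 'data/input.csv']
if not all_files_exist(required):
    raise IOError('Missing one or more required files: {}'.format(required))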
380,983
def format_usage_masks(self, V_usage_mask_in, J_usage_mask_in, print_warnings = True):
    # The warning-message literals were lost in extraction; the strings below
    # are reasonable reconstructions of their intent (Python 2 print statements
    # preserved to match the surrounding code).
    if V_usage_mask_in is None:
        V_usage_mask = self.d_V_usage_mask
    elif isinstance(V_usage_mask_in, list):
        e_V_usage_mask = set()
        for v in V_usage_mask_in:
            try:
                e_V_usage_mask = e_V_usage_mask.union(self.V_mask_mapping[v])
            except KeyError:
                if print_warnings:
                    print 'Unfamiliar V gene/allele: ' + v
                pass
        if len(e_V_usage_mask) == 0:
            if print_warnings:
                print 'No recognized V genes/alleles. Using default V usage mask.'
            V_usage_mask = self.d_V_usage_mask
        else:
            V_usage_mask = list(e_V_usage_mask)
    else:
        try:
            V_usage_mask = self.V_mask_mapping[V_usage_mask_in]
        except KeyError:
            if print_warnings:
                print 'Unfamiliar V usage mask: ' + str(V_usage_mask_in) + '. Using default V usage mask.'
            V_usage_mask = self.d_V_usage_mask
    if J_usage_mask_in is None:
        J_usage_mask = self.d_J_usage_mask
    elif isinstance(J_usage_mask_in, list):
        e_J_usage_mask = set()
        for j in J_usage_mask_in:
            try:
                e_J_usage_mask = e_J_usage_mask.union(self.J_mask_mapping[j])
            except KeyError:
                if print_warnings:
                    print 'Unfamiliar J gene/allele: ' + j
                pass
        if len(e_J_usage_mask) == 0:
            if print_warnings:
                print 'No recognized J genes/alleles. Using default J usage mask.'
            J_usage_mask = self.d_J_usage_mask
        else:
            J_usage_mask = list(e_J_usage_mask)
    else:
        try:
            J_usage_mask = self.J_mask_mapping[J_usage_mask_in]
        except KeyError:
            if print_warnings:
                print 'Unfamiliar J usage mask: ' + str(J_usage_mask_in) + '. Using default J usage mask.'
            J_usage_mask = self.d_J_usage_mask
    return V_usage_mask, J_usage_mask
Format raw usage masks into lists of indices. Usage masks allows the Pgen computation to be conditioned on the V and J gene/allele identities. The inputted masks are lists of strings, or a single string, of the names of the genes or alleles to be conditioned on. The default mask includes all productive V or J genes. Parameters ---------- V_usage_mask_in : str or list An object to indicate which V alleles should be considered. The default input is None which returns the list of all productive V alleles. J_usage_mask_in : str or list An object to indicate which J alleles should be considered. The default input is None which returns the list of all productive J alleles. print_warnings : bool Determines whether warnings are printed or not. Default ON. Returns ------- V_usage_mask : list of integers Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list of integers Indices of the J alleles to be considered in the Pgen computation Examples -------- >>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01') ([34], [0]) >>> generation_probability.format_usage_masks('TRBV27*01', '') ([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13]) >>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01') ([34, 18], [0])
380,984
def simplex_find_cycle(self):
    # The node-attribute name was lost in extraction; 'component' is assumed,
    # following the component_nr variable it stores.
    nl = self.get_node_list()
    q = [nl[0]]
    visited = []
    depth = {nl[0]:0}
    pred = {nl[0]:None}
    for n in nl:
        self.get_node(n).set_attr('component', None)
    component_nr = int(nl[0])
    self.get_node(nl[0]).set_attr('component', component_nr)
    while True:
        while q:
            current = q.pop()
            visited.append(current)
            neighbors = self.in_neighbors[current] +\
                self.neighbors[current]
            for n in neighbors:
                if n==pred[current]:
                    continue
                self.get_node(n).set_attr('component', component_nr)
                if n in depth:
                    cycle1 = []
                    cycle2 = []
                    temp = n
                    while temp is not None:
                        cycle1.append(temp)
                        temp = pred[temp]
                    temp = current
                    while temp is not None:
                        cycle2.append(temp)
                        temp = pred[temp]
                    cycle1.pop()
                    cycle1.reverse()
                    cycle2.extend(cycle1)
                    return cycle2
                else:
                    pred[n] = current
                    depth[n] = depth[current] + 1
                if n not in visited:
                    q.append(n)
        flag = False
        for n in nl:
            if self.get_node(n).get_attr('component') is None:
                q.append(n)
                depth = {n:0}
                pred = {n:None}
                visited = []
                component_nr = int(n)
                self.get_node(n).set_attr('component', component_nr)
                flag = True
                break
        if not flag:
            break
    return None
API: simplex_find_cycle(self) Description: Returns a cycle (list of nodes) if the graph has one, returns None otherwise. Uses DFS. During DFS checks existence of arcs to lower depth regions. Note that direction of the arcs are not important. Return: Returns list of nodes that represents cycle. Returns None if the graph does not have any cycle.
380,985
def p_ports(self, p): wid = None port = Port(name=p[3], width=wid, type=None, lineno=p.lineno(1)) p[0] = p[1] + (port,) p.set_lineno(0, p.lineno(1))
ports : ports COMMA portname
380,986
def get_geostationary_angle_extent(geos_area):
    # Projection keys follow proj4 conventions: 'a' (equatorial radius),
    # 'b' (polar radius) and 'h' (satellite height above surface), in metres.
    req = geos_area.proj_dict['a'] / 1000
    rp = geos_area.proj_dict['b'] / 1000
    h = geos_area.proj_dict['h'] / 1000 + req
    aeq = 1 - req**2 / (h ** 2)
    ap_ = 1 - rp**2 / (h ** 2)
    xmax = np.arccos(np.sqrt(aeq))
    ymax = np.arccos(np.sqrt(ap_))
    return xmax, ymax
Get the max earth (vs space) viewing angles in x and y.
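A worked example with typical geostationary parameters (WGS84 radii, roughly 35786 km altitude); the stand-in object below only mimics the proj_dict attribute the function reads:

import numpy as np

class _FakeGeosArea(object):
    # minimal stand-in exposing only proj_dict, values in metres
    proj_dict = {'a': 6378137.0, 'b': 6356752.3, 'h': 35785831.0}

xmax, ymax = get_geostationary_angle_extent(_FakeGeosArea())
print(np.rad2deg(xmax), np.rad2deg(ymax))  # roughly 8.7 degrees in each direction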
380,987
def main(argv):
    import argparse
    # The original description, prog name and help strings were lost in
    # extraction; flag names below are inferred from the attribute names used
    # on the parsed args, and the ordering/help text is approximate.
    description = 'Look up taxon names via a taxonomic name resolution service.'
    parser = argparse.ArgumentParser(prog='ot-tnrs-match-names', description=description)
    parser.add_argument('names', nargs='*', help='name(s) to be matched')
    parser.add_argument('--context-name', default=None, type=str, required=False)
    parser.add_argument('--prohibit-fuzzy-matching', action='store_true', default=False, required=False,
                        help='disable approximate (fuzzy) name matching')
    parser.add_argument('--subtree', action='store_true', default=False, required=False,
                        help='include the subtree for each matched taxon')
    parser.add_argument('--include-dubious', action='store_true', default=False, required=False)
    parser.add_argument('--include-deprecated', action='store_true', default=False, required=False)
    args = parser.parse_args(argv)
    do_approximate_matching = not args.prohibit_fuzzy_matching
    name_list = args.names
    if len(name_list) == 0:
        name_list = ["Homo sapiens", "Gorilla gorilla"]
        sys.stderr.write('Running a demonstration query with {}\n'.format(name_list))
    else:
        for name in name_list:
            if name.startswith('-'):
                parser.print_help()
    match_and_print(name_list,
                    context_name=args.context_name,
                    do_approximate_matching=do_approximate_matching,
                    include_dubious=args.include_dubious,
                    include_deprecated=args.include_deprecated,
                    include_subtree=args.subtree,
                    output=sys.stdout)
This function sets up a command-line option parser and then calls match_and_print to do all of the real work.
380,988
def pdf_row_limiter(rows, limits=None, **kwargs): limits = limits or [None, None] upper_limit = limits[0] if limits else None lower_limit = limits[1] if len(limits) > 1 else None return rows[upper_limit: lower_limit]
Limit the rows by slicing between the given bounds. No best-effort algorithm is implemented here, because the possibilities are effectively infinite for text data extracted from a PDF.
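For example (rows are illustrative):

rows = ['header', 'row 1', 'row 2', 'row 3', 'footer']
pdf_row_limiter(rows, limits=[1, -1])   # -> ['row 1', 'row 2', 'row 3']
pdf_row_limiter(rows)                   # no limits -> all rows unchanged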
380,989
def run(self):
    self._logger.info("running for <{url}>".format(url=self._url))
    args = format_args(self._options)
    self._logger.debug("command: `{cmd}` / args: {args}".
                       format(cmd=self._cmd, args=args))
    try:
        process = Popen(
            args=[self._cmd] + args,
            stdin=PIPE,
            stdout=PIPE,
            stderr=PIPE
        )
        pid = process.pid
        # exact wording of the original debug message was lost; reconstructed
        self._logger.debug("running as PID #{pid}".format(pid=pid))
    except OSError as ex:
        raise PhantomasRunError(
            "Failed to run phantomas: {0}".format(ex), ex.errno)
    try:
        stdout, stderr = process.communicate()
        returncode = process.returncode
    except Exception:
        raise PhantomasRunError("Failed to complete the run")
    stdout = stdout.decode()
    stderr = stderr.decode()
    self._logger.debug("completed with return code #{returncode}".
                       format(returncode=returncode))
    if stderr != '':
        self._logger.debug("stderr: {stderr}".format(stderr=stderr))
        raise PhantomasFailedError(stderr.strip(), returncode)
    try:
        results = json.loads(stdout)
    except Exception:
        raise PhantomasResponseParsingError("Unable to parse the response")
    if self._options.get("runs", 0) > 1:
        return Runs(self._url, results)
    else:
        return Results(self._url, results)
Perform phantomas run
380,990
def verify_invoice_params(self, price, currency): if re.match("^[A-Z]{3,3}$", currency) is None: raise BitPayArgumentError("Currency is invalid.") try: float(price) except: raise BitPayArgumentError("Price must be formatted as a float")
Deprecated, will be made private in 2.4
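For instance, assuming the class is already instantiated as client, the validation behaves as follows:

client.verify_invoice_params(9.99, 'USD')        # passes silently
# client.verify_invoice_params(9.99, 'usd')      # raises BitPayArgumentError: currency is invalid
# client.verify_invoice_params('ten', 'USD')     # raises BitPayArgumentError: price must be a float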
380,991
def register_opts(conf):
    conf.register_cli_opts(CLI_OPTS)
    conf.register_opts(EPISODE_OPTS)
    conf.register_opts(FORMAT_OPTS)
    # the option-group name was lost; 'cache' is assumed from the option set
    conf.register_opts(CACHE_OPTS, 'cache')
Configure options within configuration library.
380,992
def run_star(job, fastqs, univ_options, star_options):
    # Most string literals in this function were lost in extraction.  Dict keys
    # and output file names below are recovered from the docstring where
    # possible; the STAR command-line flags are representative real flags, not
    # necessarily the exact original set, and the disk size is assumed.
    assert star_options['type'] in ('star', 'starlong')
    job.fileStore.logToMaster('Running STAR on %s' % univ_options['patient'])
    work_dir = job.fileStore.getLocalTempDir()
    input_files = {
        'rna_cutadapt_1.fastq': fastqs['rna_cutadapt_1.fastq'],
        'rna_cutadapt_2.fastq': fastqs['rna_cutadapt_2.fastq'],
        'star_index.tar.gz': star_options['index_tar']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
    parameters = ['--runThreadN', str(star_options['n']),
                  '--genomeDir', input_files['star_index'],
                  '--outFileNamePrefix', 'rna',
                  '--readFilesIn', input_files['rna_cutadapt_1.fastq'], input_files['rna_cutadapt_2.fastq'],
                  '--outSAMattributes', 'NH HI AS NM MD',
                  '--outSAMtype', 'BAM SortedByCoordinate',
                  '--quantMode', 'TranscriptomeSAM',
                  '--outSAMunmapped', 'Within']
    if star_options['type'] == 'star':
        docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'])
    else:
        docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'])
    output_files = defaultdict()
    for bam_file in ['rnaAligned.toTranscriptome.out.bam',
                     'rnaAligned.sortedByCoord.out.bam']:
        output_files[bam_file] = job.fileStore.writeGlobalFile('/'.join([
            work_dir, bam_file]))
    job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_1.fastq'])
    job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_2.fastq'])
    index_star = job.wrapJobFn(index_bamfile,
                               output_files['rnaAligned.sortedByCoord.out.bam'],
                               'rna', univ_options, disk='120G')
    job.addChild(index_star)
    output_files['rnaAligned.sortedByCoord.out.bam'] = index_star.rv()
    return output_files
This module uses STAR to align the RNA fastqs to the reference ARGUMENTS 1. fastqs: REFER RETURN VALUE of run_cutadapt() 2. univ_options: Dict of universal arguments used by almost all tools univ_options +- 'dockerhub': <dockerhub to use> 3. star_options: Dict of parameters specific to STAR star_options |- 'index_tar': <JSid for the STAR index tarball> +- 'n': <number of threads to allocate> RETURN VALUES 1. output_files: Dict of aligned bams output_files |- 'rnaAligned.toTranscriptome.out.bam': <JSid> +- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai |- 'rna_fix_pg_sorted.bam': <JSid> +- 'rna_fix_pg_sorted.bam.bai': <JSid> This module corresponds to node 9 on the tree
380,993
def log_message(self, msg, *args): if args: msg = msg % args self.logger.info(msg)
Hook to log a message.
380,994
def relabel(self, qubits: Qubits) -> 'Channel':
    chan = copy(self)
    chan.vec = chan.vec.relabel(qubits)
    return chan
Return a copy of this channel with new qubits
380,995
def get(self, name): ws_list = self.list() return ws_list[name] if name in ws_list else None
Get workspace info by name. Return None if the workspace doesn't exist.
380,996
def update(self):
    for linenum in reversed(sorted(self.updates)):
        self.replace_baseline_repr(linenum, self.updates[linenum])
    if not self.TEST_MODE:
        # The path template and status message literals were lost; the
        # '.update' suffix convention is assumed here.
        path = '{}.update{}'.format(*os.path.splitext(self.path))
        with io.open(path, 'w', encoding='utf-8') as fh:
            fh.write('\n'.join(self.lines))
        print('UPDATED: {}'.format(self.showpath(path)))
Replace baseline representations previously registered for update.
380,997
def set_group_anonymous(self, *, group_id, enable=True):
    # the attribute name is recovered from the method name itself
    return super().__getattr__('set_group_anonymous') \
        (group_id=group_id, enable=enable)
Toggle anonymous chat for a group.

------------

:param int group_id: group number
:param bool enable: whether to allow anonymous chat
:return: None
:rtype: None
380,998
def get_relationship_info(tree, media, image_sizes):
    if tree is None:
        return {}
    result = {}
    for el in tree.iter():
        # 'Id' and 'Target' are the standard OOXML relationship attributes
        el_id = el.get('Id')
        if el_id is None:
            continue
        target = el.get('Target')
        if any(
                target.lower().endswith(ext)
                for ext in IMAGE_EXTENSIONS_TO_SKIP):
            continue
        if target in media:
            image_size = image_sizes.get(el_id)
            target = convert_image(media[target], image_size)
        result[el_id] = cgi.escape(target)
    return result
A separate file holds the targets for links as well as the targets for images. Return a dictionary keyed on the relationship id, mapping to the target.
380,999
def _parse_name(self, name):
    if name is None:
        self.ifo = None
        self.tag = None
        self.version = None
    elif re_IFO_TAG_VERSION.match(name):
        match = re_IFO_TAG_VERSION.match(name).groupdict()
        self.ifo = match['ifo']
        self.tag = match['tag']
        self.version = int(match['version'])
    elif re_IFO_TAG.match(name):
        match = re_IFO_TAG.match(name).groupdict()
        self.ifo = match['ifo']
        self.tag = match['tag']
        self.version = None
    elif re_TAG_VERSION.match(name):
        match = re_TAG_VERSION.match(name).groupdict()
        self.ifo = None
        self.tag = match['tag']
        self.version = int(match['version'])
    else:
        # error-message wording partially reconstructed from the docstring
        raise ValueError("No flag name structure detected in %r, flags "
                         "should be named as '{ifo}:{tag}:{version}'. "
                         "For arbitrary strings, use the "
                         "`DataQualityFlag.label` attribute" % name)
    return self.ifo, self.tag, self.version
Internal method to parse a `string` name into constituent `ifo`, `tag` and `version` components.

Parameters
----------
name : `str`, `None`
    the full name of a `DataQualityFlag` to parse, e.g. ``'H1:DMT-SCIENCE:1'``, or `None` to set all components to `None`

Returns
-------
(ifo, tag, version)
    A tuple of component string parts

Raises
------
`ValueError`
    If the input ``name`` cannot be parsed into {ifo}:{tag}:{version} format.
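The three regexes are defined elsewhere in the module; a self-contained sketch of the expected matching behaviour, using approximate stand-in patterns rather than the originals, might look like:

import re

# Approximate stand-ins for the module-level patterns (not the originals)
re_IFO_TAG_VERSION = re.compile(r'^(?P<ifo>[A-Z]\d):(?P<tag>[^:]+):(?P<version>\d+)$')
re_IFO_TAG = re.compile(r'^(?P<ifo>[A-Z]\d):(?P<tag>[^:]+)$')
re_TAG_VERSION = re.compile(r'^(?P<tag>[^:]+):(?P<version>\d+)$')

m = re_IFO_TAG_VERSION.match('H1:DMT-SCIENCE:1').groupdict()
# -> {'ifo': 'H1', 'tag': 'DMT-SCIENCE', 'version': '1'}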