Dataset schema: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Works like "filter" but joins given filters with OR operator. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Example: >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
def or_filter(self, **filters):
    clone = copy.deepcopy(self)
    clone.adapter.add_query([("OR_QRY", filters)])
    return clone
1,036,311
Applies query ordering. Args: *args: Order by field names. Defaults to ascending; prepend with hyphen (-) for descending ordering. Returns: Self. Queryset object. Examples: >>> Person.objects.order_by('-name', 'join_date')
def order_by(self, *args):
    clone = copy.deepcopy(self)
    clone.adapter.ordered = True
    if args:
        clone.adapter.order_by(*args)
    return clone
1,036,314
Make a raw query. Args: query (str): Solr query
def raw(self, query):
    clone = copy.deepcopy(self)
    clone.adapter._pre_compiled_query = query
    clone.adapter.compiled_query = query
    return clone
1,036,317
Naively slice each data object in the container by the object's index. Args: key: Int, slice, or list by which to extract the sub-container Returns: sub: Sub container of the same format with a view of the data Warning: To ensure that a new container is created, use the copy method. .. code-block:: Python mycontainer[slice].copy()
def slice_naive(self, key):
    kwargs = {'name': self.name, 'description': self.description, 'meta': self.meta}
    for name, data in self._data().items():
        k = name[1:] if name.startswith('_') else name
        kwargs[k] = data.slice_naive(key)
    return self.__class__(**kwargs)
1,036,393
Get the memory usage estimate of the container. Args: string (bool): Return a human readable string (default False) See Also: :func:`~exa.core.container.Container.info`
def memory_usage(self, string=False):
    if string:
        n = getsizeof(self)
        return ' '.join((str(s) for s in convert_bytes(n)))
    return self.info()['size']
1,036,397
Save the container as an HDF5 archive. Args: path (str): Path where to save the container
def save(self, path=None, complevel=1, complib='zlib'):
    if path is None:
        path = self.hexuid + '.hdf5'
    elif os.path.isdir(path):
        path += os.sep + self.hexuid + '.hdf5'
    elif not (path.endswith('.hdf5') or path.endswith('.hdf')):
        raise ValueError('File path must have a ".hdf5" or ".hdf" extension.')
    with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:
        store['kwargs'] = pd.Series()
        store.get_storer('kwargs').attrs.metadata = self._rel()
        fc = 0    # Field counter (see special handling of fields below)
        for name, data in self._data().items():
            if hasattr(data, '_revert_categories'):
                data._revert_categories()
            name = name[1:] if name.startswith('_') else name
            if isinstance(data, Field):    # Fields are handled separately
                fname = 'FIELD{}_'.format(fc) + name + '/'
                store[fname + 'data'] = pd.DataFrame(data)
                for i, field in enumerate(data.field_values):
                    ffname = fname + 'values' + str(i)
                    if isinstance(field, pd.Series):
                        store[ffname] = pd.Series(field)
                    else:
                        store[ffname] = pd.DataFrame(field)
                fc += 1
            elif isinstance(data, Series):
                s = pd.Series(data)
                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
                    s = s.astype('O')
                store[name] = s
            elif isinstance(data, DataFrame):
                store[name] = pd.DataFrame(data)
            elif isinstance(data, SparseSeries):
                s = pd.SparseSeries(data)
                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
                    s = s.astype('O')
                store[name] = s
            elif isinstance(data, SparseDataFrame):
                store[name] = pd.SparseDataFrame(data)
            else:
                if hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
                    data = data.astype('O')
                else:
                    for col in data:
                        if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):
                            data[col] = data[col].astype('O')
                store[name] = data
            if hasattr(data, '_set_categories'):
                data._set_categories()
1,036,399
Load a container object from a persistent location or file path. Args: pkid_or_path: Integer pkid corresponding to the container table or file path Returns: container: The saved container object
def load(cls, pkid_or_path=None):
    path = pkid_or_path
    if isinstance(path, (int, np.int32, np.int64)):
        raise NotImplementedError('Lookup via CMS not implemented.')
    elif not os.path.isfile(path):
        raise FileNotFoundError('File {} not found.'.format(path))
    kwargs = {}
    fields = defaultdict(dict)
    with pd.HDFStore(path) as store:
        for key in store.keys():
            if 'kwargs' in key:
                kwargs.update(store.get_storer(key).attrs.metadata)
            elif "FIELD" in key:
                name, dname = "_".join(key.split("_")[1:]).split("/")
                dname = dname.replace('values', '')
                fields[name][dname] = store[key]
            else:
                name = str(key[1:])
                kwargs[name] = store[key]
    for name, field_data in fields.items():
        fps = field_data.pop('data')
        kwargs[name] = Field(fps, field_values=[field_data[str(arr)]
                                                for arr in sorted(map(int, field_data.keys()))])
    return cls(**kwargs)
1,036,400
Usage: with (--version | <command>)

Arguments:
    command         The command to use as prefix to your context.

Options:
    -h --help       Show this screen.
    --version       Show the current version.
def main():
    arguments = docopt(main.__doc__)
    if arguments.get('--version'):
        print('with {}'.format(withtool.__version__))
        sys.exit()
    while True:
        sub = yield from get_prompt(arguments['<command>'])
        call = '{cmd} {sub}'.format(cmd=arguments['<command>'], sub=sub)
        run(call)
1,036,613
Represents the class as a MappingNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.MappingNode:
    # make a dict with attributes
    logger.info('Representing {} of class {}'.format(data, self.class_.__name__))
    if hasattr(data, 'yatiml_attributes'):
        logger.debug('Found yatiml_attributes()')
        attributes = data.yatiml_attributes()
        if attributes is None:
            raise RuntimeError(('{}.yatiml_attributes() returned None,'
                                ' where a dict was expected.').format(self.class_.__name__))
    else:
        logger.debug('No yatiml_attributes() found, using public attributes')
        argspec = inspect.getfullargspec(data.__init__)
        attribute_names = list(argspec.args[1:])
        attrs = [(name, getattr(data, name)) for name in attribute_names
                 if name != 'yatiml_extra']
        if 'yatiml_extra' in attribute_names:
            if not hasattr(data, 'yatiml_extra'):
                raise RuntimeError(('Class {} takes yatiml_extra but has'
                                    ' no yatiml_extra attribute, and no'
                                    ' yatiml_attributes().').format(self.class_.__name__))
            attrs.extend(data.yatiml_extra.items())
        attributes = yaml.comments.CommentedMap(attrs)
    # convert to a yaml.MappingNode
    represented = dumper.represent_mapping('tag:yaml.org,2002:map', attributes)
    # sweeten
    cnode = Node(represented)
    self.__sweeten(dumper, self.class_, cnode)
    represented = cnode.yaml_node
    logger.debug('End representing {}'.format(data))
    return represented
1,036,643
Applies the user's yatiml_sweeten() function(s), if any. Sweetening is done for the base classes first, then for the derived classes, down the hierarchy to the class we're constructing. Args: dumper: The dumper that is dumping this object. class_: The type of the object to be dumped. represented_object: The object to be dumped.
def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None:
    for base_class in class_.__bases__:
        if base_class in dumper.yaml_representers:
            logger.debug('Sweetening for class {}'.format(self.class_.__name__))
            self.__sweeten(dumper, base_class, node)
    if hasattr(class_, 'yatiml_sweeten'):
        class_.yatiml_sweeten(node)
1,036,644
Represents the class as a ScalarNode. Args: dumper: The dumper to use. data: The user-defined object to dump. Returns: A yaml.Node representing the object.
def __call__(self, dumper: 'Dumper', data: Any) -> yaml.ScalarNode:
    # make a ScalarNode of type str with the name of the value
    logger.info('Representing {} of class {}'.format(data, self.class_.__name__))
    # convert to a yaml.ScalarNode
    represented = dumper.represent_str(data.name)
    # sweeten
    snode = Node(represented)
    if hasattr(self.class_, 'yatiml_sweeten'):
        self.class_.yatiml_sweeten(snode)
    represented = snode.yaml_node
    logger.debug('End representing {}'.format(data))
    return represented
1,036,645
Generate a directory path, and create it if requested. .. code-block:: Python filepath = mkp('base', 'folder', 'file') dirpath = mkp('root', 'path', 'folder', mk=True) Args: *args: File or directory path segments to be concatenated mk (bool): Make the directory (if it doesn't exist) Returns: path (str): File or directory path
def mkp(*args, **kwargs):
    mk = kwargs.pop('mk', False)
    path = os.sep.join(list(args))
    if mk:
        while sep2 in path:
            path = path.replace(sep2, os.sep)
        try:
            os.makedirs(path)
        except FileExistsError:
            pass
    return path
1,036,728
Reduces bytes to more convenient units (e.g. KiB, GiB, TiB, etc.). Args: value (int): Value in bytes Returns: tup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))
def convert_bytes(value):
    n = np.rint(len(str(value))/4).astype(int)
    return value/(1024**n), sizes[n]
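A quick worked sketch of the heuristic, assuming sizes is a module-level sequence like ('B', 'KiB', 'MiB', 'GiB', 'TiB'): 15000 bytes has a 5-digit string representation, 5/4 rounds to 1, so the value is divided by 1024**1 and paired with sizes[1].
>>> convert_bytes(15000)    # hypothetical doctest under the assumption above
(14.6484375, 'KiB')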
1,036,729
Get a list of modules belonging to the given package. Args: key (str): Package or library name (e.g. "exa")
def get_internal_modules(key='exa'):
    key += '.'
    return [v for k, v in sys.modules.items() if k.startswith(key)]
1,036,730
Parse a variant line. Split a variant line and map the fields onto the header columns. Args: variant_line (str): A vcf variant line header_line (list): A list with the header columns Returns: variant_dict (dict): A variant dictionary
def get_variant_dict(variant_line, header_line=None):
    if not header_line:
        logger.debug("No header line, use only first 8 mandatory fields")
        header_line = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
    logger.debug("Building variant dict from variant line {0} and header"
                 " line {1}".format(variant_line, '\t'.join(header_line)))
    splitted_line = variant_line.rstrip().split('\t')
    if len(splitted_line) < len(header_line):
        logger.info('\t'.join(header_line))
        logger.info('\t'.join(splitted_line))
        raise SyntaxError("Length of variant line differs from length of"
                          " header line")
    return dict(zip(header_line, splitted_line))
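A doctest-style sketch of the intended behavior with the default eight-column header (the variant values below are made up):
>>> get_variant_dict('1\t879537\trs1\tT\tC\t100\tPASS\tDP=10')
{'CHROM': '1', 'POS': '879537', 'ID': 'rs1', 'REF': 'T', 'ALT': 'C', 'QUAL': '100', 'FILTER': 'PASS', 'INFO': 'DP=10'}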
1,036,800
Parse the INFO field of a variant. Make a dictionary from the INFO field of a vcf variant. Keys are the info keys and values are the raw strings from the vcf. If the field only has a key (no value), the value in the info dict is True. Args: info_line (str): The INFO field of a vcf variant Returns: info_dict (dict): An INFO dictionary
def get_info_dict(info_line):
    variant_info = {}
    for raw_info in info_line.split(';'):
        splitted_info = raw_info.split('=')
        if len(splitted_info) == 2:
            variant_info[splitted_info[0]] = splitted_info[1]
        else:
            variant_info[splitted_info[0]] = True
    return variant_info
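For example, a key-value pair keeps its raw string value while a flag key maps to True:
>>> get_info_dict('DP=10;AF=0.5;DB')
{'DP': '10', 'AF': '0.5', 'DB': True}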
1,036,801
Build a variant id The variant id is a string made of CHROM_POS_REF_ALT Args: variant_dict (dict): A variant dictionary Returns: variant_id (str)
def get_variant_id(variant_dict=None, variant_line=None):
    if variant_dict:
        chrom = variant_dict['CHROM']
        position = variant_dict['POS']
        ref = variant_dict['REF']
        alt = variant_dict['ALT']
    elif variant_line:
        splitted_line = variant_line.rstrip().split('\t')
        chrom = splitted_line[0]
        position = splitted_line[1]
        ref = splitted_line[3]
        alt = splitted_line[4]
    else:
        raise Exception("Have to provide variant dict or variant line")
    return '_'.join([
        chrom,
        position,
        ref,
        alt,
    ])
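A short sketch with a made-up variant dict:
>>> get_variant_id(variant_dict={'CHROM': '1', 'POS': '879537', 'REF': 'T', 'ALT': 'C'})
'1_879537_T_C'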
1,036,802
Make the vep annotations into dictionaries. A vep dictionary will have the vep column names as keys and the vep annotations as values. The dictionaries are stored in a list. Args: vep_string (string): A string with the CSQ annotation vep_header (list): A list with the vep header Return: vep_annotations (list): A list of vep dicts
def get_vep_info(vep_string, vep_header):
    vep_annotations = [
        dict(zip(vep_header, vep_annotation.split('|')))
        for vep_annotation in vep_string.split(',')
    ]
    return vep_annotations
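A sketch with a made-up two-field header; each comma-separated transcript annotation becomes one dict:
>>> get_vep_info('A|missense_variant,A|intron_variant', ['Allele', 'Consequence'])
[{'Allele': 'A', 'Consequence': 'missense_variant'}, {'Allele': 'A', 'Consequence': 'intron_variant'}]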
1,036,803
Make the snpeff annotations into dictionaries. A snpeff dictionary will have the snpeff column names as keys and the snpeff annotations as values. The dictionaries are stored in a list. One dictionary for each transcript. Args: snpeff_string (string): A string with the ANN annotation snpeff_header (list): A list with the snpeff header Return: snpeff_annotations (list): A list of snpeff dicts
def get_snpeff_info(snpeff_string, snpeff_header):
    snpeff_annotations = [
        dict(zip(snpeff_header, snpeff_annotation.split('|')))
        for snpeff_annotation in snpeff_string.split(',')
    ]
    return snpeff_annotations
1,036,804
Replace the information of an INFO field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: variant_line (str): An annotated variant line
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None):
    new_info = '{0}={1}'.format(keyword, annotation)
    logger.debug("Replacing the variant information {0}".format(new_info))
    fixed_variant = None
    new_info_list = []
    if variant_line:
        logger.debug("Adding information to a variant line")
        splitted_variant = variant_line.rstrip('\n').split('\t')
        logger.debug("Adding information to splitted variant line")
        old_info = splitted_variant[7]
        if old_info == '.':
            new_info_string = new_info
        else:
            splitted_info_string = old_info.split(';')
            for info in splitted_info_string:
                splitted_info_entry = info.split('=')
                if splitted_info_entry[0] == keyword:
                    new_info_list.append(new_info)
                else:
                    new_info_list.append(info)
            new_info_string = ';'.join(new_info_list)
        splitted_variant[7] = new_info_string
        fixed_variant = '\t'.join(splitted_variant)
    elif variant_dict:
        logger.debug("Adding information to a variant dict")
        old_info = variant_dict['INFO']
        if old_info == '.':
            variant_dict['INFO'] = new_info
        else:
            for info in old_info.split(';'):
                splitted_info_entry = info.split('=')
                if splitted_info_entry[0] == keyword:
                    new_info_list.append(new_info)
                else:
                    new_info_list.append(info)
            new_info_string = ';'.join(new_info_list)
            variant_dict['INFO'] = new_info_string
        fixed_variant = variant_dict
    return fixed_variant
1,037,009
Remove the information of an INFO field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key Returns: variant_line (str): An annotated variant line
def remove_vcf_info(keyword, variant_line=None, variant_dict=None):
    logger.debug("Removing variant information {0}".format(keyword))
    fixed_variant = None

    def get_new_info_string(info_string, keyword):
        new_info_list = []
        splitted_info_string = info_string.split(';')
        for info in splitted_info_string:
            splitted_info_entry = info.split('=')
            if splitted_info_entry[0] != keyword:
                new_info_list.append(info)
        new_info_string = ';'.join(new_info_list)
        return new_info_string

    if variant_line:
        logger.debug("Removing information from a variant line")
        splitted_variant = variant_line.rstrip('\n').split('\t')
        old_info = splitted_variant[7]
        if old_info == '.':
            new_info_string = '.'
        else:
            new_info_string = get_new_info_string(old_info, keyword)
        splitted_variant[7] = new_info_string
        fixed_variant = '\t'.join(splitted_variant)
    elif variant_dict:
        logger.debug("Removing information from a variant dict")
        old_info = variant_dict['INFO']
        if old_info == '.':
            variant_dict['INFO'] = old_info
        else:
            new_info_string = get_new_info_string(old_info, keyword)
            variant_dict['INFO'] = new_info_string
        fixed_variant = variant_dict
    return fixed_variant
1,037,010
Add information to the INFO field of a vcf variant line or a variant dict. Arguments: keyword (str): The info field key variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: fixed_variant: str if variant_line was given, dict if variant_dict was given
def add_vcf_info(keyword, variant_line=None, variant_dict=None, annotation=None):
    logger = logging.getLogger(__name__)
    if annotation:
        new_info = '{0}={1}'.format(keyword, annotation)
    else:
        new_info = keyword
    logger.debug("Adding new variant information {0}".format(new_info))
    fixed_variant = None
    if variant_line:
        logger.debug("Adding information to a variant line")
        splitted_variant = variant_line.rstrip('\n').split('\t')
        logger.debug("Adding information to splitted variant line")
        old_info = splitted_variant[7]
        if old_info == '.':
            splitted_variant[7] = new_info
        else:
            splitted_variant[7] = "{0};{1}".format(splitted_variant[7], new_info)
        fixed_variant = '\t'.join(splitted_variant)
    elif variant_dict:
        logger.debug("Adding information to a variant dict")
        old_info = variant_dict['INFO']
        if old_info == '.':
            variant_dict['INFO'] = new_info
        else:
            variant_dict['INFO'] = "{0};{1}".format(old_info, new_info)
        fixed_variant = variant_dict
    return fixed_variant
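A sketch with a made-up variant line; a flag keyword is appended to the existing INFO column:
>>> add_vcf_info('DB', variant_line='1\t879537\trs1\tT\tC\t100\tPASS\tDP=10')
'1\t879537\trs1\tT\tC\t100\tPASS\tDP=10;DB'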
1,037,011
Add fileformat line to the header. Arguments: fileformat (str): The id of the info line
def add_fileformat(self, fileformat):
    self.fileformat = fileformat
    logger.info("Adding fileformat to vcf: {0}".format(fileformat))
    return
1,037,091
Adds an arbitrary metadata line to the header. This must be a key value pair Arguments: key (str): The key of the metadata line value (str): The value of the metadata line
def add_meta_line(self, key, value):
    meta_line = '##{0}={1}'.format(
        key, value
    )
    logger.info("Adding meta line to vcf: {0}".format(meta_line))
    self.parse_meta_data(meta_line)
    return
1,037,092
Add a filter line to the header. Arguments: filter_id (str): The id of the filter line description (str): A description of the filter line
def add_filter(self, filter_id, description):
    filter_line = '##FILTER=<ID={0},Description="{1}">'.format(
        filter_id, description
    )
    logger.info("Adding filter line to vcf: {0}".format(filter_line))
    self.parse_meta_data(filter_line)
    return
1,037,093
Add a format line to the header. Arguments: format_id (str): The id of the format line number (str): Integer or any of [A,R,G,.] entry_type (str): Any of [Integer,Float,Flag,Character,String] description (str): A description of the format line
def add_format(self, format_id, number, entry_type, description):
    format_line = '##FORMAT=<ID={0},Number={1},Type={2},Description="{3}">'.format(
        format_id, number, entry_type, description
    )
    logger.info("Adding format line to vcf: {0}".format(format_line))
    self.parse_meta_data(format_line)
    return
1,037,094
Add an alternative allele (ALT) line to the header. Arguments: alt_id (str): The id of the alternative allele line description (str): A description of the alternative allele line
def add_alt(self, alt_id, description):
    alt_line = '##ALT=<ID={0},Description="{1}">'.format(
        alt_id, description
    )
    logger.info("Adding alternative allele line to vcf: {0}".format(alt_line))
    self.parse_meta_data(alt_line)
    return
1,037,095
Add a contig line to the header. Arguments: contig_id (str): The id of the contig length (str): The length of the contig
def add_contig(self, contig_id, length):
    contig_line = '##contig=<ID={0},length={1}>'.format(
        contig_id, length
    )
    logger.info("Adding contig line to vcf: {0}".format(contig_line))
    self.parse_meta_data(contig_line)
    return
1,037,096
Expand the given requirements file by extending it using pip freeze. Args: input_requirements_filename: the requirements filename to expand output_requirements_filename: the output filename for the expanded requirements file
def expand(conf, output_requirements_filename, input_requirements_filename):
    exit_if_file_not_exists(input_requirements_filename, conf)
    cireqs.expand_requirements(
        requirements_filename=input_requirements_filename,
        expanded_requirements_filename=output_requirements_filename,
        **conf._asdict()
    )
    click.echo(click.style('✓', fg='green') + " {} has been expanded into {}".format(
        input_requirements_filename, output_requirements_filename
    ))
1,037,108
Verify that the given requirements file is not missing any pins. Args: input_requirements_filename: requirements file to verify
def verify(conf, input_requirements_filename):
    exit_if_file_not_exists(input_requirements_filename, conf)
    cireqs.check_if_requirements_are_up_to_date(
        requirements_filename=input_requirements_filename,
        **conf._asdict())
    click.echo(click.style('✓', fg='green') + " {} has been verified".format(input_requirements_filename))
1,037,110
Return the matches of the given state |methcoro| Args: state: see :class:`MatchState` Raises: APIException
async def get_matches(self, state: MatchState = MatchState.all_):
    matches = await self.connection('GET',
                                    'tournaments/{}/matches'.format(self._tournament_id),
                                    state=state.value,
                                    participant_id=self._id)
    # return [await self._tournament.get_match(m['match']['id']) for m in matches] 3.6 only...
    ms = []
    for m in matches:
        ms.append(await self._tournament.get_match(m['match']['id']))
    return ms
1,037,127
Standard dict-like .get() method. Args: item (str): See :meth:`.__getitem__` for details. alt (default None): Alternative value, if item is not found. Returns: obj: `item` or `alt`, if item is not found.
def get(self, item, alt=None):
    try:
        val = self[item]
    except ValueError:
        return alt
    return val if val is not None else alt
1,037,151
set the tournament start date (and check-in duration) |methcoro| Args: date: formatted date as YYYY/MM/DD (2017/02/14) time: formatted time as HH:MM (20:15) check_in_duration (optional): duration in minutes Raises: APIException
async def set_start_date(self, date: str, time: str, check_in_duration: int = None):
    date_time = datetime.strptime(date + ' ' + time, '%Y/%m/%d %H:%M')
    res = await self.connection('PUT',
                                'tournaments/{}'.format(self._id),
                                'tournament',
                                start_at=date_time,
                                check_in_duration=check_in_duration or 0)
    self._refresh_from_json(res)
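A usage sketch; `tournament` is a hypothetical Tournament instance and the call must run inside a coroutine:
# start on Feb 14, 2017 at 20:15 with a 30-minute check-in window
await tournament.set_start_date('2017/02/14', '20:15', check_in_duration=30)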
1,037,224
|methcoro| Args: match_win match_tie game_win game_tie bye Raises: APIException
async def setup_swiss_points(self, match_win: float = None, match_tie: float = None,
                             game_win: float = None, game_tie: float = None,
                             bye: float = None):
    params = {}
    if match_win is not None:
        params['pts_for_match_win'] = match_win
    if match_tie is not None:
        params['pts_for_match_tie'] = match_tie
    if game_win is not None:
        params['pts_for_game_win'] = game_win
    if game_tie is not None:
        params['pts_for_game_tie'] = game_tie
    if bye is not None:
        params['pts_for_bye'] = bye
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')
    await self.update(**params)
1,037,225
|methcoro| Args: match_win match_tie game_win game_tie Raises: APIException
async def setup_round_robin_points(self, match_win: float = None, match_tie: float = None,
                                   game_win: float = None, game_tie: float = None):
    params = {}
    if match_win is not None:
        params['rr_pts_for_match_win'] = match_win
    if match_tie is not None:
        params['rr_pts_for_match_tie'] = match_tie
    if game_win is not None:
        params['rr_pts_for_game_win'] = game_win
    if game_tie is not None:
        params['rr_pts_for_game_tie'] = game_tie
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the points must be given')
    await self.update(**params)
1,037,226
update participants notifications for this tournament |methcoro| Args: on_match_open: Email registered Challonge participants when matches open up for them on_tournament_end: Email registered Challonge participants the results when this tournament ends Raises: APIException
async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None):
    params = {}
    if on_match_open is not None:
        params['notify_users_when_matches_open'] = on_match_open
    if on_tournament_end is not None:
        params['notify_users_when_the_tournament_ends'] = on_tournament_end
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the notifications must be given')
    await self.update(**params)
1,037,227
|methcoro| Args: hide_forum: Hide the forum tab on your Challonge page show_rounds: Double Elimination only - Label each round above the bracket open_signup: Have Challonge host a sign-up page (otherwise, you manually add all participants) Raises: APIException
async def update_website_options(self, hide_forum: bool = None, show_rounds: bool = None,
                                 open_signup: bool = None):
    params = {}
    if hide_forum is not None:
        params['hide_forum'] = hide_forum
    if show_rounds is not None:
        params['show_rounds'] = show_rounds
    if open_signup is not None:
        params['open_signup'] = open_signup
    assert_or_raise(len(params) > 0, ValueError, 'At least one of the options must be given')
    await self.update(**params)
1,037,228
|methcoro| Args: pairing: Raises: APIException
async def update_pairing_method(self, pairing: Pairing):
    do_sequential_pairing = pairing == Pairing.sequential
    await self.update(sequential_pairings=do_sequential_pairing)
1,037,229
get a participant by its id |methcoro| Args: p_id: participant id force_update (default=False): True to force an update to the Challonge API Returns: Participant: None if not found Raises: APIException
async def get_participant(self, p_id: int, force_update=False) -> Participant:
    found_p = self._find_participant(p_id)
    if force_update or found_p is None:
        await self.get_participants()
        found_p = self._find_participant(p_id)
    return found_p
1,037,230
get all participants |methcoro| Args: force_update (default=False): True to force an update to the Challonge API Returns: list[Participant]: Raises: APIException
async def get_participants(self, force_update=False) -> list:
    if force_update or self.participants is None:
        res = await self.connection('GET', 'tournaments/{}/participants'.format(self._id))
        self._refresh_participants_from_json(res)
    return self.participants or []
1,037,231
search a participant by (display) name |methcoro| Args: name: display name of the participant force_update (default=False): True to force an update to the Challonge API Returns: Participant: None if not found Raises: APIException
async def search_participant(self, name, force_update=False):
    if force_update or self.participants is None:
        await self.get_participants()
    if self.participants is not None:
        for p in self.participants:
            if p.name == name:
                return p
    return None
1,037,232
remove a participant from the tournament |methcoro| Args: p: the participant to remove Raises: APIException
async def remove_participant(self, p: Participant):
    await self.connection('DELETE', 'tournaments/{}/participants/{}'.format(self._id, p._id))
    if p in self.participants:
        self.participants.remove(p)
1,037,235
get a single match by id |methcoro| Args: m_id: match id force_update (default=False): True to force an update to the Challonge API Returns: Match Raises: APIException
async def get_match(self, m_id, force_update=False) -> Match:
    found_m = self._find_match(m_id)
    if force_update or found_m is None:
        await self.get_matches()
        found_m = self._find_match(m_id)
    return found_m
1,037,236
get all matches (once the tournament is started) |methcoro| Args: force_update (default=False): True to force an update to the Challonge API Returns: list[Match]: Raises: APIException
async def get_matches(self, force_update=False) -> list:
    if force_update or self.matches is None:
        res = await self.connection('GET',
                                    'tournaments/{}/matches'.format(self._id),
                                    include_attachments=1)
        self._refresh_matches_from_json(res)
    return self.matches or []
1,037,237
Generator that yields typed object names of the class (or the object's class). Args: obj_or_cls (object): Class object or instance of a class Yields: attrname (str): Names of class attributes that are strongly typed
def yield_typed(obj_or_cls):
    if not isinstance(obj_or_cls, type):
        obj_or_cls = type(obj_or_cls)
    for attrname in dir(obj_or_cls):
        if hasattr(obj_or_cls, attrname):
            attr = getattr(obj_or_cls, attrname)
            # !!! Important hardcoded value here !!!
            if (isinstance(attr, property)
                    and isinstance(attr.__doc__, six.string_types)
                    and "__typed__" in attr.__doc__):
                yield attrname
1,037,293
Construct the property. Args: name (str): Attribute (property) name Returns: prop (property): Custom property definition with support for typing
def __call__(self, name):
    priv = "_" + name    # Reference to the variable's value
    # The following is a definition of a Python property. Properties have
    # get, set, and delete functions as well as documentation. The variable
    # "this" references the class object instance where the property exists;
    # it does not reference the instance of this ("Typed") class.

    def getter(this):
        # If the variable value (referenced by priv) does not exist or is None
        # AND the class has some automatic way of setting the value,
        # set the value first then proceed to getting it.
        if ((not hasattr(this, priv) or getattr(this, priv) is None)
                and hasattr(this, "_setters")
                and isinstance(this._setters, (list, tuple))):
            for prefix in this._setters:
                cmd = "{}{}".format(prefix, priv)
                if hasattr(this, cmd):
                    getattr(this, cmd)()    # Automatic method call
                if hasattr(this, priv):
                    break
        # Perform pre-get actions (if any)
        if isinstance(self.pre_get, str):
            getattr(this, self.pre_get)()
        elif callable(self.pre_get):
            self.pre_get(this)
        return getattr(this, priv, None)    # Returns None by default

    def setter(this, value):
        # If auto-conversion is on and the value is not the correct type (and
        # also is not None), attempt to convert types
        if self.autoconv and not isinstance(value, self.types) and value is not None:
            for t in self.types:
                try:
                    value = t(value)
                    break
                except Exception as e:    # Catch all exceptions but if conversion fails ...
                    if self.verbose:
                        warnings.warn("Conversion of {} (with type {}) failed to type {}\n{}".format(
                            name, type(value), t, str(e)))
            else:    # ... raise a TypeError
                raise TypeError("Cannot convert object of type {} to any of {}.".format(
                    type(value), self.types))
        # If the value is none and none is not allowed,
        # or the value is some other type (that is not none) and not of a type
        # that is allowed, raise an error.
        elif ((value is None and self.allow_none == False)
                or (not isinstance(value, self.types) and value is not None)):
            raise TypeError("Object '{}' cannot have type {}, must be of type(s) {}.".format(
                name, type(value), self.types))
        # Perform pre-set actions (if any)
        if isinstance(self.pre_set, str):
            getattr(this, self.pre_set)()
        elif callable(self.pre_set):
            self.pre_set(this)
        if isinstance(this, (pd.DataFrame, pd.SparseDataFrame)):
            this[priv] = value
        else:
            setattr(this, priv, value)    # Set the property value
        # Perform post-set actions (if any)
        if isinstance(self.post_set, str):
            getattr(this, self.post_set)()
        elif callable(self.post_set):
            self.post_set(this)

    def deleter(this):
        # Perform pre-del actions (if any)
        if isinstance(self.pre_del, str):
            getattr(this, self.pre_del)()
        elif callable(self.pre_del):
            self.pre_del(this)
        delattr(this, priv)    # Delete the attribute (allows for dynamic naming)
        # Perform post-del actions (if any)
        if isinstance(self.post_del, str):
            getattr(this, self.post_del)()
        elif callable(self.post_del):
            self.post_del(this)

    return property(getter, setter, deleter, doc=self.doc)
1,037,294
Get the choices for the given field. Args: field (str): Name of field. Returns: List of tuples. [(value, name), ...]
def get_choices_for(self, field):
    choices = self._fields[field].choices
    if isinstance(choices, six.string_types):
        return [(d['value'], d['name']) for d in self._choices_manager.get_all(choices)]
    else:
        return choices
1,037,321
Fills the object's fields with given data dict. Internally calls the self._load_data() method. Args: data (dict): Data to fill object's fields. from_db (bool): if data coming from db then we will use related field type's _load_data method Returns: Self. Returns objects itself for chainability.
def set_data(self, data, from_db=False):
    self._load_data(data, from_db)
    return self
1,037,322
Looks for changed relation fields between new and old data (before/after save). Creates back_link references for updated fields. Args: old_data: Object's data before save.
def _handle_changed_fields(self, old_data):
    for link in self.get_links(is_set=False):
        fld_id = un_camel_id(link['field'])
        if not old_data or old_data.get(fld_id) != self._data[fld_id]:
            # self is new or linked model changed
            if self._data[fld_id]:    # exists
                linked_mdl = getattr(self, link['field'])
                self._add_back_link(linked_mdl, link)
1,037,328
Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message
def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a scalar')
    if (isinstance(node, yaml.ScalarNode)
            and node.tag == scalar_type_to_tag[expected_type]):
        return [expected_type], ''
    message = 'Failed to recognize a {}\n{}\n'.format(
        type_to_desc(expected_type), node.start_mark)
    return [], message
1,037,346
Recognize a node that we expect to be a list of some kind. Args: node: The node to recognize. expected_type: List[...something...] Returns expected_type and the empty string if it was recognized, [] and an error message otherwise.
def __recognize_list(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a list')
    if not isinstance(node, yaml.SequenceNode):
        message = '{}{}Expected a list here.'.format(node.start_mark, os.linesep)
        return [], message
    item_type = generic_type_args(expected_type)[0]
    for item in node.value:
        recognized_types, message = self.recognize(item, item_type)
        if len(recognized_types) == 0:
            return [], message
        if len(recognized_types) > 1:
            recognized_types = [
                List[t]    # type: ignore
                for t in recognized_types
            ]
            return recognized_types, message
    return [expected_type], ''
1,037,347
Recognize a node that we expect to be a dict of some kind. Args: node: The node to recognize. expected_type: Dict[str, ...something...] Returns: expected_type if it was recognized, [] otherwise.
def __recognize_dict(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a dict')
    if not issubclass(generic_type_args(expected_type)[0], str):
        raise RuntimeError('YAtiML only supports dicts with strings as keys')
    if not isinstance(node, yaml.MappingNode):
        message = '{}{}Expected a dict/mapping here'.format(node.start_mark, os.linesep)
        return [], message
    value_type = generic_type_args(expected_type)[1]
    for _, value in node.value:
        recognized_value_types, message = self.recognize(value, value_type)
        if len(recognized_value_types) == 0:
            return [], message
        if len(recognized_value_types) > 1:
            return [
                Dict[str, t]    # type: ignore
                for t in recognized_value_types
            ], message
    return [expected_type], ''
1,037,348
Recognize a node that we expect to be one of a union of types. Args: node: The node to recognize. expected_type: Union[...something...] Returns: The specific type that was recognized, multiple, or none.
def __recognize_union(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a union')
    recognized_types = []
    message = ''
    union_types = generic_type_args(expected_type)
    logger.debug('Union types {}'.format(union_types))
    for possible_type in union_types:
        recognized_type, msg = self.recognize(node, possible_type)
        if len(recognized_type) == 0:
            message += msg
        recognized_types.extend(recognized_type)
    recognized_types = list(set(recognized_types))
    if bool in recognized_types and bool_union_fix in recognized_types:
        recognized_types.remove(bool_union_fix)
    if len(recognized_types) == 0:
        return recognized_types, message
    elif len(recognized_types) > 1:
        message = ('{}{}Could not determine which of the following types'
                   ' this is: {}').format(node.start_mark, os.linesep, recognized_types)
        return recognized_types, message
    return recognized_types, ''
1,037,349
Recognize a user-defined class in the node. This tries to recognize only exactly the specified class. It recurses down into the class's attributes, but not to its subclasses. See also __recognize_user_classes(). Args: node: The node to recognize. expected_type: A user-defined class. Returns: A list containing the user-defined class, or an empty list.
def __recognize_user_class(self, node: yaml.Node, expected_type: Type) -> RecResult:
    logger.debug('Recognizing as a user-defined class')
    loc_str = '{}{}'.format(node.start_mark, os.linesep)
    if hasattr(expected_type, 'yatiml_recognize'):
        try:
            unode = UnknownNode(self, node)
            expected_type.yatiml_recognize(unode)
            return [expected_type], ''
        except RecognitionError as e:
            if len(e.args) > 0:
                message = ('Error recognizing a {}\n{}because of the'
                           ' following error(s): {}').format(
                               expected_type.__class__, loc_str, indent(e.args[0], '    '))
            else:
                message = 'Error recognizing a {}\n{}'.format(expected_type.__class__, loc_str)
            return [], message
    else:
        if issubclass(expected_type, enum.Enum):
            if (not isinstance(node, yaml.ScalarNode)
                    or node.tag != 'tag:yaml.org,2002:str'):
                message = 'Expected an enum value from {}\n{}'.format(
                    expected_type.__class__, loc_str)
                return [], message
        elif (issubclass(expected_type, UserString)
              or issubclass(expected_type, str)):
            if (not isinstance(node, yaml.ScalarNode)
                    or node.tag != 'tag:yaml.org,2002:str'):
                message = 'Expected a string matching {}\n{}'.format(
                    expected_type.__class__, loc_str)
                return [], message
        else:
            # auto-recognize based on constructor signature
            if not isinstance(node, yaml.MappingNode):
                message = 'Expected a dict/mapping here\n{}'.format(loc_str)
                return [], message
            for attr_name, type_, required in class_subobjects(expected_type):
                cnode = Node(node)
                # try exact match first, dashes if that doesn't match
                for name in [attr_name, attr_name.replace('_', '-')]:
                    if cnode.has_attribute(name):
                        subnode = cnode.get_attribute(name)
                        recognized_types, message = self.recognize(subnode.yaml_node, type_)
                        if len(recognized_types) == 0:
                            message = ('Failed when checking attribute'
                                       ' {}:\n{}').format(name, indent(message, '    '))
                            return [], message
                        break
                else:
                    if required:
                        message = ('Error recognizing a {}\n{}because it'
                                   ' is missing an attribute named {}').format(
                                       expected_type.__name__, loc_str, attr_name)
                        if '_' in attr_name:
                            message += ' or maybe {}.\n'.format(attr_name.replace('_', '-'))
                        else:
                            message += '.\n'
                        return [], message
        return [expected_type], ''
1,037,350
Uses riak http search query endpoint for advanced SOLR queries. Args: field (str): facet field count_deleted (bool): ignore deleted or not Returns: (dict): pairs of field values and number of counts
def distinct_values_of(self, field, count_deleted=False):
    solr_params = "facet=true&facet.field=%s&rows=0" % field
    result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)
    facet_fields = result['facet_counts']['facet_fields'][field]
    keys = facet_fields[0::2]
    vals = facet_fields[1::2]
    return dict(zip(keys, vals))
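Solr returns facet_fields as a flat alternating list of value, count pairs; the two slices re-pair them. A sketch of that last step (field values and counts are made up):
>>> facet_fields = ['istanbul', 42, 'ankara', 7]
>>> dict(zip(facet_fields[0::2], facet_fields[1::2]))
{'istanbul': 42, 'ankara': 7}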
1,037,404
Sends the given list of tuples to the multiget method and collects the riak objects' keys and data. For each multiget call a separate pool is used, and the pool is stopped after execution. Args: key_list_tuple (list of tuples): [('bucket_type','bucket','riak_key')] Example: [('models','personel','McAPchPZzB6RVJ8QI2XSVQk4mUR')] Returns: objs (tuple): obj's key and obj's value
def riak_multi_get(self, key_list_tuple):
    pool = PyokoMG()
    objs = self._client.multiget(key_list_tuple, pool=pool)
    pool.stop()
    return objs
1,037,407
Writes a copy of the object's current state to the write-once mirror bucket. Args: data (dict): Model instance's all data for versioning. model (instance): Model instance. Returns: key (str): Version_bucket key of the version record.
def _write_version(self, data, model):
    vdata = {'data': data,
             'key': model.key,
             'model': model.Meta.bucket_name,
             'timestamp': time.time()}
    obj = version_bucket.new(data=vdata)
    obj.add_index('key_bin', model.key)
    obj.add_index('model_bin', vdata['model'])
    obj.add_index('timestamp_int', int(vdata['timestamp']))
    obj.store()
    return obj.key
1,037,412
Creates a log entry for the current object. Args: version_key (str): Version_bucket key from _write_version(). meta_data (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int'). [('lorem','bin'),('dolar','int')]
def _write_log(self, version_key, meta_data, index_fields):
    meta_data = meta_data or {}
    meta_data.update({
        'version_key': version_key,
        'timestamp': time.time(),
    })
    obj = log_bucket.new(data=meta_data)
    obj.add_index('version_key_bin', version_key)
    obj.add_index('timestamp_int', int(meta_data['timestamp']))
    for field, index_type in index_fields:
        obj.add_index('%s_%s' % (field, index_type), meta_data.get(field, ""))
    obj.store()
1,037,413
If key is not None, tries to get obj from cache first. If not found, tries to get from riak and sets to cache. If key is None, then execute solr query and checks result. Returns obj data and key tuple or raises exception ObjectDoesNotExist or MultipleObjectsReturned. Args: key(str): obj key Return: (tuple): obj data dict, obj key
def get(self, key=None):
    if key:
        key = ub_to_str(key)
        if settings.ENABLE_CACHING:
            return self.get_from_cache(key) or self.set_to_cache(self._get_from_riak(key))
        else:
            return self._get_from_riak(key)
    else:
        self._exec_query()
        if not self._solr_cache['docs']:
            raise ObjectDoesNotExist("%s %s" % (self.index_name, self.compiled_query))
        if self.count() > 1:
            raise MultipleObjectsReturned(
                "%s objects returned for %s" % (self.count(), self._model_class.__name__))
        return self._get_from_riak(self._solr_cache['docs'][0]['_yz_rk'])
1,037,418
Applies query ordering. New sort parameters are merged into the current ones; entries for the same field overwrite the existing ones. Args: *args: Order by field names. Defaults to ascending; prepend with hyphen (-) for descending ordering.
def order_by(self, *args):
    if self._solr_locked:
        raise Exception("Query already executed, no changes can be made."
                        "%s %s" % (self._solr_query, self._solr_params))
    for arg in args:
        if arg.startswith('-'):
            self._solr_params['sort'][arg[1:]] = 'desc'
        else:
            self._solr_params['sort'][arg] = 'asc'
1,037,421
Escapes the query if it is not already escaped. Args: query: Query value. escaped (bool): Whether the query is already escaped. Returns: Escaped query value.
def _escape_query(self, query, escaped=False):
    if escaped:
        return query
    query = six.text_type(query)
    for e in ['+', '-', '&&', '||', '!', '(', ')', '{', '}', '[', ']',
              '^', '"', '~', '*', '?', ':', ' ']:
        query = query.replace(e, "\\%s" % e)
    return query
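Each special Solr character is prefixed with a backslash. A sketch (`qs` is a hypothetical queryset instance):
>>> qs._escape_query('jo:ker (1)')
'jo\\:ker\\ \\(1\\)'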
1,037,424
Parses query_value according to query_type. Args: modifier (str): Type of query. Exact, contains, lte etc. qval: Value partition of the query. Returns: Parsed query_value.
def _parse_query_modifier(self, modifier, qval, is_escaped):
    if modifier == 'range':
        if not qval[0]:
            start = '*'
        elif isinstance(qval[0], date):
            start = self._handle_date(qval[0])
        elif isinstance(qval[0], datetime):
            start = self._handle_datetime(qval[0])
        elif not is_escaped:
            start = self._escape_query(qval[0])
        else:
            start = qval[0]
        if not qval[1]:
            end = '*'
        elif isinstance(qval[1], date):
            end = self._handle_date(qval[1])
        elif isinstance(qval[1], datetime):
            end = self._handle_datetime(qval[1])
        elif not is_escaped:
            end = self._escape_query(qval[1])
        else:
            end = qval[1]
        qval = '[%s TO %s]' % (start, end)
    else:
        if not is_escaped and not isinstance(qval, (date, datetime, int, float)):
            qval = self._escape_query(qval)
        if modifier == 'exact':
            qval = qval
        elif modifier == 'contains':
            qval = "*%s*" % qval
        elif modifier == 'startswith':
            qval = "%s*" % qval
        elif modifier == 'endswith':
            qval = "*%s" % qval    # wildcard before the value for suffix matching
        elif modifier == 'lte':
            qval = '[* TO %s]' % qval
        elif modifier == 'gte':
            qval = '[%s TO *]' % qval
        elif modifier == 'lt':
            if isinstance(qval, int):
                qval -= 1
            qval = '[* TO %s]' % qval
        elif modifier == 'gt':
            if isinstance(qval, int):
                qval += 1
            qval = '[%s TO *]' % qval
    return qval
1,037,425
Strips the query modifier from the key and calls the appropriate value modifier. Args: key (str): Query key val: Query value Returns: Parsed query key and value.
def _parse_query_key(self, key, val, is_escaped):
    if key.endswith('__contains'):
        key = key[:-10]
        val = self._parse_query_modifier('contains', val, is_escaped)
    elif key.endswith('__range'):
        key = key[:-7]
        val = self._parse_query_modifier('range', val, is_escaped)
    elif key.endswith('__startswith'):
        key = key[:-12]
        val = self._parse_query_modifier('startswith', val, is_escaped)
    elif key.endswith('__endswith'):
        key = key[:-10]
        val = self._parse_query_modifier('endswith', val, is_escaped)
    # lower than
    elif key.endswith('__lt'):
        key = key[:-4]
        val = self._parse_query_modifier('lt', val, is_escaped)
    # greater than
    elif key.endswith('__gt'):
        key = key[:-4]
        val = self._parse_query_modifier('gt', val, is_escaped)
    # lower than or equal
    elif key.endswith('__lte'):
        key = key[:-5]
        val = self._parse_query_modifier('lte', val, is_escaped)
    # greater than or equal
    elif key.endswith('__gte'):
        key = key[:-5]
        val = self._parse_query_modifier('gte', val, is_escaped)
    elif key != 'NOKEY' and not is_escaped:
        val = self._escape_query(val)
    return key, val
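A sketch of the key/value splitting (`qs` is a hypothetical queryset instance; is_escaped=True skips escaping):
>>> qs._parse_query_key('age__gte', 16, True)
('age', '[16 TO *]')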
1,037,426
Return the chromosome priority. Arguments: chrom (str): The chromosome name from the vcf chrom_dict (dict): A map of chromosome names and their priority Return: priority (str): The priority for this chromosome
def get_chromosome_priority(chrom, chrom_dict={}):
    priority = 0
    chrom = str(chrom).lstrip('chr')
    if chrom_dict:
        priority = chrom_dict.get(chrom, 0)
    else:
        try:
            if int(chrom) < 23:
                priority = int(chrom)
        except ValueError:
            if chrom == 'X':
                priority = 23
            elif chrom == 'Y':
                priority = 24
            elif chrom == 'MT':
                priority = 25
            else:
                priority = 26
    return str(priority)
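Autosomes keep their number, and X, Y, and MT sort after them:
>>> get_chromosome_priority('7')
'7'
>>> get_chromosome_priority('chrX')
'23'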
1,037,467
Sort the variants of a vcf file. Args: vcf_handle (iterable): An iterable with vcf variant lines Returns: sorted_variants (Iterable): An iterable with sorted variants
def sort_variants(vcf_handle):
    logger.debug("Creating temp file")
    temp_file = NamedTemporaryFile(delete=False)
    temp_file.close()
    logger.debug("Opening temp file with codecs")
    temp_file_handle = codecs.open(
        temp_file.name,
        mode='w',
        encoding='utf-8',
        errors='replace'
    )
    try:
        with codecs.open(temp_file.name, mode='w', encoding='utf-8', errors='replace') as f:
            for line in vcf_handle:
                if not line.startswith('#'):
                    line = line.rstrip().split('\t')
                    chrom = line[0]
                    priority = get_chromosome_priority(chrom)
                    print_line = "{0}\t{1}\n".format(priority, '\t'.join(line))
                    f.write(print_line)
        # Sort the variants
        sort_variant_file(temp_file.name)
        with codecs.open(temp_file.name, mode='r', encoding='utf-8', errors='replace') as f:
            for line in f:
                line = line.rstrip().split('\t')
                yield '\t'.join(line[1:])
    except Exception as err:
        logger.error("Something went wrong")
        logger.error(err)
    finally:
        logger.debug("Deleting temp file")
        os.remove(temp_file.name)
        logger.debug("Temp file deleted")
1,037,468
Sort a modified variant file in place. Sorting is based on the first column and the POS. Uses unix sort to sort the variants and overwrites the infile. Args: infile (str): The path to the file to sort
def sort_variant_file(infile):
    command = [
        'sort',
    ]
    command.append('-n')
    command.append('-k1')
    command.append('-k3')
    command = command + [infile, '-o', infile]
    logger.info("Start sorting variants...")
    logger.info("Sort command: {0}".format(' '.join(command)))
    sort_start = datetime.now()
    try:
        call(command)
    except OSError as e:
        logger.warning("unix command sort does not seem to exist on your system...")
        logger.warning("genmod needs unix sort to provide a sorted output.")
        logger.warning("Output VCF will not be sorted since genmod can not find"
                       " unix sort")
        raise e
    logger.info("Sorting done. Time to sort: {0}".format(datetime.now() - sort_start))
    return
1,037,469
prepare the model fields, nodes and relations Args: node_name (str): name of the node we are currently processing attrs (dict): attribute dict class_type (str): Type of class. Can be one of these: 'ListNode', 'Model', 'Node'
def process_attributes_of_node(attrs, node_name, class_type):
    # print("Node: %s" % node_name)
    attrs['_nodes'] = {}
    attrs['_linked_models'] = defaultdict(list)
    attrs['_debug_linked_models'] = defaultdict(list)
    attrs['_lazy_linked_models'] = defaultdict(list)
    attrs['_fields'] = {}
    attrs['_uniques'] = []
    # attrs['_many_to_models'] = []
    # iterating over attributes of the soon to be created class object.
    for key, attr in list(attrs.items()):
        # if it's a class (not instance) and its type is Node or ListNode
        if hasattr(attr, '__base__') and getattr(attr.__base__, '_TYPE', '') in ['Node', 'ListNode']:
            # converted pops to dict access to allow sphinx to
            # properly document the models
            # attrs['_nodes'][key] = attrs.pop(key)
            attrs['_nodes'][key] = attrs[key]
        else:
            # otherwise it should be a field or linked model
            attr_type = getattr(attr, '_TYPE', '')
            if attr_type == 'Model':
                attrs['%s_id' % key] = ''
                # lnk_mdl_ins = attrs.pop(key)
                lnk_mdl_ins = attrs[key]
                lnk = {
                    'null': lnk_mdl_ins.null or class_type == 'ListNode',
                    'link_source': True,
                    'mdl': lnk_mdl_ins.__class__,
                    'o2o': lnk_mdl_ins._is_one_to_one,
                    'm2m': class_type == 'ListNode',
                    'reverse': lnk_mdl_ins.reverse_name,
                    'verbose': lnk_mdl_ins.verbose_name,
                    'field': key,
                    'is_set': False,
                }
                attrs['_linked_models'][attr.__class__.__name__].append(lnk)
                debug_lnk = lnk.copy()
                debug_lnk['lnksrc'] = 'process_attributes_of_node'
                attrs['_debug_linked_models'][attr.__class__.__name__].append(debug_lnk)
            elif attr_type == 'Field':
                attr.name = key
                attrs['_fields'][key] = attr
                if attr.unique:
                    attrs['_uniques'].append(key)
            elif attr_type == 'Link':
                # lzy_lnk = attrs.pop(key)
                attrs['%s_id' % key] = ''
                lzy_lnk = attrs[key]
                attrs['_lazy_linked_models'][key].append({'from': node_name,
                                                          'to': lzy_lnk.link_to,
                                                          'o2o': lzy_lnk.one_to_one,
                                                          'verbose': lzy_lnk.verbose_name,
                                                          'reverse': lzy_lnk.reverse_name,
                                                          'field': key})
1,037,743
Creates a new user, validates its credentials and returns it. |funccoro| Args: username: username as specified on the challonge website api_key: key as found on the challonge `settings <https://challonge.com/settings/developer>`_ Returns: User: a logged in user if no exception has been raised Raises: APIException
async def get_user(username: str, api_key: str, **kwargs) -> User:
    new_user = User(username, api_key, **kwargs)
    await new_user.validate()
    return new_user
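A usage sketch; the credentials are placeholders and the call must run inside a coroutine:
# validate the account once, then reuse the returned User object
user = await get_user('my_username', 'my_api_key')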
1,037,753
gets all user's tournaments |methcoro| Args: subdomain: *optional* subdomain needs to be given explicitly to get tournaments in a subdomain force_update: *optional* set to True to force the data update from Challonge Returns: list[Tournament]: list of all the user tournaments Raises: APIException
async def get_tournaments(self, subdomain: str = None, force_update: bool = False) -> list:
    if self.tournaments is None:
        force_update = True
        self._subdomains_searched.append('' if subdomain is None else subdomain)
    elif subdomain is None and '' not in self._subdomains_searched:
        force_update = True
        self._subdomains_searched.append('')
    elif subdomain is not None and subdomain not in self._subdomains_searched:
        force_update = True
        self._subdomains_searched.append(subdomain)
    if force_update:
        params = {
            'include_participants': 1 if AUTO_GET_PARTICIPANTS else 0,
            'include_matches': 1 if AUTO_GET_MATCHES else 0
        }
        if subdomain is not None:
            params['subdomain'] = subdomain
        res = await self.connection('GET', 'tournaments', **params)
        if len(res) == 0:
            self.tournaments = []
        else:
            for t_data in res:
                self._refresh_tournament_from_json(t_data)
    return self.tournaments
1,037,759
creates a simple tournament with basic options |methcoro| Args: name: name of the new tournament url: url of the new tournament (http://challonge.com/url) tournament_type: Defaults to TournamentType.single_elimination params: optional params (see http://api.challonge.com/v1/documents/tournaments/create) Returns: Tournament: the newly created tournament Raises: APIException
async def create_tournament(self, name: str, url: str,
                            tournament_type: TournamentType = TournamentType.single_elimination,
                            **params) -> Tournament:
    params.update({
        'name': name,
        'url': url,
        'tournament_type': tournament_type.value,
    })
    res = await self.connection('POST', 'tournaments', 'tournament', **params)
    self._refresh_tournament_from_json(res)
    return self._find_tournament_by_id(res['tournament']['id'])
1,037,760
converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str)
def ub_to_str(string):
    if not isinstance(string, str):
        if six.PY2:
            return str(string)
        else:
            return string.decode()
    return string
1,037,765
Prettier print for nested data. Args: input: Input data return_data (bool): Default False. Prints if False, returns the formatted string if True. Returns: None | Pretty formatted text representation of input data.
def pprnt(input, return_data=False):
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[32m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    import json, re
    result = json.dumps(input, sort_keys=True, indent=4)
    result = re.sub(r'(")(\w*?_id)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
    result = re.sub(r'(")(\w*?_set)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
    result = re.sub(r'(\n *?")(\w*?)(":)', r'\1%s%s\2%s\3' % (BOLD, OKGREEN, ENDC), result)
    if not return_data:
        print(result)
    else:
        return result
1,037,768
Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add.
def add_to_dumper(dumper: Type, classes: List[Type]) -> None:
    if not isinstance(classes, list):
        classes = [classes]    # type: ignore
    for class_ in classes:
        if issubclass(class_, enum.Enum):
            dumper.add_representer(class_, EnumRepresenter(class_))
        elif issubclass(class_, str) or issubclass(class_, UserString):
            dumper.add_representer(class_, UserStringRepresenter(class_))
        else:
            dumper.add_representer(class_, Representer(class_))
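A usage sketch; MyDumper, Circle and Square are hypothetical names, with the dumper derived from yatiml.Dumper as the docstring describes:
# register two user-defined classes on a custom dumper class
class MyDumper(yatiml.Dumper):
    pass
add_to_dumper(MyDumper, [Circle, Square])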
1,037,817
Display Warning. Method prints the warning message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def warning(message, code='WARNING'):
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    output = now + ' [' + torn.plugins.colors.WARNING + \
        code + torn.plugins.colors.ENDC + '] \t' + \
        message
    print(output)
1,037,868
Display Information. Method prints the information message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def info(message, code='INFO'):
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    output = now + ' [' + torn.plugins.colors.OKBLUE + \
        code + torn.plugins.colors.ENDC + '] \t' + \
        message
    print(output)
1,037,869
Display Error. Method prints the error message, message being given as an input. Arguments: message {string} -- The message to be displayed.
def error(message, code='ERROR'):
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    output = now + ' [' + torn.plugins.colors.FAIL + \
        code + torn.plugins.colors.ENDC + '] \t' + \
        message
    print(output)
1,037,870
Sort values, but put numbers after alphabetically sorted words. This function is here to make outputs diff-compatible with Aleph. Example:: >>> sorted(["b", "1", "a"]) ['1', 'a', 'b'] >>> resorted(["b", "1", "a"]) ['a', 'b', '1'] Args: values (iterable): any iterable object/list/tuple/whatever. Returns: list of sorted values, but with numbers after words
def resorted(values):
    if not values:
        return values
    values = sorted(values)
    # look for first word
    first_word = next(
        (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()),
        None
    )
    # if not found, just return the values
    if first_word is None:
        return values
    words = values[first_word:]
    numbers = values[:first_word]
    return words + numbers
1,037,979
Sets the value of the node to a scalar value. After this, is_scalar(type(value)) will return true. Args: value: The value to set this node to, a str, int, float, bool, or None.
def set_value(self, value: ScalarType) -> None:
    if isinstance(value, bool):
        value_str = 'true' if value else 'false'
    else:
        value_str = str(value)
    start_mark = self.yaml_node.start_mark
    end_mark = self.yaml_node.end_mark
    # If we're of a class type, then we want to keep that tag so that the
    # correct Constructor is called. If we're a built-in type, set the tag
    # to the appropriate YAML tag.
    tag = self.yaml_node.tag
    if tag.startswith('tag:yaml.org,2002:'):
        tag = scalar_type_to_tag[type(value)]
    new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)
    self.yaml_node = new_node
1,038,042
Whether the node has an attribute with the given name. Use only if is_mapping() returns True. Args: attribute: The name of the attribute to check for. Returns: True iff the attribute is present.
def has_attribute(self, attribute: str) -> bool:
    return any([
        key_node.value == attribute
        for key_node, _ in self.yaml_node.value
    ])
1,038,044
Returns the node representing the given attribute's value. Use only if is_mapping() returns true. Args: attribute: The name of the attribute to retrieve. Raises: KeyError: If the attribute does not exist. Returns: A node representing the value.
def get_attribute(self, attribute: str) -> 'Node':
    matches = [
        value_node for key_node, value_node in self.yaml_node.value
        if key_node.value == attribute
    ]
    if len(matches) != 1:
        raise SeasoningError(
            'Attribute {} not found, or found multiple times'.format(
                attribute))
    return Node(matches[0])
1,038,046
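A hedged sketch combining the two lookups above in a savorize hook; the attribute name 'port' is hypothetical:

@classmethod
def yatiml_savorize(cls, node) -> None:
    # Guard with has_attribute to avoid the error get_attribute raises.
    if node.has_attribute('port'):
        port_node = node.get_attribute('port')
        if port_node.is_scalar(str):
            # Convert a quoted port number to an int scalar.
            port_node.set_value(int(port_node.get_value()))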
Remove an attribute from the node. Use only if is_mapping() returns True. Args: attribute: The name of the attribute to remove.
def remove_attribute(self, attribute: str) -> None:
    attr_index = self.__attr_index(attribute)
    if attr_index is not None:
        self.yaml_node.value.pop(attr_index)
1,038,048
Renames an attribute. Use only if is_mapping() returns true. If the attribute does not exist, this will do nothing. Args: attribute: The (old) name of the attribute to rename. new_name: The new name to rename it to.
def rename_attribute(self, attribute: str, new_name: str) -> None:
    for key_node, _ in self.yaml_node.value:
        if key_node.value == attribute:
            key_node.value = new_name
            break
1,038,049
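A hedged sketch of a small schema migration using the two mutators above; the attribute names are hypothetical, and both calls are safe no-ops when the attribute is absent:

@classmethod
def yatiml_savorize(cls, node) -> None:
    node.rename_attribute('colour', 'color')  # accept the old spelling
    node.remove_attribute('legacy_id')        # drop a retired field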
Create an UnknownNode for a particular mapping node. The member functions will act on the contained node. Args: recognizer: The recognizer to use for recognizing attribute types. node: The node to operate on.
def __init__(self, recognizer: IRecognizer, node: yaml.Node) -> None:
    self.__recognizer = recognizer
    self.yaml_node = node
1,038,055
Require the node to be a scalar. If additional arguments are passed, these are taken as a list \ of valid types; if the node matches one of these, then it is \ accepted. Example: # Match either an int or a string node.require_scalar(int, str) Arguments: args: One or more types to match one of.
def require_scalar(self, *args: Type) -> None:
    node = Node(self.yaml_node)
    if len(args) == 0:
        if not node.is_scalar():
            raise RecognitionError(('{}{}A scalar is required').format(
                self.yaml_node.start_mark, os.linesep))
    else:
        for typ in args:
            if node.is_scalar(typ):
                return
        raise RecognitionError(
            ('{}{}A scalar of type {} is required').format(
                self.yaml_node.start_mark, os.linesep, args))
1,038,056
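A hedged sketch of require_scalar in a recognize hook; the yatiml_recognize hook name is assumed from YAtiML's extension points:

class Percentage:
    @classmethod
    def yatiml_recognize(cls, node) -> None:
        # Accept either an int or a float scalar for this type.
        node.require_scalar(int, float)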
Require an attribute on the node to exist. If `typ` is given, the attribute must have this type. Args: attribute: The name of the attribute / mapping key. typ: The type the attribute must have.
def require_attribute(self, attribute: str, typ: Type = _Any) -> None:
    attr_nodes = [
        value_node for key_node, value_node in self.yaml_node.value
        if key_node.value == attribute
    ]
    if len(attr_nodes) == 0:
        raise RecognitionError(
            ('{}{}Missing required attribute {}').format(
                self.yaml_node.start_mark, os.linesep, attribute))
    attr_node = attr_nodes[0]

    if typ != _Any:
        recognized_types, message = self.__recognizer.recognize(
            attr_node, cast(Type, typ))
        if len(recognized_types) == 0:
            raise RecognitionError(message)
1,038,059
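And a companion sketch for require_attribute, again inside an assumed yatiml_recognize hook with hypothetical attribute names:

@classmethod
def yatiml_recognize(cls, node) -> None:
    node.require_attribute('name', str)  # must exist and be a string
    node.require_attribute('tags')       # must exist, any type accepted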
Stores the data at self._data, actual object creation done at _generate_instances() Args: data (list): List of dicts. from_db (bool): Default False. Is this data coming from DB or not.
def _load_data(self, data, from_db=False):
    self._data = data[:]
    self.setattrs(
        values=[],
        node_stack=[],
        node_dict={},
    )
    self._from_db = from_db
1,038,127
Create a ListNode instance from node_data Args: node_data (dict): Data to create ListNode item. Returns: ListNode item.
def _make_instance(self, node_data):
    node_data['from_db'] = self._from_db
    clone = self.__call__(**node_data)
    clone.setattrs(container=self, _is_item=True)
    for name in self._nodes:
        _name = un_camel(name)
        if _name in node_data:  # check for partial data
            getattr(clone, name)._load_data(node_data[_name])
    _key = clone._get_linked_model_key()
    if _key:
        self.node_dict[_key] = clone
    return clone
1,038,129
Allow usage of "del" statement on ListNodes with bracket notation. Args: obj: ListNode item or relation key. Raises: TypeError: If it's called on a ListNode item (instead of the ListNode itself)
def __delitem__(self, obj, sync=True):
    if self._is_item:
        raise TypeError("This is an item of the parent ListNode")
    list(self._generate_instances())
    _lnk_key = None
    if isinstance(obj, six.string_types):
        _lnk_key = obj
        _obj = self.node_dict[obj]
    elif not isinstance(obj, self.__class__):
        _lnk_key = obj.key
        _obj = self.node_dict[obj.key]
        del self.node_dict[obj.key]
    else:
        _obj = obj
    self.node_stack.remove(_obj)
    if _lnk_key and sync:
        # this is a "many_to_n" relationship,
        # we should clean up the other side too.
        rel_name = "%s.%s" % (_obj.__class__.__name__,
                              _obj.get_link()['field'])
        remote_node_name = self._root_node.get_link(field=rel_name)['reverse']
        _lnk_obj = getattr(_obj, _obj.get_link()['field'])
        getattr(_lnk_obj, remote_node_name).__delitem__(self._root_node.key,
                                                        sync=False)
        # bind the relation's save to the root object's save
        self._root_node.on_save.append(_lnk_obj.save)
1,038,135
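A hedged usage sketch; User, Permissions, and the keys are a hypothetical pyoko model setup:

user = User.objects.get(user_key)
del user.Permissions[perm_key]    # delete by relation key (a string)
del user.Permissions[perm_item]   # or by the item object itself
user.save()                       # also runs any queued reverse-side saves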
Slice a data object based on its index, either by value (.loc) or position (.iloc). Args: key: Single index value, slice, tuple, or list of indices/positionals Returns: data: Slice of self
def slice_naive(self, key):
    cls = self.__class__
    key = check_key(self, key)
    return cls(self.loc[key])
1,038,226
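A hedged usage sketch, assuming atoms is one of these index-backed data objects:

single = atoms.slice_naive(0)            # a single index value
first_ten = atoms.slice_naive(slice(0, 10))  # a range of index values
selected = atoms.slice_naive([0, 3, 7])  # an explicit list of values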
Naively (on index) slice the field data and values. Args: key: Int, slice, or iterable to select data and values Returns: field: Sliced field object
def slice_naive(self, key):
    cls = self.__class__
    key = check_key(self, key)
    enum = pd.Series(range(len(self)))
    enum.index = self.index
    values = self.field_values[enum[key].values]
    data = self.loc[key]
    return cls(data, field_values=values)
1,038,239
change the url of that attachment |methcoro| Args: url: url you want to change description: *optional* description for your attachment Raises: ValueError: url must not be None APIException
async def change_url(self, url: str, description: str = None):
    await self._change(url=url, description=description)
1,038,637
change the file of that attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add / modify description: *optional* description for your attachment Raises: ValueError: file_path must not be None APIException
async def change_file(self, file_path: str, description: str = None):
    with open(file_path, 'rb') as f:
        # pass the description through, as change_url does
        await self._change(asset=f.read(), description=description)
1,038,638
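A hedged coroutine sketch covering both mutators above; the attachment object, URL, and file path are hypothetical:

import asyncio

async def update_attachment(attachment):
    await attachment.change_url('https://example.com/replay',
                                description='final game replay')
    await attachment.change_file('screenshots/game1.png',
                                 description='game 1 screenshot')

# asyncio.get_event_loop().run_until_complete(update_attachment(att))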
Bindings to the GNU Lightning library. Args: liblightning: Set to override path to liblightning. program: Set to override argument to init_jit, used with bfd.
def __init__(self, liblightning=None, program=None):
    self._load(liblightning)
    self._set_signatures()
    self._init()
    self._executable = None
1,038,714
Converts any sequence or mapping to a list or OrderedDict. Stops at anything that isn't a sequence or a mapping. One day, we'll extract the comments and formatting and store \ them out-of-band. Args: container: The container of constructed subobjects to convert.
def __to_plain_containers(self,
                          container: Union[CommentedSeq, CommentedMap]
                          ) -> Union[OrderedDict, list]:
    if isinstance(container, CommentedMap):
        new_container = OrderedDict()  # type: Union[OrderedDict, list]
        for key, value_obj in container.items():
            if (isinstance(value_obj, CommentedMap)
                    or isinstance(value_obj, CommentedSeq)):
                new_container[key] = self.__to_plain_containers(value_obj)
            else:
                new_container[key] = value_obj
    elif isinstance(container, CommentedSeq):
        new_container = list()
        for value_obj in container:
            if (isinstance(value_obj, CommentedMap)
                    or isinstance(value_obj, CommentedSeq)):
                new_container.append(self.__to_plain_containers(value_obj))
            else:
                new_container.append(value_obj)
    return new_container
1,038,721
Checks that the object matches the given type. Like isinstance(), but will work with union types using Union, \ Dict and List. Args: obj: The object to check type_: The type to check against Returns: True iff obj is of type type_
def __type_matches(self, obj: Any, type_: Type) -> bool:
    if is_generic_union(type_):
        for t in generic_type_args(type_):
            if self.__type_matches(obj, t):
                return True
        return False
    elif is_generic_list(type_):
        if not isinstance(obj, list):
            return False
        for item in obj:
            if not self.__type_matches(item, generic_type_args(type_)[0]):
                return False
        return True
    elif is_generic_dict(type_):
        if not isinstance(obj, OrderedDict):
            return False
        # iterate key/value pairs; bare iteration over the dict would
        # yield keys only and fail to unpack
        for key, value in obj.items():
            if not isinstance(key, generic_type_args(type_)[0]):
                return False
            if not self.__type_matches(value, generic_type_args(type_)[1]):
                return False
        return True
    else:
        return isinstance(obj, type_)
1,038,723
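Hedged illustrations of the matcher's behaviour; the results follow from the code above (note that generic Dict matching requires an OrderedDict, not a plain dict):

from collections import OrderedDict
from typing import Dict, List, Union

# __type_matches([1, 2], List[int])                 -> True
# __type_matches([1, 'a'], List[int])               -> False
# __type_matches(3, Union[int, str])                -> True
# __type_matches({'a': 1}, Dict[str, int])          -> False (plain dict)
# __type_matches(OrderedDict(a=1), Dict[str, int])  -> True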
Checks that all required attributes are present. Also checks that they're of the correct type. Args: mapping: The mapping with subobjects of this object. Raises: RecognitionError: if an attribute is missing or the type \ is incorrect.
def __check_no_missing_attributes(self, node: yaml.Node,
                                  mapping: CommentedMap) -> None:
    logger.debug('Checking presence of required attributes')
    for name, type_, required in class_subobjects(self.class_):
        if required and name not in mapping:
            raise RecognitionError(('{}{}Missing attribute {} needed for'
                                    ' constructing a {}').format(
                                        node.start_mark, os.linesep,
                                        name, self.class_.__name__))
        if name in mapping and not self.__type_matches(
                mapping[name], type_):
            raise RecognitionError(('{}{}Attribute {} has incorrect type'
                                    ' {}, expecting a {}').format(
                                        node.start_mark, os.linesep, name,
                                        type(mapping[name]), type_))
1,038,724
Strips tags from extra attributes. This prevents nodes under attributes that are not part of our \ data model from being converted to objects. They'll be plain \ CommentedMaps instead, which then get converted to OrderedDicts \ for the user. Args: node: The node to process known_attrs: The attributes to not strip
def __strip_extra_attributes(self, node: yaml.Node,
                             known_attrs: List[str]) -> None:
    known_keys = list(known_attrs)
    known_keys.remove('self')
    if 'yatiml_extra' in known_keys:
        known_keys.remove('yatiml_extra')

    for key_node, value_node in node.value:
        if (not isinstance(key_node, yaml.ScalarNode)
                or key_node.tag != 'tag:yaml.org,2002:str'):
            raise RecognitionError(
                ('{}{}Mapping keys that are not of type'
                 ' string are not supported by YAtiML.').format(
                     node.start_mark, os.linesep))
        if key_node.value not in known_keys:
            self.__strip_tags(value_node)
1,038,726
Strips tags from mappings in the tree headed by node. This keeps yaml from constructing any objects in this tree. Args: node: Head of the tree to strip
def __strip_tags(self, node: yaml.Node) -> None:
    if isinstance(node, yaml.SequenceNode):
        for subnode in node.value:
            self.__strip_tags(subnode)
    elif isinstance(node, yaml.MappingNode):
        node.tag = 'tag:yaml.org,2002:map'
        for key_node, value_node in node.value:
            self.__strip_tags(key_node)
            self.__strip_tags(value_node)
1,038,727
Generate a file from the current template and given arguments. Warning: Make certain to check the formatted editor for correctness! Args: args: Positional arguments to update the template kwargs: Keyword arguments to update the template Returns: editor: An editor containing the formatted template.
def compose(self, *args, **kwargs):
    linebreak = kwargs.pop("linebreak", "\n")
    # Update the internally stored args/kwargs from which formatting
    # arguments come
    if len(args) > 0:
        self.args = args
    self._update(**kwargs)
    # Format string arguments (for the modified template)
    fkwargs = {}    # Format string keyword arguments
    modtmpl = []    # The modified template lines
    for line in self:
        cline = copy(line)
        # If any special formatters exist, handle them
        for match in self._regex.findall(line):
            search = "[{}]".format("|".join(match))
            name, indent, delim, qual, _ = match
            if indent != "":
                indent = " "*int(indent)
            delim = delim.replace("\\|", "|")
            # Collect and format the data accordingly
            data = getattr(self, name, None)
            # If no data exists, treat as optional
            if data is None:
                cline = cline.replace(search, "")
                continue
            elif delim.isdigit():
                fkwargs[name] = getattr(self, "_fmt_"+name)()
            else:
                fkwargs[name] = linebreak.join([indent+k+delim+qual+v+qual
                                                for k, v in data.items()])
            cline = cline.replace(search, "{"+name+"}")
        modtmpl.append(cline)
    modtmpl = "\n".join(modtmpl)
    print(modtmpl)
    dct = self.get_kwargs()
    dct.update(fkwargs)
    return self._constructor(textobj=modtmpl.format(*self.args, **dct))
1,038,771
Print the vcf headers. If a result file is provided headers will be printed here, otherwise they are printed to stdout. Args: head (HeaderParser): A vcf header object outfile (FileHandle): A file handle silent (Bool): If nothing should be printed.
def print_headers(head, outfile=None, silent=False):
    for header_line in head.print_header():
        if outfile:
            outfile.write(header_line + '\n')
        elif not silent:
            print(header_line)
1,038,782
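A hedged usage sketch; head is assumed to be a parsed vcf header object (e.g. from vcf_parser's HeaderParser), and the file name is hypothetical:

with open('annotated.vcf', 'w') as outfile:
    print_headers(head, outfile=outfile)   # headers go to the file

print_headers(head)                # headers go to stdout
print_headers(head, silent=True)   # suppress stdout output entirely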