docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
gives the number of records in the table Args: table: a string indicating the name of the table Returns: an integer indicating the number of records in the table
def count(cls, cur, table: str, where_keys: list=None):
    """Return the number of records in a table.

    Args:
        cur: database cursor exposing a generator-based coroutine API.
        table: name of the table to count records in.
        where_keys: optional filter keys restricting which rows count.

    Returns:
        int: the number of matching records.
    """
    if where_keys:
        clause, params = cls._get_where_clause_with_values(where_keys)
        sql = cls._count_query_where.format(table, clause)
    else:
        sql = cls._count_query.format(table)
        params = ()
    yield from cur.execute(sql, params)
    row = yield from cur.fetchone()
    return int(row[0])
1,063,823
Creates an insert statement with only chosen fields Args: table: a string indicating the name of the table values: a dict of fields and values to be inserted Returns: A 'Record' object with table columns as properties
def insert(cls, cur, table: str, values: dict):
    """Insert a row built from *values* into *table*.

    Args:
        cur: database cursor exposing a generator-based coroutine API.
        table: name of the target table.
        values: mapping of column names to values to insert.

    Returns:
        The inserted record as returned by the database.
    """
    columns = cls._COMMA.join(values.keys())
    # One placeholder per value; trailing separator is trimmed.
    placeholders = (cls._PLACEHOLDER * len(values))[:-1]
    sql = cls._insert_string.format(table, columns, placeholders)
    yield from cur.execute(sql, tuple(values.values()))
    return (yield from cur.fetchone())
1,063,824
Run a raw sql query Args: query : query string to execute values : tuple of values to be used with the query Returns: result of query as list of named tuple
def raw_sql(cls, cur, query: str, values: tuple):
    """Run a raw SQL query.

    Args:
        cur: database cursor exposing a generator-based coroutine API.
        query: query string to execute.
        values: tuple of values bound to the query placeholders.

    Returns:
        All resulting rows, as returned by ``cur.fetchall()``.
    """
    yield from cur.execute(query, values)
    rows = yield from cur.fetchall()
    return rows
1,063,829
Initialization of instances. Args: options (:class:`ConfOpt`): option metadata. The name of each *option* is the name of the keyword argument passed on to this function. Option names should be valid identifiers, otherwise an :class:`~loam.error.OptionError` is raised.
def __init__(self, **options):
    """Initialize a section from option metadata.

    Each keyword argument name is an option name; its value is the
    option metadata. Names must be valid identifiers, otherwise an
    ``error.OptionError`` is raised.
    """
    self._def = {}
    for name, meta in options.items():
        if not _is_valid(name):
            raise error.OptionError(name)
        self._def[name] = meta
        # Seed the live value with the declared default.
        self[name] = meta.default
1,063,846
Update values of configuration section with dict. Args: sct_dict (dict): dict indexed with option names. Undefined options are discarded. conf_arg (bool): if True, only options that can be set in a config file are updated.
def update_(self, sct_dict, conf_arg=True):
    """Update values of configuration section with a dict.

    Args:
        sct_dict (dict): dict indexed with option names. Options not
            defined in this section are silently discarded.
        conf_arg (bool): if True, only options flagged as settable from
            a config file are updated.
    """
    for opt, val in sct_dict.items():
        if opt not in self.def_:
            continue
        meta = self.def_[opt]
        if meta.conf_arg or not conf_arg:
            self[opt] = val
1,063,848
Initialization of instances. Args: sections (:class:`~loam.manager.Section`): section metadata. The name of each *section* is the name of the keyword argument passed on to this function. Section names should be valid identifiers, otherwise a :class:`~loam.error.SectionError` is raised.
def __init__(self, **sections):
    """Initialize a configuration manager from section metadata.

    Each keyword argument name is a section name; its value is the
    section metadata. Names must be valid identifiers, otherwise an
    ``error.SectionError`` is raised.
    """
    self._sections = []
    for name, meta in sections.items():
        if not _is_valid(name):
            raise error.SectionError(name)
        setattr(self, name, Section(**meta.def_))
        self._sections.append(name)
    # CLI-related state, populated later by the parser machinery.
    self._parser = None
    self._nosub_valid = False
    self._subcmds = {}
    self._config_files = ()
1,063,850
Use a dictionary to create a :class:`ConfigurationManager`. Args: conf_dict (dict of dict of :class:`ConfOpt`): the first level of keys should be the section names. The second level should be the option names. The values are the options metadata. Returns: :class:`ConfigurationManager`: a configuration manager with the requested sections and options.
def from_dict_(cls, conf_dict):
    """Build a configuration manager from a nested dict.

    Args:
        conf_dict (dict of dict): first-level keys are section names,
            second-level keys are option names mapped to option metadata.

    Returns:
        A manager with the requested sections and options.
    """
    sections = {}
    for name, opts in conf_dict.items():
        sections[name] = Section(**opts)
    return cls(**sections)
1,063,851
Set the list of config files. Args: config_files (pathlike): path of config files, given in the order of reading.
def set_config_files_(self, *config_files):
    """Set the list of config files.

    Args:
        config_files (pathlike): paths of config files, given in the
            order in which they should be read.
    """
    paths = map(pathlib.Path, config_files)
    self._config_files = tuple(paths)
1,063,852
Create config file. Create config file in :attr:`config_files_[index]`. Parameters: index(int): index of config file. update (bool): if set to True and :attr:`config_files_` already exists, its content is read and all the options it sets are kept in the produced config file.
def create_config_(self, index=0, update=False):
    """Create config file at :attr:`config_files_[index]`.

    Args:
        index (int): index of the config file to create.
        update (bool): if True and current values exist, write the
            current option values instead of the declared defaults.
    """
    # Nothing to do if the index is out of range of the registered files.
    if not self.config_files_[index:]:
        return
    path = self.config_files_[index]
    if not path.parent.exists():
        path.parent.mkdir(parents=True)
    conf_dict = {}
    for section in self.sections_():
        # Only options flagged conf_arg end up in the file.
        conf_opts = [o for o, m in self[section].defaults_() if m.conf_arg]
        if not conf_opts:
            continue
        conf_dict[section] = {}
        for opt in conf_opts:
            # `update` keeps live values; otherwise fall back to defaults.
            conf_dict[section][opt] = (self[section][opt] if update
                                       else self[section].def_[opt].default)
    with path.open('w') as cfile:
        toml.dump(conf_dict, cfile)
1,063,855
Update values of configuration options with dict. Args: conf_dict (dict): dict of dict indexed with section and option names. conf_arg (bool): if True, only options that can be set in a config file are updated.
def update_(self, conf_dict, conf_arg=True):
    """Update configuration options from a nested dict.

    Args:
        conf_dict (dict): dict of dict indexed with section and option
            names.
        conf_arg (bool): if True, only options settable from a config
            file are updated; forwarded to each section.
    """
    for name, secdict in conf_dict.items():
        self[name].update_(secdict, conf_arg)
1,063,856
Miller-Rabin primality test. Keep in mind that this is not a deterministic algorithm: if it returns True, it means that n is probably a prime. Args: n (int): the integer to check Returns: True if n is probably a prime number, False if it is not Raises: TypeError: if n is not an integer Note: Adapted from https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test#Python
def is_prime(n):
    """Miller-Rabin probabilistic primality test.

    Args:
        n (int): the integer to check.

    Returns:
        True if n is probably prime, False if it is certainly composite.

    Raises:
        TypeError: if n is not an integer.
    """
    if not isinstance(n, int):
        raise TypeError("Expecting an integer")
    if n < 2:
        return False
    if n in __known_primes:
        return True
    if any(n % p == 0 for p in __known_primes):
        return False
    # Write n - 1 as d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1

    def looks_composite(a):
        # Witness test: a proves compositeness unless one of the
        # Miller-Rabin conditions holds.
        if pow(a, d, n) == 1:
            return False
        return not any(pow(a, 2 ** r * d, n) == n - 1 for r in range(s))

    return all(not looks_composite(a) for a in __known_primes[:16])
1,063,975
Find all the positive divisors of the given integer n. Args: n (int): strictly positive integer Returns: A generator of all the positive divisors of n Raises: TypeError: if n is not an integer ValueError: if n is negative
def find_divisors(n):
    """Yield every positive divisor of n.

    Args:
        n (int): strictly positive integer.

    Returns:
        A generator over all positive divisors of n (unordered).

    Raises:
        TypeError: if n is not an integer.
        ValueError: if n is not strictly positive.
    """
    if not isinstance(n, int):
        raise TypeError("Expecting a strictly positive integer")
    if n <= 0:
        raise ValueError("Expecting a strictly positive integer")
    root = int(n ** 0.5)
    for small in range(1, root + 1):
        if n % small:
            continue
        # The set collapses the pair when small == n // small (perfect square).
        for d in {small, n // small}:
            yield d
1,063,976
Count the number of divisors of an integer n Args: n (int): strictly positive integer Returns: The number of distinct divisors of n Raises: TypeError: if n is not an integer ValueError: if n is not strictly positive
def count_divisors(n):
    """Count the distinct positive divisors of n.

    Uses the prime factorization: if n = p1**e1 * ... * pk**ek, the
    divisor count is (e1 + 1) * ... * (ek + 1).

    Args:
        n (int): strictly positive integer.

    Returns:
        The number of distinct divisors of n.

    Raises:
        TypeError: if n is not an integer.
        ValueError: if n is not strictly positive.
    """
    if not isinstance(n, int):
        raise TypeError("Expecting a strictly positive integer")
    if n <= 0:
        raise ValueError("Expecting a strictly positive integer")
    total = 1
    remain = n
    for p in prime_generator():
        if p > n:
            break
        # exponent ends up as (multiplicity of p in n) + 1.
        exponent = 1
        while remain % p == 0:
            remain //= p
            exponent += 1
        total *= exponent
        if remain == 1:
            break
    return total
1,063,977
Calculate the binomial coefficient indexed by n and k. Args: n (int): positive integer k (int): positive integer Returns: The binomial coefficient indexed by n and k Raises: TypeError: If either n or k is not an integer ValueError: If either n or k is negative, or if k is strictly greater than n
def binomial_coefficient(n, k):
    """Calculate the binomial coefficient C(n, k).

    Args:
        n (int): non-negative integer.
        k (int): non-negative integer, at most n.

    Returns:
        int: the number of ways to choose k items among n.

    Raises:
        TypeError: if either n or k is not an integer.
        ValueError: if either n or k is negative, or if k > n.
    """
    if not isinstance(k, int) or not isinstance(n, int):
        raise TypeError("Expecting positive integers")
    if k > n:
        raise ValueError("k must be lower or equal than n")
    if k < 0 or n < 0:
        raise ValueError("Expecting positive integers")
    # Use the stdlib factorial instead of relying on a module-level
    # helper; math.factorial is exact for arbitrarily large ints and
    # integer division keeps the result an int.
    from math import factorial
    return factorial(n) // (factorial(k) * factorial(n - k))
1,063,980
Calculate the value of Euler's totient for a given integer Args: n (int): strictly positive integer Returns: The value of Euler's totient for n Raises: TypeError: If n is not an integer ValueError: If n is not strictly positive
def eulers_totient(n):
    """Calculate Euler's totient of n: how many integers in [1, n) are
    coprime with n (with the convention that the totient of 1 is 1).

    Args:
        n (int): strictly positive integer.

    Returns:
        int: the value of Euler's totient for n.

    Raises:
        TypeError: if n is not an integer.
        ValueError: if n is not strictly positive.
    """
    if not isinstance(n, int):
        raise TypeError("Expecting a strictly positive integer")
    if n <= 0:
        raise ValueError("Expecting a strictly positive integer")
    if n == 1:
        return 1
    # Use the stdlib gcd instead of relying on an ambient helper, and a
    # generator-sum instead of a manual counter loop.
    from math import gcd
    return sum(1 for i in range(1, n) if gcd(i, n) == 1)
1,063,981
Set value for <input> / <select> tags based on MDT. Args: mdt (str): MDT of the conspect / subconspect.
def set(cls, mdt):
    """Set value for <input> / <select> tags based on MDT.

    Args:
        mdt (str): MDT of the conspect / subconspect. May also arrive
            wrapped in a list/tuple or as a dict with a "val" key.
    """
    # Normalize: unwrap list/tuple wrappers and {"val": ...} dicts.
    if type(mdt) in [list, tuple]:
        mdt = mdt[0]
    if isinstance(mdt, dict):
        mdt = mdt["val"]
    if not mdt:
        # Empty MDT -> clear all related widgets and stop.
        cls.conspect_el.value = ""
        cls.subconspect_el.html = ""
        cls.input_el.value = ""
        return
    # set twoconspect
    data = conspectus.subs_by_mdt.get(mdt)
    if data:
        cls.conspect_el.value = data["conspect_id"]
        cls._draw_subconspects(data["conspect_id"])
        cls.subconspect_el.value = mdt
        # set input
        cls.input_el.value = data["name"]
    else:
        # Unknown MDT: surface the problem directly in the input field.
        cls.input_el.value = mdt + " not found"
1,063,987
Load a surface from a file. Args: path (str): Path to the BMP file to load. Returns: Surface: A surface containing the pixels loaded from the file. Raises: SDLError: If the file cannot be loaded.
def load_bmp(path):
    """Load a surface from a BMP file.

    Args:
        path (str): path to the BMP file to load.

    Returns:
        Surface: a surface containing the pixels loaded from the file.

    Raises:
        SDLError: if the file cannot be loaded.
    """
    # Bypass Surface.__init__; the pointer is filled in directly.
    bmp = object.__new__(Surface)
    rwops = lib.SDL_RWFromFile(path, "rb")
    # The final `1` asks SDL to close the RWops after loading.
    bmp._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(rwops, 1))
    return bmp
1,064,079
Load an image directly into a render texture. Args: renderer: The renderer to make the texture. file: The image file to load. Returns: A new texture
def load_texture(renderer, file):
    """Load an image file directly into a render texture.

    Args:
        renderer: the renderer used to create the texture.
        file: the image file to load.

    Returns:
        A new texture.
    """
    ptr = check_ptr_err(lib.IMG_LoadTexture(renderer._ptr, file))
    return Texture._from_ptr(ptr)
1,064,094
Discordian date setup and mangling. Note: year, season and day_of_season are all required if any are used Args: date: optional date object with a timetuple method, or uses today year: optional integer discordian year to create from season: optional integer discordian season to create from day_of_season: optional int discordian day of season to create from
def __init__(self, date=None, year=None, season=None, day_of_season=None,
             *args, **kwargs):
    """Discordian date setup and mangling.

    Note: year, season and day_of_season are all required if any are used.

    Args:
        date: optional object with a timetuple method; defaults to today.
        year: optional integer discordian year to create from.
        season: optional integer discordian season to create from.
        day_of_season: optional int discordian day of season to create from.
    """
    if year is not None and season is not None and \
            day_of_season is not None:
        # Rebuild a Gregorian date from the discordian triple
        # (discordian year = gregorian year + 1166; 73-day seasons).
        date = (datetime.datetime(year=year - 1166, month=1, day=1) +
                datetime.timedelta(days=(season * 73) + day_of_season - 1))
    elif date is None or not hasattr(date, "timetuple"):
        date = datetime.date.today()
    self.date = date
    time_tuple = self.date.timetuple()
    # calculate leap year using traditional methods to align holidays
    year = time_tuple.tm_year
    self.year = year + 1166  # then adjust accordingly and assign
    day_of_year = time_tuple.tm_yday - 1  # ordinal
    if is_leap_year(year) and day_of_year > 59:
        day_of_year -= 1  # St. Tib's doesn't count
    self.day_of_week = day_of_year % 5
    self.day_of_season = day_of_year % 73 + 1  # cardinal
    self.season = int(day_of_year / 73)
    if is_leap_year(year) and time_tuple.tm_yday == 60:
        # Feb 29: outside the 5-season calendar entirely.
        self.holiday = "St. Tib's Day"
        self.day_of_week = None
        self.day_of_season = None
        self.season = None
    elif self.day_of_season == 5:
        self.holiday = self.HOLIDAYS["apostle"][self.season]
    elif self.day_of_season == 50:
        self.holiday = self.HOLIDAYS["seasonal"][self.season]
    else:
        self.holiday = None
    super(DDate, self).__init__(*args, **kwargs)
1,064,132
Open the mixer with a certain audio format. Args: frequency (int): Output sampling frequency in samples per second (Hz). format (AudioFormat): Output sample format. channels (int): Number of sound channels in output. Set to 2 for stereo, 1 for mono. chunksize (int): Bytes used per output sample. Raises: SDLError: If the audio device cannot be opened.
def open_audio(frequency=44100, format=AudioFormat.default, channels=2,
               chunksize=1024):
    """Open the mixer with a certain audio format.

    Args:
        frequency (int): output sampling frequency in samples/second (Hz).
        format (AudioFormat): output sample format.
        channels (int): number of output channels (2 stereo, 1 mono).
        chunksize (int): bytes used per output sample.

    Raises:
        SDLError: if the audio device cannot be opened.
    """
    status = lib.Mix_OpenAudio(frequency, format, channels, chunksize)
    check_int_err(status)
1,064,439
Fill data from a given configuration section. Args: config (configparser): the configuration file section (str): the section to use
def fill(self, config, section):
    """Fill data from a given configuration section.

    Args:
        config (configparser): the configuration file.
        section (str): the section to use. Missing sections are ignored.
    """
    if not config.has_section(section):
        return
    # Fall back to the well-known URL for this repository name, if any.
    fallback = self.DEFAULT_REPOSITORIES.get(self.name, '')
    self.url = RepositoryURL(
        config_get(config, section, 'repository', fallback))
    self.username = config_get(config, section, 'username', '')
    self.password = config_get(config, section, 'password', '')
1,065,380
Retrieve configuration for a given repository. Args: repo (str): a repository "realm" (alias) or its URL Returns: RepositoryConfig: if there is configuration for that repository None: otherwise
def get_repo_config(self, repo='default'):
    """Retrieve configuration for a given repository.

    Args:
        repo (str): a repository "realm" (alias) or its URL.

    Returns:
        RepositoryConfig: if there is configuration for that repository.
        None: otherwise.
    """
    for candidate in self.repositories:
        # Match either by alias or by URL containment.
        if candidate.name == repo:
            return candidate
        if candidate.url in RepositoryURL(repo):
            return candidate
    return None
1,065,386
Return a fully recoded dataframe. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    """Return a fully recoded dataframe.

    Args:
        table (pd.DataFrame): dataframe on which to apply recoding logic.
        validate (bool): if True, the recoded table must pass validation
            tests (enforced by each column).

    Returns:
        pd.DataFrame: new dataframe built column by column.
    """
    result = pd.DataFrame(index=table.index)
    for col in self.columns:
        result = col.update_dataframe(result, table=table, validate=validate)
    return result
1,065,542
Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate.
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
    """Validation hook; concrete subclasses must override this.

    Args:
        table (pd.DataFrame): dataframe on which to apply validation logic.
        failed_only (bool): if True, return only the indexes that failed.

    Raises:
        NotImplementedError: always, on this base implementation.
    """
    raise NotImplementedError("This method must be defined for each subclass.")
1,065,544
Pass the appropriate columns through each recoder function sequentially and return the final result. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    """Recoding hook; concrete subclasses must override this.

    Args:
        table (pd.DataFrame): dataframe on which to apply recoding logic.
        validate (bool): if True, recoded table must pass validation.

    Raises:
        NotImplementedError: always, on this base implementation.
    """
    raise NotImplementedError("This method must be defined for each subclass.")
1,065,545
Construct a new `Column` object. Args: name (str): The exact name of the column in a ``pd.DataFrame``. dtype (type): The type that each member of the recoded column must belong to. unique (bool): Whether values are allowed to recur in this column. validators (list): A list of validator functions. recoders (list): A list of recoder functions.
def __init__(
        self,
        name: str,
        dtype: type,
        unique: bool,
        validators: t.List[VALIDATOR_FUNCTION],
        recoders: t.List[RECODER_FUNCTION],) -> None:
    """Construct a new `Column` object.

    Args:
        name (str): exact name of the column in a ``pd.DataFrame``.
        dtype (type): type that each recoded member must belong to.
        unique (bool): whether values may recur in this column.
        validators (list): validator functions (None means none).
        recoders (list): recoder functions (None means none).
    """
    self.name = name
    self.dtype = dtype
    self.unique = unique
    # Treat None as "no functions" and normalize to name->func dicts.
    self.validators = self._dict_of_funcs(
        validators if validators is not None else [])
    self.recoders = self._dict_of_funcs(
        recoders if recoders is not None else [])
1,065,546
Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate.
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
    """Return a dataframe of validation results for this column.

    One boolean column per validator, plus 'dtype' and (optionally)
    'unique' checks, indexed like the input series.

    Args:
        table (pd.DataFrame): dataframe on which to apply validation.
        failed_only (bool): if True, return only rows that failed.
    """
    series = table[self.name]
    self._check_series_name(series)
    checks = self.validators
    # Seed every check column with the raw series, then overwrite each
    # with that validator's boolean result.
    results = pd.DataFrame(
        {check_name: series for check_name in checks}, index=series.index)
    for check_name, check_func in checks.items():
        results[check_name] = check_func(results[check_name])
    results['dtype'] = self._validate_series_dtype(series)
    if self.unique:
        results['unique'] = v.funcs.unique(series)
    return find_failed_rows(results) if failed_only else results
1,065,550
Pass the provided series obj through each recoder function sequentially and return the final result. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    """Pass this column's series through each recoder sequentially.

    Args:
        table (pd.DataFrame): dataframe on which to apply recoding logic.
        validate (bool): if True, the recoded series must pass validation.

    Raises:
        RecodingError: if any recoder raises.
        ValidationError: if validation is requested and fails.
    """
    series = table[self.name]
    self._check_series_name(series)
    col = self.name
    data = series.copy()
    for func in self.recoders.values():
        try:
            data = func(data)
        except BaseException as err:
            # Wrap any failure with the column/function context.
            raise RecodingError(col, func, err)
    if validate:
        failed_rows = find_failed_rows(self.validate(data.to_frame()))
        if not failed_rows.empty:
            raise ValidationError(f"Rows that failed to validate for column '{self.name}':\n{failed_rows}")
    return data.to_frame()
1,065,551
Construct a new ``CompoundColumn`` object. Args: input_columns (list, Column): A list of ``Column`` objects representing column(s) from the SOURCE table. output_columns (list, Column): A list of ``Column`` objects representing column(s) from the FINAL table. column_transform (Callable): Function accepting the table object, performing transformations to it and returning a DataFrame containing the NEW columns only.
def __init__(
        self,
        input_columns: t.List[Column],
        output_columns: t.List[Column],
        column_transform,) -> None:
    """Construct a new ``CompoundColumn`` object.

    Args:
        input_columns (list of Column): column(s) from the SOURCE table.
        output_columns (list of Column): column(s) from the FINAL table.
        column_transform (Callable): function accepting the table object,
            performing transformations and returning a DataFrame
            containing the NEW columns only.
    """
    self.input_columns = input_columns
    self.output_columns = output_columns
    self.column_transform = column_transform
1,065,552
Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate.
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
    """Return combined input+output validation results.

    Args:
        table (pd.DataFrame): dataframe on which to apply validation.
        failed_only (bool): if True, return only rows that failed.
    """
    parts = [
        self._validate_input(table, failed_only=failed_only),
        self._validate_output(table, failed_only=failed_only),
    ]
    # Cells missing from one side are treated as passing.
    return pd.concat(parts).fillna(True)
1,065,559
Pass the appropriate columns through each recoder function sequentially and return the final result. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    """Recode input columns, then derive the output columns from them.

    Args:
        table (pd.DataFrame): dataframe on which to apply recoding logic.
        validate (bool): if True, both stages must pass validation.
    """
    intermediate = self._recode_input(table, validate=validate)
    return self._recode_output(intermediate, validate=validate)
1,065,560
List of config sections used by a command. Args: cmd (str): command name, set to ``None`` or ``''`` for the bare command. Returns: list of str: list of configuration sections used by that command.
def sections_list(self, cmd=None):
    """List config sections used by a command.

    Args:
        cmd (str): command name; None or '' means the bare command.

    Returns:
        list of str: configuration sections used by that command.
    """
    sections = list(self.common.sections)
    if not cmd:
        # Bare command: only meaningful when a bare entry point exists.
        if self.bare is None:
            return []
        sections.extend(self.bare.sections)
        return sections
    sections.extend(self.subcmds[cmd].sections)
    if cmd in self._conf:
        sections.append(cmd)
    return sections
1,065,575
Parse arguments and update options accordingly. Args: arglist (list of str): list of arguments to parse. If set to None, ``sys.argv[1:]`` is used. Returns: :class:`Namespace`: the argument namespace returned by the :class:`argparse.ArgumentParser`.
def parse_args(self, arglist=None):
    """Parse arguments and update options accordingly.

    Args:
        arglist (list of str): arguments to parse; None means
            ``sys.argv[1:]`` (handled by argparse).

    Returns:
        The argument namespace returned by the underlying parser.
    """
    args = self._parser.parse_args(args=arglist)
    sub_cmd = args.loam_sub_name
    # Pick the option->section mapping for the bare command or subcommand.
    opt_map = self._opt_bare if sub_cmd is None else self._opt_cmds[sub_cmd]
    for opt, sct in opt_map.items():
        self._conf[sct][opt] = getattr(args, opt, None)
    return args
1,065,579
Write zsh _arguments compdef for a given command. Args: zcf (file): zsh compdef file. cmd (str): command name, set to None or '' for bare command. grouping (bool): group options (zsh>=5.4). add_help (bool): add an help option.
def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True):
    """Write zsh _arguments compdef for a given command.

    Args:
        zcf (file): zsh compdef file.
        cmd (str): command name, set to None or '' for bare command.
        grouping (bool): group options (zsh>=5.4).
        add_help (bool): add an help option.
    """
    if add_help:
        if grouping:
            print("+ '(help)'", end=BLK, file=zcf)
        print("'--help[show help message]'", end=BLK, file=zcf)
        print("'-h[show help message]'", end=BLK, file=zcf)
    # could deal with duplicate by iterating in reverse and keep set of
    # already defined opts.
    no_comp = ('store_true', 'store_false')
    cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
    for opt, sct in cmd_dict.items():
        meta = self._conf[sct].def_[opt]
        if meta.cmd_kwargs.get('action') == 'append':
            # Repeatable option: '*' prefix allows multiple occurrences.
            grpfmt, optfmt = "+ '{}'", "'*{}[{}]{}'"
            if meta.comprule is None:
                meta.comprule = ''
        else:
            grpfmt, optfmt = "+ '({})'", "'{}[{}]{}'"
        if meta.cmd_kwargs.get('action') in no_comp \
                or meta.cmd_kwargs.get('nargs') == 0:
            # Flags take no argument, hence no completion rule.
            meta.comprule = None
        if meta.comprule is None:
            compstr = ''
        elif meta.comprule == '':
            # Option takes a value but has no completion: force '=' form.
            optfmt = optfmt.split('[')
            optfmt = optfmt[0] + '=[' + optfmt[1]
            compstr = ': :( )'
        else:
            optfmt = optfmt.split('[')
            optfmt = optfmt[0] + '=[' + optfmt[1]
            compstr = ': :{}'.format(meta.comprule)
        if grouping:
            print(grpfmt.format(opt), end=BLK, file=zcf)
        for name in _names(self._conf[sct], opt):
            # Escape single quotes for the zsh single-quoted string.
            print(optfmt.format(name, meta.help.replace("'", "'\"'\"'"),
                                compstr), end=BLK, file=zcf)
1,065,580
Write zsh compdef script. Args: path (path-like): desired path of the compdef script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed. sourceable (bool): if True, the generated file will contain an explicit call to ``compdef``, which means it can be sourced to activate CLI completion.
def zsh_complete(self, path, cmd, *cmds, sourceable=False):
    """Write zsh compdef script.

    Args:
        path (path-like): desired path of the compdef script.
        cmd (str): command name that should be completed.
        cmds (str): extra command names that should be completed.
        sourceable (bool): if True, the generated file will contain an
            explicit call to ``compdef``, which means it can be sourced
            to activate CLI completion.
    """
    grouping = internal.zsh_version() >= (5, 4)
    path = pathlib.Path(path)
    firstline = ['#compdef', cmd]
    firstline.extend(cmds)
    subcmds = list(self.subcmds.keys())
    with path.open('w') as zcf:
        print(*firstline, end='\n\n', file=zcf)
        # main function
        print('function _{} {{'.format(cmd), file=zcf)
        print('local line', file=zcf)
        print('_arguments -C', end=BLK, file=zcf)
        if subcmds:
            # list of subcommands and their description
            substrs = ["{}\\:'{}'".format(sub, self.subcmds[sub].help)
                       for sub in subcmds]
            print('"1:Commands:(({}))"'.format(' '.join(substrs)),
                  end=BLK, file=zcf)
        self._zsh_comp_command(zcf, None, grouping)
        if subcmds:
            # Dispatch remaining words to per-subcommand handlers.
            print("'*::arg:->args'", file=zcf)
            print('case $line[1] in', file=zcf)
            for sub in subcmds:
                print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd),
                      file=zcf)
            print('esac', file=zcf)
        print('}', file=zcf)
        # all subcommand completion handlers
        for sub in subcmds:
            print('\nfunction _{}_{} {{'.format(cmd, sub), file=zcf)
            print('_arguments', end=BLK, file=zcf)
            self._zsh_comp_command(zcf, sub, grouping)
            print('}', file=zcf)
        if sourceable:
            print('\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf)
1,065,581
Build a list of all options for a given command. Args: cmd (str): command name, set to None or '' for bare command. add_help (bool): add an help option. Returns: list of str: list of CLI options strings.
def _bash_comp_command(self, cmd, add_help=True):
    """Build a list of all options for a given command.

    Args:
        cmd (str): command name, set to None or '' for bare command.
        add_help (bool): add an help option.

    Returns:
        list of str: list of CLI options strings.
    """
    out = ['-h', '--help'] if add_help else []
    cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare
    # BUG FIX: iterating a dict directly yields only its keys, so
    # unpacking `opt, sct` from each key raised (or misbehaved) at
    # runtime.  Iterate over .items() to get (option, section) pairs,
    # matching how _zsh_comp_command walks the same mapping.
    for opt, sct in cmd_dict.items():
        out.extend(_names(self._conf[sct], opt))
    return out
1,065,582
Write bash complete script. Args: path (path-like): desired path of the complete script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed.
def bash_complete(self, path, cmd, *cmds):
    """Write bash complete script.

    Args:
        path (path-like): desired path of the complete script.
        cmd (str): command name that should be completed.
        cmds (str): extra command names that should be completed.
    """
    path = pathlib.Path(path)
    subcmds = list(self.subcmds.keys())
    with path.open('w') as bcf:
        # main function
        print('_{}() {{'.format(cmd), file=bcf)
        print('COMPREPLY=()', file=bcf)
        print(r'local cur=${COMP_WORDS[COMP_CWORD]}', end='\n\n', file=bcf)
        optstr = ' '.join(self._bash_comp_command(None))
        print(r'local options="{}"'.format(optstr), end='\n\n', file=bcf)
        if subcmds:
            print('local commands="{}"'.format(' '.join(subcmds)), file=bcf)
            print('declare -A suboptions', file=bcf)
        for sub in subcmds:
            # One entry per subcommand with its own option list.
            optstr = ' '.join(self._bash_comp_command(sub))
            print('suboptions[{}]="{}"'.format(sub, optstr), file=bcf)
        condstr = 'if'
        for sub in subcmds:
            # if/elif chain selecting the active subcommand from the line.
            print(condstr, r'[[ "${COMP_LINE}" == *"', sub, '"* ]] ; then',
                  file=bcf)
            print(r'COMPREPLY=( `compgen -W "${suboptions[', sub,
                  r']}" -- ${cur}` )', sep='', file=bcf)
            condstr = 'elif'
        print(condstr, r'[[ ${cur} == -* ]] ; then', file=bcf)
        print(r'COMPREPLY=( `compgen -W "${options}" -- ${cur}`)', file=bcf)
        if subcmds:
            print(r'else', file=bcf)
            print(r'COMPREPLY=( `compgen -W "${commands}" -- ${cur}`)',
                  file=bcf)
        print('fi', file=bcf)
        print('}', end='\n\n', file=bcf)
        print('complete -F _{0} {0}'.format(cmd), *cmds, file=bcf)
1,065,583
Constructor. Args: url (str): URL to which this request is related.
def __init__(self, url):
    """Constructor.

    Args:
        url (str): URL to which this request is related.
    """
    self.url = url
    self.domain = urlparse(url).netloc
    self.index = None
    self.creation_ts = time.time()
    # Lifecycle timestamps, filled in as processing progresses.
    self.downloaded_ts = None
    self.processing_started_ts = None
    self.processing_ended_ts = None
    # One None-initialized attribute per frontend-mapped property.
    for prop in worker_mapping().keys():
        setattr(self, prop, None)
1,065,693
Set property `name` to `value`, but only if it is part of the mapping returned from `worker_mapping` (ie - data transported to frontend). This method is used from the REST API DB, so it knows what to set and what not, to prevent users from setting internal values. Args: name (str): Name of the property to set. value (obj): Any python value. Raises: KeyError: If `name` can't be set.
def _set_property(self, name, value):
    """Set property `name` to `value`, but only if it is part of the
    mapping returned from `worker_mapping` (ie - data transported to
    the frontend).  Used from the REST API DB to prevent users from
    setting internal values.

    Args:
        name (str): name of the property to set.
        value (obj): any python value.

    Raises:
        KeyError: if `name` can't be set.
    """
    if name not in worker_mapping().keys():
        raise KeyError("Can't set `%s`!" % name)
    setattr(self, name, value)
1,065,694
Render template file for the registered user, which has some of the values prefilled. Args: url_id (str): Seeder URL id. remote_info (dict): Informations read from Seeder. Returns: str: Template filled with data.
def render_registered(url_id, remote_info):
    """Render the index template for a registered user, with some of
    the values prefilled.

    Args:
        url_id (str): Seeder URL id.
        remote_info (dict): informations read from Seeder.

    Returns:
        str: template filled with data.
    """
    context = dict(
        registered=True,
        url=remote_info["url"],
        seeder_data=json.dumps(remote_info),
        url_id=url_id,
    )
    return template(read_index_template(), **context)
1,065,831
Render template file for the unregistered user. Args: error (str, default None): Optional error message. Returns: str: Template filled with data.
def render_unregistered(error=None):
    """Render the index template for an unregistered user.

    Args:
        error (str, default None): optional error message.

    Returns:
        str: template filled with data.
    """
    context = dict(
        registered=False,
        error=error,
        seeder_data=None,
        url_id=None,
    )
    return template(read_index_template(), **context)
1,065,832
Return the value of the feature. If the unit is specified and the feature has a unit, the value is converted Args: - unit(str,optional): A unit to convert the current feature value ('B','K','M','G')
def getValue(self, unit=None):
    """Return the value of the feature, converted if a unit applies.

    Args:
        unit (str, optional): target unit ('B', 'K', 'M', 'G') to
            convert the current feature value to.
    """
    if not unit and not self.unit:
        # No conversion requested and none stored: raw value.
        return self.value
    ratio = float(self.value * UnitToValue(self.unit)) / UnitToValue(unit)
    # Integer features stay integers after conversion.
    return int(round(ratio)) if isinstance(self.value, int) else ratio
1,065,850
Check type, operator and unit in a feature. Args: - check(tuple): - v[0]: expected type of the feature value. - v[1]: can be a list of possible values or a function to test the value or None. - v[2] (optional): can be a list of possible units; if None or not set the unit valid is none. - radl: second argument passed when calling v[1].
def _check(self, check, radl):
    """Check type, operator and unit in a feature.

    Args:
        check (tuple):
            - check[0]: expected type of the feature value.
            - check[1]: list of possible values, a test function, or None.
            - check[2] (optional): list of possible units; if None or
              unset, the only valid unit is none.
        radl: second argument passed when calling check[1].
    """
    # Check type
    # If the value must be float, int is also valid
    if check[0] == float:
        if not isinstance(self.value, int) and not isinstance(self.value, float):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    elif check[0] == str:
        # NOTE(review): `unicode` suggests a Python 2 compat alias is
        # defined elsewhere in this module — confirm before porting.
        if not isinstance(self.value, str) and not isinstance(self.value, unicode):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    else:
        if not isinstance(self.value, check[0]):
            raise RADLParseException("Invalid type; expected %s" % check[0],
                                     line=self.line)
    # Check operator: plain strings only allow '='; numbers and
    # version-like properties allow comparisons; sub-features use 'contains'.
    if (isinstance(self.value, str) or isinstance(self.value, unicode)) and self.prop.find('version') == -1:
        if self.operator != "=":
            raise RADLParseException("Invalid operator; expected '='",
                                     line=self.line)
    elif isinstance(self.value, int) or isinstance(self.value, float) or self.prop.find('version') >= 0:
        if self.operator not in ["=", "<=", ">=", ">", "<"]:
            raise RADLParseException("Invalid operator; expected '=', '<=', " +
                                     "'>=', '>' or '<'", line=self.line)
    elif isinstance(self.value, Features):
        if self.operator != "contains":
            raise RADLParseException(
                "Invalid operator; expected 'contains'", line=self.line)
    # Check value
    if isinstance(check[1], list):
        if self.value.upper() not in check[1]:
            raise RADLParseException("Invalid value; expected one of %s" % check[1],
                                     line=self.line)
    elif callable(check[1]):
        if not check[1](self, radl):
            raise RADLParseException("Invalid value in property '%s'" % self.prop,
                                     line=self.line)
    # Check unit
    if len(check) < 3 or check[2] is None:
        if self.unit:
            raise RADLParseException("Invalid unit; expected none",
                                     line=self.line)
    elif len(check) > 2 and check[2]:
        if self.unit.upper() not in check[2]:
            raise RADLParseException(
                "Invalid unit; expected one of %s" % check[2], line=self.line)
    return True
1,065,851
Return the restriction of first interval by the second. Args: - inter0, inter1 (tuple of Feature): intervals Return(tuple of Feature): the resulting interval - conflict(str): if a property hasn't compatible values/constrains, do: - ``"error"``: raise exception. - ``"ignore"``: return None. - ``"me"``: return finter0. - ``"other"``: return finter1.
def _applyInter(finter0, finter1, conflict="ignore"):
    """Return the restriction of the first interval by the second.

    Args:
        finter0, finter1 (tuple of Feature): intervals; a None endpoint
            means unbounded on that side.
        conflict (str): behaviour when the intervals are disjoint:
            - "error": raise an exception.
            - "ignore": return None.
            - "me": return finter0.
            - "other": return finter1.

    Returns:
        tuple of Feature: the resulting interval (or None / an input
        interval on conflict, per `conflict`).
    """
    OPTIONS = ["error", "ignore", "me", "other"]
    assert conflict in OPTIONS, "Invalid value in `conflict`."
    # Compute the comparison of the interval extremes.
    # min_int stands in for an absent (None) endpoint.
    min_int = -2**63
    # Remember, None <= number and None <= None are True, but number <= None is False.
    inter0 = tuple([f.getValue() if f else min_int for f in finter0])
    inter1 = tuple([f.getValue() if f else min_int for f in finter1])
    le00 = inter0[0] <= inter1[0]                    # finter0[0] <= finter1[0]
    le01 = inter1[1] == min_int or inter0[0] <= inter1[1]   # finter0[0] <= finter1[1]
    le11 = inter1[1] == min_int or (inter0[1] != min_int and inter0[1] <= inter1[1])  # finter0[1] <= finter1[1]
    ge00 = not le00 or inter0[0] == inter1[0]        # finter0[0] >= finter1[0]
    ge10 = inter0[1] == min_int or inter0[1] >= inter1[0]   # finter0[1] >= finter1[0]
    # First interval is ( ), second interval is [ ]
    if le00 and ge10 and le11:
        # ( [ ) ]  chain first-second
        return finter1[0], finter0[1]
    elif le00 and ge10 and not le11:
        # ( [ ] )  second is inside first
        return finter1
    elif ge00 and le01 and le11:
        # [ ( ) ]  first is inside second
        return finter0
    elif ge00 and le01 and not le11:
        # [ ( ] )  chain second-first
        return finter0[0], finter1[1]
    elif conflict == "me":
        return finter0
    elif conflict == "other":
        return finter1
    elif conflict == "error":
        raise Exception("Disjoint intervals!")
    return None
1,065,859
Check types, operators and units in features with numbers. Args: - checks(dict of dict of str:tuples): keys are property name prefixes, and the values are dict with keys are property name suffixes and values are iterable as in ``_check_feature``. - radl: passed to ``_check_feature``.
def check_num(self, checks, radl):
    """Check types, operators and units in features with numbers.

    Args:
        checks (dict of dict of str:tuple): keys are property name
            prefixes; values map property name suffixes to check tuples
            as accepted by ``_check``.
        radl: passed through to ``_check``.

    Returns:
        dict: prefix -> set of indices seen.

    Raises:
        RADLParseException: on an invalid index or non-consecutive
            indices for a prefix.
    """
    prefixes = {}
    for f in self.features:
        if not isinstance(f, Feature):
            continue
        (prefix, sep, tail) = f.prop.partition(".")
        if not sep or prefix not in checks:
            continue
        checks0 = checks[prefix]
        (num, sep, suffix) = tail.partition(".")
        try:
            num = int(num)
        except ValueError:
            # Narrowed from a bare `except:`: int() on a non-numeric
            # string raises ValueError; anything else should propagate.
            raise RADLParseException(
                "Invalid property name; expected an index.", line=f.line)
        if not sep or suffix not in checks0:
            continue
        f._check(checks0[suffix], radl)
        if prefix not in prefixes:
            prefixes[prefix] = set()
        prefixes[prefix].add(num)
    # Check consecutive indices for num properties.
    for prefix, nums in prefixes.items():
        if min(nums) != 0 or max(nums) != len(nums) - 1:
            raise RADLParseException(
                "Invalid indices values in properties '%s'" % prefix)
    return prefixes
1,065,862
Return copy and score after being applied other system and soft features. Args: - other(system, optional): system to apply just before soft features. Return(tuple): tuple of the resulting system and its score.
def concrete(self, other=None):
    """Return a copy with `other` and soft features applied, plus score.

    Args:
        other (system, optional): system applied just before the soft
            features.

    Returns:
        tuple: (resulting system, accumulated soft score).
    """
    new_system = self.clone()
    if other:
        new_system.applyFeatures(other, missing="other")
    softs = sorted(self.getValue(SoftFeatures.SOFT, []),
                   key=lambda f: f.soft, reverse=True)
    score = 0
    for feature in softs:
        # Best-effort: soft features that cannot be applied are skipped.
        try:
            new_system.applyFeatures(feature, missing="other")
            score += feature.soft
        except:
            pass
    new_system.delValue(SoftFeatures.SOFT)
    return new_system, score
1,065,898
Add a network, ansible_host, system, deploy, configure or contextualize. Args: - aspect(network, system, deploy, configure or contextualize): thing to add. - ifpresent(str): if it has been defined, do: - ``"ignore"``: not add the aspect. - ``"replace"``: replace by the old defined. - ``"error"``: raise an error. Return(bool): True if aspect was added.
def add(self, aspect, ifpresent="error"):
    """Add a network, ansible host, system, deploy, configure or contextualize.

    `ifpresent` controls what happens when an aspect with the same id exists:
    "ignore" keeps the old one, "replace" substitutes the new one, and
    "error" raises.  Returns True if the aspect was added.
    """
    # If aspect is a contextualization, it is treated separately
    if isinstance(aspect, contextualize):
        self.contextualize.update(aspect)
        return True
    classification = [(network, self.networks), (system, self.systems),
                      (ansible, self.ansible_hosts), (deploy, self.deploys),
                      (configure, self.configures)]
    aspect_list = [l for t, l in classification if isinstance(aspect, t)]
    assert len(aspect_list) == 1, "Unexpected aspect for RADL."
    aspect_list = aspect_list[0]
    old_aspect = [a for a in aspect_list if a.getId() == aspect.getId()]
    if old_aspect:
        # If some aspect with the same id is found
        if ifpresent == "error":
            raise Exception("Aspect with the same id was found.")
        elif ifpresent == "replace":
            # drop the previously stored aspect before appending the new one
            for i, elem in enumerate(aspect_list):
                if elem.getId() == old_aspect[0].getId():
                    del aspect_list[i]
                    break
            aspect_list.append(aspect)
            return True
        elif ifpresent == "ignore":
            return False
        else:
            raise ValueError
    else:
        # Otherwise add aspect
        aspect_list.append(aspect)
        return True
1,065,902
Creates heart on the Summary. Args: git_repo_url: The url (ssh or https) of the Repository, used for cloning max_commits: Maximum number of commits in a day weeks_from_now: The number of week from this week the Heart's Right center boundary will be.
def create_heart(self, git_repo_url, max_commits=10, weeks_from_now=1):
    """Create the heart on the contribution summary.

    Args:
        git_repo_url: Clone URL (ssh or https) of the repository.
        max_commits: Maximum number of commits in a day.
        weeks_from_now: Weeks from this week to the heart's right boundary.
    """
    self.weeks_from_now = weeks_from_now
    self.end_date = self.get_end_date()
    try:
        # ".../name.git" -> "name"
        self.repository_name = git_repo_url.split('/')[-1][:-4]
        self.git_repo_url = git_repo_url
        self.max_commits = max_commits
        self.do_commits()
        self.do_commit_amends()
    except IndexError:
        raise ErrorMessage(
            "Please provide the correct URL for the Repository")
    except Exception as exc:
        raise ErrorMessage(str(exc))
1,066,457
Parse list of :class:`TimeResource` objects based on the mementoweb.org. Args: url (str): Any url. Returns: list: :class:`TimeResource` objects.
def mementoweb_api_tags(url):
    """Return :class:`TimeResource` objects for `url` from mementoweb.org.

    Duplicate years are collapsed (the last memento for a year wins) and
    the result is sorted by year.  Returns [] on any API failure.
    """
    response = requests.get("http://labs.mementoweb.org/timemap/json/" + url)
    if response.status_code != 200:
        return []
    mementos = response.json().get("mementos", {}).get("list", [])
    if not mementos:
        return []
    by_year = {}
    for memento in mementos:
        date = memento.get("datetime", "")
        resource = TimeResource(
            url=memento.get("uri", ""),
            date=date,
            # `val` is just the year part of the ISO datetime
            val=date.split("-")[0],
            source="MementoWeb.org",
        )
        by_year[resource.val] = resource
    return sorted(by_year.values(), key=lambda res: res.val)
1,066,767
Get :class:`TimeResource` objects with creation dates from Whois database. Args: domain (str): Domain without http://, relative paths and so on. Returns: list: :class:`TimeResource` objects.
def get_whois_tags(domain):
    """Return :class:`TimeResource` objects for Whois creation dates of `domain`."""
    whois_data = pythonwhois.get_whois(domain)
    tags = []
    for creation_date in whois_data.get("creation_date", []):
        tags.append(
            TimeResource(
                url=WHOIS_URL % domain.strip(),
                date=creation_date.isoformat("T"),
                val=creation_date.strftime("%Y"),
                source="Whois",
            )
        )
    return tags
1,066,768
Put together all data sources in this module and return it's output. Args: url (str): URL of the web. With relative paths and so on. domain (str): Just the domain of the web. as_dicts (bool, default False): Convert output to dictionaries compatible with :class:`.SourceString`? Returns: list: Sorted list of :class:`TimeResource` objects or dicts.
def get_creation_date_tags(url, domain, as_dicts=False):
    """Return creation-date info from all sources in this module, sorted by date.

    Args:
        url (str): Full URL of the web (with relative paths and so on).
        domain (str): Just the domain of the web.
        as_dicts (bool): Return plain dicts instead of TimeResource objects.
    """
    tags = mementoweb_api_tags(url) + get_whois_tags(domain)
    tags.sort(key=lambda tag: tag.date)
    if as_dicts:
        return [tag._as_dict() for tag in tags]
    return tags
1,066,769
Return list of titles parsed from HTML. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_html_titles(index_page):
    """Return non-empty <title> contents as :class:`.SourceString` objects."""
    dom = dhtmlparser.parseString(index_page)
    titles = []
    for title_tag in dom.find("title"):
        content = title_tag.getContent().strip()
        if content:
            titles.append(SourceString(content, "HTML"))
    return titles
1,067,022
Collect data from all the functions defined in this module and return list of titles parsed from HTML, ``<meta>`` tags and dublin core inlined in ``<meta>`` tags. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_title_tags(index_page):
    """Collect titles from <title> tags, <meta> tags and inlined Dublin Core."""
    dom = dhtmlparser.parseString(index_page)
    return (
        get_html_titles(dom)
        + get_html_meta_titles(dom)
        + get_dublin_core_titles(dom)
    )
1,067,023
Normalize language codes to ISO 639-2. If all conversions fails, return the `code` as it was given. Args: code (str): Language / country code. Returns: str: ISO 639-2 country code.
def normalize(code):
    """Normalize a language/country code to ISO 639-2; return `code` unchanged on failure."""
    # already a three-letter (ISO 639-2) code
    if len(code) == 3:
        return code
    iso_code = translate(code)
    if iso_code:
        return iso_code
    # fall back to a country-code lookup
    country = countries.get(code, None)
    return country.alpha3.lower() if country else code
1,067,353
Process the parsed command. Parameters ---------- twis: Pytwis A Pytwis instance which interacts with the Redis database of the Twitter toy clone. auth_secret: str The authentication secret of a logged-in user. args: The parsed command output by pytwis_command_parser().
def pytwis_command_processor(twis, auth_secret, args):
    """Process one parsed command against the Pytwis backend.

    Parameters
    ----------
    twis: Pytwis
        Instance that talks to the Redis database of the Twitter toy clone.
    auth_secret: str
        One-element list holding the authentication secret of the logged-in
        user (mutated in place on login/logout/password change).
    args:
        Parsed command as returned by pytwis_command_parser().
    """
    command = args[pytwis_clt_constants.ARG_COMMAND]
    if command == pytwis_clt_constants.CMD_REGISTER:
        succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
                                          args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't register {} with error = {}".
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGIN:
        succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
                                       args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            # remember the session secret for subsequent commands
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't log into username {} with error = {}".
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGOUT:
        succeeded, result = twis.logout(auth_secret[0])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
        else:
            print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        succeeded, result = twis.change_password(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                                 args[pytwis_clt_constants.ARG_NEW_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Changed the password')
        else:
            print("Couldn't change the password with error = {}".
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        succeeded, result = twis.get_user_profile(auth_secret[0])
        if succeeded:
            print('Got the user profile')
            print('=' * 20)
            for key, value in result.items():
                print('{}: {}'.format(key, value))
            print('=' * 20)
        else:
            print("Couldn't get the user profile with error = {}".
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_POST:
        succeeded, result = twis.post_tweet(auth_secret[0], args['tweet'])
        if succeeded:
            print('Posted the tweet')
        else:
            print("Couldn't post the tweet with error = {}".
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_FOLLOW:
        succeeded, result = twis.follow(auth_secret[0], args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't follow the username {} with error = {}".
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_UNFOLLOW:
        succeeded, result = twis.unfollow(auth_secret[0], args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't unfollow the username {} with error = {}".
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        succeeded, result = twis.get_followers(auth_secret[0])
        if succeeded:
            print('Got the list of {} followers'.
                  format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
            print('=' * 20)
            for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
                print('\t' + follower)
            print('=' * 20)
        else:
            print("Couldn't get the follower list with error = {}".
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        succeeded, result = twis.get_following(auth_secret[0])
        if succeeded:
            print('Got the list of {} followings'.
                  format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
            print('=' * 60)
            for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
                print('\t' + following)
            print('=' * 60)
        else:
            print("Couldn't get the following list with error = {}".
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_TIMELINE:
        succeeded, result = twis.get_timeline(auth_secret[0],
                                              args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            # an empty auth secret means the general (not per-user) timeline
            if auth_secret[0] != '':
                print('Got {} tweets in the user timeline'.
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            else:
                print('Got {} tweets in the general timeline'.
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            if auth_secret[0] != '':
                print("Couldn't get the user timeline with error = {}".
                      format(result[pytwis_constants.ERROR_KEY]))
            else:
                print("Couldn't get the general timeline with error = {}".
                      format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # Get the username of the currently logged-in user if no username is given.
        if args[pytwis_clt_constants.ARG_USERNAME] is None:
            succeeded, result = twis.get_user_profile(auth_secret[0])
            if succeeded:
                args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
                print('No username is given, so use the currently logged-in user {}'.
                      format(args[pytwis_clt_constants.ARG_USERNAME]))
            else:
                print("Couldn't get the username of the currently logged-in user with error = {}".
                      format(result[pytwis_constants.ERROR_KEY]))
                return
        succeeded, result = twis.get_user_tweets(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_USERNAME],
                                                 args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
                                                      args[pytwis_clt_constants.ARG_USERNAME]))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            print("Couldn't get the tweets posted by {} with error = {}".
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    else:
        pass
1,067,539
Download `url` and return it as utf-8 encoded text. Args: url (str): What should be downloaded? Returns: str: Content of the page.
def download(url):
    """Download `url` and return it as utf-8 encoded text.

    Args:
        url (str): What should be downloaded?

    Returns:
        str: Content of the page re-encoded to utf-8.

    Raises:
        UnicodeError: If no known encoding could decode the content.
    """
    headers = {"User-Agent": USER_AGENT}
    resp = requests.get(
        url,
        timeout=REQUEST_TIMEOUT,
        headers=headers,
        allow_redirects=True,
        verify=False,  # NOTE(review): SSL verification deliberately disabled
    )

    def decode(st, alt_encoding=None):
        # try the common encodings first, then whatever the server claimed
        encodings = ['ascii', 'utf-8', 'iso-8859-1', 'iso-8859-15']
        if alt_encoding:
            if isinstance(alt_encoding, basestring):  # Python 2 str/unicode
                encodings.append(alt_encoding)
            else:
                encodings.extend(alt_encoding)
        for encoding in encodings:
            try:
                return st.encode(encoding).decode("utf-8")
            # BUGFIX: `except UnicodeEncodeError, UnicodeDecodeError` (Py2
            # syntax) only caught UnicodeEncodeError and *bound* it to the
            # name UnicodeDecodeError; both must be caught as a tuple.
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass
        raise UnicodeError('Could not find encoding.')

    return decode(resp.text, resp.encoding)
1,067,713
Create a 2D software rendering context for a surface. Args: surface (Surface): The surface where rendering is done. Returns: Renderer: A 2D software rendering context. Raises: SDLError: If there was an error creating the renderer.
def create_software_renderer(self, surface):
    """Create a 2D software rendering context for a surface.

    Args:
        surface (Surface): The surface where rendering is done.

    Returns:
        Renderer: A 2D software rendering context.

    Raises:
        SDLError: If there was an error creating the renderer.
    """
    renderer = object.__new__(Renderer)
    # BUGFIX: the original wrote `renderer._ptr = self._ptr = ...`, which
    # clobbered this object's own pointer as a side effect; only the newly
    # created renderer must receive the pointer.
    renderer._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))
    return renderer
1,067,856
Create a 2D rendering context for a window. Args: window (Window): The window where rendering is displayed. index (int): The index of the rendering driver to initialize, or -1 to initialize the first one supporting the requested flags. flags (Set[RendererFlags]): The requested renderer flags. Raises: SDLError: If there was an error creating the renderer.
def __init__(self, window, index=-1, flags=frozenset()):
    """Create a 2D rendering context for `window`.

    Args:
        window (Window): The window where rendering is displayed.
        index (int): Rendering driver index, or -1 for the first one
            supporting the requested flags.
        flags (Set[RendererFlags]): The requested renderer flags.

    Raises:
        SDLError: If there was an error creating the renderer.
    """
    flag_mask = enumtools.get_mask(flags)
    self._ptr = check_ptr_err(lib.SDL_CreateRenderer(window._ptr, index, flag_mask))
1,067,857
Draw a line on the current rendering target. Args: x1 (int): The x coordinate of the start point. y1 (int): The y coordinate of the start point. x2 (int): The x coordinate of the end point. y2 (int): The y coordinate of the end point. Raises: SDLError: If an error is encountered.
def draw_line(self, x1, y1, x2, y2):
    """Draw a line from (x1, y1) to (x2, y2) on the current rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    status = lib.SDL_RenderDrawLine(self._ptr, x1, y1, x2, y2)
    check_int_err(status)
1,067,867
Draw a series of connected lines on the current rendering target. Args: *points (Point): The points along the lines. Raises: SDLError: If an error is encountered.
def draw_lines(self, *points):
    """Draw a series of connected lines through `points` on the current target.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(points)
    # marshal the Point wrappers into a native SDL_Point array
    native_points = ffi.new('SDL_Point[]', count)
    for index in range(count):
        native_points[index] = points[index]._ptr[0]
    check_int_err(lib.SDL_RenderDrawLines(self._ptr, native_points, count))
1,067,868
Draw a point on the current rendering target. Args: x (int): The x coordinate of the point. y (int): The y coordinate of the point. Raises: SDLError: If an error is encountered.
def draw_point(self, x, y):
    """Draw a single point at (x, y) on the current rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    status = lib.SDL_RenderDrawPoint(self._ptr, x, y)
    check_int_err(status)
1,067,869
Draw multiple points on the current rendering target. Args: *points (Point): The points to draw. Raises: SDLError: If an error is encountered.
def draw_points(self, *points):
    """Draw each of `points` on the current rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(points)
    # marshal the Point wrappers into a native SDL_Point array
    native_points = ffi.new('SDL_Point[]', count)
    for index in range(count):
        native_points[index] = points[index]._ptr[0]
    check_int_err(lib.SDL_RenderDrawPoints(self._ptr, native_points, count))
1,067,870
Draw a rectangle on the current rendering target. Args: rect (Rect): The destination rectangle, or None to outline the entire rendering target. Raises: SDLError: If an error is encountered.
def draw_rect(self, rect):
    """Draw a rectangle outline on the current rendering target.

    Args:
        rect (Rect): The destination rectangle, or None to outline the
            entire rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    # BUGFIX: `rect` is documented to accept None (outline the whole target,
    # which SDL expresses as a NULL rect), but `None._ptr` raised
    # AttributeError; pass ffi.NULL in that case.
    rect_ptr = rect._ptr if rect is not None else ffi.NULL
    check_int_err(lib.SDL_RenderDrawRect(self._ptr, rect_ptr))
1,067,871
Draw some number of rectangles on the current rendering target. Args: *rects (Rect): The destination rectangles. Raises: SDLError: If an error is encountered.
def draw_rects(self, *rects):
    """Outline each rectangle in `rects` on the current rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(rects)
    native_rects = ffi.new('SDL_Rect[]', count)
    for index, rect in enumerate(rects):
        native_rects[index] = rect._ptr[0]
    check_int_err(lib.SDL_RenderDrawRects(self._ptr, native_rects, count))
1,067,872
Fill a rectangle on the current rendering target with the drawing color. Args: rect (Rect): The destination rectangle, or None to fill the entire rendering target. Raises: SDLError: If an error is encountered.
def fill_rect(self, rect):
    """Fill a rectangle on the current rendering target with the drawing color.

    Args:
        rect (Rect): The destination rectangle, or None to fill the entire
            rendering target.

    Raises:
        SDLError: If an error is encountered.
    """
    # BUGFIX: `rect` is documented to accept None (fill the whole target,
    # which SDL expresses as a NULL rect), but `None._ptr` raised
    # AttributeError; pass ffi.NULL in that case.
    rect_ptr = rect._ptr if rect is not None else ffi.NULL
    check_int_err(lib.SDL_RenderFillRect(self._ptr, rect_ptr))
1,067,873
Fill some number of rectangles on the current rendering target with the drawing color. Args: *rects (Rect): The destination rectangles. Raises: SDLError: If an error is encountered.
def fill_rects(self, *rects):
    """Fill each rectangle in `rects` with the drawing color.

    Raises:
        SDLError: If an error is encountered.
    """
    count = len(rects)
    native_rects = ffi.new('SDL_Rect[]', count)
    for index, rect in enumerate(rects):
        native_rects[index] = rect._ptr[0]
    check_int_err(lib.SDL_RenderFillRects(self._ptr, native_rects, count))
1,067,874
Create a texture from an existing surface. Args: surface (Surface): The surface containing pixel data used to fill the texture. Returns: Texture: A texture containing the pixels from surface. Raises: SDLError: If an error is encountered.
def from_surface(renderer, surface):
    """Create a texture from an existing surface.

    Args:
        renderer (Renderer): The rendering context.
        surface (Surface): The surface containing the pixel data.

    Returns:
        Texture: A texture containing the pixels from `surface`.

    Raises:
        SDLError: If an error is encountered.
    """
    ptr = check_ptr_err(
        lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))
    texture = object.__new__(Texture)
    texture._ptr = ptr
    return texture
1,067,877
Parse `authors` from HTML ``<meta>`` and dublin core. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_author_tags(index_page):
    """Parse authors from HTML <meta> and Dublin Core tags."""
    dom = dhtmlparser.parseString(index_page)
    return get_html_authors(dom) + get_dc_authors(dom)
1,067,888
Show the progress bar and set it to `progress` tuple or value. Args: progress (tuple / int / float): Tuple ``(done / len(all))`` or the direct percentage value as int / float. msg (str, default None): Alternative background description.
def show(self, progress, msg=None):
    """Show the progress bar and set it to `progress`.

    Args:
        progress (tuple / int / float): ``(done, len(all))`` tuple or a
            direct percentage value.
        msg (str, default None): Alternative background description.
    """
    if self.whole_tag.style.display == "none":
        self.whole_tag.style.display = "block"
    # allow either direct percentage value, or (done / len(all)) pairs
    if isinstance(progress, int) or isinstance(progress, float):
        percentage = progress
    else:
        percentage = self.__class__._compute_percentage(progress)
    # toggle animation
    self.tag.class_name = "progress-bar"
    if percentage < 100:
        self.tag.class_name += " progress-bar-striped active"
    else:
        msg = "Hotovo"  # Czech for "Done" -- shown when finished
    # show percentage in progress bar
    self.tag.aria_valuemin = percentage
    self.tag.style.width = "{}%".format(percentage)
    if msg:
        self.tag.text = msg
1,067,993
Update with an ordered iterable of items. Args: iterable: An ordered iterable of items. The relative order of the items in this iterable will be respected in the TopoSet (in the absence of cycles).
def update(self, iterable):
    """Record the relative order of `iterable`'s items as graph edges.

    Args:
        iterable: An ordered iterable whose relative item order will be
            respected in the TopoSet (in the absence of cycles).
    """
    self._edges.extend(pairwise_longest(iterable, fillvalue=_FILL))
    # invalidate the cached topological sort
    self._results = None
1,068,019
This function return the right configuration for the inspire_merge function in according to the given sources. Both parameters can not be None. Params: head(dict): the HEAD record update(dict): the UPDATE record head_source(string): the source of the HEAD record Returns: MergerConfigurationOperations: an object containing the rules needed to merge HEAD and UPDATE
def get_configuration(head, update, head_source=None):
    """Return the merger configuration matching the HEAD and UPDATE sources.

    Args:
        head (dict): The HEAD record.
        update (dict): The UPDATE record.
        head_source (str): Source of the HEAD record; derived from `head`
            when not given.

    Returns:
        MergerConfigurationOperations: the rules to merge HEAD and UPDATE.
    """
    head_source = head_source or get_head_source(head)
    update_source = get_acquisition_source(update)
    # a manual merge wins unless the pair is (arxiv, publisher) in some order
    if not is_arxiv_and_publisher(head_source, update_source) \
            and is_manual_merge(head, update):
        return ManualMergeOperations
    head_is_arxiv = head_source == 'arxiv'
    update_is_arxiv = update_source == 'arxiv'
    if head_is_arxiv and update_is_arxiv:
        return ArxivOnArxivOperations
    if head_is_arxiv:
        return PublisherOnArxivOperations
    if update_is_arxiv:
        return ArxivOnPublisherOperations
    return PublisherOnPublisherOperations
1,068,075
Set the elements from the data obtained from REST API. Args: values (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys.
def set(cls, values):
    """Fill the output elements from REST API data.

    Args:
        values (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys.
    """
    output_elements = (
        ("mrc", cls.mrc_out_el),
        ("oai", cls.oai_out_el),
        ("dc", cls.dc_out_el),
    )
    for key, element in output_elements:
        element.text = values.get(key, "")
    cls.filename = values.get("fn", "fn")
    cls.values = values
1,068,125
Make AJAX request to `url` with given POST `data`. Call `on_complete` callback when complete. Args: url (str): URL. data (dict): Dictionary with POST data. on_complete (ref): Reference to function / method which will be called when the request is done.
def make_request(url, data, on_complete):
    """Send an asynchronous POST request with `data` to `url`.

    Args:
        url (str): Target URL.
        data (dict): POST payload.
        on_complete (ref): Callback invoked when the request finishes.
    """
    request = ajax.ajax()
    # the callback must be bound before the request is opened/sent
    request.bind('complete', on_complete)
    request.open('POST', url, True)
    request.set_header('content-type', 'application/x-www-form-urlencoded')
    request.send(data)
1,068,229
Build keyword dictionary from raw keyword data. Ignore invalid or invalidated records. Args: kw_list (list): List of dicts from :func:`read_kw_file`. Returns: OrderedDict: dictionary with keyword data.
def build_kw_dict(kw_list):
    """Build a keyword dictionary from raw keyword data, skipping bad records.

    Args:
        kw_list (list): List of dicts from :func:`read_kw_file`.

    Returns:
        OrderedDict: keyword heading (utf-8 bytes) -> best keyword record.
    """
    kw_dict = OrderedDict()
    # sort by the (utf-8 encoded) heading so insertion order is stable
    sorted_list = sorted(
        kw_list,
        key=lambda x: x.get("zahlavi").encode("utf-8")
    )
    for keyword_data in sorted_list:
        if "zahlavi" not in keyword_data:
            continue
        zahlavi = keyword_data["zahlavi"].encode("utf-8")
        old_record = kw_dict.get(zahlavi)
        if not old_record:
            kw_dict[zahlavi] = keyword_data
            continue
        # prefer records that carry an english equivalent ("angl_ekvivalent")
        key = "angl_ekvivalent"
        if not old_record.get(key) and keyword_data.get(key):
            kw_dict[zahlavi] = keyword_data
            continue
        # ... or at least the source of the english equivalent
        key = "zdroj_angl_ekvivalentu"
        if not old_record.get(key) and keyword_data.get(key):
            kw_dict[zahlavi] = keyword_data
            continue
        # otherwise keep whichever record holds more data overall
        if len(str(keyword_data)) > len(str(old_record)):
            kw_dict[zahlavi] = keyword_data
            continue
    return kw_dict
1,068,326
Create a window with the specified position, dimensions, and flags. Args: title (str): The title of the window. x (int): The x postion of the window. y (int): The y position of the window. w (int): The width of the window. h (int): The height of the window. flags (Set[WindowFlags]): The flags for the window. Raises: SDLError: If the window could not be created.
def __init__(self, title='sdl2', x=lib.SDL_WINDOWPOS_CENTERED, y=lib.SDL_WINDOWPOS_CENTERED,
             w=640, h=480, flags=frozenset()):
    """Create a window with the specified title, position, dimensions and flags.

    Raises:
        SDLError: If the window could not be created.
    """
    flag_mask = enumtools.get_mask(flags)
    self._ptr = check_ptr_err(
        lib.SDL_CreateWindow(title.encode('utf-8'), x, y, w, h, flag_mask))
1,068,345
Returns a helpful position description for an index in a (multi-line) string using the format line:column. Arguments: string (str): The string to which the index refers. index (int): The index of the character in question. Returns: A string with the format line:column where line refers to the 1-indexed row/line in which the character is found within the string and column to the position of the character within (relative to) that line.
def position(string, index):
    """Return a "line:column" description for `index` in a (multi-line) string.

    Arguments:
        string (str): The string to which the index refers.
        index (int): The index of the character in question.

    Returns:
        "line:column" where line is the 1-indexed line holding the character
        and column its 0-indexed position within that line; for single-line
        strings just the index; None for an empty string.

    Raises:
        InternalError: If `index` is out of range.
    """
    if not string:
        return None
    if index < 0 or index >= len(string):
        raise InternalError("Out-of-range index passed to errors.position!")
    lines = string.split("\n")
    # If there only is one single line the
    # line:index format wouldn't be so intuitive
    if len(lines) == 1:
        return str(index)
    before = 0
    line_number = 0
    for n, line in enumerate(lines, start=1):
        line_number = n
        future = before + len(line) + 1  # + 1 for the "\n"
        # strictly greater: `before` already includes the first character of
        # the next line (1-indexed length vs 0-indexed index)
        if future > index:
            break
        before = future
    # BUGFIX: the line number was reported 0-indexed (enumerate from 0)
    # although documented as 1-indexed.
    return "{0}:{1}".format(line_number, index - before)
1,068,607
Gets a spoken-word representation for a number. Arguments: digit (int): An integer to convert into spoken-word. Returns: A spoken-word representation for a digit, including an article ('a' or 'an') and a suffix, e.g. 1 -> 'a 1st', 11 -> "an 11th". Additionally delimits characters in groups of three for values > 999.
def number(digit):
    """Return a spoken-word representation for a number.

    Arguments:
        digit (int): The integer to convert.

    Returns:
        str: Article + comma-grouped digits + ordinal suffix, e.g.
        1 -> "a 1st", 11 -> "an 11th", 123456 -> "a 123,456th".
    """
    spoken = str(digit)
    # Leading spoken group "8..." (eight*), "11" (eleven*) or "18"
    # (eighteen*) takes "an"; everything else takes "a".
    # BUGFIX: "18" (eighteen) previously got "a".
    if spoken.startswith("8") or spoken[:len(spoken) % 3] in ("11", "18"):
        article = "an "
    else:
        article = "a "
    # BUGFIX: numbers *ending* in 11/12/13 take "th" (111th, 112th, 113th);
    # the original only special-cased the exact values 11/12/13, yielding
    # "111st", "112nd", "113rd".
    if spoken[-2:] in ("11", "12", "13"):
        suffix = "th"
    elif spoken.endswith("1"):
        suffix = "st"
    elif spoken.endswith("2"):
        suffix = "nd"
    elif spoken.endswith("3"):
        suffix = "rd"
    else:
        suffix = "th"
    if digit > 999:
        # group the digits in threes, e.g. "1234" -> "1,234"
        # BUGFIX: when len(spoken) was divisible by 3 the original emitted a
        # leading comma ("123456" -> ",123,456").
        prefix = len(spoken) % 3
        groups = [spoken[:prefix]] if prefix else []
        groups += [spoken[n:n + 3] for n in range(prefix, len(spoken), 3)]
        spoken = ",".join(groups)
    return article + spoken + suffix
1,068,608
Combines a warning with a call to errors.position(). Simple convenience function. Arguments: string (str): The string being parsed. pos (int): The index of the character that caused trouble.
def warn(what, string, pos):
    """Emit a Warning combining `what` with the line:column of `pos`.

    Arguments:
        what (str): Description of the problem.
        string (str): The string being parsed.
        pos (int): Index of the character that caused trouble.
    """
    location = position(string, pos)
    message = "{0} at position {1}!".format(what, location)
    warnings.warn(message, Warning)
1,068,609
Initializes the EcstasyError super-class. Arguments: what (str): A descriptive string regarding the cause of the error.
def __init__(self, what):
    """Initialize the EcstasyError base with a descriptive message.

    Arguments:
        what (str): A descriptive string regarding the cause of the error.
    """
    super(EcstasyError, self).__init__(what)
    self.what = what
1,068,610
Fetch the RepositoryURL for a given repository, reading info from pypirc. Will try to find the repository in the .pypirc, including username/password. Args: pypirc (str): path to the .pypirc config file repository (str): URL or alias for the repository Returns: base.RepositoryURL for the repository
def get_repo_url(pypirc, repository):
    """Resolve `repository` (URL or .pypirc alias) to a RepositoryURL.

    Args:
        pypirc (str): Path to the .pypirc config file.
        repository (str): URL or alias for the repository.

    Returns:
        base.RepositoryURL for the repository.
    """
    pypirc_path = os.path.abspath(os.path.expanduser(pypirc))
    repo_config = base.PyPIConfig(pypirc_path).get_repo_config(repository)
    if not repo_config:
        # not configured in .pypirc: treat the argument as a plain URL
        return base.RepositoryURL(repository)
    return repo_config.get_clean_url()
1,068,739
Change working node to astr_path. The path is converted to a list, split on '/'. By performing a 'cd' all parent and derived nodes need to be updated relative to new location. Args: astr_path (string): The path to cd to. Returns: {"status" : True/False , "path": l_cwd -- the path as list}
def cdnode(self, astr_path):
    """Change the working node to `astr_path`.

    The path is split on '/'; on success the cwd, current node and current
    branch are all updated relative to the new location.

    Returns:
        dict: {"status": True/False, "path": l_cwd as a list}
    """
    # Start at the root and then navigate to the
    # relevant node
    l_absPath = []
    b_valid, l_absPath = self.b_pathInTree(astr_path)
    if b_valid:
        #print "got cdpath = %s" % l_absPath
        self.l_cwd = l_absPath[:]
        self.snode_current = self.snode_root
        self.sbranch_current = self.sbranch_root
        #print l_absPath
        # walk down the tree, skipping the root element of the path
        for node in l_absPath[1:]:
            self.snode_current = self.snode_current.d_nodes[node]
        self.sbranch_current.dict_branch = self.snode_current.snode_parent.d_nodes
        return {"status": True, "path": self.l_cwd}
    return {"status": False, "path": []}
1,068,793
Set options from a list of section.option=value string. Args: conf (:class:`~loam.manager.ConfigurationManager`): the conf to update. optstrs (list of str): the list of 'section.option=value' formatted string.
def set_conf_str(conf, optstrs):
    """Set options from a list of 'section.option=value' strings.

    Args:
        conf (:class:`~loam.manager.ConfigurationManager`): the conf to update.
        optstrs (list of str): 'section.option=value' formatted strings.

    Raises:
        error.SectionError / error.OptionError: for unknown sections/options.
    """
    falsy = ['0', 'no', 'n', 'off', 'false', 'f']
    bool_actions = ['store_true', 'store_false', internal.Switch]
    for optstr in optstrs:
        opt, val = optstr.split('=', 1)
        sec, opt = opt.split('.', 1)
        if sec not in conf:
            raise error.SectionError(sec)
        if opt not in conf[sec]:
            raise error.OptionError(opt)
        meta = conf[sec].def_[opt]
        if meta.default is None:
            # no default to infer a type from: use the declared CLI type,
            # falling back to bool for switch-like actions, else str
            if 'type' in meta.cmd_kwargs:
                cast = meta.cmd_kwargs['type']
            else:
                act = meta.cmd_kwargs.get('action')
                cast = bool if act in bool_actions else str
        else:
            cast = type(meta.default)
        # bool('false') is True, so map falsy spellings to '' before casting
        if cast is bool and val.lower() in falsy:
            val = ''
        conf[sec][opt] = cast(val)
1,069,025
Implement the behavior of a subcmd using config_conf_section Args: conf (:class:`~loam.manager.ConfigurationManager`): it should contain a section created with :func:`config_conf_section` function. config (str): name of the configuration section created with :func:`config_conf_section` function.
def config_cmd_handler(conf, config='config'):
    """Implement the `config` subcommand (create/update/edit config files).

    Args:
        conf (:class:`~loam.manager.ConfigurationManager`): must contain a
            section created with :func:`config_conf_section`.
        config (str): name of that configuration section.
    """
    section = conf[config]
    if section.create or section.update:
        conf.create_config_(update=section.update)
    if section.create_local:
        conf.create_config_(index=-1, update=section.update)
    if section.edit:
        # make sure there is a file to edit before launching the editor
        if not conf.config_files_[0].is_file():
            conf.create_config_(update=section.update)
        editor_cmd = '{} {}'.format(section.editor, conf.config_files_[0])
        subprocess.call(shlex.split(editor_cmd))
1,069,026
Called when a captcha must be solved Writes the image to a temporary file and asks the user to enter the code. Args: captcha_data: Bytestring of the PNG captcha image. message: Optional. A message from Steam service. Returns: A string containing the solved captcha code.
def _handle_captcha(captcha_data, message=''):  # pylint:disable=unused-argument
    """Ask the user to solve a captcha.

    Writes the PNG image to a temporary file and prompts for the code.

    Args:
        captcha_data: Bytestring of the PNG captcha image.
        message: Optional message from the Steam service (unused).

    Returns:
        str: The solved captcha code.
    """
    from tempfile import NamedTemporaryFile
    image_file = NamedTemporaryFile(suffix='.png')
    image_file.write(captcha_data)
    image_file.flush()
    solved_code = input('Please take a look at the captcha image "%s" and provide the code:' % image_file.name)
    image_file.close()
    return solved_code
1,069,104
Called when SteamGuard requires authentication via e-mail. Asks the user to enter the code. Args: maildomain: Optional. The mail domain of the e-mail address the SteamGuard code is send to. message: Optional. A message from Steam service. Returns: A string containing the code.
def _handle_emailauth(maildomain='', message=''): # pylint:disable=unused-argument print('SteamGuard requires email authentication...') emailauth = input('Please enter the code sent to your mail address at "%s": ' % maildomain) emailauth.upper() return emailauth
1,069,105
Return `languages` stored in ``<meta>`` tags. ``<meta http-equiv="Content-language" content="cs">`` -> ``cs`` Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_html_lang_tags(index_page):
    """Return languages from ``<meta http-equiv="Content-language">`` tags.

    Args:
        index_page (str): HTML content of the page you wish to analyze.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    dom = dhtmlparser.parseString(index_page)

    def is_lang_meta(tag):
        return tag.params.get("http-equiv", "").lower() == "content-language"

    return [
        SourceString(meta.params["content"], "HTML")
        for meta in dom.find("meta", fn=is_lang_meta)
        if "content" in meta.params
    ]
1,069,340
Parse lang and xml:lang parameters in the ``<html>`` tag. See https://www.w3.org/International/questions/qa-html-language-declarations for details. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_html_tag_lang_params(index_page):
    """Return the lang / xml:lang attributes of the ``<html>`` tag.

    See https://www.w3.org/International/questions/qa-html-language-declarations
    for details.
    """
    dom = dhtmlparser.parseString(index_page)
    found = dom.find("html")
    if not found:
        return []
    params = found[0].params
    lang = params.get("lang")
    xml_lang = params.get("xml:lang")
    # a single entry is enough when both attributes agree
    if lang and lang == xml_lang:
        return [SourceString(lang, source="<html> tag")]
    results = []
    if lang:
        results.append(SourceString(lang, source="<html lang=..>"))
    if xml_lang:
        results.append(SourceString(xml_lang, source="<html xml:lang=..>"))
    return results
1,069,341
Detect `languages` using `langdetect` library. Args: index_page (str): HTML content of the page you wish to analyze. Returns: obj: One :class:`.SourceString` object.
def detect_language(index_page):
    """Guess the page language with the `langdetect` library.

    Args:
        index_page (str): HTML content of the page you wish to analyze.

    Returns:
        obj: One :class:`.SourceString` object.
    """
    dom = dhtmlparser.parseString(index_page)
    plain_text = dhtmlparser.removeTags(dom)
    try:
        detected = langdetect.detect(plain_text)
    except UnicodeDecodeError:
        # py2 bytestrings have to be decoded first
        detected = langdetect.detect(plain_text.decode("utf-8"))
    return SourceString(detected, source="langdetect")
1,069,342
Collect informations about language of the page from HTML and Dublin core tags and langdetect guesses. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of :class:`.SourceString` objects.
def get_lang_tags(index_page):
    """Collect language info from HTML tags, Dublin Core and langdetect.

    Args:
        index_page (str): HTML content of the page you wish to analyze.

    Returns:
        list: Sorted, deduplicated list of :class:`.SourceString` objects.
    """
    dom = dhtmlparser.parseString(index_page)
    collected = (
        get_html_lang_tags(dom)
        + get_dc_lang_tags(dom)
        + [detect_language(dom)]
        + get_html_tag_lang_params(dom)
    )
    normalized = {
        SourceString(normalize(lang), source=lang.source)
        for lang in collected
    }
    return sorted(normalized)
1,069,343
Convenient interface to the ecstasy package. Arguments: string (str): The string to beautify with ecstasy. args (list): The positional arguments. kwargs (dict): The keyword ('always') arguments.
def beautify(string, *args, **kwargs):
    """Convenient interface to the ecstasy package.

    Arguments:
        string (str): The string to beautify with ecstasy.
        args (list): The positional arguments.
        kwargs (dict): The keyword ('always') arguments.
    """
    return Parser(args, kwargs).beautify(string)
1,069,392
Initializes a Parser instance. Arguments: args (list): The positional arguments. kwargs (dict): The 'always' (keyword) arguments.
def __init__(self, args, kwargs):
    """Initialize a Parser instance.

    Arguments:
        args (list): The positional arguments.
        kwargs (dict): The 'always' (keyword) arguments.
    """
    self.always = kwargs
    self.positional = self.get_flags(args) if args else []
    # meta characters that delimit phrases and argument lists
    self.meta = re.compile(r"[()<>]")
    # valid argument specifications: comma-separated (negative) indices with
    # an optional '!' and/or '+' modifier
    self.arguments = re.compile(r"^(-?\d,?)+!?$|"
                                r"^!?(-?\d,?)+$|"
                                r"^(!\+?|\+!?)$")
    # Used in self.stringify to auto-increment
    # positional argument positions
    self.counter = 0
1,069,395
Wraps together all actions needed to beautify a string, i.e. parse the string and then stringify the phrases (replace tags with formatting codes). Arguments: string (str): The string to beautify/parse. Returns: The parsed, stringified and ultimately beautified string. Raises: errors.ArgumentError if phrases were found, but not a single style (flag combination) was supplied.
def beautify(self, string):
    """Parse `string` and replace its phrases with formatting codes.

    Returns:
        The parsed, stringified and ultimately beautified string.

    Raises:
        errors.ArgumentError if phrases were found, but not a single
        style (flag combination) was supplied.
    """
    if not string:
        return string
    # parsing may alter the string (escape characters are removed)
    string, phrases = self.parse(string)
    if not phrases:
        return string
    if not (self.positional or self.always):
        raise errors.ArgumentError("Found phrases, but no styles "
                                   "were supplied!")
    return self.stringify(string, phrases)
1,069,397
Checks if a meta character is escaped or else warns about it. If the meta character has an escape character ('\') preceding it, the meta character is escaped. If it does not, a warning is emitted that the user should escape it. Arguments: string (str): The relevant string in which the character was found. pos (int): The index of the meta character within the string. Returns: The possibly escaped string and the next meta match.
def escape_meta(self, string, pos):
    """Handle a meta character at `pos`: consume its escape or warn.

    If a '\\' precedes the meta character it is removed; otherwise a warning
    urges the user to escape it.  Returns the (possibly modified) string and
    the next meta match.
    """
    if pos > 0 and string[pos - 1] == "\\":
        # drop the backslash; the meta character shifts one index left
        string = string[:pos - 1] + string[pos:]
    else:
        warnings.warn("Un-escaped meta-character: '{0}' (Escape"
                      " it with a '\\')".format(string[pos]),
                      Warning)
        pos += 1
    next_meta = self.meta.search(string, pos)
    return string, next_meta
1,069,399
Helper function of self.parse() handling opening tags. Arguments: string (str): The string being parsed. pos (int): The index/position of the opening tag in the string. Returns: The (possibly) escaped string, a child phrase if the opening tag was not escaped and otherwise None, and a new tag match, either starting at one index passed the escaped tag or one index passed the closing tag of the child.
def open_phrase(self, string, pos):
    """Helper of self.parse() that handles an opening tag at *pos*.

    Arguments:
        string (str): The string being parsed.
        pos (int): The index/position of the opening tag in the string.

    Returns:
        The (possibly) escaped string, a child phrase if the opening
        tag was not escaped (otherwise None), and a new tag match —
        either starting one index past the escaped tag or one index
        past the closing tag of the child.
    """
    if string[pos - 1] == "\\":
        # The tag is escaped: remove the escape character, which
        # shifts the tag index one position to the left.
        string = string[:pos - 1] + string[pos:]
        pos -= 1
        # Unless the escape character was itself (double) escaped,
        # resume scanning right after the now-literal tag.
        if pos == 0 or string[pos - 1] != "\\":
            return string, None, self.meta.search(string, pos + 1)

    child = Phrase(pos)
    escaped, child = self.parse(string[pos + 1:], child)
    string = string[:pos + 1] + escaped
    tag = self.meta.search(string, child.closing + 1)

    return string, child, tag
1,069,400
Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages. Arguments: string (str): The string of the phrase for which there weren't enough arguments. Raises: errors.ArgumentError with a detailed error message.
def raise_not_enough_arguments(self, string):
    """Raise a detailed errors.ArgumentError for missing arguments.

    Takes care of formatting the detailed error message.

    Arguments:
        string (str): The string of the phrase for which there
            weren't enough arguments.

    Raises:
        errors.ArgumentError: Always, with a detailed message.
    """
    available = len(self.positional)
    message = ("Requested {} formatting argument for "
               "'{}' but only {} {} supplied!").format(
                   errors.number(self.counter + 1),
                   string,
                   available,
                   "was" if available == 1 else "were")
    raise errors.ArgumentError(message)
1,069,404
Return list of `keywords` parsed from HTML ``<meta>`` tags. Args: index_page (str): Content of the page as UTF-8 string Returns: list: List of :class:`.SourceString` objects.
def get_html_keywords(index_page):
    """Return list of `keywords` parsed from HTML ``<meta>`` tags.

    Args:
        index_page (str): Content of the page as UTF-8 string

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    keywords = []
    # Each "keywords" <meta> tag holds a comma-separated list.
    for keyword_list in parse_meta(index_page, "keywords", "HTML"):
        for keyword in keyword_list.split(","):
            keywords.append(SourceString(keyword.strip(), source="HTML"))
    return keywords
1,069,414
Return list of `keywords` parsed from Dublin core. Args: index_page (str): Content of the page as UTF-8 string Returns: list: List of :class:`.SourceString` objects.
def get_dc_keywords(index_page):
    """Return list of `keywords` parsed from Dublin core.

    Args:
        index_page (str): Content of the page as UTF-8 string

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    # Each "dc.keywords" <meta> tag holds a whitespace-separated list;
    # flatten all of them into one list of SourceStrings.
    return [
        SourceString(keyword, source="DC")
        for keyword_list in parse_meta(index_page, "dc.keywords", "DC")
        for keyword in keyword_list.split()
    ]
1,069,415
Try to process text on the `index_page`, deduce the keywords and then try to match them against Aleph's dataset. The function returns at most `no_items` items, to prevent spamming the user. Args: index_page (str): Content of the page as UTF-8 string no_items (int, default 5): Number of items to return. Returns: list: List of :class:`.SourceString` objects.
def extract_keywords_from_text(index_page, no_items=5):
    """Deduce keywords from the text on `index_page` and match them
    against Aleph's keyword dataset.

    At most `no_items` items are returned, to prevent spamming the
    user.

    Args:
        index_page (str): Content of the page as UTF-8 string
        no_items (int, default 5): Number of items to return.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    # NOTE(review): removed the unused inner helper `to_str`, which
    # referenced the Python-2-only name `unicode` and would raise a
    # NameError on Python 3 if it were ever called.
    index_page = MLStripper.strip_tags(index_page)
    tokenized_index = TextBlob(index_page).lower()

    # Only consider reasonably long dataset keys present in the text.
    present_keywords = [
        KEYWORDS_LOWER[key]
        for key in KEYWORDS_LOWER.keys()
        if len(key) > 3 and key in tokenized_index
    ]

    def to_source_string(key):
        # Wrap `key` in a SourceString, falling back to UTF-8 bytes.
        source = "Keyword analysis"
        try:
            return SourceString(key, source)
        except UnicodeEncodeError:
            return SourceString(key.encode("utf-8"), source)

    # Keep only keywords that occur as whole words in the text.
    multi_keywords = [
        to_source_string(key)
        for key in present_keywords
        if tokenized_index.words.count(key) >= 1
    ]

    # Prefer longer (more specific) keywords, then cap the list size.
    multi_keywords = sorted(multi_keywords, key=len, reverse=True)
    return multi_keywords[:no_items]
1,069,417
Parse `keywords` from HTML ``<meta>``, dublin core and from text. Args: index_page (str): Content of the page as UTF-8 string. map_to_nk_set (bool): Should the algorithm try to map keywords to keywords used in NK? Returns: list: List of :class:`.SourceString` objects.
def get_keyword_tags(index_page, map_to_nk_set=True):
    """Parse `keywords` from HTML ``<meta>``, dublin core and from text.

    Args:
        index_page (str): Content of the page as UTF-8 string.
        map_to_nk_set (bool): Should the algorithm try to map keywords
            to keywords used in NK?

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    dom = dhtmlparser.parseString(index_page)
    keywords = get_html_keywords(dom) + get_dc_keywords(dom)

    # The keywords extracted from the text are already based on
    # Aleph's dataset, so no matching is attempted for them.
    extracted_keywords = extract_keywords_from_text(index_page)

    if not map_to_nk_set:
        return keywords + extracted_keywords

    def try_match(keyword):
        # Prefer an exact (case-insensitive) dataset hit, otherwise
        # fall back to fuzzy matching against the full keyword set.
        kw = KEYWORDS_LOWER.get(keyword.lower())
        if kw:
            return kw
        return process.extractOne(str(keyword), KEYWORDS)[0].encode("utf-8")

    matched = [
        SourceString(try_match(keyword), source=keyword.source)
        for keyword in keywords
    ]

    return sorted(set(matched + extracted_keywords))
1,069,418
Return list of descriptions parsed from ``<meta>`` tags and Dublin Core inlined in ``<meta>`` tags. Args: index_page (str): HTML content of the page you wish to analyze. Returns: list: List of ``SourceString`` objects.
def get_annotation_tags(index_page):
    """Return descriptions parsed from ``<meta>`` tags and Dublin Core.

    Args:
        index_page (str): HTML content of the page you wish to analyze.

    Returns:
        list: List of ``SourceString`` objects.
    """
    dom = dhtmlparser.parseString(index_page)
    return get_html_annotations(dom) + get_dc_annotations(dom)
1,069,597