<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_text_file(self, text_file): """Sends a Slack message with the contents of a text file :param text_file: (str) Full path to the text file to send :return: None :raises: Cons3rtSlackerError """
log = logging.getLogger(self.cls_logger + '.send_text_file')
if not isinstance(text_file, str):
    msg = 'arg text_file must be a string, found type: {t}'.format(t=text_file.__class__.__name__)
    raise Cons3rtSlackerError(msg)
if not os.path.isfile(text_file):
    msg = 'The provided text_file was not found or is not a file: {f}'.format(f=text_file)
    raise Cons3rtSlackerError(msg)
log.debug('Attempting to send a Slack message with the contents of file: {f}'.format(f=text_file))
try:
    with open(text_file, 'r') as f:
        file_text = f.read()
except (IOError, OSError) as ex:
    msg = '{n}: There was a problem opening file: {f}\n{e}'.format(
        n=ex.__class__.__name__, f=text_file, e=str(ex))
    raise Cons3rtSlackerError(msg) from ex

# Take the last 7000 characters
file_text_trimmed = file_text[-7000:]
attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
self.add_attachment(attachment)
self.send()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(project, version, promote, quiet): """ Deploy the app to the target environment. The target environments can be configured using the ENVIRONMENTS conf variable. This will also collect all static files and compile translation messages """
from . import logic
logic.deploy(project, version, promote, quiet)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def call_plugins(self, step):
    ''' For each plugin, check if a "step" method exists on it, and call it

    Args:
        step (str): The method to search and call on each plugin
    '''
    for plugin in self.plugins:
        try:
            getattr(plugin, step)()
        except AttributeError:
            self.logger.debug("{} doesn't exist on plugin {}".format(step, plugin))
        except TypeError:
            self.logger.debug("{} on plugin {} is not callable".format(step, plugin))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Run the application """
self.call_plugins("on_run") if vars(self.arguments).get("version", None): self.logger.info("{app_name}: {version}".format(app_name=self.app_name, version=self.version)) else: if self.arguments.command == "main": self.main() else: self.subcommands[self.arguments.command].run() self.call_plugins("on_end")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def changelog(): # type: () -> str """ Print change log since last release. """
# Skip 'v' prefix
versions = [x for x in git.tags() if versioning.is_valid(x[1:])]

cmd = 'git log --format=%H'
if versions:
    cmd += ' {}..HEAD'.format(versions[-1])

hashes = shell.run(cmd, capture=True).stdout.strip().splitlines()
commits = [git.CommitDetails.get(h) for h in hashes]

tags = conf.get('changelog.tags', [
    {'header': 'Features', 'tag': 'feature'},
    {'header': 'Changes', 'tag': 'change'},
    {'header': 'Fixes', 'tag': 'fix'},
])

results = OrderedDict((x['header'], []) for x in tags)
for commit in commits:
    commit_items = extract_changelog_items(commit.desc, tags)
    for header, items in commit_items.items():
        results[header] += items

lines = [
    '<35>v{}<0>'.format(versioning.current()),
    '',
]
for header, items in results.items():
    if items:
        lines += [
            '',
            '<32>{}<0>'.format(header),
            '<32>{}<0>'.format('-' * len(header)),
            '',
        ]
        for item_text in items:
            item_lines = textwrap.wrap(item_text, 77)
            lines += ['- {}'.format('\n '.join(item_lines))]
        lines += ['']

return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_changelog_items(text, tags): # type: (str, list) -> Dict[str, List[str]] """ Extract all tagged items from text. Args: text (str): Text to extract the tagged items from. Each tagged item is a paragraph that starts with a tag. It can also be a text list item. tags (list): The tag configurations, each a dict with a 'header' and a 'tag' key. Returns: Dict[str, List[str]]: A dictionary mapping each tag header to the list of items extracted for it. The tagged items are usually features/changes/fixes, but the tags can be configured through `pelconf.yaml`. """
patterns = {x['header']: tag_re(x['tag']) for x in tags}
items = {x['header']: [] for x in tags}

curr_tag = None
curr_text = ''
for line in text.splitlines():
    if not line.strip():
        if curr_tag is not None:
            items[curr_tag].append(curr_text)
            curr_text = ''
        curr_tag = None

    for tag in tags:
        m = patterns[tag['header']].match(line)
        if m:
            if curr_tag is not None:
                items[curr_tag].append(curr_text)
                curr_text = ''
            curr_tag = tag['header']
            line = m.group('text')
            break

    if curr_tag is not None:
        curr_text = '{} {}'.format(curr_text.strip(), line.strip()).strip()

if curr_tag is not None:
    items[curr_tag].append(curr_text)

return items
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_delta(start: datetime, end: datetime, delta: timedelta): """ Iterates over time range in steps specified in delta. :param start: Start of time range (inclusive) :param end: End of time range (exclusive) :param delta: Step interval """
curr = start
while curr < end:
    curr_end = curr + delta
    yield curr, curr_end
    curr = curr_end
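A quick usage sketch for per_delta as defined above (the window bounds are illustrative):

# Split a six-hour range into two-hour windows.
from datetime import datetime, timedelta

start = datetime(2020, 1, 1, 0, 0)
end = datetime(2020, 1, 1, 6, 0)
for window_start, window_end in per_delta(start, end, timedelta(hours=2)):
    print(window_start, '->', window_end)
# 2020-01-01 00:00:00 -> 2020-01-01 02:00:00
# 2020-01-01 02:00:00 -> 2020-01-01 04:00:00
# 2020-01-01 04:00:00 -> 2020-01-01 06:00:00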
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_month(start: datetime, end: datetime, n: int=1): """ Iterates over time range in one month steps. Clamps to number of days in given month. :param start: Start of time range (inclusive) :param end: End of time range (exclusive) :param n: Number of months to step. Default is 1. """
curr = start.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
while curr < end:
    curr_end = add_month(curr, n)
    yield curr, curr_end
    curr = curr_end
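per_month relies on an add_month helper that is not part of this snippet. A minimal sketch of such a helper, under the assumption stated in the docstring (step n months and clamp to the number of days in the target month), plus a usage example:

import calendar
from datetime import datetime

def add_month(t: datetime, n: int = 1) -> datetime:
    # Assumed behavior: advance by n months, clamping the day of month.
    month_index = t.month - 1 + n
    year = t.year + month_index // 12
    month = month_index % 12 + 1
    day = min(t.day, calendar.monthrange(year, month)[1])
    return t.replace(year=year, month=month, day=day)

# Iterating January..March in one-month steps (start is clamped to day 1):
for m_start, m_end in per_month(datetime(2020, 1, 15), datetime(2020, 4, 1)):
    print(m_start.date(), '->', m_end.date())
# 2020-01-01 -> 2020-02-01
# 2020-02-01 -> 2020-03-01
# 2020-03-01 -> 2020-04-01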
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_requirements() -> List[str]: """Return the requirements as a list of string."""
requirements_path = os.path.join(
    os.path.dirname(__file__), 'requirements.txt'
)
with open(requirements_path) as f:
    return f.read().split()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def project_dev_requirements(): """ List requirements for peltak commands configured for the project. This list is dynamic and depends on the commands you have configured in your project's pelconf.yaml. This will be the combined list of packages needed to be installed in order for all the configured commands to work. """
from peltak.core import conf
from peltak.core import shell

for dep in sorted(conf.requirements):
    shell.cprint(dep)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_images(): # type: () -> None """ Build all docker images for the project. """
registry = conf.get('docker.registry')
docker_images = conf.get('docker.images', [])

for image in docker_images:
    build_image(registry, image)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def push_images(): # type: () -> None """ Push all project docker images to a remote registry. """
registry = conf.get('docker.registry')
docker_images = conf.get('docker.images', [])

if registry is None:
    log.err("You must define docker.registry conf variable to push images")
    sys.exit(-1)

for image in docker_images:
    push_image(registry, image)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def docker_list(registry_pass): # type: (str) -> None """ List docker images stored in the remote registry. Args: registry_pass (str): Remote docker registry password. """
registry = conf.get('docker.registry', None)

if registry is None:
    log.err("You must define docker.registry conf variable to list images")
    sys.exit(-1)

registry_user = conf.get('docker.registry_user', None)

if registry_user is None:
    registry_user = click.prompt("Username")

rc = client.RegistryClient(registry, registry_user, registry_pass)
images = {x: rc.list_tags(x) for x in rc.list_images()}

shell.cprint("<32>Images in <34>{} <32>registry:", registry)
for image, tags in images.items():
    shell.cprint(' <92>{}', image)
    for tag in tags:
        shell.cprint('   <90>{}:<35>{}', image, tag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_image(registry, image): # type: (str, Dict[str, Any]) -> None """ Build docker image. Args: registry (str): The name of the registry this image belongs to. If not given, the resulting image will have a name without the registry. image (dict[str, Any]): The dict containing the information about the built image. This is the same dictionary as defined in DOCKER_IMAGES variable. """
if ':' in image['name']:
    _, tag = image['name'].split(':', 1)
else:
    _, tag = image['name'], None

values = {
    'registry': '' if registry is None else registry + '/',
    'image': image['name'],
    'tag': tag,
}

if tag is None:
    args = [
        '-t {registry}{image}'.format(**values),
        '-t {registry}{image}:{version}'.format(
            version=versioning.current(),
            **values
        ),
    ]
else:
    args = ['-t {registry}{image}'.format(**values)]

if 'file' in image:
    args.append('-f {}'.format(conf.proj_path(image['file'])))

with conf.within_proj_dir(image.get('path', '.')):
    log.info("Building <33>{registry}<35>/{image}", **values)
    shell.run('docker build {args} .'.format(args=' '.join(args)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def push_image(registry, image): # type: (str, Dict[str, Any]) -> None """ Push the given image to selected repository. Args: registry (str): The name of the registry we're pushing to. This is the address of the repository without the protocol specification (no http(s): //) image (dict[str, Any]): The dict containing the information about the image. This is the same dictionary as defined in DOCKER_IMAGES variable. """
values = {
    'registry': registry,
    'image': image['name'],
}

log.info("Pushing <33>{registry}<35>/{image}".format(**values))
shell.run('docker push {registry}/{image}'.format(**values))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_opr_data(self, data, store): """Returns a well formatted OPR data"""
return {
    "invoice_data": {
        "invoice": {
            "total_amount": data.get("total_amount"),
            "description": data.get("description")
        },
        "store": store.info
    },
    "opr_data": {
        "account_alias": data.get("account_alias")
    }
}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, data={}, store=None): """Initializes an OPR First step in the OPR process is to create the OPR request. Returns the OPR token """
_store = store or self.store
_data = self._build_opr_data(data, _store) if data else self._opr_data
return self._process('opr/create', _data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def charge(self, data): """Second stage of an OPR request"""
token = data.get("token", self._response["token"])
data = {
    "token": token,
    "confirm_token": data.get("confirm_token")
}
return self._process('opr/charge', data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_value(self, field, quick): # type: (Field, bool) -> Any """ Ask user the question represented by this instance. Args: field (Field): The field we're asking the user to provide the value for. quick (bool): Enable quick mode. In quick mode, the form will reduce the number of question asked by using defaults wherever possible. This can greatly reduce the number of interactions required on the user part, but will obviously limit the user choices. This should probably be enabled only by a specific user action (like passing a ``--quick`` flag etc.). Returns: The user response converted to a python type using the :py:attr:`cliform.core.Field.type` converter. """
if callable(field.default):
    default = field.default(self)
else:
    default = field.default

if quick and default is not None:
    return default

shell.cprint('<90>{}', field.help)

while True:
    try:
        answer = click.prompt(field.pretty_prompt, default=default)
        return field.type(answer)
    except ValueError:
        shell.cprint("<31>Unsupported value")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recode_unicode(s, encoding='utf-8'): """ Inputs are encoded to utf-8 and then decoded to the desired output encoding @encoding: the desired encoding -> #str with the desired @encoding """
if isinstance(s, str):
    return s.encode().decode(encoding)
return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pycons3rt_home_dir(): """Returns the pycons3rt home directory based on OS :return: (str) Full path to pycons3rt home :raises: OSError """
if platform.system() == 'Linux':
    return os.path.join(os.path.sep, 'etc', 'pycons3rt')
elif platform.system() == 'Windows':
    return os.path.join('C:', os.path.sep, 'pycons3rt')
elif platform.system() == 'Darwin':
    return os.path.join(os.path.expanduser('~'), '.pycons3rt')
else:
    raise OSError('Unsupported Operating System')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize_pycons3rt_dirs(): """Initializes the pycons3rt directories :return: None :raises: OSError """
for pycons3rt_dir in [get_pycons3rt_home_dir(),
                      get_pycons3rt_user_dir(),
                      get_pycons3rt_conf_dir(),
                      get_pycons3rt_log_dir(),
                      get_pycons3rt_src_dir()]:
    if os.path.isdir(pycons3rt_dir):
        continue
    try:
        os.makedirs(pycons3rt_dir)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(pycons3rt_dir):
            pass
        else:
            msg = 'Unable to create directory: {d}'.format(d=pycons3rt_dir)
            raise OSError(msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean(exclude): # type: (List[str]) -> None """ Remove all unnecessary files. Args: exclude (list[str]): A list of path patterns to exclude from deletion. The 'pretend' flag is read from the runtime context rather than passed as an argument: if set to **True**, no files are deleted, only shown. """
pretend = context.get('pretend', False)
exclude = list(exclude) + conf.get('clean.exclude', [])
clean_patterns = conf.get('clean.patterns', [
    '*__pycache__*',
    '*.py[cod]',
    '*.swp',
])

num_files = 0
with util.timed_block() as t:
    files = fs.filtered_walk(conf.proj_path(), clean_patterns, exclude)
    for path in files:
        try:
            num_files += 1
            if not isdir(path):
                log.info(' <91>[file] <90>{}', path)
                if not pretend:
                    os.remove(path)
            else:
                log.info(' <91>[dir]  <90>{}', path)
                if not pretend:
                    rmtree(path)
        except OSError:
            log.info("<33>Failed to remove <90>{}", path)

if pretend:
    msg = "Would delete <33>{}<32> files. Took <33>{}<32>s"
else:
    msg = "Deleted <33>{}<32> files in <33>{}<32>s"
log.info(msg.format(num_files, t.elapsed_s))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init(quick): # type: (bool) -> None """ Create an empty pelconf.yaml from template """
config_file = 'pelconf.yaml'
prompt = "-- <35>{} <32>already exists. Wipe it?<0>".format(config_file)

if exists(config_file) and not click.confirm(shell.fmt(prompt)):
    log.info("Canceled")
    return

form = InitForm().run(quick=quick)

log.info('Writing <35>{}'.format(config_file))
pelconf_template = conf.load_template('pelconf.yaml')
fs.write_file(config_file, pelconf_template.format(**form.values))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tracebacks_from_lines(lines_iter): """Generator that yields tracebacks found in a lines iterator The lines iterator can be: - a file-like object - a list (or deque) of lines. - any other iterable sequence of strings """
tbgrep = TracebackGrep()
for line in lines_iter:
    tb = tbgrep.process(line)
    if tb:
        yield tb
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tracebacks_from_file(fileobj, reverse=False): """Generator that yields tracebacks found in a file object With reverse=True, searches backwards from the end of the file. """
if reverse:
    lines = deque()
    for line in BackwardsReader(fileobj):
        lines.appendleft(line)
        if tb_head in line:
            yield next(tracebacks_from_lines(lines))
            lines.clear()
else:
    for traceback in tracebacks_from_lines(fileobj):
        yield traceback
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def BackwardsReader(file, BLKSIZE = 4096): """Read a file line by line, backwards"""
buf = "" file.seek(0, 2) lastchar = file.read(1) trailing_newline = (lastchar == "\n") while 1: newline_pos = buf.rfind("\n") pos = file.tell() if newline_pos != -1: # Found a newline line = buf[newline_pos+1:] buf = buf[:newline_pos] if pos or newline_pos or trailing_newline: line += "\n" yield line elif pos: # Need to fill buffer toread = min(BLKSIZE, pos) file.seek(pos-toread, 0) buf = file.read(toread) + buf file.seek(pos-toread, 0) if pos == toread: buf = "\n" + buf else: # Start-of-file return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prettify(string): """ replace markup emoji and progressbars with actual things # Example ```python from habitipy.util import prettify print(prettify('Write thesis :book: ![progress](http://progressed.io/bar/0 "progress")')) ``` ``` Write thesis πŸ“– β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ0% ``` """
string = emojize(string, use_aliases=True) if emojize else string
string = progressed(string)
return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_translation_for(package_name: str) -> gettext.NullTranslations: """find and return gettext translation for package"""
localedir = None
for localedir in pkg_resources.resource_filename(package_name, 'i18n'), None:
    localefile = gettext.find(package_name, localedir)  # type: ignore
    if localefile:
        break
return gettext.translation(package_name, localedir=localedir, fallback=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_translation_functions(package_name: str, names=('gettext',)): """finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def escape_keywords(arr): """append _ to all python keywords"""
for i in arr:
    i = i if i not in kwlist else i + '_'
    i = i if '-' not in i else i.replace('-', '_')
    yield i
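A quick check of the keyword/hyphen escaping (assumes the module does from keyword import kwlist, as the function body implies):

print(list(escape_keywords(['class', 'import', 'content-type', 'user'])))
# ['class_', 'import_', 'content_type', 'user']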
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_api(branch=None) -> str: """download API documentation from _branch_ of Habitica\'s repo on Github"""
habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica'
if not branch:
    branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name']
curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)]
tar = local['tar'][
    'axzf', '-',
    '--wildcards', '*/website/server/controllers/api-v3/*',
    '--to-stdout']
grep = local['grep']['@api']
sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/  / /g', '-']
return (curl | tar | grep | sed)()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_apidoc(text: str) -> None: """save `text` to apidoc cache"""
apidoc_local = local.path(APIDOC_LOCAL_FILE)
if not apidoc_local.dirname.exists():
    apidoc_local.dirname.mkdir()
with open(apidoc_local, 'w') as f:
    f.write(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_apidoc( file_or_branch, from_github=False, save_github_version=True ) -> List['ApiEndpoint']: """read file and parse apiDoc lines"""
apis = []  # type: List[ApiEndpoint]
regex = r'(?P<group>\([^)]*\)){0,1} *(?P<type_>{[^}]*}){0,1} *'
regex += r'(?P<field>[^ ]*) *(?P<description>.*)$'
param_regex = re.compile(r'^@apiParam {1,}' + regex)
success_regex = re.compile(r'^@apiSuccess {1,}' + regex)
if from_github:
    text = download_api(file_or_branch)
    if save_github_version:
        save_apidoc(text)
else:
    with open(file_or_branch) as f:
        text = f.read()
for line in text.split('\n'):
    line = line.replace('\n', '')
    if line.startswith('@api '):
        if apis:
            if not apis[-1].retcode:
                apis[-1].retcode = 200
        split_line = line.split(' ')
        assert len(split_line) >= 3
        method = split_line[1]
        uri = split_line[2]
        assert method[0] == '{'
        assert method[-1] == '}'
        method = method[1:-1]
        if not uri.startswith(API_URI_BASE):
            warnings.warn(_("Wrong api url: {}").format(uri))  # noqa: Q000
        title = ' '.join(split_line[3:])
        apis.append(ApiEndpoint(method, uri, title))
    elif line.startswith('@apiParam '):
        res = next(param_regex.finditer(line)).groupdict()
        apis[-1].add_param(**res)
    elif line.startswith('@apiSuccess '):
        res = next(success_regex.finditer(line)).groupdict()
        apis[-1].add_success(**res)
if apis:
    if not apis[-1].retcode:
        apis[-1].retcode = 200
return apis
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def into(self, val: str) -> Union['ApiNode', 'ApiEndpoint']: """Get another leaf node with name `val` if possible"""
if val in self.paths:
    return self.paths[val]
if self.param:
    return self.param
raise IndexError(_("Value {} is missing from api").format(val))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def can_into(self, val: str) -> bool: """Determine if there is a leaf node with name `val`"""
return val in self.paths or (self.param and self.param_name == val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keys(self) -> Iterator[str]: """return all possible paths one can take from this ApiNode"""
if self.param:
    yield self.param_name
yield from self.paths.keys()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_param(self, group=None, type_='', field='', description=''): """parse and append a param"""
group = group or '(Parameter)'
group = group.lower()[1:-1]
p = Param(type_, field, description)
self.params[group][p.field] = p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_success(self, group=None, type_='', field='', description=''): """parse and append a success data param"""
group = group or '(200)'
group = int(group.lower()[1:-1])
self.retcode = self.retcode or group
if group != self.retcode:
    raise ValueError('Two or more retcodes!')
type_ = type_ or '{String}'
p = Param(type_, field, description)
self.params['response'][p.field] = p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, obj): """check if obj has this api param"""
if self.path:
    for i in self.path:
        obj = obj[i]
obj = obj[self.field]
raise NotImplementedError('Validation is not implemented yet')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_uuid(u): """validator for plumbum prompt"""
if isinstance(u, str) and u.replace('-', '') == uuid.UUID(u).hex:
    return u
return False
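A quick check of the validator; note that uuid.UUID() raises ValueError for a malformed string rather than returning False:

import uuid

print(is_uuid(str(uuid.uuid4())))  # echoes the UUID string back
print(is_uuid(uuid.uuid4()))       # a UUID object is not a str -> False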
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_content(api, rebuild_cache=False): """get content from server or cache"""
if hasattr(get_content, 'cache') and not rebuild_cache:
    return get_content.cache
if not os.path.exists(CONTENT_JSON) or rebuild_cache:
    import locale
    content_endpoint = api.content.get
    # pylint: disable=protected-access
    try_langs = []
    try:
        lang = get_translation_for('habitipy').info()['language']
        try_langs.append(lang)
    except KeyError:
        pass
    try:
        loc = locale.getdefaultlocale()[0]
        if loc:
            try_langs.append(loc)
            try_langs.append(loc[:2])
    except IndexError:
        pass
    server_lang = content_endpoint._node.params['query']['language']
    # handle something like 'ru_RU' not available - only 'ru'
    for lang in try_langs:
        if lang in server_lang.possible_values:
            loc = {'language': lang}
            break
    else:
        loc = {}
    get_content.cache = content = content_endpoint(**loc)
    with open(CONTENT_JSON, 'w') as f:
        json.dump(content, f)
    return content
try:
    with open(CONTENT_JSON) as f:
        get_content.cache = content = json.load(f)
    return content
except JSONDecodeError:
    return get_content(api, rebuild_cache=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_item_class_name(cls_obj): """ Return the first part of the class name of this custom generator. This will be used for the class name of the items produced by this generator. Examples: FoobarGenerator -> Foobar QuuxGenerator -> Quux """
if '__tohu_items_name__' in cls_obj.__dict__:
    logger.debug(f"Using item class name '{cls_obj.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')")
else:
    m = re.match('^(.*)Generator$', cls_obj.__name__)
    if m is not None:
        cls_obj.__tohu_items_name__ = m.group(1)
        logger.debug(f"Using item class name '{cls_obj.__tohu_items_name__}' (derived from custom generator name)")
    else:
        raise ValueError(
            "Cannot derive class name for items to be produced by custom generator. "
            "Please set '__tohu_items_name__' at the top of the custom generator's "
            "definition or change its name so that it ends in '...Generator'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_that_operator_can_be_applied_to_produces_items(op, g1, g2): """ Helper function to check that the operator `op` can be applied to items produced by g1 and g2. """
g1_tmp_copy = g1.spawn()
g2_tmp_copy = g2.spawn()
sample_item_1 = next(g1_tmp_copy)
sample_item_2 = next(g2_tmp_copy)
try:
    op(sample_item_1, sample_item_2)
except TypeError:
    raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} "
                    f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self, N, *, seed=None, progressbar=False): """ Return sequence of `N` elements. If `seed` is not None, the generator is reset using this seed before generating the elements. """
if seed is not None:
    self.reset(seed)

items = islice(self, N)
if progressbar:
    items = tqdm(items, total=N)

item_list = list(items)

# logger.warning("TODO: initialise ItemList with random seed!")
return ItemList(item_list, N)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_csv_header_line(*, header_names, header_prefix='', header=True, sep=',', newline='\n'): """ Helper function to generate a CSV header line depending on the combination of arguments provided. """
if isinstance(header, str):
    # user-provided header line
    header_line = header + newline
else:
    if not (header is None or isinstance(header, bool)):
        raise ValueError(f"Invalid value for argument `header`: {header}")
    else:
        if header:
            header_line = header_prefix + sep.join(header_names) + newline
        else:
            header_line = ""
return header_line
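The three accepted forms of the header argument, exercised directly (all arguments are keyword-only):

cols = ['id', 'name', 'email']
print(_generate_csv_header_line(header_names=cols, header=True), end='')         # id,name,email
print(_generate_csv_header_line(header_names=cols, header='# my header'), end='')  # # my header
print(repr(_generate_csv_header_line(header_names=cols, header=False)))          # ''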
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spawn_generator(self, g): """ Return a fresh spawn of g unless g is already contained in this SpawnMapping, in which case return the previously spawned generator. """
try:
    return self.mapping[g]
except KeyError:
    return g._spawn(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_generators(self): """ Convenience property to iterate over all generators in arg_gens and kwarg_gens. """
for arg_gen in self.arg_gens:
    yield arg_gen
for kwarg_gen in self.kwarg_gens.values():
    yield kwarg_gen
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Split(g, *, maxbuffer=10, tuple_len=None): """ Split a tuple generator into individual generators. Parameters g: tohu generator The generator to be split. The items produced by `g` must be tuples. maxbuffer: integer Maximum number of items produced by `g` that will be buffered. """
if tuple_len is None:
    try:
        tuple_len = g.tuple_len
    except AttributeError:
        raise ValueError("Argument 'tuple_len' must be given since generator is not of type TupleGenerator.")

g_buffered = BufferedTuple(g, maxbuffer=maxbuffer, tuple_len=tuple_len)
return tuple(NthElementBuffered(g_buffered, i) for i in range(tuple_len))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tuple_len(self): """ Length of tuples produced by this generator. """
try:
    return self._tuple_len
except AttributeError:
    raise NotImplementedError("Class {} does not implement attribute 'tuple_len'.".format(self.__class__.__name__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __get_sentence(counts, sentence_id=None): """ :param counts: :param sentence_id: """
# First of all we need a cursor and a query to retrieve our ID's
cursor = CONN.cursor()
check_query = "select sen_id from sursentences"

# Now we fetch the result of the query and save it into check_result
cursor.execute(check_query)
check_result = cursor.fetchall()

# declare an empty list to be populated below
id_list = []
id_to_fetch = None

# Populate the id_list variable with all of the ID's we retrieved from the database query.
for row in check_result:
    id_list.append(row[0])

if sentence_id is not None:
    if type(sentence_id) is int:
        id_to_fetch = sentence_id
else:
    id_to_fetch = random.randint(1, counts['max_sen'])

while id_to_fetch not in id_list:
    id_to_fetch = random.randint(1, counts['max_sen'])

query = "select * from sursentences where sen_id = {0}".format(id_to_fetch)
cursor.execute(query)
result = cursor.fetchone()
# cursor.close()
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __get_table_limits(): """Here we simply take a count of each of the database tables so we know our upper limits for our random number calls, then return a dictionary of them """
table_counts = {
    'max_adjectives': None,
    'max_names': None,
    'max_nouns': None,
    'max_sen': None,
    'max_fau': None,
    'max_verb': None
}

cursor = CONN.cursor()

cursor.execute('SELECT count(*) FROM suradjs')
table_counts['max_adjectives'] = cursor.fetchone()[0]

cursor.execute('SELECT count(*) FROM surnames')
table_counts['max_names'] = cursor.fetchone()[0]

cursor.execute('SELECT count(*) FROM surnouns')
table_counts['max_nouns'] = cursor.fetchone()[0]

cursor.execute('SELECT count(*) FROM sursentences')
table_counts['max_sen'] = cursor.fetchone()[0]

cursor.execute('SELECT count(*) FROM surfaults')
table_counts['max_fau'] = cursor.fetchone()[0]

cursor.execute('SELECT count(*) FROM surverbs')
table_counts['max_verb'] = cursor.fetchone()[0]

return table_counts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __replace_repeat(sentence): """ Allows the use of repeating random-elements such as in the 'Ten green bottles' type sentences. :param sentence: """
######### USE SENTENCE_ID 47 for testing!
repeat_dict = {}
if sentence is not None:
    while sentence.find('#DEFINE_REPEAT') != -1:
        begin_index = sentence.find('#DEFINE_REPEAT')
        start_index = begin_index + 15
        end_index = sentence.find(']')
        if sentence.find('#DEFINE_REPEAT') is not None:
            sub_list = sentence[start_index:end_index].split(',')
            choice = sub_list[0]
            repeat_text = sub_list[1]
            repeat_dict[choice] = repeat_text
            sentence = sentence.replace(sentence[begin_index:end_index + 1], '', 1)
    while sentence.find('#REPEAT') != -1:
        if sentence.find('#REPEAT') is not None:
            repeat_begin_index = sentence.find('#REPEAT')
            repeat_start_index = repeat_begin_index + 8
            # by searching from repeat_index below we don't encounter dodgy bracket-matching errors.
            repeat_end_index = sentence.find(']', repeat_start_index)
            repeat_index = sentence[repeat_start_index:repeat_end_index]
            if repeat_index in repeat_dict:
                sentence = sentence.replace(
                    sentence[repeat_begin_index:repeat_end_index + 1],
                    str(repeat_dict[repeat_index]))
        if sentence.find('#REPEAT') == -1:
            return sentence
    return sentence
else:
    return sentence
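A hypothetical template following the bracket grammar the code parses: #DEFINE_REPEAT[key,text] registers a snippet, and every #REPEAT[key] is then substituted with it (the leading space left by removing the definition is preserved):

template = '#DEFINE_REPEAT[1,Ten green bottles] #REPEAT[1], #REPEAT[1], standing on the wall.'
print(__replace_repeat(template))
#  Ten green bottles, Ten green bottles, standing on the wall.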
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __check_spaces(sentence): """ Here we check to see that we have the correct number of spaces in the correct locations. :param sentence: :return: """
# We have to run the process multiple times:
# Once to search for all spaces, and check if there are adjoining spaces;
# The second time to check for 2 spaces after sentence-ending characters such as . and ! and ?
if sentence is not None:
    words = sentence.split()
    new_sentence = ''
    for (i, word) in enumerate(words):
        if word[-1] in set('.!?'):
            word += ' '
        new_word = ''.join(word)
        new_sentence += ' ' + new_word
    # remove any trailing whitespace
    new_sentence = new_sentence.strip()
    return new_sentence
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def explode_columns(df, colnames):
    """
    Given a dataframe with certain columns that contain lists, return another
    dataframe where the elements in each list are "exploded" into individual rows.

    Example input:

           col1  col2  col3             col4
        0  foo   11    [DDD, AAA, CCC]  [dd, aa, cc]
        1  bar   22    [FFF]            [ff]
        2  quux  33    [EEE, BBB]       [ee, bb]

    With colnames=['col3', 'col4'] (column names are kept):

           col1  col2  col3  col4
        0  foo   11    DDD   dd
        1  foo   11    AAA   aa
        2  foo   11    CCC   cc
        3  bar   22    FFF   ff
        4  quux  33    EEE   ee
        5  quux  33    BBB   bb

    With colnames={'col3_exploded': 'col3', 'col4': 'col4'} (exploded columns
    are renamed to the dict keys):

           col1  col2  col3_exploded  col4
        0  foo   11    DDD            dd
        1  foo   11    AAA            aa
        2  foo   11    CCC            cc
        3  bar   22    FFF            ff
        4  quux  33    EEE            ee
        5  quux  33    BBB            bb
    """
if isinstance(colnames, (list, tuple)):
    colnames = {name: name for name in colnames}

remaining_columns = list(df.columns.difference(colnames.values()))
df2 = df.set_index(remaining_columns)
df3 = pd.concat(
    (make_exploded_column(df2, col_new, col_old) for col_new, col_old in colnames.items()),
    axis=1)
levels_to_reset = list(range(len(remaining_columns)))
df4 = df3.reset_index(level=levels_to_reset).reset_index(drop=True)
return df4
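Replaying the docstring example; this assumes pandas and the make_exploded_column helper referenced above are in scope:

import pandas as pd

df = pd.DataFrame({
    'col1': ['foo', 'bar', 'quux'],
    'col2': [11, 22, 33],
    'col3': [['DDD', 'AAA', 'CCC'], ['FFF'], ['EEE', 'BBB']],
    'col4': [['dd', 'aa', 'cc'], ['ff'], ['ee', 'bb']],
})

print(explode_columns(df, ['col3', 'col4']))                           # keeps column names
print(explode_columns(df, {'col3_exploded': 'col3', 'col4': 'col4'}))  # renames col3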
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_generated_sequence(gen, num, *, sep=", ", fmt='', seed=None): """ Helper function which prints a sequence of `num` items produced by the random generator `gen`. """
if seed:
    gen.reset(seed)
elems = [format(next(gen), fmt) for _ in range(num)]
sep_initial = "\n\n" if '\n' in sep else " "
print("Generated sequence:{}{}".format(sep_initial, sep.join(elems)))
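Since gen.reset() is only called when a seed is passed, any plain iterator works for a quick check:

print_generated_sequence(iter(range(1, 6)), num=5)
# Generated sequence: 1, 2, 3, 4, 5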
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ensure_is_date_object(x): """ Ensure input represents a valid date and return the corresponding `datetime.date` object. Valid inputs: - string of the form "YYYY-MM-DD" - dt.date object - pd.Timestamp of the form "YYYY-MM-DD 00:00:00" with freq='D' (as is generated by pd.date_range()) """
error_msg = f"Cannot convert input to date object: {x} (type: {type(x)})" if isinstance(x, dt.date): if isinstance(x, pd.Timestamp): if x.freq != 'D': raise TohuDateError("Pandas Timestamp must have freq='D' set. Got: freq={x.freq!r}") elif pd.Timestamp(x.date()) == x: return x.date() else: raise TohuDateError(error_msg) elif isinstance(x, dt.datetime): raise TohuDateError(error_msg) else: return x elif isinstance(x, str): return parse_date_string(x) else: raise TohuDateError(error_msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_independent_generators(self): """ Return all generators in this namespace which are not clones. """
return {g: name for g, name in self._ns.items() if not is_clone(g)}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tohu_items_name(cls): """ Return a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem' """
assert issubclass(cls, TohuBaseGenerator)

try:
    tohu_items_name = cls.__dict__['__tohu_items_name__']
    logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')")
except KeyError:
    m = re.match('^(.*)Generator$', cls.__name__)
    if m is not None:
        tohu_items_name = m.group(1)
        logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)")
    else:
        msg = (
            "Cannot derive class name for items to be produced by custom generator. "
            "Please set '__tohu_items_name__' at the top of the custom generator's "
            "definition or change its name so that it ends in '...Generator'"
        )
        raise ValueError(msg)

return tohu_items_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_with_tohu_generators(field_gens, adict): """ Helper function which updates `field_gens` with any items in the dictionary `adict` that are instances of `TohuUltraBaseGenerator`. """
for name, gen in adict.items():
    if isinstance(gen, TohuUltraBaseGenerator):
        field_gens[name] = gen
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_input_generators(self, seed): """ Helper method which explicitly resets all input generators to the derived generator. This should only ever be called for testing or debugging. """
seed_generator = SeedGenerator().reset(seed=seed)

for gen in self.input_generators:
    gen.reset(next(seed_generator))
    try:
        # In case `gen` is itself a derived generator,
        # recursively reset its own input generators.
        gen.reset_input_generators(next(seed_generator))
    except AttributeError:
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _spot_check_that_elements_produced_by_this_generator_have_attribute(self, name): """ Helper function to spot-check that the items produces by this generator have the attribute `name`. """
g_tmp = self.values_gen.spawn()
sample_element = next(g_tmp)[0]
try:
    getattr(sample_element, name)
except AttributeError:
    raise AttributeError(f"Items produced by {self} do not have the attribute '{name}'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_sql(self, url, table_name, *, schema=None, fields=None, fields_to_explode=None, if_exists="fail", dtype=None): """ Export items as rows in a PostgreSQL table. Parameters url: string Connection string to connect to the database. Example: "postgresql://[email protected]:5432/testdb" table_name: string Name of the database table. Note that if this name contains a dot ('.') and `schema` is not specified, the first part of the name before the dot will be interpreted as the schema name. schema : string, optional Specify the schema (if database flavor supports this). If None, use default schema or derive the schema name from `table_name`. fields: list or dict List of field names to export, or dictionary mapping output column names to attribute names of the generators. Examples: fields=['field_name_1', 'field_name_2'] fields={'COL1': 'field_name_1', 'COL2': 'field_name_2'} fields_to_explode: list or None Optional list of field names where each entry (which must itself be a sequence) is to be "exploded" into separate rows. if_exists : {'fail', 'do_nothing', 'replace', 'append'}, default 'fail' - fail: If table exists, raise an error. - do_nothing: If table exists, do nothing and immediately return. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. dtype : dict, optional Specifying the datatype for columns. The keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. This is passed through to pandas.DataFrame.to_sql(). """
if schema is None:
    schema, table_name = _extract_schema_if_given(table_name)

engine = create_engine(url)
ins = inspect(engine)

if schema is not None and schema not in ins.get_schema_names():
    logger.debug(f"Creating non-existing schema: '{schema}'")
    engine.execute(CreateSchema(schema))

if table_name in ins.get_table_names(schema=schema) and if_exists == 'do_nothing':
    logger.debug("Table already exists (use if_exists='replace' or if_exists='append' to modify it).")
    return

if if_exists == 'do_nothing':
    # we handled the 'do nothing' case above;
    # change to an option that pandas will understand
    if_exists = 'fail'

with engine.begin() as conn:
    self.to_df(fields=fields, fields_to_explode=fields_to_explode).to_sql(
        table_name, conn, schema=schema, index=False, if_exists=if_exists, dtype=dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self, seed): """ Reset this generator's seed generator and any clones. """
logger.debug(f'Resetting {self} (seed={seed})')

self.seed_generator.reset(seed)
for c in self.clones:
    c.reset(seed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def override_environment(settings, **kwargs): # type: (Settings, **str) -> Generator """ Override env vars and reload the Settings object NOTE: Obviously this context has to be in place before you import any module which reads env values at import time. NOTE: The values in `kwargs` must be strings else you will get a cryptic: TypeError: execve() arg 3 contains a non-string value """
old_env = os.environ.copy()
os.environ.update(kwargs)
settings._reload()
try:
    yield
finally:
    for key in kwargs.keys():
        del os.environ[key]
    os.environ.update(old_env)
    settings._reload()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _import_yaml(config_file_path): """Return a configuration object """
try:
    logger.info('Importing config %s...', config_file_path)
    with open(config_file_path) as config_file:
        return yaml.safe_load(config_file.read())
except IOError as ex:
    raise RepexError('{0}: {1} ({2})'.format(
        ERRORS['config_file_not_found'], config_file_path, ex))
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex:
    raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_all_files(filename_regex, path, base_dir, excluded_paths=None, excluded_filename_regex=None): """Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well. """
# For windows
def replace_backslashes(string):
    return string.replace('\\', '/')

excluded_paths = _normalize_excluded_paths(base_dir, excluded_paths)
if excluded_paths:
    logger.info('Excluding paths: %s', excluded_paths)

logger.info('Looking for %s under %s...',
            filename_regex, os.path.join(base_dir, path))
if excluded_filename_regex:
    logger.info('Excluding file names: %s', excluded_filename_regex)

path_expression = re.compile(replace_backslashes(path))

target_files = []
for root, _, files in os.walk(base_dir):
    if not root.startswith(tuple(excluded_paths)) \
            and path_expression.search(replace_backslashes(root)):
        for filename in files:
            filepath = os.path.join(root, filename)
            is_file, matched, excluded_filename, excluded_path = \
                _set_match_parameters(
                    filename,
                    filepath,
                    filename_regex,
                    excluded_filename_regex,
                    excluded_paths)
            if is_file and matched and not excluded_filename \
                    and not excluded_path:
                logger.debug('%s is a match. Appending to list...', filepath)
                target_files.append(filepath)
return target_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _match_tags(repex_tags, path_tags): """Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match. """
if 'any' in repex_tags or (not repex_tags and not path_tags):
    return True
elif set(repex_tags) & set(path_tags):
    return True
return False
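The three matching rules from the docstring, exercised directly:

print(_match_tags(['any'], []))                # True  - 'any' always matches
print(_match_tags([], []))                     # True  - nothing requested, nothing configured
print(_match_tags(['prod'], ['prod', 'ci']))   # True  - intersection is non-empty
print(_match_tags(['prod'], ['ci']))           # False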
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterate(config_file_path=None, config=None, variables=None, tags=None, validate=True, validate_only=False, with_diff=False): """Iterate over all paths in `config_file_path` :param string config_file_path: a path to a repex config file :param dict config: a dictionary representing a repex config :param dict variables: a dict of variables (can be None) :param list tags: a list of tags to check for :param bool validate: whether to perform schema validation on the config :param bool validate_only: only perform validation without running :param bool with_diff: whether to write a diff of all changes to a file """
# TODO: Check if tags can be a tuple instead of a list
if not isinstance(variables or {}, dict):
    raise TypeError(ERRORS['variables_not_dict'])
if not isinstance(tags or [], list):
    raise TypeError(ERRORS['tags_not_list'])

config = _get_config(config_file_path, config)
if validate or validate_only:
    _validate_config_schema(config)
if validate_only:
    logger.info('Config file validation completed successfully!')
    sys.exit(0)

repex_vars = _merge_variables(config['variables'], variables or {})
repex_tags = tags or []
logger.debug('Chosen tags: %s', repex_tags)

for path in config['paths']:
    _process_path(path, repex_tags, repex_vars, with_diff)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handle_path(pathobj, variables=None, diff=False): """Iterate over all chosen files in a path :param dict pathobj: a dict of a specific path in the config :param dict variables: a dict of variables (can be None) """
logger.info('Handling path with description: %s',
            pathobj.get('description'))
variables = variables or {}
variable_expander = _VariablesHandler()
pathobj = variable_expander.expand(variables, pathobj)
pathobj = _set_path_defaults(pathobj)

path_to_handle = os.path.join(pathobj['base_directory'], pathobj['path'])
logger.debug('Path to process: %s', path_to_handle)

validate = 'validator' in pathobj
if validate:
    validator_config = pathobj['validator']
    validator = _Validator(validator_config)
    validator_type = validator_config.get('type', 'per_type')

rpx = Repex(pathobj)

if not pathobj.get('type'):
    _handle_single_file(
        rpx=rpx,
        path_to_handle=path_to_handle,
        pathobj=pathobj,
        validate=validate,
        diff=diff,
        validator=validator if validate else None)
else:
    _handle_multiple_files(
        rpx=rpx,
        path_to_handle=path_to_handle,
        pathobj=pathobj,
        validate=validate,
        diff=diff,
        validator=validator if validate else None,
        validator_type=validator_type if validate else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_vars_dict(vars_file='', variables=None): """Merge variables into a single dictionary Applies to CLI provided variables only """
repex_vars = {}
if vars_file:
    with open(vars_file) as varsfile:
        repex_vars = yaml.safe_load(varsfile.read())
for var in variables:
    key, value = var.split('=')
    repex_vars.update({str(key): str(value)})
return repex_vars
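A CLI-style invocation: file variables are loaded first, then key=value pairs are layered on top (no vars file here):

print(_build_vars_dict(vars_file='', variables=['version=1.0.1', 'base_dir=.']))
# {'version': '1.0.1', 'base_dir': '.'}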
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(verbose, **kwargs): """Replace strings in one or multiple files. You must either provide `REGEX_PATH` or use the `-c` flag to provide a valid repex configuration. `REGEX_PATH` can be: a regex of paths under `basedir`, a path to a single directory under `basedir`, or a path to a single file. It's important to note that if the `REGEX_PATH` is a path to a directory, the `-t,--ftype` flag must be provided. """
config = kwargs['config']
if not config and not kwargs['regex_path']:
    click.echo('Must either provide a path or a viable repex config file.')
    sys.exit(1)

if verbose:
    set_verbose()

if config:
    repex_vars = _build_vars_dict(kwargs['vars_file'], kwargs['var'])
    try:
        iterate(
            config_file_path=config,
            variables=repex_vars,
            tags=list(kwargs['tag']),
            validate=kwargs['validate'],
            validate_only=kwargs['validate_only'],
            with_diff=kwargs['diff'])
    except (RepexError, IOError, OSError) as ex:
        sys.exit(str(ex))
else:
    pathobj = _construct_path_object(**kwargs)
    try:
        handle_path(pathobj)
    except (RepexError, IOError, OSError) as ex:
        sys.exit(str(ex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand(self, repex_vars, fields): r"""Receive a dict of variables and a dict of fields and iterate through them to expand a variable in a field, then return the fields dict with its variables expanded. This will fail if not all variables expand (due to not providing all necessary ones). fields: type: VERSION path: resources excluded: - excluded_file.file base_directory: '{{ .base_dir }}' match: '"version": "\d+\.\d+(\.\d+)?(-\w\d+)?' replace: \d+\.\d+(\.\d+)?(-\w\d+)? with: "{{ .version }}" must_include: - {{ .my_var }}/{{ .another_var }} - {{ .my_other_var }} - version validator: type: per_file path: {{ .my_validator_path }} function: validate variables: { 'version': 3, 'base_dir': . } :param dict repex_vars: dict of variables :param dict fields: dict of fields as shown above. """
logger.debug('Expanding variables...')

unexpanded_instances = set()

# Expand variables in variables
# TODO: This should be done in the global scope.
# _VariablesHandler is called per path, which makes this redundant
# as variables are declared globally per config.
for k, v in repex_vars.items():
    repex_vars[k] = self._expand_var(v, repex_vars)
    instances = self._get_instances(repex_vars[k])
    unexpanded_instances.update(instances)

# TODO: Consolidate variable expansion code into single logic
# Expand variables in path objects
for key in fields.keys():
    field = fields[key]
    if isinstance(field, str):
        fields[key] = self._expand_var(field, repex_vars)
        instances = self._get_instances(fields[key])
        unexpanded_instances.update(instances)
    elif isinstance(field, dict):
        for k, v in field.items():
            fields[key][k] = self._expand_var(v, repex_vars)
            instances = self._get_instances(fields[key][k])
            unexpanded_instances.update(instances)
    elif isinstance(field, list):
        for index, item in enumerate(field):
            fields[key][index] = self._expand_var(item, repex_vars)
            instances = self._get_instances(fields[key][index])
            unexpanded_instances.update(instances)

if unexpanded_instances:
    raise RepexError(
        'Variables failed to expand: {0}\n'
        'Please make sure to provide all necessary variables'.format(
            list(unexpanded_instances)))

return fields
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _expand_var(self, in_string, available_variables): """Expand each variable in `in_string` to its corresponding value :param string in_string: the string to replace in :param dict available_variables: mapping of variable names to their values """
instances = self._get_instances(in_string)
for instance in instances:
    for name, value in available_variables.items():
        variable_string = self._get_variable_string(name)
        if instance == variable_string:
            in_string = in_string.replace(variable_string, value)
return in_string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_before(self, content, file_to_handle): """Verify that all required strings are in the file """
logger.debug('Looking for required strings: %s', self.must_include) included = True for string in self.must_include: if not re.search(r'{0}'.format(string), content): logger.error('Required string `%s` not found in %s', string, file_to_handle) included = False if not included: logger.debug('Required strings not found') return False logger.debug('Required strings found') return True
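A self-contained sketch of the must-include check above (strings and content are hypothetical):

import re

must_include = ['version', 'license']
content = '{"version": "1.2.3"}'
missing = [s for s in must_include
           if not re.search(r'{0}'.format(s), content)]
# missing == ['license'], so validate_before would log an error and return False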
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_matches(self, content, file_to_handle): """Find all matches of an expression in a file """
# look for all match groups in the content groups = [match.groupdict() for match in self.match_expression.finditer(content)] # filter out content not in the matchgroup matches = [group['matchgroup'] for group in groups if group.get('matchgroup')] logger.info('Found %s matches in %s', len(matches), file_to_handle) # We only need the unique strings found as we'll be replacing each # of them. No need to replace the ones already replaced. return list(set(matches))
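A minimal sketch of the named-group convention this relies on: the compiled ``match_expression`` must capture the text of interest in a group named ``matchgroup`` (the pattern below is hypothetical):

import re

match_expression = re.compile(r'"version": "(?P<matchgroup>\d+\.\d+\.\d+)"')
content = '{"version": "1.2.3"}\n{"version": "1.2.3"}'
groups = [m.groupdict() for m in match_expression.finditer(content)]
matches = list(set(g['matchgroup'] for g in groups if g.get('matchgroup')))
# matches == ['1.2.3'] -- duplicates collapse into unique strings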
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def replace(self, match, content): """Replace all occurrences of the regex in all matches from a file with a specific value. """
new_string = self.replace_expression.sub(self.replace_with, match) logger.info('Replacing: [ %s ] --> [ %s ]', match, new_string) new_content = content.replace(match, new_string) return new_content
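The same replace flow as a standalone sketch (the expressions and values are hypothetical):

import re

replace_expression = re.compile(r'\d+\.\d+\.\d+')
replace_with = '2.0.0'
content = '"version": "1.2.3"'
match = '1.2.3'
new_string = replace_expression.sub(replace_with, match)  # '2.0.0'
new_content = content.replace(match, new_string)
# new_content == '"version": "2.0.0"'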
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_exit_events(self, no_workers=None, idle=None, reload=None, sig_term=None): """Do exit on certain events :param bool no_workers: Shutdown uWSGI when no workers are running. :param bool idle: Shutdown uWSGI when idle. :param bool reload: Force exit even if a reload is requested. :param bool sig_term: Exit on SIGTERM instead of brutal workers reload. .. note:: Before 2.1 SIGTERM reloaded the stack while SIGINT/SIGQUIT shut it down. """
self._set('die-on-no-workers', no_workers, cast=bool) self._set('exit-on-reload', reload, cast=bool) self._set('die-on-term', sig_term, cast=bool) self.set_idle_params(exit=idle) return self._section
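For reference, a sketch of the uWSGI ini options these setters emit (option names are taken verbatim from the ``_set`` calls above; values are illustrative):

[uwsgi]
die-on-no-workers = true
exit-on-reload = true
die-on-term = true
die-on-idle = true   ; set through set_idle_params(exit=...)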
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_exception_handling_params(self, handler=None, catch=None, no_write_exception=None): """Exception handling related params. :param str|unicode|list[str|unicode] handler: Register one or more exception handling C-functions. :param bool catch: Catch exceptions and report them as http output (including stack trace and env params). .. warning:: Use only for testing purposes. :param bool no_write_exception: Disable exception generation on write()/writev(). .. note:: This can be combined with ``logging.set_filters(write_errors=False, sigpipe=False)``. .. note:: Currently available for Python. """
self._set('exception-handler', handler, multi=True) self._set('catch-exceptions', catch, cast=bool) self._set('disable-write-exception', no_write_exception, cast=bool) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_idle_params(self, timeout=None, exit=None): """Activate idle mode - put uWSGI in cheap mode after inactivity timeout. :param int timeout: Inactivity timeout in seconds. :param bool exit: Shutdown uWSGI when idle. """
self._set('idle', timeout) self._set('die-on-idle', exit, cast=bool) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_reload_params(self, mercy=None, exit=None): """Set reload related params. :param int mercy: Set the maximum time (in seconds) we wait for workers and other processes to die during reload/shutdown. :param bool exit: Force exit even if a reload is requested. """
self._set('reload-mercy', mercy) self.set_exit_events(reload=exit) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_emergency_params( self, workers_step=None, idle_cycles_max=None, queue_size=None, queue_nonzero_delay=None): """Sets busyness algorithm emergency workers related params. Emergency workers could be spawned depending upon uWSGI backlog state. .. note:: These options are Linux only. :param int workers_step: Number of emergency workers to spawn. Default: 1. :param int idle_cycles_max: Idle cycles to reach before stopping an emergency worker. Default: 3. :param int queue_size: Listen queue (backlog) max size to spawn an emergency worker. Default: 33. :param int queue_nonzero_delay: If the request listen queue is > 0 for more than given amount of seconds new emergency workers will be spawned. Default: 60. """
self._set('cheaper-busyness-backlog-step', workers_step) self._set('cheaper-busyness-backlog-multiplier', idle_cycles_max) self._set('cheaper-busyness-backlog-alert', queue_size) self._set('cheaper-busyness-backlog-nonzero', queue_nonzero_delay) return self
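A sketch of the resulting options (names from the ``_set`` calls above; values mirror the documented defaults):

[uwsgi]
cheaper-busyness-backlog-step = 1
cheaper-busyness-backlog-multiplier = 3
cheaper-busyness-backlog-alert = 33
cheaper-busyness-backlog-nonzero = 60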
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_memory_limits(self, rss_soft=None, rss_hard=None): """Sets worker memory limits for cheapening. :param int rss_soft: Don't spawn new workers if total resident memory usage of all workers is higher than this limit in bytes. .. warning:: This option expects memory reporting enabled: ``.logging.set_basic_params(memory_report=1)`` :param int rss_hard: Try to stop workers if total workers resident memory usage is higher than this limit in bytes. """
self._set('cheaper-rss-limit-soft', rss_soft) self._set('cheaper-rss-limit-hard', rss_hard) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_version(self, as_tuple=False): """Returns uWSGI version string or tuple. :param bool as_tuple: :rtype: str|tuple """
if as_tuple: return uwsgi.version_info return decode(uwsgi.version)
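A usage sketch; ``platform`` is a hypothetical instance of the runtime class exposing this method (the underlying ``uwsgi.version`` and ``uwsgi.version_info`` attributes are standard in the uwsgi module):

# print(platform.get_version())               # e.g. '2.0.21'
# print(platform.get_version(as_tuple=True))  # raw uwsgi.version_info tuple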
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_route(self, route_rules, label=None): """Registers a routing rule. :param RouteRule|list[RouteRule] route_rules: :param str|unicode label: Label to mark the given set of rules. This can be used in conjunction with ``do_goto`` rule action. * http://uwsgi.readthedocs.io/en/latest/InternalRouting.html#goto """
route_rules = listify(route_rules)

if route_rules and label:
    self._set(route_rules[0].command_label, label, multi=True)

# Iterate with a distinct name to avoid shadowing the list being iterated.
for route_rule in route_rules:
    self._set(route_rule.command, route_rule.value, multi=True)

return self._section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_error_pages(self, codes_map=None, common_prefix=None): """Add error pages for managed 403, 404, 500 responses. Shortcut for ``.set_error_page()``. :param dict codes_map: Status code mapped into an html filepath or just a filename if common_prefix is used. If not set, a filename containing the status code is presumed: 403.html, 404.html, 500.html. :param str|unicode common_prefix: Common path (prefix) for all files. """
statuses = [403, 404, 500] if common_prefix: if not codes_map: codes_map = {code: '%s.html' % code for code in statuses} for code, filename in codes_map.items(): codes_map[code] = os.path.join(common_prefix, filename) for code, filepath in codes_map.items(): self.set_error_page(code, filepath) return self._section
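A hypothetical call, assuming ``statics`` names the options-group instance this method belongs to:

# Resolves to /var/www/errors/forbidden.html etc.; with codes_map omitted,
# 403.html / 404.html / 500.html under the prefix are presumed.
statics.set_error_pages(
    codes_map={403: 'forbidden.html', 404: 'missing.html', 500: 'oops.html'},
    common_prefix='/var/www/errors')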
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def header_add(self, name, value): """Automatically add HTTP headers to response. :param str|unicode name: :param str|unicode value: """
self._set('add-header', '%s: %s' % (name, value), multi=True) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def header_remove(self, value): """Automatically remove specified HTTP header from the response. :param str|unicode value: """
self._set('del-header', value, multi=True) return self._section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def add_expiration_rule(self, criterion, value, timeout, use_mod_time=False): """Adds a statics expiration rule based on a criterion. :param str|unicode criterion: Criterion (subject) to base expiration on. See ``.expiration_criteria``. :param str|unicode|list[str|unicode] value: Value to test criteria upon. .. note:: Usually a regular expression. :param int timeout: Number of seconds to expire after. :param bool use_mod_time: Base on file modification time instead of the current time. """
command = 'static-expires'
separator = ' '

if criterion != self.expiration_criteria.FILENAME:
    command += '-%s' % criterion

    if criterion == self.expiration_criteria.MIME_TYPE:
        separator = '='

if use_mod_time:
    command += '-mtime'

# Iterate with a distinct name to avoid shadowing the `value` argument.
for val in listify(value):
    self._set(command, '%s%s%s' % (val, separator, timeout), multi=True)

return self._section
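Sketch of the option strings this builds (derived from the command/suffix logic above; patterns and timeouts are illustrative):

[uwsgi]
; filename criterion (default), relative to current time
static-expires = ^/static/.*\.png$ 3600
; same criterion, keyed on file modification time (use_mod_time=True)
static-expires-mtime = ^/static/.*\.css$ 86400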
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_paths_caching_params(self, timeout=None, cache_name=None): """Use the uWSGI caching subsystem to store mappings from URI to filesystem paths. * http://uwsgi.readthedocs.io/en/latest/StaticFiles.html#caching-paths-mappings-resolutions :param int timeout: Amount of seconds to put resolved paths in the uWSGI cache. :param str|unicode cache_name: Cache name to use for static paths. """
self._set('static-cache-paths', timeout) self._set('static-cache-paths-name', cache_name) return self._section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_socket_params( self, send_timeout=None, keep_alive=None, no_defer_accept=None, buffer_send=None, buffer_receive=None): """Sets common socket params. :param int send_timeout: Send (write) timeout in seconds. :param bool keep_alive: Enable TCP KEEPALIVEs. :param bool no_defer_accept: Disable deferred ``accept()`` on sockets. By default (where available) uWSGI will defer the ``accept()`` of requests until some data is sent by the client (this is a security/performance measure). If you want to disable this feature for some reason, specify this option. :param int buffer_send: Set SO_SNDBUF (bytes). :param int buffer_receive: Set SO_RCVBUF (bytes). """
self._set('so-send-timeout', send_timeout) self._set('so-keepalive', keep_alive, cast=bool) self._set('no-defer-accept', no_defer_accept, cast=bool) self._set('socket-sndbuf', buffer_send) self._set('socket-rcvbuf', buffer_receive) return self._section
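The corresponding ini options (names verbatim from the ``_set`` calls; values illustrative):

[uwsgi]
so-send-timeout = 30
so-keepalive = true
no-defer-accept = true
socket-sndbuf = 65536
socket-rcvbuf = 65536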
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_unix_socket_params(self, abstract=None, permissions=None, owner=None, umask=None): """Sets Unix-socket related params. :param bool abstract: Force UNIX socket into abstract mode (Linux only). :param str permissions: UNIX sockets are filesystem objects that obey UNIX permissions like any other filesystem object. You can set the UNIX sockets' permissions with this option if your webserver would otherwise have no access to the uWSGI socket. When used without a parameter, the permissions will be set to 666. Otherwise the specified chmod value will be used. :param str|unicode owner: Chown UNIX sockets. :param str|unicode umask: Set UNIX socket umask. """
self._set('abstract-socket', abstract, cast=bool) self._set('chmod-socket', permissions) self._set('chown-socket', owner) self._set('umask', umask) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_bsd_socket_params(self, port_reuse=None): """Sets BSD-sockets related params. :param bool port_reuse: Enable REUSE_PORT flag on socket to allow multiple instances binding on the same address (BSD only). """
self._set('reuse-port', port_reuse, cast=bool) return self._section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def enable(self, size, block_size=None, store=None, store_sync_interval=None): """Enables a shared queue of the given size. :param int size: Queue size. :param int block_size: Block size in bytes. Default: 8 KiB. :param str|unicode store: Persist the queue into a file. :param int store_sync_interval: Store sync interval in master cycles (usually seconds). """
self._set('queue', size) self._set('queue-blocksize', block_size) self._set('queue-store', store) self._set('queue-store-sync', store_sync_interval) return self._section
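The emitted options, sketched (the store path is hypothetical):

[uwsgi]
queue = 100
queue-blocksize = 65536
queue-store = /tmp/app.queue
queue-store-sync = 60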
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_ipcsem_params(self, ftok=None, persistent=None): """Sets ipcsem lock engine params. :param str|unicode ftok: Set the ipcsem key via ftok() for avoiding duplicates. :param bool persistent: Do not remove ipcsem's on shutdown. """
self._set('ftok', ftok) self._set('persistent-ipcsem', persistent, cast=bool) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lock_file(self, fpath, after_setup=False, wait=False): """Locks the specified file. :param str|unicode fpath: File path. :param bool after_setup: True - after logging/daemon setup False - before starting :param bool wait: True - wait if locked False - exit if locked """
command = 'flock-wait' if wait else 'flock' if after_setup: command = '%s2' % command self._set(command, fpath) return self._section
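The four variants the command-building logic above can produce (the lock file path is illustrative):

[uwsgi]
flock = /tmp/app.lock        ; before start, exit if already locked
flock-wait = /tmp/app.lock   ; before start, wait if already locked
flock2 = /tmp/app.lock       ; after setup, exit if already locked
flock-wait2 = /tmp/app.lock  ; after setup, wait if already locked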
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_rpc(name=None): """Decorator. Allows registering a function for RPC. * http://uwsgi.readthedocs.io/en/latest/RPC.html Example: .. code-block:: python @register_rpc() def expose_me(): do() :param str|unicode name: RPC function name to associate with decorated function. :rtype: callable """
def wrapper(func): func_name = func.__name__ rpc_name = name or func_name uwsgi.register_rpc(rpc_name, func) _LOG.debug("Registering '%s' for RPC under '%s' alias ...", func_name, rpc_name) return func return wrapper
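A sketch with an explicit alias; the invocation side uses ``uwsgi.call`` for a local RPC lookup and is shown commented out since it only works inside a running uWSGI instance:

@register_rpc('mul')
def multiply(a, b):
    # RPC arguments arrive as bytes under uWSGI
    return str(int(a) * int(b))

# result = uwsgi.call('mul', b'6', b'7')  # -> b'42'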
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_emperor_command_params( self, command_socket=None, wait_for_command=None, wait_for_command_exclude=None): """Emperor commands related parameters. * http://uwsgi-docs.readthedocs.io/en/latest/tutorials/EmperorSubscriptions.html :param str|unicode command_socket: Enable the Emperor command socket. It is a channel allowing an external process to govern vassals. :param bool wait_for_command: Always wait for a 'spawn' Emperor command before starting a vassal. :param str|unicode|list[str|unicode] wait_for_command_exclude: Vassals that will ignore ``wait_for_command``. """
self._set('emperor-command-socket', command_socket) self._set('emperor-wait-for-command', wait_for_command, cast=bool) self._set('emperor-wait-for-command-ignore', wait_for_command_exclude, multi=True) return self._section
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def set_vassals_wrapper_params(self, wrapper=None, overrides=None, fallbacks=None): """Binary wrapper for vassals parameters. :param str|unicode wrapper: Set a binary wrapper for vassals. :param str|unicode|list[str|unicode] overrides: Set a binary wrapper for vassals to try before the default one. :param str|unicode|list[str|unicode] fallbacks: Set a binary wrapper for vassals to try as a last resort. Allows you to specify an alternative binary to execute when running a vassal and the default binary_path is not found (or returns an error). """
self._set('emperor-wrapper', wrapper) self._set('emperor-wrapper-override', overrides, multi=True) self._set('emperor-wrapper-fallback', fallbacks, multi=True) return self._section
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_throttle_params(self, level=None, level_max=None): """Throttling options. * http://uwsgi-docs.readthedocs.io/en/latest/Emperor.html#throttling * http://uwsgi-docs.readthedocs.io/en/latest/Emperor.html#loyalty :param int level: Set throttling level (in milliseconds) for bad behaving vassals. Default: 1000. :param int level_max: Set maximum throttling level (in milliseconds) for bad behaving vassals. Default: 3 minutes. """
self._set('emperor-throttle', level) self._set('emperor-max-throttle', level_max) return self._section