text_prompt: string (157 to 13.1k chars) | code_prompt: string (7 to 19.8k chars, may be null)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randdomain(self):
""" -> a randomized domain-like name """ |
return '.'.join(
rand_readable(3, 6, use=self.random, density=3)
for _ in range(self.random.randint(1, 2))
).lower() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_tuple(self, _list):
""" Recursively converts lists to tuples """ |
result = list()
for l in _list:
if isinstance(l, list):
result.append(tuple(self._to_tuple(l)))
else:
result.append(l)
return tuple(result) |
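A standalone sketch of the recursive conversion above (a hypothetical to_tuple helper without the class wrapper), shown only to illustrate the expected behaviour:

def to_tuple(_list):
    # Mirror of _to_tuple above: recurse into nested lists, leave other items as-is.
    result = []
    for item in _list:
        if isinstance(item, list):
            result.append(to_tuple(item))
        else:
            result.append(item)
    return tuple(result)

print(to_tuple([1, [2, [3, 4]], 5]))  # -> (1, (2, (3, 4)), 5)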
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sequence(self, struct, size=1000, tree_depth=1, append_callable=None):
""" Generates random values for sequence-like objects @struct: the sequence-like structure you want to fill with random data @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|(value1, value2)| 2=|((value1, value2), (value1, value2))| @append_callable: #callable method which appends/adds data to your sequence-like structure - e.g. :meth:list.append -> random @struct .. from collections import UserList from vital.debug import RandData class MySequence(UserList):
pass rd = RandData(int) my_seq = MySequence() rd.sequence(my_seq, 3, 1, my_seq.append) # -> [88508293836062443, 49097807561770961, 55043550817099444] .. """ |
if not tree_depth:
return self._map_type()
_struct = struct()
add_struct = _struct.append if not append_callable \
else getattr(_struct, append_callable)
for x in range(size):
add_struct(self.sequence(
struct, size, tree_depth-1, append_callable))
return _struct |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mapping(self, struct, key_depth=1000, tree_depth=1, update_callable=None):
""" Generates random values for dict-like objects @struct: the dict-like structure you want to fill with random data @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|{key: value}| 2=|{key: {key: value}, key2: {key2: value2}}| @update_callable: #callable method which updates data in your dict-like structure - e.g. :meth:builtins.dict.update -> random @struct .. from collections import UserDict from vital.debug import RandData class MyDict(UserDict):
pass rd = RandData(int) my_dict = MyDict() rd.dict(my_dict, 3, 1, my_dict.update) # -> { # 'SE0ZNy0F6O': 42078648993195761, # 'pbK': 70822820981335987, # '0A5Aa7': 17503122029338459} .. """ |
if not tree_depth:
return self._map_type()
_struct = struct()
add_struct = _struct.update if not update_callable \
else getattr(_struct, update_callable)
for x in range(key_depth):
add_struct({
self.randstr: self.mapping(
struct, key_depth, tree_depth-1, update_callable)
})
return _struct |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_numeric_sequence(self, _sequence, separator="."):
""" Length of the highest index in chars = justification size """ |
if not _sequence:
return colorize(_sequence, "purple")
_sequence = _sequence if _sequence is not None else self.obj
minus = (2 if self._depth > 0 else 0)
just_size = len(str(len(_sequence)))
out = []
add_out = out.append
for i, item in enumerate(_sequence):
self._incr_just_size(just_size+minus)
add_out(self._numeric_prefix(
i, self.pretty(item, display=False),
just=just_size, color="blue", separator=separator))
self._decr_just_size(just_size+minus)
if not self._depth:
return padd("\n".join(out) if out else str(out), padding="top")
else:
return "\n".join(out) if out else str(out) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def objname(self, obj=None):
""" Formats object names in a pretty fashion """ |
obj = obj or self.obj
_objname = self.pretty_objname(obj, color=None)
_objname = "'{}'".format(colorize(_objname, "blue"))
return _objname |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_obj(self, item=None):
""" Determines the type of the object and maps it to the correct formatter """ |
# Order here matters, odd behavior with tuples
if item is None:
return getattr(self, 'number')(item)
elif isinstance(item, self.str_):
#: String
return item + " "
elif isinstance(item, bytes):
#: Bytes
return getattr(self, 'bytes')(item)
elif isinstance(item, self.numeric_):
#: Float, int, etc.
return getattr(self, 'number')(item)
elif isinstance(item, self.dict_):
#: Dict
return getattr(self, 'dict')(item)
elif isinstance(item, self.list_):
#: List
return getattr(self, 'list')(item)
elif isinstance(item, tuple):
#: Tuple
return getattr(self, 'tuple')(item)
elif isinstance(item, types.GeneratorType):
#: Generator
return getattr(self, 'generator')(item)
elif isinstance(item, self.set_):
#: Set
return getattr(self, 'set')(item)
elif isinstance(item, deque):
#: Deque
return getattr(self, 'deque')(item)
elif isinstance(item, Sequence):
#: Sequence
return getattr(self, 'sequence')(item)
#: Any other object
return getattr(self, 'object')(item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
""" Pretty prints object name @obj: the object whose name you want to pretty print @maxlen: #int maximum length of an object name to print @color: your choice of :mod:colors or |None| -> #str pretty object name .. from vital.debug import Look print(Look.pretty_objname(dict)) # -> 'dict\x1b[1;36m<builtins>\x1b[1;m' .. """ |
parent_name = lambda_sub("", get_parent_name(obj) or "")
objname = get_obj_name(obj)
if color:
objname += colorize("<{}>".format(parent_name), color, close=False)
else:
objname += "<{}>".format(parent_name)
objname = objname if len(objname) < maxlen else \
objname[:(maxlen-1)]+"…>"
if color:
objname += colors.RESET
return objname |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _print_message(self, flag_message=None, color=None, padding=None, reverse=False):
""" Outputs the message to the terminal """ |
if flag_message:
flag_message = stdout_encode(flag(flag_message,
color=color if self.pretty else None,
show=False))
if not reverse:
print(padd(flag_message, padding),
self.format_messages(self.message))
else:
print(self.format_messages(self.message),
padd(flag_message, padding))
else:
print(self.format_messages(self.message))
self.message = [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
""" Starts the timer """ |
if not self._start:
self._first_start = time.perf_counter()
self._start = self._first_start
else:
self._start = time.perf_counter() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pct_diff(self, best, other):
""" Calculates and colorizes the percent difference between @best and @other """ |
return colorize("{}%".format(
round(((best-other)/best)*100, 2)).rjust(10), "red") |
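The arithmetic above is the relative difference against @best; with illustrative values best=2.0 seconds and other=3.0 seconds, the helper renders -50.0%, i.e. the other result is 50% slower:

best, other = 2.0, 3.0
print(round(((best - other) / best) * 100, 2))  # -> -50.0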
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def async_lru(size=100):
""" An LRU cache for asyncio coroutines in Python 3.5 .. @async_lru(1024) async def slow_coroutine(*args, **kwargs):
return await some_other_slow_coroutine() .. """ |
cache = collections.OrderedDict()
def decorator(fn):
@wraps(fn)
@asyncio.coroutine
def memoizer(*args, **kwargs):
key = str((args, kwargs))
try:
result = cache.pop(key)
cache[key] = result
except KeyError:
if len(cache) >= size:
cache.popitem(last=False)
result = cache[key] = yield from fn(*args, **kwargs)
return result
return memoizer
return decorator |
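A minimal usage sketch, assuming the decorator above is importable as async_lru and using the same generator-based coroutine style (Python 3.4/3.5-era asyncio); slow_coroutine is a hypothetical stand-in:

import asyncio

@async_lru(size=2)
@asyncio.coroutine
def slow_coroutine(x):
    yield from asyncio.sleep(0.1)  # pretend this is expensive
    return x * 2

loop = asyncio.get_event_loop()
print(loop.run_until_complete(slow_coroutine(21)))  # computed -> 42
print(loop.run_until_complete(slow_coroutine(21)))  # served from the LRU cache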
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info(msg, *args, **kw):
# type: (str, *Any, **Any) -> None """ Print sys message to stdout. System messages should inform about the flow of the script. This should be a major milestones during the build. """ |
if len(args) or len(kw):
msg = msg.format(*args, **kw)
shell.cprint('-- <32>{}<0>'.format(msg)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def err(msg, *args, **kw):
# type: (str, *Any, **Any) -> None """ Per step status messages Use this locally in a command definition to highlight more important information. """ |
if len(args) or len(kw):
msg = msg.format(*args, **kw)
shell.cprint('-- <31>{}<0>'.format(msg)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def current_branch():
# type: () -> BranchDetails """ Return the BranchDetails for the current branch. Return: BranchDetails: The details of the current branch. """ |
cmd = 'git symbolic-ref --short HEAD'
branch_name = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
return BranchDetails.parse(branch_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit_branches(sha1):
# type: (str) -> List[str] """ Get the name of the branches that this commit belongs to. """ |
cmd = 'git branch --contains {}'.format(sha1)
return shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip().split() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_base_branch():
# type: (str) -> Optional[str, None] """ Try to guess the base branch for the current branch. Do not trust this guess. git makes it pretty much impossible to guess the base branch reliably so this function implements few heuristics that will work on most common use cases but anything a bit crazy will probably trip this function. Returns: Optional[str]: The name of the base branch for the current branch if guessable or **None** if can't guess. """ |
my_branch = current_branch(refresh=True).name
curr = latest_commit()
if len(curr.branches) > 1:
# We're possibly at the beginning of the new branch (currently both
# on base and new branch).
other = [x for x in curr.branches if x != my_branch]
if len(other) == 1:
return other[0]
return None
else:
# We're on one branch
parent = curr
while parent and my_branch in parent.branches:
curr = parent
if len(curr.branches) > 1:
other = [x for x in curr.branches if x != my_branch]
if len(other) == 1:
return other[0]
return None
parents = [p for p in curr.parents if my_branch in p.branches]
num_parents = len(parents)
if num_parents > 2:
# More than two parents, give up
return None
if num_parents == 2:
# This is a merge commit.
for p in parents:
if p.branches == [my_branch]:
parent = p
break
elif num_parents == 1:
parent = parents[0]
elif num_parents == 0:
parent = None
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit_author(sha1=''):
# type: (str) -> Author """ Return the author of the given commit. Args: sha1 (str):
The sha1 of the commit to query. If not given, the author of the latest commit will be returned. Returns: Author: A named tuple ``(name, email)`` with the commit author details. """ |
with conf.within_proj_dir():
cmd = 'git show -s --format="%an||%ae" {}'.format(sha1)
result = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout
name, email = result.split('||')
return Author(name, email) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unstaged():
# type: () -> List[str] """ Return a list of unstaged files in the project repository. Returns: list[str]: The list of files not tracked by project git repo. """ |
with conf.within_proj_dir():
status = shell.run(
'git status --porcelain',
capture=True,
never_pretend=True
).stdout
results = []
for file_status in status.split(os.linesep):
if file_status.strip() and file_status[0] == ' ':
results.append(file_status[3:].strip())
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ignore():
# type: () -> List[str] """ Return a list of patterns in the project .gitignore Returns: list[str]: List of patterns set to be ignored by git. """ |
def parse_line(line): # pylint: disable=missing-docstring
# Decode if necessary
if not isinstance(line, string_types):
line = line.decode('utf-8')
# Strip comment
line = line.split('#', 1)[0].strip()
return line
ignore_files = [
conf.proj_path('.gitignore'),
conf.proj_path('.git/info/exclude'),
config().get('core.excludesfile')
]
result = []
for ignore_file in ignore_files:
if not (ignore_file and os.path.exists(ignore_file)):
continue
with open(ignore_file) as fp:
parsed = (parse_line(l) for l in fp.readlines())
result += [x for x in parsed if x]
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def branches():
# type: () -> List[str] """ Return a list of branches in the current repo. Returns: list[str]: A list of branches in the current repo. """ |
out = shell.run(
'git branch',
capture=True,
never_pretend=True
).stdout.strip()
return [x.strip('* \t\n') for x in out.splitlines()] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag(name, message, author=None):
# type: (str, str, Author, bool) -> None """ Tag the current commit. Args: name (str):
The tag name. message (str):
The tag message. Same as ``-m`` parameter in ``git tag``. author (Author):
The commit author. Will default to the author of the commit. pretend (bool):
If set to **True** it will print the full ``git tag`` command instead of actually executing it. """ |
cmd = (
'git -c "user.name={author.name}" -c "user.email={author.email}" '
'tag -a "{name}" -m "{message}"'
).format(
author=author or latest_commit().author,
name=name,
message=message.replace('"', '\\"').replace('`', '\\`'),
)
shell.run(cmd) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def config():
# type: () -> dict[str, Any] """ Return the current git configuration. Returns: dict[str, Any]: The current git config taken from ``git config --list``. """ |
out = shell.run(
'git config --list',
capture=True,
never_pretend=True
).stdout.strip()
result = {}
for line in out.splitlines():
name, value = line.split('=', 1)
result[name.strip()] = value.strip()
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tags():
# type: () -> List[str] """ Returns all tags in the repo. Returns: list[str]: List of all tags in the repo, sorted as versions. All tags returned by this function will be parsed as if the contained versions (using ``v:refname`` sorting). """ |
return shell.run(
'git tag --sort=v:refname',
capture=True,
never_pretend=True
).stdout.strip().splitlines() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_branch(branch_name):
# type: (str) -> bool """ Verify if the given branch exists. Args: branch_name (str):
The name of the branch to check. Returns: bool: **True** if a branch with name *branch_name* exists, **False** otherwise. """ |
try:
shell.run(
'git rev-parse --verify {}'.format(branch_name),
never_pretend=True
)
return True
except IOError:
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def protected_branches():
# type: () -> list[str] """ Return branches protected by deletion. By default those are master and devel branches as configured in pelconf. Returns: list[str]: Names of important branches that should not be deleted. """ |
master = conf.get('git.master_branch', 'master')
develop = conf.get('git.devel_branch', 'develop')
return conf.get('git.protected_branches', (master, develop)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def branches(self):
# type: () -> List[str] """ List of all branches this commit is a part of. """ |
if self._branches is None:
cmd = 'git branch --contains {}'.format(self.sha1)
out = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
self._branches = [x.strip('* \t\n') for x in out.splitlines()]
return self._branches |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parents(self):
# type: () -> List[CommitDetails] """ Parents of the this commit. """ |
if self._parents is None:
self._parents = [CommitDetails.get(x) for x in self.parents_sha1]
return self._parents |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def number(self):
# type: () -> int """ Return this commits number. This is the same as the total number of commits in history up until this commit. This value can be useful in some CI scenarios as it allows to track progress on any given branch (although there can be two commits with the same number existing on different branches). Returns: int: The commit number/index. """ |
cmd = 'git log --oneline {}'.format(self.sha1)
out = shell.run(cmd, capture=True, never_pretend=True).stdout.strip()
return len(out.splitlines()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, sha1=''):
# type: (str) -> CommitDetails """ Return details about a given commit. Args: sha1 (str):
The sha1 of the commit to query. If not given, it will return the details for the latest commit. Returns: CommitDetails: Commit details. You can use the instance of the class to query git tree further. """ |
with conf.within_proj_dir():
cmd = 'git show -s --format="%H||%an||%ae||%s||%b||%P" {}'.format(
sha1
)
result = shell.run(cmd, capture=True, never_pretend=True).stdout
sha1, name, email, title, desc, parents = result.split('||')
return CommitDetails(
sha1=sha1,
author=Author(name, email),
title=title,
desc=desc,
parents_sha1=parents.split(),
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_log_level(cls, log_level):
"""Sets the log level for cons3rt assets This method sets the logging level for cons3rt assets using pycons3rt. The loglevel is read in from a deployment property called loglevel and set appropriately. :type log_level: str :return: True if log level was set, False otherwise. """ |
log = logging.getLogger(cls.cls_logger + '.set_log_level')
log.info('Attempting to set the log level...')
if log_level is None:
log.info('Arg loglevel was None, log level will not be updated.')
return False
if not isinstance(log_level, basestring):
log.error('Passed arg loglevel must be a string')
return False
log_level = log_level.upper()
log.info('Attempting to set log level to: %s...', log_level)
if log_level == 'DEBUG':
cls._logger.setLevel(logging.DEBUG)
elif log_level == 'INFO':
cls._logger.setLevel(logging.INFO)
elif log_level == 'WARN':
cls._logger.setLevel(logging.WARN)
elif log_level == 'WARNING':
cls._logger.setLevel(logging.WARN)
elif log_level == 'ERROR':
cls._logger.setLevel(logging.ERROR)
else:
log.error('Could not set log level, this is not a valid log level: %s', log_level)
return False
log.info('pycons3rt loglevel set to: %s', log_level)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getattr_in(obj, name):
""" Finds an in @obj via a period-delimited string @name. @obj: (#object) @name: (#str) |.|-separated keys to search @obj in .. obj.deep.attr = 'deep value' getattr_in(obj, 'obj.deep.attr') .. |'deep value'| """ |
for part in name.split('.'):
obj = getattr(obj, part)
return obj |
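A small runnable sketch of the dotted lookup above, assuming getattr_in is importable as written; note that the path is resolved relative to @obj itself (SimpleNamespace is used here purely for illustration):

from types import SimpleNamespace

obj = SimpleNamespace(deep=SimpleNamespace(attr='deep value'))
print(getattr_in(obj, 'deep.attr'))  # -> 'deep value'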
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def import_from(name):
""" Imports a module, class or method from string and unwraps it if wrapped by functools @name: (#str) name of the python object -> imported object """ |
obj = name
if isinstance(name, str) and len(name):
try:
obj = locate(name)
assert obj is not None
except (AttributeError, TypeError, AssertionError, ErrorDuringImport):
try:
name = name.split(".")
attr = name[-1]
name = ".".join(name[:-1])
mod = importlib.import_module(name)
obj = getattr(mod, attr)
except (SyntaxError, AttributeError, ImportError, ValueError):
try:
name = name.split(".")
attr_sup = name[-1]
name = ".".join(name[:-1])
mod = importlib.import_module(name)
obj = getattr(getattr(mod, attr_sup), attr)
except:
# We give up.
pass
obj = unwrap_obj(obj)
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unwrap_obj(obj):
""" Gets the actual object from a decorated or wrapped function @obj: (#object) the object to unwrap """ |
try:
obj = obj.fget
except (AttributeError, TypeError):
pass
try:
# Cached properties
if obj.func.__doc__ == obj.__doc__:
obj = obj.func
except AttributeError:
pass
try:
# Setter/Getters
obj = obj.getter
except AttributeError:
pass
try:
# Wrapped Funcs
obj = inspect.unwrap(obj)
except:
pass
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load():
# type: () -> None """ Load configuration from file. This will search the directory structure upwards to find the project root (directory containing ``pelconf.py`` file). Once found it will import the config file which should initialize all the configuration (using `peltak.core.conf.init()` function). You can also have both yaml (configuration) and python (custom commands) living together. Just remember that calling `conf.init()` will overwrite the config defined in YAML. """ |
with within_proj_dir():
if os.path.exists('pelconf.yaml'):
load_yaml_config('pelconf.yaml')
if os.path.exists('pelconf.py'):
load_py_config('pelconf.py') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_yaml_config(conf_file):
# type: (str) -> None """ Load a YAML configuration. This will not update the configuration but replace it entirely. Args: conf_file (str):
Path to the YAML config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid YAML file. """ |
global g_config
with open(conf_file) as fp:
# Initialize config
g_config = util.yaml_load(fp)
# Add src_dir to sys.path if it's set. This is only done with YAML
# configs, py configs have to do this manually.
src_dir = get_path('src_dir', None)
if src_dir is not None:
sys.path.insert(0, src_dir)
for cmd in get('commands', []):
_import(cmd) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_py_config(conf_file):
# type: (str) -> None """ Import configuration from a python file. This will just import the file into python. Sky is the limit. The file has to deal with the configuration all by itself (i.e. call conf.init()). You will also need to add your src directory to sys.paths if it's not the current working directory. This is done automatically if you use yaml config as well. Args: conf_file (str):
Path to the py module config. This function will not check the file name or extension and will just crash if the given file does not exist or is not a valid python file. """ |
if sys.version_info >= (3, 5):
from importlib import util
spec = util.spec_from_file_location('pelconf', conf_file)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
elif sys.version_info >= (3, 3):
from importlib import machinery
loader = machinery.SourceFileLoader('pelconf', conf_file)
_ = loader.load_module()
elif sys.version_info <= (3, 0):
import imp
imp.load_source('pelconf', conf_file) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_template(filename):
# type: (str) -> str """ Load template from file. The templates are part of the package and must be included as ``package_data`` in project ``setup.py``. Args: filename (str):
The template path. Relative to `peltak` package directory. Returns: str: The content of the chosen template. """ |
template_file = os.path.join(PKG_DIR, 'templates', filename)
with open(template_file) as fp:
return fp.read() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def within_proj_dir(path='.'):
# type: (Optional[str]) -> str """ Return an absolute path to the given project relative path. :param path: Project relative path that will be converted to the system wide absolute path. :return: Absolute path. """ |
curr_dir = os.getcwd()
os.chdir(proj_path(path))
yield
os.chdir(curr_dir) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(name, *default):
# type: (str, Any) -> Any """ Get config value with the given name and optional default. Args: name (str):
The name of the config value. *default (Any):
If given and the key does not exist, this will be returned instead. If it's not given and the config value does not exist, AttributeError will be raised. Returns: The requested config value. This is one of the global values defined in this file. If the value does not exist it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given. """ |
global g_config
curr = g_config
for part in name.split('.'):
if part in curr:
curr = curr[part]
elif default:
return default[0]
else:
raise AttributeError("Config value '{}' does not exist".format(
name
))
return curr |
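The traversal above walks a nested dict one dotted segment at a time; a self-contained sketch of the same lookup (hypothetical dict_get helper operating on a plain dict instead of the module-level g_config):

def dict_get(config, name, *default):
    # Standalone sketch of the dotted-path lookup used by get() above.
    curr = config
    for part in name.split('.'):
        if isinstance(curr, dict) and part in curr:
            curr = curr[part]
        elif default:
            return default[0]
        else:
            raise AttributeError("Config value '{}' does not exist".format(name))
    return curr

cfg = {'git': {'master_branch': 'main'}}
print(dict_get(cfg, 'git.master_branch'))            # -> 'main'
print(dict_get(cfg, 'git.devel_branch', 'develop'))  # -> 'develop' (default)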
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_path(name, *default):
# type: (str, Any) -> Any """ Get config value as path relative to the project directory. This allows easily defining the project configuration within the fabfile as always relative to that fabfile. Args: name (str):
The name of the config value containing the path. *default (Any):
If given and the key does not exist, this will be returned instead. If it's not given and the config value does not exist, AttributeError will be raised. Returns: The requested config value. This is one of the global values defined in this file. If the value does not exist it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given. """ |
global g_config
value = get(name, *default)
if value is None:
return None
return proj_path(value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_proj_root():
# type: () -> Optional[str] """ Find the project path by going up the file tree. This will look in the current directory and upwards for the pelconf file (.yaml or .py) """ |
proj_files = frozenset(('pelconf.py', 'pelconf.yaml'))
curr = os.getcwd()
while curr.startswith('/') and len(curr) > 1:
if proj_files & frozenset(os.listdir(curr)):
return curr
else:
curr = os.path.dirname(curr)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_bucket(self):
"""Verify the specified bucket exists This method validates that the bucket name passed in the S3Util constructor actually exists. :return: None """ |
log = logging.getLogger(self.cls_logger + '.validate_bucket')
log.info('Attempting to get bucket: {b}'.format(b=self.bucket_name))
max_tries = 10
count = 1
while count <= max_tries:
log.info('Attempting to connect to S3 bucket %s, try %s of %s',
self.bucket_name, count, max_tries)
try:
self.s3client.head_bucket(Bucket=self.bucket_name)
except ClientError as e:
_, ex, trace = sys.exc_info()
error_code = int(e.response['Error']['Code'])
log.debug(
'Connecting to bucket %s produced response code: %s',
self.bucket_name, error_code)
if error_code == 404:
msg = 'Error 404 response indicates that bucket {b} does not ' \
'exist:\n{e}'.format(b=self.bucket_name, e=str(ex))
log.error(msg)
raise S3UtilError, msg, trace
elif error_code == 500 or error_code == 503:
if count >= max_tries:
msg = 'S3 bucket is not accessible at this time: {b}\n{e}'.format(
b=self.bucket_name, e=str(ex))
log.error(msg)
raise S3UtilError, msg, trace
else:
log.warn('AWS returned error code 500 or 503, re-trying in 5 sec...')
time.sleep(5)
count += 1
continue
else:
msg = 'Connecting to S3 bucket {b} returned code: {c}\n{e}'.\
format(b=self.bucket_name, c=error_code, e=str(ex))
log.error(msg)
raise S3UtilError, msg, trace
else:
log.info('Found bucket: %s', self.bucket_name)
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __download_from_s3(self, key, dest_dir):
"""Private method for downloading from S3 This private helper method takes a key and the full path to the destination directory, assumes that the args have been validated by the public caller methods, and attempts to download the specified key to the dest_dir. :param key: (str) S3 key for the file to be downloaded :param dest_dir: (str) Full path destination directory :return: (str) Downloaded file destination if the file was downloaded successfully, None otherwise. """ |
log = logging.getLogger(self.cls_logger + '.__download_from_s3')
filename = key.split('/')[-1]
if filename is None:
log.error('Could not determine the filename from key: %s', key)
return None
destination = dest_dir + '/' + filename
log.info('Attempting to download %s from bucket %s to destination %s',
key, self.bucket_name, destination)
max_tries = 10
count = 1
while count <= max_tries:
log.info('Attempting to download file %s, try %s of %s', key,
count, max_tries)
try:
self.s3client.download_file(
Bucket=self.bucket_name, Key=key, Filename=destination)
except ClientError:
if count >= max_tries:
_, ex, trace = sys.exc_info()
msg = 'Unable to download key {k} from S3 bucket {b}:\n{e}'.format(
k=key, b=self.bucket_name, e=str(ex))
log.error(msg)
raise S3UtilError, msg, trace
else:
log.warn('Download failed, re-trying...')
count += 1
time.sleep(5)
continue
else:
log.info('Successfully downloaded %s from S3 bucket %s to: %s',
key,
self.bucket_name,
destination)
return destination |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_file_by_key(self, key, dest_dir):
"""Downloads a file by key from the specified S3 bucket This method takes the full 'key' as the arg, and attempts to download the file to the specified dest_dir as the destination directory. This method sets the downloaded filename to be the same as it is on S3. :param key: (str) S3 key for the file to be downloaded. :param dest_dir: (str) Full path destination directory :return: (str) Downloaded file destination if the file was downloaded successfully, None otherwise. """ |
log = logging.getLogger(self.cls_logger + '.download_file_by_key')
if not isinstance(key, basestring):
log.error('key argument is not a string')
return None
if not isinstance(dest_dir, basestring):
log.error('dest_dir argument is not a string')
return None
if not os.path.isdir(dest_dir):
log.error('Directory not found on file system: %s', dest_dir)
return None
try:
dest_path = self.__download_from_s3(key, dest_dir)
except S3UtilError:
raise
return dest_path |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_file(self, regex, dest_dir):
"""Downloads a file by regex from the specified S3 bucket This method takes a regular expression as the arg, and attempts to download the file to the specified dest_dir as the destination directory. This method sets the downloaded filename to be the same as it is on S3. :param regex: (str) Regular expression matching the S3 key for the file to be downloaded. :param dest_dir: (str) Full path destination directory :return: (str) Downloaded file destination if the file was downloaded successfully, None otherwise. """ |
log = logging.getLogger(self.cls_logger + '.download_file')
if not isinstance(regex, basestring):
log.error('regex argument is not a string')
return None
if not isinstance(dest_dir, basestring):
log.error('dest_dir argument is not a string')
return None
if not os.path.isdir(dest_dir):
log.error('Directory not found on file system: %s', dest_dir)
return None
key = self.find_key(regex)
if key is None:
log.warn('Could not find a matching S3 key for: %s', regex)
return None
return self.__download_from_s3(key, dest_dir) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_key(self, regex):
"""Attempts to find a single S3 key based on the passed regex Given a regular expression, this method searches the S3 bucket for a matching key, and returns it if exactly 1 key matches. Otherwise, None is returned. :param regex: (str) Regular expression for an S3 key :return: (str) Full length S3 key matching the regex, None otherwise """ |
log = logging.getLogger(self.cls_logger + '.find_key')
if not isinstance(regex, basestring):
log.error('regex argument is not a string')
return None
log.info('Looking up a single S3 key based on regex: %s', regex)
matched_keys = []
for item in self.bucket.objects.all():
log.debug('Checking if regex matches key: %s', item.key)
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key)
if len(matched_keys) == 1:
log.info('Found matching key: %s', matched_keys[0])
return matched_keys[0]
elif len(matched_keys) > 1:
log.info('Passed regex matched more than 1 key: %s', regex)
return None
else:
log.info('Passed regex did not match any key: %s', regex)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_file(self, filepath, key):
"""Uploads a file using the passed S3 key This method uploads a file specified by the filepath to S3 using the provided S3 key. :param filepath: (str) Full path to the file to be uploaded :param key: (str) S3 key to be set for the upload :return: True if upload is successful, False otherwise. """ |
log = logging.getLogger(self.cls_logger + '.upload_file')
log.info('Attempting to upload file %s to S3 bucket %s as key %s...',
filepath, self.bucket_name, key)
if not isinstance(filepath, basestring):
log.error('filepath argument is not a string')
return False
if not isinstance(key, basestring):
log.error('key argument is not a string')
return False
if not os.path.isfile(filepath):
log.error('File not found on file system: %s', filepath)
return False
try:
self.s3client.upload_file(
Filename=filepath, Bucket=self.bucket_name, Key=key)
except ClientError as e:
log.error('Unable to upload file %s to bucket %s as key %s:\n%s',
filepath, self.bucket_name, key, e)
return False
else:
log.info('Successfully uploaded file to S3 bucket %s as key %s',
self.bucket_name, key)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_key(self, key_to_delete):
"""Deletes the specified key :param key_to_delete: :return: """ |
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, trace = sys.exc_info()
log.error('ClientError: Unable to delete key: {k}\n{e}'.format(k=key_to_delete, e=str(ex)))
return False
else:
log.info('Successfully deleted key: {k}'.format(k=key_to_delete))
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assert_branch_type(branch_type):
# type: (str) -> None """ Print error and exit if the current branch is not of a given type. Args: branch_type (str):
The branch type. This assumes the branch is in the ``<type>/<title>`` format. """ |
branch = git.current_branch(refresh=True)
if branch.type != branch_type:
if context.get('pretend', False):
log.info("Would assert that you're on a <33>{}/*<32> branch",
branch_type)
else:
log.err("Not on a <33>{}<31> branch!", branch_type)
fmt = ("The branch must follow <33>{required_type}/<name><31>"
"format and your branch is called <33>{name}<31>.")
log.err(fmt, required_type=branch_type, name=branch.name)
sys.exit(1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def git_branch_delete(branch_name):
# type: (str) -> None """ Delete the given branch. Args: branch_name (str):
Name of the branch to delete. """ |
if branch_name not in git.protected_branches():
log.info("Deleting branch <33>{}", branch_name)
shell.run('git branch -d {}'.format(branch_name)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def git_branch_rename(new_name):
# type: (str) -> None """ Rename the current branch Args: new_name (str):
New name for the current branch. """ |
curr_name = git.current_branch(refresh=True).name
if curr_name not in git.protected_branches():
log.info("Renaming branch from <33>{}<32> to <33>{}".format(
curr_name, new_name
))
shell.run('git branch -m {}'.format(new_name)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def git_checkout(branch_name, create=False):
# type: (str, bool) -> None """ Checkout or create a given branch Args: branch_name (str):
The name of the branch to checkout or create. create (bool):
If set to **True** it will create the branch instead of checking it out. """ |
log.info("Checking out <33>{}".format(branch_name))
shell.run('git checkout {} {}'.format('-b' if create else '', branch_name)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_base_branch():
# type: () -> str """ Return the base branch for the current branch. This function will first try to guess the base branch and if it can't it will let the user choose the branch from the list of all local branches. Returns: str: The name of the branch the current branch is based on. """ |
base_branch = git.guess_base_branch()
if base_branch is None:
log.info("Can't guess the base branch, you have to pick one yourself:")
base_branch = choose_branch()
return base_branch |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def choose_branch(exclude=None):
# type: (List[str]) -> str """ Show the user a menu to pick a branch from the existing ones. Args: exclude (list[str]):
List of branch names to exclude from the menu. By default it will exclude master and develop branches. To show all branches pass an empty array here. Returns: str: The name of the branch chosen by the user. If the user inputs an invalid choice, they will be asked again (and again) until they pick a valid branch. """ |
if exclude is None:
master = conf.get('git.master_branch', 'master')
develop = conf.get('git.devel_branch', 'develop')
exclude = {master, develop}
branches = list(set(git.branches()) - set(exclude))
# Print the menu
for i, branch_name in enumerate(branches):
shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))
# Get a valid choice from the user
choice = 0
while choice < 1 or choice > len(branches):
prompt = "Pick a base branch from the above [1-{}]".format(
len(branches)
)
choice = click.prompt(prompt, value_proc=int)
if not (1 <= choice <= len(branches)):
fmt = "Invalid choice {}, you must pick a number between {} and {}"
log.err(fmt.format(choice, 1, len(branches)))
return branches[choice - 1] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def autorun():
'''
Call the run method of the decorated class if the current file is the main file
'''
def wrapper(cls):
import inspect
if inspect.getmodule(cls).__name__ == "__main__":
cls().run()
return cls
return wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randrange(seq):
""" Yields random values from @seq until @seq is empty """ |
seq = seq.copy()
choose = rng().choice
remove = seq.remove
for x in range(len(seq)):
y = choose(seq)
remove(y)
yield y |
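A self-contained sketch of the same drain-in-random-order idea, using random.choice directly in place of the module's rng() helper (an assumption made only for this illustration):

import random

def shuffled_drain(seq):
    # Pick and remove one element at a time until the copy is exhausted.
    seq = seq.copy()
    while seq:
        y = random.choice(seq)
        seq.remove(y)
        yield y

print(list(shuffled_drain([1, 2, 3, 4])))  # e.g. [3, 1, 4, 2]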
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_ip_address(ip_address):
"""Validate the ip_address :param ip_address: (str) IP address :return: (bool) True if the ip_address is valid """ |
# Validate the IP address
log = logging.getLogger(mod_logger + '.validate_ip_address')
if not isinstance(ip_address, basestring):
log.warn('ip_address argument is not a string')
return False
# Ensure there are 3 dots
num_dots = 0
for c in ip_address:
if c == '.':
num_dots += 1
if num_dots != 3:
log.info('Not a valid IP address: {i}'.format(i=ip_address))
return False
# Use the socket module to test
try:
socket.inet_aton(ip_address)
except socket.error as e:
log.info('Not a valid IP address: {i}\n{e}'.format(i=ip_address, e=e))
return False
else:
log.info('Validated IP address: %s', ip_address)
return True |
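Stripped of the logging and basestring checks, the validation above boils down to counting dots and asking socket.inet_aton to parse the address; a minimal sketch (is_ipv4 is a hypothetical name):

import socket

def is_ipv4(addr):
    # The dot count matters: inet_aton() would otherwise accept short forms like '192.168.1'.
    if addr.count('.') != 3:
        return False
    try:
        socket.inet_aton(addr)
    except socket.error:
        return False
    return True

print(is_ipv4('192.168.1.10'))  # -> True
print(is_ipv4('192.168.1'))     # -> False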
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ip_addr():
"""Uses the ip addr command to enumerate IP addresses by device :return: (dict) Containing device: ip_address """ |
log = logging.getLogger(mod_logger + '.ip_addr')
log.debug('Running the ip addr command...')
ip_addr_output = {}
command = ['ip', 'addr']
try:
ip_addr_result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running command: {c}'.format(c=' '.join(command))
raise CommandError, msg, trace
ip_addr_lines = ip_addr_result['output'].split('\n')
for line in ip_addr_lines:
line = line.strip()
if line.startswith('inet6'):
continue
elif line.startswith('inet'):
parts = line.split()
try:
ip_address = parts[1].strip().split('/')[0]
except IndexError:
continue
else:
if not validate_ip_address(ip_address):
continue
else:
for part in parts:
part = part.strip()
if part.strip().startswith('eth') or part.strip().startswith('eno') or \
part.strip().startswith('ens'):
device = part
ip_addr_output[device] = ip_address
return ip_addr_output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0):
"""Configures the source IP address for a Linux interface :param source_ip_address: (str) Source IP address to change :param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets :param device_num: (int) Integer interface device number to configure :return: None :raises: TypeError, ValueError, OSError """ |
log = logging.getLogger(mod_logger + '.set_source_ip_for_interface')
if not isinstance(source_ip_address, basestring):
msg = 'arg source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(desired_source_ip_address, basestring):
msg = 'arg desired_source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not validate_ip_address(ip_address=source_ip_address):
msg = 'The arg source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
if not validate_ip_address(ip_address=desired_source_ip_address):
msg = 'The arg desired_source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
# Determine the device name based on the device_num
log.debug('Attempting to determine the device name based on the device_num arg...')
try:
int(device_num)
except ValueError:
if isinstance(device_num, basestring):
device_name = device_num
log.info('Provided device_num is not an int, assuming it is the full device name: {d}'.format(
d=device_name))
else:
raise TypeError('device_num arg must be a string or int')
else:
device_name = 'eth{n}'.format(n=str(device_num))
log.info('Provided device_num is an int, assuming device name is: {d}'.format(d=device_name))
# Build the command
# iptables -t nat -I POSTROUTING -o eth0 -s ${RA_ORIGINAL_IP} -j SNAT --to-source
command = ['iptables', '-t', 'nat', '-I', 'POSTROUTING', '-o', device_name, '-s',
source_ip_address, '-j', 'SNAT', '--to-source', desired_source_ip_address]
log.info('Running command: {c}'.format(c=command))
try:
result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
log.error(msg)
raise OSError, msg, trace
if int(result['code']) != 0:
msg = 'The iptables command produced an error with exit code: {c}, and output:\n{o}'.format(
c=result['code'], o=result['output'])
log.error(msg)
raise OSError(msg)
log.info('Successfully configured the source IP for {d} to be: {i}'.format(
d=device_name, i=desired_source_ip_address)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fmt(msg, *args, **kw):
# type: (str, *Any, **Any) -> str """ Generate shell color opcodes from a pretty coloring syntax. """ |
global is_tty
if len(args) or len(kw):
msg = msg.format(*args, **kw)
opcode_subst = '\x1b[\\1m' if is_tty else ''
return re.sub(r'<(\d{1,2})>', opcode_subst, msg) |
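A standalone sketch of the opcode substitution above: each <NN> marker becomes the ANSI escape "\x1b[NNm" when writing to a terminal and is stripped entirely otherwise (demo_fmt is a hypothetical name; the real function keys off the module-level is_tty flag):

import re

def demo_fmt(msg, is_tty=True):
    # "<32>" -> "\x1b[32m" on a terminal, dropped when the output is piped to a file.
    opcode_subst = '\x1b[\\1m' if is_tty else ''
    return re.sub(r'<(\d{1,2})>', opcode_subst, msg)

print(repr(demo_fmt('-- <32>done<0>')))         # '-- \x1b[32mdone\x1b[0m'
print(repr(demo_fmt('-- <32>done<0>', False)))  # '-- done'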
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cprint(msg, *args, **kw):
# type: (str, *Any, **Any) -> None """ Print colored message to stdout. """ |
if len(args) or len(kw):
msg = msg.format(*args, **kw)
print(fmt('{}<0>'.format(msg))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_cons3rt_agent_logs(self):
"""Send the cons3rt agent log file :return: """ |
log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
if self.cons3rt_agent_log_dir is None:
log.warn('There is no CONS3RT agent log directory on this system')
return
log.debug('Searching for log files in directory: {d}'.format(d=self.cons3rt_agent_log_dir))
for item in os.listdir(self.cons3rt_agent_log_dir):
item_path = os.path.join(self.cons3rt_agent_log_dir, item)
if os.path.isfile(item_path):
log.info('Sending email with cons3rt agent log file: {f}'.format(f=item_path))
try:
self.send_text_file(text_file=item_path)
except (TypeError, OSError, AssetMailerError):
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem sending CONS3RT agent log file: {f}\n{e}'.format(
n=ex.__class__.__name__, f=item_path, e=str(ex))
raise AssetMailerError, msg, trace
else:
log.info('Successfully sent email with file: {f}'.format(f=item_path)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_password(password):
"""Performs URL encoding for passwords :param password: (str) password to encode :return: (str) encoded password """ |
log = logging.getLogger(mod_logger + '.password_encoder')
log.debug('Encoding password: {p}'.format(p=password))
encoded_password = ''
for c in password:
encoded_password += encode_character(char=c)
log.debug('Encoded password: {p}'.format(p=encoded_password))
return encoded_password |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_character(char):
"""Returns URL encoding for a single character :param char (str) Single character to encode :returns (str) URL-encoded character """ |
if char == '!': return '%21'
elif char == '"': return '%22'
elif char == '#': return '%23'
elif char == '$': return '%24'
elif char == '%': return '%25'
elif char == '&': return '%26'
elif char == '\'': return '%27'
elif char == '(': return '%28'
elif char == ')': return '%29'
elif char == '*': return '%2A'
elif char == '+': return '%2B'
elif char == ',': return '%2C'
elif char == '-': return '%2D'
elif char == '.': return '%2E'
elif char == '/': return '%2F'
elif char == ':': return '%3A'
elif char == ';': return '%3B'
elif char == '<': return '%3C'
elif char == '=': return '%3D'
elif char == '>': return '%3E'
elif char == '?': return '%3F'
elif char == '@': return '%40'
elif char == '[': return '%5B'
elif char == '\\': return '%5C'
elif char == ']': return '%5D'
elif char == '^': return '%5E'
elif char == '_': return '%5F'
elif char == '`': return '%60'
elif char == '{': return '%7B'
elif char == '|': return '%7C'
elif char == '}': return '%7D'
elif char == '~': return '%7E'
elif char == ' ': return '%20'
else: return char |
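Illustrative use of the two helpers above, assuming they are importable as written; characters outside the table pass through unchanged:

print(encode_password('p@ss!'))  # -> 'p%40ss%21'
print(encode_character('a'))     # -> 'a' (not in the table, returned as-is)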
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_tags_as_filters(tags):
"""Get different tags as dicts ready to use as dropdown lists.""" |
# set dicts
actions = {}
contacts = {}
formats = {}
inspire = {}
keywords = {}
licenses = {}
md_types = dict()
owners = defaultdict(str)
srs = {}
unused = {}
# 0/1 values
compliance = 0
type_dataset = 0
# parsing tags
print(len(tags.keys()))
i = 0
for tag in sorted(tags.keys()):
i += 1
# actions
if tag.startswith("action"):
actions[tags.get(tag, tag)] = tag
continue
# compliance INSPIRE
elif tag.startswith("conformity"):
compliance = 1
continue
# contacts
elif tag.startswith("contact"):
contacts[tags.get(tag)] = tag
continue
# formats
elif tag.startswith("format"):
formats[tags.get(tag)] = tag
continue
# INSPIRE themes
elif tag.startswith("keyword:inspire"):
inspire[tags.get(tag)] = tag
continue
# keywords
elif tag.startswith("keyword:isogeo"):
keywords[tags.get(tag)] = tag
continue
# licenses
elif tag.startswith("license"):
licenses[tags.get(tag)] = tag
continue
# owners
elif tag.startswith("owner"):
owners[tags.get(tag)] = tag
continue
# SRS
elif tag.startswith("coordinate-system"):
srs[tags.get(tag)] = tag
continue
# types
elif tag.startswith("type"):
md_types[tags.get(tag)] = tag
if tag in ("type:vector-dataset", "type:raster-dataset"):
type_dataset += 1
else:
pass
continue
# ignored tags
else:
unused[tags.get(tag)] = tag
continue
# override API tags to allow all datasets filter - see #
if type_dataset == 2:
md_types["Donnée"] = "type:dataset"
else:
pass
# printing
# print("There are:"
# "\n{} actions"
# "\n{} contacts"
# "\n{} formats"
# "\n{} INSPIRE themes"
# "\n{} keywords"
# "\n{} licenses"
# "\n{} owners"
# "\n{} SRS"
# "\n{} types"
# "\n{} unused".format(len(actions),
# len(contacts),
# len(formats),
# len(inspire),
# len(keywords),
# len(licenses),
# len(owners),
# len(srs),
# len(md_types),
# len(unused)
# ))
# storing dicts
tags_parsed = {
"actions": actions,
"compliance": compliance,
"contacts": contacts,
"formats": formats,
"inspire": inspire,
"keywords": keywords,
"licenses": licenses,
"owners": owners,
"srs": srs,
"types": md_types,
"unused": unused,
}
# method ending
return tags_parsed |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _histplot_bins(column, bins=100):
"""Helper to get bins for histplot.""" |
col_min = np.min(column)
col_max = np.max(column)
return range(col_min, col_max + 2, max((col_max - col_min) // bins, 1)) |
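Illustrative call, assuming numpy and the helper above are importable; for an integer column whose span is smaller than @bins the step collapses to 1, otherwise it is (max - min) // bins:

import numpy as np

column = np.array([1, 5, 7, 42, 42, 99])
bins = _histplot_bins(column)  # range(1, 101, 1): span of 98 < 100 bins
print(list(bins)[:5])          # -> [1, 2, 3, 4, 5]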
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image(request, data):
""" Generates identicon image based on passed data. Arguments: data - Data which should be used for generating an identicon. This data will be used in order to create a digest which is used for generating the identicon. If the data passed is a hex digest already, the digest will be used as-is. Returns: Identicon image in raw format. """ |
# Get image width, height, padding, and format from GET parameters, or
# fall-back to default values from settings.
try:
width = int(request.GET.get("w", PYDENTICON_WIDTH))
except ValueError:
raise SuspiciousOperation("Identicon width must be a positive integer.")
try:
height = int(request.GET.get("h", PYDENTICON_HEIGHT))
except ValueError:
raise SuspiciousOperation("Identicon height must be a positive integer.")
output_format = request.GET.get("f", PYDENTICON_FORMAT)
try:
padding = [int(p) for p in request.GET["p"].split(",")]
except KeyError:
padding = PYDENTICON_PADDING
except ValueError:
raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")
if "i" in request.GET:
inverted = request.GET.get("i")
if inverted.lower() == "true":
inverted = True
elif inverted.lower() == "false":
inverted = False
else:
raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
else:
inverted = PYDENTICON_INVERT
# Validate the input parameters.
if not isinstance(width, int) or width <= 0:
raise SuspiciousOperation("Identicon width must be a positive integer.")
if not isinstance(height, int) or height <= 0:
raise SuspiciousOperation("Identicon height must be a positive integer.")
if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")
# Set-up correct content type based on requested identicon format.
if output_format == "png":
content_type = "image/png"
elif output_format == "ascii":
content_type = "text/plain"
else:
raise SuspiciousOperation("Unsupported identicon format requested - '%s' % output_format")
# Initialise a generator.
generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
foreground = PYDENTICON_FOREGROUND, background = PYDENTICON_BACKGROUND,
digest = PYDENTICON_DIGEST)
# Generate the identicon.
content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)
# Create and return the response.
response = HttpResponse(content, content_type=content_type)
return response |
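A hypothetical URLconf wiring for the view above; the module path, route, and pattern name are assumptions, not taken from the source:

# urls.py (illustrative only)
from django.urls import path

from myapp.views import image   # assumed module path for the view above

urlpatterns = [
    # e.g. GET /identicon/john.doe/?w=120&h=120&f=png&p=10,10,10,10&i=true
    path('identicon/<str:data>/', image, name='identicon'),
]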
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename(name):
# type: (str) -> None """ Give the currently developed hotfix a new name. """ |
from peltak.extra.gitflow import logic
if name is None:
name = click.prompt('Hotfix name')
logic.hotfix.rename(name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, name, *default):
# type: (str, Any) -> Any """ Get context value with the given name and optional default. Args: name (str):
The name of the context value. *default (Any):
If given and the key does not exist, this will be returned instead. If it's not given and the context value does not exist, `AttributeError` will be raised. Returns: The requested context value. If the value does not exist it will return `default` if given or raise `AttributeError`. Raises: AttributeError: If the value does not exist and `default` was not given. """ |
curr = self.values
for part in name.split('.'):
if part in curr:
curr = curr[part]
elif default:
return default[0]
else:
fmt = "Context value '{}' does not exist:\n{}"
raise AttributeError(fmt.format(
name, util.yaml_dump(self.values)
))
return curr |
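A small sketch of how the dotted-path lookup and optional default behave (the constructor and sample values are assumptions):

ctx = Context()    # assumed constructor; values are held in ctx.values
ctx.values = {'build': {'dir': 'out', 'verbose': False}}

ctx.get('build.dir')                  # -> 'out'
ctx.get('build.missing', 'fallback')  # -> 'fallback' (default swallows the miss)
ctx.get('build.missing')              # raises AttributeError with a YAML dump of the values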
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, name, value):
""" Set context value. Args: name (str):
The name of the context value to change. value (Any):
The new value for the selected context value """ |
curr = self.values
parts = name.split('.')
for i, part in enumerate(parts[:-1]):
try:
curr = curr.setdefault(part, {})
except AttributeError:
raise InvalidPath('.'.join(parts[:i + 1]))
try:
curr[parts[-1]] = value
except TypeError:
raise InvalidPath('.'.join(parts[:-1])) |
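A matching sketch for set, showing how intermediate dicts are created and when InvalidPath is raised (values are illustrative):

ctx = Context()    # assumed constructor
ctx.values = {}

ctx.set('docker.registry.url', 'https://registry.example.com')
# ctx.values == {'docker': {'registry': {'url': 'https://registry.example.com'}}}

ctx.set('docker.registry.url.port', 8080)
# raises InvalidPath('docker.registry.url'): the existing string value cannot hold nested keys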
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alias_exists(alias, keystore_path=None, keystore_password='changeit'):
"""Checks if an alias already exists in a keystore :param alias: :param keystore_path: :param keystore_password: :return: (bool) True when the alias already exists in the keystore :raises: OSError """ |
log = logging.getLogger(mod_logger + '.alias_exists')
if not isinstance(alias, basestring):
msg = 'alias arg must be a string'
log.error(msg)
raise OSError(msg)
# Ensure JAVA_HOME is set
log.debug('Determining JAVA_HOME...')
try:
java_home = os.environ['JAVA_HOME']
except KeyError:
msg = 'JAVA_HOME is required but not set'
log.error(msg)
raise OSError(msg)
# Ensure keytool can be found
keytool = os.path.join(java_home, 'bin', 'keytool')
if not os.path.isfile(keytool):
msg = 'keytool file not found: {f}'.format(f=keytool)
log.error(msg)
raise OSError(msg)
# Find the cacerts file
if keystore_path is None:
keystore_path = os.path.join(java_home, 'lib', 'security', 'cacerts')
# If the JRE cacerts location is not found, look for the JDK cacerts
if not os.path.isfile(keystore_path):
keystore_path = os.path.join(java_home, 'jre', 'lib', 'security', 'cacerts')
if not os.path.isfile(keystore_path):
msg = 'Unable to find cacerts file'
log.error(msg)
raise OSError(msg)
log.info('Checking keystore {k} for alias: {a}...'.format(k=keystore_path, a=alias))
# Build the keytool command
command = [keytool, '-keystore', keystore_path, '-storepass', keystore_password, '-list']
# Running the keytool list command
log.debug('Running the keytool list command...')
try:
result = run_command(command)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running keytool on keystore: {k}\n{e}'.format(k=keystore_path, e=str(ex))
log.error(msg)
raise OSError, msg, trace
if result['code'] != 0:
msg = 'keytool command exited with a non-zero code: {c}, and produced output: {o}'.format(
c=result['code'], o=result['output'])
log.error(msg)
raise OSError(msg)
# Check for the alias in the output
if alias in result['output']:
log.info('Found alias {a} in keystore: {k}'.format(a=alias, k=keystore_path))
return True
else:
log.info('Alias {a} was not found in keystore: {k}'.format(a=alias, k=keystore_path))
return False |
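An illustrative call; the alias is a placeholder and JAVA_HOME must point at a JDK/JRE for keytool to be found:

if alias_exists('corp-root-ca', keystore_password='changeit'):
    print('Alias already present, skipping certificate import')
else:
    print('Alias not found, certificate import required')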
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]: """Define the filter function that every node must implement. :param query: The sqlalchemy query. :type query: Query :param entity: The entity model. :type entity: type :return: The filtered query. :rtype: Tuple[Query, Any] """ |
raise NotImplementedError('You must implement this.') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_relation(self, related_model: type, relations: List[str]) -> Tuple[Optional[List[type]], Optional[type]]: """Transform the list of relation names into a list of classes. :param related_model: The model of the query. :type related_model: type :param relations: The relation list obtained from `_extract_relations`. :type relations: List[str] :return: Tuple whose first element is the list of relation classes and whose second element is the last relation class. :rtype: Tuple[Optional[List[type]], Optional[type]] """ |
relations_list, last_relation = [], related_model
for relation in relations:
relationship = getattr(last_relation, relation, None)
if relationship is None:
return (None, None)
last_relation = relationship.mapper.class_
relations_list.append(last_relation)
return (relations_list, last_relation) |
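A sketch of the resolution this helper performs, using hypothetical SQLAlchemy models (model and relationship names are assumptions; imports follow the SQLAlchemy 1.4+ layout):

from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    children = relationship('Child', back_populates='parent')

class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))
    parent = relationship('Parent', back_populates='children')

# With related_model=Parent and relations=['children'] the helper walks
# Parent.children -> .mapper.class_ == Child and returns ([Child], Child);
# an unknown relation name short-circuits to (None, None).
print(getattr(Parent, 'children').mapper.class_)    # <class '...Child'>
print(getattr(Parent, 'no_such_relation', None))    # None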
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _join_tables(self, query: Query, join_models: Optional[List[type]]) -> Query: """Method to perform the join when a relation is found. :param query: The sqlalchemy query. :type query: Query :param join_models: The list of models to join, obtained from the method `_get_relation`. :type join_models: Optional[List[type]] :return: The new Query with the joined tables. :rtype: Query """ |
joined_query = query
# Create the list of already joined entities
joined_tables = [mapper.class_ for mapper in query._join_entities]
if join_models:
for j_model in join_models:
if j_model not in joined_tables:
# /!\ join return a new query /!\
joined_query = joined_query.join(j_model)
return joined_query |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_sms(phone: str, message: str, sender: str='', **kw):
""" Sends SMS via Kajala Group SMS API. Contact [email protected] for access. :param phone: Phone number :param message: Message to be esnd :param sender: Sender (max 11 characters) :param kw: Variable key-value pairs to be sent to SMS API :return: Response from requests.post """ |
if not hasattr(settings, 'SMS_TOKEN'):
raise Exception('Invalid configuration: settings.SMS_TOKEN missing')
if not sender:
sender = settings.SMS_SENDER_NAME
if not sender:
raise Exception('Invalid configuration: settings.SMS_SENDER_NAME missing')
headers = {
'Content-Type': 'application/json',
'Authorization': 'Token ' + settings.SMS_TOKEN,
}
data = {
'dst': phone_filter(phone),
'msg': message,
'src': sender,
}
for k, v in kw.items():
data[k] = v
return requests.post("https://sms.kajala.com/api/sms/", json=data, headers=headers) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _meta_get_resource_sync(md_uuid):
"""Just a meta func to get execution time""" |
isogeo.resource(id_resource=md_uuid)
elapsed = default_timer() - START_TIME
time_completed_at = "{:5.2f}s".format(elapsed)
print("{0:<30} {1:>20}".format(md_uuid, time_completed_at))
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(self, app):
""" Setup properties from parent app on the command """ |
self.logger = app.logger
self.shell.logger = self.logger
if not self.command_name:
raise EmptyCommandNameException()
self.app = app
self.arguments_declaration = self.arguments
self.arguments = app.arguments
if self.use_subconfig:
_init_config(self)
else:
self.config = self.app.config |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_first_builder_window(builder):
"""Get the first toplevel widget in a Gtk.Builder hierarchy. This is mostly used for guessing purposes, and an explicit naming is always going to be a better situation. """ |
for obj in builder.get_objects():
if isinstance(obj, Gtk.Window):
# first window
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_slave(self, slave, container_name="widget"):
"""Add a slave delegate """ |
cont = getattr(self, container_name, None)
if cont is None:
raise AttributeError(
'Container name must be a member of the delegate')
cont.add(slave.widget)
self.slaves.append(slave)
return slave |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def round_sig(x, sig):
"""Round the number to the specified number of significant figures""" |
return round(x, sig - int(floor(log10(abs(x)))) - 1) |
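A few worked values; the helper relies on floor and log10 from math, so the import is repeated here to make the sketch stand alone:

from math import floor, log10   # required by round_sig itself

print(round_sig(1234.567, 3))    # 1230.0  -> round(x, 3 - 3 - 1) == round(x, -1)
print(round_sig(0.00123456, 2))  # 0.0012  -> round(x, 2 - (-3) - 1) == round(x, 4)
print(round_sig(-987.6, 1))      # -1000.0 -> round(x, 1 - 2 - 1) == round(x, -2)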
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_file(filename, as_text=False):
"""Open the file gunzipping it if it ends with .gz. If as_text the file is opened in text mode, otherwise the file's opened in binary mode.""" |
if filename.lower().endswith('.gz'):
if as_text:
return gzip.open(filename, 'rt')
else:
return gzip.open(filename, 'rb')
else:
if as_text:
return open(filename, 'rt')
else:
return open(filename, 'rb') |
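Illustrative calls (file names are placeholders; gzip is imported by the defining module):

# Text mode, transparently gunzipped because the name ends in .gz
with open_file('molecules.sdf.gz', as_text=True) as fh:
    first_line = fh.readline()

# Binary mode for a plain file
with open_file('molecules.sdf') as fh:
    raw = fh.read(1024)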
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
"""This is a temp hack to write the minimal metadata that Squonk needs. Will needs to be replaced with something that allows something more complete to be written. :param outputBase: Base name for the file to write to :param thinOutput: Write only new data, not structures. Result type will be BasicObject :param valueClasses: A dict that describes the Java class of the value properties (used by Squonk) :param datasetMetaProps: A dict with metadata properties that describe the datset as a whole. The keys used for these metadata are up to the user, but common ones include source, description, created, history. :param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value is the name of the field being described, and a key name values wholes values is a map of metadata properties. The keys used for these metadata are up to the user, but common ones include source, description, created, history. """ |
meta = {}
props = {}
# TODO add created property - how to handle date formats?
if datasetMetaProps:
props.update(datasetMetaProps)
if fieldMetaProps:
meta["fieldMetaProps"] = fieldMetaProps
if len(props) > 0:
meta["properties"] = props
if valueClassMappings:
meta["valueClassMappings"] = valueClassMappings
if thinOutput:
meta['type'] = 'org.squonk.types.BasicObject'
else:
meta['type'] = 'org.squonk.types.MoleculeObject'
s = json.dumps(meta)
meta_file = open(outputBase + '.metadata', 'w')
meta_file.write(s)
meta_file.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_metrics(baseName, values):
"""Write the metrics data :param baseName: The base name of the output files. e.g. extensions will be appended to this base name :param values dictionary of values to write """ |
m = open(baseName + '_metrics.txt', 'w')
for key in values:
m.write(key + '=' + str(values[key]) + "\n")
m.flush()
m.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_molecule_object_dict(source, format, values):
"""Generate a dictionary that represents a Squonk MoleculeObject when written as JSON :param source: Molecules in molfile or smiles format :param format: The format of the molecule. Either 'mol' or 'smiles' :param values: Optional dict of values (properties) for the MoleculeObject """ |
m = {"uuid": str(uuid.uuid4()), "source": source, "format": format}
if values:
m["values"] = values
return m |
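A quick sketch of the dict this produces; the SMILES and property values are made up, and json is imported only to print the result:

import json

mol = generate_molecule_object_dict('c1ccccc1', 'smiles', {'logP': 1.69})
print(json.dumps(mol, indent=2))
# -> {"uuid": "<random uuid4>", "source": "c1ccccc1", "format": "smiles", "values": {"logP": 1.69}}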
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_nexus(query_url, timeout_sec, basic_auth=None):
"""Queries Nexus for an artifact :param query_url: (str) Query URL :param timeout_sec: (int) query timeout :param basic_auth (HTTPBasicAuth) object or none :return: requests.Response object :raises: RuntimeError """ |
log = logging.getLogger(mod_logger + '.query_nexus')
# Attempt to query Nexus
retry_sec = 5
max_retries = 6
try_num = 1
query_success = False
nexus_response = None
while try_num <= max_retries:
if query_success:
break
log.debug('Attempt # {n} of {m} to query the Nexus URL: {u}'.format(n=try_num, u=query_url, m=max_retries))
try:
nexus_response = requests.get(query_url, auth=basic_auth, stream=True, timeout=timeout_sec)
except requests.exceptions.Timeout:
_, ex, trace = sys.exc_info()
msg = '{n}: Nexus initial query timed out after {t} seconds:\n{e}'.format(
n=ex.__class__.__name__, t=timeout_sec, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info('Retrying query in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
_, ex, trace = sys.exc_info()
msg = '{n}: Nexus initial query failed with the following exception:\n{e}'.format(
n=ex.__class__.__name__, r=retry_sec, e=str(ex))
log.warn(msg)
if try_num < max_retries:
log.info('Retrying query in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
else:
query_success = True
try_num += 1
if not query_success:
msg = 'Unable to query Nexus after {m} attempts using URL: {u}'.format(
u=query_url, m=max_retries)
log.error(msg)
raise RuntimeError(msg)
if nexus_response.status_code != 200:
msg = 'Nexus request returned code {c}, unable to query Nexus using URL: {u}'.format(
u=query_url, c=nexus_response.status_code)
log.error(msg)
raise RuntimeError(msg)
return nexus_response |
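An illustrative call; the URL and credentials are placeholders, not a real Nexus endpoint:

from requests.auth import HTTPBasicAuth

url = ('https://nexus.example.com/service/local/artifact/maven/redirect'
       '?r=releases&g=com.example&a=myapp&v=1.0.0&p=jar')
response = query_nexus(url, timeout_sec=30, basic_auth=HTTPBasicAuth('user', 'password'))
print(response.status_code, response.headers.get('Content-Length'))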
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Handles calling this module as a script :return: None """ |
log = logging.getLogger(mod_logger + '.main')
parser = argparse.ArgumentParser(description='This Python module retrieves artifacts from Nexus.')
parser.add_argument('-u', '--url', help='Nexus Server URL', required=False)
parser.add_argument('-g', '--groupId', help='Group ID', required=True)
parser.add_argument('-a', '--artifactId', help='Artifact ID', required=True)
parser.add_argument('-v', '--version', help='Artifact Version', required=True)
parser.add_argument('-c', '--classifier', help='Artifact Classifier', required=False)
parser.add_argument('-p', '--packaging', help='Artifact Packaging', required=True)
parser.add_argument('-r', '--repo', help='Nexus repository name', required=False)
parser.add_argument('-d', '--destinationDir', help='Directory to download to', required=True)
parser.add_argument('-n', '--username', help='Nexus username', required=True)
parser.add_argument('-w', '--password', help='Nexus password', required=True)
args = parser.parse_args()
try:
get_artifact(
nexus_url=args.url,
group_id=args.groupId,
artifact_id=args.artifactId,
version=args.version,
classifier=args.classifier,
packaging=args.packaging,
repo=args.repo,
destination_dir=args.destinationDir,
username=args.username,
password=args.password
)
except Exception as e:
msg = 'Caught exception {n}, unable to download artifact from Nexus\n{s}'.format(
n=e.__class__.__name__, s=e)
log.error(msg)
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_doc(additional_doc=False, field_prefix='$', field_suffix=':', indent=4):
"""Return a formated string containing documentation about the audio fields. """ |
if additional_doc:
f = fields.copy()
f.update(additional_doc)
else:
f = fields
field_length = get_max_field_length(f)
field_length = field_length + len(field_prefix) + len(field_suffix) + 4
description_indent = ' ' * (indent + field_length)
output = ''
for field, description in sorted(f.items()):
description = description['description']
field = ' ' * indent + field_prefix + field + ':'
output += field.ljust(field_length) + \
textwrap.fill(
description,
width=78,
initial_indent=description_indent,
subsequent_indent=description_indent
)[field_length:] + '\n\n\n'
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_images(self):
# type: () -> List[str] """ List images stored in the registry. Returns: list[str]: List of image names. """ |
r = self.get(self.registry_url + '/v2/_catalog', auth=self.auth)
return r.json()['repositories'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_tags(self, image_name):
# type: (str) -> Iterator[str] """ List all tags for the given image stored in the registry. Args: image_name (str):
The name of the image to query. The image must be present on the registry for this call to return any values. Returns: list[str]: List of tags for that image. """ |
tags_url = self.registry_url + '/v2/{}/tags/list'
r = self.get(tags_url.format(image_name), auth=self.auth)
data = r.json()
if 'tags' in data:
return reversed(sorted(data['tags']))
return [] |
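A hypothetical use of these methods; the client class name and constructor are assumptions about how the surrounding class is instantiated:

client = RegistryClient('https://registry.example.com', auth=('user', 'password'))  # assumed constructor
for image_name in client.list_images():
    tags = list(client.list_tags(image_name))    # reverse-sorted, so recent-looking tags come first
    print('{}: {}'.format(image_name, ', '.join(tags) or '<no tags>'))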
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(asset_dir):
"""Command line call to validate an asset structure :param asset_dir: (full path to the asset dir) :return: (int) """ |
try:
asset_name = validate_asset_structure(asset_dir_path=asset_dir)
except Cons3rtAssetStructureError:
_, ex, trace = sys.exc_info()
msg = 'Cons3rtAssetStructureError: Problem with asset validation\n{e}'.format(e=str(ex))
print('ERROR: {m}'.format(m=msg))
return 1
print('Validated asset with name: {n}'.format(n=asset_name))
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(asset_dir, dest_dir):
"""Command line call to create an asset zip :param asset_dir: (full path to the asset dir) :param dest_dir: (full path to the destination directory) :return: (int) """ |
val = validate(asset_dir=asset_dir)
if val != 0:
return 1
try:
asset_zip = make_asset_zip(asset_dir_path=asset_dir, destination_directory=dest_dir)
except AssetZipCreationError:
_, ex, trace = sys.exc_info()
msg = 'AssetZipCreationError: Problem with asset zip creation\n{e}'.format(e=str(ex))
print('ERROR: {m}'.format(m=msg))
return 1
print('Created asset zip file: {z}'.format(z=asset_zip))
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
""" This view should return a list of all the addresses the identity has for the supplied query parameters. Currently only supports address_type and default params Always excludes addresses with optedout = True """ |
identity_id = self.kwargs["identity_id"]
address_type = self.kwargs["address_type"]
use_ct = "use_communicate_through" in self.request.query_params
default_only = "default" in self.request.query_params
if use_ct:
identity = Identity.objects.select_related("communicate_through").get(
id=identity_id
)
if identity.communicate_through is not None:
identity = identity.communicate_through
else:
identity = Identity.objects.get(id=identity_id)
addresses = identity.get_addresses_list(address_type, default_only)
return [Address(addr) for addr in addresses] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_api_response(self, response):
"""Check API response and raise exceptions if needed. :param requests.models.Response response: request response to check """ |
# check response
if response.status_code == 200:
return True
elif response.status_code >= 400:
logging.error(
"{}: {} - {} - URL: {}".format(
response.status_code,
response.reason,
response.json().get("error"),
response.request.url,
)
)
return False, response.status_code |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_is_uuid(self, uuid_str: str):
"""Check if it's an Isogeo UUID handling specific form. :param str uuid_str: UUID string to check """ |
# check uuid type
if not isinstance(uuid_str, str):
raise TypeError("'uuid_str' expected a str value.")
else:
pass
# handle Isogeo specific UUID in XML exports
if "isogeo:metadata" in uuid_str:
uuid_str = "urn:uuid:{}".format(uuid_str.split(":")[-1])
else:
pass
# test it
try:
uid = UUID(uuid_str)
return uid.hex == uuid_str.replace("-", "").replace("urn:uuid:", "")
except ValueError as e:
logging.error(
"uuid ValueError. {} ({}) -- {}".format(type(uuid_str), uuid_str, e)
)
return False |
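Illustrative checks; the checker class name is assumed and the UUIDs are made up:

checker = IsogeoChecker()   # assumed name of the class carrying these check_* methods

checker.check_is_uuid('0a1b2c3d4e5f40718293a4b5c6d7e8f9')                # True (plain hex)
checker.check_is_uuid('urn:uuid:0a1b2c3d-4e5f-4071-8293-a4b5c6d7e8f9')   # True (URN form)
checker.check_is_uuid('urn:isogeo:metadata:uuid:0a1b2c3d-4e5f-4071-8293-a4b5c6d7e8f9')  # True
checker.check_is_uuid('not-a-uuid')                                      # False, logs a ValueError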
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_edit_tab(self, tab: str, md_type: str):
"""Check if asked tab is part of Isogeo web form and reliable with metadata type. :param str tab: tab to check. Must be one one of EDIT_TABS attribute :param str md_type: metadata type. Must be one one of FILTER_TYPES """ |
# check parameters types
if not isinstance(tab, str):
raise TypeError("'tab' expected a str value.")
else:
pass
if not isinstance(md_type, str):
raise TypeError("'md_type' expected a str value.")
else:
pass
# check parameters values
if tab not in EDIT_TABS:
raise ValueError(
"'{}' isn't a valid edition tab. "
"Available values: {}".format(tab, " | ".join(EDIT_TABS))
)
else:
pass
if md_type not in FILTER_TYPES:
if md_type in FILTER_TYPES.values():
md_type = self._convert_md_type(md_type)
else:
raise ValueError(
"'{}' isn't a valid metadata type. "
"Available values: {}".format(md_type, " | ".join(FILTER_TYPES))
)
else:
pass
# check adequation tab/md_type
if md_type not in EDIT_TABS.get(tab):
raise ValueError(
"'{}' isn't a valid tab for a '{}'' metadata."
" Only for these types: {}.".format(tab, md_type, EDIT_TABS.get(tab))
)
else:
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_filter_specific_md(self, specific_md: list):
"""Check if specific_md parameter is valid. :param list specific_md: list of specific metadata UUID to check """ |
if isinstance(specific_md, list):
if len(specific_md) > 0:
# checking UUIDs and poping bad ones
for md in specific_md:
if not self.check_is_uuid(md):
specific_md.remove(md)
logging.error("Metadata UUID is not correct: {}".format(md))
# joining survivors
specific_md = ",".join(specific_md)
else:
specific_md = ""
else:
raise TypeError("'specific_md' expects a list")
return specific_md |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_filter_specific_tag(self, specific_tag: list):
"""Check if specific_tag parameter is valid. :param list specific_tag: list of specific tag to check """ |
if isinstance(specific_tag, list):
if len(specific_tag) > 0:
specific_tag = ",".join(specific_tag)
else:
specific_tag = ""
else:
raise TypeError("'specific_tag' expects a list")
return specific_tag |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def version_cli(ctx, porcelain):
# type: (click.Context, bool) -> None """ Show project version. Has sub commands. For this command to work you must specify where the project version is stored. You can do that with version_file conf variable. peltak supports multiple ways to store the project version. Right now you can store it in a python file using built-in __version__ variable. You can use node.js package.json and keep the version there or you can just use a plain text file that just holds the raw project version. The appropriate storage is guessed based on the file type and name. Example Configuration:: version_file: 'src/mypackage/__init__.py' Examples: \b $ peltak version # Pretty print current version $ peltak version --porcelain # Print version as raw string $ peltak version bump patch # Bump patch version component $ peltak version bump minor # Bump minor version component $ peltak version bump major # Bump major version component $ peltak version bump release # same as version bump patch $ peltak version bump --exact=1.2.1 # Set project version to 1.2.1 """ |
if ctx.invoked_subcommand:
return
from peltak.core import log
from peltak.core import versioning
current = versioning.current()
if porcelain:
print(current)
else:
log.info("Version: <35>{}".format(current)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bump_version(component='patch', exact=None):
# type: (str, str) -> None """ Bump current project version without committing anything. No tags are created either. Examples: \b $ peltak version bump patch # Bump patch version component $ peltak version bump minor # Bump minor version component $ peltak version bump major # Bump major version component $ peltak version bump release # same as version bump patch $ peltak version bump --exact=1.2.1 # Set project version to 1.2.1 """ |
from peltak.core import log
from peltak.core import versioning
old_ver, new_ver = versioning.bump(component, exact)
log.info("Project version bumped")
log.info(" old version: <35>{}".format(old_ver))
log.info(" new version: <35>{}".format(new_ver)) |