sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class) |
---|---|---|
def by_lookup(self, style_key, style_value):
"""Return a processor that extracts the style from `mapping`.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with a "lookup" key whose value is a "mapping" style
value that maps a field value to either a style attribute (str) or
a boolean flag indicating whether to use the style attribute named by
`style_key`.
Returns
-------
A function.
"""
style_attr = style_key if self.style_types[style_key] is bool else None
mapping = style_value["lookup"]
def proc(value, result):
try:
lookup_value = mapping[value]
except (KeyError, TypeError):
# ^ TypeError is included in case the user passes non-hashable
# values.
return result
if not lookup_value:
return result
return self.render(style_attr or lookup_value, result)
return proc | Return a processor that extracts the style from `mapping`.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with a "lookup" key whose value is a "mapping" style
value that maps a field value to either a style attribute (str) or
a boolean flag indicating whether to use the style attribute named by
`style_key`.
Returns
-------
A function. | entailment |
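For readers who want the lookup idea in isolation, here is a minimal self-contained sketch of the same pattern; the toy render callable and the color names are illustrative stand-ins, not the class's real collaborators.
def make_lookup_proc(mapping, render):
    # mapping: field value -> style attribute (str) or boolean flag
    def proc(value, result):
        try:
            attr = mapping[value]
        except (KeyError, TypeError):  # unmapped value, or an unhashable value
            return result
        if not attr:
            return result
        return render(attr, result)
    return proc

style_value = {"lookup": {"ok": "green", "failed": "red"}}
proc = make_lookup_proc(style_value["lookup"],
                        lambda attr, text: "[%s]%s" % (attr, text))
print(proc("ok", "ok"))        # [green]ok
print(proc("unknown", "meh"))  # meh, returned unstyled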
def by_re_lookup(self, style_key, style_value, re_flags=0):
"""Return a processor for a "re_lookup" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with a "re_lookup" style value that consists of a
sequence of items where each item should have the form `(regexp,
x)`, where regexp is a regular expression to match against the
field value and x is either a style attribute (str) or a boolean
flag indicating whether to use the style attribute named by `style_key`.
re_flags : int
Passed through as flags argument to re.compile.
Returns
-------
A function.
"""
style_attr = style_key if self.style_types[style_key] is bool else None
regexps = [(re.compile(r, flags=re_flags), v)
for r, v in style_value["re_lookup"]]
def proc(value, result):
if not isinstance(value, six.string_types):
return result
for r, lookup_value in regexps:
if r.search(value):
if not lookup_value:
return result
return self.render(style_attr or lookup_value, result)
return result
return proc | Return a processor for a "re_lookup" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with a "re_lookup" style value that consists of a
sequence of items where each item should have the form `(regexp,
x)`, where regexp is a regular expression to match against the
field value and x is either a style attribute (str) or a boolean
flag indicating whether to use the style attribute named by `style_key`.
re_flags : int
Passed through as flags argument to re.compile.
Returns
-------
A function. | entailment |
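For comparison, a self-contained sketch of the regular-expression variant; the (regexp, style) pairs and the toy render default are illustrative only.
import re

pairs = [(re.compile(r"^err", flags=re.IGNORECASE), "red"),
         (re.compile(r"warn"), "yellow")]

def re_proc(value, result, render=lambda attr, text: "[%s]%s" % (attr, text)):
    # non-string values are passed through unstyled, as in the method above
    if not isinstance(value, str):
        return result
    for rgx, attr in pairs:
        if rgx.search(value):
            return render(attr, result) if attr else result
    return result

print(re_proc("ERROR: boom", "ERROR: boom"))  # [red]ERROR: boom
print(re_proc("all fine", "all fine"))        # unchanged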
def by_interval_lookup(self, style_key, style_value):
"""Return a processor for an "interval" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with an "interval" key whose value consists of a
sequence of tuples where each tuple should have the form `(start,
end, x)`, where start is the start of the interval (inclusive), end
is the end of the interval, and x is either a style attribute (str)
or a boolean flag indicating whether to use the style attribute named by
`style_key`.
Returns
-------
A function.
"""
style_attr = style_key if self.style_types[style_key] is bool else None
intervals = style_value["interval"]
def proc(value, result):
try:
value = float(value)
except TypeError:
return result
for start, end, lookup_value in intervals:
if start is None:
start = float("-inf")
if end is None:
end = float("inf")
if start <= value < end:
if not lookup_value:
return result
return self.render(style_attr or lookup_value, result)
return result
return proc | Return a processor for an "interval" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
A dictionary with an "interval" key whose value consists of a
sequence of tuples where each tuple should have the form `(start,
end, x)`, where start is the start of the interval (inclusive), end
is the end of the interval, and x is either a style attribute (str)
or a boolean flag indicating whether to use the style attribute named by
`style_key`.
Returns
-------
A function. | entailment |
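A quick standalone illustration of the interval lookup: open-ended bounds are written as None, exactly as the processor above treats them (this is a simplified sketch, not the class's code).
intervals = [(None, 50, "red"), (50, 80, "yellow"), (80, None, "green")]

def interval_style(value):
    try:
        value = float(value)
    except (TypeError, ValueError):  # slightly more permissive than the original
        return None
    for start, end, attr in intervals:
        start = float("-inf") if start is None else start
        end = float("inf") if end is None else end
        if start <= value < end:
            return attr
    return None

print(interval_style(42))  # red
print(interval_style(95))  # green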
def post_from_style(self, column_style):
"""Yield post-format processors based on `column_style`.
Parameters
----------
column_style : dict
A style where the top-level keys correspond to style attributes
such as "bold" or "color".
Returns
-------
A generator object.
"""
flanks = Flanks()
yield flanks.split_flanks
fns = {"simple": self.by_key,
"lookup": self.by_lookup,
"re_lookup": self.by_re_lookup,
"interval": self.by_interval_lookup}
for key in self.style_types:
if key not in column_style:
continue
vtype = value_type(column_style[key])
fn = fns[vtype]
args = [key, column_style[key]]
if vtype == "re_lookup":
args.append(sum(getattr(re, f)
for f in column_style.get("re_flags", [])))
yield fn(*args)
yield flanks.join_flanks | Yield post-format processors based on `column_style`.
Parameters
----------
column_style : dict
A style where the top-level keys correspond to style attributes
such as "bold" or "color".
Returns
-------
A generator object. | entailment |
def split_flanks(self, _, result):
"""Return `result` without flanking whitespace.
"""
if not result.strip():
self.left, self.right = "", ""
return result
match = self.flank_re.match(result)
assert match, "This regexp should always match"
self.left, self.right = match.group(1), match.group(3)
return match.group(2) | Return `result` without flanking whitespace. | entailment |
def render(self, style_attr, value):
"""Prepend the terminal code for `style_attr` to `value`.
Parameters
----------
style_attr : str
A style attribute (e.g., "bold" or "blue").
value : str
The value to render.
Returns
-------
The code for `style_attr` (e.g., "\x1b[1m" for bold) plus the
original value.
"""
if not value.strip():
# We've got an empty string. Don't bother adding any
# codes.
return value
return six.text_type(getattr(self.term, style_attr)) + value | Prepend the terminal code for `style_attr` to `value`.
Parameters
----------
style_attr : str
A style attribute (e.g., "bold" or "blue").
value : str
The value to render.
Returns
-------
The code for `style_attr` (e.g., "\x1b[1m" for bold) plus the
original value. | entailment |
def post_from_style(self, column_style):
"""A Terminal-specific wrapper around StyleProcessors.post_from_style that resets any codes before the flanking whitespace is added back.
"""
for proc in super(TermProcessors, self).post_from_style(column_style):
if proc.__name__ == "join_flanks":
# Reset any codes before adding back whitespace.
yield self._maybe_reset()
yield proc | A Terminal-specific wrapper around StyleProcessors.post_from_style that resets any codes before the flanking whitespace is added back. | entailment |
def connect(self, cback):
"See signal"
return self.signal.connect(cback,
subscribers=self.subscribers,
instance=self.instance) | See signal | entailment |
def disconnect(self, cback):
"See signal"
return self.signal.disconnect(cback,
subscribers=self.subscribers,
instance=self.instance) | See signal | entailment |
def get_subscribers(self):
"""Get per-instance subscribers from the signal.
"""
data = self.signal.instance_subscribers
if self.instance not in data:
data[self.instance] = MethodAwareWeakList()
return data[self.instance] | Get per-instance subscribers from the signal. | entailment |
def notify(self, *args, **kwargs):
"See signal"
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop).run(*args, **kwargs) | See signal | entailment |
def notify_prepared(self, args=None, kwargs=None, **opts):
"""Like notify but allows passing more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
loop = kwargs.pop('loop', self.loop)
return self.signal.prepare_notification(
subscribers=self.subscribers, instance=self.instance,
loop=loop, **opts).run(*args, **kwargs) | Like notify but allows passing more options to the underlying
`Signal.prepare_notification()` method.
The allowed options are:
notify_external : bool
a flag indicating if the notification should also include the
registered `~.external.ExternalSignaller` in the notification. It's
``True`` by default | entailment |
def connect(self, cback, subscribers=None, instance=None):
"""Add a function or a method as a handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
def _connect(cback):
self._connect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fconnect(instance, cback, subscribers,
_connect, notify)
else:
result = self._fconnect(cback, subscribers, _connect, notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._connect(subscribers, cback)
result = None
return result | Add a function or a method as a handler of this signal.
Any handler added can be a coroutine.
:param cback: the callback (or *handler*) to be added to the set
:returns: ``None`` or the value returned by the corresponding wrapper | entailment |
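To make the connect/notify surface concrete without pulling in the full signal machinery, here is a toy signal with the same shape; the wrapper hooks (_fconnect and friends), weak references and async support are deliberately omitted.
class TinySignal:
    def __init__(self):
        self.subscribers = []

    def connect(self, cback):
        # mirror the duplicate check the real subscriber container performs
        if cback not in self.subscribers:
            self.subscribers.append(cback)

    def disconnect(self, cback):
        if cback in self.subscribers:
            self.subscribers.remove(cback)

    def notify(self, *args, **kwargs):
        return [cb(*args, **kwargs) for cb in self.subscribers]

sig = TinySignal()
sig.connect(lambda msg: print("got:", msg))
sig.notify("hello")  # prints: got: hello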
def disconnect(self, cback, subscribers=None, instance=None):
"""Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be removed from the set
:returns: ``None`` or the value returned by the corresponding wrapper
"""
if subscribers is None:
subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
def _disconnect(cback):
self._disconnect(subscribers, cback)
notify = partial(self._notify_one, instance)
if instance is not None:
result = self._fdisconnect(instance, cback, subscribers,
_disconnect, notify)
else:
result = self._fdisconnect(cback, subscribers, _disconnect,
notify)
if inspect.isawaitable(result):
result = pull_result(result)
else:
self._disconnect(subscribers, cback)
result = None
return result | Remove a previously added function or method from the set of the
signal's handlers.
:param cback: the callback (or *handler*) to be removed from the set
:returns: ``None`` or the value returned by the corresponding wrapper | entailment |
def ext_publish(self, instance, loop, *args, **kwargs):
"""If 'external_signaller' is defined, calls its publish method to
notify external event systems.
This is for internal usage only, but it's documented because it's part
of the interface with external notification systems.
"""
if self.external_signaller is not None:
# Assumes that the loop is managed by the external handler
return self.external_signaller.publish_signal(self, instance, loop,
args, kwargs) | If 'external_signaller' is defined, calls its publish method to
notify external event systems.
This is for internal usage only, but it's documented because it's part
of the interface with external notification systems. | entailment |
def prepare_notification(self, *, subscribers=None, instance=None,
loop=None, notify_external=True):
"""Sets up and configures an `~.utils.Executor`:class: instance."""
# merge callbacks added to the class level with those added to the
# instance, giving the former precedence while preserving overall
# order
self_subscribers = self.subscribers.copy()
# add in callbacks declared in the main class body and marked with
# @handler
if (instance is not None and self.name and
isinstance(instance.__class__, SignalAndHandlerInitMeta)):
class_handlers = type(instance)._get_class_handlers(
self.name, instance)
for ch in class_handlers:
# bound methods are ephemeral objects, so the following condition
# would normally always be True for methods, but the container used
# has logic to take that into account
if ch not in self_subscribers:
self_subscribers.append(ch)
# add in the other instance level callbacks added at runtime
if subscribers is not None:
for el in subscribers:
# bound methods are ephemeral objects, so the following condition
# would normally always be True for methods, but the container used
# has logic to take that into account
if el not in self_subscribers:
self_subscribers.append(el)
loop = loop or self.loop
# maybe do a round of external publishing
if notify_external and self.external_signaller is not None:
self_subscribers.append(partial(self.ext_publish, instance, loop))
if self._fnotify is None:
fnotify = None
else:
if instance is None:
fnotify = self._fnotify
else:
fnotify = types.MethodType(self._fnotify, instance)
validator = self._fvalidation
if validator is not None and instance is not None:
validator = types.MethodType(validator, instance)
return Executor(self_subscribers, owner=self,
concurrent=SignalOptions.EXEC_CONCURRENT in self.flags,
loop=loop, exec_wrapper=fnotify,
fvalidation=validator) | Sets up and configures an `~.utils.Executor`:class: instance. | entailment |
def configure_logging(
filename=None,
filemode="a",
datefmt=FMT_DATE,
fmt=FMT,
stdout_fmt=FMT_STDOUT,
level=logging.DEBUG,
stdout_level=logging.WARNING,
initial_file_message="",
max_size=1048576,
rotations_number=5,
remove_handlers=True,
):
"""Configure logging module.
Args:
filename (str): Specifies a filename to log to.
filemode (str): Specifies the mode to open the log file.
Values: ``'a'``, ``'w'``. *Default:* ``a``.
datefmt (str): Use the specified date/time format.
fmt (str): Format string for the file handler.
stdout_fmt (str): Format string for the stdout handler.
level (int): Log level for the file handler. Log levels are the same
as the log levels from the standard :mod:`logging` module.
*Default:* ``logging.DEBUG``
stdout_level (int): Log level for the stdout handler. Log levels are
the same as the log levels from the standard :mod:`logging` module.
*Default:* ``logging.WARNING``
initial_file_message (str): First log entry written in file.
max_size (int): Maximum size of the logfile. If the file exceeds this
size it will be rotated.
rotations_number (int): Number of rotations to save.
remove_handlers (bool): Remove all existing handlers.
"""
logger = logging.getLogger()
logger.level = logging.NOTSET
# Remove all handlers
if remove_handlers:
while len(logger.handlers) > 0:
hdlr = logger.handlers[0]
hdlr.close()
logger.removeHandler(hdlr)
# Create stdout handler
if stdout_level is not None:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(stdout_level)
stdout_formatter = logging.Formatter(stdout_fmt, datefmt)
# stdoutFormatter.converter = time.gmtime
stdout_handler.setFormatter(stdout_formatter)
logger.addHandler(stdout_handler)
# Create file handler if filename is provided
if filename is not None:
# Check if filename directory exists and creates it if it doesn't
directory = os.path.abspath(os.path.dirname(filename))
if not os.path.isdir(directory):
shell.mkdir(directory)
# Create file handler
file_handler = RotatingFileHandler(
filename, filemode, max_size, rotations_number
)
file_handler.setLevel(level)
file_formatter = logging.Formatter(fmt, datefmt)
file_formatter.converter = time.gmtime
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
if initial_file_message:
message = " %s " % initial_file_message
file_handler.stream.write("\n" + message.center(100, "=") + "\n\n") | Configure logging module.
Args:
filename (str): Specifies a filename to log to.
filemode (str): Specifies the mode to open the log file.
Values: ``'a'``, ``'w'``. *Default:* ``a``.
datefmt (str): Use the specified date/time format.
fmt (str): Format string for the file handler.
stdout_fmt (str): Format string for the stdout handler.
level (int): Log level for the file handler. Log levels are the same
as the log levels from the standard :mod:`logging` module.
*Default:* ``logging.DEBUG``
stdout_level (int): Log level for the stdout handler. Log levels are
the same as the log levels from the standard :mod:`logging` module.
*Default:* ``logging.WARNING``
initial_file_message (str): First log entry written in file.
max_size (int): Maximum size of the logfile. If the file exceeds this
size it will be rotated.
rotations_number (int): Number of rotations to save.
remove_handlers (bool): Remove all existing handlers. | entailment |
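A typical call might look like the sketch below; configure_logging is the function defined just above, the commented import path is an assumption, and the log path is a placeholder.
import logging
# from myproject.logutil import configure_logging  # assumed location

configure_logging(
    filename="/var/log/myapp/app.log",   # rotated at ~1 MiB, 5 backups kept
    level=logging.DEBUG,                 # everything goes to the file
    stdout_level=logging.WARNING,        # only warnings and worse on stdout
    initial_file_message="myapp started",
)
logging.getLogger(__name__).info("lands in the file, not on stdout")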
def create_plan(existing_users=None, proposed_users=None, purge_undefined=None, protected_users=None,
allow_non_unique_id=None, manage_home=True, manage_keys=True):
"""Determine what changes are required.
args:
existing_users (Users): List of discovered users
proposed_users (Users): List of proposed users
purge_undefined (bool): Remove discovered users that have not been defined in proposed users list
protected_users (list): List of users' names that should not be evaluated as part of the plan creation process
allow_non_unique_id (bool): Allow more than one user to have the same uid
manage_home (bool): Create/remove users' home directories
manage_keys (bool): Add/update/remove users' keys (manage_home must also be true)
returns:
list: Differences between discovered and proposed users with a
list of operations that will achieve the desired state.
"""
plan = list()
proposed_usernames = list()
if not purge_undefined:
purge_undefined = constants.PURGE_UNDEFINED
if not protected_users:
protected_users = constants.PROTECTED_USERS
if not allow_non_unique_id:
allow_non_unique_id = constants.ALLOW_NON_UNIQUE_ID
# Create list of modifications to make based on proposed users compared to existing users
for proposed_user in proposed_users:
proposed_usernames.append(proposed_user.name)
user_matching_name = existing_users.describe_users(users_filter=dict(name=proposed_user.name))
user_matching_id = get_user_by_uid(uid=proposed_user.uid, users=existing_users)
# If user does not exist
if not allow_non_unique_id and user_matching_id and not user_matching_name:
plan.append(
dict(action='fail', error='uid_clash', proposed_user=proposed_user, state='existing', result=None))
elif not user_matching_name:
plan.append(
dict(action='add', proposed_user=proposed_user, state='missing', result=None, manage_home=manage_home,
manage_keys=manage_keys))
# If they do, then compare
else:
user_comparison = compare_user(passed_user=proposed_user, user_list=existing_users)
if user_comparison.get('result'):
plan.append(
dict(action='update', proposed_user=proposed_user, state='existing',
user_comparison=user_comparison, manage_home=manage_home, manage_keys=manage_keys))
# Application of the proposed user list will not result in deletion of users that need to be removed
# If 'PURGE_UNDEFINED' then look for existing users that are not defined in proposed usernames and mark for removal
if purge_undefined:
for existing_user in existing_users:
if existing_user.name not in proposed_usernames:
if existing_user.name not in protected_users:
plan.append(
dict(action='delete', username=existing_user.name, state='existing', manage_home=manage_home,
manage_keys=manage_keys))
return plan | Determine what changes are required.
args:
existing_users (Users): List of discovered users
proposed_users (Users): List of proposed users
purge_undefined (bool): Remove discovered users that have not been defined in proposed users list
protected_users (list): List of users' names that should not be evaluated as part of the plan creation process
allow_non_unique_id (bool): Allow more than one user to have the same uid
manage_home (bool): Create/remove users' home directories
manage_keys (bool): Add/update/remove users' keys (manage_home must also be true)
returns:
list: Differences between discovered and proposed users with a
list of operations that will achieve the desired state. | entailment |
def execute_plan(plan=None):
"""Create, Modify or Delete, depending on plan item."""
execution_result = list()
for task in plan:
action = task['action']
if action == 'delete':
command = generate_delete_user_command(username=task.get('username'), manage_home=task['manage_home'])
command_output = execute_command(command)
execution_result.append(dict(task=task, command_output=command_output))
remove_sudoers_entry(username=task.get('username'))
elif action == 'add':
command = generate_add_user_command(proposed_user=task.get('proposed_user'), manage_home=task['manage_home'])
command_output = execute_command(command)
if task['proposed_user'].public_keys and task['manage_home'] and task['manage_keys']:
write_authorized_keys(task['proposed_user'])
if task['proposed_user'].sudoers_entry:
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['proposed_user'].sudoers_entry)
execution_result.append(dict(task=task, command_output=command_output))
elif action == 'update':
result = task['user_comparison'].get('result')
# Don't modify user if only keys have changed
action_count = 0
for k, _ in iteritems(result):
if '_action' in k:
action_count += 1
command_output = None
if task['manage_home'] and task['manage_keys'] and action_count == 1 and 'public_keys_action' in result:
write_authorized_keys(task['proposed_user'])
elif action_count == 1 and 'sudoers_entry_action' in result:
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
else:
command = generate_modify_user_command(task=task)
command_output = execute_command(command)
if task['manage_home'] and task['manage_keys'] and result.get('public_keys_action'):
write_authorized_keys(task['proposed_user'])
if result.get('sudoers_entry_action'):
write_sudoers_entry(username=task['proposed_user'].name,
sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
execution_result.append(dict(task=task, command_output=command_output)) | Create, Modify or Delete, depending on plan item. | entailment |
def get(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
**kwargs):
"""
Reads a value of ``variable_path`` from environment.
If ``coerce_type`` is ``bool`` and no ``coercer`` is specified, ``coercer`` is forced to be
:func:`~django_docker_helpers.utils.coerce_str_to_bool`
:param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default
"""
var_name = self.get_env_var_name(variable_path)
val = self.env.get(var_name, self.sentinel)
if val is self.sentinel:
return default
# coerce to bool with default env coercer if no coercer specified
if coerce_type and coerce_type is bool and not coercer:
coercer = coerce_str_to_bool
return self.coerce(val, coerce_type=coerce_type, coercer=coercer) | Reads a value of ``variable_path`` from environment.
If ``coerce_type`` is ``bool`` and no ``coercer`` is specified, ``coercer`` is forced to be
:func:`~django_docker_helpers.utils.coerce_str_to_bool`
:param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default | entailment |
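The path-to-variable-name convention lives in get_env_var_name, which is not shown here; the sketch below assumes the common "uppercase and join with underscores" behavior and is a standalone illustration, not the parser's actual class.
import os

def env_get(variable_path, default=None, coerce_type=None, delimiter='.'):
    # e.g. 'db.port' -> 'DB_PORT'; any prefix/scope handling is omitted
    var_name = variable_path.replace(delimiter, '_').upper()
    val = os.environ.get(var_name)
    if val is None:
        return default
    if coerce_type is bool:
        return val.lower() in ('1', 'true', 'yes', 'on')
    return coerce_type(val) if coerce_type else val

os.environ['DB_PORT'] = '5432'
print(env_get('db.port', coerce_type=int))                 # 5432
print(env_get('debug', default=False, coerce_type=bool))   # False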
def unzip(archive, destination, filenames=None):
"""Unzip a zip archive into destination directory.
It unzips either the whole archive or specific file(s) from the archive.
Usage:
>>> output = os.path.join(os.getcwd(), 'output')
>>> # Archive can be an instance of a ZipFile class
>>> archive = zipfile.ZipFile('test.zip', 'r')
>>> # Or just a filename
>>> archive = 'test.zip'
>>> # Extracts all files
>>> unzip(archive, output)
>>> # Extract only one file
>>> unzip(archive, output, 'my_file.txt')
>>> # Extract a list of files
>>> unzip(archive, output, ['my_file1.txt', 'my_file2.txt'])
>>> unzip_file('test.zip', 'my_file.txt', output)
Args:
archive (zipfile.ZipFile or str): Zipfile object to extract from or
path to the zip archive.
destination (str): Path to the output directory.
filenames (str or list of str or None): Path(s) to the filename(s)
inside the zip archive that you want to extract.
"""
close = False
try:
if not isinstance(archive, zipfile.ZipFile):
archive = zipfile.ZipFile(archive, "r", allowZip64=True)
close = True
logger.info("Extracting: %s -> %s" % (archive.filename, destination))
if isinstance(filenames, str):
filenames = [filenames]
if filenames is None: # extract all
filenames = archive.namelist()
for filename in filenames:
if filename.endswith("/"): # it's a directory
shell.mkdir(os.path.join(destination, filename))
else:
if not _extract_file(archive, destination, filename):
raise Exception()
logger.info('Extracting zip archive "%s" succeeded' % archive.filename)
return True
except Exception:
logger.exception("Error while unzipping archive %s" % archive.filename)
return False
finally:
if close:
archive.close() | Unzip a zip archive into destination directory.
It unzips either the whole archive or specific file(s) from the archive.
Usage:
>>> output = os.path.join(os.getcwd(), 'output')
>>> # Archive can be an instance of a ZipFile class
>>> archive = zipfile.ZipFile('test.zip', 'r')
>>> # Or just a filename
>>> archive = 'test.zip'
>>> # Extracts all files
>>> unzip(archive, output)
>>> # Extract only one file
>>> unzip(archive, output, 'my_file.txt')
>>> # Extract a list of files
>>> unzip(archive, output, ['my_file1.txt', 'my_file2.txt'])
>>> unzip_file('test.zip', 'my_file.txt', output)
Args:
archive (zipfile.ZipFile or str): Zipfile object to extract from or
path to the zip archive.
destination (str): Path to the output directory.
filenames (str or list of str or None): Path(s) to the filename(s)
inside the zip archive that you want to extract. | entailment |
def mkzip(archive, items, mode="w", save_full_paths=False):
"""Recursively zip a directory.
Args:
archive (zipfile.ZipFile or str): ZipFile object to add to, or path to the
output zip archive.
items (str or list of str): Single item or list of items (files and
directories) to be added to zipfile.
mode (str): ``'w'`` to create a new archive and write to it, ``'a'`` to append to an existing one.
save_full_paths (bool): Preserve full paths.
"""
close = False
try:
if not isinstance(archive, zipfile.ZipFile):
archive = zipfile.ZipFile(archive, mode, allowZip64=True)
close = True
logger.info("mkzip: Creating %s, from: %s", archive.filename, items)
if isinstance(items, str):
items = [items]
for item in items:
item = os.path.abspath(item)
basename = os.path.basename(item)
if os.path.isdir(item):
for root, directories, filenames in os.walk(item):
for filename in filenames:
path = os.path.join(root, filename)
if save_full_paths:
archive_path = path.encode("utf-8")
else:
archive_path = os.path.join(
basename, path.replace(item, "").strip("\\/")
).encode("utf-8")
archive.write(path, archive_path)
elif os.path.isfile(item):
if save_full_paths:
archive_name = item.encode("utf-8")
else:
archive_name = basename.encode("utf-8")
archive.write(item, archive_name) # , zipfile.ZIP_DEFLATED)
return True
except Exception as e:
logger.error("Error occurred during mkzip: %s" % e)
return False
finally:
if close:
archive.close() | Recursively zip a directory.
Args:
archive (zipfile.ZipFile or str): ZipFile object to add to, or path to the
output zip archive.
items (str or list of str): Single item or list of items (files and
directories) to be added to zipfile.
mode (str): ``'w'`` to create a new archive and write to it, ``'a'`` to append to an existing one.
save_full_paths (bool): Preserve full paths. | entailment |
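The same recursive-zip behavior can be sketched with just the standard library, for readers who want the shape of what mkzip does without the module's logger and helpers.
import os
import zipfile

def zip_dir(archive_path, directory):
    # Mirror mkzip's default layout: paths inside the archive are
    # relative to the zipped directory's basename.
    base = os.path.basename(os.path.abspath(directory))
    with zipfile.ZipFile(archive_path, "w", allowZip64=True) as zf:
        for root, _dirs, files in os.walk(directory):
            for name in files:
                path = os.path.join(root, name)
                arcname = os.path.join(base, os.path.relpath(path, directory))
                zf.write(path, arcname)

# zip_dir("out.zip", "my_project")  # uncomment with a real directory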
def seven_zip(archive, items, self_extracting=False):
"""Create a 7z archive."""
if not isinstance(items, (list, tuple)):
items = [items]
if self_extracting:
return er(_get_sz(), "a", "-ssw", "-sfx", archive, *items)
else:
return er(_get_sz(), "a", "-ssw", archive, *items) | Create a 7z archive. | entailment |
def ensure_caches_alive(max_retries: int = 100,
retry_timeout: int = 5,
exit_on_failure: bool = True) -> bool:
"""
Checks every cache backend alias in ``settings.CACHES`` until it becomes available. After ``max_retries``
failed attempts to reach any backend it returns ``False``. If ``exit_on_failure`` is set it shuts down with
``exit(1)``.
It sets the ``django-docker-helpers:available-check`` key for every cache backend to ensure
it is accepting connections. If the check passes, the key is deleted.
:param exit_on_failure: set to ``True`` if there's no sense to continue
:param int max_retries: a number of attempts to reach cache backend, default is ``100``
:param int retry_timeout: a timeout in seconds between attempts, default is ``5``
:return: ``True`` if all backends are available ``False`` if any backend check failed
"""
for cache_alias in settings.CACHES.keys():
cache = caches[cache_alias]
wf('Checking if the cache backed is accessible for the alias `%s`... ' % cache_alias, False)
for i in range(max_retries):
try:
cache.set('django-docker-helpers:available-check', '1')
assert cache.get('django-docker-helpers:available-check') == '1'
cache.delete('django-docker-helpers:available-check')
wf('[+]\n')
break
except Exception as e:
wf(str(e) + '\n')
sleep(retry_timeout)
else:
wf('Tried %s time(s). Shutting down.\n' % max_retries)
exit_on_failure and exit(1)
return False
return True | Checks every cache backend alias in ``settings.CACHES`` until it becomes available. After ``max_retries``
failed attempts to reach any backend it returns ``False``. If ``exit_on_failure`` is set it shuts down with
``exit(1)``.
It sets the ``django-docker-helpers:available-check`` key for every cache backend to ensure
it is accepting connections. If the check passes, the key is deleted.
:param exit_on_failure: set to ``True`` if there's no sense to continue
:param int max_retries: a number of attempts to reach cache backend, default is ``100``
:param int retry_timeout: a timeout in seconds between attempts, default is ``5``
:return: ``True`` if all backends are available ``False`` if any backend check failed | entailment |
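These helpers are typically chained in a container entrypoint before the application server starts; the sketch below assumes the import location (adjust it to the real package layout) and that DJANGO_SETTINGS_MODULE is already configured.
# entrypoint.py, run before the WSGI/ASGI server starts
import django

django.setup()

# assumed import path for the helpers defined in this document
from django_docker_helpers.db import (
    ensure_caches_alive, ensure_databases_alive, migrate,
)

ensure_databases_alive(max_retries=100, retry_timeout=5)
ensure_caches_alive(max_retries=100, retry_timeout=5)
migrate()  # apply pending migrations, then exec the real server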
def ensure_databases_alive(max_retries: int = 100,
retry_timeout: int = 5,
exit_on_failure: bool = True) -> bool:
"""
Checks every database alias in ``settings.DATABASES`` until it becomes available. After ``max_retries``
failed attempts to reach any backend it returns ``False``. If ``exit_on_failure`` is set it shuts down with
``exit(1)``.
For every database alias it tries to ``SELECT 1``. If no errors are raised it checks the next alias.
:param exit_on_failure: set to ``True`` if there's no sense to continue
:param int max_retries: number of attempts to reach every database; default is ``100``
:param int retry_timeout: timeout in seconds between attempts
:return: ``True`` if all backends are available, ``False`` if any backend check failed
"""
template = """
=============================
Checking database connection `{CONNECTION}`:
Engine: {ENGINE}
Host: {HOST}
Database: {NAME}
User: {USER}
Password: {PASSWORD}
=============================\n"""
for connection_name in connections:
_db_settings = dict.fromkeys(['ENGINE', 'HOST', 'NAME', 'USER', 'PASSWORD'])
_db_settings.update(settings.DATABASES[connection_name])
_db_settings['CONNECTION'] = connection_name
if _db_settings.get('PASSWORD'):
_db_settings['PASSWORD'] = 'set'
wf(template.format(**_db_settings))
wf('Checking db connection alive... ', False)
for i in range(max_retries):
try:
cursor = connections[connection_name].cursor()
cursor.execute('SELECT 1')
cursor.fetchone()
wf('[+]\n')
break
except OperationalError as e:
wf(str(e))
sleep(retry_timeout)
else:
wf('Tried %s time(s). Shutting down.\n' % max_retries)
exit_on_failure and exit(1)
return False
return True | Checks every database alias in ``settings.DATABASES`` until it becomes available. After ``max_retries``
failed attempts to reach any backend it returns ``False``. If ``exit_on_failure`` is set it shuts down with
``exit(1)``.
For every database alias it tries to ``SELECT 1``. If no errors are raised it checks the next alias.
:param exit_on_failure: set to ``True`` if there's no sense to continue
:param int max_retries: number of attempts to reach every database; default is ``100``
:param int retry_timeout: timeout in seconds between attempts
:return: ``True`` if all backends are available, ``False`` if any backend check failed | entailment |
def migrate(*argv) -> bool:
"""
Runs Django migrate command.
:return: always ``True``
"""
wf('Applying migrations... ', False)
execute_from_command_line(['./manage.py', 'migrate'] + list(argv))
wf('[+]\n')
return True | Runs Django migrate command.
:return: always ``True`` | entailment |
def output(self, output, accepts, set_http_code, set_content_type):
""" Formats a response from a WSGI app to handle any RDF graphs
If a view function returns a single RDF graph, serialize it based on Accept header
If it's not an RDF graph, return it without any special handling
"""
graph = Decorator._get_graph(output)
if graph is not None:
# decide the format
output_mimetype, output_format = self.format_selector.decide(accepts, graph.context_aware)
# no serialization format matched the requested content types
if output_mimetype is None:
set_http_code("406 Not Acceptable")
return ['406 Not Acceptable'.encode('utf-8')]
# explicitly mark text mimetypes as utf-8
if 'text' in output_mimetype:
output_mimetype = output_mimetype + '; charset=utf-8'
# format the new response
serialized = graph.serialize(format=output_format)
set_content_type(output_mimetype)
return [serialized]
else:
return output | Formats a response from a WSGI app to handle any RDF graphs
If a view function returns a single RDF graph, serialize it based on Accept header
If it's not an RDF graph, return it without any special handling | entailment |
def decorate(self, app):
""" Wraps a WSGI application to return formatted RDF graphs
Uses content negotiation to serialize the graph to the client-preferred format
Passes other content through unmodified
"""
from functools import wraps
@wraps(app)
def decorated(environ, start_response):
# capture any start_response from the app
app_response = {}
app_response['status'] = "200 OK"
app_response['headers'] = []
app_response['written'] = BytesIO()
def custom_start_response(status, headers, *args, **kwargs):
app_response['status'] = status
app_response['headers'] = headers
app_response['args'] = args
app_response['kwargs'] = kwargs
return app_response['written'].write
returned = app(environ, custom_start_response)
# callbacks from the serialization
def set_http_code(status):
app_response['status'] = str(status)
def set_header(header, value):
app_response['headers'] = [(h,v) for (h,v) in app_response['headers'] if h.lower() != header.lower()]
app_response['headers'].append((header, value))
def set_content_type(content_type):
set_header('Content-Type', content_type)
# do the serialization
accept = environ.get('HTTP_ACCEPT', '')
new_return = self.output(returned, accept, set_http_code, set_content_type)
# set the Vary header
vary_headers = (v for (h,v) in app_response['headers'] if h.lower() == 'vary')
vary_elements = list(itertools.chain(*[v.split(',') for v in vary_headers]))
vary_elements = list(set([v.strip() for v in vary_elements]))
if '*' not in vary_elements and 'accept' not in (v.lower() for v in vary_elements):
vary_elements.append('Accept')
set_header('Vary', ', '.join(vary_elements))
# pass on the result to the parent WSGI server
parent_writer = start_response(app_response['status'],
app_response['headers'],
*app_response.get('args', []),
**app_response.get('kwargs', {}))
written = app_response['written'].getvalue()
if len(written) > 0:
parent_writer(written)
return new_return
return decorated | Wraps a WSGI application to return formatted RDF graphs
Uses content negotiation to serialize the graph to the client-preferred format
Passes other content through unmodified | entailment |
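Wiring the decorator around a plain WSGI callable might look like the sketch below; rdflib is assumed to be available (the code above already serializes its graphs), and the commented lines show where a Decorator instance from this row would plug in.
from rdflib import Graph, Literal, URIRef

def app(environ, start_response):
    # A view that returns an rdflib Graph instead of a byte iterable.
    g = Graph()
    g.add((URIRef("http://example.org/s"),
           URIRef("http://example.org/p"),
           Literal("o")))
    start_response("200 OK", [])
    return g

# decorator = Decorator()              # the class shown above
# wsgi_app = decorator.decorate(app)
# A client sending "Accept: text/turtle" then receives the graph as
# Turtle; non-graph return values pass through untouched.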
def is_handler(cls, name, value):
"""Detect a handler and return its wanted signal name."""
signal_name = False
config = None
if callable(value) and hasattr(value, SPEC_CONTAINER_MEMBER_NAME):
spec = getattr(value, SPEC_CONTAINER_MEMBER_NAME)
if spec['kind'] == 'handler':
signal_name = spec['name']
config = spec['config']
return signal_name, config | Detect a handler and return its wanted signal name. | entailment |
def _build_inheritance_chain(cls, bases, *names, merge=False):
"""For all of the names build a ChainMap containing a map for every
base class."""
result = []
for name in names:
maps = []
for base in bases:
bmap = getattr(base, name, None)
if bmap is not None:
assert isinstance(bmap, (dict, ChainMap))
if len(bmap):
if isinstance(bmap, ChainMap):
maps.extend(bmap.maps)
else:
maps.append(bmap)
result.append(ChainMap({}, *maps))
if merge:
result = [dict(map) for map in result]
if len(names) == 1:
return result[0]
return result | For all of the names build a ChainMap containing a map for every
base class. | entailment |
def _build_instance_handler_mapping(cls, instance, handle_d):
"""For every unbound handler, get the bound version."""
res = {}
for member_name, sig_name in handle_d.items():
if sig_name in res:
sig_handlers = res[sig_name]
else:
sig_handlers = res[sig_name] = []
sig_handlers.append(getattr(instance, member_name))
return res | For every unbound handler, get the bound version. | entailment |
def _check_local_handlers(cls, signals, handlers, namespace, configs):
"""For every marked handler, see if there is a suitable signal. If
not, raise an error."""
for aname, sig_name in handlers.items():
# WARN: this code doesn't take into account the case where a new
# method with the same name as a handler in a base class is
# present in this class but isn't a handler (so the handler
# with the same name should be removed from the handlers)
if sig_name not in signals:
disable_check = configs[aname].get('disable_check', False)
if not disable_check:
raise SignalError("Cannot find a signal named '%s'"
% sig_name) | For every marked handler, see if there is a suitable signal. If
not, raise an error. | entailment |
def _find_local_signals(cls, signals, namespace):
"""Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete the signal's
initialization as a member of the class by injecting its name.
"""
from . import Signal
signaller = cls._external_signaller_and_handler
for aname, avalue in namespace.items():
if isinstance(avalue, Signal):
if avalue.name:
aname = avalue.name
else:
avalue.name = aname
assert ((aname not in signals) or
(aname in signals and avalue is not signals[aname])), \
("The same signal {name!r} was found "
"two times".format(name=aname))
if signaller:
avalue.external_signaller = signaller
signals[aname] = avalue | Add name info to every "local" (present in the body of this class)
signal and add it to the mapping. Also complete signal
initialization as member of the class by injecting its name. | entailment |
def _find_local_handlers(cls, handlers, namespace, configs):
"""Add name info to every "local" (present in the body of this class)
handler and add it to the mapping.
"""
for aname, avalue in namespace.items():
sig_name, config = cls._is_handler(aname, avalue)
if sig_name:
configs[aname] = config
handlers[aname] = sig_name | Add name info to every "local" (present in the body of this class)
handler and add it to the mapping. | entailment |
def _get_class_handlers(cls, signal_name, instance):
"""Returns the handlers registered at class level.
"""
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers] | Returns the handlers registered at class level. | entailment |
def _sort_handlers(cls, signals, handlers, configs):
"""Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes.
"""
def macro_precedence_sorter(flags, hname):
"""The default is to sort 'bottom_up', with lower level getting
executed first, but sometimes you need them reversed."""
data = configs[hname]
topdown_sort = SignalOptions.SORT_TOPDOWN in flags
if topdown_sort:
level = levels_count - 1 - data['level']
else:
level = data['level']
if 'begin' in data:
return (-1, level, hname)
elif 'end' in data:
return (1, level, hname)
else:
return (0, level, hname)
levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
for hname, sig_name in m.items():
sig_handlers = per_signal[sig_name]
if hname not in sig_handlers:
configs[hname]['level'] = level
sig_handlers.append(hname)
for sig_name, sig_handlers in per_signal.items():
if sig_name in signals: # it may be on a mixin
flags = signals[sig_name].flags
sig_handlers.sort(key=partial(macro_precedence_sorter,
flags))
return per_signal | Sort class defined handlers to give precedence to those declared at
lower level. ``config`` can contain two keys ``begin`` or ``end`` that
will further reposition the handler at the two extremes. | entailment |
def instance_signals_and_handlers(cls, instance):
"""Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
instance,
cls._signal_handlers
)
return isignals, ihandlers | Calculate per-instance signals and handlers. | entailment |
def add(self, src):
"""
:param src: file path
:return: checksum value
"""
checksum = get_checksum(src)
filename = self.get_filename(checksum)
if not filename:
new_name = self._get_new_name()
new_realpath = self._storage_dir + '/' + new_name
os.makedirs(os.path.split(new_realpath)[0], exist_ok=True)
shutil.copyfile(src, new_realpath)
self._log[new_name] = {
'checksum': checksum,
'mtime': os.path.getmtime(new_realpath),
'size': os.path.getsize(new_realpath)
}
self.write_log()
return checksum | :param src: file path
:return: checksum value | entailment |
def get_filename(self, checksum):
"""
:param checksum: checksum
:return: filename without the storage base part
"""
filename = None
for _filename, metadata in self._log.items():
if metadata['checksum'] == checksum:
filename = _filename
break
return filename | :param checksum: checksum
:return: filename without the storage base part | entailment |
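The add/get_filename pair implements a small content-addressed store; get_checksum itself is not shown, but the usual approach is a streaming file hash, for example with hashlib, as in this assumed sketch.
import hashlib

def get_checksum(path, algo="sha256", chunk_size=65536):
    # hash the file in chunks so large files do not need to fit in memory
    h = hashlib.new(algo)
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Two byte-identical files yield the same checksum, so add() can detect
# that the content is already stored and skip the copy.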
def __retrieve(self, key):
''' Retrieve file location from cache DB
'''
with self.get_conn() as conn:
try:
c = conn.cursor()
if key is None:
c.execute("SELECT value FROM cache_entries WHERE key IS NULL")
else:
c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,))
result = c.fetchone()
if result is None or len(result) != 1:
getLogger().info("There's no entry with key={key}".format(key=key))
return None
else:
return result[0]
except:
getLogger().exception("Cannot retrieve")
return None | Retrieve file location from cache DB | entailment |
def __insert(self, key, value):
'''
Insert a new key into the database
'''
if key in self:
getLogger().warning("Cache entry exists, cannot insert a new entry with key='{key}'".format(key=key))
return False
with self.get_conn() as conn:
try:
c = conn.cursor()
c.execute("INSERT INTO cache_entries (key, value) VALUES (?,?)", (key, value))
conn.commit()
return True
except Exception as e:
# NOTE: A cache error can be forgiven, no?
getLogger().debug("Cache Error: Cannot insert | Detail = %s" % (e,))
return False | Insert a new key into the database | entailment |
def __delete(self, key):
''' Delete file key from database
'''
with self.get_conn() as conn:
try:
c = conn.cursor()
c.execute("DELETE FROM cache_entries WHERE key = ?", (key,))
conn.commit()
except:
getLogger().exception("Cannot delete")
return None | Delete file key from database | entailment |
def __insert_internal_blob(self, key, blob, compressed=True):
''' This method will insert blob data to blob table
'''
with self.get_conn() as conn:
conn.isolation_level = None
c = conn.cursor()
try:
compressed_flag = 1 if compressed else 0
if compressed:
blob = zlib.compress(blob)
c.execute("BEGIN")
c.execute("INSERT INTO cache_entries (key, value) VALUES (?,?)", (key, JiCache.INTERNAL_BLOB))
c.execute("INSERT INTO blob_entries (key, compressed, blob_data) VALUES (?,?,?)", (key, compressed_flag, sqlite3.Binary(blob),))
c.execute("COMMIT")
return True
except:
getLogger().debug("Cannot insert")
return False | This method will insert blob data to blob table | entailment |
def __delete_internal_blob(self, key):
''' This method will delete blob data from the blob table
'''
with self.get_conn() as conn:
conn.isolation_level = None
try:
c = conn.cursor()
c.execute("BEGIN")
if key is None:
c.execute("DELETE FROM cache_entries WHERE key IS NULL")
c.execute("DELETE FROM blob_entries WHERE KEY IS NULL")
else:
c.execute("DELETE FROM cache_entries WHERE key = ?", (key,))
c.execute("DELETE FROM blob_entries WHERE KEY = ?", (key,))
c.execute("COMMIT")
except:
getLogger().debug("Cannot delete")
return False
return True | This method will delete blob data from the blob table | entailment |
def __retrieve_internal_blob(self, key):
''' Retrieve blob data from cache DB
'''
logger = getLogger()
with self.get_conn() as conn:
try:
c = conn.cursor()
if key is None:
c.execute("SELECT compressed, blob_data FROM blob_entries WHERE KEY IS NULL")
else:
c.execute("SELECT compressed, blob_data FROM blob_entries WHERE KEY = ?", (key,))
result = c.fetchone()
if not result:
logger.debug("There's no blob entry with key={key}".format(key=key))
logger.debug("result = {res}".format(res=result))
return None
else:
compressed, blob_data = result
logger.debug("retrieving internal BLOB (key={key} | len={ln} | compressed={c})".format(key=key, ln=len(blob_data), c=compressed))
return blob_data if not compressed else zlib.decompress(blob_data)
except:
getLogger().exception("Cannot retrieve internal blob (key={})".format(key))
return None
return True | Retrieve blob data from cache DB | entailment |
def retrieve_blob(self, key, encoding=None):
''' Retrieve blob in binary format (or string format if encoding is provided) '''
blob_key = self.__retrieve(key)
if blob_key is None:
return None
if not blob_key:
raise Exception("Invalid blob_key")
elif blob_key == JiCache.INTERNAL_BLOB:
blob_data = self.__retrieve_internal_blob(key)
return blob_data if not encoding else blob_data.decode(encoding)
else:
getLogger().debug("Key[{key}] -> [{blob_key}]".format(key=key, blob_key=blob_key))
blob_file = os.path.join(self.blob_location, blob_key)
return FileHelper.read(blob_file) | Retrieve blob in binary format (or string format if encoding is provided) | entailment |
def summarize(self, rows):
"""Return summary rows for `rows`.
Parameters
----------
rows : list of dicts
Normalized rows to summarize.
Returns
-------
A list of summary rows. Each row is a tuple where the first item is
the data and the second is a dict of keyword arguments that can be
passed to StyleFields.render.
"""
columns = list(rows[0].keys())
agg_styles = {c: self.style[c]["aggregate"]
for c in columns if "aggregate" in self.style[c]}
summaries = {}
for col, agg_fn in agg_styles.items():
lgr.debug("Summarizing column %r with %r", col, agg_fn)
colvals = filter(lambda x: not isinstance(x, Nothing),
(row[col] for row in rows))
summaries[col] = agg_fn(list(colvals))
# The rest is just restructuring the summaries into rows that are
# compatible with pyout.Content. Most of the complexity below comes from
# the fact that a summary function is allowed to return either a single
# item or a list of items.
maxlen = max(len(v) if isinstance(v, list) else 1
for v in summaries.values())
summary_rows = []
for rowidx in range(maxlen):
sumrow = {}
for column, values in summaries.items():
if isinstance(values, list):
if rowidx >= len(values):
continue
sumrow[column] = values[rowidx]
elif rowidx == 0:
sumrow[column] = values
for column in columns:
if column not in sumrow:
sumrow[column] = ""
summary_rows.append((sumrow,
{"style": self.style.get("aggregate_"),
"adopt": False}))
return summary_rows | Return summary rows for `rows`.
Parameters
----------
rows : list of dicts
Normalized rows to summarize.
Returns
-------
A list of summary rows. Each row is a tuple where the first item is
the data and the second is a dict of keyword arguments that can be
passed to StyleFields.render. | entailment |
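In practice the "aggregate" entry read by summarize is just a callable over a column's values; a minimal illustrative style fragment, following the structure the method expects, might look like this.
# The "size" column gets a per-table total appended as a summary row;
# the callable receives the list of non-missing values for that column.
style = {
    "size": {
        "aggregate": lambda values: "total: {}".format(sum(values)),
    },
    # optional styling applied to the summary rows themselves
    "aggregate_": {"underline": True},
}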
def _init(self, style, streamer, processors=None):
"""Do writer-specific setup.
Parameters
----------
style : dict
Style, as passed to __init__.
streamer : interface.Stream
A stream interface that takes __init__'s `stream` and `interactive`
arguments into account.
processors : field.StyleProcessors, optional
A writer-specific processors instance. Defaults to
field.PlainProcessors().
"""
self._stream = streamer
if streamer.interactive:
if streamer.supports_updates:
self.mode = "update"
else:
self.mode = "incremental"
else:
self.mode = "final"
if style and "width_" not in style and self._stream.width:
style["width_"] = self._stream.width
self._content = ContentWithSummary(
StyleFields(style, processors or PlainProcessors())) | Do writer-specific setup.
Parameters
----------
style : dict
Style, as passed to __init__.
streamer : interface.Stream
A stream interface that takes __init__'s `stream` and `interactive`
arguments into account.
processors : field.StyleProcessors, optional
A writer-specific processors instance. Defaults to
field.PlainProcessors(). | entailment |
def ids(self):
"""A list of unique IDs used to identify a row.
If not explicitly set, it defaults to the first column name.
"""
if self._ids is None:
if self._columns:
if isinstance(self._columns, OrderedDict):
return [list(self._columns.keys())[0]]
return [self._columns[0]]
else:
return self._ids | A list of unique IDs used to identify a row.
If not explicitly set, it defaults to the first column name. | entailment |
def wait(self):
"""Wait for asynchronous calls to return.
"""
if self._pool is None:
return
self._pool.close()
self._pool.join() | Wait for asynchronous calls to return. | entailment |
def _write_lock(self):
"""Acquire and release the lock around output calls.
This should allow multiple threads or processes to write output
reliably. Code that modifies the `_content` attribute should also do
so within this context.
"""
if self._lock:
lgr.debug("Acquiring write lock")
self._lock.acquire()
try:
yield
finally:
if self._lock:
lgr.debug("Releasing write lock")
self._lock.release() | Acquire and release the lock around output calls.
This should allow multiple threads or processes to write output
reliably. Code that modifies the `_content` attribute should also do
so within this context. | entailment |
def _start_callables(self, row, callables):
"""Start running `callables` asynchronously.
"""
id_vals = {c: row[c] for c in self.ids}
def callback(tab, cols, result):
if isinstance(result, Mapping):
pass
elif isinstance(result, tuple):
result = dict(zip(cols, result))
elif len(cols) == 1:
# Don't bother raising an exception if cols != 1
# because it would be lost in the thread.
result = {cols[0]: result}
result.update(id_vals)
tab._write(result)
if self._pool is None:
self._pool = Pool()
if self._lock is None:
self._lock = multiprocessing.Lock()
for cols, fn in callables:
cb_func = partial(callback, self, cols)
gen = None
if inspect.isgeneratorfunction(fn):
gen = fn()
elif inspect.isgenerator(fn):
gen = fn
if gen:
def callback_for_each():
for i in gen:
cb_func(i)
self._pool.apply_async(callback_for_each)
else:
self._pool.apply_async(fn, callback=cb_func) | Start running `callables` asynchronously. | entailment |
def schemas(self):
"""
Get a listing of all non-system schemas (those not prefixed with 'pg_') that
exist in the database.
"""
sql = """SELECT schema_name FROM information_schema.schemata
ORDER BY schema_name"""
schemas = self.query(sql).fetchall()
return [s[0] for s in schemas if s[0][:3] != "pg_"] | Get a listing of all non-system schemas (those not prefixed with 'pg_') that
exist in the database. | entailment |
def tables(self):
"""
Get a listing of all tables
- if schema specified on connect, return unqualified table names in
that schema
- if no schema specified on connect, return all tables, with schema
prefixes
"""
if self.schema:
return self.tables_in_schema(self.schema)
else:
tables = []
for schema in self.schemas:
tables = tables + [
schema + "." + t for t in self.tables_in_schema(schema)
]
return tables | Get a listing of all tables
- if schema specified on connect, return unqualified table names in
that schema
- if no schema specified on connect, return all tables, with schema
prefixes | entailment |
def _valid_table_name(self, table):
"""Check if the table name is obviously invalid.
"""
if table is None or not len(table.strip()):
raise ValueError("Invalid table name: %r" % table)
return table.strip() | Check if the table name is obviously invalid. | entailment |
def build_query(self, sql, lookup):
"""
Modify table and field name variables in a sql string with a dict.
This seems to be discouraged by psycopg2 docs but it makes small
adjustments to large sql strings much easier, making prepped queries
much more versatile.
USAGE
sql = 'SELECT $myInputField FROM $myInputTable'
lookup = {'myInputField':'customer_id', 'myInputTable':'customers'}
sql = db.build_query(sql, lookup)
"""
for key, val in six.iteritems(lookup):
sql = sql.replace("$" + key, val)
return sql | Modify table and field name variables in a sql string with a dict.
This seems to be discouraged by psycopg2 docs but it makes small
adjustments to large sql strings much easier, making prepped queries
much more versatile.
USAGE
sql = 'SELECT $myInputField FROM $myInputTable'
lookup = {'myInputField':'customer_id', 'myInputTable':'customers'}
sql = db.build_query(sql, lookup) | entailment |
def tables_in_schema(self, schema):
"""Get a listing of all tables in given schema
"""
sql = """SELECT table_name
FROM information_schema.tables
WHERE table_schema = %s"""
return [t[0] for t in self.query(sql, (schema,)).fetchall()] | Get a listing of all tables in given schema | entailment |
def parse_table_name(self, table):
"""Parse schema qualified table name
"""
if "." in table:
schema, table = table.split(".")
else:
schema = None
return (schema, table) | Parse schema qualified table name | entailment |
def load_table(self, table):
"""Loads a table. Returns None if the table does not already exist in db
"""
table = self._valid_table_name(table)
schema, table = self.parse_table_name(table)
if not schema:
schema = self.schema
tables = self.tables
else:
tables = self.tables_in_schema(schema)
if table in tables:
return Table(self, schema, table)
else:
return None | Loads a table. Returns None if the table does not already exist in db | entailment |
def mogrify(self, sql, params):
"""Return the query string with parameters added
"""
conn = self.engine.raw_connection()
cursor = conn.cursor()
return cursor.mogrify(sql, params) | Return the query string with parameters added | entailment |
def execute(self, sql, params=None):
"""Just a pointer to engine.execute
"""
# wrap in a transaction to ensure things are committed
# https://github.com/smnorris/pgdata/issues/3
with self.engine.begin() as conn:
result = conn.execute(sql, params)
return result | Just a pointer to engine.execute | entailment |
def query_one(self, sql, params=None):
"""Grab just one record
"""
r = self.engine.execute(sql, params)
return r.fetchone() | Grab just one record | entailment |
def create_schema(self, schema):
"""Create specified schema if it does not already exist
"""
if schema not in self.schemas:
sql = "CREATE SCHEMA " + schema
self.execute(sql) | Create specified schema if it does not already exist | entailment |
def drop_schema(self, schema, cascade=False):
"""Drop specified schema
"""
if schema in self.schemas:
sql = "DROP SCHEMA " + schema
if cascade:
sql = sql + " CASCADE"
self.execute(sql) | Drop specified schema | entailment |
def create_table(self, table, columns):
"""Creates a table
"""
schema, table = self.parse_table_name(table)
table = self._valid_table_name(table)
if not schema:
schema = self.schema
if table in self.tables:
return Table(self, schema, table)
else:
return Table(self, schema, table, columns) | Creates a table | entailment |
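A short usage sketch of the connection object these methods hang off; the pgdata.connect entry point and the URL are assumptions, so the calls are left commented rather than presented as the package's verified API.
# import pgdata
# db = pgdata.connect("postgresql://user:password@localhost:5432/mydb")
# db.create_schema("staging")
# db.execute("CREATE TABLE staging.events (id integer, name text)")
# print(db.tables_in_schema("staging"))                          # ['events']
# print(db.query_one("SELECT count(*) FROM staging.events")[0])  # 0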
def ogr2pg(
self,
in_file,
in_layer=None,
out_layer=None,
schema="public",
s_srs=None,
t_srs="EPSG:3005",
sql=None,
dim=2,
cmd_only=False,
index=True
):
"""
Load a layer to the provided pgdata database connection using OGR2OGR.
The -sql option is like an ESRI where_clause or the ogr2ogr -where option,
but to increase flexibility, it is in SQLITE dialect:
SELECT * FROM <in_layer> WHERE <sql>
"""
# if not provided a layer name, use the name of the input file
if not in_layer:
in_layer = os.path.splitext(os.path.basename(in_file))[0]
if not out_layer:
out_layer = in_layer.lower()
command = [
"ogr2ogr",
"-t_srs",
t_srs,
"-f",
"PostgreSQL",
"PG:host={h} user={u} dbname={db} password={pwd}".format(
h=self.host, u=self.user, db=self.database, pwd=self.password
),
"-lco",
"OVERWRITE=YES",
"-overwrite",
"-lco",
"SCHEMA={schema}".format(schema=schema),
"-lco",
"GEOMETRY_NAME=geom",
"-dim",
"{d}".format(d=dim),
"-nlt",
"PROMOTE_TO_MULTI",
"-nln",
out_layer,
in_file
]
if sql:
command.insert(
len(command),
"-sql"
)
command.insert(
len(command),
"SELECT * FROM {} WHERE {}".format(in_layer, sql)
)
command.insert(len(command), "-dialect")
command.insert(len(command), "SQLITE")
# only add output layer name if sql not included (it gets ignored)
if not sql:
command.insert(
len(command),
in_layer
)
if s_srs:
command.insert(len(command), "-s_srs")
command.insert(len(command), s_srs)
if not index:
command.insert(len(command), "-lco")
command.insert(len(command), "SPATIAL_INDEX=NO")
if cmd_only:
return " ".join(command)
else:
subprocess.run(command) | Load a layer to provided pgdata database connection using OGR2OGR
-sql option is like an ESRI where_clause or the ogr2ogr -where option,
but to increase flexibility, it is in SQLITE dialect:
SELECT * FROM <in_layer> WHERE <sql> | entailment |
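A hedged example call; the file, schema and filter below are invented, and cmd_only=True returns the assembled ogr2ogr command string instead of executing it:
# Load only 'wilderness' polygons from a hypothetical shapefile into schema "staging".
cmd = db.ogr2pg(
    "parks.shp",
    schema="staging",
    sql="park_type = 'wilderness'",  # expands to: SELECT * FROM parks WHERE park_type = 'wilderness'
    cmd_only=True,
)
print(cmd)  # inspect the generated ogr2ogr command before running the real load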
def pg2ogr(
self,
sql,
driver,
outfile,
outlayer=None,
column_remap=None,
s_srs="EPSG:3005",
t_srs=None,
geom_type=None,
append=False,
):
"""
A wrapper around ogr2ogr, for quickly dumping a postgis query to file.
Supported formats are ["ESRI Shapefile", "GeoJSON", "FileGDB", "GPKG"]
- for GeoJSON, transforms to EPSG:4326
- for Shapefile, consider supplying a column_remap dict
- for FileGDB, geom_type is required
(https://trac.osgeo.org/gdal/ticket/4186)
"""
if driver == "FileGDB" and geom_type is None:
raise ValueError("Specify geom_type when writing to FileGDB")
filename, ext = os.path.splitext(os.path.basename(outfile))
if not outlayer:
outlayer = filename
u = urlparse(self.url)
pgcred = "host={h} user={u} dbname={db} password={p}".format(
h=u.hostname, u=u.username, db=u.path[1:], p=u.password
)
# use a VRT so we can remap columns if a lookup is provided
if column_remap:
# if specifying output field names, all fields have to be specified
# rather than try and parse the input sql, just do a test run of the
# query and grab column names from that
columns = [c for c in self.query(sql).keys() if c != "geom"]
# make sure all columns are represented in the remap
for c in columns:
if c not in column_remap.keys():
column_remap[c] = c
field_remap_xml = " \n".join(
[
'<Field name="' + column_remap[c] + '" src="' + c + '"/>'
for c in columns
]
)
else:
field_remap_xml = ""
vrt = """<OGRVRTDataSource>
<OGRVRTLayer name="{layer}">
<SrcDataSource>PG:{pgcred}</SrcDataSource>
<SrcSQL>{sql}</SrcSQL>
{fieldremap}
</OGRVRTLayer>
</OGRVRTDataSource>
""".format(
layer=outlayer,
sql=escape(sql.replace("\n", " ")),
pgcred=pgcred,
fieldremap=field_remap_xml,
)
vrtpath = os.path.join(tempfile.gettempdir(), filename + ".vrt")
if os.path.exists(vrtpath):
os.remove(vrtpath)
with open(vrtpath, "w") as vrtfile:
vrtfile.write(vrt)
# GeoJSON writes to EPSG:4326
if driver == 'GeoJSON' and not t_srs:
t_srs = "EPSG:4326"
# otherwise, default to BC Albers (only when the caller did not supply t_srs)
elif not t_srs:
t_srs = "EPSG:3005"
command = [
"ogr2ogr",
"-s_srs",
s_srs,
"-t_srs",
t_srs,
"-progress",
"-f",
driver,
outfile,
vrtpath
]
# if writing to gdb, specify geom type
if driver == "FileGDB":
command.insert(
len(command),
"-nlt"
)
command.insert(
len(command),
geom_type
)
# automatically update existing multilayer outputs
if driver in ("FileGDB", "GPKG") and os.path.exists(outfile):
command.insert(
len(command),
"-update"
)
# if specified, append to existing output
if append:
command.insert(
len(command),
"-append"
)
subprocess.run(command) | A wrapper around ogr2ogr, for quickly dumping a postgis query to file.
Supported formats are ["ESRI Shapefile", "GeoJSON", "FileGDB", "GPKG"]
- for GeoJSON, transforms to EPSG:4326
- for Shapefile, consider supplying a column_remap dict
- for FileGDB, geom_type is required
(https://trac.osgeo.org/gdal/ticket/4186) | entailment |
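Two illustrative dumps with invented table and file names; note that the FileGDB branch requires geom_type:
# GeoPackage output, reprojected to the default EPSG:3005 target.
db.pg2ogr("SELECT * FROM staging.parks", "GPKG", "parks.gpkg")
# FileGDB output must name a geometry type (see the GDAL ticket referenced above).
db.pg2ogr("SELECT * FROM staging.parks", "FileGDB", "parks.gdb", geom_type="MULTIPOLYGON")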
def setup_logging(filename, log_dir=None, force_setup=False):
''' Try to load logging configuration from a file. Set level to INFO if loading fails.
'''
if not force_setup and ChirpCLI.SETUP_COMPLETED:
logging.debug("Master logging has been setup. This call will be ignored.")
return
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
if os.path.isfile(filename):
with open(filename) as config_file:
try:
config = json.load(config_file)
logging.config.dictConfig(config)
logging.info("logging was setup using {}".format(filename))
ChirpCLI.SETUP_COMPLETED = True
except Exception as e:
logging.exception("Could not load logging config")
# default logging config
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO) | Try to load logging configuration from a file. Set level to INFO if loading fails. | entailment
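A minimal sketch of a config file this function could consume; the structure below is ordinary logging.config.dictConfig schema, not a file shipped with the library:
import json
# Hypothetical logging.json: formatters/handlers/root follow the stock dictConfig layout.
config = {
    "version": 1,
    "formatters": {"simple": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"}},
    "handlers": {"console": {"class": "logging.StreamHandler", "formatter": "simple", "level": "INFO"}},
    "root": {"level": "INFO", "handlers": ["console"]},
}
with open("logging.json", "w") as f:
    json.dump(config, f)
setup_logging("logging.json", log_dir="logs")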
def config_logging(args):
''' Override root logger's level '''
if args.quiet:
logging.getLogger().setLevel(logging.CRITICAL)
elif args.verbose:
logging.getLogger().setLevel(logging.DEBUG) | Override root logger's level | entailment |
def add_task(self, task, func=None, **kwargs):
''' Add a task parser '''
if not self.__tasks:
raise Exception("Tasks subparsers is disabled")
if 'help' not in kwargs:
if func is not None and func.__doc__:
kwargs['help'] = func.__doc__
task_parser = self.__tasks.add_parser(task, **kwargs)
if self.__add_vq:
self.add_vq(task_parser)
if func is not None:
task_parser.set_defaults(func=func)
return task_parser | Add a task parser | entailment |
def add_vq(self, parser):
''' Add verbose & quiet options '''
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true") | Add verbose & quiet options | entailment |
def add_version_func(self, show_version):
''' Enable --version and -V to show version information '''
if callable(show_version):
self.__show_version_func = show_version
else:
self.__show_version_func = lambda cli, args: print(show_version)
self.parser.add_argument("-V", "--version", action="store_true") | Enable --version and -V to show version information | entailment |
def logger(self):
''' Lazy logger '''
if self.__logger is None:
self.__logger = logging.getLogger(self.__name)
return self.__logger | Lazy logger | entailment |
def run(self, func=None):
''' Run the app '''
args = self.parser.parse_args()
if self.__add_vq is not None and self.__config_logging:
self.__config_logging(args)
if self.__show_version_func and args.version and callable(self.__show_version_func):
self.__show_version_func(self, args)
elif args.func is not None:
args.func(self, args)
elif func is not None:
func(self, args)
else:
self.parser.print_help() | Run the app | entailment |
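A sketch of how these pieces fit together; the application class name and constructor are assumed (they are not shown in this excerpt), so treat CLIApp(...) as illustrative:
def do_count(cli, args):
    ''' Count lines in a file '''
    with open(args.path) as infile:
        cli.logger.info("%s lines", sum(1 for _ in infile))

app = CLIApp("linecount")                    # assumed constructor
task = app.add_task("count", func=do_count)  # help text falls back to do_count.__doc__
task.add_argument("path")
app.run()                                    # dispatches to args.func, i.e. do_count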
def header(*msg, level='h1', separator=" ", print_out=print):
''' Print header block in text mode
'''
out_string = separator.join(str(x) for x in msg)
if level == 'h0':
# box_len = 80 if len(msg) < 80 else len(msg)
box_len = 80
print_out('+' + '-' * (box_len + 2))
print_out("| %s" % out_string)
print_out('+' + '-' * (box_len + 2))
elif level == 'h1':
print_out("")
print_out(out_string)
print_out('-' * 60)
elif level == 'h2':
print_out('\t%s' % out_string)
print_out('\t' + ('-' * 40))
else:
print_out('\t\t%s' % out_string)
print_out('\t\t' + ('-' * 20)) | Print header block in text mode | entailment |
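For example (output shapes described in the comments, assuming the default print for print_out):
header("Summary", "2024 run", level='h2')  # tab-indented title followed by a 40-dash rule
header("Totals", level='h0')               # boxed banner roughly 80 columns wide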
def fetch(self, value_obj=None):
''' Fetch the next value '''
val = None
try:
val = next(self.__iterable)
except StopIteration:
return None
if value_obj is None:
value_obj = Value(value=val)
else:
value_obj.value = val
return value_obj | Fetch the next value | entailment
def get_report_order(self):
''' Keys are sorted based on report order (i.e. some keys to be shown first)
Related: see sorted_by_count
'''
order_list = []
for x in self.__priority:
order_list.append([x, self[x]])
for x in sorted(list(self.keys())):
if x not in self.__priority:
order_list.append([x, self[x]])
return order_list | Keys are sorted based on report order (i.e. some keys to be shown first)
Related: see sorted_by_count | entailment |
def content(self):
''' Return report content as a string if mode == STRINGIO else an empty string '''
if isinstance(self.__report_file, io.StringIO):
return self.__report_file.getvalue()
else:
return '' | Return report content as a string if mode == STRINGIO else an empty string | entailment |
def format(self):
''' Format table to print out
'''
self.max_lengths = []
for row in self.rows:
if len(self.max_lengths) < len(row):
self.max_lengths += [0] * (len(row) - len(self.max_lengths))
for idx, val in enumerate(row):
len_cell = len(str(val)) if val else 0
if self.max_lengths[idx] < len_cell:
self.max_lengths[idx] = len_cell
return self.max_lengths | Format table to print out | entailment |
def getfullfilename(file_path):
''' Get full filename (with extension)
'''
warnings.warn("getfullfilename() is deprecated and will be removed in near future. Use chirptext.io.write_file() instead", DeprecationWarning)
if file_path:
return os.path.basename(file_path)
else:
return '' | Get full filename (with extension) | entailment |
def replace_ext(file_path, ext):
''' Change extension of a file_path to something else (provide None to remove) '''
if not file_path:
raise Exception("File path cannot be empty")
dirname = os.path.dirname(file_path)
filename = FileHelper.getfilename(file_path)
if ext:
filename = filename + '.' + ext
return os.path.join(dirname, filename) | Change extension of a file_path to something else (provide None to remove) | entailment |
def replace_name(file_path, new_name):
''' Change the file name in a path but keep the extension '''
if not file_path:
raise Exception("File path cannot be empty")
elif not new_name:
raise Exception("New name cannot be empty")
dirname = os.path.dirname(file_path)
ext = os.path.splitext(os.path.basename(file_path))[1]
return os.path.join(dirname, new_name + ext) | Change the file name in a path but keep the extension | entailment |
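Two quick path sketches; they assume FileHelper.getfilename() (not shown here) returns the base name without its extension:
FileHelper.replace_ext("/data/report.csv", "txt")       # -> "/data/report.txt" (assumed result)
FileHelper.replace_ext("/data/report.csv", None)        # -> "/data/report", extension dropped
FileHelper.replace_name("/data/report.csv", "summary")  # -> "/data/summary.csv"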
def get_child_folders(path):
''' Get all child folders of a folder '''
path = FileHelper.abspath(path)
return [dirname for dirname in os.listdir(path) if os.path.isdir(os.path.join(path, dirname))] | Get all child folders of a folder | entailment |
def get_child_files(path):
''' Get all child files of a folder '''
path = FileHelper.abspath(path)
return [filename for filename in os.listdir(path) if os.path.isfile(os.path.join(path, filename))] | Get all child files of a folder | entailment |
def remove_file(filepath):
''' Delete a file '''
try:
os.remove(os.path.abspath(os.path.expanduser(filepath)))
except OSError as e:
if e.errno != errno.ENOENT:
raise | Delete a file | entailment |
def _ptn2fn(self, pattern):
''' Pattern to filename '''
return [pattern.format(wd=self.working_dir, n=self.__name, mode=self.__mode),
pattern.format(wd=self.working_dir, n='{}.{}'.format(self.__name, self.__mode), mode=self.__mode)] | Pattern to filename | entailment |
def add_potential(self, *patterns):
''' Add a potential config file pattern '''
for ptn in patterns:
self.__potential.extend(self._ptn2fn(ptn)) | Add a potential config file pattern | entailment |
def locate_config(self):
''' Locate config file '''
for f in self.__potential:
f = FileHelper.abspath(f)
if os.path.isfile(f):
return f
return None | Locate config file | entailment |
def config(self):
''' Read config automatically if required '''
if self.__config is None:
config_path = self.locate_config()
if config_path:
self.__config = self.read_file(config_path)
self.__config_path = config_path
return self.__config | Read config automatically if required | entailment |
def read_file(self, file_path):
''' Read a configuration file and return configuration data '''
getLogger().info("Loading app config from {} file: {}".format(self.__mode, file_path))
if self.__mode == AppConfig.JSON:
return json.loads(FileHelper.read(file_path), object_pairs_hook=OrderedDict)
elif self.__mode == AppConfig.INI:
config = configparser.ConfigParser(allow_no_value=True)
config.read(file_path)
return config | Read a configuration file and return configuration data | entailment |
def load(self, file_path):
''' Load configuration from a specific file '''
self.clear()
self.__config = self.read_file(file_path) | Load configuration from a specific file | entailment |
def get_processes(sort_by_name=True):
"""Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process IDs.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
"""
if sort_by_name:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
),
)
else:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
),
) | Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process IDs.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples. | entailment |
def find(name, arg=None):
"""Find process by name or by argument in command line.
Args:
name (str): Process name to search for.
arg (str): Command line argument for a process to search for.
Returns:
tea.process.base.IProcess: Process object if found.
"""
for p in get_processes():
if p.name.lower().find(name.lower()) != -1:
if arg is not None:
for a in p.cmdline or []:
if a.lower().find(arg.lower()) != -1:
return p
else:
return p
return None | Find process by name or by argument in command line.
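Matching is a case-insensitive substring test on the process name and, optionally, on one command-line argument; for instance (names invented):
p = find("postgres")                 # first process whose name contains "postgres"
p = find("python", arg="manage.py")  # python process whose cmdline mentions manage.py
if p is not None:
    print(p.pid, p.name)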
Args:
name (str): Process name to search for.
arg (str): Command line argument for a process to search for.
Returns:
tea.process.base.IProcess: Process object if found. | entailment |
def execute(command, *args, **kwargs):
"""Execute a command with arguments and wait for output.
Arguments should not be quoted!
Keyword arguments:
env (dict): Dictionary of additional environment variables.
wait (bool): Wait for the process to finish.
Example::
>>> code = 'import sys;sys.stdout.write("out");sys.exit(0)'
>>> status, out, err = execute('python', '-c', code)
>>> print('status: %s, output: %s, error: %s' % (status, out, err))
status: 0, output: out, error:
>>> code = 'import sys;sys.stderr.write("err");sys.exit(1)'
>>> status, out, err = execute('python', '-c', code)
>>> print('status: %s, output: %s, error: %s' % (status, out, err))
status: 1, output: , error: err
"""
wait = kwargs.pop("wait", True)
process = Process(command, args, env=kwargs.pop("env", None))
process.start()
if not wait:
return process
process.wait()
return process.exit_code, process.read(), process.eread() | Execute a command with arguments and wait for output.
Arguments should not be quoted!
Keyword arguments:
env (dict): Dictionary of additional environment variables.
wait (bool): Wait for the process to finish.
Example::
>>> code = 'import sys;sys.stdout.write("out");sys.exit(0)'
>>> status, out, err = execute('python', '-c', code)
>>> print('status: %s, output: %s, error: %s' % (status, out, err))
status: 0, output: out, error:
>>> code = 'import sys;sys.stderr.write("err");sys.exit(1)'
>>> status, out, err = execute('python', '-c', code)
>>> print('status: %s, output: %s, error: %s' % (status, out, err))
status: 1, output: , error: err | entailment |
def execute_and_report(command, *args, **kwargs):
"""Execute a command with arguments and wait for output.
If execution was successful function will return True,
if not, it will log the output using standard logging and return False.
"""
logging.info("Execute: %s %s" % (command, " ".join(args)))
try:
status, out, err = execute(command, *args, **kwargs)
if status == 0:
logging.info(
"%s Finished successfully. Exit Code: 0.",
os.path.basename(command),
)
return True
else:
try:
logging.error(
"%s failed! Exit Code: %s\nOut: %s\nError: %s",
os.path.basename(command),
status,
out,
err,
)
except Exception as e:
# This fails when some non ASCII characters are returned
# from the application
logging.error(
"%s failed [%s]! Exit Code: %s\nOut: %s\nError: %s",
os.path.basename(command),
e,
status,
repr(out),
repr(err),
)
return False
except Exception:
logging.exception(
"%s failed! Exception thrown!", os.path.basename(command)
)
return False | Execute a command with arguments and wait for output.
If execution was successful function will return True,
if not, it will log the output using standard logging and return False. | entailment |
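A hedged example; the command and its arguments are placeholders:
# Returns True on exit code 0; otherwise it logs stdout/stderr and returns False.
if not execute_and_report("pg_dump", "-f", "backup.sql", "mydb"):
    raise RuntimeError("backup failed, see log output")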
def read_authorized_keys(username=None):
"""Read public keys from specified user's authorized_keys file.
args:
username (str): username.
returns:
list: Authorised keys for the specified user.
"""
authorized_keys_path = '{0}/.ssh/authorized_keys'.format(os.path.expanduser('~{0}'.format(username)))
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(username, rnd_chars)
authorized_keys = list()
copy_result = execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), authorized_keys_path, tmp_authorized_keys_path))))
result_message = copy_result[0][1].decode('UTF-8')
if 'you must have a tty to run sudo' in result_message: # pragma: no cover
raise OSError("/etc/sudoers is blocking sudo. Remove entry: 'Defaults requiretty'.")
elif 'No such file or directory' not in result_message:
execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_authorized_keys_path))))
with open(tmp_authorized_keys_path) as keys_file:
for key in keys_file:
authorized_keys.append(PublicKey(raw=key))
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
return authorized_keys | Read public keys from specified user's authorized_keys file.
args:
username (str): username.
returns:
list: Authorised keys for the specified user. | entailment |
def write_authorized_keys(user=None):
"""Write public keys back to authorized_keys file. Create keys directory if it doesn't already exist.
args:
user (User): Instance of User containing keys.
returns:
list: Authorised keys for the specified user.
"""
authorized_keys = list()
authorized_keys_dir = '{0}/.ssh'.format(os.path.expanduser('~{0}'.format(user.name)))
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
authorized_keys_path = '{0}/authorized_keys'.format(authorized_keys_dir)
tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(user.name, rnd_chars)
if not os.path.isdir(authorized_keys_dir):
execute_command(shlex.split(str('{0} mkdir -p {1}'.format(sudo_check(), authorized_keys_dir))))
for key in user.public_keys:
authorized_keys.append('{0}\n'.format(key.raw))
with open(tmp_authorized_keys_path, mode=text_type('w+')) as keys_file:
keys_file.writelines(authorized_keys)
execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_authorized_keys_path, authorized_keys_path))))
execute_command(shlex.split(str('{0} chown -R {1} {2}'.format(sudo_check(), user.name, authorized_keys_dir))))
execute_command(shlex.split(str('{0} chmod 700 {1}'.format(sudo_check(), authorized_keys_dir))))
execute_command(shlex.split(str('{0} chmod 600 {1}'.format(sudo_check(), authorized_keys_path))))
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path)))) | Write public keys back to authorized_keys file. Create keys directory if it doesn't already exist.
args:
user (User): Instance of User containing keys.
returns:
list: Authorised keys for the specified user. | entailment |
def b64encoded(self):
"""Return a base64 encoding of the key.
returns:
str: base64 encoding of the public key
"""
if self._b64encoded:
return text_type(self._b64encoded).strip("\r\n")
else:
return base64encode(self.raw) | Return a base64 encoding of the key.
returns:
str: base64 encoding of the public key | entailment |
def raw(self):
"""Return raw key.
returns:
str: raw key
"""
if self._raw:
return text_type(self._raw).strip("\r\n")
else:
return text_type(base64decode(self._b64encoded)).strip("\r\n") | Return raw key.
returns:
str: raw key | entailment |
def inner_parser(self) -> BaseParser:
"""
Prepares inner config parser for config stored at ``endpoint``.
:return: an instance of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:raises config.exceptions.KVStorageKeyDoestNotExist: if specified ``endpoint`` does not exist
:raises config.exceptions.KVStorageValueIsEmpty: if specified ``endpoint`` does not contain a config
"""
if self._inner_parser is not None:
return self._inner_parser
__index, response_config = self.client.kv.get(self.endpoint, **self.kv_get_opts)
if not response_config:
raise KVStorageKeyDoestNotExist('Key does not exist: `{0}`'.format(self.endpoint))
config = response_config['Value']
if not config or config is self.sentinel:
raise KVStorageValueIsEmpty('Read empty config by key `{0}`'.format(self.endpoint))
config = config.decode()
self._inner_parser = self.inner_parser_class(
config=io.StringIO(config),
path_separator=self.path_separator,
scope=None
)
return self._inner_parser | Prepares inner config parser for config stored at ``endpoint``.
:return: an instance of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:raises config.exceptions.KVStorageKeyDoestNotExist: if specified ``endpoint`` does not exists
:raises config.exceptions.KVStorageValueIsEmpty: if specified ``endpoint`` does not contain a config | entailment |
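A rough sketch of seeding Consul so this parser has something to read; it uses python-consul's kv.put with an invented key, and assumes the configured inner parser class understands YAML text:
import consul
client = consul.Consul()  # connection details left at the assumed defaults
client.kv.put("my-project/config.yml", "db:\n  host: postgres\n  port: 5432\n")
# The parser above would fetch this value by its endpoint, decode it,
# and hand it to inner_parser_class wrapped in an io.StringIO.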