def get(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
**kwargs):
"""
Reads a value of ``variable_path`` from consul kv storage.
:param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default
:raises config.exceptions.KVStorageKeyDoestNotExist: if the specified ``endpoint`` does not exist
:raises config.exceptions.KVStorageValueIsEmpty: if the specified ``endpoint`` does not contain a config
"""
return self.inner_parser.get(
variable_path,
default=default,
coerce_type=coerce_type,
coercer=coercer,
**kwargs,
)
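# Usage sketch (hypothetical names): `consul_config` stands in for an instance
# of the class defining the `get` method above, backed by a consul kv parser.
timeout = consul_config.get('app.timeout', default=10, coerce_type=int)
debug = consul_config.get('app.debug', default=False, coerce_type=bool)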
def add_format(mimetype, format, requires_context=False):
""" Registers a new format to be used in a graph's serialize call
If you've installed an rdflib serializer plugin, use this
to add it to the content negotiation system
Set requires_context=True if this format requires a context-aware graph
"""
global formats
global ctxless_mimetypes
global all_mimetypes
formats[mimetype] = format
if not requires_context:
ctxless_mimetypes.append(mimetype)
all_mimetypes.append(mimetype)
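# Example (sketch): registering rdflib serializer plugins for content
# negotiation; assumes the json-ld plugin is installed. N-Quads needs a
# context-aware graph, so requires_context is set for it.
add_format('application/ld+json', 'json-ld')
add_format('application/n-quads', 'nquads', requires_context=True)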
def add_format(self, mimetype, format, requires_context=False):
""" Registers a new format to be used in a graph's serialize call
If you've installed an rdflib serializer plugin, use this
to add it to the content negotiation system
Set requires_context=True if this format requires a context-aware graph
"""
self.formats[mimetype] = format
if not requires_context:
self.ctxless_mimetypes.append(mimetype)
self.all_mimetypes.append(mimetype)
def get_default_mimetype(self):
""" Returns the default mimetype """
mimetype = self.default_mimetype
if mimetype is None: # class inherits from module default
mimetype = DEFAULT_MIMETYPE
if mimetype is None: # module is set to None?
mimetype = 'application/rdf+xml'
return mimetype
def get_wildcard_mimetype(self):
""" Returns the mimetype if the client sends */* """
mimetype = self.wildcard_mimetype
if mimetype is None: # class inherits from module default
mimetype = WILDCARD_MIMETYPE
if mimetype is None: # module is set to None?
mimetype = 'application/rdf+xml'
return mimetype
def decide_mimetype(self, accepts, context_aware = False):
""" Returns what mimetype the client wants to receive
Parses the given Accept header and returns the best one that
we know how to output
An empty Accept will default to application/rdf+xml
An Accept with */* will use rdf+xml unless a better match is found
An Accept that doesn't match anything will return None
"""
mimetype = None
# If the client didn't request a thing, use default
if accepts is None or accepts.strip() == '':
mimetype = self.get_default_mimetype()
return mimetype
# pick the mimetype
if context_aware:
mimetype = mimeparse.best_match(all_mimetypes + self.all_mimetypes + [WILDCARD], accepts)
else:
mimetype = mimeparse.best_match(ctxless_mimetypes + self.ctxless_mimetypes + [WILDCARD], accepts)
if mimetype == '':
mimetype = None
# if browser sent */*
if mimetype == WILDCARD:
mimetype = self.get_wildcard_mimetype()
return mimetype
def get_serialize_format(self, mimetype):
""" Get the serialization format for the given mimetype """
format = self.formats.get(mimetype, None)
if format is None:
format = formats.get(mimetype, None)
return format
def decide(self, accepts, context_aware=False):
""" Returns what (mimetype,format) the client wants to receive
Parses the given Accept header and picks the best one that
we know how to output
Returns (mimetype, format)
An empty Accept will default to rdf+xml
An Accept with */* will use rdf+xml unless a better match is found
An Accept that doesn't match anything will return (None,None)
context_aware=True will allow nquad serialization
"""
mimetype = self.decide_mimetype(accepts, context_aware)
# return what format to serialize as
if mimetype is not None:
return (mimetype, self.get_serialize_format(mimetype))
else:
# couldn't find a matching mimetype for the Accepts header
return (None, None)
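# Sketch of a call site in a Flask view (hypothetical names: `formatter` is an
# instance of the class defining `decide`; `request` is Flask's request object).
mimetype, fmt = formatter.decide(request.headers.get('Accept'), context_aware=False)
# a (None, None) result maps naturally to an HTTP 406 Not Acceptable response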
def wants_rdf(self, accepts):
""" Returns whether this client's Accept header indicates
that the client wants to receive RDF
"""
mimetype = mimeparse.best_match(all_mimetypes + self.all_mimetypes + [WILDCARD], accepts)
return mimetype and mimetype != WILDCARD
async def send_http(session, method, url, *,
retries=1,
interval=1,
backoff=2,
http_status_codes_to_retry=HTTP_STATUS_CODES_TO_RETRY,
fn=lambda x:x,
**kwargs):
"""
Sends an HTTP request and implements retry logic.
Arguments:
session (obj): A client aiohttp session object
method (str): Method to use
url (str): URL for the request
retries (int): Number of times to retry in case of failure
interval (float): Time to wait before retries
backoff (int): Multiply interval by this factor after each failure
http_status_codes_to_retry (List[int]): List of status codes to retry
fn (Callable[[x], x]): Function to call on a successful connection
"""
backoff_interval = interval
raised_exc = None
attempt = 0
if method not in ['get', 'patch', 'post']:
raise ValueError('Unsupported HTTP method: {0}'.format(method))
if retries == -1: # -1 means retry indefinitely
attempt = -1
elif retries == 0: # Zero means don't retry
attempt = 1
else: # any other value means retry N times
attempt = retries + 1
while attempt != 0:
if raised_exc:
logger.error('Caught "%s" url:%s method:%s, remaining tries %s, '
             'sleeping %.2fsecs', raised_exc, url, method.upper(),
             attempt, backoff_interval)
await asyncio.sleep(backoff_interval)
# bump interval for the next possible attempt
backoff_interval *= backoff
# logger.info('sending %s %s with %s', method.upper(), url, kwargs)
try:
async with await getattr(session, method)(url, **kwargs) as response:
if response.status == 200:
return await fn(response)
elif response.status in http_status_codes_to_retry:
logger.error(
'Received invalid response code:%s error:%s'
' response:%s url:%s', response.status, '', response.reason, url)
raise aiohttp.ClientResponseError(
code=response.status, message=response.reason, request_info=response.request_info,
history=response.history)
else:
raise FailedRequest(
code=response.status, message='Non-retryable response code',
raised='aiohttp.ClientResponseError', url=url)
except aiohttp.ClientError as exc:
try:
code = exc.code
except AttributeError:
code = ''
raised_exc = FailedRequest(code=code, message=exc,
raised='%s.%s' % (exc.__class__.__module__, exc.__class__.__qualname__), url=url)
except asyncio.TimeoutError as exc:
raised_exc = FailedRequest(code='', message='asyncio.TimeoutError',
raised='%s.%s' % (exc.__class__.__module__, exc.__class__.__qualname__), url=url)
else:
raised_exc = None
break
attempt -= 1
if raised_exc:
raise raised_exc
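# Hypothetical call site for send_http; `fn` must return an awaitable, so
# passing along the response's json() coroutine works here.
async def fetch_json(url):
    async with aiohttp.ClientSession() as session:
        return await send_http(session, 'get', url, retries=3, interval=0.5,
                               backoff=2, fn=lambda resp: resp.json())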
def generate_output(self, writer):
"""
Generates the sitemap file and the stylesheet file and puts them into the content dir.
:param writer: the writer instance
:type writer: pelican.writers.Writer
"""
# write xml stylesheet
with codecs_open(os.path.join(os.path.dirname(__file__), 'sitemap-stylesheet.xsl'), 'r', encoding='utf-8') as fd_origin:
with codecs_open(os.path.join(self.path_output, 'sitemap-stylesheet.xsl'), 'w', encoding='utf-8') as fd_destination:
xsl = fd_origin.read()
# replace some template markers
# TODO use pelican template magic
xsl = xsl.replace('{{ SITENAME }}', self.context.get('SITENAME'))
fd_destination.write(xsl)
# will contain the url nodes as text
urls = ''
# get all articles sorted by time
articles_sorted = sorted(self.context['articles'], key=self.__get_date_key, reverse=True)
# get all pages with date/modified date
pages_with_date = list(
filter(
lambda p: getattr(p, 'modified', False) or getattr(p, 'date', False),
self.context.get('pages')
)
)
pages_with_date_sorted = sorted(pages_with_date, key=self.__get_date_key, reverse=True)
# get all pages without date
pages_without_date = list(
filter(
lambda p: getattr(p, 'modified', None) is None and getattr(p, 'date', None) is None,
self.context.get('pages')
)
)
pages_without_date_sorted = sorted(pages_without_date, key=self.__get_title_key, reverse=False)
# join them, first date sorted, then title sorted
pages_sorted = pages_with_date_sorted + pages_without_date_sorted
# the landing page
if 'index' in self.context.get('DIRECT_TEMPLATES'):
# assume that the index page has changed with the most current article or page
# use the first article or page if no articles
index_reference = None
if len(articles_sorted) > 0:
index_reference = articles_sorted[0]
elif len(pages_sorted) > 0:
index_reference = pages_sorted[0]
if index_reference is not None:
urls += self.__create_url_node_for_content(
index_reference,
'index',
url=self.url_site,
)
# process articles
for article in articles_sorted:
urls += self.__create_url_node_for_content(
article,
'articles',
url=urljoin(self.url_site, article.url)
)
# process pages
for page in pages_sorted:
urls += self.__create_url_node_for_content(
page,
'pages',
url=urljoin(self.url_site, page.url)
)
# process category pages
if self.context.get('CATEGORY_URL'):
urls += self.__process_url_wrapper_elements(self.context.get('categories'))
# process tag pages
if self.context.get('TAG_URL'):
urls += self.__process_url_wrapper_elements(sorted(self.context.get('tags'), key=lambda x: x[0].name))
# process author pages
if self.context.get('AUTHOR_URL'):
urls += self.__process_url_wrapper_elements(self.context.get('authors'))
# handle all DIRECT_TEMPLATES but "index"
for direct_template in list(filter(lambda p: p != 'index', self.context.get('DIRECT_TEMPLATES'))):
# we assume the modification date of the last article as modification date for the listings of
# categories, authors and archives (all values of DIRECT_TEMPLATES but "index")
modification_time = getattr(articles_sorted[0], 'modified', getattr(articles_sorted[0], 'date', None))
url = self.__get_direct_template_url(direct_template)
urls += self.__create_url_node_for_content(None, 'others', url, modification_time)
# write the final sitemap file
with codecs_open(os.path.join(self.path_output, 'sitemap.xml'), 'w', encoding='utf-8') as fd:
fd.write(self.xml_wrap % {
'SITEURL': self.url_site,
'urls': urls
})
def __get_direct_template_url(self, name):
"""
Returns the URL for the given DIRECT_TEMPLATE name.
Favors ${DIRECT_TEMPLATE}_SAVE_AS over the default path.
:param name: name of the direct template
:return: str
"""
url = self.pelican_settings.get('{}_SAVE_AS'.format(name.upper()))
if url is None:
url = self.settings.get('{}_URL'.format(name.upper()), '{}.html'.format(name))
return urljoin(self.url_site, url)
def __process_url_wrapper_elements(self, elements):
"""
Creates the url nodes for pelican.urlwrappers.Category and pelican.urlwrappers.Tag.
:param elements: list of wrapper elements
:type elements: list
:return: the processed urls as XML
:rtype: str
"""
urls = ''
for url_wrapper, articles in elements:
urls += self.__create_url_node_for_content(
url_wrapper,
'others',
url=urljoin(self.url_site, url_wrapper.url),
modification_time=self.__get_date_key(sorted(articles, key=self.__get_date_key, reverse=True)[0])
)
return urls
def __create_url_node_for_content(self, content, content_type, url=None, modification_time=None):
"""
Creates the required <url> node for the sitemap xml.
:param content: the content class to handle
:type content: pelican.contents.Content | None
:param content_type: the type of the given content to match settings.EXTENDED_SITEMAP_PLUGIN
:type content_type: str
:param url: if given, the URL to use instead of the url of the content instance
:type url: str
:param modification_time: the modification time of the url, will be used instead of content date if given
:type modification_time: datetime.datetime | None
:returns: the text node
:rtype: str
"""
loc = url
if loc is None:
loc = urljoin(self.url_site, self.context.get('ARTICLE_URL').format(**content.url_format))
lastmod = None
if modification_time is not None:
lastmod = modification_time.strftime('%Y-%m-%d')
else:
if content is not None:
if getattr(content, 'modified', None) is not None:
lastmod = getattr(content, 'modified').strftime('%Y-%m-%d')
elif getattr(content, 'date', None) is not None:
lastmod = getattr(content, 'date').strftime('%Y-%m-%d')
output = "<loc>{}</loc>".format(loc)
if lastmod is not None:
output += "\n<lastmod>{}</lastmod>".format(lastmod)
output += "\n<changefreq>{}</changefreq>".format(self.settings.get('changefrequencies').get(content_type))
output += "\n<priority>{:.2f}</priority>".format(self.settings.get('priorities').get(content_type))
return self.template_url.format(output)
def get_missing_commands(_platform):
"""Check I can identify the necessary commands for managing users."""
missing = list()
if _platform in ('Linux', 'OpenBSD'):
if not LINUX_CMD_USERADD:
missing.append('useradd')
if not LINUX_CMD_USERMOD:
missing.append('usermod')
if not LINUX_CMD_USERDEL:
missing.append('userdel')
if not LINUX_CMD_GROUP_ADD:
missing.append('groupadd')
if not LINUX_CMD_GROUP_DEL:
missing.append('groupdel')
elif _platform == 'FreeBSD': # pragma: FreeBSD
# FREEBSD COMMANDS
if not FREEBSD_CMD_PW:
missing.append('pw')
if missing:
print('\nMISSING = {0}'.format(missing))
return missing
def execute_command(command=None):
"""Execute a command and return the stdout and stderr."""
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
process.wait()
return (stdout, stderr), process.returncode
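# Example (sketch): run a command and inspect its output and return code.
(out, err), rc = execute_command(shlex.split('ls -l /tmp'))
if rc == 0:
    print(out.decode('UTF-8'))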
def base64encode(_input=None):
"""Return base64 encoded representation of a string."""
if PY2: # pragma: no cover
return base64.b64encode(_input)
elif PY3: # pragma: no cover
if isinstance(_input, bytes):
return base64.b64encode(_input).decode('UTF-8')
elif isinstance(_input, str):
return base64.b64encode(bytearray(_input, encoding='UTF-8')).decode('UTF-8')
def base64decode(_input=None):
"""Take a base64 encoded string and return the decoded string."""
missing_padding = len(_input) % 4
if missing_padding:
    _input += '=' * (4 - missing_padding)
if PY2: # pragma: no cover
return base64.decodestring(_input)
elif PY3: # pragma: no cover
if isinstance(_input, bytes):
return base64.b64decode(_input).decode('UTF-8')
elif isinstance(_input, str):
return base64.b64decode(bytearray(_input, encoding='UTF-8')).decode('UTF-8')
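# Round-trip sketch (Python 3):
encoded = base64encode('hello')  # 'aGVsbG8='
assert base64decode(encoded) == 'hello'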
def read_sudoers():
""" Read the non-comment entries from the sudoers file.
returns:
list: entries from /etc/sudoers.
"""
sudoers_path = '/etc/sudoers'
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars)
sudoers_entries = list()
copy_result = execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path))))
result_message = copy_result[0][1].decode('UTF-8')
if 'No such file or directory' not in result_message:
execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path))))
with open(tmp_sudoers_path) as tmp_sudoers_file:
for line in tmp_sudoers_file:
stripped = line.strip().replace(os.linesep, '')
if stripped and not stripped.startswith('#'):
sudoers_entries.append(stripped)
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
return sudoers_entries
def write_sudoers_entry(username=None, sudoers_entry=None):
"""Write a sudoers entry for the specified user.
args:
username (str): username.
sudoers_entry (str): sudoers entry line to write for the user.
"""
sudoers_path = '/etc/sudoers'
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars)
execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path))))
execute_command(
shlex.split(str('{0} chmod 777 {1}'.format(sudo_check(), tmp_sudoers_path))))
with open(tmp_sudoers_path, mode=text_type('r')) as tmp_sudoers_file:
sudoers_entries = tmp_sudoers_file.readlines()
sudoers_output = list()
for entry in sudoers_entries:
if entry and not entry.startswith(username):
sudoers_output.append(entry)
if sudoers_entry:
sudoers_output.append('{0} {1}'.format(username, sudoers_entry))
sudoers_output.append('\n')
with open(tmp_sudoers_path, mode=text_type('w+')) as tmp_sudoers_file:
tmp_sudoers_file.writelines(sudoers_output)
sudoers_check_result = execute_command(
shlex.split(str('{0} {1} -cf {2}'.format(sudo_check(), LINUX_CMD_VISUDO, tmp_sudoers_path))))
if sudoers_check_result[1] > 0:
raise ValueError(sudoers_check_result[0][1])
execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_sudoers_path, sudoers_path))))
execute_command(shlex.split(str('{0} chown root:root {1}'.format(sudo_check(), sudoers_path))))
execute_command(shlex.split(str('{0} chmod 440 {1}'.format(sudo_check(), sudoers_path))))
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
def get_sudoers_entry(username=None, sudoers_entries=None):
""" Find the sudoers entry in the sudoers file for the specified user.
args:
username (str): username.
sudoers_entries (list): list of lines from the sudoers file.
returns:
str: sudoers entry for the specified user.
"""
for entry in sudoers_entries:
if entry.startswith(username):
return entry.replace(username, '').strip()
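# Usage sketch (hypothetical username): read the sudoers file and extract one
# user's entry; assumes passwordless sudo for the copy performed above.
entries = read_sudoers()
entry = get_sudoers_entry('deploy', entries)  # e.g. 'ALL=(ALL) NOPASSWD:ALL'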
def docstring(documentation, prepend=False, join=""):
r"""Prepend or append a string to the current documentation of the function.
This decorator should be robust even if ``func.__doc__`` is None
(for example, if -OO was passed to the interpreter).
Usage::
@docstring('Appended this line')
def func():
"This docstring will have a line below."
pass
>>> print(func.__doc__)
This docstring will have a line below.
Appended this line
Args:
documentation (str): Documentation string that should be added,
appended or prepended to the current documentation string.
prepend (bool): Prepend the documentation string to the current
documentation if ``True`` else append. default=``False``
join (str): String used to separate docstrings. default=``""``
"""
def decorator(func):
current = (func.__doc__ if func.__doc__ else "").strip()
doc = documentation.strip()
new = "\n".join(
[doc, join, current] if prepend else [current, join, doc]
)
lines = len(new.strip().splitlines())
if lines == 1:
# If it's a one liner keep it that way and strip whitespace
func.__doc__ = new.strip()
else:
# Else strip whitespace from the beginning and add a newline
# at the end
func.__doc__ = new.strip() + "\n"
return func
return decorator
def create_admin(user_config_path: str = 'CONFIG.superuser') -> bool:
"""
Creates a superuser from a specified dict/object bundle located at ``user_config_path``.
Skips if the specified object contains no email or no username.
If a user with the specified username already exists and has no usable password it updates user's password with
a specified one.
``user_config_path`` can accept any path to a deep nested object, like dict of dicts,
object of dicts of objects, and so on. Let's assume you have this weird config in your ``settings.py``:
::
class MyConfigObject:
my_var = {
'user': {
'username': 'user',
'password': 'qwe',
'email': '[email protected]',
}
}
local_config = MyConfigObject()
To access the ``'user'`` bundle you have to specify: ``local_config.my_var.user``.
:param user_config_path: dot-separated path to object or dict, default is ``'CONFIG.superuser'``
:return: ``True`` if user has been created, ``False`` otherwise
"""
from django.conf import settings
wf('Creating superuser... ', False)
username, email, password = [
dot_path(settings, '{0}.{1}'.format(user_config_path, 'username')),
dot_path(settings, '{0}.{1}'.format(user_config_path, 'email')),
dot_path(settings, '{0}.{1}'.format(user_config_path, 'password')),
]
if not all([username, email]):
wf('[SKIP: username and email should not be empty]\n')
return False
from django.db import IntegrityError
try:
execute_from_command_line([
'./manage.py', 'createsuperuser',
'--username', username,
'--email', email,
'--noinput'
])
except IntegrityError:
pass
if password:
# after `execute_from_command_line` models are loaded
from django.contrib.auth.models import User
user = User.objects.get(username=username)
# do not change password if it was set before
if not user.has_usable_password():
user.set_password(password)
user.save()
else:
wf('[SKIP update password: password is empty]\n')
wf('[+]\n')
return True
def run_gunicorn(application: WSGIHandler, gunicorn_module_name: str = 'gunicorn_prod'):
"""
Runs gunicorn with a specified config.
:param application: Django uwsgi application
:param gunicorn_module_name: gunicorn settings module name
:return: ``Application().run()``
"""
from gunicorn.app.base import Application
class DjangoApplication(Application):
def init(self, parser, opts, args):
cfg = self.get_config_from_module_name(gunicorn_module_name)
clean_cfg = {}
for k, v in cfg.items():
# Ignore unknown names
if k not in self.cfg.settings:
continue
clean_cfg[k.lower()] = v
return clean_cfg
def load(self) -> WSGIHandler:
return application
return DjangoApplication().run()
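# Sketch of a wsgi entry point (assumes a standard Django project and a
# `gunicorn_prod.py` settings module importable from the python path):
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
run_gunicorn(application, gunicorn_module_name='gunicorn_prod')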
def set_color(
fg=Color.normal,
bg=Color.normal,
fg_dark=False,
bg_dark=False,
underlined=False,
):
"""Set the console color.
>>> set_color(Color.red, Color.blue)
>>> set_color('red', 'blue')
>>> set_color() # returns back to normal
"""
_set_color(fg, bg, fg_dark, bg_dark, underlined)
def cprint(
text,
fg=Color.normal,
bg=Color.normal,
fg_dark=False,
bg_dark=False,
underlined=False,
parse=False,
):
"""Print string to stdout using colored font.
See L{set_color} for more details about colors.
Args:
text (str): Text that needs to be printed.
"""
if parse:
color_re = Color.color_re()
lines = text.splitlines()
count = len(lines)
for i, line in enumerate(lines):
previous = 0
end = len(line)
for match in color_re.finditer(line):
sys.stdout.write(line[previous : match.start()])
d = match.groupdict()
set_color(
d["color"], fg_dark=False if d["dark"] is None else True
)
previous = match.end()
sys.stdout.write(
line[previous:end]
+ ("\n" if (i < (count - 1) or text[-1] == "\n") else "")
)
else:
set_color(fg, bg, fg_dark, bg_dark, underlined)
sys.stdout.write(text)
set_color()
def colorize_output(output, colors, indent=0):
r"""Print output to console using provided color mappings.
Color mapping is dict with regular expressions as key and tuple of two as
values. Key is used to match if line should be colorized and tuple contains
color to be used and boolean value that indicates if dark foreground
is used.
For example:
>>> CLS = {
>>> re.compile(r'^(--- .*)$'): (Color.red, False)
>>> }
will colorize lines that start with '---' to red.
If different parts of line needs to be in different color then dict must be
supplied in colors with keys that are named group from regular expression
and values that are tuples of color and boolean that indicates if dark
foreground is used.
For example:
>>> CLS = {
>>> re.compile(r'^(?P<key>user:\s+)(?P<user>.*)$'): {
>>> 'key': (Color.yellow, True),
>>> 'user': (Color.cyan, False)
>>> }
>>> }
will colorize line 'user: Some user' so that 'user:' part is yellow with
dark foreground and 'Some user' part is cyan without dark foreground.
"""
for line in output.split("\n"):
cprint(" " * indent)
if line == "":
cprint("\n")
continue
for regexp, color_def in colors.items():
if regexp.match(line) is not None:
_colorize_single_line(line, regexp, color_def)
break
else:
cprint("%s\n" % line)
def _colorize_single_line(line, regexp, color_def):
"""Print single line to console with ability to colorize parts of it."""
match = regexp.match(line)
groupdict = match.groupdict()
groups = match.groups()
if not groupdict:
# no named groups, just colorize whole line
color = color_def[0]
dark = color_def[1]
cprint("%s\n" % line, color, fg_dark=dark)
else:
rev_groups = {v: k for k, v in groupdict.items()}
for part in groups:
if part in rev_groups and rev_groups[part] in color_def:
group_name = rev_groups[part]
cprint(
part,
color_def[group_name][0],
fg_dark=color_def[group_name][1],
)
else:
cprint(part)
cprint("\n")
def height(self):
"""Terminal height.
"""
if self.interactive:
if self._height is None:
self._height = self.term.height
return self._height
def clear_last_lines(self, n):
"""Clear last N lines of terminal output.
"""
self.term.stream.write(
self.term.move_up * n + self.term.clear_eos)
self.term.stream.flush()
def overwrite_line(self, n, text):
"""Move back N lines and overwrite line with `text`.
"""
with self._moveback(n):
self.term.stream.write(text)
def move_to(self, n):
"""Move back N lines in terminal.
"""
self.term.stream.write(self.term.move_up * n)
def get(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
required: bool = False,
**kwargs):
"""
Tries to read a ``variable_path`` from each of the passed parsers.
It stops if read was successful and returns a retrieved value.
If none of the parsers contain a value for the specified path it returns ``default``.
:param variable_path: a path to variable in config
:param default: a default value if ``variable_path`` is not present anywhere
:param coerce_type: cast a result to a specified type
:param coercer: perform the type casting with specified callback
:param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result
:param kwargs: additional options to all parsers
:return: **the first successfully read** value from the list of parser instances or ``default``
:raises config.exceptions.RequiredValueIsEmpty: if nothing is read, the ``required``
flag is set, and there's no ``default`` specified
"""
for p in self.parsers:
try:
val = p.get(
variable_path, default=self.sentinel,
coerce_type=coerce_type, coercer=coercer,
**kwargs
)
if val != self.sentinel:
self.enqueue(variable_path, p, val)
return val
except Exception as e:
if not self.silent:
raise
if self.suppress_logs:
continue
self.logger.error('Parser {0} cannot get key `{1}`: {2}'.format(
p.__class__.__name__,
variable_path,
str(e)
))
self.enqueue(variable_path, value=default)
if not default and required:
raise exceptions.RequiredValueIsEmpty(
'No default provided and no value read for `{0}`'.format(variable_path))
return default
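# Sketch: reading settings through a loader with several parsers; `loader` is
# assumed to be built via ConfigLoader.from_env() (see below).
db_host = loader.get('db.host', default='localhost')
db_port = loader.get('db.port', default=5432, coerce_type=int)
secret = loader.get('secret_key', required=True)  # raises if nothing is read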
def import_parsers(parser_modules: t.Iterable[str]) -> t.Generator[t.Type[BaseParser], None, None]:
"""
Resolves and imports all modules specified in ``parser_modules``. Short names from the local scope
are supported (the scope is ``django_docker_helpers.config.backends``).
:param parser_modules: a list of dot-separated module paths
:return: a generator of [probably] :class:`~django_docker_helpers.config.backends.base.BaseParser`
Example:
::
parsers = list(ConfigLoader.import_parsers([
'EnvironmentParser',
'django_docker_helpers.config.backends.YamlParser'
]))
assert parsers == [EnvironmentParser, YamlParser]
"""
for import_path in parser_modules:
path_parts = import_path.rsplit('.', 1)
if len(path_parts) == 2:
mod_path, parser_class_name = path_parts
else:
mod_path = DEFAULT_PARSER_MODULE_PATH
parser_class_name = import_path
yield import_from(mod_path, parser_class_name)
def load_parser_options_from_env(
parser_class: t.Type[BaseParser],
env: t.Optional[t.Dict[str, str]] = None) -> t.Dict[str, t.Any]:
"""
Extracts arguments from ``parser_class.__init__`` and populates them from environment variables.
Uses ``__init__`` argument type annotations for correct type casting.
.. note::
Environment variables should be prefixed with ``<UPPERCASEPARSERCLASSNAME>__``.
:param parser_class: a subclass of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:param env: a dict with environment variables, default is ``os.environ``
:return: parser's ``__init__`` arguments dict mapping
Example:
::
env = {
'REDISPARSER__ENDPOINT': 'go.deep',
'REDISPARSER__HOST': 'my-host',
'REDISPARSER__PORT': '66',
}
res = ConfigLoader.load_parser_options_from_env(RedisParser, env)
assert res == {'endpoint': 'go.deep', 'host': 'my-host', 'port': 66}
"""
env = env or os.environ
sentinel = object()
spec: inspect.FullArgSpec = inspect.getfullargspec(parser_class.__init__)
environment_parser = EnvironmentParser(scope=parser_class.__name__.upper(), env=env)
stop_args = ['self']
safe_types = [int, bool, str]
init_args = {}
for arg_name in spec.args:
if arg_name in stop_args:
continue
type_hint = spec.annotations.get(arg_name)
coerce_type = None
if type_hint in safe_types:
coerce_type = type_hint
elif hasattr(type_hint, '__args__'):
if len(type_hint.__args__) == 1: # one type
if type_hint.__args__[0] in safe_types:
coerce_type = type_hint.__args__[0]
elif len(type_hint.__args__) == 2: # t.Optional
try:
_args = list(type_hint.__args__)
_args.remove(type(None))
if _args[0] in safe_types:
coerce_type = _args[0]
except ValueError:
pass
val = environment_parser.get(arg_name, sentinel, coerce_type=coerce_type)
if val is sentinel:
continue
init_args[arg_name] = val
return init_args
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES,
env: t.Optional[t.Dict[str, str]] = None,
silent: bool = False,
suppress_logs: bool = False,
extra: t.Optional[dict] = None) -> 'ConfigLoader':
"""
Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
"""
env = env or os.environ
extra = extra or {}
environment_parser = EnvironmentParser(scope='config', env=env)
silent = environment_parser.get('silent', silent, coerce_type=bool)
suppress_logs = environment_parser.get('suppress_logs', suppress_logs, coerce_type=bool)
env_parsers = environment_parser.get('parsers', None, coercer=comma_str_to_list)
if not env_parsers and not parser_modules:
raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`')
if env_parsers:
parser_classes = ConfigLoader.import_parsers(env_parsers)
else:
parser_classes = ConfigLoader.import_parsers(parser_modules)
parsers = []
for parser_class in parser_classes:
parser_options = ConfigLoader.load_parser_options_from_env(parser_class, env=env)
_init_args = inspect.getfullargspec(parser_class.__init__).args
# add extra args if parser's __init__ can take them
if 'env' in _init_args:
parser_options['env'] = env
for k, v in extra.items():
if k in _init_args:
parser_options[k] = v
parser_instance = parser_class(**parser_options)
parsers.append(parser_instance)
return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs)
def print_config_read_queue(
self,
use_color: bool = False,
max_col_width: int = 50):
"""
Prints all read (in call order) options.
:param max_col_width: limit column width, ``50`` by default
:param use_color: use terminal colors
:return: nothing
"""
wf(self.format_config_read_queue(use_color=use_color, max_col_width=max_col_width))
wf('\n')
def format_config_read_queue(self,
use_color: bool = False,
max_col_width: int = 50) -> str:
"""
Prepares a string with pretty printed config read queue.
:param use_color: use terminal colors
:param max_col_width: limit column width, ``50`` by default
:return:
"""
try:
from terminaltables import SingleTable
except ImportError:
import warnings
warnings.warn('Cannot display config read queue. Install terminaltables first.')
return ''
col_names_order = ['path', 'value', 'type', 'parser']
pretty_bundles = [[self._colorize(name, name.capitalize(), use_color=use_color)
for name in col_names_order]]
for config_read_item in self.config_read_queue:
pretty_attrs = [
config_read_item.variable_path,
config_read_item.value,
config_read_item.type,
config_read_item.parser_name
]
pretty_attrs = [self._pformat(pa, max_col_width) for pa in pretty_attrs]
if config_read_item.is_default:
pretty_attrs[0] = '*' + pretty_attrs[0]
if use_color:
pretty_attrs = [self._colorize(column_name, pretty_attr, use_color=use_color)
for column_name, pretty_attr in zip(col_names_order, pretty_attrs)]
pretty_bundles.append(pretty_attrs)
table = SingleTable(pretty_bundles)
table.title = self._colorize('title', 'CONFIG READ QUEUE', use_color=use_color)
table.justify_columns[0] = 'right'
# table.inner_row_border = True
return str(table.table)
def get_graph(cls, response):
""" Given a Flask response, find the rdflib Graph """
if cls.is_graph(response): # single graph object
return response
if hasattr(response, '__getitem__'): # indexable tuple
if len(response) > 0 and \
cls.is_graph(response[0]): # graph object
return response[0]
def replace_graph(cls, response, serialized):
""" Replace the rdflib Graph in a Flask response """
if cls.is_graph(response): # single graph object
return serialized
if hasattr(response, '__getitem__'): # indexable tuple
if len(response) > 0 and \
cls.is_graph(response[0]): # graph object
return (serialized,) + response[1:]
return response
def _from_hex_digest(digest):
"""Convert hex digest to sequence of bytes."""
return "".join(
[chr(int(digest[x : x + 2], 16)) for x in range(0, len(digest), 2)]
)
def encrypt(data, digest=True):
"""Perform encryption of provided data."""
alg = get_best_algorithm()
enc = implementations["encryption"][alg](
data, implementations["get_key"]()
)
return "%s$%s" % (alg, (_to_hex_digest(enc) if digest else enc))
def decrypt(data, digest=True):
"""Decrypt provided data."""
alg, _, data = data.rpartition("$")
if not alg:
return data
data = _from_hex_digest(data) if digest else data
try:
return implementations["decryption"][alg](
data, implementations["get_key"]()
)
except KeyError:
raise CryptError("Can not decrypt key for algorithm: %s" % alg)
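# Round-trip sketch (assumes an algorithm implementation and a key getter are
# registered in the `implementations` mapping used above):
token = encrypt('top secret')
assert decrypt(token) == 'top secret'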
def one_greedy(self,dp,namax=None,nimax=None,nomax=None):
"""Reconstructs a directed acyclic graph according to prior information of edge significance.
This function first ranks all edges and introduces the most significant ones one by one, avoiding
those that would create a loop. Optional constraints on the maximum total number of edges,
the number of incoming or outgoing edges for every gene can be specified.
dp: numpy.ndarray(nt,nt,dtype=ftype(='f4' by default))
Prior information of edge significance levels. Entry dp[i,j] is significance of edge i to j.
A larger values indicates the edge's presence is more probable.
One option to obtain the prior information is to use pairwise inference methods in findr.
namax: Constraint on the maximum total number of edges in the reconstructed network.
nimax: Constraint on the maximum number of incoming edges for each node in the reconstructed
network.
nomax: Constraint on the maximum number of outgoing edges for each node in the reconstructed
network.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
net: numpy.ndarray((nt,nt),dtype=bool). The reconstructed directed acyclic graph or network
net[i,j]=True if an edge from i to j exists in the reconstructed network, and False otherwise.
ftype and gtype can be found in auto.py.
Example: see findr.examples.geuvadis7
"""
try: from exceptions import ValueError
except ImportError: pass
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np
from .types import isint
if dp.dtype.char!=ftype_np:
raise ValueError('Wrong input dtype for prior matrix')
if len(dp.shape)!=2:
raise ValueError('Wrong input shape')
if not (namax is None or isint(namax)):
raise ValueError('Wrong namax type')
if namax is not None and namax<=0:
raise ValueError('Input requires namax>0.')
if namax is None:
namax=-1
if not (nimax is None or isint(nimax)):
raise ValueError('Wrong nimax type')
if nimax is not None and nimax<=0:
raise ValueError('Input requires nimax>0.')
if nimax is None:
nimax=-1
if not (nomax is None or isint(nomax)):
raise ValueError('Wrong nomax type')
if nomax is not None and nomax<=0:
raise ValueError('Input requires nomax>0.')
if nomax is None:
nomax=-1
nt=dp.shape[0]
if nt==0:
raise ValueError('Invalid prior dimension')
if dp.shape[1]!=nt:
raise ValueError('Wrong input shape')
if np.isnan(dp).sum()>0:
raise ValueError('NaN found.')
func=self.cfunc('netr_one_greedy',rettype='size_t',argtypes=['const MATRIXF*','MATRIXUC*','size_t','size_t','size_t'])
d=np.require(np.zeros((nt,nt),dtype='u1'),requirements=['A','C','O','W'])
dpr=np.require(dp,requirements=['A','C','O','W'])
ret=func(dpr,d,namax,nimax,nomax)
d=d.astype(bool)
ret=(ret==0)
ans={'ret':ret,'net':d}
return ans
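# Sketch: reconstruct a DAG from a random prior matrix (hypothetical setup;
# `lib` stands in for an initialized library object exposing one_greedy).
import numpy as np
dp = np.random.rand(10, 10).astype('f4')
ans = lib.one_greedy(dp, namax=20, nimax=3, nomax=3)
assert ans['ret'] and ans['net'].shape == (10, 10)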
def gen_vocab(cli, args):
''' Generate vocabulary list from a tokenized file '''
if args.topk and args.topk <= 0:
topk = None
cli.logger.warning("Invalid k will be ignored (k should be greater than or equal to 1)")
else:
topk = args.topk
if args.stopwords:
with open(args.stopwords, 'r') as swfile:
stopwords = swfile.read().splitlines()
else:
stopwords = []
if os.path.isfile(args.input):
cli.logger.info("Generating vocabulary list from file {}".format(args.input))
with codecs.open(args.input, encoding='utf-8') as infile:
if args.output:
cli.logger.info("Output: {}".format(args.output))
rp = TextReport(args.output)
lines = infile.read().splitlines()
c = Counter()
for line in lines:
words = line.split()
c.update(w for w in words if w not in stopwords)
# report vocab
word_freq = c.most_common(topk)
words = [k for k, v in word_freq]
rp.header("Lexicon")
rp.writeline("\n".join(textwrap.wrap(" ".join(w for w in words), width=70)))
for k, v in word_freq:
rp.print("{}: {}".format(k, v))
else:
cli.logger.warning("File {} does not exist".format(args.input))
def main():
''' ChirpText Tools main function '''
app = CLIApp(desc='ChirpText Tools', logger=__name__, show_version=show_version)
# add tasks
vocab_task = app.add_task('vocab', func=gen_vocab)
vocab_task.add_argument('input', help='Input file')
vocab_task.add_argument('--output', help='Output file', default=None)
vocab_task.add_argument('--stopwords', help='Stop word to ignore', default=None)
vocab_task.add_argument('-k', '--topk', help='Only select the top k frequent elements', default=None, type=int)
# run app
app.run()
def add_attachment(message, attachment, rfc2231=True):
'''Attach an attachment to a message as a side effect.
Arguments:
message: MIMEMultipart instance.
attachment: Attachment instance.
'''
data = attachment.read()
part = MIMEBase('application', 'octet-stream')
part.set_payload(data)
encoders.encode_base64(part)
filename = attachment.name if rfc2231 else Header(attachment.name).encode()
part.add_header('Content-Disposition', 'attachment',
filename=filename)
message.attach(part)
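# Sketch: build a multipart message and attach a file (the `Attachment`
# constructor is hypothetical; the type only needs `.read()` and `.name`).
from email.mime.multipart import MIMEMultipart
msg = MIMEMultipart()
add_attachment(msg, Attachment('report.pdf'))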
def _login(self):
'''Login to the SMTP server specified at instantiation
Returns an authenticated SMTP instance.
'''
server, port, mode, debug = self.connection_details
if mode == 'SSL':
smtp_class = smtplib.SMTP_SSL
else:
smtp_class = smtplib.SMTP
smtp = smtp_class(server, port)
smtp.set_debuglevel(debug)
if mode == 'TLS':
smtp.starttls()
self.authenticate(smtp)
return smtp
def send(self, email, attachments=()):
'''Send an email. Connect/Disconnect if not already connected
Arguments:
email: Email instance to send.
attachments: iterable containing Attachment instances
'''
msg = email.as_mime(attachments)
if 'From' not in msg:
msg['From'] = self.sender_address()
if self._conn:
self._conn.sendmail(self.username, email.recipients,
msg.as_string())
else:
with self:
self._conn.sendmail(self.username, email.recipients,
msg.as_string())
def column_types(self):
"""Return a dict mapping column name to type for all columns in table
"""
column_types = {}
for c in self.sqla_columns:
column_types[c.name] = c.type
return column_types
def _valid_table_name(self, table_name):
"""Check if the table name is obviously invalid.
"""
if table_name is None or not len(table_name.strip()):
raise ValueError("Invalid table name: %r" % table_name)
return table_name.strip()
def add_primary_key(self, column="id"):
"""Add primary key constraint to specified column
"""
if not self.primary_key:
sql = """ALTER TABLE {s}.{t}
ADD PRIMARY KEY ({c})
""".format(
s=self.schema, t=self.name, c=column
)
self.db.execute(sql)
def drop(self):
"""Drop the table from the database
"""
if self._is_dropped is False:
self.table.drop(self.engine)
self._is_dropped = True
def create_column(self, name, type):
"""
Explicitly create a new column ``name`` of a specified type.
``type`` must be a `SQLAlchemy column type <http://docs.sqlalchemy.org/en/rel_0_8/core/types.html>`_.
::
table.create_column('created_at', sqlalchemy.DateTime)
"""
self._check_dropped()
if normalize_column_name(name) not in self._normalized_columns:
self.op.add_column(self.table.name, Column(name, type), self.table.schema)
self.table = self._update_table(self.table.name)
def drop_column(self, name):
"""
Drop the column ``name``
::
table.drop_column('created_at')
"""
self._check_dropped()
if name in list(self.table.columns.keys()):
self.op.drop_column(self.table.name, name, schema=self.schema)
self.table = self._update_table(self.table.name) | Drop the column ``name``
::
table.drop_column('created_at') | entailment |
def create_index(self, columns, name=None, index_type="btree"):
"""
Create an index to speed up queries on a table.
If no ``name`` is given a random name is created.
::
table.create_index(['name', 'country'])
"""
self._check_dropped()
if not name:
sig = "||".join(columns + [index_type])
# This is a work-around for a bug in <=0.6.1 which would create
# indexes based on hash() rather than a proper hash.
key = abs(hash(sig))
name = "ix_%s_%s" % (self.table.name, key)
if name in self.indexes:
return self.indexes[name]
key = sha1(sig.encode("utf-8")).hexdigest()[:16]
name = "ix_%s_%s" % (self.table.name, key)
if name in self.indexes:
return self.indexes[name]
# self.db._acquire()
columns = [self.table.c[col] for col in columns]
idx = Index(name, *columns, postgresql_using=index_type)
idx.create(self.engine)
# finally:
# self.db._release()
self.indexes[name] = idx
return idx | Create an index to speed up queries on a table.
If no ``name`` is given a random name is created.
::
table.create_index(['name', 'country']) | entailment |
def distinct(self, *columns, **_filter):
"""
Returns all rows of a table, but removes rows with duplicate values in ``columns``.
Internally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China')
"""
self._check_dropped()
qargs = []
try:
columns = [self.table.c[c] for c in columns]
for col, val in _filter.items():
qargs.append(self.table.c[col] == val)
except KeyError:
return []
q = expression.select(
columns,
distinct=True,
whereclause=and_(*qargs),
order_by=[c.asc() for c in columns],
)
# if just looking at one column, return a simple list
if len(columns) == 1:
return itertools.chain.from_iterable(self.engine.execute(q))
# otherwise return specified row_type
else:
return ResultIter(self.engine.execute(q), row_type=self.db.row_type) | Returns all rows of a table, but removes rows in with duplicate values in ``columns``.
Interally this creates a `DISTINCT statement <http://www.w3schools.com/sql/sql_distinct.asp>`_.
::
# returns only one row per year, ignoring the rest
table.distinct('year')
# works with multiple columns, too
table.distinct('year', 'country')
# you can also combine this with a filter
table.distinct('year', country='China') | entailment |
def insert(self, row):
"""
Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key.
"""
self._check_dropped()
res = self.engine.execute(self.table.insert(row))
if len(res.inserted_primary_key) > 0:
return res.inserted_primary_key[0] | Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key. | entailment |
def insert_many(self, rows, chunk_size=1000):
"""
Add many rows at a time, which is significantly faster than adding
them one by one. By default the rows are processed in chunks of
1000 per commit, unless you specify a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows)
"""
def _process_chunk(chunk):
self.table.insert().execute(chunk)
self._check_dropped()
chunk = []
for i, row in enumerate(rows, start=1):
chunk.append(row)
if i % chunk_size == 0:
_process_chunk(chunk)
chunk = []
if chunk:
_process_chunk(chunk) | Add many rows at a time, which is significantly faster than adding
them one by one. By default the rows are processed in chunks of
1000 per commit, unless you specify a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows) | entailment |
def rename(self, name):
"""Rename the table
"""
sql = """ALTER TABLE {s}.{t} RENAME TO {name}
""".format(
s=self.schema, t=self.name, name=name
)
self.engine.execute(sql)
self.table = SQLATable(name, self.metadata, schema=self.schema, autoload=True) | Rename the table | entailment |
def find_one(self, **kwargs):
"""
Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States')
"""
kwargs["_limit"] = 1
iterator = self.find(**kwargs)
try:
return next(iterator)
except StopIteration:
return None | Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or None.
::
row = table.find_one(country='United States') | entailment |
def find(
self,
_limit=None,
_offset=0,
_step=5000,
order_by="id",
return_count=False,
**_filter
):
"""
Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
By default :py:meth:`find() <dataset.Table.find>` will break the
query into chunks of ``_step`` rows to prevent huge tables
from being loaded into memory at once.
For more complex queries, please use :py:meth:`db.query()`
instead."""
self._check_dropped()
if not isinstance(order_by, (list, tuple)):
order_by = [order_by]
order_by = [
o
for o in order_by
if (o.startswith("-") and o[1:] or o) in self.table.columns
]
order_by = [self._args_to_order_by(o) for o in order_by]
args = self._args_to_clause(_filter)
# query total number of rows first
count_query = alias(
self.table.select(whereclause=args, limit=_limit, offset=_offset),
name="count_query_alias",
).count()
rp = self.engine.execute(count_query)
total_row_count = rp.fetchone()[0]
if return_count:
return total_row_count
if _limit is None:
_limit = total_row_count
if _step is None or _step is False or _step == 0:
_step = total_row_count
if total_row_count > _step and not order_by:
_step = total_row_count
log.warning(
"query cannot be broken into smaller sections because it is unordered"
)
queries = []
for i in count():
qoffset = _offset + (_step * i)
qlimit = min(_limit - (_step * i), _step)
if qlimit <= 0:
break
queries.append(
self.table.select(
whereclause=args, limit=qlimit, offset=qoffset, order_by=order_by
)
)
return ResultIter(
(self.engine.execute(q) for q in queries), row_type=self.db.row_type
) | Performs a simple search on the table. Simply pass keyword arguments as ``filter``.
::
results = table.find(country='France')
results = table.find(country='France', year=1980)
Using ``_limit``::
# just return the first 10 rows
results = table.find(country='France', _limit=10)
You can sort the results by single or multiple columns. Append a minus sign
to the column name for descending order::
# sort results by a column 'year'
results = table.find(country='France', order_by='year')
# return all rows sorted by multiple columns (by year in descending order)
results = table.find(order_by=['country', '-year'])
By default :py:meth:`find() <dataset.Table.find>` will break the
query into chunks of ``_step`` rows to prevent huge tables
from being loaded into memory at once.
For more complex queries, please use :py:meth:`db.query()`
instead. | entailment |
def connect(url=None, schema=None, sql_path=None, multiprocessing=False):
"""Open a new connection to postgres via psycopg2/sqlalchemy
"""
if url is None:
url = os.environ.get("DATABASE_URL")
return Database(url, schema, sql_path=sql_path, multiprocessing=multiprocessing) | Open a new connection to postgres via psycopg2/sqlalchemy | entailment |
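For example (the connection URL below is a placeholder):
import os

db = connect('postgresql://user:pass@localhost:5432/mydb', schema='public')
# or rely on the DATABASE_URL environment variable:
os.environ['DATABASE_URL'] = 'postgresql://user:pass@localhost:5432/mydb'
db = connect()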
def create_db(url=None):
"""Create a new database
"""
if url is None:
url = os.environ.get("DATABASE_URL")
parsed_url = urlparse(url)
db_name = parsed_url.path
db_name = db_name.strip("/")
db = connect("postgresql://" + parsed_url.netloc)
# check that db does not exist
q = """SELECT 1 as exists
FROM pg_database
WHERE datname = '{db_name}'""".format(
db_name=db_name
)
if not db.query(q).fetchone():
# CREATE DATABASE must be run outside of a transaction
# https://stackoverflow.com/questions/6506578/how-to-create-a-new-database-using-sqlalchemy
conn = db.engine.connect()
conn.execute("commit")
conn.execute("CREATE DATABASE " + db_name)
conn.close() | Create a new database | entailment |
def drop_db(url):
"""Drop specified database
"""
parsed_url = urlparse(url)
db_name = parsed_url.path
db_name = db_name.strip("/")
db = connect("postgresql://" + parsed_url.netloc)
# check that db exists
q = """SELECT 1 as exists
FROM pg_database
WHERE datname = '{db_name}'""".format(
db_name=db_name
)
if db.query(q).fetchone():
# DROP DATABASE must be run outside of a transaction
conn = db.engine.connect()
conn.execute("commit")
conn.execute("DROP DATABASE " + db_name)
conn.close() | Drop specified database | entailment |
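A sketch combining the two helpers above (placeholder URL):
url = 'postgresql://user:pass@localhost:5432/scratch_db'
create_db(url)  # no-op if scratch_db already exists
drop_db(url)    # removes it again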
def shred(key_name: str,
value: t.Any,
field_names: t.Iterable[str] = SHRED_DATA_FIELD_NAMES) -> t.Union[t.Any, str]:
"""
Replaces sensitive data in ``value`` with ``*`` if ``key_name`` contains something that looks like a secret.
:param field_names: a list of key names that can possibly contain sensitive data
:param key_name: a key name to check
:param value: a value to mask
:return: an unchanged value if nothing to hide, ``'*' * len(str(value))`` otherwise
"""
key_name = key_name.lower()
need_shred = False
for data_field_name in field_names:
if data_field_name in key_name:
need_shred = True
break
if not need_shred:
return value
return '*' * len(str(value)) | Replaces sensitive data in ``value`` with ``*`` if ``key_name`` contains something that looks like a secret.
:param field_names: a list of key names that can possibly contain sensitive data
:param key_name: a key name to check
:param value: a value to mask
:return: an unchanged value if nothing to hide, ``'*' * len(str(value))`` otherwise | entailment |
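For example, assuming ``SHRED_DATA_FIELD_NAMES`` includes ``'password'``:
shred('user_password', 'hunter2')  # -> '*******' (masked, same length)
shred('city', 'London')            # -> 'London' (unchanged)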
def dot_path(obj: t.Union[t.Dict, object],
path: str,
default: t.Any = None,
separator: str = '.'):
"""
Provides an access to elements of a mixed dict/object type by a delimiter-separated path.
::
class O1:
my_dict = {'a': {'b': 1}}
class O2:
def __init__(self):
self.nested = O1()
class O3:
final = O2()
o = O3()
assert utils.dot_path(o, 'final.nested.my_dict.a.b') == 1
.. testoutput::
True
:param obj: object or dict
:param path: path to value
:param default: default value if chain resolve failed
:param separator: ``.`` by default
:return: value or default
"""
path_items = path.split(separator)
val = obj
sentinel = object()
for item in path_items:
if isinstance(val, dict):
val = val.get(item, sentinel)
if val is sentinel:
return default
else:
val = getattr(val, item, sentinel)
if val is sentinel:
return default
return val | Provides an access to elements of a mixed dict/object type by a delimiter-separated path.
::
class O1:
my_dict = {'a': {'b': 1}}
class O2:
def __init__(self):
self.nested = O1()
class O3:
final = O2()
o = O3()
assert utils.dot_path(o, 'final.nested.my_dict.a.b') == 1
.. testoutput::
True
:param obj: object or dict
:param path: path to value
:param default: default value if chain resolve failed
:param separator: ``.`` by default
:return: value or default | entailment |
def dotkey(obj: dict, path: str, default=None, separator='.'):
"""
Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``.
:param obj: dict like ``{'some': {'value': 3}}``
:param path: ``'some.value'``
:param separator: ``'.'`` or ``'/'`` or whatever
:param default: default for KeyError
:return: dict value or default value
"""
try:
return get(obj, path, separator=separator)
except KeyError:
return default | Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``.
:param obj: dict like ``{'some': {'value': 3}}``
:param path: ``'some.value'``
:param separator: ``'.'`` or ``'/'`` or whatever
:param default: default for KeyError
:return: dict value or default value | entailment |
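For example:
data = {'some': {'value': 3}}
dotkey(data, 'some.value')               # -> 3
dotkey(data, 'some.missing', default=0)  # -> 0 (KeyError swallowed)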
def _materialize_dict(bundle: dict, separator: str = '.') -> t.Generator[t.Tuple[str, t.Any], None, None]:
"""
Traverses and transforms a given dict ``bundle`` into tuples of ``(key_path, value)``.
:param bundle: a dict to traverse
:param separator: build paths with a given separator
:return: a generator of tuples ``(materialized_path, value)``
Example:
>>> list(_materialize_dict({'test': {'path': 1}, 'key': 'val'}, '.'))
[('key', 'val'), ('test.path', 1)]
"""
for path_prefix, v in bundle.items():
if not isinstance(v, dict):
yield str(path_prefix), v
continue
for nested_path, nested_val in _materialize_dict(v, separator=separator):
yield '{0}{1}{2}'.format(path_prefix, separator, nested_path), nested_val | Traverses and transforms a given dict ``bundle`` into tuples of ``(key_path, value)``.
:param bundle: a dict to traverse
:param separator: build paths with a given separator
:return: a generator of tuples ``(materialized_path, value)``
Example:
>>> list(_materialize_dict({'test': {'path': 1}, 'key': 'val'}, '.'))
[('key', 'val'), ('test.path', 1)] | entailment
def materialize_dict(bundle: dict, separator: str = '.') -> t.List[t.Tuple[str, t.Any]]:
"""
Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', <value>)``. Output is ordered by depth: the deepest element first.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:return: a list sorted by depth descending, then alphabetically ascending (-deep, asc); the deepest paths come first
::
sample = {
'a': 1,
'aa': 1,
'b': {
'c': 1,
'b': 1,
'a': 1,
'aa': 1,
'aaa': {
'a': 1
}
}
}
materialize_dict(sample, '/')
[
('b/aaa/a', 1),
('b/a', 1),
('b/aa', 1),
('b/b', 1),
('b/c', 1),
('a', 1),
('aa', 1)
]
"""
def _matkeysort(tup: t.Tuple[str, t.Any]):
return len(tup[0].split(separator))
s1 = sorted(_materialize_dict(bundle, separator=separator), key=lambda x: x[0])
return sorted(s1, key=_matkeysort, reverse=True) | Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', <value>)``. Output is ordered by depth: the deepest element first.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:return: a list sorted by depth descending, then alphabetically ascending (-deep, asc); the deepest paths come first
::
sample = {
'a': 1,
'aa': 1,
'b': {
'c': 1,
'b': 1,
'a': 1,
'aa': 1,
'aaa': {
'a': 1
}
}
}
materialize_dict(sample, '/')
[
('b/aaa/a', 1),
('b/a', 1),
('b/aa', 1),
('b/b', 1),
('b/c', 1),
('a', 1),
('aa', 1)
] | entailment |
def mp_serialize_dict(
bundle: dict,
separator: str = '.',
serialize: t.Optional[t.Callable] = dump_yaml,
value_prefix: str = '::YAML::\n') -> t.List[t.Tuple[str, bytes]]:
"""
Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', b'<some>')``. If the ``<some>`` value is not an instance of a basic type, it's serialized
with ``serialize`` callback. If this value is an empty string, it's serialized anyway to enforce correct
type if storage backend does not support saving empty strings.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:param serialize: a method to serialize non-basic types, default is ``yaml.dump``
:param value_prefix: a prefix for non-basic serialized types
:return: a list of tuples ``(mat_path, b'value')``
::
sample = {
'bool_flag': '', # flag
'unicode': 'вася',
'none_value': None,
'debug': True,
'mixed': ['ascii', 'юникод', 1, {'d': 1}, {'b': 2}],
'nested': {
'a': {
'b': 2,
'c': b'bytes',
}
}
}
result = mp_serialize_dict(sample, separator='/')
assert result == [
('nested/a/b', b'2'),
('nested/a/c', b'bytes'),
('bool_flag', b"::YAML::\\n''\\n"),
('debug', b'true'),
('mixed', b'::YAML::\\n- ascii\\n- '
b'"\\\\u044E\\\\u043D\\\\u0438\\\\u043A\\\\u043E\\\\u0434"\\n- 1\\n- '
b'{d: 1}\\n- {b: 2}\\n'),
('none_value', None),
('unicode', b'\\xd0\\xb2\\xd0\\xb0\\xd1\\x81\\xd1\\x8f')
]
"""
md = materialize_dict(bundle, separator=separator)
res = []
for path, value in md:
# have to serialize values (value should be None or a string / binary data)
if value is None:
pass
elif isinstance(value, str) and value != '':
# check for value != '' used to armor empty string with forced serialization
# since it can be not recognized by a storage backend
pass
elif isinstance(value, bytes):
pass
elif isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, (int, float, Decimal)):
value = str(value)
else:
value = (value_prefix + serialize(value))
if isinstance(value, str):
value = value.encode()
res.append((path, value))
return res | Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', b'<some>')``. If the ``<some>`` value is not an instance of a basic type, it's serialized
with ``serialize`` callback. If this value is an empty string, it's serialized anyway to enforce correct
type if storage backend does not support saving empty strings.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:param serialize: a method to serialize non-basic types, default is ``yaml.dump``
:param value_prefix: a prefix for non-basic serialized types
:return: a list of tuples ``(mat_path, b'value')``
::
sample = {
'bool_flag': '', # flag
'unicode': 'вася',
'none_value': None,
'debug': True,
'mixed': ['ascii', 'юникод', 1, {'d': 1}, {'b': 2}],
'nested': {
'a': {
'b': 2,
'c': b'bytes',
}
}
}
result = mp_serialize_dict(sample, separator='/')
assert result == [
('nested/a/b', b'2'),
('nested/a/c', b'bytes'),
('bool_flag', b"::YAML::\\n''\\n"),
('debug', b'true'),
('mixed', b'::YAML::\\n- ascii\\n- '
b'"\\\\u044E\\\\u043D\\\\u0438\\\\u043A\\\\u043E\\\\u0434"\\n- 1\\n- '
b'{d: 1}\\n- {b: 2}\\n'),
('none_value', None),
('unicode', b'\\xd0\\xb2\\xd0\\xb0\\xd1\\x81\\xd1\\x8f')
] | entailment |
def wf(raw_str: str,
flush: bool = True,
prevent_completion_polluting: bool = True,
stream: t.TextIO = sys.stdout):
"""
Writes a given ``raw_str`` into a ``stream``. Ignores output if ``prevent_completion_polluting`` is set and there are
no extra ``sys.argv`` arguments present (a bash completion issue).
:param raw_str: a raw string to print
:param flush: execute ``flush()``
:param prevent_completion_polluting: don't write anything if ``len(sys.argv) <= 1``
:param stream: ``sys.stdout`` by default
:return: None
"""
if prevent_completion_polluting and len(sys.argv) <= 1:
return
stream.write(raw_str)
flush and hasattr(stream, 'flush') and stream.flush() | Writes a given ``raw_str`` into a ``stream``. Ignores output if ``prevent_completion_polluting`` is set and there are
no extra ``sys.argv`` arguments present (a bash completion issue).
:param raw_str: a raw string to print
:param flush: execute ``flush()``
:param prevent_completion_polluting: don't write anything if ``len(sys.argv) <= 1``
:param stream: ``sys.stdout`` by default
:return: None | entailment |
def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
"""
Converts a given string ``val`` into a boolean.
:param val: any string representation of boolean
:param strict: raise ``ValueError`` if ``val`` does not look like a boolean
:return: ``True`` if ``val`` is truthy, ``False`` otherwise.
:raises ValueError: if ``strict`` is specified and ``val`` is anything except
``['', 0, 1, true, false, on, off, True, False]``
"""
if isinstance(val, str):
val = val.lower()
flag = ENV_STR_BOOL_COERCE_MAP.get(val, None)
if flag is not None:
return flag
if strict:
raise ValueError('Unsupported value for boolean flag: `%s`' % val)
return bool(val) | Converts a given string ``val`` into a boolean.
:param val: any string representation of boolean
:param strict: raise ``ValueError`` if ``val`` does not look like a boolean
:return: ``True`` if ``val`` is truthy, ``False`` otherwise.
:raises ValueError: if ``strict`` is specified and ``val`` is anything except
``['', 0, 1, true, false, on, off, True, False]`` | entailment
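A few illustrative calls, assuming ``ENV_STR_BOOL_COERCE_MAP`` covers the tokens listed above:
coerce_str_to_bool('true')                # -> True
coerce_str_to_bool('off')                 # -> False
coerce_str_to_bool('')                    # -> True (flag present, no value; assumed mapping)
coerce_str_to_bool('maybe', strict=True)  # raises ValueError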
def env_bool_flag(flag_name: str, strict: bool = False, env: t.Optional[t.Dict[str, str]] = None) -> bool:
"""
Converts an environment variable into a boolean. Empty string (presence in env) is treated as ``True``.
:param flag_name: an environment variable name
:param strict: raise ``ValueError`` if a ``flag_name`` value cannot be coerced into a boolean in an obvious way
:param env: a dict with environment variables, default is ``os.environ``
:return: ``True`` if ``flag_name`` is truthy, ``False`` otherwise.
:raises ValueError: if ``strict`` is specified and ``val`` is anything except ``['', 0, 1, true, false, True, False]``
"""
env = env or os.environ
sentinel = object()
val = env.get(flag_name, sentinel)
if val is sentinel:
return False
return coerce_str_to_bool(val, strict=strict) | Converts an environment variable into a boolean. Empty string (presence in env) is treated as ``True``.
:param flag_name: an environment variable name
:param strict: raise ``ValueError`` if a ``flag_name`` value cannot be coerced into a boolean in an obvious way
:param env: a dict with environment variables, default is ``os.environ``
:return: ``True`` if ``flag_name`` is truthy, ``False`` otherwise.
:raises ValueError: if ``strict`` is specified and ``val`` is anything except ``['', 0, 1, true, false, True, False]`` | entailment
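For example:
import os

os.environ['DEBUG'] = ''                        # flag present, no value
env_bool_flag('DEBUG')                          # -> True
env_bool_flag('MISSING_FLAG')                   # -> False (not set at all)
env_bool_flag('DEBUG', env={'DEBUG': 'false'})  # -> False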
def run_env_once(f: t.Callable) -> t.Callable:
"""
A decorator to prevent ``manage.py`` from running the decorated code twice.
(https://stackoverflow.com/questions/16546652/why-does-django-run-everything-twice)
:param f: function or method to decorate
:return: callable
"""
@wraps(f)
def wrapper(*args, **kwargs):
has_run = os.environ.get(wrapper.__name__)
if has_run == '1':
return
result = f(*args, **kwargs)
os.environ[wrapper.__name__] = '1'
return result
return wrapper | A decorator to prevent ``manage.py`` from running the decorated code twice.
(https://stackoverflow.com/questions/16546652/why-does-django-run-everything-twice)
:param f: function or method to decorate
:return: callable | entailment |
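A usage sketch; the fixture loader below is hypothetical:
@run_env_once
def load_fixtures():
    print('loading fixtures')

load_fixtures()  # runs and sets os.environ['load_fixtures'] = '1'
load_fixtures()  # skipped on the second (autoreload) pass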
def is_dockerized(flag_name: str = 'DOCKERIZED', strict: bool = False):
"""
Reads env ``DOCKERIZED`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if the ``DOCKERIZED`` env variable is truthy, ``False`` otherwise
"""
return env_bool_flag(flag_name, strict=strict) | Reads env ``DOCKERIZED`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if the ``DOCKERIZED`` env variable is truthy, ``False`` otherwise | entailment
def is_production(flag_name: str = 'PRODUCTION', strict: bool = False):
"""
Reads env ``PRODUCTION`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if the ``PRODUCTION`` env variable is truthy, ``False`` otherwise
"""
return env_bool_flag(flag_name, strict=strict) | Reads env ``PRODUCTION`` variable as a boolean.
:param flag_name: environment variable name
:param strict: raise a ``ValueError`` if variable does not look like a normal boolean
:return: ``True`` if the ``PRODUCTION`` env variable is truthy, ``False`` otherwise | entailment
def load_geuvadis_data():
"""This function loads downsampled data files from the Geuvadis study (Lappalainen, T. et al. Transcriptome and genome sequencing uncovers functional variation in humans. Nature 501, 506-511 (2013)), including expression levels of 10 miRNAs and 3000 genes for 360 European individuals. Among them, all miRNAs and 1000 genes have significant cis-eQTLs, whose haplotypes are also included. File data formats follow Findr's binary interface input/output requirement. A description of each file is available below:
dmi.dat: Expression levels of 10 miRNAs
dgmi.dat: Haplotypes of cis-eQTLs of 10 miRNAs
dc.dat: Continuous causal anchors for demonstration purposes, simulated from adding continuous noise to dgmi.dat
dt.dat: Expression levels of 1000 genes that have cis-eQTLs
dt2.dat: Expression levels of 3000 genes
dgt.dat: Haplotypes of cis-eQTLs of 1000 genes
namest.txt: 3000 gene names"""
from os.path import dirname,join
from .auto import gtype_np,ftype_np
import numpy as np
def getdata(name,dtype,shape):
d=join(dirname(__file__),'data','geuvadis',name)
d=np.fromfile(d,dtype=dtype)
d=d.reshape(*shape)
return d
ans={'dc':getdata('dc.dat',ftype_np,(10,360)),
'dgmi':getdata('dgmi.dat',gtype_np,(10,360)),
'dmi':getdata('dmi.dat',ftype_np,(10,360)),
'dgt':getdata('dgt.dat',gtype_np,(1000,360)),
'dt':getdata('dt.dat',ftype_np,(1000,360)),
'dt2':getdata('dt2.dat',ftype_np,(3000,360))}
f=open(join(dirname(__file__),'data','geuvadis','namest.txt'),'r')
namest=[x.strip('\r\n') for x in f.readlines()]
f.close()
ans['namest']=namest
return ans | This function loads downsampled data files from the Geuvadis study (Lappalainen, T. et al. Transcriptome and genome sequencing uncovers functional variation in humans. Nature 501, 506-511 (2013)), including expression levels of 10 miRNAs and 3000 genes for 360 European individuals. Among them, all miRNAs and 1000 genes have significant cis-eQTLs, whose haplotypes are also included. File data formats follow Findr's binary interface input/output requirement. A description of each file is available below:
dmi.dat: Expression levels of 10 miRNAs
dgmi.dat: Haplotypes of cis-eQTLs of 10 miRNAs
dc.dat: Continuous causal anchors for demonstration purposes, simulated from adding continuous noise to dgmi.dat
dt.dat: Expression levels of 1000 genes that have cis-eQTLs
dt2.dat: Expression levels of 3000 genes
dgt.dat: Haplotypes of cis-eQTLs of 1000 genes
namest.txt: 3000 gene names | entailment |
def runcode(code):
"""Run the given code line by line with printing, as list of lines, and return variable 'ans'."""
for line in code:
print('# '+line)
exec(line,globals())
print('# return ans')
return ans | Run the given code (a list of lines) line by line, printing each line, and return the variable 'ans'. | entailment
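For example, a minimal sketch (``ans`` must be assigned by one of the executed lines):
runcode(['x = 2', 'ans = x * 21'])  # prints each line, then returns 42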
def signal(*args, **kwargs):
from .core import Signal
"""A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc'''
"""
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return Signal(fvalidation=args[0])
else:
sig = Signal(*args, **kwargs)
def wrapper(fvalidation):
sig._set_fvalidation(fvalidation)
return sig
return wrapper | A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc''' | entailment |
def exec_all_endpoints(self, *args, **kwargs):
"""Execute each passed endpoint and collect the results. If a result
is another `MultipleResults`, it will extend the results with those
contained therein. If the result is `NoResult`, skip the addition."""
results = []
for handler in self.endpoints:
if isinstance(handler, weakref.ref):
handler = handler()
if self.adapt_params:
bind = self._adapt_call_params(handler, args, kwargs)
res = handler(*bind.args, **bind.kwargs)
else:
res = handler(*args, **kwargs)
if isinstance(res, MultipleResults):
if res.done:
results += res.results
else:
results += res._results
elif res is not NoResult:
results.append(res)
return MultipleResults(results, concurrent=self.concurrent, owner=self) | Execute each passed endpoint and collect the results. If a result
is another `MultipleResults`, it will extend the results with those
contained therein. If the result is `NoResult`, skip the addition. | entailment |
def run(self, *args, **kwargs):
"""Call all the registered handlers with the arguments passed.
If this signal is a class member, call also the handlers registered
at class-definition time. If an external publish function is
supplied, call it with the provided arguments.
:returns: an instance of `~.utils.MultipleResults`
"""
if self.fvalidation is not None:
try:
if self.fvalidation(*args, **kwargs) is False:
raise ExecutionError("Validation returned ``False``")
except Exception as e:
if __debug__:
logger.exception("Validation failed")
else:
logger.error("Validation failed")
raise ExecutionError(
"The validation of the arguments specified to ``run()`` "
"has failed") from e
try:
if self.exec_wrapper is None:
return self.exec_all_endpoints(*args, **kwargs)
else:
# if an exec wrapper is defined, defer notification to it,
# a callback to execute the default notification process
result = self.exec_wrapper(self.endpoints,
self.exec_all_endpoints,
*args, **kwargs)
if inspect.isawaitable(result):
result = pull_result(result)
return result
except Exception as e:
if __debug__:
logger.exception("Error while executing handlers")
else:
logger.error("Error while executing handlers")
raise ExecutionError("Error while executing handlers") from e | Call all the registered handlers with the arguments passed.
If this signal is a class member, call also the handlers registered
at class-definition time. If an external publish function is
supplied, call it with the provided arguments.
:returns: an instance of `~.utils.MultipleResults` | entailment |
def login_defs():
"""Discover the minimum and maximum UID number."""
uid_min = None
uid_max = None
login_defs_path = '/etc/login.defs'
if os.path.exists(login_defs_path):
with io.open(text_type(login_defs_path), encoding=text_type('utf-8')) as log_defs_file:
login_data = log_defs_file.readlines()
for line in login_data:
if PY3: # pragma: no cover
line = str(line)
if PY2: # pragma: no cover
line = line.encode(text_type('utf8'))
if line[:7] == text_type('UID_MIN'):
uid_min = int(line.split()[1].strip())
if line[:7] == text_type('UID_MAX'):
uid_max = int(line.split()[1].strip())
if not uid_min: # pragma: no cover
uid_min = DEFAULT_UID_MIN
if not uid_max: # pragma: no cover
uid_max = DEFAULT_UID_MAX
return uid_min, uid_max | Discover the minimum and maximum UID number. | entailment |
def collect_static() -> bool:
"""
Runs Django ``collectstatic`` command in silent mode.
:return: always ``True``
"""
from django.core.management import execute_from_command_line
# from django.conf import settings
# if not os.listdir(settings.STATIC_ROOT):
wf('Collecting static files... ', False)
execute_from_command_line(['./manage.py', 'collectstatic', '-c', '--noinput', '-v0'])
wf('[+]\n')
return True | Runs Django ``collectstatic`` command in silent mode.
:return: always ``True`` | entailment |
def inner_parser(self) -> BaseParser:
"""
Prepares inner config parser for config stored at ``endpoint``.
:return: an instance of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:raises config.exceptions.KVStorageValueIsEmpty: if specified ``endpoint`` does not contain a config
"""
if self._inner_parser is not None:
return self._inner_parser
config = self.client.get(self.endpoint)
if not config:
raise KVStorageValueIsEmpty('Key `{0}` does not exist or value is empty'.format(self.endpoint))
config = config.decode()
self._inner_parser = self.inner_parser_class(
config=io.StringIO(config),
path_separator=self.path_separator,
scope=None
)
return self._inner_parser | Prepares inner config parser for config stored at ``endpoint``.
:return: an instance of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:raises config.exceptions.KVStorageValueIsEmpty: if specified ``endpoint`` does not contain a config | entailment |
def generate_add_user_command(proposed_user=None, manage_home=None):
"""Generate command to add a user.
args:
proposed_user (User): User
manage_home: bool
returns:
list: The command string split into shell-like syntax
"""
command = None
if get_platform() in ('Linux', 'OpenBSD'):
command = '{0} {1}'.format(sudo_check(), LINUX_CMD_USERADD)
if proposed_user.uid:
command = '{0} -u {1}'.format(command, proposed_user.uid)
if proposed_user.gid:
command = '{0} -g {1}'.format(command, proposed_user.gid)
if proposed_user.gecos:
command = '{0} -c \'{1}\''.format(command, proposed_user.gecos)
if manage_home:
if proposed_user.home_dir:
if os.path.exists(proposed_user.home_dir):
command = '{0} -d {1}'.format(command, proposed_user.home_dir)
elif not os.path.exists('/home/{0}'.format(proposed_user.name)):
command = '{0} -m'.format(command)
if proposed_user.shell:
command = '{0} -s {1}'.format(command, proposed_user.shell)
command = '{0} {1}'.format(command, proposed_user.name)
elif get_platform() == 'FreeBSD': # pragma: FreeBSD
command = '{0} {1} useradd'.format(sudo_check(), FREEBSD_CMD_PW)
if proposed_user.uid:
command = '{0} -u {1}'.format(command, proposed_user.uid)
if proposed_user.gid:
command = '{0} -g {1}'.format(command, proposed_user.gid)
if proposed_user.gecos:
command = '{0} -c \'{1}\''.format(command, proposed_user.gecos)
if manage_home:
if proposed_user.home_dir:
command = '{0} -d {1}'.format(command, proposed_user.home_dir)
else:
command = '{0} -m'.format(command)
if proposed_user.shell:
command = '{0} -s {1}'.format(command, proposed_user.shell)
command = '{0} -n {1}'.format(command, proposed_user.name)
if command:
return shlex.split(str(command)) | Generate command to add a user.
args:
proposed_user (User): User
manage_home: bool
returns:
list: The command string split into shell-like syntax | entailment |
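An illustrative call on Linux; the ``User`` fields and the exact ``sudo_check()``/``LINUX_CMD_USERADD`` tokens are assumptions:
user = User(name='alice', uid=1001, shell='/bin/bash')
generate_add_user_command(proposed_user=user, manage_home=True)
# roughly -> ['sudo', 'useradd', '-u', '1001', '-m', '-s', '/bin/bash', 'alice']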
def generate_modify_user_command(task=None, manage_home=None):
"""Generate command to modify existing user to become the proposed user.
args:
task (dict): A proposed user and the differences between it and the existing user
returns:
list: The command string split into shell-like syntax
"""
name = task['proposed_user'].name
comparison_result = task['user_comparison']['result']
command = None
if get_platform() in ('Linux', 'OpenBSD'):
command = '{0} {1}'.format(sudo_check(), LINUX_CMD_USERMOD)
if comparison_result.get('replacement_uid_value'):
command = '{0} -u {1}'.format(command, comparison_result.get('replacement_uid_value'))
if comparison_result.get('replacement_gid_value'):
command = '{0} -g {1}'.format(command, comparison_result.get('replacement_gid_value'))
if comparison_result.get('replacement_gecos_value'):
command = '{0} -c {1}'.format(command, comparison_result.get('replacement_gecos_value'))
if comparison_result.get('replacement_shell_value'):
command = '{0} -s {1}'.format(command, comparison_result.get('replacement_shell_value'))
if manage_home and comparison_result.get('replacement_home_dir_value'):
command = '{0} -d {1}'.format(command, comparison_result.get('replacement_home_dir_value'))
command = '{0} {1}'.format(command, name)
if get_platform() == 'FreeBSD': # pragma: FreeBSD
command = '{0} {1} usermod'.format(sudo_check(), FREEBSD_CMD_PW)
if comparison_result.get('replacement_uid_value'):
command = '{0} -u {1}'.format(command, comparison_result.get('replacement_uid_value'))
if comparison_result.get('replacement_gid_value'):
command = '{0} -g {1}'.format(command, comparison_result.get('replacement_gid_value'))
if comparison_result.get('replacement_gecos_value'):
command = '{0} -c {1}'.format(command, comparison_result.get('replacement_gecos_value'))
if comparison_result.get('replacement_shell_value'):
command = '{0} -s {1}'.format(command, comparison_result.get('replacement_shell_value'))
if manage_home and comparison_result.get('replacement_home_dir_value'):
command = '{0} -d {1}'.format(command, comparison_result.get('replacement_home_dir_value'))
command = '{0} -n {1}'.format(command, name)
if command:
return shlex.split(str(command)) | Generate command to modify existing user to become the proposed user.
args:
task (dict): A proposed user and the differences between it and the existing user
returns:
list: The command string split into shell-like syntax | entailment |
def generate_delete_user_command(username=None, manage_home=None):
"""Generate command to delete a user.
args:
username (str): user name
manage_home (bool): manage home directory
returns:
list: The user delete command string split into shell-like syntax
"""
command = None
remove_home = '-r' if manage_home else ''
if get_platform() in ('Linux', 'OpenBSD'):
command = '{0} {1} {2} {3}'.format(sudo_check(), LINUX_CMD_USERDEL, remove_home, username)
elif get_platform() == 'FreeBSD': # pragma: FreeBSD
command = '{0} {1} userdel {2} -n {3}'.format(sudo_check(), FREEBSD_CMD_PW, remove_home, username)
if command:
return shlex.split(str(command)) | Generate command to delete a user.
args:
username (str): user name
manage_home (bool): manage home directory
returns:
list: The user delete command string split into shell-like syntax | entailment |
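For example, on Linux (the exact prefix depends on ``sudo_check()`` and ``LINUX_CMD_USERDEL``):
generate_delete_user_command(username='alice', manage_home=True)
# roughly -> ['sudo', 'userdel', '-r', 'alice']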
def compare_user(passed_user=None, user_list=None):
"""Check if supplied User instance exists in supplied Users list and, if so, return the differences.
args:
passed_user (User): the user instance to check for differences
user_list (Users): the Users instance containing a list of Users instances
returns:
dict: Details of the matching user and a list of differences
"""
# Check if user exists
returned = user_list.describe_users(users_filter=dict(name=passed_user.name))
replace_keys = False
# User exists, so compare attributes
comparison_result = dict()
if passed_user.uid and (not returned[0].uid == passed_user.uid):
comparison_result['uid_action'] = 'modify'
comparison_result['current_uid_value'] = returned[0].uid
comparison_result['replacement_uid_value'] = passed_user.uid
if passed_user.gid and (not returned[0].gid == passed_user.gid):
comparison_result['gid_action'] = 'modify'
comparison_result['current_gid_value'] = returned[0].gid
comparison_result['replacement_gid_value'] = passed_user.gid
if passed_user.gecos and (not returned[0].gecos == passed_user.gecos):
comparison_result['gecos_action'] = 'modify'
comparison_result['current_gecos_value'] = returned[0].gecos
comparison_result['replacement_gecos_value'] = passed_user.gecos
if passed_user.home_dir and (not returned[0].home_dir == passed_user.home_dir):
comparison_result['home_dir_action'] = 'modify'
comparison_result['current_home_dir_value'] = returned[0].home_dir
comparison_result['replacement_home_dir_value'] = passed_user.home_dir
# (Re)set keys if home dir changed
replace_keys = True
if passed_user.shell and (not returned[0].shell == passed_user.shell):
comparison_result['shell_action'] = 'modify'
comparison_result['current_shell_value'] = returned[0].shell
comparison_result['replacement_shell_value'] = passed_user.shell
if passed_user.sudoers_entry and (not returned[0].sudoers_entry == passed_user.sudoers_entry):
comparison_result['sudoers_entry_action'] = 'modify'
comparison_result['current_sudoers_entry'] = returned[0].sudoers_entry
comparison_result['replacement_sudoers_entry'] = passed_user.sudoers_entry
# if passed_user.public_keys and (not returned[0].public_keys == passed_user.public_keys):
existing_keys = returned[0].public_keys
passed_keys = passed_user.public_keys
# Check if existing and passed keys exist, and if so, compare
if all((existing_keys, passed_keys)) and len(existing_keys) == len(passed_user.public_keys):
# Compare each key, and if any differences, replace
existing = set(key.raw for key in existing_keys)
replacement = set(key.raw for key in passed_keys)
if set.difference(existing, replacement):
replace_keys = True
# If no existing keys but passed keys are set, then replace
elif passed_keys and not existing_keys:
replace_keys = True
if replace_keys:
comparison_result['public_keys_action'] = 'modify'
comparison_result['current_public_keys_value'] = existing_keys
comparison_result['replacement_public_keys_value'] = passed_keys
return dict(state='existing', result=comparison_result, existing_user=returned) | Check if supplied User instance exists in supplied Users list and, if so, return the differences.
args:
passed_user (User): the user instance to check for differences
user_list (Users): the Users instance containing a list of Users instances
returns:
dict: Details of the matching user and a list of differences | entailment |
def gecos(self):
"""Force double quoted gecos.
returns:
str: The double quoted gecos.
"""
if not self._gecos:
return None
if self._gecos.startswith(text_type('\'')) and self._gecos.endswith(text_type('\'')):
self._gecos = '\"{0}\"'.format(self._gecos[1:-1])
return self._gecos
elif self._gecos.startswith(text_type('\"')) and self._gecos.endswith(text_type('\"')):
return self._gecos
else:
return '\"{0}\"'.format(self._gecos) | Force double quoted gecos.
returns:
str: The double quoted gecos. | entailment |
def to_dict(self):
""" Return the user as a dict. """
public_keys = [public_key.b64encoded for public_key in self.public_keys]
return dict(name=self.name, passwd=self.passwd, uid=self.uid, gid=self.gid, gecos=self.gecos,
home_dir=self.home_dir, shell=self.shell, public_keys=public_keys) | Return the user as a dict. | entailment |
def insert(self, index, value):
"""Insert an instance of User into the collection."""
self.check(value)
self._user_list.insert(index, value) | Insert an instance of User into the collection. | entailment |
def remove(self, username=None):
"""Remove User instance based on supplied user name."""
self._user_list = [user for user in self._user_list if user.name != username] | Remove User instance based on supplied user name. | entailment |
def describe_users(self, users_filter=None):
"""Return a list of users matching a filter (if provided)."""
user_list = Users(oktypes=User)
for user in self._user_list:
if users_filter and (users_filter.get('name') == user.name or users_filter.get('uid') == user.uid):
user_list.append(user)
return user_list | Return a list of users matching a filter (if provided). | entailment |
def from_yaml(cls, file_path=None):
"""Create collection from a YAML file."""
try:
import yaml
except ImportError: # pragma: no cover
yaml = None
if not yaml:
import sys
sys.exit('PyYAML is not installed, but is required in order to parse YAML files.'
'\nTo install, run:\n$ pip install PyYAML\nor visit'
' http://pyyaml.org/wiki/PyYAML for instructions.')
with io.open(file_path, encoding=text_type('utf-8')) as stream:
users_yaml = yaml.safe_load(stream)
if isinstance(users_yaml, dict):
return cls.construct_user_list(raw_users=users_yaml.get('users'))
else:
raise ValueError('No YAML object could be decoded') | Create collection from a YAML file. | entailment |
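A sketch of the expected YAML shape, inferred from ``construct_user_list`` below (the file name is a placeholder):
# users.yml:
#   users:
#     - name: alice
#       uid: 1001
#       shell: /bin/bash
users = Users.from_yaml(file_path='users.yml')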
def from_json(cls, file_path=None):
"""Create collection from a JSON file."""
with io.open(file_path, encoding=text_type('utf-8')) as stream:
try:
users_json = json.load(stream)
except ValueError:
raise ValueError('No JSON object could be decoded')
return cls.construct_user_list(raw_users=users_json.get('users')) | Create collection from a JSON file. | entailment |
def from_passwd(uid_min=None, uid_max=None):
"""Create collection from locally discovered data, e.g. /etc/passwd."""
import pwd
users = Users(oktypes=User)
passwd_list = pwd.getpwall()
if not uid_min:
uid_min = UID_MIN
if not uid_max:
uid_max = UID_MAX
sudoers_entries = read_sudoers()
for pwd_entry in passwd_list:
if uid_min <= pwd_entry.pw_uid <= uid_max:
user = User(name=text_type(pwd_entry.pw_name),
passwd=text_type(pwd_entry.pw_passwd),
uid=pwd_entry.pw_uid,
gid=pwd_entry.pw_gid,
gecos=text_type(pwd_entry.pw_gecos),
home_dir=text_type(pwd_entry.pw_dir),
shell=text_type(pwd_entry.pw_shell),
public_keys=read_authorized_keys(username=pwd_entry.pw_name),
sudoers_entry=get_sudoers_entry(username=pwd_entry.pw_name,
sudoers_entries=sudoers_entries))
users.append(user)
return users | Create collection from locally discovered data, e.g. /etc/passwd. | entailment |
def construct_user_list(raw_users=None):
"""Construct a list of User objects from a list of dicts."""
users = Users(oktypes=User)
for user_dict in raw_users:
public_keys = None
if user_dict.get('public_keys'):
public_keys = [PublicKey(b64encoded=x, raw=None)
for x in user_dict.get('public_keys')]
users.append(User(name=user_dict.get('name'),
passwd=user_dict.get('passwd'),
uid=user_dict.get('uid'),
gid=user_dict.get('gid'),
home_dir=user_dict.get('home_dir'),
gecos=user_dict.get('gecos'),
shell=user_dict.get('shell'),
public_keys=public_keys,
sudoers_entry=user_dict.get('sudoers_entry')))
return users | Construct a list of User objects from a list of dicts. | entailment |
def to_dict(self):
""" Return a dict of the users. """
users = dict(users=list())
for user in self:
users['users'].append(user.to_dict())
return users | Return a dict of the users. | entailment |
def export(self, file_path=None, export_format=None):
""" Write the users to a file. """
with io.open(file_path, mode='w', encoding="utf-8") as export_file:
if export_format == 'yaml':
import yaml
yaml.safe_dump(self.to_dict(), export_file, default_flow_style=False)
elif export_format == 'json':
export_file.write(text_type(json.dumps(self.to_dict(), ensure_ascii=False)))
return True | Write the users to a file. | entailment |
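For example, dumping the local users discovered via ``from_passwd`` (assuming it is exposed on ``Users``):
users = Users.from_passwd()
users.export(file_path='users.yml', export_format='yaml')
users.export(file_path='users.json', export_format='json')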