text_prompt (string, 100 to 17.7k chars, nullable) | code_prompt (string, 7 to 9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Put quotes around all paths and join them with space in-between.
<END_TASK>
<USER_TASK:>
Description:
def wrap_paths(paths):
# type: (list[str]) -> str
""" Put quotes around all paths and join them with space in-between. """ |
if isinstance(paths, string_types):
raise ValueError(
"paths cannot be a string. "
"Use array with one element instead."
)
return ' '.join('"' + path + '"' for path in paths) |
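A minimal usage sketch of the function above (the paths are made-up examples); note that passing a bare string raises ValueError, so even a single path must be wrapped in a list:
quoted = wrap_paths(['src/my pkg', 'tests'])
# quoted == '"src/my pkg" "tests"'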
<SYSTEM_TASK:>
Run pep8 and pylint on all project files.
<END_TASK>
<USER_TASK:>
Description:
def lint_cli(ctx, exclude, skip_untracked, commit_only):
# type: (click.Context, List[str], bool, bool) -> None
""" Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory
""" |
if ctx.invoked_subcommand:
return
from peltak.logic import lint
lint.lint(exclude, skip_untracked, commit_only) |
<SYSTEM_TASK:>
Gets the IP addresses from ifconfig
<END_TASK>
<USER_TASK:>
Description:
def get_ip_addresses():
"""Gets the ip addresses from ifconfig
:return: (dict) of devices and aliases with the IPv4 address
""" |
log = logging.getLogger(mod_logger + '.get_ip_addresses')
command = ['/sbin/ifconfig']
try:
result = run_command(command)
except CommandError:
raise
ifconfig = result['output'].strip()
# Scan the ifconfig output for IPv4 addresses
devices = {}
parts = ifconfig.split()
device = None
for part in parts:
if device is None:
if 'eth' in part or 'eno' in part:
device = part
else:
test = part.split(':', 1)
if len(test) == 2:
if test[0] == 'addr':
ip_address = test[1]
log.info('Found IP address %s on device %s', ip_address,
device)
devices[device] = ip_address
device = None
return devices |
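A small usage sketch, assuming the function above and its run_command/CommandError dependencies are in scope; the device name is only an example:
addresses = get_ip_addresses()
eth0_ip = addresses.get('eth0')  # None if ifconfig reported no eth0 address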
<SYSTEM_TASK:>
Returns the MAC address given a device index
<END_TASK>
<USER_TASK:>
Description:
def get_mac_address(device_index=0):
"""Returns the Mac Address given a device index
:param device_index: (int) Device index
:return: (str) Mac address or None
""" |
log = logging.getLogger(mod_logger + '.get_mac_address')
command = ['ip', 'addr', 'show', 'eth{d}'.format(d=device_index)]
log.info('Attempting to find a mac address at device index: {d}'.format(d=device_index))
try:
result = run_command(command)
except CommandError:
_, ex, trace = sys.exc_info()
log.error('There was a problem running command, unable to determine mac address: {c}\n{e}'.format(
c=command, e=str(ex)))
return
ipaddr = result['output'].split()
get_next = False
mac_address = None
for part in ipaddr:
if get_next:
mac_address = part
log.info('Found mac address: {m}'.format(m=mac_address))
break
if 'link' in part:
get_next = True
if not mac_address:
log.info('mac address not found for device: {d}'.format(d=device_index))
return mac_address |
<SYSTEM_TASK:>
Emulates bash chmod command
<END_TASK>
<USER_TASK:>
Description:
def chmod(path, mode, recursive=False):
"""Emulates bash chmod command
This method sets the file permissions to the specified mode.
:param path: (str) Full path to the file or directory
:param mode: (str) Mode to be set (e.g. 0755)
:param recursive: (bool) Set True to make a recursive call
:return: int exit code of the chmod command
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.chmod')
# Validate args
if not isinstance(path, basestring):
msg = 'path argument is not a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(mode, basestring):
msg = 'mode argument is not a string'
log.error(msg)
raise CommandError(msg)
# Ensure the item exists
if not os.path.exists(path):
msg = 'Item not found: {p}'.format(p=path)
log.error(msg)
raise CommandError(msg)
# Create the chmod command
command = ['chmod']
# Make it recursive if specified
if recursive:
command.append('-R')
command.append(mode)
command.append(path)
try:
result = run_command(command)
except CommandError:
raise
log.info('chmod command exited with code: {c}'.format(c=result['code']))
return result['code'] |
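A hedged usage sketch of the chmod() helper above; note that mode must be passed as a string (non-string args raise CommandError). The path is an example:
exit_code = chmod('/opt/myapp/scripts', '755', recursive=True)
# exit_code is the chmod command's return code; 0 indicates success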
<SYSTEM_TASK:>
Emulates 'source' command in bash
<END_TASK>
<USER_TASK:>
Description:
def source(script):
"""Emulates 'source' command in bash
:param script: (str) Full path to the script to source
:return: Updated environment
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.source')
if not isinstance(script, basestring):
msg = 'script argument must be a string'
log.error(msg)
raise CommandError(msg)
log.info('Attempting to source script: %s', script)
try:
pipe = subprocess.Popen(". %s; env" % script, stdout=subprocess.PIPE, shell=True)
data = pipe.communicate()[0]
except ValueError:
_, ex, trace = sys.exc_info()
msg = 'Invalid argument:\n{e}'.format(e=str(ex))
log.error(msg)
raise CommandError, msg, trace
except OSError:
_, ex, trace = sys.exc_info()
msg = 'File not found: {s}\n{e}'.format(s=script, e=str(ex))
raise CommandError, msg, trace
except subprocess.CalledProcessError:
_, ex, trace = sys.exc_info()
msg = 'Script {s} returned a non-zero exit code: {c}\n{e}'.format(
s=script, e=str(ex), c=ex.returncode)
log.error(msg)
raise CommandError, msg, trace
env = {}
log.debug('Adding environment variables from data: {d}'.format(d=data))
for line in data.splitlines():
entry = line.split("=", 1)
if len(entry) != 2:
log.warn('This property is not in prop=value format, and will be skipped: {p}'.format(p=line))
continue
try:
env[entry[0]] = entry[1]
except IndexError:
_, ex, trace = sys.exc_info()
log.warn('IndexError: There was a problem setting environment variables from line: {p}\n{e}'.format(
p=line, e=str(ex)))
continue
else:
log.debug('Added environment variable {p}={v}'.format(p=entry[0], v=entry[1]))
os.environ.update(env)
return env |
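A short usage sketch of source() above; the script path is an example, and the returned dict mirrors what was also pushed into os.environ:
env = source('/etc/profile.d/myapp.sh')
java_home = env.get('JAVA_HOME')  # None if the script did not export it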
<SYSTEM_TASK:>
Run a yum update on this system
<END_TASK>
<USER_TASK:>
Description:
def yum_update(downloadonly=False, dest_dir='/tmp'):
"""Run a yum update on this system
This public method runs the yum -y update command to update
packages from yum. If downloadonly is set to true, the yum
updates will be downloaded to the specified dest_dir.
:param dest_dir: (str) Full path to the download directory
:param downloadonly: Boolean
:return: int exit code from the yum command
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.yum_update')
# Type checks on the args
if not isinstance(dest_dir, basestring):
msg = 'dest_dir argument must be a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(downloadonly, bool):
msg = 'downloadonly argument must be a bool'
log.error(msg)
raise CommandError(msg)
# If downloadonly was True, download packages to dest_dir
if downloadonly:
# Create the destination directory if it does not exist
log.info('Creating directory: %s', dest_dir)
try:
mkdir_p(dest_dir)
except OSError:
_, ex, trace = sys.exc_info()
msg = 'Unable to create destination directory: {d}'.format(
d=dest_dir)
log.error(msg)
raise CommandError, msg, trace
# Build command string with downloadonly options specified
command = ['yum', '-y', 'update', '--downloadonly',
'--downloaddir={d}'.format(d=dest_dir)]
log.info('Downloading updates from yum to %s...', dest_dir)
else:
# Build command string to update directly
command = ['yum', '-y', 'update']
log.info('Installing yum updates from RHN...')
# Run the command
try:
result = run_command(command)
except CommandError:
raise
log.info('Yum update completed and exit with code: {c}'.format(
c=result['code']))
return result['code'] |
<SYSTEM_TASK:>
This method installs all RPM files in a specific dir
<END_TASK>
<USER_TASK:>
Description:
def rpm_install(install_dir):
"""This method installs all RPM files in a specific dir
:param install_dir: (str) Full path to the directory
:return int exit code from the rpm command
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.rpm_install')
# Type checks on the args
if not isinstance(install_dir, basestring):
msg = 'install_dir argument must be a string'
log.error(msg)
raise CommandError(msg)
# Ensure the install_dir directory exists
if not os.path.isdir(install_dir):
msg = 'Directory not found: {f}'.format(f=install_dir)
log.error(msg)
raise CommandError(msg)
# Create the command
command = ['rpm', '-iv', '--force', '{d}/*.rpm'.format(d=install_dir)]
# Run the rpm command
try:
result = run_command(command)
except CommandError:
raise
log.info('RPM completed and exit with code: {c}'.format(
c=result['code']))
return result['code'] |
<SYSTEM_TASK:>
Python impl of the bash sed command
<END_TASK>
<USER_TASK:>
Description:
def sed(file_path, pattern, replace_str, g=0):
"""Python impl of the bash sed command
This method emulates the functionality of a bash sed command.
:param file_path: (str) Full path to the file to be edited
:param pattern: (str) Search pattern to replace as a regex
:param replace_str: (str) String to replace the pattern
:param g: (int) Number of occurrences to replace on each matching line;
0 replaces every occurrence (equivalent to the 'g' option in bash sed),
1 replaces only the first
:return: None
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.sed')
# Type checks on the args
if not isinstance(file_path, basestring):
msg = 'file_path argument must be a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(pattern, basestring):
msg = 'pattern argument must be a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(replace_str, basestring):
msg = 'replace_str argument must be a string'
log.error(msg)
raise CommandError(msg)
# Ensure the file_path file exists
if not os.path.isfile(file_path):
msg = 'File not found: {f}'.format(f=file_path)
log.error(msg)
raise CommandError(msg)
# Search for a matching pattern and replace matching patterns
log.info('Updating file: %s...', file_path)
for line in fileinput.input(file_path, inplace=True):
if re.search(pattern, line):
log.info('Updating line: %s', line)
new_line = re.sub(pattern, replace_str, line, count=g)
log.info('Replacing with line: %s', new_line)
sys.stdout.write(new_line)
else:
sys.stdout.write(line) |
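A hedged usage sketch mirroring how sed() is called later in set_hostname(); the hostname value is an example. With the default g=0 every occurrence on a matching line is replaced, like the 'g' flag in bash sed:
sed('/etc/sysconfig/network', '^HOSTNAME=.*', 'HOSTNAME=server1.example.com')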
<SYSTEM_TASK:>
Creates a zip file of a directory tree
<END_TASK>
<USER_TASK:>
Description:
def zip_dir(dir_path, zip_file):
"""Creates a zip file of a directory tree
This method creates a zip archive of the directory tree dir_path
and writes it to the zip_file output path.
:param dir_path: (str) Full path to directory to be zipped
:param zip_file: (str) Full path to the output zip file
:return: None
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.zip_dir')
# Validate args
if not isinstance(dir_path, basestring):
msg = 'dir_path argument must be a string'
log.error(msg)
raise CommandError(msg)
if not isinstance(zip_file, basestring):
msg = 'zip_file argument must be a string'
log.error(msg)
raise CommandError(msg)
# Ensure the dir_path file exists
if not os.path.isdir(dir_path):
msg = 'Directory not found: {f}'.format(f=dir_path)
log.error(msg)
raise CommandError(msg)
try:
with contextlib.closing(zipfile.ZipFile(zip_file, 'w', allowZip64=True)) as zip_w:
for root, dirs, files in os.walk(dir_path):
for f in files:
log.debug('Adding file to zip: %s', f)
strip = len(dir_path) - len(os.path.split(dir_path)[-1])
file_name = os.path.join(root, f)
archive_name = os.path.join(root[strip:], f)
zip_w.write(file_name, archive_name)
except Exception:
_, ex, trace = sys.exc_info()
msg = 'Unable to create zip file: {f}\n{e}'.format(
f=zip_file, e=str(ex))
log.error(msg)
raise CommandError, msg, trace
log.info('Successfully created zip file: %s', zip_file) |
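A minimal usage sketch of zip_dir(); both paths are examples. The archive keeps the top-level directory name as its root because of the strip calculation above:
zip_dir('/opt/myapp/config', '/tmp/config-backup.zip')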
<SYSTEM_TASK:>
This method returns the IP address
<END_TASK>
<USER_TASK:>
Description:
def get_ip(interface=0):
"""This method return the IP address
:param interface: (int) Interface number (e.g. 0 for eth0)
:return: (str) IP address or None
""" |
log = logging.getLogger(mod_logger + '.get_ip')
log.info('Getting the IP address for this system...')
ip_address = None
try:
log.info('Attempting to get IP address by hostname...')
ip_address = socket.gethostbyname(socket.gethostname())
except socket.error:
log.info('Unable to get IP address for this system using hostname, '
'using a bash command...')
command = 'ip addr show eth%s | grep inet | grep -v inet6 | ' \
'awk \'{ print $2 }\' | cut -d/ -f1 ' \
'>> /root/ip' % interface
try:
log.info('Running command: %s', command)
subprocess.check_call(command, shell=True)
except(OSError, subprocess.CalledProcessError):
_, ex, trace = sys.exc_info()
msg = 'Unable to get the IP address of this system\n{e}'.format(
e=str(ex))
log.error(msg)
raise CommandError, msg, trace
else:
ip_file = '/root/ip'
log.info('Command executed successfully, pulling IP address from '
'file: %s', ip_file)
if os.path.isfile(ip_file):
with open(ip_file, 'r') as f:
for line in f:
ip_address = line.strip()
log.info('Found IP address from file: %s', ip_address)
else:
msg = 'File not found: {f}'.format(f=ip_file)
log.error(msg)
raise CommandError(msg)
log.info('Returning IP address: %s', ip_address)
return ip_address |
<SYSTEM_TASK:>
Sets this host's hostname
<END_TASK>
<USER_TASK:>
Description:
def set_hostname(new_hostname, pretty_hostname=None):
"""Sets this hosts hostname
This method updates /etc/sysconfig/network and calls the hostname
command to set a hostname on a Linux system.
:param new_hostname: (str) New hostname
:param pretty_hostname: (str) new pretty hostname, set to the same as
new_hostname if not provided
:return (int) exit code of the hostname command
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.set_hostname')
# Ensure the hostname is a str
if not isinstance(new_hostname, basestring):
msg = 'new_hostname argument must be a string'
raise CommandError(msg)
# Update the network config file
network_file = '/etc/sysconfig/network'
if os.path.isfile(network_file):
log.info('Updating {f} with the new hostname: {h}...'.format(f=network_file, h=new_hostname))
try:
sed(network_file, '^HOSTNAME=.*', 'HOSTNAME=' + new_hostname)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to update [{f}], produced output:\n{e}'.format(f=network_file, e=str(ex))
raise CommandError, msg, trace
else:
log.info('Network file not found, will not be updated: {f}'.format(f=network_file))
# Update the hostname
if is_systemd():
hostname_file = '/etc/hostname'
pretty_hostname_file = '/etc/machine-info'
log.info('This is systemd, updating files: {h} and {p}'.format(h=hostname_file, p=pretty_hostname_file))
# Update the hostname file
log.info('Updating hostname file: {h}...'.format(h=hostname_file))
if os.path.isfile(hostname_file):
os.remove(hostname_file)
with open(hostname_file, 'w') as f:
f.write(new_hostname)
log.info('Updating pretty hostname file: {p}'.format(p=pretty_hostname_file))
# Use the same thing if pretty hostname is not provided
if pretty_hostname is None:
log.info('Pretty hostname not provided, using the hostname: {p}'.format(p=new_hostname))
pretty_hostname = new_hostname
# Update the pretty hostname file
if os.path.isfile(pretty_hostname_file):
os.remove(pretty_hostname_file)
with open(pretty_hostname_file, 'w') as f:
f.write('PRETTY_HOSTNAME={p}'.format(p=pretty_hostname))
return 0
else:
command = ['/bin/hostname', new_hostname]
# Run the hostname command
log.info('Running hostname command to set the hostname: [{c}]'.format(c=' '.join(command)))
try:
result = run_command(command)
except CommandError:
raise
log.info('Hostname command completed with code: {c} and output:\n{o}'.format(
c=result['code'], o=result['output']))
return result['code'] |
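A hedged usage sketch of set_hostname(); the names are examples. On systemd distros the hostname files are rewritten and 0 is returned, otherwise the exit code of /bin/hostname is returned:
code = set_hostname('server1.example.com', pretty_hostname='Server 1')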
<SYSTEM_TASK:>
Copies an existing ifcfg network script to another
<END_TASK>
<USER_TASK:>
Description:
def copy_ifcfg_file(source_interface, dest_interface):
"""Copies an existing ifcfg network script to another
:param source_interface: String (e.g. 1)
:param dest_interface: String (e.g. 0:0)
:return: None
:raises TypeError, OSError
""" |
log = logging.getLogger(mod_logger + '.copy_ifcfg_file')
# Validate args
if not isinstance(source_interface, basestring):
msg = 'source_interface argument must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(dest_interface, basestring):
msg = 'dest_interface argument must be a string'
log.error(msg)
raise TypeError(msg)
network_script = '/etc/sysconfig/network-scripts/ifcfg-eth'
source_file = network_script + source_interface
dest_file = network_script + dest_interface
command = ['cp', '-f', source_file, dest_file]
try:
result = run_command(command)
code = result['code']
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to copy the ifcfg file from interface {s} to interface {d}\n{e}'.format(
s=source_interface, d=dest_interface, e=str(ex))
raise OSError, msg, trace
log.info('Copy command exited with code: {c}'.format(c=code))
if code != 0:
msg = 'There was a problem copying file {s} to {d}'.format(s=source_file, d=dest_file)
log.error(msg)
raise OSError(msg)
# Updating the destination network script DEVICE property
try:
sed(file_path=dest_file, pattern='^DEVICE=.*',
replace_str='DEVICE="eth{i}"'.format(i=dest_interface))
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to update DEVICE in file: {d}\n{e}'.format(
d=dest_file, e=str(ex))
log.error(msg)
raise CommandError, msg, trace
log.info('Successfully created file: {d}'.format(d=dest_file))
log.info('Restarting networking in 10 seconds to ensure the changes take effect...')
time.sleep(10)
retry_time = 10
max_retries = 10
for i in range(1, max_retries+2):
if i > max_retries:
msg = 'Unable to successfully start the networking service after {m} attempts'.format(m=max_retries)
log.error(msg)
raise OSError(msg)
log.info('Attempting to restart the networking service, attempt #{i} of {m}'.format(i=i, m=max_retries))
try:
service_network_restart()
except CommandError:
_, ex, trace = sys.exc_info()
log.warn('Attempted unsuccessfully to restart networking on attempt #{i} of {m}, trying again in {t} '
'seconds\n{e}'.format(i=i, m=max_retries, t=retry_time, e=str(ex)))
time.sleep(retry_time)
else:
log.info('Successfully restarted networking')
break
log.info('Successfully configured interface: {d}'.format(d=dest_interface)) |
<SYSTEM_TASK:>
Removes the ifcfg file at the specified device index
<END_TASK>
<USER_TASK:>
Description:
def remove_ifcfg_file(device_index='0'):
"""Removes the ifcfg file at the specified device index
and restarts the network service
:param device_index: (str) Device index (e.g. '0')
:return: None
:raises CommandError
""" |
log = logging.getLogger(mod_logger + '.remove_ifcfg_file')
if not isinstance(device_index, basestring):
msg = 'device_index argument must be a string'
log.error(msg)
raise CommandError(msg)
network_script = '/etc/sysconfig/network-scripts/ifcfg-eth{d}'.format(d=device_index)
if not os.path.isfile(network_script):
log.info('File does not exist, nothing will be removed: {n}'.format(n=network_script))
return
# Remove the network config script
log.info('Attempting to remove file: {n}'.format(n=network_script))
try:
os.remove(network_script)
except(IOError, OSError):
_, ex, trace = sys.exc_info()
msg = 'There was a problem removing network script file: {n}\n{e}'.format(n=network_script, e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully removed file: {n}'.format(n=network_script))
# Restart the network service
log.info('Restarting the network service...')
try:
service_network_restart()
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem restarting the network service\n{e}'.format(e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully restarted the network service') |
<SYSTEM_TASK:>
Adds a NAT rule to iptables
<END_TASK>
<USER_TASK:>
Description:
def add_nat_rule(port, source_interface, dest_interface):
"""Adds a NAT rule to iptables
:param port: String or int port number
:param source_interface: String (e.g. 1)
:param dest_interface: String (e.g. 0:0)
:return: None
:raises: TypeError, OSError
""" |
log = logging.getLogger(mod_logger + '.add_nat_rule')
# Validate args
if not isinstance(source_interface, basestring):
msg = 'source_interface argument must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(dest_interface, basestring):
msg = 'dest_interface argument must be a string'
log.error(msg)
raise TypeError(msg)
ip_addresses = ip_addr()
destination_ip = ip_addresses['eth{i}'.format(i=dest_interface)]
log.info('Using destination IP address: {d}'.format(d=destination_ip))
command = ['iptables', '-t', 'nat', '-A', 'PREROUTING', '-i',
'eth{s}'.format(s=source_interface), '-p', 'tcp',
'--dport', str(port), '-j', 'DNAT', '--to',
'{d}:{p}'.format(p=port, d=destination_ip)]
log.info('Running command: {c}'.format(c=command))
try:
subprocess.check_call(command)
except OSError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running command: {c}\n{e}'.format(c=command, e=str(ex))
log.error(msg)
raise OSError, msg, trace
except subprocess.CalledProcessError:
_, ex, trace = sys.exc_info()
msg = 'Command returned a non-zero exit code: {c}\n{e}'.format(c=command, e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully ran command: {c}'.format(c=command))
# Save the iptables with the new NAT rule
try:
save_iptables()
except OSError:
_, ex, trace = sys.exc_info()
msg = 'OSError: There was a problem saving iptables rules\n{e}'.format(e=str(ex))
raise OSError, msg, trace
log.info('Successfully saved iptables rules with the NAT rule') |
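A short usage sketch of add_nat_rule() using the interface naming from the docstring (source interface eth1, destination alias eth0:0); the port is an example. This forwards TCP traffic arriving on eth1 port 8080 to the IP bound to eth0:0:
add_nat_rule(8080, '1', '0:0')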
<SYSTEM_TASK:>
Saves iptables rules to the provided rules file
<END_TASK>
<USER_TASK:>
Description:
def save_iptables(rules_file='/etc/sysconfig/iptables'):
"""Saves iptables rules to the provided rules file
:param rules_file: (str) Full path to the iptables rules file where the output is saved
:return: None
:raises OSError
""" |
log = logging.getLogger(mod_logger + '.save_iptables')
# Run iptables-save to get the output
command = ['iptables-save']
log.debug('Running command: iptables-save')
try:
iptables_out = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
raise OSError, msg, trace
# Error if iptables-save did not exit clean
if int(iptables_out['code']) != 0:
raise OSError('Command [{g}] exited with code [{c}] and output:\n{o}'.format(
g=' '.join(command), c=iptables_out['code'], o=iptables_out['output']))
# Back up the existing rules file if it exists
if os.path.isfile(rules_file):
time_now = datetime.now().strftime('%Y%m%d-%H%M%S')
backup_file = '{f}.{d}'.format(f=rules_file, d=time_now)
log.debug('Creating backup file: {f}'.format(f=backup_file))
shutil.copy2(rules_file, backup_file)
# Save the output to the rules file
log.debug('Creating file: {f}'.format(f=rules_file))
with open(rules_file, 'w') as f:
f.write(iptables_out['output']) |
<SYSTEM_TASK:>
Queries a remote host over SSH to check for existence
<END_TASK>
<USER_TASK:>
Description:
def check_remote_host_marker_file(host, file_path):
"""Queries a remote host over SSH to check for existence
of a marker file
:param host: (str) host to query
:param file_path: (str) path to the marker file
:return: (bool) True if the marker file exists
:raises: TypeError, CommandError
""" |
log = logging.getLogger(mod_logger + '.check_remote_host_marker_file')
if not isinstance(host, basestring):
msg = 'host argument must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(file_path, basestring):
msg = 'file_path argument must be a string'
log.error(msg)
raise TypeError(msg)
log.debug('Checking host {h} for marker file: {f}...'.format(h=host, f=file_path))
command = ['ssh', '{h}'.format(h=host), 'if [ -f {f} ] ; then exit 0 ; else exit 1 ; fi'.format(f=file_path)]
try:
result = run_command(command, timeout_sec=5.0)
code = result['code']
output = result['output']
except CommandError:
raise
if code == 0:
log.debug('Marker file <{f}> was found on host {h}'.format(f=file_path, h=host))
return True
elif code == 1 and output == '':
log.debug('Marker file <{f}> was not found on host {h}'.format(f=file_path, h=host))
return False
else:
msg = 'There was a problem checking the remote host {h} over SSH for marker file {f}, ' \
'command returned code {c} and produced output: {o}'.format(
h=host, f=file_path, c=code, o=output)
log.debug(msg)
raise CommandError(msg) |
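A hedged usage sketch of the marker-file check above, assuming passwordless SSH to the remote host is already configured; the host name and file path are examples:
import time
while not check_remote_host_marker_file('scenario-master', '/opt/markers/install-complete'):
    time.sleep(30)  # poll every 30 seconds until the marker file appears on the remote host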
<SYSTEM_TASK:>
Restores and saves firewall rules from the firewall_rules file
<END_TASK>
<USER_TASK:>
Description:
def restore_iptables(firewall_rules):
"""Restores and saves firewall rules from the firewall_rules file
:param firewall_rules: (str) Full path to the firewall rules file
:return: None
:raises OSError
""" |
log = logging.getLogger(mod_logger + '.restore_iptables')
log.info('Restoring firewall rules from file: {f}'.format(f=firewall_rules))
# Ensure the firewall rules file exists
if not os.path.isfile(firewall_rules):
msg = 'Unable to restore iptables, file not found: {f}'.format(f=firewall_rules)
log.error(msg)
raise OSError(msg)
# Restore the firewall rules
log.info('Restoring iptables from file: {f}'.format(f=firewall_rules))
command = ['/sbin/iptables-restore', firewall_rules]
try:
result = run_command(command)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to restore firewall rules from file: {f}\n{e}'.format(f=firewall_rules, e=str(ex))
log.error(msg)
raise OSError(msg)
log.info('Restoring iptables produced output:\n{o}'.format(o=result['output']))
# Save iptables
log.info('Saving iptables...')
command = ['/etc/init.d/iptables', 'save']
try:
result = run_command(command)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to save firewall rules\n{e}'.format(e=str(ex))
log.error(msg)
raise OSError(msg)
log.info('Saving iptables produced output:\n{o}'.format(o=result['output'])) |
<SYSTEM_TASK:>
Determines whether this system uses systemd
<END_TASK>
<USER_TASK:>
Description:
def is_systemd():
"""Determines whether this system uses systemd
:return: (bool) True if this distro has systemd
""" |
os_family = platform.system()
if os_family != 'Linux':
raise OSError('This method is only supported on Linux, found OS: {o}'.format(o=os_family))
linux_distro, linux_version, distro_name = platform.linux_distribution()
# Determine when to use systemd
systemd = False
if 'ubuntu' in linux_distro.lower() and '16' in linux_version:
systemd = True
elif 'red' in linux_distro.lower() and '7' in linux_version:
systemd = True
elif 'cent' in linux_distro.lower() and '7' in linux_version:
systemd = True
return systemd |
<SYSTEM_TASK:>
Reboots the system after a specified wait time. Must be run as root
<END_TASK>
<USER_TASK:>
Description:
def system_reboot(wait_time_sec=20):
"""Reboots the system after a specified wait time. Must be run as root
:param wait_time_sec: (int) number of sec to wait before performing the reboot
:return: None
:raises: SystemRebootError, SystemRebootTimeoutError
""" |
log = logging.getLogger(mod_logger + '.system_reboot')
try:
wait_time_sec = int(wait_time_sec)
except ValueError:
raise CommandError('wait_time_sec must be an int, or a string convertible to an int')
log.info('Waiting {t} seconds before reboot...'.format(t=str(wait_time_sec)))
time.sleep(wait_time_sec)
command = ['shutdown', '-r', 'now']
log.info('Shutting down with command: [{c}]'.format(c=' '.join(command)))
time.sleep(2)
log.info('Shutting down...')
try:
result = run_command(command=command, timeout_sec=60)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running shutdown command: [{c}]\n{e}'.format(c=' '.join(command), e=str(ex))
raise SystemRebootError, msg, trace
if result['code'] != 0:
msg = 'Shutdown command exited with a non-zero code: [{c}], and produced output:\n{o}'.format(
c=str(result['code']), o=result['output'])
raise SystemRebootError(msg)
log.info('Waiting 60 seconds to ensure the reboot completes...')
time.sleep(60)
msg = 'Reboot has not completed after 60 seconds'
log.error(msg)
raise SystemRebootTimeoutError(msg) |
<SYSTEM_TASK:>
Adds padding to get the correct block sizes for AES encryption
<END_TASK>
<USER_TASK:>
Description:
def aes_pad(s, block_size=32, padding='{'):
""" Adds padding to get the correct block sizes for AES encryption
@s: #str being AES encrypted or decrypted
@block_size: the AES block size
@padding: character to pad with
-> padded #str
..
from vital.security import aes_pad
aes_pad("swing")
# -> 'swing{{{{{{{{{{{{{{{{{{{{{{{{{{{'
..
""" |
return s + (block_size - len(s) % block_size) * padding |
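aes_pad() only adds the padding; after decryption the pad characters still have to be stripped. A minimal companion sketch (aes_unpad is hypothetical and not part of this module), assuming the plaintext itself never ends with the padding character:
def aes_unpad(s, padding='{'):
    # Remove the pad characters that aes_pad() appended (hypothetical helper)
    return s.rstrip(padding)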
<SYSTEM_TASK:>
Converts integers to a sequence of strings, and reverse.
<END_TASK>
<USER_TASK:>
Description:
def strkey(val, chaffify=1, keyspace=string.ascii_letters + string.digits):
""" Converts integers to a sequence of strings, and reverse.
This is not intended to obfuscate numbers in any kind of
cryptographically secure way, in fact it's the opposite. It's
for predictable, reversible obfuscation. It can also be used to
transform a random bit integer to a string of the same bit
length.
@val: #int or #str
@chaffify: #int multiple to avoid 0=a, 1=b, 2=c, ... obfuscates the
ordering
@keyspace: #str allowed output chars
-> #str if @val is #int, #int if @val is #str
..
from vital.security import strkey
strkey(0, chaffify=1)
# -> b
strkey(0, chaffify=4)
# -> e
strkey(90000000000050500502200302035023)
# -> 'f3yMpJQUazIZHp1UO7k'
strkey('f3yMpJQUazIZHp1UO7k')
# -> 90000000000050500502200302035023
strkey(2000000, chaffify=200000000000)
# -> 'DIaqtyo2sC'
..
""" |
chaffify = chaffify or 1
keylen = len(keyspace)
try:
# INT TO STRING
if val < 0:
raise ValueError("Input value must be greater than -1.")
# chaffify the value
val = val * chaffify
if val == 0:
return keyspace[0]
# output the new string value
out = []
out_add = out.append
while val > 0:
val, digit = divmod(val, keylen)
out_add(keyspace[digit])
return "".join(out)[::-1]
except TypeError:
# STRING TO INT
out = 0
val = str(val)
find = str.find
for c in val:
out = out * keylen + find(keyspace, c)
# dechaffify the value
out = out // chaffify
return int(out) |
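A small round-trip sketch based on the docstring examples above; the key point is that decoding only recovers the original integer if the same chaffify multiple is used on both calls (the default of 1 here):
n = 90000000000050500502200302035023
key = strkey(n)        # -> 'f3yMpJQUazIZHp1UO7k' per the docstring
assert strkey(key) == n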
<SYSTEM_TASK:>
Returns the status of the invoice
<END_TASK>
<USER_TASK:>
Description:
def confirm(self, token=None):
"""Returns the status of the invoice
STATUSES: pending, completed, cancelled
""" |
_token = token if token else self._response.get("token")
return self._process('checkout-invoice/confirm/' + str(_token)) |
<SYSTEM_TASK:>
Appends the data to the 'taxes' key in the request object
<END_TASK>
<USER_TASK:>
Description:
def add_taxes(self, taxes):
"""Appends the data to the 'taxes' key in the request object
'taxes' should be in format: [("tax_name", "tax_amount")]
For example:
[("Other TAX", 700), ("VAT", 5000)]
""" |
# fixme: how to resolve duplicate tax names
_idx = len(self.taxes) # current index to prevent overwriting
for idx, tax in enumerate(taxes):
tax_key = "tax_" + str(idx + _idx)
self.taxes[tax_key] = {"name": tax[0], "amount": tax[1]} |
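A hedged usage sketch based on the docstring format, assuming the invoice object (name is an example) starts with no taxes; repeated calls append rather than overwrite because of the _idx offset above:
invoice.add_taxes([("Other TAX", 700), ("VAT", 5000)])
invoice.add_taxes([("City levy", 250)])  # stored as tax_2, after tax_0 and tax_1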
<SYSTEM_TASK:>
Updates the list of items in the current transaction
<END_TASK>
<USER_TASK:>
Description:
def add_item(self, item):
"""Updates the list of items in the current transaction""" |
_idx = len(self.items)
self.items.update({"item_" + str(_idx + 1): item}) |
<SYSTEM_TASK:>
Formats the data in the current transaction for processing
<END_TASK>
<USER_TASK:>
Description:
def _prepare_data(self):
"""Formats the data in the current transaction for processing""" |
total_amount = self.total_amount or self.calculate_total_amt()
self._data = {
"invoice": {
"items": self.__encode_items(self.items),
"taxes": self.taxes,
"total_amount": total_amount,
"description": self.description,
"channels": self.channels
},
"store": self.store.info,
"custom_data": self.custom_data,
"actions": {
"cancel_url": self.cancel_url,
"return_url": self.return_url,
"callback_url": self.callback_url
}
}
return self._data |
<SYSTEM_TASK:>
Sets self.deployment_home
<END_TASK>
<USER_TASK:>
Description:
def set_deployment_home(self):
"""Sets self.deployment_home
This method finds and sets deployment home, primarily based on
the DEPLOYMENT_HOME environment variable. If not set, this
method will attempt to determine deployment home.
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_deployment_home')
try:
self.deployment_home = os.environ['DEPLOYMENT_HOME']
except KeyError:
log.warn('DEPLOYMENT_HOME environment variable is not set, attempting to set it...')
else:
log.info('Found DEPLOYMENT_HOME environment variable set to: {d}'.format(d=self.deployment_home))
return
if self.cons3rt_agent_run_dir is None:
msg = 'This is neither Windows nor Linux, cannot determine DEPLOYMENT_HOME'
log.error(msg)
raise DeploymentError(msg)
# Ensure the run directory can be found
if not os.path.isdir(self.cons3rt_agent_run_dir):
msg = 'Could not find the cons3rt run directory, DEPLOYMENT_HOME cannot be set'
log.error(msg)
raise DeploymentError(msg)
run_dir_contents = os.listdir(self.cons3rt_agent_run_dir)
results = []
for item in run_dir_contents:
if 'Deployment' in item:
results.append(item)
if len(results) != 1:
msg = 'Could not find deployment home in the cons3rt run directory, deployment home cannot be set'
log.error(msg)
raise DeploymentError(msg)
# Ensure the Deployment Home is a directory
candidate_deployment_home = os.path.join(self.cons3rt_agent_run_dir, results[0])
if not os.path.isdir(candidate_deployment_home):
msg = 'The candidate deployment home is not a valid directory: {d}'.format(d=candidate_deployment_home)
log.error(msg)
raise DeploymentError(msg)
# Ensure the deployment properties file can be found
self.deployment_home = candidate_deployment_home
os.environ['DEPLOYMENT_HOME'] = self.deployment_home
log.info('Set DEPLOYMENT_HOME in the environment to: {d}'.format(d=self.deployment_home)) |
<SYSTEM_TASK:>
Reads the deployment properties file
<END_TASK>
<USER_TASK:>
Description:
def read_deployment_properties(self):
"""Reads the deployment properties file
This method reads the deployment properties file into the
"properties" dictionary object.
:return: None
:raises: DeploymentError
""" |
log = logging.getLogger(self.cls_logger + '.read_deployment_properties')
# Ensure deployment properties file exists
self.properties_file = os.path.join(self.deployment_home, 'deployment.properties')
if not os.path.isfile(self.properties_file):
msg = 'Deployment properties file not found: {f}'.format(f=self.properties_file)
log.error(msg)
raise DeploymentError(msg)
log.info('Found deployment properties file: {f}'.format(f=self.properties_file))
log.info('Reading deployment properties...')
try:
f = open(self.properties_file)
except (IOError, OSError):
_, ex, trace = sys.exc_info()
msg = 'Could not open deployment properties file: {f}'.format(f=self.properties_file)
log.error(msg)
raise DeploymentError, msg, trace
for line in f:
log.debug('Processing deployment properties file line: {l}'.format(l=line))
if not isinstance(line, basestring):
log.debug('Skipping line that is not a string: {l}'.format(l=line))
continue
elif line.startswith('#'):
log.debug('Skipping line that is a comment: {l}'.format(l=line))
continue
elif '=' in line:
split_line = line.strip().split('=', 1)
if len(split_line) == 2:
prop_name = split_line[0].strip()
prop_value = split_line[1].strip()
if prop_name is None or not prop_name or prop_value is None or not prop_value:
log.debug('Property name <{n}> or value <{v}> is none or blank, not including it'.format(
n=prop_name, v=prop_value))
else:
log.debug('Adding property {n} with value {v}...'.format(n=prop_name, v=prop_value))
self.properties[prop_name] = prop_value
else:
log.debug('Skipping line that did not split into 2 parts on an equal sign...')
log.info('Successfully read in deployment properties') |
<SYSTEM_TASK:>
Gets the name of a specific property
<END_TASK>
<USER_TASK:>
Description:
def get_property(self, regex):
"""Gets the name of a specific property
This public method is passed a regular expression and
returns the matching property name. If the property is not found,
or if the passed regex matches more than one property without an
exact match, this function will return None.
:param regex: Regular expression to search on
:return: (str) Property name matching the passed regex or None.
""" |
log = logging.getLogger(self.cls_logger + '.get_property')
if not isinstance(regex, basestring):
log.error('regex arg is not a string, found type: {t}'.format(t=regex.__class__.__name__))
return None
log.debug('Looking up property based on regex: {r}'.format(r=regex))
prop_list_matched = []
for prop_name in self.properties.keys():
match = re.search(regex, prop_name)
if match:
prop_list_matched.append(prop_name)
if len(prop_list_matched) == 1:
log.debug('Found matching property: {p}'.format(p=prop_list_matched[0]))
return prop_list_matched[0]
elif len(prop_list_matched) > 1:
log.debug('Passed regex {r} matched more than 1 property, checking for an exact match...'.format(r=regex))
for matched_prop in prop_list_matched:
if matched_prop == regex:
log.debug('Found an exact match: {p}'.format(p=matched_prop))
return matched_prop
log.debug('Exact match not found for regex {r}, returning None'.format(r=regex))
return None
else:
log.debug('Passed regex did not match any deployment properties: {r}'.format(r=regex))
return None |
<SYSTEM_TASK:>
Returns a list of property names matching the provided
<END_TASK>
<USER_TASK:>
Description:
def get_matching_property_names(self, regex):
"""Returns a list of property names matching the provided
regular expression
:param regex: Regular expression to search on
:return: (list) of property names matching the regex
""" |
log = logging.getLogger(self.cls_logger + '.get_matching_property_names')
prop_list_matched = []
if not isinstance(regex, basestring):
log.warn('regex arg is not a string, found type: {t}'.format(t=regex.__class__.__name__))
return prop_list_matched
log.debug('Finding properties matching regex: {r}'.format(r=regex))
for prop_name in self.properties.keys():
match = re.search(regex, prop_name)
if match:
prop_list_matched.append(prop_name)
return prop_list_matched |
<SYSTEM_TASK:>
Returns the value associated to the passed property
<END_TASK>
<USER_TASK:>
Description:
def get_value(self, property_name):
"""Returns the value associated to the passed property
This public method is passed a specific property as a string
and returns the value of that property. If the property is not
found, None will be returned.
:param property_name: (str) The name of the property
:return: (str) value for the passed property, or None.
""" |
log = logging.getLogger(self.cls_logger + '.get_value')
if not isinstance(property_name, basestring):
log.error('property_name arg is not a string, found type: {t}'.format(t=property_name.__class__.__name__))
return None
# Ensure a property with that name exists
prop = self.get_property(property_name)
if not prop:
log.debug('Property name not found matching: {n}'.format(n=property_name))
return None
value = self.properties[prop]
log.debug('Found value for property {n}: {v}'.format(n=property_name, v=value))
return value |
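A short usage sketch showing get_property() and get_value() together, mirroring how the setters below look up values; the deployment object name is an example:
prop_name = deployment.get_property('cons3rt\.deployment\.name')  # -> 'cons3rt.deployment.name' if present
deployment_name = deployment.get_value('cons3rt.deployment.name')  # None if the property is absent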
<SYSTEM_TASK:>
Set the cons3rt_role_name member for this system
<END_TASK>
<USER_TASK:>
Description:
def set_cons3rt_role_name(self):
"""Set the cons3rt_role_name member for this system
:return: None
:raises: DeploymentError
""" |
log = logging.getLogger(self.cls_logger + '.set_cons3rt_role_name')
try:
self.cons3rt_role_name = os.environ['CONS3RT_ROLE_NAME']
except KeyError:
log.warn('CONS3RT_ROLE_NAME is not set, attempting to determine it from deployment properties...')
if platform.system() == 'Linux':
log.info('Attempting to determine CONS3RT_ROLE_NAME on Linux...')
try:
self.determine_cons3rt_role_name_linux()
except DeploymentError:
raise
else:
log.warn('Unable to determine CONS3RT_ROLE_NAME on this System')
else:
log.info('Found environment variable CONS3RT_ROLE_NAME: {r}'.format(r=self.cons3rt_role_name))
return |
<SYSTEM_TASK:>
Determines the CONS3RT_ROLE_NAME for this Linux system, and
<END_TASK>
<USER_TASK:>
Description:
def determine_cons3rt_role_name_linux(self):
"""Determines the CONS3RT_ROLE_NAME for this Linux system, and
Set the cons3rt_role_name member for this system
This method determines the CONS3RT_ROLE_NAME for this system
in the deployment by first checking for the environment
variable, if not set, determining the value from the
deployment properties.
:return: None
:raises: DeploymentError
""" |
log = logging.getLogger(self.cls_logger + '.determine_cons3rt_role_name_linux')
# Determine IP addresses for this system
log.info('Determining the IPv4 addresses for this system...')
try:
ip_addresses = get_ip_addresses()
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to get the IP address of this system, thus cannot determine the ' \
'CONS3RT_ROLE_NAME\n{e}'.format(e=str(ex))
log.error(msg)
raise DeploymentError, msg, trace
else:
log.info('Found IP addresses: {a}'.format(a=ip_addresses))
log.info('Trying to determine IP address for eth0...')
try:
ip_address = ip_addresses['eth0']
except KeyError:
_, ex, trace = sys.exc_info()
msg = 'Unable to determine the IP address for eth0. Found the ' \
'following IP addresses: {i}\n{e}'.format(i=ip_addresses,
e=str(ex))
log.error(msg)
raise DeploymentError, msg, trace
else:
log.info('Found IP address for eth0: {i}'.format(i=ip_address))
pattern = '^cons3rt\.fap\.deployment\.machine.*0.internalIp=' + ip_address + '$'
try:
f = open(self.properties_file)
except IOError:
_, ex, trace = sys.exc_info()
msg = 'Could not open file {f}'.format(f=self.properties_file)
log.error(msg)
raise DeploymentError, msg, trace
prop_list_matched = []
log.debug('Searching for deployment properties matching pattern: {p}'.format(p=pattern))
for line in f:
log.debug('Processing deployment properties file line: {l}'.format(l=line))
if line.startswith('#'):
continue
elif '=' in line:
match = re.search(pattern, line)
if match:
log.debug('Found matching prop: {l}'.format(l=line))
prop_list_matched.append(line)
log.debug('Number of matching properties found: {n}'.format(n=len(prop_list_matched)))
if len(prop_list_matched) == 1:
prop_parts = prop_list_matched[0].split('.')
if len(prop_parts) > 5:
self.cons3rt_role_name = prop_parts[4]
log.info('Found CONS3RT_ROLE_NAME from deployment properties: {c}'.format(c=self.cons3rt_role_name))
log.info('Adding CONS3RT_ROLE_NAME to the current environment...')
os.environ['CONS3RT_ROLE_NAME'] = self.cons3rt_role_name
return
else:
log.error('Property found was not formatted as expected: %s',
prop_parts)
else:
log.error('Did not find a unique matching deployment property')
msg = 'Could not determine CONS3RT_ROLE_NAME from deployment properties'
log.error(msg)
raise DeploymentError(msg) |
<SYSTEM_TASK:>
Returns the ASSET_DIR environment variable
<END_TASK>
<USER_TASK:>
Description:
def set_asset_dir(self):
"""Returns the ASSET_DIR environment variable
This method reads the ASSET_DIR environment variable for the
current asset install and stores it in self.asset_dir. If the
variable is not set, asset_dir is left unset.
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_asset_dir')
try:
self.asset_dir = os.environ['ASSET_DIR']
except KeyError:
log.warn('Environment variable ASSET_DIR is not set!')
else:
log.info('Found environment variable ASSET_DIR: {a}'.format(a=self.asset_dir)) |
<SYSTEM_TASK:>
Populates the list of scenario role names in this deployment and
<END_TASK>
<USER_TASK:>
Description:
def set_scenario_role_names(self):
"""Populates the list of scenario role names in this deployment and
populates the scenario_master with the master role
Gets a list of deployment properties containing "isMaster" because
there is exactly one per scenario host, containing the role name
:return:
""" |
log = logging.getLogger(self.cls_logger + '.set_scenario_role_names')
is_master_props = self.get_matching_property_names('isMaster')
for is_master_prop in is_master_props:
role_name = is_master_prop.split('.')[-1]
log.info('Adding scenario host: {n}'.format(n=role_name))
self.scenario_role_names.append(role_name)
# Determine if this is the scenario master
is_master = self.get_value(is_master_prop).lower().strip()
if is_master == 'true':
log.info('Found master scenario host: {r}'.format(r=role_name))
self.scenario_master = role_name |
<SYSTEM_TASK:>
Populates a list of network info for each scenario host from
<END_TASK>
<USER_TASK:>
Description:
def set_scenario_network_info(self):
"""Populates a list of network info for each scenario host from
deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_scenario_network_info')
for scenario_host in self.scenario_role_names:
scenario_host_network_info = {'scenario_role_name': scenario_host}
log.debug('Looking up network info from deployment properties for scenario host: {s}'.format(
s=scenario_host))
network_name_props = self.get_matching_property_names(
'cons3rt.fap.deployment.machine.*{r}.*networkName'.format(r=scenario_host)
)
log.debug('Found {n} network name props'.format(n=str(len(network_name_props))))
network_info_list = []
for network_name_prop in network_name_props:
network_info = {}
network_name = self.get_value(network_name_prop)
if not network_name:
log.debug('Network name not found for prop: {n}'.format(n=network_name_prop))
continue
log.debug('Adding info for network name: {n}'.format(n=network_name))
network_info['network_name'] = network_name
interface_name_prop = 'cons3rt.fap.deployment.machine.{r}.{n}.interfaceName'.format(
r=scenario_host, n=network_name)
interface_name = self.get_value(interface_name_prop)
if interface_name:
network_info['interface_name'] = interface_name
external_ip_prop = 'cons3rt.fap.deployment.machine.{r}.{n}.externalIp'.format(
r=scenario_host, n=network_name)
external_ip = self.get_value(external_ip_prop)
if external_ip:
network_info['external_ip'] = external_ip
internal_ip_prop = 'cons3rt.fap.deployment.machine.{r}.{n}.internalIp'.format(
r=scenario_host, n=network_name)
internal_ip = self.get_value(internal_ip_prop)
if internal_ip:
network_info['internal_ip'] = internal_ip
is_cons3rt_connection_prop = 'cons3rt.fap.deployment.machine.{r}.{n}.isCons3rtConnection'.format(
r=scenario_host, n=network_name)
is_cons3rt_connection = self.get_value(is_cons3rt_connection_prop)
if is_cons3rt_connection:
if is_cons3rt_connection.lower().strip() == 'true':
network_info['is_cons3rt_connection'] = True
else:
network_info['is_cons3rt_connection'] = False
mac_address_prop = 'cons3rt.fap.deployment.machine.{r}.{n}.mac'.format(r=scenario_host, n=network_name)
mac_address = self.get_value(mac_address_prop)
if mac_address:
# Trim the escape characters from the mac address
mac_address = mac_address.replace('\\', '')
network_info['mac_address'] = mac_address
log.debug('Found network info: {n}'.format(n=str(network_info)))
network_info_list.append(network_info)
scenario_host_network_info['network_info'] = network_info_list
self.scenario_network_info.append(scenario_host_network_info) |
<SYSTEM_TASK:>
Sets the deployment name from deployment properties
<END_TASK>
<USER_TASK:>
Description:
def set_deployment_name(self):
"""Sets the deployment name from deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_deployment_name')
self.deployment_name = self.get_value('cons3rt.deployment.name')
log.info('Found deployment name: {n}'.format(n=self.deployment_name)) |
<SYSTEM_TASK:>
Sets the deployment ID from deployment properties
<END_TASK>
<USER_TASK:>
Description:
def set_deployment_id(self):
"""Sets the deployment ID from deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_deployment_id')
deployment_id_val = self.get_value('cons3rt.deployment.id')
if not deployment_id_val:
log.debug('Deployment ID not found in deployment properties')
return
try:
deployment_id = int(deployment_id_val)
except ValueError:
log.debug('Deployment ID found was unable to convert to an int: {d}'.format(d=deployment_id_val))
return
self.deployment_id = deployment_id
log.info('Found deployment ID: {i}'.format(i=str(self.deployment_id))) |
<SYSTEM_TASK:>
Sets the deployment run name from deployment properties
<END_TASK>
<USER_TASK:>
Description:
def set_deployment_run_name(self):
"""Sets the deployment run name from deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_deployment_run_name')
self.deployment_run_name = self.get_value('cons3rt.deploymentRun.name')
log.info('Found deployment run name: {n}'.format(n=self.deployment_run_name)) |
<SYSTEM_TASK:>
Sets the deployment run ID from deployment properties
<END_TASK>
<USER_TASK:>
Description:
def set_deployment_run_id(self):
"""Sets the deployment run ID from deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_deployment_run_id')
deployment_run_id_val = self.get_value('cons3rt.deploymentRun.id')
if not deployment_run_id_val:
log.debug('Deployment run ID not found in deployment properties')
return
try:
deployment_run_id = int(deployment_run_id_val)
except ValueError:
log.debug('Deployment run ID found was unable to convert to an int: {d}'.format(d=deployment_run_id_val))
return
self.deployment_run_id = deployment_run_id
log.info('Found deployment run ID: {i}'.format(i=str(self.deployment_run_id))) |
<SYSTEM_TASK:>
Sets the virtualization realm type from deployment properties
<END_TASK>
<USER_TASK:>
Description:
def set_virtualization_realm_type(self):
"""Sets the virtualization realm type from deployment properties
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_virtualization_realm_type')
self.virtualization_realm_type = self.get_value('cons3rt.deploymentRun.virtRealm.type')
log.info('Found virtualization realm type : {t}'.format(t=self.virtualization_realm_type)) |
<SYSTEM_TASK:>
Updates the hosts file depending on the OS
<END_TASK>
<USER_TASK:>
Description:
def update_hosts_file(self, ip, entry):
"""Updated the hosts file depending on the OS
:param ip: (str) IP address to update
:param entry: (str) entry to associate to the IP address
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.update_hosts_file')
if get_os() in ['Linux', 'Darwin']:
update_hosts_file_linux(ip=ip, entry=entry)
elif get_os() == 'Windows':
update_hosts_file_windows(ip=ip, entry=entry)
else:
log.warn('OS detected was neither Windows nor Linux')
<SYSTEM_TASK:>
Adds hosts file entries for each system in the scenario
<END_TASK>
<USER_TASK:>
Description:
def set_scenario_hosts_file(self, network_name='user-net', domain_name=None):
"""Adds hosts file entries for each system in the scenario
for the specified network_name provided
:param network_name: (str) Name of the network to add to the hosts file
:param domain_name: (str) Domain name to include in the hosts file entries if provided
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_scenario_hosts_file')
log.info('Scanning scenario hosts to make entries in the hosts file for network: {n}'.format(n=network_name))
for scenario_host in self.scenario_network_info:
if domain_name:
host_file_entry = '{r}.{d} {r}'.format(r=scenario_host['scenario_role_name'], d=domain_name)
else:
host_file_entry = scenario_host['scenario_role_name']
for host_network_info in scenario_host['network_info']:
if host_network_info['network_name'] == network_name:
self.update_hosts_file(ip=host_network_info['internal_ip'], entry=host_file_entry) |
<SYSTEM_TASK:>
Adds an entry to the hosts file for a scenario host given
<END_TASK>
<USER_TASK:>
Description:
def set_hosts_file_entry_for_role(self, role_name, network_name='user-net', fqdn=None, domain_name=None):
"""Adds an entry to the hosts file for a scenario host given
the role name and network name
:param role_name: (str) role name of the host to add
:param network_name: (str) Name of the network to add to the hosts file
:param fqdn: (str) Fully qualified domain name to use in the hosts file entry (trumps domain name)
:param domain_name: (str) Domain name to include in the hosts file entries if provided
:return:
""" |
log = logging.getLogger(self.cls_logger + '.set_hosts_file_entry_for_role')
# Determine the host file entry portion
if fqdn:
host_file_entry = fqdn
else:
if domain_name:
host_file_entry = '{r}.{d} {r}'.format(r=role_name, d=domain_name)
else:
host_file_entry = role_name
log.info('Using hosts file entry: {e}'.format(e=host_file_entry))
log.info('Scanning scenario hosts for role name [{r}] and network: {n}'.format(r=role_name, n=network_name))
for scenario_host in self.scenario_network_info:
if scenario_host['scenario_role_name'] == role_name:
for host_network_info in scenario_host['network_info']:
if host_network_info['network_name'] == network_name:
self.update_hosts_file(ip=host_network_info['internal_ip'], entry=host_file_entry) |
<SYSTEM_TASK:>
Given a cons3rt network name, return the network interface name
<END_TASK>
<USER_TASK:>
Description:
def get_device_for_network_linux(self, network_name):
"""Given a cons3rt network name, return the network interface name
on this Linux system
:param network_name: (str) Name of the network to search for
:return: (str) name of the network interface device or None
""" |
log = logging.getLogger(self.cls_logger + '.get_device_for_network_linux')
if get_os() not in ['Linux']:
log.warn('Non-linux OS detected, returning...')
return
# Get the IP address for the network name according to cons3rt
ip_address = self.get_ip_on_network(network_name=network_name)
if not ip_address:
log.warn('IP address not found for network with name: {n}'.format(n=network_name))
return
# Get the system device names and ip addresses
sys_info = ip_addr()
# Check for a matching IP address
device_name = None
for dev_name, sys_ip_address in sys_info.iteritems():
if sys_ip_address == ip_address:
log.debug('Found matching system IP [{i}] for device: {d}'.format(i=ip_address, d=dev_name))
device_name = dev_name
break
if not device_name:
log.warn('Network device not found with IP address {i} in system network data: {d}'.format(
i=ip_address, d=str(sys_info)))
return
log.debug('Found device name [{d}] with IP address [{i}] for network: {n}'.format(
d=device_name, i=ip_address, n=network_name))
return device_name |
<SYSTEM_TASK:>
Add sane defaults rules to the raw and filter tables
<END_TASK>
<USER_TASK:>
Description:
def insertSaneDefaults(self):
""" Add sane defaults rules to the raw and filter tables """ |
self.raw.insert(0, '-A OUTPUT -o lo -j NOTRACK')
self.raw.insert(1, '-A PREROUTING -i lo -j NOTRACK')
self.filters.insert(0, '-A INPUT -i lo -j ACCEPT')
self.filters.insert(1, '-A OUTPUT -o lo -j ACCEPT')
self.filters.insert(2, '-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT')
self.filters.insert(3, '-A OUTPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT')
return self |
<SYSTEM_TASK:>
Add a DROP policy at the end of the rules
<END_TASK>
<USER_TASK:>
Description:
def appendDefaultDrop(self):
""" Add a DROP policy at the end of the rules """ |
self.filters.append('-A INPUT -j DROP')
self.filters.append('-A OUTPUT -j DROP')
self.filters.append('-A FORWARD -j DROP')
return self |
<SYSTEM_TASK:>
Create a rules file in iptables-restore format
<END_TASK>
<USER_TASK:>
Description:
def template(self):
""" Create a rules file in iptables-restore format """ |
s = Template(self._IPTABLES_TEMPLATE)
return s.substitute(filtertable='\n'.join(self.filters),
rawtable='\n'.join(self.raw),
mangletable='\n'.join(self.mangle),
nattable='\n'.join(self.nat),
date=datetime.today()) |
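A hedged usage sketch chaining the three builder methods above (each returns self); the class name FirewallRules is hypothetical, as is the output path:
rules = FirewallRules()
rules.insertSaneDefaults().appendDefaultDrop()
with open('/tmp/iptables.rules', 'w') as f:
    f.write(rules.template())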
<SYSTEM_TASK:>
wrapper to search_s
<END_TASK>
<USER_TASK:>
Description:
def query(self, base, filterstr, attrlist=None):
""" wrapper to search_s """ |
return self.conn.search_s(base, ldap.SCOPE_SUBTREE, filterstr, attrlist) |
<SYSTEM_TASK:>
search for a user in LDAP and return its DN and uid
<END_TASK>
<USER_TASK:>
Description:
def getUserByNumber(self, base, uidNumber):
""" search for a user in LDAP and return its DN and uid """ |
res = self.query(base, "uidNumber="+str(uidNumber), ['uid'])
if len(res) > 1:
raise InputError(uidNumber, "Multiple users found. Expecting one.")
return res[0][0], res[0][1]['uid'][0] |
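A minimal usage sketch of the LDAP helper above; the connection object name, base DN and uidNumber are examples. A lookup that matches more than one entry raises InputError:
dn, uid = ldap_conn.getUserByNumber('ou=people,dc=example,dc=com', 10001)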
<SYSTEM_TASK:>
Determines if this system is on AWS
<END_TASK>
<USER_TASK:>
Description:
def is_aws():
"""Determines if this system is on AWS
:return: bool True if this system is running on AWS
""" |
log = logging.getLogger(mod_logger + '.is_aws')
log.info('Querying AWS meta data URL: {u}'.format(u=metadata_url))
# Re-try logic for checking the AWS meta data URL
retry_time_sec = 10
max_num_tries = 10
attempt_num = 1
while True:
if attempt_num > max_num_tries:
            log.info('Unable to query the AWS meta data URL, this system is NOT running on AWS')
return False
# Query the AWS meta data URL
try:
response = urllib.urlopen(metadata_url)
except(IOError, OSError) as ex:
log.warn('Failed to query the AWS meta data URL\n{e}'.format(e=str(ex)))
attempt_num += 1
time.sleep(retry_time_sec)
continue
# Check the code
if response.getcode() == 200:
log.info('AWS metadata service returned code 200, this system is running on AWS')
return True
else:
log.warn('AWS metadata service returned code: {c}'.format(c=response.getcode()))
attempt_num += 1
time.sleep(retry_time_sec)
continue |
<SYSTEM_TASK:>
Gets the AWS Availability Zone ID for this system
<END_TASK>
<USER_TASK:>
Description:
def get_availability_zone():
"""Gets the AWS Availability Zone ID for this system
:return: (str) Availability Zone ID where this system lives
""" |
log = logging.getLogger(mod_logger + '.get_availability_zone')
# Exit if not running on AWS
if not is_aws():
log.info('This machine is not running in AWS, exiting...')
return
availability_zone_url = metadata_url + 'placement/availability-zone'
try:
response = urllib.urlopen(availability_zone_url)
except(IOError, OSError) as ex:
msg = 'Unable to query URL to get Availability Zone: {u}\n{e}'.format(u=availability_zone_url, e=ex)
log.error(msg)
return
# Check the code
if response.getcode() != 200:
msg = 'There was a problem querying url: {u}, returned code: {c}, unable to get the Availability Zone'.format(
u=availability_zone_url, c=response.getcode())
log.error(msg)
return
availability_zone = response.read()
return availability_zone |
<SYSTEM_TASK:>
Gets the AWS Region ID for this system
<END_TASK>
<USER_TASK:>
Description:
def get_region():
"""Gets the AWS Region ID for this system
:return: (str) AWS Region ID where this system lives
""" |
log = logging.getLogger(mod_logger + '.get_region')
# First get the availability zone
availability_zone = get_availability_zone()
if availability_zone is None:
msg = 'Unable to determine the Availability Zone for this system, cannot determine the AWS Region'
log.error(msg)
return
    # Strip off the last character (the zone letter) to get the region
region = availability_zone[:-1]
return region |
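For example, the zone-to-region mapping simply drops the trailing zone letter:

    # Illustrative value; in practice the zone comes from the metadata service
    availability_zone = 'us-east-1a'
    region = availability_zone[:-1]
    print(region)  # -> us-east-1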
<SYSTEM_TASK:>
Determines the MAC address to use for querying the AWS
<END_TASK>
<USER_TASK:>
Description:
def get_primary_mac_address():
"""Determines the MAC address to use for querying the AWS
meta data service for network related queries
:return: (str) MAC address for the eth0 interface
:raises: AWSMetaDataError
""" |
log = logging.getLogger(mod_logger + '.get_primary_mac_address')
log.debug('Attempting to determine the MAC address for eth0...')
try:
mac_address = netifaces.ifaddresses('eth0')[netifaces.AF_LINK][0]['addr']
except Exception:
_, ex, trace = sys.exc_info()
msg = '{n}: Unable to determine the eth0 mac address for this system:\n{e}'.format(
n=ex.__class__.__name__, e=str(ex))
raise AWSMetaDataError, msg, trace
return mac_address |
<SYSTEM_TASK:>
Set the new value for the attribute separator.
<END_TASK>
<USER_TASK:>
Description:
def attr_sep(self, new_sep: str) -> None:
"""Set the new value for the attribute separator.
When the new value is assigned a new tree is generated.
""" |
self._attr_sep = new_sep
self._filters_tree = self._generate_filters_tree() |
<SYSTEM_TASK:>
Build the documentation for the project.
<END_TASK>
<USER_TASK:>
Description:
def docs(recreate, gen_index, run_doctests):
# type: (bool, bool, bool) -> None
""" Build the documentation for the project.
Args:
recreate (bool):
If set to **True**, the build and output directories will be cleared
prior to generating the docs.
gen_index (bool):
If set to **True**, it will generate top-level index file for the
reference documentation.
run_doctests (bool):
Set to **True** if you want to run doctests after the documentation
is generated.
        pretend (bool):
            Taken from the global context rather than passed as an argument.
            If set to **True**, do not actually execute any shell commands,
            just print the command that would be executed.
""" |
build_dir = conf.get_path('build_dir', '.build')
docs_dir = conf.get_path('docs.path', 'docs')
refdoc_paths = conf.get('docs.reference', [])
docs_html_dir = conf.get_path('docs.out', os.path.join(docs_dir, 'html'))
docs_tests_dir = conf.get_path('docs.tests_out',
os.path.join(docs_dir, 'doctest'))
docs_build_dir = os.path.join(build_dir, 'docs')
if recreate:
for path in (docs_html_dir, docs_build_dir):
if os.path.exists(path):
log.info("<91>Deleting <94>{}".format(path))
shutil.rmtree(path)
if refdoc_paths:
gen_ref_docs(gen_index)
else:
log.err('Not generating any reference documentation - '
'No docs.reference specified in config')
with conf.within_proj_dir(docs_dir):
log.info('Building docs')
shell.run('sphinx-build -b html -d {build} {docs} {out}'.format(
build=docs_build_dir,
docs=docs_dir,
out=docs_html_dir,
))
if run_doctests:
log.info('Running doctests')
shell.run('sphinx-build -b doctest -d {build} {docs} {out}'.format(
build=docs_build_dir,
docs=docs_dir,
out=docs_tests_dir,
))
log.info('You can view the docs by browsing to <34>file://{}'.format(
os.path.join(docs_html_dir, 'index.html')
)) |
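A matching config snippet, with key names taken from the conf lookups above and example paths only, could look like:

    build_dir: '.build'
    docs:
      path: 'docs'
      out: 'docs/html'
      reference:
        - 'src/mypkg'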
<SYSTEM_TASK:>
Generate reference documentation for the project.
<END_TASK>
<USER_TASK:>
Description:
def gen_ref_docs(gen_index=False):
    # type: (bool) -> None
""" Generate reference documentation for the project.
This will use **sphinx-refdoc** to generate the source .rst files for the
reference documentation.
Args:
gen_index (bool):
Set it to **True** if you want to generate the index file with the
list of top-level packages. This is set to default as in most cases
you only have one package per project so you can link directly to
that package reference (and if index were generated sphinx would
complain about file not included in toctree).
""" |
try:
from refdoc import generate_docs
except ImportError as ex:
msg = ("You need to install sphinx-refdoc if you want to generate "
"code reference docs.")
print(msg, file=sys.stderr)
log.err("Exception: {}".format(ex))
sys.exit(-1)
pretend = context.get('pretend', False)
docs_dir = conf.get_path('docs.path', 'docs')
docs_ref_dir = os.path.join(docs_dir, 'ref')
refdoc_paths = conf.get('docs.reference', [])
if os.path.exists(docs_ref_dir):
if not pretend:
log.info('Removing existing reference docs')
shutil.rmtree(docs_ref_dir)
else:
log.info('Would remove old reference docs')
args = {
'out_dir': docs_ref_dir,
'verbose': context.get('verbose', 0),
}
if gen_index:
args['gen_index'] = True
pkg_paths = [conf.proj_path(p) for p in refdoc_paths]
if not pretend:
log.info('Generating reference documentation')
generate_docs(pkg_paths, **args)
else:
log.info("Would generate reference docs with the following params")
shell.cprint('<90>{}', util.yaml_dump(args).rstrip())
shell.cprint('<90>paths:\n<34>{}', util.yaml_dump(pkg_paths).rstrip()) |
<SYSTEM_TASK:>
Finds a key in @obj via a period-delimited string @name.
<END_TASK>
<USER_TASK:>
Description:
def getitem_in(obj, name):
""" Finds a key in @obj via a period-delimited string @name.
@obj: (#dict)
@name: (#str) |.|-separated keys to search @obj in
..
obj = {'foo': {'bar': {'baz': True}}}
getitem_in(obj, 'foo.bar.baz')
..
|True|
""" |
for part in name.split('.'):
obj = obj[part]
return obj |
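The same dotted-key traversal can be written with functools.reduce; shown only as an equivalent sketch:

    from functools import reduce

    obj = {'foo': {'bar': {'baz': True}}}
    print(reduce(lambda acc, key: acc[key], 'foo.bar.baz'.split('.'), obj))  # -> True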
<SYSTEM_TASK:>
Create a new release.
<END_TASK>
<USER_TASK:>
Description:
def start(component, exact):
    # type: (str, str) -> None
""" Create a new release.
It will bump the current version number and create a release branch called
`release/<version>` with one new commit (the version bump).
**Example Config**::
\b
version_file: 'src/mypkg/__init__.py'
**Examples**::
\b
$ peltak release start patch # Make a new patch release
$ peltak release start minor # Make a new minor release
$ peltak release start major # Make a new major release
$ peltak release start # same as start patch
""" |
from peltak.extra.gitflow import logic
logic.release.start(component, exact) |
<SYSTEM_TASK:>
Tag the current commit with as the current version release.
<END_TASK>
<USER_TASK:>
Description:
def tag_release(message):
    # type: (str) -> None
""" Tag the current commit with as the current version release.
This should be the same commit as the one that's uploaded as the release
(to pypi for example).
**Example Config**::
\b
version_file: 'src/mypkg/__init__.py'
Examples::
$ peltak release tag # Tag the current commit as release
""" |
from peltak.extra.gitflow import logic
logic.release.tag(message) |
<SYSTEM_TASK:>
This function asks for a file location from the command terminal
<END_TASK>
<USER_TASK:>
Description:
def query_file_location(question, default_address):
"""
    This function asks for a file location from the command terminal and
    keeps prompting until an existing file is provided.
"question" is a string that is presented to the user.
"default_address" is the presumed file location.
""" |
while True:
        if default_address is None:
            prompt = '{}:'.format(question)
else:
prompt = '{} [{}]'.format(question, default_address)
sys.stdout.write(prompt)
input_address = raw_input()
if default_address is not None and input_address == '':
input_address = default_address
if os.path.isfile(input_address):
return input_address
else:
print 'sorry no file was found at that location\n' |
<SYSTEM_TASK:>
Handler starting the asyncio part.
<END_TASK>
<USER_TASK:>
Description:
def worker_allocator(self, async_loop, to_do, **kwargs):
""" Handler starting the asyncio part. """ |
d = kwargs
threading.Thread(
target=self._asyncio_thread, args=(async_loop, to_do, d)
).start() |
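The `_asyncio_thread` helper referenced above is not shown; a minimal sketch of the same pattern (an asyncio loop driven on a background thread) might look like this, with all names and signatures being assumptions rather than the project's code:

    import asyncio
    import threading

    def _asyncio_thread(async_loop, to_do, kwargs):
        # Hypothetical helper: run the coroutine to completion on the given loop
        asyncio.set_event_loop(async_loop)
        async_loop.run_until_complete(to_do(**kwargs))

    async def to_do(name):
        await asyncio.sleep(0.1)
        print('done:', name)

    loop = asyncio.new_event_loop()
    threading.Thread(target=_asyncio_thread, args=(loop, to_do, {'name': 'job-1'})).start()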
<SYSTEM_TASK:>
Clean up a multiple-line, potentially multiple-paragraph text
<END_TASK>
<USER_TASK:>
Description:
def _clean_text(text):
"""
Clean up a multiple-line, potentially multiple-paragraph text
string. This is used to extract the first paragraph of a string
and eliminate line breaks and indentation. Lines will be joined
together by a single space.
:param text: The text string to clean up. It is safe to pass
``None``.
:returns: The first paragraph, cleaned up as described above.
""" |
desc = []
for line in (text or '').strip().split('\n'):
# Clean up the line...
line = line.strip()
# We only want the first paragraph
if not line:
break
desc.append(line)
return ' '.join(desc) |
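For instance, assuming the function above, a two-paragraph indented string collapses to its first paragraph:

    text = '''
        Builds the widget list
        from the given config.

        Second paragraph is ignored.
    '''
    print(_clean_text(text))  # -> Builds the widget list from the given config.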
<SYSTEM_TASK:>
Decorator used to specify the program name for the console script
<END_TASK>
<USER_TASK:>
Description:
def prog(text):
"""
Decorator used to specify the program name for the console script
help message.
:param text: The text to use for the program name.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.prog = text
return func
return decorator |
<SYSTEM_TASK:>
Decorator used to specify a usage string for the console script
<END_TASK>
<USER_TASK:>
Description:
def usage(text):
"""
Decorator used to specify a usage string for the console script
help message.
:param text: The text to use for the usage.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.usage = text
return func
return decorator |
<SYSTEM_TASK:>
Decorator used to specify a short description of the console
<END_TASK>
<USER_TASK:>
Description:
def description(text):
"""
Decorator used to specify a short description of the console
script. This can be used to override the default, which is
derived from the docstring of the function.
:param text: The text to use for the description.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.description = text
return func
return decorator |
<SYSTEM_TASK:>
Decorator used to specify an epilog for the console script help
<END_TASK>
<USER_TASK:>
Description:
def epilog(text):
"""
Decorator used to specify an epilog for the console script help
message.
:param text: The text to use for the epilog.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.epilog = text
return func
return decorator |
<SYSTEM_TASK:>
Decorator used to specify the formatter class for the console
<END_TASK>
<USER_TASK:>
Description:
def formatter_class(klass):
"""
Decorator used to specify the formatter class for the console
script.
:param klass: The formatter class to use.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.formatter_class = klass
return func
return decorator |
<SYSTEM_TASK:>
Decorator used to load subcommands from a given ``pkg_resources``
<END_TASK>
<USER_TASK:>
Description:
def load_subcommands(group):
"""
Decorator used to load subcommands from a given ``pkg_resources``
entrypoint group. Each function must be appropriately decorated
with the ``cli_tools`` decorators to be considered an extension.
:param group: The name of the ``pkg_resources`` entrypoint group.
""" |
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor._add_extensions(group)
return func
return decorator |
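Taken together, a console-script entry point might stack these decorators as below (a hypothetical function; the ScriptAdaptor plumbing that consumes the stored attributes is not shown):

    @prog('mytool')
    @usage('%(prog)s [options] NAME')
    @description('Say hello from the command line.')
    @epilog('Report bugs to the issue tracker.')
    def main(name):
        print('Hello, %s!' % name)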
<SYSTEM_TASK:>
Create intermediate nodes if hierarchy does not exist.
<END_TASK>
<USER_TASK:>
Description:
def _create_intermediate_nodes(self, name):
"""Create intermediate nodes if hierarchy does not exist.""" |
hierarchy = self._split_node_name(name, self.root_name)
node_tree = [
self.root_name
+ self._node_separator
+ self._node_separator.join(hierarchy[: num + 1])
for num in range(len(hierarchy))
]
iobj = [
(child[: child.rfind(self._node_separator)], child)
for child in node_tree
if child not in self._db
]
for parent, child in iobj:
self._db[child] = {"parent": parent, "children": [], "data": []}
self._db[parent]["children"] = sorted(
self._db[parent]["children"] + [child]
) |
<SYSTEM_TASK:>
Delete subtree private method.
<END_TASK>
<USER_TASK:>
Description:
def _delete_subtree(self, nodes):
"""
Delete subtree private method.
No argument validation and usage of getter/setter private methods is
used for speed
""" |
nodes = nodes if isinstance(nodes, list) else [nodes]
iobj = [
(self._db[node]["parent"], node)
for node in nodes
if self._node_name_in_tree(node)
]
for parent, node in iobj:
# Delete link to parent (if not root node)
del_list = self._get_subtree(node)
if parent:
self._db[parent]["children"].remove(node)
# Delete children (sub-tree)
for child in del_list:
del self._db[child]
if self._empty_tree():
self._root = None
self._root_hierarchy_length = None |
<SYSTEM_TASK:>
Find common prefix between two nodes.
<END_TASK>
<USER_TASK:>
Description:
def _find_common_prefix(self, node1, node2):
"""Find common prefix between two nodes.""" |
tokens1 = [item.strip() for item in node1.split(self.node_separator)]
tokens2 = [item.strip() for item in node2.split(self.node_separator)]
ret = []
for token1, token2 in zip(tokens1, tokens2):
if token1 == token2:
ret.append(token1)
else:
break
return self.node_separator.join(ret) |
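With ptrie's default '.' separator this reduces to the shared leading hierarchy, for example:

    >>> import ptrie
    >>> tobj = ptrie.Trie()
    >>> tobj._find_common_prefix('root.branch1.leaf1', 'root.branch1.leaf2')
    'root.branch1'
    >>> tobj._find_common_prefix('root.branch1', 'root.branch2')
    'root'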
<SYSTEM_TASK:>
Rename node private method.
<END_TASK>
<USER_TASK:>
Description:
def _rename_node(self, name, new_name):
"""
Rename node private method.
No argument validation and usage of getter/setter private methods is
used for speed
""" |
# Update parent
if not self.is_root(name):
parent = self._db[name]["parent"]
self._db[parent]["children"].remove(name)
self._db[parent]["children"] = sorted(
self._db[parent]["children"] + [new_name]
)
# Update children
iobj = self._get_subtree(name) if name != self.root_name else self.nodes
for key in iobj:
new_key = key.replace(name, new_name, 1)
new_parent = (
self._db[key]["parent"]
if key == name
else self._db[key]["parent"].replace(name, new_name, 1)
)
self._db[new_key] = {
"parent": new_parent,
"children": [
child.replace(name, new_name, 1)
for child in self._db[key]["children"]
],
"data": copy.deepcopy(self._db[key]["data"]),
}
del self._db[key]
if name == self.root_name:
self._root = new_name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
) |
<SYSTEM_TASK:>
Search_tree for nodes that contain a specific hierarchy name.
<END_TASK>
<USER_TASK:>
Description:
def _search_tree(self, name):
"""Search_tree for nodes that contain a specific hierarchy name.""" |
tpl1 = "{sep}{name}{sep}".format(sep=self._node_separator, name=name)
tpl2 = "{sep}{name}".format(sep=self._node_separator, name=name)
tpl3 = "{name}{sep}".format(sep=self._node_separator, name=name)
return sorted(
[
node
for node in self._db
if (tpl1 in node)
or node.endswith(tpl2)
or node.startswith(tpl3)
or (name == node)
]
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def add_nodes(self, nodes): # noqa: D302
r"""
Add nodes to tree.
:param nodes: Node(s) to add with associated data. If there are
several list items in the argument with the same node
name the resulting node data is a list with items
corresponding to the data of each entry in the argument
with the same node name, in their order of appearance,
in addition to any existing node data if the node is
already present in the tree
:type nodes: :ref:`NodesWithData`
:raises:
* RuntimeError (Argument \`nodes\` is not valid)
* ValueError (Illegal node name: *[node_name]*)
For example:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('ptrie_example.py', cog.out)
.. =]=
.. code-block:: python
# ptrie_example.py
import ptrie
def create_tree():
tobj = ptrie.Trie()
tobj.add_nodes([
{'name':'root.branch1', 'data':5},
{'name':'root.branch1', 'data':7},
{'name':'root.branch2', 'data':[]},
{'name':'root.branch1.leaf1', 'data':[]},
{'name':'root.branch1.leaf1.subleaf1', 'data':333},
{'name':'root.branch1.leaf2', 'data':'Hello world!'},
{'name':'root.branch1.leaf2.subleaf2', 'data':[]},
])
return tobj
.. =[=end=]=
.. code-block:: python
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.get_data('root.branch1')
[5, 7]
""" |
self._validate_nodes_with_data(nodes)
nodes = nodes if isinstance(nodes, list) else [nodes]
# Create root node (if needed)
if not self.root_name:
self._set_root_name(nodes[0]["name"].split(self._node_separator)[0].strip())
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
)
self._create_node(name=self.root_name, parent="", children=[], data=[])
# Process new data
for node_dict in nodes:
name, data = node_dict["name"], node_dict["data"]
if name not in self._db:
# Validate node name (root of new node same as tree root)
if not name.startswith(self.root_name + self._node_separator):
raise ValueError("Illegal node name: {0}".format(name))
self._create_intermediate_nodes(name)
self._db[name]["data"] += copy.deepcopy(
data
if isinstance(data, list) and data
else ([] if isinstance(data, list) else [data])
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def collapse_subtree(self, name, recursive=True): # noqa: D302
r"""
Collapse a sub-tree.
Nodes that have a single child and no data are combined with their
child as a single tree node
:param name: Root of the sub-tree to collapse
:type name: :ref:`NodeName`
:param recursive: Flag that indicates whether the collapse operation
is performed on the whole sub-tree (True) or whether
it stops upon reaching the first node where the
collapsing condition is not satisfied (False)
:type recursive: boolean
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Argument \`recursive\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.collapse_subtree('root.branch1')
>>> print(tobj)
root
├branch1 (*)
│├leaf1.subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
        ``root.branch1.leaf1`` is collapsed because it only has one child
        (``root.branch1.leaf1.subleaf1``) and no data; ``root.branch1.leaf2``
        is not collapsed because although it has only one child
        (``root.branch1.leaf2.subleaf2``) it does have data
        associated with it, :code:`'Hello world!'`
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if not isinstance(recursive, bool):
raise RuntimeError("Argument `recursive` is not valid")
self._node_in_tree(name)
self._collapse_subtree(name, recursive) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def copy_subtree(self, source_node, dest_node): # noqa: D302
r"""
Copy a sub-tree from one sub-node to another.
Data is added if some nodes of the source sub-tree exist in the
destination sub-tree
        :param source_node: Root node of the sub-tree to copy from
        :type source_node: :ref:`NodeName`
        :param dest_node: Root node of the sub-tree to copy to
        :type dest_node: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`dest_node\` is not valid)
* RuntimeError (Argument \`source_node\` is not valid)
* RuntimeError (Illegal root in destination node)
* RuntimeError (Node *[source_node]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.copy_subtree('root.branch1', 'root.branch3')
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
├branch2
└branch3 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
""" |
if self._validate_node_name(source_node):
raise RuntimeError("Argument `source_node` is not valid")
if self._validate_node_name(dest_node):
raise RuntimeError("Argument `dest_node` is not valid")
if source_node not in self._db:
raise RuntimeError("Node {0} not in tree".format(source_node))
if not dest_node.startswith(self.root_name + self._node_separator):
raise RuntimeError("Illegal root in destination node")
for node in self._get_subtree(source_node):
self._db[node.replace(source_node, dest_node, 1)] = {
"parent": self._db[node]["parent"].replace(source_node, dest_node, 1),
"children": [
child.replace(source_node, dest_node, 1)
for child in self._db[node]["children"]
],
"data": copy.deepcopy(self._db[node]["data"]),
}
self._create_intermediate_nodes(dest_node)
parent = self._node_separator.join(dest_node.split(self._node_separator)[:-1])
self._db[dest_node]["parent"] = parent
self._db[parent]["children"] = sorted(
self._db[parent]["children"] + [dest_node]
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def flatten_subtree(self, name): # noqa: D302
r"""
Flatten sub-tree.
Nodes that have children and no data are merged with each child
:param name: Ending hierarchy node whose sub-trees are to be
flattened
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> tobj.add_nodes([
... {'name':'root.branch1.leaf1.subleaf2', 'data':[]},
... {'name':'root.branch2.leaf1', 'data':'loren ipsum'},
... {'name':'root.branch2.leaf1.another_subleaf1', 'data':[]},
... {'name':'root.branch2.leaf1.another_subleaf2', 'data':[]}
... ])
>>> print(str(tobj))
root
├branch1 (*)
│├leaf1
││├subleaf1 (*)
││└subleaf2
│└leaf2 (*)
│ └subleaf2
└branch2
└leaf1 (*)
├another_subleaf1
└another_subleaf2
>>> tobj.flatten_subtree('root.branch1.leaf1')
>>> print(str(tobj))
root
├branch1 (*)
│├leaf1.subleaf1 (*)
│├leaf1.subleaf2
│└leaf2 (*)
│ └subleaf2
└branch2
└leaf1 (*)
├another_subleaf1
└another_subleaf2
>>> tobj.flatten_subtree('root.branch2.leaf1')
>>> print(str(tobj))
root
├branch1 (*)
│├leaf1.subleaf1 (*)
│├leaf1.subleaf2
│└leaf2 (*)
│ └subleaf2
└branch2
└leaf1 (*)
├another_subleaf1
└another_subleaf2
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
parent = self._db[name]["parent"]
if (parent) and (not self._db[name]["data"]):
children = self._db[name]["children"]
for child in children:
self._db[child]["parent"] = parent
self._db[parent]["children"].remove(name)
self._db[parent]["children"] = sorted(
self._db[parent]["children"] + children
)
del self._db[name] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_children(self, name):
r"""
Get the children node names of a node.
:param name: Parent node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return sorted(self._db[name]["children"]) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_data(self, name):
r"""
Get the data associated with a node.
:param name: Node name
:type name: :ref:`NodeName`
:rtype: any type or list of objects of any type
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._db[name]["data"] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_node(self, name):
r"""
Get a tree node structure.
The structure is a dictionary with the following keys:
* **parent** (*NodeName*) Parent node name, :code:`''` if the
node is the root node
* **children** (*list of NodeName*) Children node names, an
empty list if node is a leaf
* **data** (*list*) Node data, an empty list if node contains no data
:param name: Node name
:type name: string
:rtype: dictionary
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._db[name] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_node_children(self, name):
r"""
Get the list of children structures of a node.
See :py:meth:`ptrie.Trie.get_node` for details about the structure
:param name: Parent node name
:type name: :ref:`NodeName`
:rtype: list
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return [self._db[child] for child in self._db[name]["children"]] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_node_parent(self, name):
r"""
Get the parent structure of a node.
See :py:meth:`ptrie.Trie.get_node` for details about the structure
:param name: Child node name
:type name: :ref:`NodeName`
:rtype: dictionary
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._db[self._db[name]["parent"]] if not self.is_root(name) else {} |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def get_subtree(self, name): # noqa: D302
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._get_subtree(name) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def in_tree(self, name):
r"""
Test if a node is in the tree.
:param name: Node name to search for
:type name: :ref:`NodeName`
:rtype: boolean
:raises: RuntimeError (Argument \`name\` is not valid)
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
return name in self._db |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def make_root(self, name): # noqa: D302
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if (name != self.root_name) and (self._node_in_tree(name)):
for key in [node for node in self.nodes if node.find(name) != 0]:
del self._db[key]
self._db[name]["parent"] = ""
self._root = name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def rename_node(self, name, new_name): # noqa: D302
r"""
Rename a tree node.
It is typical to have a root node name with more than one hierarchy
level after using :py:meth:`ptrie.Trie.make_root`. In this instance the
root node *can* be renamed as long as the new root name has the same or
less hierarchy levels as the existing root name
:param name: Node name to rename
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Argument \`new_name\` has an illegal root node)
* RuntimeError (Argument \`new_name\` is an illegal root node name)
* RuntimeError (Argument \`new_name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
* RuntimeError (Node *[new_name]* already exists)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.rename_node(
... 'root.branch1.leaf1',
... 'root.branch1.mapleleaf1'
... )
>>> print(tobj)
root
├branch1 (*)
│├leaf2 (*)
││└subleaf2
│└mapleleaf1
│ └subleaf1 (*)
└branch2
""" |
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if self._validate_node_name(new_name):
raise RuntimeError("Argument `new_name` is not valid")
self._node_in_tree(name)
if self.in_tree(new_name) and (name != self.root_name):
raise RuntimeError("Node {0} already exists".format(new_name))
sep = self._node_separator
if (name.split(sep)[:-1] != new_name.split(sep)[:-1]) and (
name != self.root_name
):
raise RuntimeError("Argument `new_name` has an illegal root node")
old_hierarchy_length = len(name.split(self._node_separator))
new_hierarchy_length = len(new_name.split(self._node_separator))
if (name == self.root_name) and (old_hierarchy_length < new_hierarchy_length):
raise RuntimeError("Argument `new_name` is an illegal root node name")
self._rename_node(name, new_name) |
<SYSTEM_TASK:>
Find nearest file matching some criteria.
<END_TASK>
<USER_TASK:>
Description:
def find_in_bids(filename, pattern=None, generator=False, upwards=False,
wildcard=True, **kwargs):
"""Find nearest file matching some criteria.
Parameters
----------
filename : instance of Path
search the root for this file
pattern : str
glob string for search criteria of the filename of interest (remember
to include '*'). The pattern is passed directly to rglob.
wildcard : bool
use wildcards for unspecified fields or not (if True, add "_*_" between
fields)
    upwards : bool
        whether to keep on searching upwards in the parent directories
    generator : bool
        if True, return a generator over all matches instead of a single Path
    kwargs : dict
        key/value pairs used to build the search pattern when `pattern` is None
Returns
-------
Path
filename matching the pattern
""" |
if upwards and generator:
raise ValueError('You cannot search upwards and have a generator')
if pattern is None:
pattern = _generate_pattern(wildcard, kwargs)
lg.debug(f'Searching {pattern} in {filename}')
if upwards and filename == find_root(filename):
        raise FileNotFoundError(f'Could not find file matching {pattern} in {filename}')
if generator:
return filename.rglob(pattern)
matches = list(filename.rglob(pattern))
if len(matches) == 1:
return matches[0]
elif len(matches) == 0:
if upwards:
return find_in_bids(filename.parent, pattern=pattern, upwards=upwards)
else:
raise FileNotFoundError(f'Could not find file matching {pattern} in {filename}')
else:
matches_str = '"\n\t"'.join(str(x) for x in matches)
raise FileNotFoundError(f'Multiple files matching "{pattern}":\n\t"{matches_str}"') |
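A hypothetical call, with made-up paths and a glob pattern (only `pattern` and `upwards` from the signature above are used):

    from pathlib import Path

    # Search the tree below this file for an electrodes file and, failing that,
    # retry from the parent directories (upwards=True).
    found = find_in_bids(Path('/data/bids/sub-01/ieeg/sub-01_task-rest_ieeg.eeg'),
                         pattern='*_electrodes.tsv', upwards=True)
    print(found)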
<SYSTEM_TASK:>
Get xyz coordinates for these electrodes
<END_TASK>
<USER_TASK:>
Description:
def get_xyz(self, list_of_names=None):
"""Get xyz coordinates for these electrodes
Parameters
----------
list_of_names : list of str
list of electrode names to use
Returns
-------
list of tuples of 3 floats (x, y, z)
list of xyz coordinates for all the electrodes
TODO
----
coordinate system of electrodes
""" |
if list_of_names is not None:
filter_lambda = lambda x: x['name'] in list_of_names
else:
filter_lambda = None
return self.electrodes.get(filter_lambda=filter_lambda,
map_lambda=lambda e: (float(e['x']),
float(e['y']),
float(e['z']))) |
<SYSTEM_TASK:>
Constructs Monte Carlo simulated data set using the
<END_TASK>
<USER_TASK:>
Description:
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
""" |
if type(v)==list:
vboot=[] # list of boostrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot |
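As a quick usage sketch of the function above, the spread of bootstrapped means gives a standard error for the sample mean:

    import numpy

    x = numpy.random.normal(10., 2., size=500)
    boot_means = numpy.array([bootstrap(x).mean() for _ in range(1000)])
    print(x.mean(), boot_means.std())  # sample mean and its bootstrap standard error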
<SYSTEM_TASK:>
Does the BCES with bootstrapping.
<END_TASK>
<USER_TASK:>
Description:
def bcesboot_backup(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
    .. note:: this method is definitely not nearly as fast as bces_regress.f. Needs to be optimized. Maybe adapt the fortran routine using f2py?
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen, http://goo.gl/8S1Oo
""" |
import fish
# Progress bar initialization
peixe = fish.ProgressFish(total=nsim)
print "Bootstrapping progress:"
"""
My convention for storing the results of the bces code below as
matrixes for processing later are as follow:
simulation\method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in range(nsim):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=numpy.vstack((am,asim))
bm=numpy.vstack((bm,bsim))
# Progress bar
peixe.animate(amount=i)
# Bootstrapping results
a=numpy.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=numpy.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=numpy.zeros(4),numpy.zeros(4),numpy.zeros(4)
for i in range(4):
erra[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=numpy.sqrt( 1./(nsim-1) * ( numpy.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( numpy.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab |
<SYSTEM_TASK:>
Calculates the median value in a @lst
<END_TASK>
<USER_TASK:>
Description:
def median(lst):
""" Calcuates the median value in a @lst """ |
#: http://stackoverflow.com/a/24101534
sortedLst = sorted(lst)
lstLen = len(lst)
index = (lstLen - 1) // 2
if (lstLen % 2):
return sortedLst[index]
else:
return (sortedLst[index] + sortedLst[index + 1])/2.0 |
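For example:

    >>> median([7, 1, 3])
    3
    >>> median([7, 1, 3, 2])
    2.5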
<SYSTEM_TASK:>
Unsafely attempts to remove HTML whitespace. This is not an HTML parser
<END_TASK>
<USER_TASK:>
Description:
def remove_whitespace(s):
""" Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags
""" |
ignores = {}
for ignore in html_ignore_whitespace_re.finditer(s):
name = "{}{}{}".format(r"{}", uuid.uuid4(), r"{}")
ignores[name] = ignore.group()
s = s.replace(ignore.group(), name)
    s = whitespace_re.sub(r' ', s).strip()  # whitespace_re: assumed module-level compiled regex
for name, val in ignores.items():
s = s.replace(name, val)
return s |
<SYSTEM_TASK:>
Turns hashtag-like strings into HTML links
<END_TASK>
<USER_TASK:>
Description:
def hashtag_links(uri, s):
""" Turns hashtag-like strings into HTML links
@uri: /uri/ root for the hashtag-like
@s: the #str string you're looking for |#|hashtags in
-> #str HTML link |<a href="/uri/hashtag">hashtag</a>|
""" |
for tag, after in hashtag_re.findall(s):
_uri = '/' + (uri or "").lstrip("/") + quote(tag)
link = '<a href="{}">#{}</a>{}'.format(_uri.lower(), tag, after)
s = s.replace('#' + tag, link)
return s |
<SYSTEM_TASK:>
Upload the release to a pypi server.
<END_TASK>
<USER_TASK:>
Description:
def upload(target):
# type: (str) -> None
""" Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
""" |
log.info("Uploading to pypi server <33>{}".format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) |
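The `target` must name a section of ~/.pypirc; a hypothetical configuration for a private index might look like:

    [distutils]
    index-servers =
        private

    [private]
    repository = https://pypi.example.com/
    username = ci-bot
    password = s3cr3t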
<SYSTEM_TASK:>
Sets the text attribute of the payload
<END_TASK>
<USER_TASK:>
Description:
def set_text(self, text):
"""Sets the text attribute of the payload
:param text: (str) Text of the message
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.set_text')
if not isinstance(text, basestring):
msg = 'text arg must be a string'
log.error(msg)
raise ValueError(msg)
self.payload['text'] = text
log.debug('Set message text to: {t}'.format(t=text)) |
<SYSTEM_TASK:>
Adds an attachment to the SlackMessage payload
<END_TASK>
<USER_TASK:>
Description:
def add_attachment(self, attachment):
"""Adds an attachment to the SlackMessage payload
This public method adds a slack message to the attachment
list.
:param attachment: SlackAttachment object
:return: None
""" |
log = logging.getLogger(self.cls_logger + '.add_attachment')
if not isinstance(attachment, SlackAttachment):
msg = 'attachment must be of type: SlackAttachment'
log.error(msg)
raise ValueError(msg)
self.attachments.append(attachment.attachment)
log.debug('Added attachment: {a}'.format(a=attachment)) |
<SYSTEM_TASK:>
Sends the Slack message
<END_TASK>
<USER_TASK:>
Description:
def send(self):
"""Sends the Slack message
This public method sends the Slack message along with any
attachments, then clears the attachments array.
:return: None
:raises: OSError
""" |
log = logging.getLogger(self.cls_logger + '.send')
if self.attachments:
self.payload['attachments'] = self.attachments
# Encode payload in JSON
log.debug('Using payload: %s', self.payload)
try:
json_payload = json.JSONEncoder().encode(self.payload)
except(TypeError, ValueError, OverflowError):
_, ex, trace = sys.exc_info()
msg = 'There was a problem encoding the JSON payload\n{e}'.format(e=str(ex))
raise OSError, msg, trace
else:
log.debug('JSON payload: %s', json_payload)
# Post to Slack!
log.debug('Posting message to Slack...')
try:
result = requests.post(url=self.webhook_url, data=json_payload)
except requests.exceptions.ConnectionError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem posting to Slack\n{e}'.format(n=ex.__class__.__name__, e=str(ex))
raise OSError, msg, trace
# Check return code
if result.status_code != 200:
log.error('Slack post to url {u} failed with code: {c}'.format(c=result.status_code, u=self.webhook_url))
else:
log.debug('Posted message to Slack successfully.')
# Clear out attachments after sending
self.attachments = []
self.payload.pop('attachments', None) |
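End to end, sending a message with one attachment might look like the sketch below; the SlackMessage constructor arguments shown (webhook_url, text) are assumptions for illustration, not a documented signature:

    msg = SlackMessage(webhook_url='https://hooks.slack.com/services/T000/B000/XXXX',
                       text='Deployment finished')
    msg.add_attachment(SlackAttachment(fallback='build log', text='All tests passed',
                                       color='#36a64f'))
    msg.send()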
<SYSTEM_TASK:>
Sends a Slack message with an attachment for each cons3rt agent log
<END_TASK>
<USER_TASK:>
Description:
def send_cons3rt_agent_logs(self):
"""Sends a Slack message with an attachment for each cons3rt agent log
:return:
""" |
log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
log.debug('Searching for log files in directory: {d}'.format(d=self.dep.cons3rt_agent_log_dir))
for item in os.listdir(self.dep.cons3rt_agent_log_dir):
item_path = os.path.join(self.dep.cons3rt_agent_log_dir, item)
if os.path.isfile(item_path):
log.debug('Adding slack attachment with cons3rt agent log file: {f}'.format(f=item_path))
try:
with open(item_path, 'r') as f:
file_text = f.read()
except (IOError, OSError) as e:
log.warn('There was a problem opening file: {f}\n{e}'.format(f=item_path, e=e))
continue
# Take the last 7000 characters
file_text_trimmed = file_text[-7000:]
attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
self.add_attachment(attachment)
self.send() |
<SYSTEM_TASK:>
Sends a Slack message with the contents of a text file
<END_TASK>
<USER_TASK:>
Description:
def send_text_file(self, text_file):
"""Sends a Slack message with the contents of a text file
        :param text_file: (str) Full path to text file to send
:return: None
:raises: Cons3rtSlackerError
""" |
log = logging.getLogger(self.cls_logger + '.send_text_file')
if not isinstance(text_file, basestring):
msg = 'arg text_file must be a string, found type: {t}'.format(t=text_file.__class__.__name__)
raise Cons3rtSlackerError(msg)
if not os.path.isfile(text_file):
msg = 'The provided text_file was not found or is not a file: {f}'.format(f=text_file)
raise Cons3rtSlackerError(msg)
log.debug('Attempting to send a Slack message with the contents of file: {f}'.format(f=text_file))
try:
with open(text_file, 'r') as f:
file_text = f.read()
except (IOError, OSError):
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem opening file: {f}\n{e}'.format(
n=ex.__class__.__name__, f=text_file, e=str(ex))
raise Cons3rtSlackerError, msg, trace
# Take the last 7000 characters
file_text_trimmed = file_text[-7000:]
attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
self.add_attachment(attachment)
self.send() |