Code
stringlengths 103
85.9k
| Summary
sequencelengths 0
94
|
---|---|
def modify_file_in_place(self, fp, length, iso_path, rr_name=None,  # pylint: disable=unused-argument
                         joliet_path=None, udf_path=None):  # pylint: disable=unused-argument
    # type: (BinaryIO, int, str, Optional[str], Optional[str], Optional[str]) -> None
    '''
    An API to modify a file in place on the ISO.  This can be extremely fast
    (much faster than calling the write method), but has many restrictions.

    1.  The original ISO file pointer must have been opened for reading
        and writing.
    2.  Only an existing *file* can be modified; directories cannot be
        changed.
    3.  Only an existing file can be *modified*; no new files can be added
        or removed.
    4.  The new file contents must use the same number of extents (typically
        2048 bytes) as the old file contents.  If using this API to shrink
        a file, this is usually easy since the new contents can be padded
        out with zeros or newlines to meet the requirement.  If using this
        API to grow a file, the new contents can only grow up to the next
        extent boundary.

    Unlike all other APIs in PyCdlib, this API actually modifies the
    originally opened on-disk file, so use it with caution.

    Parameters:
     fp - The file object to use for the contents of the new file.
     length - The length of the new data for the file.
     iso_path - The ISO9660 absolute path to the file destination on the ISO.
     rr_name - The Rock Ridge name of the file destination on the ISO.
     joliet_path - The Joliet absolute path to the file destination on the ISO.
     udf_path - The UDF absolute path to the file destination on the ISO.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # Since this API writes back to the originally opened file, that file
    # must have been opened writable.  Not every file-like object exposes
    # 'mode', hence the hasattr() guard.
    if hasattr(self._cdfp, 'mode') and not self._cdfp.mode.startswith(('r+', 'w', 'a', 'rb+')):
        raise pycdlibexception.PyCdlibInvalidInput('To modify a file in place, the original ISO must have been opened in a write mode (r+, w, or a)')

    log_block_size = self.pvd.logical_block_size()

    child = self._find_iso_record(utils.normpath(iso_path))

    # Restriction 4: the number of extents must not change, since we cannot
    # shift any other data on the ISO.
    old_num_extents = utils.ceiling_div(child.get_data_length(), log_block_size)
    new_num_extents = utils.ceiling_div(length, log_block_size)

    if old_num_extents != new_num_extents:
        raise pycdlibexception.PyCdlibInvalidInput('When modifying a file in-place, the number of extents for a file cannot change!')

    if not child.is_file():
        raise pycdlibexception.PyCdlibInvalidInput('Cannot modify a directory with modify_file_in_place')

    if child.inode is None:
        raise pycdlibexception.PyCdlibInternalError('Child file found without inode')

    child.inode.update_fp(fp, length)

    # Remove the old size from the PVD size
    for pvd in self.pvds:
        pvd.remove_from_space_size(child.get_data_length())
    # And add the new size to the PVD size
    for pvd in self.pvds:
        pvd.add_to_space_size(length)

    if self.enhanced_vd is not None:
        self.enhanced_vd.copy_sizes(self.pvd)

    # If we made it here, we have successfully updated all of the in-memory
    # metadata.  Now we can go and modify the on-disk file.

    self._cdfp.seek(self.pvd.extent_location() * log_block_size)

    # First write out the PVD.
    rec = self.pvd.record()
    self._cdfp.write(rec)

    # Write out the joliet VD
    if self.joliet_vd is not None:
        self._cdfp.seek(self.joliet_vd.extent_location() * log_block_size)
        rec = self.joliet_vd.record()
        self._cdfp.write(rec)

    # Write out the enhanced VD
    if self.enhanced_vd is not None:
        self._cdfp.seek(self.enhanced_vd.extent_location() * log_block_size)
        rec = self.enhanced_vd.record()
        self._cdfp.write(rec)

    # We don't have to write anything out for UDF since it only tracks
    # extents, and we know we aren't changing the number of extents.

    # Write out the actual file contents, zero-padding the final extent so
    # no stale bytes from the old contents remain.
    self._cdfp.seek(child.extent_location() * log_block_size)
    with inode.InodeOpenData(child.inode, log_block_size) as (data_fp, data_len):
        utils.copy_data(data_len, log_block_size, data_fp, self._cdfp)
        utils.zero_pad(self._cdfp, data_len, log_block_size)

    # Finally write out the directory record entry.
    # This is a little tricky because of what things mean.  First of all,
    # child.extents_to_here represents the total number of extents up to
    # this child in the parent.  Thus, to get the absolute extent offset,
    # we start with the parent's extent location, add on the number of
    # extents to here, and remove 1 (since our offset will be zero-based).
    # Second, child.offset_to_here is the *last* byte that the child uses,
    # so to get the start of it we subtract off the length of the child.
    # Then we can multiply the extent location by the logical block size,
    # add on the offset, and get to the absolute location in the file.
    first_joliet = True
    for record in child.inode.linked_records:
        if isinstance(record, dr.DirectoryRecord):
            # Only adjust the Joliet space size once, no matter how many
            # Joliet records link to this inode.
            if self.joliet_vd is not None and id(record.vd) == id(self.joliet_vd) and first_joliet:
                first_joliet = False
                self.joliet_vd.remove_from_space_size(record.get_data_length())
                self.joliet_vd.add_to_space_size(length)
            if record.parent is None:
                raise pycdlibexception.PyCdlibInternalError('Modifying file with empty parent')
            abs_extent_loc = record.parent.extent_location() + record.extents_to_here - 1
            offset = record.offset_to_here - record.dr_len
            abs_offset = abs_extent_loc * log_block_size + offset
        elif isinstance(record, udfmod.UDFFileEntry):
            abs_offset = record.extent_location() * log_block_size

        record.set_data_length(length)
        self._cdfp.seek(abs_offset)
        self._cdfp.write(record.record())
def add_hard_link(self, **kwargs):
    # type: (Any) -> None
    '''
    Add a hard link to the ISO.  Hard links are alternate names for the
    same file contents that don't take up any additional space on the
    ISO.  This API can be used to create hard links between two files on
    the ISO9660 filesystem, between two files on the Joliet filesystem, or
    between a file on the ISO9660 filesystem and the Joliet filesystem.
    In all cases, exactly one old path must be specified, and exactly one
    new path must be specified.

    Note that this is an advanced API, so using it in combination with the
    higher-level APIs (like rm_file) may result in unexpected behavior.
    Once this API has been used, this API and rm_hard_link() should be
    preferred over add_file() and rm_file(), respectively.

    Parameters:
     iso_old_path - The old path on the ISO9660 filesystem to link from.
     iso_new_path - The new path on the ISO9660 filesystem to link to.
     joliet_old_path - The old path on the Joliet filesystem to link from.
     joliet_new_path - The new path on the Joliet filesystem to link to.
     rr_name - The Rock Ridge name to use for the new file if this is a Rock
               Ridge ISO and the new path is on the ISO9660 filesystem.
     boot_catalog_old - Use the El Torito boot catalog as the old path.
     udf_old_path - The old path on the UDF filesystem to link from.
     udf_new_path - The new path on the UDF filesystem to link to.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    num_old = 0
    iso_old_path = None
    joliet_old_path = None
    boot_catalog_old = False
    udf_old_path = None
    keys_to_remove = []
    # Pick out (and count) the 'old path' arguments we understand; any
    # remaining kwargs describe the new path and are handed down to
    # _add_hard_link_to_rec() below.
    for key in kwargs:
        if key == 'iso_old_path' and kwargs[key] is not None:
            num_old += 1
            iso_old_path = utils.normpath(kwargs[key])
            keys_to_remove.append(key)
        elif key == 'joliet_old_path' and kwargs[key] is not None:
            num_old += 1
            joliet_old_path = self._normalize_joliet_path(kwargs[key])
            keys_to_remove.append(key)
        elif key == 'boot_catalog_old' and kwargs[key] is not None:
            num_old += 1
            boot_catalog_old = True
            if self.eltorito_boot_catalog is None:
                raise pycdlibexception.PyCdlibInvalidInput('Attempting to make link to non-existent El Torito boot catalog')
            keys_to_remove.append(key)
        elif key == 'udf_old_path' and kwargs[key] is not None:
            num_old += 1
            udf_old_path = utils.normpath(kwargs[key])
            keys_to_remove.append(key)

    if num_old != 1:
        raise pycdlibexception.PyCdlibInvalidInput('Exactly one old path must be specified')

    # Once we've iterated over the keys we know about, remove them from
    # the map so that _add_hard_link_to_rec() can parse the rest.
    for key in keys_to_remove:
        del kwargs[key]

    # It would be nice to allow the addition of a link to the El Torito
    # Initial/Default Entry.  Unfortunately, the information we need for
    # a 'hidden' Initial entry just doesn't exist on the ISO.  In
    # particular, we don't know the real size that the file should be, we
    # only know the number of emulated sectors (512 bytes) that it will be
    # loaded into.  Since the true length and the number of sectors are not
    # the same thing, we can't actually add a hard link.

    old_rec = None  # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]
    if iso_old_path is not None:
        # A link from a file on the ISO9660 filesystem...
        old_rec = self._find_iso_record(iso_old_path)
    elif joliet_old_path is not None:
        # A link from a file on the Joliet filesystem...
        old_rec = self._find_joliet_record(joliet_old_path)
    elif boot_catalog_old:
        # A link from the El Torito boot catalog...
        if self.eltorito_boot_catalog is None:
            raise pycdlibexception.PyCdlibInvalidInput('Attempting to make link to non-existent El Torito boot catalog')
        old_rec = self.eltorito_boot_catalog.dirrecords[0]
    elif udf_old_path is not None:
        # A link from a file on the UDF filesystem...
        (old_ident_unused, old_rec) = self._find_udf_record(udf_old_path)
        if old_rec is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot make hard link to a UDF file with an empty UDF File Entry')

    # Above we checked to make sure we got at least one old path, so we
    # don't need to worry about the else situation here.

    num_bytes_to_add = self._add_hard_link_to_rec(old_rec, boot_catalog_old,
                                                  **kwargs)

    self._finish_add(0, num_bytes_to_add)
def rm_hard_link(self, iso_path=None, joliet_path=None, udf_path=None):
    # type: (Optional[str], Optional[str], Optional[str]) -> None
    '''
    Remove a hard link from the ISO.  If the number of links to a piece of
    data drops to zero, then the contents will be removed from the ISO.
    Thus, this can be thought of as a lower-level interface to rm_file.
    Either an ISO9660 path or a Joliet path must be passed to this API, but
    not both.  Thus, this interface can be used to hide files from either
    the ISO9660 filesystem, the Joliet filesystem, or both (if there is
    another reference to the data on the ISO, such as in El Torito).

    Note that this is an advanced API, so using it in combination with the
    higher-level APIs (like rm_file) may result in unexpected behavior.
    Once this API has been used, this API and add_hard_link() should be
    preferred over rm_file() and add_file(), respectively.

    Parameters:
     iso_path - The ISO link path to remove.
     joliet_path - The Joliet link path to remove.
     udf_path - The UDF link path to remove.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # Exactly one of the three paths may be (truthily) provided.
    if sum(1 for candidate in (iso_path, joliet_path, udf_path) if candidate) != 1:
        raise pycdlibexception.PyCdlibInvalidInput('Must provide exactly one of iso_path, joliet_path, or udf_path')

    num_bytes_to_remove = 0
    rec = None  # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]

    if iso_path is not None:
        # Drop the link from the ISO9660 directory tree.
        rec = self._find_iso_record(utils.normpath(iso_path))
        num_bytes_to_remove += self._rm_dr_link(rec)
    elif joliet_path is not None:
        # Drop the link from the Joliet directory tree.
        if self.joliet_vd is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot remove Joliet link from non-Joliet ISO')
        normalized_joliet = self._normalize_joliet_path(joliet_path)
        rec = self._find_joliet_record(normalized_joliet)
        num_bytes_to_remove += self._rm_dr_link(rec)
    elif udf_path is not None:
        # UDF hard link removal
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')
        (ident, rec) = self._find_udf_record(utils.normpath(udf_path))
        if rec is not None:
            num_bytes_to_remove += self._rm_udf_link(rec)
        else:
            # A missing rec means this pointed at an 'empty' UDF File
            # Entry.  Removing the UDF File Identifier is as much as we
            # can do.
            if ident is not None and ident.parent is not None:
                num_bytes_to_remove += self._rm_udf_file_ident(ident.parent, ident.fi)
            # We also have to remove the "zero" UDF File Entry, since
            # nothing else will.
            num_bytes_to_remove += self.pvd.logical_block_size()
    else:
        raise pycdlibexception.PyCdlibInvalidInput("One of 'iso_path', 'joliet_path', or 'udf_path' must be specified")

    self._finish_remove(num_bytes_to_remove, True)
def add_directory(self, iso_path=None, rr_name=None, joliet_path=None,
                  file_mode=None, udf_path=None):
    # type: (Optional[str], Optional[str], Optional[str], Optional[int], Optional[str]) -> None
    '''
    Add a directory to the ISO.  At least one of an iso_path, joliet_path,
    or udf_path must be provided.  Providing joliet_path on a non-Joliet
    ISO, or udf_path on a non-UDF ISO, is an error.  If the ISO contains
    Rock Ridge, then a Rock Ridge name must be provided.

    Parameters:
     iso_path - The ISO9660 absolute path to use for the directory.
     rr_name - The Rock Ridge name to use for the directory.
     joliet_path - The Joliet absolute path to use for the directory.
     file_mode - The POSIX file mode to use for the directory.  This only
                 applies for Rock Ridge ISOs.
     udf_path - The UDF absolute path to use for the directory.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
    if iso_path is None and joliet_path is None and udf_path is None:
        raise pycdlibexception.PyCdlibInvalidInput('Either iso_path or joliet_path must be passed')
    if file_mode is not None and not self.rock_ridge:
        raise pycdlibexception.PyCdlibInvalidInput('A file mode can only be specified for Rock Ridge ISOs')

    # For backwards-compatibility reasons, if the mode was not specified we
    # just assume 555.  We should probably eventually make file_mode
    # required for Rock Ridge and remove this assumption.
    if file_mode is None:
        file_mode = 0o040555

    num_bytes_to_add = 0
    if iso_path is not None:
        iso_path_bytes = utils.normpath(iso_path)

        new_rr_name = self._check_rr_name(rr_name)

        depth = len(utils.split_path(iso_path_bytes))

        # ISO9660 limits directory nesting depth unless Rock Ridge
        # (relocation) or an enhanced VD is available to work around it.
        if not self.rock_ridge and self.enhanced_vd is None:
            _check_path_depth(iso_path_bytes)
        (name, parent) = self._iso_name_and_parent_from_path(iso_path_bytes)

        _check_iso9660_directory(name, self.interchange_level)

        relocated = False
        fake_dir_rec = None
        orig_parent = None
        iso9660_name = name
        if self.rock_ridge and (depth % 8) == 0 and self.enhanced_vd is None:
            # If the depth was a multiple of 8, then we are going to have to
            # make a relocated entry for this record.

            num_bytes_to_add += self._find_or_create_rr_moved()

            # With a depth of 8, we have to add the directory both to the
            # original parent with a CL link, and to the new parent with an
            # RE link.  Here we make the 'fake' record, as a child of the
            # original place; the real one will be done below.
            fake_dir_rec = dr.DirectoryRecord()
            fake_dir_rec.new_dir(self.pvd, name, parent,
                                 self.pvd.sequence_number(),
                                 self.rock_ridge, new_rr_name,
                                 self.pvd.logical_block_size(), True, False,
                                 self.xa, file_mode)
            num_bytes_to_add += self._add_child_to_dr(fake_dir_rec,
                                                      self.pvd.logical_block_size())

            # The fake dir record doesn't get an entry in the path table record.

            relocated = True
            orig_parent = parent
            parent = self._rr_moved_record

            # Since we are moving the entry underneath the RR_MOVED
            # directory, there is now the chance of a name collision (this
            # can't happen without relocation since _add_child_to_dr() below
            # won't allow duplicate names).  Check for that here and
            # generate a new name.
            index = 0
            while True:
                for child in self._rr_moved_record.children:
                    if child.file_ident == iso9660_name:
                        # Python 3.4 doesn't support substitution with a byte
                        # array, so we do it as a string and encode to bytes.
                        iso9660_name = name + ('%03d' % (index)).encode()
                        index += 1
                        break
                else:
                    break

        rec = dr.DirectoryRecord()
        rec.new_dir(self.pvd, iso9660_name, parent,
                    self.pvd.sequence_number(), self.rock_ridge, new_rr_name,
                    self.pvd.logical_block_size(), False, relocated,
                    self.xa, file_mode)
        num_bytes_to_add += self._add_child_to_dr(rec, self.pvd.logical_block_size())
        if rec.rock_ridge is not None:
            if relocated:
                # Cross-link the fake (CL) and real (RE) records.
                fake_dir_rec.rock_ridge.cl_to_moved_dr = rec  # type: ignore
                rec.rock_ridge.moved_to_cl_dr = fake_dir_rec  # type: ignore
            num_bytes_to_add += self._update_rr_ce_entry(rec)

        self._create_dot(self.pvd, rec, self.rock_ridge, self.xa, file_mode)

        # The '..' entry inherits the parent's POSIX mode when Rock Ridge
        # knows it; for the root we fall back to the new directory's mode.
        parent_file_mode = -1
        if parent.rock_ridge is not None:
            parent_file_mode = parent.rock_ridge.get_file_mode()
        else:
            if parent.is_root:
                parent_file_mode = file_mode

        dotdot = self._create_dotdot(self.pvd, rec, self.rock_ridge,
                                     relocated, self.xa, parent_file_mode)
        if dotdot.rock_ridge is not None and relocated:
            dotdot.rock_ridge.parent_link = orig_parent

        # We always need to add an entry to the path table record
        ptr = path_table_record.PathTableRecord()
        ptr.new_dir(iso9660_name)

        num_bytes_to_add += self._add_to_ptr_size(ptr) + self.pvd.logical_block_size()

        rec.set_ptr(ptr)

    if joliet_path is not None:
        num_bytes_to_add += self._add_joliet_dir(self._normalize_joliet_path(joliet_path))

    if udf_path is not None:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')

        log_block_size = self.pvd.logical_block_size()

        udf_path_bytes = utils.normpath(udf_path)
        (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_path_bytes)

        # A UDF directory needs a File Identifier Descriptor in the parent,
        # a File Entry for the directory itself, and a '..' (parent) File
        # Identifier Descriptor inside the new directory.
        file_ident = udfmod.UDFFileIdentifierDescriptor()
        file_ident.new(True, False, udf_name, udf_parent)
        num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size)
        num_bytes_to_add += num_new_extents * log_block_size

        file_entry = udfmod.UDFFileEntry()
        file_entry.new(0, 'dir', udf_parent, log_block_size)
        file_ident.file_entry = file_entry
        file_entry.file_ident = file_ident
        num_bytes_to_add += log_block_size

        udf_dotdot = udfmod.UDFFileIdentifierDescriptor()
        udf_dotdot.new(True, True, b'', udf_parent)
        num_new_extents = file_ident.file_entry.add_file_ident_desc(udf_dotdot, log_block_size)
        num_bytes_to_add += num_new_extents * log_block_size

        self.udf_logical_volume_integrity.logical_volume_impl_use.num_dirs += 1

    self._finish_add(0, num_bytes_to_add)
def rm_file(self, iso_path, rr_name=None, joliet_path=None, udf_path=None):  # pylint: disable=unused-argument
    # type: (str, Optional[str], Optional[str], Optional[str]) -> None
    '''
    Remove a file from the ISO.  This removes the data and metadata for the
    file from all contexts it is linked into (ISO9660, Joliet, UDF).

    Parameters:
     iso_path - The path to the file to remove.
     rr_name - The Rock Ridge name of the file to remove.
     joliet_path - The Joliet path to the file to remove.
     udf_path - The UDF path to the file to remove.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    iso_path_bytes = utils.normpath(iso_path)

    if not utils.starts_with_slash(iso_path_bytes):
        raise pycdlibexception.PyCdlibInvalidInput('Must be a path starting with /')

    child = self._find_iso_record(iso_path_bytes)

    if not child.is_file():
        raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_file (try rm_directory instead)')

    # We also want to check to see if this Directory Record is currently
    # being used as an El Torito Boot Catalog, Initial Entry, or Section
    # Entry.  If it is, we throw an exception; we don't know if the user
    # meant to remove El Torito from this ISO, or if they meant to 'hide'
    # the entry, but we need them to call the correct API to let us know.
    if self.eltorito_boot_catalog is not None:
        if any([id(child) == id(rec) for rec in self.eltorito_boot_catalog.dirrecords]):
            raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry")

        # Compare inode identities so that any hard link to the same data
        # as an El Torito entry is also caught.
        eltorito_entries = {}
        eltorito_entries[id(self.eltorito_boot_catalog.initial_entry.inode)] = True
        for sec in self.eltorito_boot_catalog.sections:
            for entry in sec.section_entries:
                eltorito_entries[id(entry.inode)] = True

        if id(child.inode) in eltorito_entries:
            raise pycdlibexception.PyCdlibInvalidInput("Cannot remove a file that is referenced by El Torito; either use 'rm_eltorito' to remove El Torito first, or use 'rm_hard_link' to hide the entry")

    num_bytes_to_remove = 0

    udf_file_ident = None
    udf_file_entry = None
    if udf_path is not None:
        # Find the UDF record if the udf_path was specified; this may be
        # used later on.
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')
        udf_path_bytes = utils.normpath(udf_path)
        (udf_file_ident, udf_file_entry) = self._find_udf_record(udf_path_bytes)

    # If the child is a Rock Ridge symlink, then it has no inode since
    # there is no data attached to it.
    if child.inode is None:
        num_bytes_to_remove += self._remove_child_from_dr(child,
                                                          child.index_in_parent,
                                                          self.pvd.logical_block_size())
    else:
        # Unlink every record (ISO9660 or UDF) that shares this inode; the
        # list shrinks as we go, so keep taking the first element.
        while child.inode.linked_records:
            rec = child.inode.linked_records[0]

            if isinstance(rec, dr.DirectoryRecord):
                num_bytes_to_remove += self._rm_dr_link(rec)
            elif isinstance(rec, udfmod.UDFFileEntry):
                num_bytes_to_remove += self._rm_udf_link(rec)
            else:
                # This should never happen
                raise pycdlibexception.PyCdlibInternalError('Saw a linked record that was neither ISO or UDF')

    if udf_file_ident is not None and udf_file_entry is None and udf_file_ident.parent is not None:
        # If the udf_path was specified, go looking for the UDF File Ident
        # that corresponds to this record.  If the UDF File Ident exists,
        # and the File Entry is None, this means that it is an "zeroed"
        # UDF File Entry and we have to remove it by hand.
        # NOTE(review): the return value of _rm_udf_file_ident() is
        # discarded here, while rm_hard_link() accumulates it into the
        # bytes-to-remove total — confirm whether that is intentional.
        self._rm_udf_file_ident(udf_file_ident.parent, udf_file_ident.fi)
        # We also have to remove the "zero" UDF File Entry, since nothing
        # else will.
        num_bytes_to_remove += self.pvd.logical_block_size()

    self._finish_remove(num_bytes_to_remove, True)
def rm_directory(self, iso_path=None, rr_name=None, joliet_path=None, udf_path=None):
    # type: (Optional[str], Optional[str], Optional[str], Optional[str]) -> None
    '''
    Remove a directory from the ISO.  The directory must be empty.

    Parameters:
     iso_path - The path to the directory to remove.
     rr_name - The Rock Ridge name of the directory to remove.
     joliet_path - The Joliet path to the directory to remove.
     udf_path - The UDF absolute path to the directory to remove.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if iso_path is None and joliet_path is None and udf_path is None:
        raise pycdlibexception.PyCdlibInvalidInput('Either iso_path or joliet_path must be passed')

    num_bytes_to_remove = 0

    if iso_path is not None:
        iso_path_bytes = utils.normpath(iso_path)

        if iso_path_bytes == b'/':
            raise pycdlibexception.PyCdlibInvalidInput('Cannot remove base directory')

        self._check_rr_name(rr_name)

        child = self._find_iso_record(iso_path_bytes)

        if not child.is_dir():
            raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a file with rm_directory (try rm_file instead)')

        # Two children ('.' and '..') means the directory is empty.
        if len(child.children) > 2:
            raise pycdlibexception.PyCdlibInvalidInput('Directory must be empty to use rm_directory')

        num_bytes_to_remove += self._remove_child_from_dr(child,
                                                          child.index_in_parent,
                                                          self.pvd.logical_block_size())
        if child.ptr is not None:
            num_bytes_to_remove += self._remove_from_ptr_size(child.ptr)

        # Remove space for the directory itself.
        num_bytes_to_remove += child.get_data_length()

        if child.rock_ridge is not None and child.rock_ridge.relocated_record():
            # OK, this child was relocated.  If the parent of this relocated
            # record is empty (only . and ..), we can remove it.
            parent = child.parent
            if parent is None:
                raise pycdlibexception.PyCdlibInternalError('Relocated child has empty parent; this should not be')
            if len(parent.children) == 2:
                if parent.parent is None:
                    raise pycdlibexception.PyCdlibInternalError('Tried to remove a directory that has no parent; this should not happen')
                # Find the parent's index within *its* parent by name
                # (for/else: raise if it was never found).
                for index, c in enumerate(parent.parent.children):
                    if c.file_ident == parent.file_ident:
                        parent_index = index
                        break
                else:
                    raise pycdlibexception.PyCdlibInvalidISO('Could not find parent in its own parent!')

                num_bytes_to_remove += self._remove_child_from_dr(parent,
                                                                  parent_index,
                                                                  self.pvd.logical_block_size())
                num_bytes_to_remove += parent.get_data_length()
                if parent.ptr is not None:
                    num_bytes_to_remove += self._remove_from_ptr_size(parent.ptr)

            # Also remove the 'fake' CL record from the original location.
            cl = child.rock_ridge.moved_to_cl_dr
            if cl is None:
                raise pycdlibexception.PyCdlibInternalError('Invalid child link record')
            if cl.parent is None:
                raise pycdlibexception.PyCdlibInternalError('Invalid parent to child link record; this should not be')
            for index, c in enumerate(cl.parent.children):
                if cl.file_ident == c.file_ident:
                    clindex = index
                    break
            else:
                raise pycdlibexception.PyCdlibInvalidISO('CL record does not exist')

            if cl.children:
                raise pycdlibexception.PyCdlibInvalidISO('Parent link should have no children!')
            num_bytes_to_remove += self._remove_child_from_dr(cl, clindex,
                                                              self.pvd.logical_block_size())

            # Note that we do not remove additional space from the PVD for the child_link
            # record because it is a 'fake' record that has no real size.

        # Free this directory's Rock Ridge Continuation Entry allocation,
        # if it had one.
        if child.rock_ridge is not None and child.rock_ridge.dr_entries.ce_record is not None and child.rock_ridge.ce_block is not None:
            child.rock_ridge.ce_block.remove_entry(child.rock_ridge.dr_entries.ce_record.offset_cont_area,
                                                   child.rock_ridge.dr_entries.ce_record.len_cont_area)

    if joliet_path is not None:
        num_bytes_to_remove += self._rm_joliet_dir(self._normalize_joliet_path(joliet_path))

    if udf_path is not None:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')

        udf_path_bytes = utils.normpath(udf_path)

        if udf_path_bytes == b'/':
            raise pycdlibexception.PyCdlibInvalidInput('Cannot remove base directory')

        (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_path_bytes)

        num_extents_to_remove = udf_parent.remove_file_ident_desc_by_name(udf_name,
                                                                          self.pvd.logical_block_size())
        # Remove space (if necessary) in the parent File Identifier
        # Descriptor area.
        num_bytes_to_remove += num_extents_to_remove * self.pvd.logical_block_size()
        # Remove space for the File Entry.
        num_bytes_to_remove += self.pvd.logical_block_size()
        # Remove space for the list of File Identifier Descriptors.
        num_bytes_to_remove += self.pvd.logical_block_size()

        self.udf_logical_volume_integrity.logical_volume_impl_use.num_dirs -= 1

        # _find_udf_record() is memoized; the cached lookup for this path is
        # now stale, so drop the whole cache.
        self._find_udf_record.cache_clear()  # pylint: disable=no-member

    self._finish_remove(num_bytes_to_remove, True)
def add_eltorito(self, bootfile_path, bootcatfile=None,
                 rr_bootcatname=None, joliet_bootcatfile=None,
                 boot_load_size=None, platform_id=0, boot_info_table=False,
                 efi=False, media_name='noemul', bootable=True,
                 boot_load_seg=0, udf_bootcatfile=None):
    # type: (str, Optional[str], Optional[str], Optional[str], int, int, bool, bool, str, bool, int, Optional[str]) -> None
    '''
    Add an El Torito Boot Record, and associated files, to the ISO.  The
    file that will be used as the bootfile must be passed into this function
    and must already be present on the ISO.

    Parameters:
     bootfile_path - The file to use as the boot file; it must already
                     exist on this ISO.
     bootcatfile - The fake file to use as the boot catalog entry; set to
                   BOOT.CAT;1 by default.
     rr_bootcatname - The Rock Ridge name for the fake file to use as the
                      boot catalog entry; set to 'boot.cat' by default.
     joliet_bootcatfile - The Joliet name for the fake file to use as the
                          boot catalog entry; set to 'boot.cat' by default.
     boot_load_size - The number of sectors to use for the boot entry; if
                      set to None (the default), the number of sectors will
                      be calculated.
     platform_id - The platform ID to set for the El Torito entry; 0 is for
                   x86, 1 is for Power PC, and 2 is for Mac.  0 is the
                   default.
     boot_info_table - Whether to add a boot info table to the ISO.  The
                       default is False.
     efi - Whether this is an EFI entry for El Torito.  The default is False.
     media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
     bootable - Whether the boot media is bootable.  The default is True.
     boot_load_seg - The load segment address of the boot image.
     udf_bootcatfile - The UDF name for the fake file to use as the boot
                       catalog entry; set to 'boot.cat' by default.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # In order to add an El Torito boot, we need to do the following:
    # 1.  Find the boot file record (which must already exist).
    # 2.  Construct a BootRecord.
    # 3.  Construct a BootCatalog, and add it to the filesystem.
    # 4.  Add the boot record to the ISO.

    if not bootcatfile:
        bootcatfile = '/BOOT.CAT;1'

    bootfile_path_bytes = utils.normpath(bootfile_path)

    # Default the Joliet/UDF catalog names on Joliet/UDF ISOs; reject them
    # when the corresponding context does not exist.
    if self.joliet_vd is not None:
        if not joliet_bootcatfile:
            joliet_bootcatfile = '/boot.cat'
    else:
        if joliet_bootcatfile:
            raise pycdlibexception.PyCdlibInvalidInput('A joliet path must not be passed when adding El Torito to a non-Joliet ISO')

    if self.udf_root is not None:
        if not udf_bootcatfile:
            udf_bootcatfile = '/boot.cat'
    else:
        if udf_bootcatfile:
            raise pycdlibexception.PyCdlibInvalidInput('A UDF path must not be passed when adding El Torito to a non-UDF ISO')

    log_block_size = self.pvd.logical_block_size()

    # Step 1.
    boot_dirrecord = self._find_iso_record(bootfile_path_bytes)

    if boot_load_size is None:
        # Compute the length of the boot file in 512-byte emulated sectors,
        # rounded up to a whole number of logical blocks.
        sector_count = utils.ceiling_div(boot_dirrecord.get_data_length(),
                                         log_block_size) * log_block_size // 512
    else:
        sector_count = boot_load_size

    if boot_dirrecord.inode is None:
        raise pycdlibexception.PyCdlibInternalError('Tried to add an empty boot dirrecord inode to the El Torito boot catalog')

    if boot_info_table:
        orig_len = boot_dirrecord.get_data_length()
        bi_table = eltorito.EltoritoBootInfoTable()
        with inode.InodeOpenData(boot_dirrecord.inode, log_block_size) as (data_fp, data_len):
            bi_table.new(self.pvd, boot_dirrecord.inode, orig_len,
                         self._calculate_eltorito_boot_info_table_csum(data_fp, data_len))

        boot_dirrecord.inode.add_boot_info_table(bi_table)

    system_type = 0
    if media_name == 'hdemul':
        # Hard-disk emulation requires a valid MBR at the start of the image;
        # derive the system type from its partition table.
        with inode.InodeOpenData(boot_dirrecord.inode, log_block_size) as (data_fp, data_len):
            disk_mbr = data_fp.read(512)
            if len(disk_mbr) != 512:
                raise pycdlibexception.PyCdlibInvalidInput('Could not read entire HD MBR, must be at least 512 bytes')
        system_type = eltorito.hdmbrcheck(disk_mbr, sector_count, bootable)

    num_bytes_to_add = 0

    if self.eltorito_boot_catalog is not None:
        # All right, we already created the boot catalog.  Add a new section
        # to the boot catalog
        self.eltorito_boot_catalog.add_section(boot_dirrecord.inode,
                                               sector_count, boot_load_seg,
                                               media_name, system_type, efi,
                                               bootable)
    else:
        # Step 2.
        br = headervd.BootRecord()
        br.new(b'EL TORITO SPECIFICATION')
        self.brs.append(br)
        # On a UDF ISO, adding a new Boot Record doesn't actually increase
        # the size, since there are a bunch of gaps at the beginning.
        if not self._has_udf:
            num_bytes_to_add += log_block_size

        # Step 3.
        self.eltorito_boot_catalog = eltorito.EltoritoBootCatalog(br)
        self.eltorito_boot_catalog.new(br, boot_dirrecord.inode, sector_count,
                                       boot_load_seg, media_name, system_type,
                                       platform_id, bootable)

        # Step 4.
        rrname = ''
        if self.rock_ridge:
            if rr_bootcatname is None:
                rrname = 'boot.cat'
            else:
                rrname = rr_bootcatname

        num_bytes_to_add += self._add_fp(None, log_block_size, False, bootcatfile,
                                         rrname, joliet_bootcatfile,
                                         udf_bootcatfile, None, True)

    self._finish_add(0, num_bytes_to_add)
def rm_eltorito(self):
    # type: () -> None
    '''
    Remove the El Torito boot record (and Boot Catalog) from the ISO.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if self.eltorito_boot_catalog is None:
        raise pycdlibexception.PyCdlibInvalidInput('This ISO does not have an El Torito Boot Record')

    # Locate the Boot Record whose system identifier marks it as El Torito.
    eltorito_ident = b'EL TORITO SPECIFICATION'.ljust(32, b'\x00')
    eltorito_index = next((pos for pos, boot_rec in enumerate(self.brs)
                           if boot_rec.boot_system_identifier == eltorito_ident),
                          None)
    if eltorito_index is None:
        # There was a boot catalog, but no corresponding boot record.  This
        # should never happen.
        raise pycdlibexception.PyCdlibInternalError('El Torito boot catalog found with no corresponding boot record')

    del self.brs[eltorito_index]

    num_bytes_to_remove = 0

    # On a UDF ISO, removing the Boot Record doesn't actually decrease
    # the size, since there are a bunch of gaps at the beginning.
    if not self._has_udf:
        num_bytes_to_remove += self.pvd.logical_block_size()

    # Unlink every DirectoryRecord/UDFFileEntry associated with the
    # Boot Catalog.
    for catalog_rec in self.eltorito_boot_catalog.dirrecords:
        if isinstance(catalog_rec, dr.DirectoryRecord):
            num_bytes_to_remove += self._rm_dr_link(catalog_rec)
        elif isinstance(catalog_rec, udfmod.UDFFileEntry):
            num_bytes_to_remove += self._rm_udf_link(catalog_rec)
        else:
            # This should never happen
            raise pycdlibexception.PyCdlibInternalError('Saw an El Torito record that was neither ISO nor UDF')

    # Detach each El Torito entry (initial plus all section entries) from
    # its inode so the inodes no longer reference the catalog.
    doomed_entries = [self.eltorito_boot_catalog.initial_entry]
    for section in self.eltorito_boot_catalog.sections:
        doomed_entries.extend(section.section_entries)

    for doomed in doomed_entries:
        if doomed.inode is not None:
            doomed.inode.linked_records = [link for link in doomed.inode.linked_records
                                           if id(link) != id(doomed)]

    num_bytes_to_remove += len(self.eltorito_boot_catalog.record())

    self.eltorito_boot_catalog = None

    self._finish_remove(num_bytes_to_remove, True)
def list_dir(self, iso_path, joliet=False):
    # type: (str, bool) -> Generator
    '''
    (deprecated) Generate a list of all of the file/directory objects in the
    specified location on the ISO.  It is recommended to use the
    'list_children' API instead.

    Parameters:
     iso_path - The path on the ISO to look up information for.
     joliet - Whether to look for the path in the Joliet portion of the ISO.
    Yields:
     Children of this path.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if joliet:
        record = self._get_entry(None, None, self._normalize_joliet_path(iso_path))
    else:
        normpath = utils.normpath(iso_path)
        # Try a plain ISO9660 lookup first; if that fails, fall back to a
        # Rock Ridge lookup.  The fallback runs outside of the except
        # clause so a failure there doesn't chain tracebacks.
        fall_back_to_rr = False
        try:
            record = self._get_entry(normpath, None, None)
        except pycdlibexception.PyCdlibInvalidInput:
            fall_back_to_rr = True
        if fall_back_to_rr:
            record = self._get_entry(None, normpath, None)

    for child in _yield_children(record):
        yield child
def list_children(self, **kwargs):
    # type: (str) -> Generator
    '''
    Generate a list of all of the file/directory objects in the
    specified location on the ISO.

    Parameters:
     iso_path - The absolute path on the ISO to list the children for.
     rr_path - The absolute Rock Ridge path on the ISO to list the children for.
     joliet_path - The absolute Joliet path on the ISO to list the children for.
     udf_path - The absolute UDF path on the ISO to list the children for.
    Yields:
     Children of this path.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    num_paths = 0
    for key in kwargs:
        if key in ('joliet_path', 'rr_path', 'iso_path', 'udf_path'):
            if kwargs[key] is not None:
                num_paths += 1
        else:
            raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    # BUG FIX: dispatch on the keyword whose *value* is set.  The previous
    # code checked only for key presence ('udf_path' in kwargs), so a call
    # like list_children(iso_path='/FOO', udf_path=None) passed the count
    # check above but was mis-routed to the UDF branch with a None path.
    if kwargs.get('udf_path') is not None:
        udf_rec = self._get_udf_entry(kwargs['udf_path'])

        if not udf_rec.is_dir():
            raise pycdlibexception.PyCdlibInvalidInput('UDF File Entry is not a directory!')

        for fi_desc in udf_rec.fi_descs:
            yield fi_desc.file_entry
    else:
        if kwargs.get('joliet_path') is not None:
            rec = self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
        elif kwargs.get('rr_path') is not None:
            rec = self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
        else:
            rec = self._get_entry(utils.normpath(kwargs['iso_path']), None, None)

        for c in _yield_children(rec):
            yield c
def get_entry(self, iso_path, joliet=False):
    # type: (str, bool) -> dr.DirectoryRecord
    '''
    (deprecated) Get the directory record for a particular path.  It is
    recommended to use the 'get_record' API instead.

    Parameters:
     iso_path - The path on the ISO to look up information for.
     joliet - Whether to look for the path in the Joliet portion of the ISO.
    Returns:
     A dr.DirectoryRecord object representing the path.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if not joliet:
        return self._get_entry(utils.normpath(iso_path), None, None)
    return self._get_entry(None, None, self._normalize_joliet_path(iso_path))
def get_record(self, **kwargs):
    # type: (str) -> Union[dr.DirectoryRecord, udfmod.UDFFileEntry]
    '''
    Get the directory record for a particular path.

    Parameters:
     iso_path - The absolute path on the ISO9660 filesystem to get the
                record for.
     rr_path - The absolute path on the Rock Ridge filesystem to get the
               record for.
     joliet_path - The absolute path on the Joliet filesystem to get the
                   record for.
     udf_path - The absolute path on the UDF filesystem to get the record
                for.
    Returns:
     An object that represents the path.  This may be a dr.DirectoryRecord
     object (in the cases of iso_path, rr_path, or joliet_path), or a
     udf.UDFFileEntry object (in the case of udf_path).
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    num_paths = 0
    for key in kwargs:
        if key in ('joliet_path', 'rr_path', 'iso_path', 'udf_path'):
            if kwargs[key] is not None:
                num_paths += 1
        else:
            raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    # BUG FIX: dispatch on the keyword whose *value* is set.  The previous
    # code checked only for key presence, so a call such as
    # get_record(iso_path='/FOO', joliet_path=None) passed the count check
    # above but was mis-routed to the Joliet branch with a None path.
    if kwargs.get('joliet_path') is not None:
        return self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
    if kwargs.get('rr_path') is not None:
        return self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
    if kwargs.get('udf_path') is not None:
        return self._get_udf_entry(kwargs['udf_path'])
    return self._get_entry(utils.normpath(kwargs['iso_path']), None, None)
def add_isohybrid(self, part_entry=1, mbr_id=None, part_offset=0,
                  geometry_sectors=32, geometry_heads=64, part_type=0x17,
                  mac=False):
    # type: (int, Optional[int], int, int, int, int, bool) -> None
    '''
    Make an ISO a 'hybrid', which means that it can be booted either from a
    CD or from more traditional media (like a USB stick).  This requires
    that the ISO already have El Torito, and will use the El Torito boot
    file as a bootable image.  That image must contain a certain signature
    in order to work as a hybrid (if using syslinux, this generally means
    the isohdpfx.bin files).

    Parameters:
     part_entry - The partition entry to use; one by default.
     mbr_id - The mbr_id to use.  If set to None (the default), a random one
              will be generated.
     part_offset - The partition offset to use; zero by default.
     geometry_sectors - The number of sectors to assign; thirty-two by default.
     geometry_heads - The number of heads to assign; sixty-four by default.
     part_type - The partition type to assign; twenty-three (0x17) by default.
     mac - Add support for Mac; False by default.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if self.eltorito_boot_catalog is None:
        raise pycdlibexception.PyCdlibInvalidInput('The ISO must have an El Torito Boot Record to add isohybrid support')

    if self.eltorito_boot_catalog.initial_entry.sector_count != 4:
        raise pycdlibexception.PyCdlibInvalidInput('El Torito Boot Catalog sector count must be 4 (was actually 0x%x)' % (self.eltorito_boot_catalog.initial_entry.sector_count))

    # Now check that the eltorito boot file contains the appropriate
    # signature (offset 0x40, '\xFB\xC0\x78\x70')
    with inode.InodeOpenData(self.eltorito_boot_catalog.initial_entry.inode, self.pvd.logical_block_size()) as (data_fp, data_len_unused):
        data_fp.seek(0x40, os.SEEK_CUR)
        signature = data_fp.read(4)
        if signature != b'\xfb\xc0\x78\x70':
            raise pycdlibexception.PyCdlibInvalidInput('Invalid signature on boot file for iso hybrid')

    self.isohybrid_mbr = isohybrid.IsoHybrid()
    self.isohybrid_mbr.new(mac, part_entry, mbr_id, part_offset,
                           geometry_sectors, geometry_heads, part_type)
def full_path_from_dirrecord(self, rec, rockridge=False):
    # type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry], bool) -> str
    '''
    A method to get the absolute path of a directory record.

    Parameters:
     rec - The directory record to get the full path for.
     rockridge - Whether to get the rock ridge full path.
    Returns:
     A string representing the absolute path to the file on the ISO.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    ret = b''

    if isinstance(rec, dr.DirectoryRecord):
        # Records that live on the Joliet volume descriptor are encoded as
        # big-endian UTF-16; everything else is treated as UTF-8.
        encoding = 'utf-8'
        if self.joliet_vd is not None and id(rec.vd) == id(self.joliet_vd):
            encoding = 'utf-16_be'
        slash = '/'.encode(encoding)

        # A root entry has no Rock Ridge entry, even on a Rock Ridge ISO.  Just
        # always return / here.
        if rec.is_root:
            return '/'

        if rockridge and rec.rock_ridge is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot generate a Rock Ridge path on a non-Rock Ridge ISO')

        # Walk up the parent chain, prepending each path component until we
        # reach the root record (which contributes nothing).
        parent = rec  # type: Optional[dr.DirectoryRecord]
        while parent is not None:
            if not parent.is_root:
                if rockridge and parent.rock_ridge is not None:
                    ret = slash + parent.rock_ridge.name() + ret
                else:
                    ret = slash + parent.file_identifier() + ret

            parent = parent.parent
    else:
        # UDF File Entry.  The UDF root has no parent.
        if rec.parent is None:
            return '/'

        if rec.file_ident is not None:
            encoding = rec.file_ident.encoding
        else:
            encoding = 'utf-8'
        slash = '/'.encode(encoding)

        # Walk up the UDF parent chain, skipping the root identifier.
        udfparent = rec  # type: Optional[udfmod.UDFFileEntry]
        while udfparent is not None:
            ident = udfparent.file_identifier()
            if ident != b'/':
                ret = slash + ident + ret
            udfparent = udfparent.parent

    if sys.version_info >= (3, 0):
        # Python 3, just return the encoded version
        return ret.decode(encoding)

    # Python 2.
    return ret.decode(encoding).encode('utf-8')
def duplicate_pvd(self):
    # type: () -> None
    '''
    A method to add a duplicate PVD to the ISO.  Ecma-119 permits duplicate
    PVDs as a (mostly useless) safeguard against possible corruption.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    duplicate = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY)
    duplicate.copy(self.pvd)
    self.pvds.append(duplicate)

    # The extra descriptor consumes exactly one logical block.
    self._finish_add(self.pvd.logical_block_size(), 0)
def clear_hidden(self, iso_path=None, rr_path=None, joliet_path=None):
    # type: (Optional[str], Optional[str], Optional[str]) -> None
    '''
    Clear the ISO9660 hidden attribute on a file or directory.  This will
    cause the file or directory to show up when listing entries on the ISO.
    Exactly one of iso_path, rr_path, or joliet_path must be specified.

    Parameters:
     iso_path - The path on the ISO to clear the hidden bit from.
     rr_path - The Rock Ridge path on the ISO to clear the hidden bit from.
     joliet_path - The Joliet path on the ISO to clear the hidden bit from.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    provided = [path for path in (iso_path, rr_path, joliet_path)
                if path is not None]
    if len(provided) != 1:
        raise pycdlibexception.PyCdlibInvalidInput('Must provide exactly one of iso_path, rr_path, or joliet_path')

    if iso_path is not None:
        rec = self._find_iso_record(utils.normpath(iso_path))
    elif rr_path is not None:
        rec = self._find_rr_record(utils.normpath(rr_path))
    else:
        rec = self._find_joliet_record(self._normalize_joliet_path(joliet_path))

    rec.change_existence(False)
def set_relocated_name(self, name, rr_name):
    # type: (str, str) -> None
    '''
    Set the name of the relocated directory on a Rock Ridge ISO.  The ISO
    must be a Rock Ridge one, and must not have previously had the relocated
    name set.

    Parameters:
     name - The name for a relocated directory.
     rr_name - The Rock Ridge name for a relocated directory.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    if not self.rock_ridge:
        raise pycdlibexception.PyCdlibInvalidInput('Can only set the relocated name on a Rock Ridge ISO')

    new_name = name.encode('utf-8')
    new_rr_name = rr_name.encode('utf-8')
    if self._rr_moved_name is not None:
        # Re-setting the identical pair is a harmless no-op; any other
        # change after the fact is refused.
        if self._rr_moved_name == new_name and self._rr_moved_rr_name == new_rr_name:
            return
        raise pycdlibexception.PyCdlibInvalidInput('Changing the existing rr_moved name is not allowed')

    _check_iso9660_directory(new_name, self.interchange_level)
    self._rr_moved_name = new_name
    self._rr_moved_rr_name = new_rr_name
def walk(self, **kwargs):
    # type: (str) -> Generator
    '''
    Walk the entries on the ISO, starting at the given path.  One, and only
    one, of iso_path, rr_path, joliet_path, and udf_path is allowed.
    Similar to os.walk(), yield a 3-tuple of (path-to-here, dirlist, filelist)
    for each directory level.

    Parameters:
     iso_path - The absolute ISO path to the starting entry on the ISO.
     rr_path - The absolute Rock Ridge path to the starting entry on the ISO.
     joliet_path - The absolute Joliet path to the starting entry on the ISO.
     udf_path - The absolute UDF path to the starting entry on the ISO.
    Yields:
     3-tuples of (path-to-here, dirlist, filelist)
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    num_paths = 0
    for key in kwargs:
        if key in ('joliet_path', 'rr_path', 'iso_path', 'udf_path'):
            if kwargs[key] is not None:
                num_paths += 1
        else:
            raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    rec = None  # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]
    # BUG FIX: dispatch on the keyword whose *value* is set.  The previous
    # code checked only for key presence, so a call such as
    # walk(iso_path='/FOO', joliet_path=None) passed the count check above
    # but was mis-routed to the Joliet branch with a None path.
    if kwargs.get('joliet_path') is not None:
        joliet_path = self._normalize_joliet_path(kwargs['joliet_path'])
        rec = self._find_joliet_record(joliet_path)
        path_type = 'joliet_path'
        encoding = 'utf-16_be'
    elif kwargs.get('udf_path') is not None:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a UDF path for a UDF ISO')
        (ident_unused, rec) = self._find_udf_record(utils.normpath(kwargs['udf_path']))
        if rec is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry')
        path_type = 'udf_path'
        # Overridden per-child below from each UDF File Identifier.
        encoding = ''
    elif kwargs.get('rr_path') is not None:
        if not self.rock_ridge:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a rr_path from a non-Rock Ridge ISO')
        rec = self._find_rr_record(utils.normpath(kwargs['rr_path']))
        path_type = 'rr_path'
        encoding = 'utf-8'
    else:
        rec = self._find_iso_record(utils.normpath(kwargs['iso_path']))
        path_type = 'iso_path'
        encoding = 'utf-8'

    # Breadth-first traversal; entries are prepended so that child
    # directories of the most recent yield are visited next.
    dirs = collections.deque([rec])
    while dirs:
        dir_record = dirs.popleft()

        relpath = self.full_path_from_dirrecord(dir_record,
                                                rockridge=path_type == 'rr_path')
        dirlist = []
        filelist = []
        dirdict = {}

        for child in reversed(list(self.list_children(**{path_type: relpath}))):
            if child is None or child.is_dot() or child.is_dotdot():
                continue

            if isinstance(child, udfmod.UDFFileEntry) and child.file_ident is not None:
                encoding = child.file_ident.encoding

            if path_type == 'rr_path':
                name = child.rock_ridge.name()
            else:
                name = child.file_identifier()

            if sys.version_info >= (3, 0):
                # Python 3, just return the encoded version
                encoded = name.decode(encoding)
            else:
                # Python 2.
                encoded = name.decode(encoding).encode('utf-8')

            if child.is_dir():
                dirlist.append(encoded)
                dirdict[encoded] = child
            else:
                filelist.append(encoded)

        yield relpath, dirlist, filelist

        # We allow the user to modify dirlist along the way, so we
        # add the children to dirs *after* yield returns.
        for name in dirlist:
            dirs.appendleft(dirdict[name])
def open_file_from_iso(self, **kwargs):
    # type: (str) -> PyCdlibIO
    '''
    Open a file for reading in a context manager.  This allows the user to
    operate on the file in user-defined chunks (utilizing the read() method
    of the returned context manager).

    Parameters:
     iso_path - The absolute ISO path to the file on the ISO.
     rr_path - The absolute Rock Ridge path to the file on the ISO.
     joliet_path - The absolute Joliet path to the file on the ISO.
     udf_path - The absolute UDF path to the file on the ISO.
    Returns:
     A PyCdlibIO object allowing access to the file.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    num_paths = 0
    for key in kwargs:
        if key in ('joliet_path', 'rr_path', 'iso_path', 'udf_path'):
            if kwargs[key] is not None:
                num_paths += 1
        else:
            raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    rec = None  # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]
    # BUG FIX: dispatch on the keyword whose *value* is set.  The previous
    # code checked only for key presence, so a call such as
    # open_file_from_iso(iso_path='/FOO', joliet_path=None) passed the
    # count check above but was mis-routed to the Joliet branch with a
    # None path.
    if kwargs.get('joliet_path') is not None:
        joliet_path = self._normalize_joliet_path(kwargs['joliet_path'])
        rec = self._find_joliet_record(joliet_path)
    elif kwargs.get('udf_path') is not None:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a UDF path for a UDF ISO')
        (ident_unused, rec) = self._find_udf_record(utils.normpath(kwargs['udf_path']))
        if rec is None:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot get entry for empty UDF File Entry')
    elif kwargs.get('rr_path') is not None:
        if not self.rock_ridge:
            raise pycdlibexception.PyCdlibInvalidInput('Cannot fetch a rr_path from a non-Rock Ridge ISO')
        rec = self._find_rr_record(utils.normpath(kwargs['rr_path']))
    else:
        rec = self._find_iso_record(utils.normpath(kwargs['iso_path']))

    if not rec.is_file():
        raise pycdlibexception.PyCdlibInvalidInput('Path to open must be a file')

    if rec.inode is None:
        raise pycdlibexception.PyCdlibInvalidInput('File has no data')

    return PyCdlibIO(rec.inode, self.pvd.logical_block_size())
def close(self):
    # type: () -> None
    '''
    Close the PyCdlib object, and re-initialize the object to the defaults.
    The object can then be re-used for manipulation of another ISO.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # Only close the backing file object when we opened it ourselves; a
    # caller-supplied file object remains the caller's responsibility.
    if self._managing_fp:
        self._cdfp.close()

    self._initialize()
def make_arg_parser():
    """Build and return the ArgumentParser for the vex command line."""
    parser = argparse.ArgumentParser(
        usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...",
        formatter_class=argparse.RawTextHelpFormatter,
    )

    make_group = parser.add_argument_group(title='To make a new virtualenv')
    make_group.add_argument(
        '-m', '--make',
        help="make named virtualenv before running command",
        action="store_true",
    )
    make_group.add_argument(
        '--python',
        action="store",
        default=None,
        help="specify which python for virtualenv to be made",
    )
    make_group.add_argument(
        '--site-packages',
        action="store_true",
        help="allow site package imports from new virtualenv",
    )
    make_group.add_argument(
        '--always-copy',
        action="store_true",
        help="use copies instead of symlinks in new virtualenv",
    )

    remove_group = parser.add_argument_group(title='To remove a virtualenv')
    remove_group.add_argument(
        '-r', '--remove',
        help="remove the named virtualenv after running command",
        action="store_true",
    )

    parser.add_argument(
        "--path",
        action="store",
        metavar="DIR",
        help="absolute path to virtualenv to use",
    )
    parser.add_argument(
        '--cwd',
        action="store",
        metavar="DIR",
        default='.',
        help="path to run command in (default: '.' aka $PWD)",
    )
    parser.add_argument(
        "--config",
        action="store",
        metavar="FILE",
        default=None,
        help="path to config file to read (default: '~/.vexrc')",
    )
    parser.add_argument(
        '--shell-config',
        dest="shell_to_configure",
        action="store",
        metavar="SHELL",
        default=None,
        help="print optional config for the specified shell",
    )
    parser.add_argument(
        '--list',
        action="store",
        metavar="PREFIX",
        nargs="?",
        const="",
        default=None,
        help="print a list of available virtualenvs [matching PREFIX]",
    )
    parser.add_argument(
        '--version',
        action="store_true",
        help="print the version of vex that is being run",
    )
    # Everything after the recognized options (the virtualenv name and the
    # command to run inside it) is captured verbatim and hidden from help.
    parser.add_argument(
        "rest",
        nargs=argparse.REMAINDER,
        help=argparse.SUPPRESS,
    )
    return parser
def get_options(argv):
    """Parse the given list as vex command-line arguments.

    :returns:
        an options object as returned by argparse.
    :raises exceptions.UnknownArguments:
        if any argument is not recognized.
    """
    parser = make_arg_parser()
    options, unknown = parser.parse_known_args(argv)
    if unknown:
        # Show usage before bailing out so the user sees what is valid.
        parser.print_help()
        raise exceptions.UnknownArguments(
            "unknown args: {0!r}".format(unknown))
    options.print_help = parser.print_help
    return options
def _update_bird_conf_file(self, operation):
    """Update BIRD configuration.

    It adds to or removes IP prefix from BIRD configuration.  It also
    updates generation time stamp in the configuration file.

    Main program will exit if configuration file can't be read/written.

    Arguments:
        operation (obj): Either an AddOperation or DeleteOperation object

    Returns:
        True if BIRD configuration was updated otherwise False.

    """
    conf_updated = False
    prefixes = []
    ip_version = operation.ip_version
    config_file = self.bird_configuration[ip_version]['config_file']
    variable_name = self.bird_configuration[ip_version]['variable_name']
    changes_counter =\
        self.bird_configuration[ip_version]['changes_counter']
    dummy_ip_prefix =\
        self.bird_configuration[ip_version]['dummy_ip_prefix']

    try:
        prefixes = get_ip_prefixes_from_bird(config_file)
    except OSError as error:
        self.log.error("failed to open Bird configuration %s, this is a "
                       "FATAL error, thus exiting main program", error)
        sys.exit(1)

    if not prefixes:
        self.log.error("found empty bird configuration %s, this is a FATAL"
                       " error, thus exiting main program", config_file)
        sys.exit(1)

    # The dummy prefix must always be present so the BIRD variable is
    # never left empty; re-add it if someone removed it by hand.
    if dummy_ip_prefix not in prefixes:
        self.log.warning("dummy IP prefix %s wasn't found in bird "
                         "configuration, adding it. This shouldn't have "
                         "happened!", dummy_ip_prefix)
        prefixes.insert(0, dummy_ip_prefix)
        conf_updated = True

    # Drop prefixes present in BIRD for which we run no health check.
    ip_prefixes_without_check = set(prefixes).difference(
        self.ip_prefixes[ip_version])
    if ip_prefixes_without_check:
        self.log.warning("found %s IP prefixes in Bird configuration but "
                         "we aren't configured to run health checks on "
                         "them. Either someone modified the configuration "
                         "manually or something went horrible wrong. We "
                         "remove them from Bird configuration",
                         ','.join(ip_prefixes_without_check))
        # This is faster than using lambda and filter.
        # NOTE: We don't use remove method as we want to remove more than
        # occurrences of the IP prefixes without check.
        prefixes[:] = (ip for ip in prefixes
                       if ip not in ip_prefixes_without_check)
        conf_updated = True

    # Update the list of IP prefixes based on the status of health check.
    if operation.update(prefixes):
        conf_updated = True

    if not conf_updated:
        self.log.info('no updates for bird configuration')
        return conf_updated

    if self.bird_configuration[ip_version]['keep_changes']:
        archive_bird_conf(config_file, changes_counter)

    # some IP prefixes are either removed or added, create
    # configuration with new data.
    tempname = write_temp_bird_conf(
        dummy_ip_prefix,
        config_file,
        variable_name,
        prefixes
    )
    try:
        # Atomically replace the live configuration with the new file.
        os.rename(tempname, config_file)
    except OSError as error:
        self.log.critical("failed to create Bird configuration %s, this "
                          "is a FATAL error, thus exiting main program",
                          error)
        sys.exit(1)
    else:
        self.log.info("Bird configuration for IPv%s is updated",
                      ip_version)

    # dummy_ip_prefix is always there
    if len(prefixes) == 1:
        self.log.warning("Bird configuration doesn't have IP prefixes for "
                         "any of the services we monitor! It means local "
                         "node doesn't receive any traffic")

    return conf_updated
def run(self):
    """Launch checks and trigger updates on BIRD configuration.

    Spawns one ServiceCheck thread per configured service check, then
    blocks forever consuming operations from the action queue and applying
    them to the BIRD configuration.  Exits the process if a service check
    thread reports that it died.
    """
    # NOTE(review): 'lunch' in the log messages below is a pre-existing
    # typo for 'launch'; the strings are runtime output and deliberately
    # left untouched here.
    # Lunch a thread for each configuration
    if not self.services:
        self.log.warning("no service checks are configured")
    else:
        self.log.info("going to lunch %s threads", len(self.services))
        # Optional random start delay, used to spread the first checks out.
        if self.config.has_option('daemon', 'splay_startup'):
            splay_startup = self.config.getfloat('daemon', 'splay_startup')
        else:
            splay_startup = None

        for service in self.services:
            self.log.debug("lunching thread for %s", service)
            _config = {}
            # Fetch each known option with its typed configparser getter
            # (getint/getfloat/getboolean/...), skipping optional ones.
            for option, getter in SERVICE_OPTIONS_TYPE.items():
                try:
                    _config[option] = getattr(self.config, getter)(service,
                                                                   option)
                except NoOptionError:
                    pass  # for optional settings
            _thread = ServiceCheck(service, _config, self.action,
                                   splay_startup)
            _thread.start()

    # Stay running until we are stopped
    while True:
        # Fetch items from action queue
        operation = self.action.get(block=True)
        if isinstance(operation, ServiceCheckDiedError):
            self.log.critical(operation)
            self.log.critical("This is a fatal error and the only way to "
                              "recover is to restart, thus exiting with a "
                              "non-zero code and let systemd act by "
                              "triggering a restart")
            sys.exit(1)

        self.log.info("returned an item from the queue for %s with IP "
                      "prefix %s and action to %s Bird configuration",
                      operation.name,
                      operation.ip_prefix,
                      operation)
        bird_updated = self._update_bird_conf_file(operation)
        self.action.task_done()
        if bird_updated:
            ip_version = operation.ip_version
            if operation.bird_reconfigure_cmd is None:
                reconfigure_bird(
                    self.bird_configuration[ip_version]['reconfigure_cmd'])
            else:
                run_custom_bird_reconfigure(operation)
def valid_ip_prefix(ip_prefix):
    """Perform a sanity check on ip_prefix.

    Arguments:
        ip_prefix (str): The IP-Prefix to validate

    Returns:
        True if ip_prefix is a valid IPv4 address with prefix length 32 or a
        valid IPv6 address with prefix length 128, otherwise False

    """
    try:
        network = ipaddress.ip_network(ip_prefix)
    except ValueError:
        return False

    # BUG FIX: the original compared 'max_prefixlen', which is a constant
    # per address family (always 32 for IPv4 and 128 for IPv6) regardless
    # of the input, so the checks never failed and any valid network (e.g.
    # '10.0.0.0/8') was accepted.  Compare the actual prefix length of the
    # supplied prefix instead, as the documented contract requires.
    if network.version == 4 and network.prefixlen != 32:
        return False
    if network.version == 6 and network.prefixlen != 128:
        return False

    return True
def get_ip_prefixes_from_config(config, services, ip_version):
    """Build a set of IP prefixes found in service configuration files.

    Arguments:
        config (obj): A configparser object which holds our configuration.
        services (list): A list of section names which are the name of the
            service checks.
        ip_version (int): IP protocol version

    Returns:
        A set of IP prefixes.

    """
    networks = (ipaddress.ip_network(config.get(service, 'ip_prefix'))
                for service in services)
    return {net.with_prefixlen for net in networks
            if net.version == ip_version}
def ip_prefixes_sanity_check(config, bird_configuration):
    """Sanity check on IP prefixes.

    Arguments:
        config (obj): A configparser object which holds our configuration.
        bird_configuration (dict): A dictionary, which holds Bird
            configuration per IP protocol version.

    """
    for ip_version, settings in bird_configuration.items():
        modify_ip_prefixes(config,
                           settings['config_file'],
                           settings['variable_name'],
                           settings['dummy_ip_prefix'],
                           settings['reconfigure_cmd'],
                           settings['keep_changes'],
                           settings['changes_counter'],
                           ip_version)
def modify_ip_prefixes(
        config,
        config_file,
        variable_name,
        dummy_ip_prefix,
        reconfigure_cmd,
        keep_changes,
        changes_counter,
        ip_version):
    """Modify IP prefixes in Bird configuration.

    Depending on the configuration it either removes or reports IP prefixes
    found in Bird configuration for which we don't have a service check
    associated with them.  Moreover, it adds the dummy IP prefix if it
    isn't present and ensures that the correct variable name is set.

    Arguments:
        config (obj): A configparser object which holds our configuration.
        config_file (str): The file name of bird configuration.
        variable_name (str): The name of the variable set in bird
            configuration.
        dummy_ip_prefix (str): The dummy IP prefix, which must always be
            present.
        reconfigure_cmd (str): The command to run to trigger a
            reconfiguration on Bird daemon upon successful configuration
            update.
        keep_changes (boolean): To enable keeping a history of changes
            applied to bird configuration.
        changes_counter (int): The number of configuration changes to keep.
        ip_version (int): IP protocol version of Bird configuration.

    """
    log = logging.getLogger(PROGRAM_NAME)
    services = config.sections()
    services.remove('daemon')  # not needed during sanity check for IP-Prefixes
    update_bird_conf = False
    try:
        ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file)
    except OSError as error:
        log.error("failed to open Bird configuration %s, this is a FATAL "
                  "error, thus exiting main program", error)
        sys.exit(1)

    # Make sure the variable name inside the Bird config matches the one
    # we are configured to manage.
    _name = get_variable_name_from_bird(config_file)
    if _name is None:
        log.warning("failed to find variable name in %s, going to add it",
                    config_file)
        update_bird_conf = True
    elif _name != variable_name:
        log.warning("found incorrect variable name in %s, going to add the "
                    "correct one %s", _name, variable_name)
        update_bird_conf = True

    if dummy_ip_prefix not in ip_prefixes_in_bird:
        log.warning("dummy IP prefix %s is missing from bird configuration "
                    "%s, adding it", dummy_ip_prefix, config_file)
        ip_prefixes_in_bird.insert(0, dummy_ip_prefix)
        update_bird_conf = True

    # Find IP prefixes in Bird configuration without a check.
    ip_prefixes_with_check = get_ip_prefixes_from_config(
        config,
        services,
        ip_version)
    # dummy_ip_prefix doesn't have a config by design
    ip_prefixes_with_check.add(dummy_ip_prefix)

    ip_prefixes_without_check = set(ip_prefixes_in_bird).difference(
        ip_prefixes_with_check)

    if ip_prefixes_without_check:
        # Either purge the unmanaged prefixes or just report them,
        # depending on the 'purge_ip_prefixes' daemon setting.
        if config.getboolean('daemon', 'purge_ip_prefixes'):
            log.warning("removing IP prefix(es) %s from %s because they don't "
                        "have a service check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)
            ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird
                                      if ip not in ip_prefixes_without_check)
            update_bird_conf = True
        else:
            log.warning("found IP prefixes %s in %s without a service "
                        "check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)

    if update_bird_conf:
        if keep_changes:
            archive_bird_conf(config_file, changes_counter)
        tempname = write_temp_bird_conf(
            dummy_ip_prefix,
            config_file,
            variable_name,
            ip_prefixes_in_bird
        )
        try:
            # Atomically replace the live configuration with the new file.
            os.rename(tempname, config_file)
        except OSError as error:
            msg = ("CRITICAL: failed to create Bird configuration {e}, "
                   "this is FATAL error, thus exiting main program"
                   .format(e=error))
            sys.exit("{m}".format(m=msg))
        else:
            log.info("Bird configuration for IPv%s is updated", ip_version)
        reconfigure_bird(reconfigure_cmd)
def load_configuration(config_file, config_dir, service_file):
    """Build configuration objects.

    Reads the daemon configuration file plus either a single service-check
    file or every '*.conf' file under a directory, validates the result and
    builds the per-IP-version Bird configuration structure.

    Arguments:
        config_file (str): The file name which holds daemon settings
        config_dir (str): The directory name which has configuration files
            for each service check
        service_file (str): A file which contains configuration for a single
            service check

    Returns:
        A tuple with 1st element a ConfigParser object and 2nd element
        a dictionary with Bird configuration per IP protocol version.

    Raises:
        ValueError if a sanity check fails.
    """
    config_files = [config_file]
    config = configparser.ConfigParser()
    # Seed defaults before reading any file so optional options resolve.
    config.read_dict(DEFAULT_OPTIONS)
    if not os.path.isfile(config_file):
        raise ValueError("{f} configuration file either isn't readable or "
                         "doesn't exist".format(f=config_file))
    if service_file is not None:
        # A single service-check file takes precedence over config_dir.
        if not os.path.isfile(service_file):
            raise ValueError("{f} configuration file for a service check "
                             "doesn't exist".format(f=service_file))
        else:
            config_files.append(service_file)
    elif config_dir is not None:
        if not os.path.isdir(config_dir):
            raise ValueError("{d} directory with configuration files for "
                             "service checks doesn't exist"
                             .format(d=config_dir))
        else:
            config_files.extend(glob.glob(os.path.join(config_dir, '*.conf')))
    try:
        config.read(config_files)
    except configparser.Error as exc:
        raise ValueError(exc)
    configuration_check(config)
    bird_configuration = build_bird_configuration(config)
    create_bird_config_files(bird_configuration)
    return config, bird_configuration
"Build configuration objects.\n\n If all sanity checks against daemon and service check settings are passed\n then it builds a ConfigParser object which holds all our configuration\n and a dictionary data structure which holds Bird configuration per IP\n protocol version.\n\n Arguments:\n config_file (str): The file name which holds daemon settings\n config_dir (str): The directory name which has configuration files\n for each service check\n service_file (str): A file which contains configuration for a single\n service check\n\n Returns:\n A tuple with 1st element a ConfigParser object and 2nd element\n a dictionary.\n Raises:\n ValueError if a sanity check fails.\n\n "
] |
def configuration_check(config):
    """Perform a sanity check on configuration.

    First it performs a sanity check against settings for daemon
    and then against settings for each service check.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        None if all checks are successfully passed otherwise raises a
        ValueError exception.
    """
    log_level = config.get('daemon', 'loglevel')
    num_level = getattr(logging, log_level.upper(), None)
    pidfile = config.get('daemon', 'pidfile')
    # Catch the case where the directory, under which we store the pid file,
    # is missing.
    if not os.path.isdir(os.path.dirname(pidfile)):
        # BUG FIX: error message previously read "doesn't exit".
        raise ValueError("{d} doesn't exist".format(d=os.path.dirname(pidfile)))
    if not isinstance(num_level, int):
        raise ValueError('Invalid log level: {}'.format(log_level))
    # Make sure the log destinations are writable before the daemon starts.
    for _file in 'log_file', 'stderr_file':
        if config.has_option('daemon', _file):
            try:
                touch(config.get('daemon', _file))
            except OSError as exc:
                raise ValueError(exc)
    # Validate the type of every daemon option by invoking the matching
    # configparser getter (get/getboolean/getint...).
    for option, getter in DAEMON_OPTIONS_TYPE.items():
        try:
            getattr(config, getter)('daemon', option)
        except configparser.NoOptionError as error:
            if option not in DAEMON_OPTIONAL_OPTIONS:
                raise ValueError(error)
        except configparser.Error as error:
            raise ValueError(error)
        except ValueError as exc:
            msg = ("invalid data for '{opt}' option in daemon section: {err}"
                   .format(opt=option, err=exc))
            raise ValueError(msg)
    service_configuration_check(config)
"Perform a sanity check on configuration.\n\n First it performs a sanity check against settings for daemon\n and then against settings for each service check.\n\n Arguments:\n config (obj): A configparser object which holds our configuration.\n\n Returns:\n None if all checks are successfully passed otherwise raises a\n ValueError exception.\n\n "
] |
def service_configuration_check(config):
    """Perform a sanity check against options for each service check.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        None if all sanity checks are successfully passed otherwise raises a
        ValueError exception.
    """
    ipv4_enabled = config.getboolean('daemon', 'ipv4')
    ipv6_enabled = config.getboolean('daemon', 'ipv6')
    services = config.sections()
    # we don't need it during sanity check for services check
    services.remove('daemon')
    ip_prefixes = []
    for service in services:
        # Validate option types by invoking the matching configparser getter.
        for option, getter in SERVICE_OPTIONS_TYPE.items():
            try:
                getattr(config, getter)(service, option)
            except configparser.NoOptionError as error:
                if option not in SERVICE_OPTIONAL_OPTIONS:
                    raise ValueError(error)
            except configparser.Error as error:
                raise ValueError(error)
            except ValueError as exc:
                msg = ("invalid data for '{opt}' option in service check "
                       "{name}: {err}"
                       .format(opt=option, name=service, err=exc))
                raise ValueError(msg)
        if (config.get(service, 'on_disabled') != 'withdraw' and
                config.get(service, 'on_disabled') != 'advertise'):
            msg = ("'on_disabled' option has invalid value ({val}) for "
                   "service check {name}, 'on_disabled option should be set "
                   "either to 'withdraw' or to 'advertise'"
                   .format(name=service,
                           val=config.get(service, 'on_disabled')))
            raise ValueError(msg)
        # Collected so duplicate prefixes across services can be detected.
        ip_prefixes.append(config.get(service, 'ip_prefix'))
        if not valid_ip_prefix(config.get(service, 'ip_prefix')):
            msg = ("invalid value ({val}) for 'ip_prefix' option in service "
                   "check {name}. It should be an IP PREFIX in form of "
                   "ip/prefixlen."
                   .format(name=service, val=config.get(service, 'ip_prefix')))
            raise ValueError(msg)
        _ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))
        if not ipv6_enabled and _ip_prefix.version == 6:
            raise ValueError("IPv6 support is disabled in "
                             "anycast-healthchecker while there is an IPv6 "
                             "prefix configured for {name} service check"
                             .format(name=service))
        if not ipv4_enabled and _ip_prefix.version == 4:
            raise ValueError("IPv4 support is disabled in "
                             "anycast-healthchecker while there is an IPv4 "
                             "prefix configured for {name} service check"
                             .format(name=service))
        # Verify the check command can be spawned at all; it is killed
        # immediately, only its ability to start is validated here.
        cmd = shlex.split(config.get(service, 'check_cmd'))
        try:
            proc = subprocess.Popen(cmd)
            proc.kill()
        except (OSError, subprocess.SubprocessError) as exc:
            msg = ("failed to run check command '{cmd}' for service check "
                   "{name}: {err}"
                   .format(name=service,
                           cmd=config.get(service, 'check_cmd'),
                           err=exc))
            raise ValueError(msg)
    occurrences_of_ip_prefixes = Counter(ip_prefixes)
    for ip_prefix, counter in occurrences_of_ip_prefixes.items():
        if counter > 1:
            raise ValueError("{ip} is used by {c} service checks"
                             .format(ip=ip_prefix, c=counter))
"Perform a sanity check against options for each service check.\n\n Arguments:\n config (obj): A configparser object which holds our configuration.\n\n Returns:\n None if all sanity checks are successfully passed otherwise raises a\n ValueError exception.\n\n "
] |
def build_bird_configuration(config):
    """Build bird configuration structure.

    First it performs a sanity check against bird settings and then builds a
    dictionary structure with bird configuration per IP version.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        A dictionary with Bird settings keyed by IP protocol version (4/6).

    Raises:
        ValueError if sanity check fails.
    """
    bird_configuration = {}
    if config.getboolean('daemon', 'ipv4'):
        if os.path.islink(config.get('daemon', 'bird_conf')):
            config_file = os.path.realpath(config.get('daemon', 'bird_conf'))
            # BUG FIX: message had an unbalanced parenthesis "({s} -> {d},".
            print("'bird_conf' is set to a symbolic link ({s} -> {d}), but we "
                  "will use the canonical path of that link"
                  .format(s=config.get('daemon', 'bird_conf'), d=config_file))
        else:
            config_file = config.get('daemon', 'bird_conf')
        dummy_ip_prefix = config.get('daemon', 'dummy_ip_prefix')
        if not valid_ip_prefix(dummy_ip_prefix):
            raise ValueError("invalid dummy IPv4 prefix: {i}"
                             .format(i=dummy_ip_prefix))
        bird_configuration[4] = {
            'config_file': config_file,
            'variable_name': config.get('daemon', 'bird_variable'),
            'dummy_ip_prefix': dummy_ip_prefix,
            'reconfigure_cmd': config.get('daemon', 'bird_reconfigure_cmd'),
            'keep_changes': config.getboolean('daemon', 'bird_keep_changes'),
            'changes_counter': config.getint('daemon', 'bird_changes_counter')
        }
    if config.getboolean('daemon', 'ipv6'):
        if os.path.islink(config.get('daemon', 'bird6_conf')):
            config_file = os.path.realpath(config.get('daemon', 'bird6_conf'))
            # BUG FIX: same unbalanced parenthesis as the IPv4 message.
            print("'bird6_conf' is set to a symbolic link ({s} -> {d}), but we "
                  "will use the canonical path of that link"
                  .format(s=config.get('daemon', 'bird6_conf'), d=config_file))
        else:
            config_file = config.get('daemon', 'bird6_conf')
        dummy_ip_prefix = config.get('daemon', 'dummy_ip6_prefix')
        if not valid_ip_prefix(dummy_ip_prefix):
            raise ValueError("invalid dummy IPv6 prefix: {i}"
                             .format(i=dummy_ip_prefix))
        bird_configuration[6] = {
            'config_file': config_file,
            'variable_name': config.get('daemon', 'bird6_variable'),
            'dummy_ip_prefix': dummy_ip_prefix,
            'reconfigure_cmd': config.get('daemon', 'bird6_reconfigure_cmd'),
            'keep_changes': config.getboolean('daemon', 'bird6_keep_changes'),
            'changes_counter': config.getint('daemon', 'bird6_changes_counter')
        }
    return bird_configuration
"Build bird configuration structure.\n\n First it performs a sanity check against bird settings and then builds a\n dictionary structure with bird configuration per IP version.\n\n Arguments:\n config (obj): A configparser object which holds our configuration.\n\n Returns:\n A dictionary\n\n Raises:\n ValueError if sanity check fails.\n\n "
] |
def get_variable_name_from_bird(bird_conf):
    """Return the variable name set in Bird configuration.

    The variable name in Bird configuration is declared with the keyword
    'define', for example:

        define ACAST_PS_ADVERTISE =

    and we extract the token between 'define' and the equals sign.

    Arguments:
        bird_conf (str): The absolute file name path of Bird configuration.

    Returns:
        The variable name as a string or None if it isn't found.
    """
    define_pattern = re.compile(r'^\s*define\s+(?P<name>\S+\b)\s+=')
    with open(bird_conf, 'r') as conf_fh:
        for conf_line in conf_fh:
            matched = define_pattern.search(conf_line)
            if matched:
                return matched.group('name')
    return None
"Return the variable name set in Bird configuration.\n\n The variable name in Bird configuration is set with the keyword 'define',\n here is an example:\n\n define ACAST_PS_ADVERTISE =\n\n and we exract the string between the word 'define' and the equals sign.\n\n Arguments:\n bird_conf (str): The absolute file name path of Bird configuration.\n\n Returns:\n The variable name as a string or None if it isn't found.\n\n "
] |
def create_bird_config_files(bird_configuration):
    """Create bird configuration files per IP version.

    Creates bird configuration files if they don't exist. It also creates the
    directories where we store the history of changes, if this functionality
    is enabled.

    Arguments:
        bird_configuration (dict): A dictionary with settings for bird.

    Returns:
        None

    Raises:
        ValueError if we can't create bird configuration files or the
        directory to store the history of changes.
    """
    for ip_version in bird_configuration:
        # This creates the file if it doesn't exist.
        config_file = bird_configuration[ip_version]['config_file']
        try:
            touch(config_file)
        except OSError as exc:
            raise ValueError("failed to create {f}:{e}"
                             .format(f=config_file, e=exc))
        if bird_configuration[ip_version]['keep_changes']:
            # History lives in a 'history' sub-directory next to the config.
            history_dir = os.path.join(os.path.dirname(config_file), 'history')
            try:
                os.mkdir(history_dir)
            except FileExistsError:
                # Already created on a previous run; nothing to do.
                pass
            except OSError as exc:
                raise ValueError("failed to make directory {d} for keeping a "
                                 "history of changes for {b}:{e}"
                                 .format(d=history_dir, b=config_file, e=exc))
            else:
                print("{d} is created".format(d=history_dir))
"Create bird configuration files per IP version.\n\n Creates bird configuration files if they don't exist. It also creates the\n directories where we store the history of changes, if this functionality is\n enabled.\n\n Arguments:\n bird_configuration (dict): A dictionary with settings for bird.\n\n Returns:\n None\n\n Raises:\n ValueError if we can't create bird configuration files and the\n directory to store the history of changes in bird configuration file.\n\n "
] |
def running(processid):
    """Check the validity of a process ID.

    Arguments:
        processid (int): Process ID number.

    Returns:
        True if process ID is found otherwise False.
    """
    # From kill(2): if sig is 0 (the null signal), error checking is
    # performed but no signal is actually sent, so it can be used to
    # probe whether the PID is valid.
    try:
        os.kill(processid, 0)
    except OverflowError as exc:
        print("checking validity of pid ({p}) failed with: {e}"
              .format(p=processid, e=exc))
        sys.exit(1)
    except OSError:
        return False
    return True
"Check the validity of a process ID.\n\n Arguments:\n processid (int): Process ID number.\n\n Returns:\n True if process ID is found otherwise False.\n\n "
] |
def get_ip_prefixes_from_bird(filename):
    """Build a list of IP prefixes found in Bird configuration.

    Notes:
        It can only parse a file with the following format

            define ACAST_PS_ADVERTISE =
                [
                    10.189.200.155/32,
                    10.189.200.255/32
                ];

    Arguments:
        filename (str): The absolute path of the Bird configuration file.

    Returns:
        A list of IP prefixes.
    """
    with open(filename, 'r') as bird_conf:
        content = bird_conf.read()
    # Keep only lines that look like an IP prefix once the surrounding
    # whitespace and trailing commas are removed.
    return [candidate
            for candidate in (raw.strip(', ') for raw in content.splitlines())
            if valid_ip_prefix(candidate)]
"Build a list of IP prefixes found in Bird configuration.\n\n Arguments:\n filename (str): The absolute path of the Bird configuration file.\n\n Notes:\n It can only parse a file with the following format\n\n define ACAST_PS_ADVERTISE =\n [\n 10.189.200.155/32,\n 10.189.200.255/32\n ];\n\n Returns:\n A list of IP prefixes.\n\n "
] |
def reconfigure_bird(cmd):
    """Reconfigure BIRD daemon.

    Arguments:
        cmd (string): A command to trigger a reconfiguration of Bird daemon

    Notes:
        Runs 'birdc configure' to reconfigure BIRD. birdc returns a non-zero
        exit code only when it can't access the BIRD control socket (daemon
        down or insufficient privileges). An invalid configuration still
        exits zero, so success is detected by looking for the string
        'Reconfigured' in the output instead of the exit code.
    """
    log = logging.getLogger(PROGRAM_NAME)
    cmd = shlex.split(cmd)
    log.info("reconfiguring BIRD by running %s", ' '.join(cmd))
    try:
        output = subprocess.check_output(
            cmd,
            timeout=2,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
    except subprocess.TimeoutExpired:
        log.error("reconfiguring bird timed out")
        return
    except subprocess.CalledProcessError as error:
        # birdc returns 0 even when it fails due to invalid config,
        # but it returns 1 when BIRD is down.
        log.error("reconfiguring BIRD failed, either BIRD daemon is down or "
                  "we don't have privileges to reconfigure it (sudo problems?)"
                  ":%s", error.output.strip())
        return
    except FileNotFoundError as error:
        log.error("reconfiguring BIRD failed with: %s", error)
        return
    # 'Reconfigured' string will be in the output if and only if conf is valid.
    pattern = re.compile('^Reconfigured$', re.MULTILINE)
    if pattern.search(str(output)):
        log.info('reconfigured BIRD daemon')
    else:
        # We will end up here only if we generated an invalid conf
        # or someone broke bird.conf.
        # BUG FIX: message previously read "Bird configuration in is broken".
        log.error("reconfiguring BIRD returned error, most likely we generated"
                  " an invalid configuration file or Bird configuration is "
                  "broken:%s", output)
"Reconfigure BIRD daemon.\n\n Arguments:\n cmd (string): A command to trigger a reconfiguration of Bird daemon\n\n Notes:\n Runs 'birdc configure' to reconfigure BIRD. Some useful information on\n how birdc tool works:\n -- Returns a non-zero exit code only when it can't access BIRD\n daemon via the control socket (/var/run/bird.ctl). This happens\n when BIRD daemon is either down or when the caller of birdc\n doesn't have access to the control socket.\n -- Returns zero exit code when reconfigure fails due to invalid\n configuration. Thus, we catch this case by looking at the output\n and not at the exit code.\n -- Returns zero exit code when reconfigure was successful.\n -- Should never timeout, if it does then it is a bug.\n\n "
] |
def write_temp_bird_conf(dummy_ip_prefix,
                         config_file,
                         variable_name,
                         prefixes):
    """Write in a temporary file the list of IP-Prefixes.

    A failure to create and write the temporary file will exit main program.

    Arguments:
        dummy_ip_prefix (str): The dummy IP prefix, which must be always
            present in the list
        config_file (str): The file name of bird configuration
        variable_name (str): The name of the variable set in bird configuration
        prefixes (list): The list of IP-Prefixes to write

    Returns:
        The filename of the temporary file
    """
    log = logging.getLogger(PROGRAM_NAME)
    comment = ("# {i} is a dummy IP Prefix. It should NOT be used and "
               "REMOVED from the constant.".format(i=dummy_ip_prefix))
    # the temporary file must be on the same filesystem as the bird config
    # as we use os.rename to perform an atomic update on the bird config.
    # Thus, we create it in the same directory that bird config is stored.
    tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))
    log.debug("going to write to %s", tm_file)
    try:
        with open(tm_file, 'w') as tmpf:
            tmpf.write("# Generated {t} by {n} (pid={p})\n"
                       .format(t=datetime.datetime.now(),
                               n=PROGRAM_NAME,
                               p=os.getpid()))
            tmpf.write("{c}\n".format(c=comment))
            tmpf.write("define {n} =\n".format(n=variable_name))
            tmpf.write("{s}[\n".format(s=4 * ' '))
            # all entries of the array need a trailing comma except the last
            # one. A single element array doesn't need a trailing comma.
            tmpf.write(',\n'.join([' '*8 + n for n in prefixes]))
            tmpf.write("\n{s}];\n".format(s=4 * ' '))
    except OSError as error:
        # BUG FIX: message previously read "this exiting main program".
        log.critical("failed to write temporary file %s: %s. This is a FATAL "
                     "error, thus exiting main program", tm_file, error)
        sys.exit(1)
    else:
        return tm_file
"Write in a temporary file the list of IP-Prefixes.\n\n A failure to create and write the temporary file will exit main program.\n\n Arguments:\n dummy_ip_prefix (str): The dummy IP prefix, which must be always\n config_file (str): The file name of bird configuration\n variable_name (str): The name of the variable set in bird configuration\n prefixes (list): The list of IP-Prefixes to write\n\n Returns:\n The filename of the temporary file\n\n "
] |
def archive_bird_conf(config_file, changes_counter):
    """Keep a history of Bird configuration files.

    Copies the current configuration into the 'history' sub-directory and
    prunes the oldest copies once their number exceeds changes_counter.

    Arguments:
        config_file (str): file name of bird configuration
        changes_counter (int): number of configuration files to keep in the
            history
    """
    log = logging.getLogger(PROGRAM_NAME)
    history_dir = os.path.join(os.path.dirname(config_file), 'history')
    dst = os.path.join(history_dir, str(time.time()))
    # BUG FIX: debug message previously read "coping %s to %s".
    log.debug("copying %s to %s", config_file, dst)
    history = [x for x in os.listdir(history_dir)
               if os.path.isfile(os.path.join(history_dir, x))]
    if len(history) > changes_counter:
        log.info("threshold of %s is reached, removing old files",
                 changes_counter)
        # File names are timestamps, so a reverse sort puts the newest
        # first; everything beyond the threshold is removed.
        for _file in sorted(history, reverse=True)[changes_counter - 1:]:
            _path = os.path.join(history_dir, _file)
            try:
                os.remove(_path)
            except OSError as exc:
                log.warning("failed to remove %s: %s", _file, exc)
            else:
                log.info("removed %s", _path)
    try:
        shutil.copy2(config_file, dst)
    except OSError as exc:
        log.warning("failed to copy %s to %s: %s", config_file, dst, exc)
"Keep a history of Bird configuration files.\n\n Arguments:\n config_file (str): file name of bird configuration\n changes_counter (int): number of configuration files to keep in the\n history\n "
] |
def update_pidfile(pidfile):
    """Update pidfile.

    Notice:
        We should call this function only after we have successfully acquired
        a lock and never before. It exits main program if it fails to parse
        and/or write pidfile.

    Arguments:
        pidfile (str): pidfile to update
    """
    try:
        with open(pidfile, mode='r') as _file:
            pid = _file.read(1024).rstrip()
        try:
            pid = int(pid)
        except ValueError:
            print("cleaning stale pidfile with invalid data:'{}'".format(pid))
            write_pid(pidfile)
        else:
            if running(pid):
                # This is to catch migration issues from 0.7.x to 0.8.x
                # version, where old process is still around as it failed to
                # be stopped. Since newer version has a different locking
                # mechanism, we can end up with both versions running.
                # In order to avoid this situation we refuse to startup.
                sys.exit("process {} is already running".format(pid))
            else:
                # pidfile exists with a PID for a process that is not running.
                # Let's update PID.
                print("updating stale processID({}) in pidfile".format(pid))
                write_pid(pidfile)
    except FileNotFoundError:
        # Either it's 1st time we run or previous run was terminated
        # successfully.
        print("creating pidfile {f}".format(f=pidfile))
        write_pid(pidfile)
    except OSError as exc:
        sys.exit("failed to update pidfile:{e}".format(e=exc))
"Update pidfile.\n\n Notice:\n We should call this function only after we have successfully acquired\n a lock and never before. It exits main program if it fails to parse\n and/or write pidfile.\n\n Arguments:\n pidfile (str): pidfile to update\n\n "
] |
def write_pid(pidfile):
    """Write processID to the pidfile.

    Notice:
        It exits main program if it fails to write pidfile.

    Arguments:
        pidfile (str): pidfile to update
    """
    processid = str(os.getpid())
    try:
        with open(pidfile, mode='w') as pid_handle:
            print("writing processID {p} to pidfile".format(p=processid))
            pid_handle.write(processid)
    except OSError as exc:
        sys.exit("failed to write pidfile:{e}".format(e=exc))
"Write processID to the pidfile.\n\n Notice:\n It exits main program if it fails to write pidfile.\n\n Arguments:\n pidfile (str): pidfile to update\n\n "
] |
def shutdown(pidfile, signalnb=None, frame=None):
    """Clean up pidfile upon shutdown.

    Notice:
        We should register this function as signal handler for the
        termination signals SIGHUP, SIGTERM, SIGABRT and SIGINT.

    Arguments:
        pidfile (str): pidfile to remove
        signalnb (int): The ID of signal
        frame (obj): Frame object at the time of receiving the signal
    """
    logger = logging.getLogger(PROGRAM_NAME)
    logger.info("received %s at %s", signalnb, frame)
    logger.info("going to remove pidfile %s", pidfile)
    # No point catching errors while deleting the pid file; we are
    # terminating either way.
    os.unlink(pidfile)
    logger.info('shutdown is complete')
    sys.exit(0)
"Clean up pidfile upon shutdown.\n\n Notice:\n We should register this function as signal handler for the following\n termination signals:\n SIGHUP\n SIGTERM\n SIGABRT\n SIGINT\n\n Arguments:\n pidfile (str): pidfile to remove\n signalnb (int): The ID of signal\n frame (obj): Frame object at the time of receiving the signal\n\n "
] |
def setup_logger(config):
    """Configure the logging environment.

    Notice:
        By default logging will go to STDOUT and messages for unhandled
        exceptions or crashes will go to STDERR. If log_file and/or
        log_server is set then we don't log to STDOUT. Messages for
        unhandled exceptions or crashes can only go to either STDERR or
        to stderr_file or to stderr_log_server.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        A logger with all possible handlers configured.
    """
    logger = logging.getLogger(PROGRAM_NAME)
    num_level = getattr(
        logging,
        config.get('daemon', 'loglevel').upper(),  # pylint: disable=no-member
        None
    )
    logger.setLevel(num_level)
    # Pad the thread-name column to the longest section name so columns
    # line up across services.
    lengths = []
    for section in config:
        lengths.append(len(section))
    width = sorted(lengths)[-1] + 1

    def log_format():
        """Produce a log format line."""
        supported_keys = [
            'asctime',
            'levelname',
            'process',
            # 'funcName',
            # 'lineno',
            'threadName',
            'message',
        ]
        return ' '.join(['%({0:s})'.format(i) for i in supported_keys])
    custom_format = log_format()
    json_formatter = CustomJsonFormatter(custom_format,
                                         prefix=PROGRAM_NAME + ': ')
    formatter = logging.Formatter(
        '%(asctime)s {program}[%(process)d] %(levelname)-8s '
        '%(threadName)-{width}s %(message)s'
        .format(program=PROGRAM_NAME, width=width)
    )
    # Register logging handlers based on configuration.
    if config.has_option('daemon', 'log_file'):
        file_handler = logging.handlers.RotatingFileHandler(
            config.get('daemon', 'log_file'),
            maxBytes=config.getint('daemon', 'log_maxbytes'),
            backupCount=config.getint('daemon', 'log_backups')
        )
        if config.getboolean('daemon', 'json_log_file'):
            file_handler.setFormatter(json_formatter)
        else:
            file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    if config.has_option('daemon', 'log_server'):
        udp_handler = logging.handlers.SysLogHandler(
            (
                config.get('daemon', 'log_server'),
                config.getint('daemon', 'log_server_port')
            )
        )
        if config.getboolean('daemon', 'json_log_server'):
            udp_handler.setFormatter(json_formatter)
        else:
            udp_handler.setFormatter(formatter)
        logger.addHandler(udp_handler)
    # Log to STDOUT if and only if log_file and log_server aren't enabled
    if (not config.has_option('daemon', 'log_file')
            and not config.has_option('daemon', 'log_server')):
        stream_handler = logging.StreamHandler()
        if config.getboolean('daemon', 'json_stdout'):
            stream_handler.setFormatter(json_formatter)
        else:
            stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    # We can redirect STDERR only to one destination.
    if config.has_option('daemon', 'stderr_file'):
        sys.stderr = CustomRotatingFileLogger(
            filepath=config.get('daemon', 'stderr_file'),
            maxbytes=config.getint('daemon', 'log_maxbytes'),
            backupcount=config.getint('daemon', 'log_backups')
        )
    elif (config.has_option('daemon', 'stderr_log_server')
          and not config.has_option('daemon', 'stderr_file')):
        # NOTE(review): this branch is guarded by 'stderr_log_server' but
        # reads 'log_server'/'log_server_port' — confirm this is intentional.
        sys.stderr = CustomUdpLogger(
            server=config.get('daemon', 'log_server'),
            port=config.getint('daemon', 'log_server_port')
        )
    else:
        print('messages for unhandled exceptions will go to STDERR')
    return logger
"Configure the logging environment.\n\n Notice:\n By default logging will go to STDOUT and messages for unhandled\n exceptions or crashes will go to STDERR. If log_file and/or log_server\n is set then we don't log to STDOUT. Messages for unhandled exceptions\n or crashes can only go to either STDERR or to stderr_file or to\n stderr_log_server.\n\n Arguments:\n config (obj): A configparser object which holds our configuration.\n\n Returns:\n A logger with all possible handlers configured.\n\n ",
"Produce a log format line."
] |
def run_custom_bird_reconfigure(operation):
    """Reconfigure BIRD daemon by running a custom command.

    One argument is appended to the configured command: "up" for an
    AddOperation, "down" otherwise. If the command times out it is killed;
    the command is started in a new session so that its whole process group
    can be terminated, avoiding orphan processes it may have spawned.

    Arguments:
        operation (obj): Either a AddOperation or DeleteOperation object.
    """
    log = logging.getLogger(PROGRAM_NAME)
    if isinstance(operation, AddOperation):
        status = 'up'
    else:
        status = 'down'
    cmd = shlex.split(operation.bird_reconfigure_cmd + " " + status)
    log.info("reconfiguring BIRD by running custom command %s", ' '.join(cmd))
    try:
        proc = subprocess.Popen(cmd,
                                start_new_session=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        _, errs = proc.communicate(
            timeout=operation.bird_reconfigure_timeout
        )
    except OSError as exc:
        log.error("reconfiguring BIRD failed with: %s", exc)
    except subprocess.TimeoutExpired as exc:
        log.error("reconfiguring bird timed out")
        if proc.poll() is None:  # if process is still alive
            # Kill the entire process group started by the command.
            try:
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            except PermissionError as exc:
                log.error("failed to terminate custom bird command: %s", exc)
    else:
        if proc.returncode != 0:
            log.error("reconfiguring BIRD failed with return code: %s and "
                      "stderr: %s", proc.returncode, errs)
        else:
            log.info("custom command successfully reconfigured Bird")
"Reconfigure BIRD daemon by running a custom command.\n\n It adds one argument to the command, either \"up\" or \"down\".\n If command times out then we kill it. In order to avoid leaving any orphan\n processes, that may have been started by the command, we start a new\n session when we invoke the command and then we kill process group of that\n session.\n\n Arguments:\n operation (obj): Either a AddOperation or DeleteOperation object.\n\n "
] |
def update(self, prefixes):
    """Add the service's IP prefix to the list when it is missing.

    Arguments:
        prefixes(list): A list to add the value

    Returns:
        True when the prefix was appended, False when it was already there.
    """
    if self.ip_prefix in prefixes:
        return False
    prefixes.append(self.ip_prefix)
    self.log.info("announcing %s for %s", self.ip_prefix, self.name)
    return True
"Add a value to the list.\n\n Arguments:\n prefixes(list): A list to add the value\n "
] |
def write(self, string):
    """Strip the trailing newline and forward the text to the logger."""
    stripped = string.rstrip()
    if not stripped:
        return  # Don't log empty lines
    self.logger.critical(stripped)
"Erase newline from a string and write to the logger."
] |
def process_log_record(self, log_record):
    """Add custom record keys and rename the threadName key."""
    extra = {
        "version": __version__,
        "program": PROGRAM_NAME,
        # threadName carries the service-check name; expose it as such.
        "service_name": log_record.pop('threadName', None),
    }
    log_record.update(extra)
    return log_record
"Add customer record keys and rename threadName key."
] |
def get_vexrc(options, environ):
    """Get a representation of the contents of the config file.

    :returns:
        a Vexrc instance.

    :raises:
        InvalidVexrc when --config names a nonexistent file.
    """
    # Complain if user specified nonexistent file with --config.
    # But we don't want to complain just because ~/.vexrc doesn't exist.
    if options.config and not os.path.exists(options.config):
        raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config))
    filename = options.config or os.path.expanduser('~/.vexrc')
    vexrc = config.Vexrc.from_file(filename, environ)
    return vexrc
"Get a representation of the contents of the config file.\n\n :returns:\n a Vexrc instance.\n "
] |
def get_cwd(options):
    """Discover what directory the command should run in."""
    cwd = options.cwd
    if not cwd:
        return None
    if os.path.exists(cwd):
        return cwd
    raise exceptions.InvalidCwd(
        "can't --cwd to invalid path {0!r}".format(cwd))
"Discover what directory the command should run in.\n "
] |
def get_virtualenv_path(ve_base, ve_name):
    """Check a virtualenv path, raising exceptions to explain problems.

    Arguments:
        ve_base: directory under which virtualenvs are expected to live.
        ve_name: name of the virtualenv to locate.

    Returns:
        The absolute path of an existing virtualenv.

    Raises:
        NoVirtualenvsDirectory, InvalidVirtualenv.
    """
    if not ve_base:
        raise exceptions.NoVirtualenvsDirectory(
            "could not figure out a virtualenvs directory. "
            "make sure $HOME is set, or $WORKON_HOME,"
            " or set virtualenvs=something in your .vexrc")
    # Using this requires get_ve_base to pass through nonexistent dirs
    if not os.path.exists(ve_base):
        message = (
            "virtualenvs directory {0!r} not found. "
            "Create it or use vex --make to get started."
        ).format(ve_base)
        raise exceptions.NoVirtualenvsDirectory(message)
    if not ve_name:
        raise exceptions.InvalidVirtualenv("no virtualenv name")
    # n.b.: if ve_name is absolute, ve_base is discarded by os.path.join,
    # and an absolute path will be accepted as first arg.
    # So we check if they gave an absolute path as ve_name.
    # But we don't want this error if $PWD == $WORKON_HOME,
    # in which case 'foo' is a valid relative path to virtualenv foo.
    ve_path = os.path.join(ve_base, ve_name)
    if ve_path == ve_name and os.path.basename(ve_name) != ve_name:
        raise exceptions.InvalidVirtualenv(
            'To run in a virtualenv by its path, '
            'use "vex --path {0}"'.format(ve_path))
    ve_path = os.path.abspath(ve_path)
    if not os.path.exists(ve_path):
        raise exceptions.InvalidVirtualenv(
            "no virtualenv found at {0!r}.".format(ve_path))
    return ve_path
"Check a virtualenv path, raising exceptions to explain problems.\n "
] |
def get_command(options, vexrc, environ):
    """Get a command to run.

    :returns:
        a list of strings representing a command to be passed to Popen.
    """
    # Fall back to the configured shell when no command was given.
    command = options.rest or vexrc.get_shell(environ)
    if command and command[0].startswith('--'):
        raise exceptions.InvalidCommand(
            "don't put flags like '%s' after the virtualenv name."
            % command[0])
    if not command:
        raise exceptions.InvalidCommand("no command given")
    return command
"Get a command to run.\n\n :returns:\n a list of strings representing a command to be passed to Popen.\n "
] |
def _main(environ, argv):
    """Logic for main(), with less direct system interaction.

    Routines called here raise InvalidArgument with messages that
    should be delivered on stderr, to be caught by main.
    """
    options = get_options(argv)
    if options.version:
        return handle_version()
    vexrc = get_vexrc(options, environ)
    # Handle --shell-config as soon as its arguments are available.
    if options.shell_to_configure:
        return handle_shell_config(options.shell_to_configure, vexrc, environ)
    if options.list is not None:
        return handle_list(vexrc.get_ve_base(environ), options.list)
    # Do as much as possible before a possible make, so errors can raise
    # without leaving behind an unused virtualenv.
    # get_virtualenv_name is destructive and must happen before get_command
    cwd = get_cwd(options)
    ve_base = vexrc.get_ve_base(environ)
    ve_name = get_virtualenv_name(options)
    command = get_command(options, vexrc, environ)
    # Either we create ve_path, get it from options.path or find it
    # in ve_base.
    if options.make:
        if options.path:
            make_path = os.path.abspath(options.path)
        else:
            make_path = os.path.abspath(os.path.join(ve_base, ve_name))
        handle_make(environ, options, make_path)
        ve_path = make_path
    elif options.path:
        ve_path = os.path.abspath(options.path)
        if not os.path.exists(ve_path) or not os.path.isdir(ve_path):
            raise exceptions.InvalidVirtualenv(
                "argument for --path is not a directory")
    else:
        try:
            ve_path = get_virtualenv_path(ve_base, ve_name)
        except exceptions.NoVirtualenvName:
            options.print_help()
            raise
    # get_environ has to wait until ve_path is defined, which might
    # be after a make; of course we can't run until we have env.
    env = get_environ(environ, vexrc['env'], ve_path)
    returncode = run(command, env=env, cwd=cwd)
    if options.remove:
        handle_remove(ve_path)
    if returncode is None:
        raise exceptions.InvalidCommand(
            "command not found: {0!r}".format(command[0]))
    return returncode
"Logic for main(), with less direct system interaction.\n\n Routines called here raise InvalidArgument with messages that\n should be delivered on stderr, to be caught by main.\n "
] |
def main():
    """The main command-line entry point, with system interactions.

    Delegates to _main and reports InvalidArgument errors on stderr;
    always exits with _main's return code (or 1 on error).
    """
    argv = sys.argv[1:]
    returncode = 1
    try:
        returncode = _main(os.environ, argv)
    except exceptions.InvalidArgument as error:
        if error.message:
            sys.stderr.write("Error: " + error.message + '\n')
        else:
            raise
    sys.exit(returncode)
"The main command-line entry point, with system interactions.\n "
] |
def get_processid(config):
    """Return process id of anycast-healthchecker.

    Arguments:
        config (obj): A configparser object with the configuration of
            anycast-healthchecker.

    Returns:
        The process id found in the pid file

    Raises:
        ValueError in the following cases
        - pidfile option is missing from the configuration
        - pid is either -1 or 1
        - stale pidfile, either with no data or invalid data
        - failure to read pidfile
    """
    pidfile = config.get('daemon', 'pidfile', fallback=None)
    if pidfile is None:
        raise ValueError("Configuration doesn't have pidfile option!")
    try:
        with open(pidfile, 'r') as _file:
            pid = _file.read().rstrip()
        try:
            pid = int(pid)
        except ValueError:
            raise ValueError("stale pid file with invalid data:{}"
                             .format(pid))
        else:
            if pid in [-1, 1]:
                raise ValueError("invalid PID ({})".format(pid))
            else:
                return pid
    except OSError as exc:
        # errno 2 is ENOENT: a missing pid file means the daemon is most
        # likely down, which is reported as a CRITICAL check result.
        if exc.errno == 2:
            print("CRITICAL: anycast-healthchecker could be down as pid file "
                  "{} doesn't exist".format(pidfile))
            sys.exit(2)
        else:
            raise ValueError("error while reading pid file:{}".format(exc))
"Return process id of anycast-healthchecker.\n\n Arguments:\n config (obj): A configparser object with the configuration of\n anycast-healthchecker.\n\n Returns:\n The process id found in the pid file\n\n Raises:\n ValueError in the following cases\n - pidfile option is missing from the configuration\n - pid is either -1 or 1\n - stale pidfile, either with no data or invalid data\n - failure to read pidfile\n\n "
] |
def parse_services(config, services):
    """Parse configuration to return number of enabled service checks.

    Arguments:
        config (obj): A configparser object with the configuration of
            anycast-healthchecker.
        services (list): A list of section names which holds configuration
            for each service check

    Returns:
        A number (int) of enabled service checks.
    """
    return sum(1 for service in services
               if not config.getboolean(service, 'check_disabled'))
"Parse configuration to return number of enabled service checks.\n\n Arguments:\n config (obj): A configparser object with the configuration of\n anycast-healthchecker.\n services (list): A list of section names which holds configuration\n for each service check\n\n Returns:\n A number (int) of enabled service checks.\n\n "
] |
def main():
    """Run check.

    anycast-healthchecker is a multi-threaded software and for each
    service check it holds a thread. If a thread dies then the service
    is not monitored anymore and the route for the IP associated with the
    service won't be withdrawn in case the service goes down in the meantime.
    """
    arguments = docopt(__doc__)
    config_file = '/etc/anycast-healthchecker.conf'
    config_dir = '/etc/anycast-healthchecker.d'
    config = configparser.ConfigParser()
    config_files = [config_file]
    config_files.extend(glob.glob(os.path.join(config_dir, '*.conf')))
    config.read(config_files)
    try:
        pid = get_processid(config)
    except ValueError as exc:
        print("UNKNOWN: {e}".format(e=exc))
        sys.exit(3)
    else:
        process_up = running(pid)
    if not process_up:
        # NOTE(review): message says CRITICAL but the exit code is 3
        # (UNKNOWN in Nagios terms) — confirm whether 2 was intended.
        print("CRITICAL: anycast-healthchecker with pid ({p}) isn't running"
              .format(p=pid))
        sys.exit(3)
    services = config.sections()
    services.remove('daemon')
    if not services:
        print("UNKNOWN: No service checks are configured")
        sys.exit(3)
    enabled_service_checks = parse_services(config, services)
    if enabled_service_checks == 0:
        print("OK: Number of service checks is zero, no threads are running")
        sys.exit(0)
    else:
        # parent process plus number of threads for each service check
        configured_threads = enabled_service_checks + 1
        cmd = ['/bin/ps', 'h', '-T', '-p', '{n}'.format(n=pid)]
        try:
            if arguments['-v']:
                print("running {}".format(' '.join(cmd)))
            out = subprocess.check_output(cmd, timeout=1)
        except subprocess.CalledProcessError as exc:
            print("UNKNOWN: running '{c}' failed with return code: {r}"
                  .format(c=' '.join(cmd), r=exc.returncode))
            sys.exit(3)
        except subprocess.TimeoutExpired:
            print("UNKNOWN: running '{}' timed out".format(' '.join(cmd)))
            sys.exit(3)
        else:
            # One 'ps -T' output line per thread, including the main thread.
            output_lines = out.splitlines()
            if arguments['-v']:
                for line in output_lines:
                    print(line)
            running_threads = len(output_lines)
            if running_threads == configured_threads:
                print("OK: UP (pid={p}) and all threads ({t}) are running"
                      .format(p=pid, t=configured_threads - 1))
                sys.exit(0)
            elif running_threads - 1 == 0:  # minus parent process
                print("CRITICAL: No threads are running OpDocs ANYCAST-03")
                sys.exit(2)
            else:
                print("CRITICAL: Found {n} running threads while configured "
                      "number of threads is {c} OpDocs ANYCAST-03"
                      .format(n=running_threads - 1, c=configured_threads - 1))
                sys.exit(2)
"Run check.\n\n anycast-healthchecker is a multi-threaded software and for each\n service check it holds a thread. If a thread dies then the service\n is not monitored anymore and the route for the IP associated with service\n it wont be withdrawn in case service goes down in the meantime.\n "
] |
def scary_path(path):
    """Whitelist the WORKON_HOME strings we're willing to substitute in
    to strings that we provide for user's shell to evaluate.

    If it smells at all bad, return True.
    """
    if not path:
        return True
    assert isinstance(path, bytes)
    # Anything not matching the conservative whitelist is considered scary.
    return NOT_SCARY.match(path) is None
"Whitelist the WORKON_HOME strings we're willing to substitute in\n to strings that we provide for user's shell to evaluate.\n\n If it smells at all bad, return True.\n "
] |
def shell_config_for(shell, vexrc, environ):
    """return completion config for the named shell.

    Reads a shell snippet from the bundled 'shell_configs' directory and
    substitutes $WORKON_HOME with the configured virtualenvs directory
    when that path is safe to embed and exists.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(here, 'shell_configs', shell)
    try:
        with open(path, 'rb') as inp:
            data = inp.read()
    except FileNotFoundError as error:
        # errno 2 == ENOENT; anything else is unexpected and re-raised.
        if error.errno != 2:
            raise
        return b''
    ve_base = vexrc.get_ve_base(environ).encode('ascii')
    if ve_base and not scary_path(ve_base) and os.path.exists(ve_base):
        data = data.replace(b'$WORKON_HOME', ve_base)
    return data
"return completion config for the named shell.\n "
] |
def handle_shell_config(shell, vexrc, environ):
    """Carry out the logic of the --shell-config option."""
    from vex import shell_config
    data = shell_config.shell_config_for(shell, vexrc, environ)
    if not data:
        raise exceptions.OtherShell("unknown shell: {0!r}".format(shell))
    # Write bytes directly when stdout exposes a binary buffer.
    out = getattr(sys.stdout, 'buffer', sys.stdout)
    out.write(data)
    return 0
"Carry out the logic of the --shell-config option.\n "
] |
Please provide a description of the function:def _run_check(self):
cmd = shlex.split(self.config['check_cmd'])
self.log.info("running %s", ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
try:
outs, errs = proc.communicate(timeout=self.config['check_timeout'])
except subprocess.TimeoutExpired:
self.log.error("check timed out")
if proc.poll() is None:
try:
proc.kill()
except PermissionError:
self.log.warning("failed to kill check due to adequate "
"access rights, check could be running "
"under another user(root) via sudo")
return False
else:
msg = "check duration {t:.3f}ms".format(
t=(time.time() - start_time) * 1000)
self.log.info(msg)
if proc.returncode != 0:
self.log.info("stderr from the check %s", errs)
self.log.info("stdout from the check %s", outs)
return proc.returncode == 0 | [
"Execute a check command.\n\n Returns:\n True if the exit code of the command is 0 otherwise False.\n\n "
] |
def _ip_assigned(self):
    """Check if the IP prefix is assigned to the loopback interface.

    Returns:
        True if the IP prefix is found assigned, otherwise False.
    """
    if self.ip_check_disabled:
        self.log.info("checking for IP assignment on interface %s is "
                      "disabled", self.config['interface'])
        return True
    cmd = [
        '/sbin/ip', 'address', 'show', 'dev', self.config['interface'],
        'to', self.ip_with_prefixlen,
    ]
    self.log.debug("running %s", ' '.join(cmd))
    try:
        output = subprocess.check_output(cmd,
                                         universal_newlines=True,
                                         timeout=1)
    except subprocess.CalledProcessError as error:
        self.log.error("error checking IP-PREFIX %s: %s", cmd, error.output)
        # Because it is unlikely to ever get an error we return True
        return True
    except subprocess.TimeoutExpired:
        self.log.error("timeout running %s", ' '.join(cmd))
        # Because it is unlikely to ever get a timeout we return True
        return True
    except ValueError as error:
        # We have been getting intermittent ValueErrors, see here
        # gist.github.com/unixsurfer/67db620d87f667423f6f6e3a04e0bff5
        # It could be a bug in Python or the system returning corrupted
        # data.  If the exception propagated, the thread would die and the
        # service would no longer be monitored, so we swallow it and
        # assume the IP is assigned.  A retry logic could be a more proper
        # solution.
        self.log.error("running %s raised ValueError exception:%s",
                       ' '.join(cmd), error)
        return True
    if self.ip_with_prefixlen in output:  # pylint: disable=E1135
        self.log.debug("{i} assigned to loopback interface".format(
            i=self.ip_with_prefixlen))
        return True
    self.log.warning("{i} isn't assigned to {d} interface".format(
        i=self.ip_with_prefixlen, d=self.config['interface']))
    return False
Please provide a description of the function:def _check_disabled(self):
if self.config['check_disabled']:
if self.config['on_disabled'] == 'withdraw':
self.log.info("Check is disabled and ip_prefix will be "
"withdrawn")
self.log.info("adding %s in the queue", self.ip_with_prefixlen)
self.action.put(self.del_operation)
self.log.info("Check is now permanently disabled")
elif self.config['on_disabled'] == 'advertise':
self.log.info("check is disabled, ip_prefix wont be withdrawn")
self.log.info("adding %s in the queue", self.ip_with_prefixlen)
self.action.put(self.add_operation)
self.log.info('check is now permanently disabled')
return True
return False | [
"Check if health check is disabled.\n\n It logs a message if health check is disabled and it also adds an item\n to the action queue based on 'on_disabled' setting.\n\n Returns:\n True if check is disabled otherwise False.\n\n "
] |
def run(self):
    """Wrap _run so that any exception raised in this thread is reported
    to the parent process via the action queue."""
    try:
        self._run()
    except Exception:  # pylint: disable=broad-except
        # Let the parent know this service check died.
        died = ServiceCheckDiedError(self.name, traceback.format_exc())
        self.action.put(died)
def _run(self):
    """Discover the health of a service.

    Runs until killed by the main program and puts items into the action
    queue based on the status of the health check.  The service is
    considered UP only after `check_rise` consecutive successful checks
    (and DOWN after `check_fail` consecutive failures); only then is an
    add/delete operation queued, so a flapping check does not cause
    unnecessary BIRD configuration reloads.
    """
    up_cnt = 0
    down_cnt = 0
    # The current established state of the service check, it can be
    # either UP or DOWN but only after a number of consecutive successful
    # or unsuccessful health checks.
    check_state = 'Unknown'
    for key, value in self.config.items():
        self.log.debug("%s=%s:%s", key, value, type(value))
    # Service check will abort if it is disabled.
    if self._check_disabled():
        return
    if self.splay_startup is not None:
        sleep_time = float("%.3f" % random.uniform(0, self.splay_startup))
        self.log.info("delaying startup for %ssecs", sleep_time)
        time.sleep(sleep_time)
    interval = self.config['check_interval']
    start_offset = time.time() % interval
    # Go in a loop until we are told to stop
    while True:
        timestamp = time.time()
        if not self._ip_assigned():
            up_cnt = 0
            self.extra['status'] = 'down'
            self.log.warning("status DOWN because %s isn't assigned to "
                             "loopback interface.",
                             self.ip_with_prefixlen,
                             extra=self.extra)
            if check_state != 'DOWN':
                check_state = 'DOWN'
                self.log.info("adding %s in the queue",
                              self.ip_with_prefixlen,
                              extra=self.extra)
                self.action.put(self.del_operation)
        elif self._run_check():
            if up_cnt == (self.config['check_rise'] - 1):
                self.extra['status'] = 'up'
                self.log.info("status UP", extra=self.extra)
                # Service exceeded all consecutive checks. Set its state
                # accordingly and put an item in queue. But do it only if
                # previous state was different, to prevent unnecessary bird
                # reloads when a service flaps between states.
                if check_state != 'UP':
                    check_state = 'UP'
                    self.log.info("adding %s in the queue",
                                  self.ip_with_prefixlen,
                                  extra=self.extra)
                    self.action.put(self.add_operation)
            elif up_cnt < self.config['check_rise']:
                up_cnt += 1
                self.log.info("going up %s", up_cnt, extra=self.extra)
            else:
                self.log.error("up_cnt is higher %s, it's a BUG!",
                               up_cnt,
                               extra=self.extra)
            down_cnt = 0
        else:
            if down_cnt == (self.config['check_fail'] - 1):
                self.extra['status'] = 'down'
                self.log.info("status DOWN", extra=self.extra)
                # Service exceeded all consecutive checks.
                # Set its state accordingly and put an item in queue.
                # But do it only if previous state was different, to
                # prevent unnecessary bird reloads when a service flaps
                # between states
                if check_state != 'DOWN':
                    check_state = 'DOWN'
                    self.log.info("adding %s in the queue",
                                  self.ip_with_prefixlen,
                                  extra=self.extra)
                    self.action.put(self.del_operation)
            elif down_cnt < self.config['check_fail']:
                down_cnt += 1
                self.log.info("going down %s", down_cnt, extra=self.extra)
            else:
                # BUG FIX: this branch guards down_cnt; the original
                # logged up_cnt here, hiding the actual faulty counter.
                self.log.error("down_cnt is higher %s, it's a BUG!",
                               down_cnt,
                               extra=self.extra)
            up_cnt = 0
        self.log.info("wall clock time %.3fms",
                      (time.time() - timestamp) * 1000,
                      extra=self.extra)
        # Sleep so that checks stay aligned to the configured interval.
        sleep = start_offset - time.time() % interval
        if sleep < 0:
            sleep += interval
        self.log.debug("sleeping for %.3fsecs", sleep, extra=self.extra)
        time.sleep(sleep)
def main():
    """Parse CLI arguments and start the main program."""
    args = docopt(__doc__, version=__version__)

    def dump_options(options):
        # Print options as INI-style sections.
        for section in options:
            print("[{}]".format(section))
            for key, value in options[section].items():
                print("{k} = {v}".format(k=key, v=value))
            print()

    if args['--print']:
        dump_options(DEFAULT_OPTIONS)
        sys.exit(0)
    try:
        config, bird_configuration = load_configuration(args['--file'],
                                                        args['--dir'],
                                                        args['--service-file'])
    except ValueError as exc:
        sys.exit('Invalid configuration: ' + str(exc))
    if args['--check']:
        print("OK")
        sys.exit(0)
    if args['--print-conf']:
        dump_options(config)
        sys.exit(0)
    # Ensure only one instance runs by binding an abstract namespace socket.
    try:
        lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        lock_socket.bind('\0' + "{}".format(PROGRAM_NAME))
    except socket.error as exc:
        sys.exit("failed to acquire a lock by creating an abstract namespace"
                 " socket: {}".format(exc))
    else:
        print("acquired a lock by creating an abstract namespace socket: {}"
              .format(lock_socket))
    # Clean old pidfile, if it exists, and write PID to it.
    pidfile = config.get('daemon', 'pidfile')
    update_pidfile(pidfile)
    # Register our shutdown handler to various termination signals.
    shutdown_handler = partial(shutdown, pidfile)
    for signum in (signal.SIGHUP, signal.SIGTERM, signal.SIGABRT,
                   signal.SIGINT):
        signal.signal(signum, shutdown_handler)
    # Set up loggers.
    logger = setup_logger(config)
    # Perform a sanity check on IP-Prefixes
    ip_prefixes_sanity_check(config, bird_configuration)
    # Create our master process.
    checker = healthchecker.HealthChecker(config, bird_configuration)
    logger.info("starting %s version %s", PROGRAM_NAME, __version__)
    checker.run()
def get_environ(environ, defaults, ve_path):
    """Make an environment to run the virtualenv with.

    Copies *environ*, applies *defaults* from .vexrc, drops PYTHONHOME and
    puts the virtualenv's bin directory first on PATH.  Raises BadConfig
    if *ve_path* is empty or PATH disagrees with an active VIRTUAL_ENV.
    """
    env = environ.copy()
    env.update(defaults)
    # Leaving in existing PYTHONHOME can cause some errors
    env.pop('PYTHONHOME', None)
    # PATH being unset/empty is OK, but ve_path must be set
    # or there is nothing for us to do here and it's bad.
    if not ve_path:
        raise exceptions.BadConfig('ve_path must be set')
    subdir = 'Scripts' if platform.system() == 'Windows' else 'bin'
    ve_bin = os.path.join(ve_path, subdir)
    # If the user is currently inside a virtualenv, DON'T just prepend to
    # its PATH (vex foo; echo $PATH -> " /foo/bin:/bar/bin"); 'activate'
    # avoids this by running 'deactivate' first, which we don't have.
    current_ve = env.get('VIRTUAL_ENV', '')
    system_path = environ.get('PATH', '')
    segments = system_path.split(os.pathsep)
    if current_ve:
        # Since activate doesn't export _OLD_VIRTUAL_PATH, manually remove
        # the active virtualenv's bin.  A virtualenv's bin should not
        # normally be on PATH except via activate or similar.
        current_ve_bin = os.path.join(current_ve, 'bin')
        try:
            segments.remove(current_ve_bin)
        except ValueError:
            raise exceptions.BadConfig(
                "something set VIRTUAL_ENV prior to this vex execution, "
                "implying that a virtualenv is already activated "
                "and PATH should contain the virtualenv's bin directory. "
                "Unfortunately, it doesn't: it's {0!r}. "
                "You might want to check that PATH is not "
                "getting clobbered somewhere, e.g. in your shell's configs."
                .format(system_path)
            )
    segments.insert(0, ve_bin)
    env['PATH'] = os.pathsep.join(segments)
    env['VIRTUAL_ENV'] = ve_path
    return env
def run(command, env, cwd):
    """Run the given command in the given environment/directory.

    Returns the command's exit status, or None if it could not be found.
    """
    assert command
    if cwd:
        assert os.path.exists(cwd)
    if platform.system() == "Windows":
        # Windows needs the executable resolved against the modified PATH.
        exe = distutils.spawn.find_executable(command[0], path=env['PATH'])
        if exe:
            command[0] = exe
    command_name = os.path.basename(command[0])
    # Stop virtualenvwrapper's hooks from fighting with our environment.
    if (command_name in ('bash', 'zsh')
            and 'VIRTUALENVWRAPPER_PYTHON' not in env):
        env['VIRTUALENVWRAPPER_PYTHON'] = ':'
    try:
        process = subprocess.Popen(command, env=env, cwd=cwd)
        process.wait()
    except exceptions.CommandNotFoundError as error:
        if error.errno != 2:
            raise
        return None
    return process.returncode
def extract_key_value(line, environ):
    """Return (key, value) from the given line if present, else None.

    Value handling:
      foo     passes through as-is (with spaces stripped)
      '{foo}' passes through literally
      "{foo}" substitutes foo from environ
    """
    segments = line.split("=", 1)
    if len(segments) < 2:
        return None
    key, value = segments
    value = value.strip()
    # BUG FIX: guard against empty values ("key=") before peeking at
    # value[0], which used to raise IndexError.
    if value and value[0] == "'" and _SQUOTE_RE.match(value):
        value = value[1:-1]
    elif value and value[0] == '"' and _DQUOTE_RE.match(value):
        template = value[1:-1]
        value = template.format(**environ)
    key = key.strip()
    value = value.strip()
    return key, value
def parse_vexrc(inp, environ):
    """Iterate key/value pairs from the given stream.

    Yields (heading, key, value) tuples.  Raises InvalidConfigError at the
    end if any non-empty lines could not be parsed.
    """
    heading = None
    errors = []
    with inp:
        for line_number, line in enumerate(inp):
            line = line.decode("utf-8")
            if not line.strip():
                continue
            new_heading = extract_heading(line)
            if new_heading is not None:
                heading = new_heading
                continue
            pair = extract_key_value(line, environ)
            if pair is None:
                errors.append((line_number, line))
                continue
            try:
                yield heading, pair[0], pair[1]
            except GeneratorExit:
                # Consumer closed us early; stop reading.
                break
    if errors:
        raise InvalidConfigError(inp.name, errors)
def from_file(cls, path, environ):
    """Make a Vexrc instance from the given file in the given environ."""
    vexrc = cls()
    vexrc.read(path, environ)
    return vexrc
def read(self, path, environ):
    """Read data from the file at *path* into this vexrc instance.

    Missing files are silently ignored (returns None).
    """
    try:
        inp = open(path, 'rb')
    except FileNotFoundError as error:
        if error.errno != 2:
            raise
        return None
    parsing = parse_vexrc(inp, environ)
    for heading, key, value in parsing:
        if heading is None:
            heading = self.default_heading
        self.headings.setdefault(heading, OrderedDict())[key] = value
    parsing.close()
def get_ve_base(self, environ):
    """Find a directory to look for virtualenvs in.

    Checked in order: .vexrc, $WORKON_HOME (as defined for
    virtualenvwrapper's benefit), then $HOME/.virtualenvs.
    Invalid paths are passed through so messages can be generated.
    """
    # 1. .vexrc
    configured = self.headings[self.default_heading].get('virtualenvs')
    if configured:
        return os.path.expanduser(configured)
    # 2. WORKON_HOME
    workon_home = environ.get('WORKON_HOME', '')
    if workon_home:
        return workon_home
    # 3. $HOME/.virtualenvs
    # On Cygwin os.name == 'posix' and we want $HOME.
    if platform.system() == 'Windows' and os.name == 'nt':
        home = environ.get('HOMEPATH', '')
        if home:
            home = os.path.join(environ.get('HOMEDRIVE'), home)
    else:
        home = environ.get('HOME', '')
    if not home:
        home = os.path.expanduser('~')
    if not home:
        return ''
    return os.path.join(home, '.virtualenvs')
def get_shell(self, environ):
    """Find a shell command to run, as an argument list (or None)."""
    command = self.headings[self.default_heading].get('shell')
    if not command and os.name != 'nt':
        # Fall back to the user's login shell on POSIX.
        command = environ.get('SHELL', '')
    return shlex.split(command) if command else None
def files(self):
    """Yield relative file paths specified in metainfo.

    Each path starts with the torrent's name.  Note that the paths may
    not exist; see ``filepaths`` for existing files.
    """
    info = self.metainfo['info']
    if 'length' in info:
        # Singlefile torrent: the name is the only file.
        yield info['name']
    elif 'files' in info:
        # Multifile torrent: all paths are relative to the torrent name.
        for fileinfo in info['files']:
            yield os.path.join(self.name, *fileinfo['path'])
def filepaths(self):
    """Yield absolute paths to existing files under ``path``.

    Files matching ``exclude`` as well as hidden and empty files are
    skipped.  Yields nothing when ``path`` is None.
    """
    if self.path is None:
        return
    yield from utils.filepaths(self.path, exclude=self.exclude,
                               hidden=False, empty=False)
def filetree(self):
    """Return ``files`` as a nested dictionary tree.

    Each node maps directory/file names to child nodes; directories map
    to dicts and files map to None.  Empty when there are no files.
    """
    tree = {}
    for filepath in self.files:
        parts = filepath.split(os.sep)
        node = tree
        # Walk/create the directory nodes, then attach the file leaf.
        for dirname in parts[:-1]:
            node = node.setdefault(dirname, {})
        node[parts[-1]] = None
    return tree
def size(self):
    """Total size of content in bytes, or None if unknown."""
    info = self.metainfo['info']
    if 'length' in info:
        # Singlefile torrent
        return info['length']
    if 'files' in info:
        # Multifile torrent: sum the individual file lengths.
        return sum(fileinfo['length'] for fileinfo in info['files'])
def piece_size(self):
    """Piece size/length, or None.

    When 'piece length' is missing it is calculated first, unless the
    total size is unknown, in which case None is returned.
    """
    info = self.metainfo['info']
    if 'piece length' not in info:
        if self.size is None:
            return None
        self.calculate_piece_size()
    return info['piece length']
def calculate_piece_size(self):
    """Calculate and store 'piece length' in metainfo['info'].

    The piece size targets at most MAX_PIECES pieces within the
    [MIN_PIECE_SIZE, MAX_PIECE_SIZE] bounds.  Raises RuntimeError if the
    total size is unknown.
    """
    total = self.size
    if not total:
        raise RuntimeError(f'Cannot calculate piece size with no "path" specified')
    self.metainfo['info']['piece length'] = utils.calc_piece_size(
        total, self.MAX_PIECES, self.MIN_PIECE_SIZE, self.MAX_PIECE_SIZE)
def pieces(self):
    """Number of pieces the content is split into, or None if the piece
    size is unknown."""
    piece_size = self.piece_size
    if piece_size is None:
        return None
    return math.ceil(self.size / piece_size)
def name(self):
    """Name of the torrent.

    Defaults to the last item of ``path``; None when both are unset.
    """
    info = self.metainfo['info']
    if 'name' not in info and self.path is not None:
        # Derive the name from the content path.
        info['name'] = os.path.basename(self.path)
    return info.get('name', None)
def trackers(self):
    """List of tiers of announce URLs, or None for no trackers.

    Falls back to a single tier built from 'announce' when
    'announce-list' is missing or empty.
    """
    tiers = self.metainfo.get('announce-list', None)
    if tiers:
        return tiers
    announce = self.metainfo.get('announce', None)
    if announce:
        return [[announce]]
    return None
def infohash(self):
    """SHA1 info hash (hex digest)."""
    self.validate()
    # Only the bencoded 'info' dictionary is hashed.
    return sha1(bencode(self.convert()[b'info'])).hexdigest()
def infohash_base32(self):
    """Base32 encoded SHA1 info hash."""
    self.validate()
    # Only the bencoded 'info' dictionary is hashed.
    return b32encode(sha1(bencode(self.convert()[b'info'])).digest())
def generate(self, callback=None, interval=0):
    """Hash pieces and report progress to *callback*.

    Sets 'pieces' in metainfo['info'] when all pieces are hashed
    successfully.  *callback* is called with (torrent, filepath,
    pieces_done, pieces_total); returning anything other than None
    cancels hashing.  *interval* is the minimum number of seconds between
    callback calls (0 calls back once per piece).

    Raises PathEmptyError/PathNotFoundError/ReadError as appropriate.
    Returns True if all pieces were hashed, False if canceled.
    """
    if self.path is None:
        raise RuntimeError('generate() called with no path specified')
    elif self.size <= 0:
        raise error.PathEmptyError(self.path)
    elif not os.path.exists(self.path):
        raise error.PathNotFoundError(self.path)

    def canceled(*status):
        # Without a callback nobody can cancel us.
        if callback is None:
            return False
        return callback(*status) is not None

    if os.path.isfile(self.path):
        pieces = self._set_pieces_singlefile()
    elif os.path.isdir(self.path):
        pieces = self._set_pieces_multifile()
    # Iterate over hashed pieces and report status at most every
    # `interval` seconds; the final piece is always reported.
    last_call = 0
    for filepath, pieces_done, pieces_total in pieces:
        now = time.time()
        if now - last_call >= interval or pieces_done >= pieces_total:
            last_call = now
            if canceled(self, filepath, pieces_done, pieces_total):
                return False
    return True
def convert(self):
    """Return metainfo with keys as bytes and values encoded to
    bencode-compatible types (bytes, int, list, OrderedDict).

    Raises MetainfoError on values that cannot be converted properly.
    """
    try:
        return utils.encode_dict(self.metainfo)
    except ValueError as e:
        raise error.MetainfoError(str(e))
def validate(self):
    """Check that all mandatory metainfo keys exist and have the expected
    types, and that file sizes match the local file system when ``path``
    is set.

    See http://bittorrent.org/beps/bep_0003.html for the mandatory
    values.  'announce' is not considered mandatory because clients can
    find peers via DHT.

    Raises MetainfoError if the metainfo would not generate a valid
    torrent file or magnet link.
    """
    md = self.metainfo
    info = md['info']
    # Check values shared by singlefile and multifile torrents
    utils.assert_type(md, ('info', 'name'), (str,), must_exist=True)
    utils.assert_type(md, ('info', 'piece length'), (int,), must_exist=True)
    utils.assert_type(md, ('info', 'pieces'), (bytes, bytearray), must_exist=True)
    if 'length' in info and 'files' in info:
        raise error.MetainfoError("['info'] includes both 'length' and 'files'")
    elif 'length' in info:
        # Validate info as singlefile torrent
        utils.assert_type(md, ('info', 'length'), (int, float), must_exist=True)
        utils.assert_type(md, ('info', 'md5sum'), (str,), must_exist=False, check=utils.is_md5sum)
        if self.path is not None:
            # Check if filepath actually points to a file
            if not os.path.isfile(self.path):
                raise error.MetainfoError(f"Metainfo includes {self.path} as file, but it is not a file")
            # Check if size matches
            if os.path.getsize(self.path) != info['length']:
                raise error.MetainfoError(f"Mismatching file sizes in metainfo ({info['length']})"
                                          f" and local file system ({os.path.getsize(self.path)}): "
                                          f"{self.path!r}")
    elif 'files' in info:
        # Validate info as multifile torrent
        utils.assert_type(md, ('info', 'files'), (list,), must_exist=True)
        for i,fileinfo in enumerate(info['files']):
            utils.assert_type(md, ('info', 'files', i, 'length'), (int, float), must_exist=True)
            utils.assert_type(md, ('info', 'files', i, 'path'), (list,), must_exist=True)
            utils.assert_type(md, ('info', 'files', i, 'md5sum'), (str,), must_exist=False,
                              check=utils.is_md5sum)
        if self.path is not None:
            # Check if filepath actually points to a directory
            if not os.path.isdir(self.path):
                raise error.MetainfoError(f"Metainfo includes {self.path} as directory, but it is not a directory")
            for i,fileinfo in enumerate(info['files']):
                for j,item in enumerate(fileinfo['path']):
                    utils.assert_type(md, ('info', 'files', i, 'path', j), (str,))
                filepath = os.path.join(self.path, os.path.join(*fileinfo['path']))
                # Check if filepath exists and is a file
                # (error messages: fixed "inclues" -> "includes" typos)
                if not os.path.exists(filepath):
                    raise error.MetainfoError(f"Metainfo includes file that doesn't exist: {filepath!r}")
                if not os.path.isfile(filepath):
                    raise error.MetainfoError(f"Metainfo includes non-file: {filepath!r}")
                # Check if sizes match
                if os.path.getsize(filepath) != fileinfo['length']:
                    raise error.MetainfoError(f"Mismatching file sizes in metainfo ({fileinfo['length']})"
                                              f" and local file system ({os.path.getsize(filepath)}): "
                                              f"{filepath!r}")
    else:
        raise error.MetainfoError("Missing 'length' or 'files' in metainfo")
def dump(self, validate=True):
    """Return the bencoded metainfo (the content of a torrent file).

    Runs ``validate`` first unless *validate* is False.
    """
    if validate:
        self.validate()
    return bencode(self.convert())
def write_stream(self, stream, validate=True):
    """Write metainfo to a writable file-like object.

    Existing data in *stream* is truncated (if seekable) before writing.
    Raises WriteError if writing fails and MetainfoError if *validate* is
    True and the metainfo is invalid.
    """
    # Serialize first so an invalid metainfo cannot destroy the stream.
    content = self.dump(validate=validate)
    try:
        if stream.seekable():
            stream.seek(0)
            stream.truncate(0)
        stream.write(content)
    except OSError as e:
        raise error.WriteError(e.errno)
def write(self, filepath, validate=True, overwrite=False, mode=0o666):
    """Write metainfo to a torrent file.

    filepath:  path of the torrent file
    validate:  whether to run ``validate`` first
    overwrite: whether to silently overwrite an existing *filepath*
    mode:      permission bits applied when *filepath* is created
               (modulo the process umask)

    Raises WriteError if writing fails and MetainfoError if *validate*
    is True and the metainfo is invalid.
    """
    if not overwrite and os.path.exists(filepath):
        raise error.WriteError(errno.EEXIST, filepath)
    # Serialize before touching the file so errors (e.g. incomplete
    # metainfo) can't leave a half-written torrent behind.
    content = io.BytesIO()
    self.write_stream(content, validate=validate)
    content.seek(0)
    try:
        # BUG FIX: use os.open() so `mode` actually takes effect;
        # plain open() silently ignored the documented parameter.
        fd = os.open(filepath, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
        with os.fdopen(fd, 'wb') as f:
            f.write(content.read())
    except OSError as e:
        raise error.WriteError(e.errno, filepath)
def magnet(self, name=True, size=True, trackers=True, tracker=False, validate=True):
    """Return a BTIH magnet URI.

    name/size/trackers toggle the corresponding URI parts; *tracker*
    limits the URI to the first tracker of the first tier (overriding
    *trackers*).  Runs ``validate`` first unless *validate* is False.
    """
    if validate:
        self.validate()
    parts = [f'xt=urn:btih:{self.infohash}']
    if name:
        parts.append(f'dn={utils.urlquote(self.name)}')
    if size:
        parts.append(f'xl={self.size}')
    if self.trackers is not None:
        if tracker:
            # Only the primary tracker of the first tier
            parts.append(f'tr={utils.urlquote(self.trackers[0][0])}')
        elif trackers:
            parts.extend(f'tr={utils.urlquote(url)}'
                         for tier in self.trackers
                         for url in tier)
    return 'magnet:?' + '&'.join(parts)
def read_stream(cls, stream, validate=True):
    """Read torrent metainfo from a readable file-like object.

    Raises ReadError on read failure, ParseError on invalid bencoding
    and MetainfoError (when *validate* is True) on invalid metainfo.
    Returns a new Torrent object.
    """
    try:
        content = stream.read(cls.MAX_TORRENT_FILE_SIZE)
    except OSError as e:
        raise error.ReadError(e.errno)
    try:
        metainfo_enc = bdecode(content)
    except BTFailure:
        raise error.ParseError()
    if validate:
        if b'info' not in metainfo_enc:
            raise error.MetainfoError("Missing 'info'")
        elif not isinstance(metainfo_enc[b'info'], abc.Mapping):
            raise error.MetainfoError("'info' is not a dictionary")
        elif b'pieces' not in metainfo_enc[b'info']:
            raise error.MetainfoError("Missing 'pieces' in ['info']")
    # 'pieces' is the only byte string that must not be decoded to
    # unicode, so take it out before decoding and put it back afterwards.
    if b'info' in metainfo_enc and b'pieces' in metainfo_enc[b'info']:
        pieces = metainfo_enc[b'info'].pop(b'pieces')
        metainfo = utils.decode_dict(metainfo_enc)
        metainfo['info']['pieces'] = pieces
    else:
        metainfo = utils.decode_dict(metainfo_enc)
    torrent = cls()
    torrent._metainfo = metainfo
    # Convert some values from official types to something nicer
    # (e.g. int -> datetime)
    for attr in ('creation_date', 'private'):
        setattr(torrent, attr, getattr(torrent, attr))
    # Auto-set 'include_md5' based on whether md5sums are present
    info = torrent.metainfo['info']
    torrent.include_md5 = ('length' in info and 'md5sum' in info) or \
                          ('files' in info and all('md5sum' in fileinfo
                                                   for fileinfo in info['files']))
    if validate:
        torrent.validate()
    return torrent
def read(cls, filepath, validate=True):
    """Read torrent metainfo from a torrent file.

    Raises ReadError if *filepath* can't be read, ParseError if it
    doesn't contain valid bencoding and MetainfoError if *validate* is
    True and the metainfo is invalid.  Returns a new Torrent object.
    """
    try:
        with open(filepath, 'rb') as fh:
            # BUG FIX: forward `validate` — previously it was silently
            # dropped and read_stream always validated.
            return cls.read_stream(fh, validate=validate)
    except (OSError, error.ReadError) as e:
        raise error.ReadError(e.errno, filepath)
    except error.ParseError:
        raise error.ParseError(filepath)
def copy(self):
    """Return a new instance with a deep copy of the same metainfo."""
    from copy import deepcopy
    duplicate = type(self)()
    duplicate._metainfo = deepcopy(self._metainfo)
    return duplicate
def validated_url(url):
    """Return *url* if it is a well-formed URL, raise URLError otherwise."""
    try:
        parts = urlparse(url)
        # Accessing .port forces parsing; an invalid port raises ValueError.
        parts.port
    except Exception:
        raise error.URLError(url)
    # A usable URL needs at least a scheme and a network location.
    if not (parts.scheme and parts.netloc):
        raise error.URLError(url)
    return url
"Return url if valid, raise URLError otherwise"
] |
def read_chunks(filepath, chunk_size):
    """
    Generator that yields successive chunks of at most *chunk_size* bytes
    read from *filepath*.

    :raises ReadError: if opening or reading the file fails
    """
    try:
        with open(filepath, 'rb') as fh:
            # iter() with a sentinel stops cleanly at EOF (read() -> b'').
            for chunk in iter(lambda: fh.read(chunk_size), b''):
                yield chunk
    except OSError as exc:
        raise error.ReadError(exc.errno, filepath)
"Generator that yields chunks from file"
] |
def calc_piece_size(total_size, max_pieces, min_piece_size, max_piece_size):
    """
    Calculate a power-of-two piece size for *total_size* bytes.

    The smallest power of two that keeps the piece count at or below
    *max_pieces* is chosen, then clamped into
    [*min_piece_size*, *max_piece_size*] (the upper bound wins last,
    matching the original clamp order).
    """
    exponent = max(0, math.ceil(math.log(total_size / max_pieces, 2)))
    size = 2 ** exponent
    return min(max(size, min_piece_size), max_piece_size)
"Calculate piece size"
] |
def is_power_of_2(num):
    """
    Return whether `num` is a power of two.

    Fixes over the log2-based check:
    - `num <= 0` returns False instead of raising ValueError from math.log2.
    - Large integers are tested exactly with the `n & (n - 1)` bit trick;
      float precision made math.log2(2**100 - 1) come out as exactly 100.0,
      falsely reporting non-powers as powers of two.
    Non-integer inputs keep the log-based test so fractional powers of two
    (e.g. 0.5) are still recognized.
    """
    if num <= 0:
        return False
    if isinstance(num, int):
        # Exact for arbitrarily large ints: powers of two have one set bit.
        return num & (num - 1) == 0
    log = math.log2(num)
    return int(log) == float(log)
"Return whether `num` is a power of two"
] |
def is_hidden(path):
    """Whether any component of *path* is a hidden file or directory.

    A component counts as hidden when it starts with '.' but is neither
    the '.' nor the '..' pseudo-directory.
    """
    return any(part.startswith('.') and part not in ('.', '..')
               for part in path.split(os.sep))
"Whether file or directory is hidden"
] |
def filepaths(path, exclude=(), hidden=True, empty=True):
    """
    Return list of absolute-ish file paths under *path*, sorted
    case-insensitively.

    path: Path to a file or directory
    exclude: Iterable of file name patterns to skip (matched with is_match)
    hidden: Whether hidden files and directories are included
    empty: Whether zero-byte files are included

    Raise PathNotFoundError if *path* doesn't exist, ReadError if it is
    not readable.
    """
    if not os.path.exists(path):
        raise error.PathNotFoundError(path)
    use_eids = os.access in os.supports_effective_ids
    if not os.access(path, os.R_OK, effective_ids=use_eids):
        raise error.ReadError(errno.EACCES, path)

    if os.path.isfile(path):
        return [path]

    found = []
    for dirpath, dirnames, filenames in os.walk(path):
        # Skip every file beneath a hidden directory (is_hidden inspects
        # all path components, so nested children are also skipped).
        if not hidden and is_hidden(dirpath):
            continue
        for filename in filenames:
            if not hidden and is_hidden(filename):
                continue
            filepath = os.path.join(dirpath, filename)
            if any(is_match(filepath, pattern) for pattern in exclude):
                continue
            # Optionally drop zero-byte files; resolve symlinks first so
            # the size check applies to the real target.
            if not empty and os.path.getsize(os.path.realpath(filepath)) <= 0:
                continue
            found.append(filepath)
    return sorted(found, key=str.casefold)
"\n Return list of absolute, sorted file paths\n\n path: Path to file or directory\n exclude: List of file name patterns to exclude\n hidden: Whether to include hidden files\n empty: Whether to include empty files\n\n Raise PathNotFoundError if path doesn't exist.\n "
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.