code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def update(self, sent):
self.offset = sent
now = datetime.datetime.now()
elapsed = (now - self.startTime).total_seconds()
if elapsed > 0:
mbps = (sent * 8 / (10 ** 6)) / elapsed
else:
mbps = None
self._display(sent, now, self.name, mbps) | Update self and parent with intermediate progress. |
def _display(self, sent, now, chunk, mbps):
    """Display intermediate progress on one overwritten terminal line.

    :arg sent: total bytes sent so far.
    :arg now: current datetime, used for elapsed/ETA math.
    :arg chunk: name of the chunk being transferred (may be None).
    :arg mbps: measured throughput in megabits/second, or None.
    """
    if self.parent is not None:
        # Delegate to the top-most progress object; only the root writes.
        self.parent._display(self.parent.offset + sent, now, chunk, mbps)
        return
    elapsed = now - self.startTime
    if sent > 0 and self.total is not None and sent <= self.total:
        eta = (self.total - sent) * elapsed.total_seconds() / sent
        eta = datetime.timedelta(seconds=eta)
    else:
        eta = None
    # Leading and trailing \r keep the status on a single overwritten line.
    self.output.write(
        "\r %s: Sent %s%s%s ETA: %s (%s) %s%20s\r" % (
            elapsed,
            util.humanize(sent),
            "" if self.total is None else " of %s" % (util.humanize(self.total),),
            "" if self.total is None else " (%d%%)" % (int(100 * sent / self.total),),
            eta,
            "" if not mbps else "%.3g Mbps " % (mbps,),
            chunk or "",
            " ",  # %20s pad clears leftovers from longer previous lines
        )
    )
    self.output.flush()
def close(self):
if self.parent:
self.parent.update(self.parent.offset + self.offset)
return
self.output.write("\n")
self.output.flush() | Stop overwriting display, or update parent. |
def transfer(sendContext, receiveContext, chunkSize):
    """Transfer (large) data from sender to receiver.

    :arg sendContext: context manager yielding a readable stream, or None.
    :arg receiveContext: context manager yielding a writable stream, or None.
    :arg chunkSize: bytes per chunk; overridden by the receiver's preferred
        chunkSize when it advertises one.
    """
    try:
        chunkSize = receiveContext.chunkSize
    except AttributeError:
        pass
    if sendContext is not None and receiveContext is not None:
        with receiveContext as writer:
            # Open reader after writer,
            # so any raised errors will abort write before writer closes.
            with sendContext as reader:
                # If the writer can skip known chunks, prefer asking the
                # reader for a checksum *before* reading the data.
                checkBefore = None
                if hasattr(writer, 'skipChunk'):
                    checkBefore = hasattr(reader, 'checkSum')
                while True:
                    if checkBefore is True:
                        (size, checkSum) = reader.checkSum(chunkSize)
                        if writer.skipChunk(size, checkSum):
                            reader.seek(size, io.SEEK_CUR)
                            continue
                    data = reader.read(chunkSize)
                    if len(data) == 0:
                        break
                    if checkBefore is False:
                        # Reader can't checksum ahead; hash after reading.
                        checkSum = hashlib.md5(data).hexdigest()
                        if writer.skipChunk(len(data), checkSum, data):
                            continue
                    writer.write(data)
def display(obj, detail='phrase'):
    """Friendly string for obj, using its display() method when available.

    :arg detail: detail level passed through to obj.display().
    :returns: obj.display(detail=detail), or str(obj) as a fallback.
    """
    # Look up the method first so an AttributeError raised *inside*
    # display() is not silently masked by the str() fallback.
    method = getattr(obj, 'display', None)
    if method is None:
        return str(obj)
    return method(detail=detail)
def _printUUID(uuid, detail='word'):
if not isinstance(detail, int):
detail = detailNum[detail]
if detail > detailNum['word']:
return uuid
if uuid is None:
return None
return "%s...%s" % (uuid[:4], uuid[-4:]) | Return friendly abbreviated string for uuid. |
def skipDryRun(logger, dryRun, level=logging.DEBUG):
    """Return a logging function.

    When the returned function is called, it returns True if the action
    should be skipped.  The log line will indicate if it was skipped
    because of a dry run.

    :arg logger: logger to write through.
    :arg dryRun: when True, route through _logDryRun so callers skip work.
    :arg level: numeric level or level name (e.g. 'INFO').
    """
    # Undocumented "feature" of the logging module: logging.log() requires
    # a numeric level, and logging.getLevelName() maps names to numbers.
    if not isinstance(level, int):
        level = logging.getLevelName(level)
    if dryRun:
        return functools.partial(_logDryRun, logger, level)
    return functools.partial(logger.log, level)
def listContents(self):
vols = list(self.listVolumes())
vols.sort(key=lambda v: self.getSendPath(v))
return [vol.display(self, detail="line") for vol in vols] | Return list of volumes or diffs in this Store's selected directory. |
def listVolumes(self):
for (vol, paths) in self.paths.items():
for path in paths:
if path.startswith('/'):
continue
if path == '.':
continue
if self.userVolume is not None and os.path.basename(path) != self.userVolume:
continue
yield vol
break | Return list of all volumes in this Store's selected directory. |
def getSendPath(self, volume):
try:
return self._fullPath(next(iter(self.getPaths(volume))))
except StopIteration:
return None | Get a path appropriate for sending the volume from this Store.
The path may be relative or absolute in this Store. |
def selectReceivePath(self, paths):
    """From a set of source paths, recommend a destination path.

    The paths are relative or absolute, in a source Store.
    The result will be absolute, suitable for this destination Store.
    """
    logger.debug("%s", paths)
    if not paths:
        # Return immediately.  The original fell through to the try block,
        # where list(paths)[0] re-raised IndexError from inside the except
        # clause, so the 'Anon' fallback was never actually used.
        return self._fullPath(os.path.basename(self.userPath) + '/Anon')
    try:
        # Relative paths are preferred
        path = [p for p in paths if not p.startswith("/")][0]
    except IndexError:
        # If no relative path, just use the first path
        path = os.path.relpath(list(paths)[0], self.userPath)
    return self._fullPath(path)
def _relativePath(self, fullPath):
    """Return fullPath relative to this Store's directory.

    Returns fullPath unchanged if it lies outside the directory, or None
    when it is None or excluded by ignoreExtraVolumes.
    """
    if fullPath is None:
        return None
    assert fullPath.startswith("/"), fullPath
    relative = os.path.relpath(fullPath, self.userPath)
    if not relative.startswith("../"):
        # Inside the Store directory.
        return relative
    if self.ignoreExtraVolumes:
        return None
    return fullPath
def setSize(self, size, sizeIsEstimated):
self._size = size
self._sizeIsEstimated = sizeIsEstimated
if self.fromVol is not None and size is not None and not sizeIsEstimated:
Diff.theKnownSizes[self.toUUID][self.fromUUID] = size | Update size. |
def sendTo(self, dest, chunkSize):
    """Send this difference to the dest Store.

    :arg dest: destination Store.
    :arg chunkSize: transfer chunk size in bytes.
    """
    vol = self.toVol
    paths = self.sink.getPaths(vol)
    if self.sink == dest:
        # Same store: nothing to transfer, just mark it kept.
        logger.info("Keep: %s", self)
        self.sink.keep(self)
    else:
        # Log, but don't skip yet, so we can log more detailed skipped actions later
        skipDryRun(logger, dest.dryrun, 'INFO')("Xfer: %s", self)
        receiveContext = dest.receive(self, paths)
        sendContext = self.sink.send(self)
        # try:
        #     receiveContext.metadata['btrfsVersion'] = self.btrfsVersion
        # except AttributeError:
        #     pass
        transfer(sendContext, receiveContext, chunkSize)
    if vol.hasInfo():
        infoContext = dest.receiveVolumeInfo(paths)
        if infoContext is None:
            # Dry run (or dest doesn't store info): nothing to write.
            # vol.writeInfo(sys.stdout)
            pass
        else:
            with infoContext as stream:
                vol.writeInfo(stream)
def writeInfoLine(self, stream, fromUUID, size):
    """Write one tab-separated line of diff information.

    Silently skips incomplete entries; warns and skips non-integer sizes.
    """
    if fromUUID is None or size is None:
        return
    if not isinstance(size, int):
        logger.warning("Bad size: %s", size)
        return
    line = "%s\t%s\t%d\n" % (self.uuid, fromUUID, size)
    stream.write(str(line))
def writeInfo(self, stream):
    """Write information about known diff sizes into a file stream for later use."""
    # .iteritems() is Python-2-only; .items() works on both 2 and 3.
    for (fromUUID, size) in Diff.theKnownSizes[self.uuid].items():
        self.writeInfoLine(stream, fromUUID, size)
def hasInfo(self):
    """Return True if there is at least one complete diff-size entry to write."""
    # .iteritems() is Python-2-only; any() also avoids building a
    # throwaway list just to count it.
    return any(
        size is not None and fromUUID is not None
        for (fromUUID, size) in Diff.theKnownSizes[self.uuid].items()
    )
def readInfo(stream):
    """Read previously-written information about diffs.

    Each line holds whitespace-separated toUUID, fromUUID and size;
    lines with malformed sizes are logged and skipped.
    """
    try:
        for line in stream:
            (toUUID, fromUUID, size) = line.split()
            try:
                size = int(size)
            except Exception:
                logger.warning("Bad size: %s", size)
                continue
            logger.debug("diff info: %s %s %d", toUUID, fromUUID, size)
            Diff.theKnownSizes[toUUID][fromUUID] = size
    except Exception as error:
        # logger.warn() is a deprecated alias; warning() is the
        # supported spelling.
        logger.warning("Can't read .bs info file (%s)", error)
def display(self, sink=None, detail='phrase'):
if not isinstance(detail, int):
detail = detailNum[detail]
if detail >= detailNum['line'] and self.size is not None:
size = " (%s%s)" % (
humanize(self.size),
"" if self.exclusiveSize is None else (
" %s exclusive" % (humanize(self.exclusiveSize))
)
)
else:
size = ""
vol = "%s %s" % (
_printUUID(self._uuid, detail - 1),
sink.getSendPath(self) if sink else "",
)
return vol + size | Friendly string for volume, using sink paths. |
def make(cls, vol):
    """Convert uuid to Volume, if necessary.

    Existing cls instances and None pass through unchanged; anything
    else is wrapped in a new cls instance.
    """
    if vol is None:
        return None
    if isinstance(vol, cls):
        return vol
    return cls(vol, None)
def _fillVolumesAndPaths(self, paths):
self.diffs = collections.defaultdict((lambda: []))
self.extraKeys = {}
for key in self.bucket.list():
if key.name.startswith(theTrashPrefix):
continue
keyInfo = self._parseKeyName(key.name)
if keyInfo is None:
if key.name[-1:] != '/':
logger.warning("Ignoring '%s' in S3", key.name)
continue
if keyInfo['type'] == 'info':
stream = io.BytesIO()
key.get_contents_to_file(stream)
Store.Volume.readInfo(stream)
continue
if keyInfo['from'] == 'None':
keyInfo['from'] = None
path = self._relativePath("/" + keyInfo['fullpath'])
if path is None:
continue
diff = Store.Diff(self, keyInfo['to'], keyInfo['from'], key.size)
logger.debug("Adding %s in %s", diff, path)
self.diffs[diff.fromVol].append(diff)
paths[diff.toVol].append(path)
self.extraKeys[diff] = path | Fill in paths.
:arg paths: = { Store.Volume: ["linux path",]} |
def listContents(self):
items = list(self.extraKeys.items())
items.sort(key=lambda t: t[1])
(count, size) = (0, 0)
for (diff, path) in items:
if path.startswith("/"):
continue
yield str(diff)
count += 1
size += diff.size
yield "TOTAL: %d diffs %s" % (count, humanize(size)) | Return list of volumes or diffs in this Store's selected directory. |
def hasEdge(self, diff):
    """Test whether the given diff's edge is already present in this sink."""
    return any(d.toVol == diff.toVol for d in self.diffs[diff.fromVol])
def receive(self, diff, paths):
path = self.selectReceivePath(paths)
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
if self._skipDryRun(logger)("receive %s in %s", keyName, self):
return None
progress = _BotoProgress(diff.size) if self.showProgress is True else None
return _Uploader(self.bucket, keyName, progress) | Return Context Manager for a file-like (stream) object to store a diff. |
def receiveVolumeInfo(self, paths):
path = self.selectReceivePath(paths)
path = path + Store.theInfoExtension
if self._skipDryRun(logger)("receive info in '%s'", path):
return None
return _Uploader(self.bucket, path, bufferSize=theInfoBufferSize) | Return Context Manager for a file-like (stream) object to store volume info. |
def _parseKeyName(self, name):
if name.endswith(Store.theInfoExtension):
return {'type': 'info'}
match = self.keyPattern.match(name)
if not match:
return None
match = match.groupdict()
match.update(type='diff')
return match | Returns dict with fullpath, to, from. |
def send(self, diff):
path = self._fullPath(self.extraKeys[diff])
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
key = self.bucket.get_key(keyName)
if self._skipDryRun(logger)("send %s in %s", keyName, self):
return None
progress = _BotoProgress(diff.size) if self.showProgress is True else None
return _Downloader(key, progress) | Write the diff (toVol from fromVol) to the stream context manager. |
def keep(self, diff):
path = self.extraKeys[diff]
if not path.startswith("/"):
logger.debug("Keeping %s", path)
del self.extraKeys[diff]
return
# Copy into self.userPath, if not there already
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
newPath = os.path.join(self.userPath, os.path.basename(path))
newName = self._keyName(diff.toUUID, diff.fromUUID, newPath)
if not self._skipDryRun(logger)("Copy %s to %s", keyName, newName):
self.bucket.copy_key(newName, self.bucket.name, keyName) | Mark this diff (or volume) to be kept in path. |
def deleteUnused(self):
(count, size) = (0, 0)
for (diff, path) in self.extraKeys.items():
if path.startswith("/"):
continue
keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
count += 1
size += diff.size
if self._skipDryRun(logger, 'INFO')("Trash: %s", diff):
continue
try:
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.error("%s: %s", error.code, error.message)
try:
keyName = os.path.dirname(keyName) + Store.theInfoExtension
self.bucket.copy_key(theTrashPrefix + keyName, self.bucket.name, keyName)
self.bucket.delete_key(keyName)
except boto.exception.S3ResponseError as error:
logger.debug("%s: %s", error.code, error.message)
logger.info("Trashed %d diffs (%s)", count, humanize(size)) | Delete any old snapshots in path, if not kept. |
def main():
args = command.parse_args()
with btrfs.FileSystem(args.dir) as mount:
# mount.rescanSizes()
fInfo = mount.FS_INFO()
pprint.pprint(fInfo)
vols = mount.subvolumes
# for dev in mount.devices:
# pprint.pprint(dev)
for vol in vols:
print(vol)
return 0 | Main program. |
def humanize(number):
    """Return a human-readable string for a byte count.

    :arg number: size in bytes, or None.
    :returns: e.g. "1.5 KiB"; None if number is None.
    """
    # Decimal alternative:
    # units = ('bytes', 'KB', 'MB', 'GB', 'TB')
    # base = 1000
    units = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB')
    base = 1024
    if number is None:
        return None
    # Renamed from "pow", which shadowed the builtin; clamp to the
    # largest known unit.
    exponent = int(math.log(number, base)) if number > 0 else 0
    exponent = min(exponent, len(units) - 1)
    mantissa = number / (base ** exponent)
    return "%.4g %s" % (mantissa, units[exponent])
def receive(self, path, diff, showProgress=True):
directory = os.path.dirname(path)
cmd = ["btrfs", "receive", "-e", directory]
if Store.skipDryRun(logger, self.dryrun)("Command: %s", cmd):
return None
if not os.path.exists(directory):
os.makedirs(directory)
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=DEVNULL,
)
_makeNice(process)
return _Writer(process, process.stdin, path, diff, showProgress) | Return a context manager for stream that will store a diff. |
def send(self, targetPath, parent, diff, showProgress=True, allowDryRun=True):
if parent is not None:
cmd = ["btrfs", "send", "-p", parent, targetPath]
else:
cmd = ["btrfs", "send", targetPath]
if Store.skipDryRun(logger, self.dryrun and allowDryRun)("Command: %s", cmd):
return None
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=DEVNULL)
_makeNice(process)
return _Reader(process, process.stdout, targetPath, diff, showProgress) | Return context manager for stream to send a (incremental) snapshot. |
def analyze(self, chunkSize, *sinks):
    """Figure out the best diffs to use to reach all our required volumes.

    :arg chunkSize: transfer chunk size, forwarded to measureSize().
    :arg sinks: available Stores; the last one is treated as destination.
    """
    measureSize = False
    if self.measureSize:
        # Only bother measuring when a remote (expensive) sink is involved.
        for sink in sinks:
            if sink.isRemote:
                measureSize = True
    # Use destination (already uploaded) edges first
    sinks = list(sinks)
    sinks.reverse()
    self.dest = sinks[0]

    def currentSize():
        # Total size of chosen diffs not already at the destination.
        return sum([
            n.diffSize
            for n in self.nodes.values()
            if n.diff is not None and n.diff.sink != self.dest
        ])

    while True:
        self._analyzeDontMeasure(chunkSize, measureSize, *sinks)
        if not measureSize:
            return
        estimatedSize = currentSize()
        # logger.info("Measuring any estimated diffs")
        for node in self.nodes.values():
            edge = node.diff
            if edge is not None and edge.sink != self.dest and edge.sizeIsEstimated:
                edge.sink.measureSize(edge, chunkSize)
        actualSize = currentSize()
        logger.info(
            "measured size (%s), estimated size (%s)",
            humanize(actualSize), humanize(estimatedSize),
        )
        # Accept the plan unless measurement blew the estimate by >20%,
        # in which case re-analyze with the corrected sizes.
        if actualSize <= 1.2 * estimatedSize:
            return
def iterDiffs(self):
    """Yield all diffs used in the optimal network, in ascending height order."""
    # The original did nodes = self.nodes.values(); nodes.sort(...),
    # which fails on Python 3 where dict.values() is a view without
    # sort(); sorted() works on both Python 2 and 3.
    for node in sorted(self.nodes.values(), key=self._height):
        yield node.diff
def _prune(self):
done = False
while not done:
done = True
for node in [node for node in self.nodes.values() if node.intermediate]:
if not [dep for dep in self.nodes.values() if dep.previous == node.volume]:
# logger.debug("Removing unnecessary node %s", node)
del self.nodes[node.volume]
done = False | Get rid of all intermediate nodes that aren't needed. |
def __compress_attributes(self, dic):
    """Collapse single-item list attribute values into plain strings.

    LDAP returns every attribute as a list, even scalar ones.  Known
    binary attributes (certificates, SIDs, hashes, ...) are excluded
    because decoding them as UTF-8 is meaningless.

    Note: keys whose values are not one-element lists, or that are in
    the excluded set, are omitted from the result (original behavior).

    :param dic: attribute name -> value list mapping.
    :return: dict of collapsed attributes.
    """
    # Binary attributes that must not be collapsed/decoded.
    excluded = (
        'msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash',
        'msExchBlockedSendersHash', 'replicationSignature',
        'msExchSafeRecipientsHash', 'sIDHistory',
        'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates',
        'msExchMasterAccountSid', 'msExchPreviousAccountSid',
        'msExchUMPinChecksum', 'userSMIMECertificate', 'userCertificate',
        'userCert', 'msExchDisabledArchiveGUID', 'msExchUMSpokenName',
        'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto',
        'msExchMailboxGuid',
    )
    result = {}
    # dict.items() and isinstance(v, list) work on Python 2 and 3;
    # the original .iteritems() and types.ListType are Python-2-only.
    for k, v in dic.items():
        if isinstance(v, list) and len(v) == 1 and k not in excluded:
            try:
                result[k] = v[0].decode('utf-8')
            except Exception as e:
                logging.error("Failed to decode attribute: %s -- %s" % (k, e))
                result[k] = v[0]
    return result
def _fillVolumesAndPaths(self, paths):
    """Fill in paths.

    :arg paths: = { Store.Volume: ["linux path",]}
    """
    with self.btrfs as mount:
        for bv in mount.subvolumes:
            if not bv.readOnly:
                # Only read-only snapshots are usable as send sources.
                continue
            vol = self._btrfsVol2StoreVol(bv)
            if vol is None:
                continue
            path = bv.fullPath
            if path is None:
                logger.info("Skipping deleted volume %s", bv.uuid)
                continue
            relPath = None
            for path in bv.linuxPaths:
                path = self._relativePath(path)
                if path is None:
                    continue  # path is outside store scope
                paths[vol].append(path)
                infoPath = self._fullPath(path + Store.theInfoExtension)
                if os.path.exists(infoPath):
                    # Pick up previously-measured diff sizes.
                    logger.debug("Reading %s", infoPath)
                    with open(infoPath) as info:
                        Store.Volume.readInfo(info)
                if not path.startswith("/"):
                    # Remember a relative path: vol is inside our directory.
                    relPath = path
            if vol not in paths:
                continue
            logger.debug("%s", vol.display(sink=self, detail='phrase'))
            if vol.uuid in self.butterVolumes:
                logger.warn(
                    "Duplicate effective uuid %s in '%s' and '%s'",
                    vol.uuid, path, self.butterVolumes[vol.uuid].fullPath
                )
            self.butterVolumes[vol.uuid] = bv
            if relPath is not None:
                # vol is inside Store directory
                self.extraVolumes[vol] = relPath
def getEdges(self, fromVol):
    """Return the edges available from fromVol.

    With fromVol None, yields a full-volume diff for every known volume;
    otherwise yields size-estimated incremental diffs to sibling snapshots.
    """
    if fromVol is None:
        for toVol in self.paths:
            yield Store.Diff(self, toVol, fromVol, toVol.size)
        return
    if fromVol not in self.paths:
        return
    fromBVol = self.butterVolumes[fromVol.uuid]
    parentUUID = fromBVol.parent_uuid
    butterDir = os.path.dirname(fromBVol.fullPath)
    # Siblings: same parent subvolume, or snapshots in the same directory.
    vols = [vol for vol in self.butterVolumes.values()
            if vol.parent_uuid == parentUUID or
            os.path.dirname(vol.fullPath) == butterDir
            ]
    changeRate = self._calcChangeRate(vols)
    for toBVol in vols:
        if toBVol == fromBVol:
            continue
        # This gives a conservative estimate of the size of the diff
        estimatedSize = self._estimateSize(toBVol, fromBVol, changeRate)
        toVol = self._btrfsVol2StoreVol(toBVol)
        yield Store.Diff(self, toVol, fromVol, estimatedSize, sizeIsEstimated=True)
def receive(self, diff, paths):
if not self.dryrun:
self._fileSystemSync()
path = self.selectReceivePath(paths)
if os.path.exists(path):
raise Exception(
"Path %s exists, can't receive %s" % (path, diff.toUUID)
)
return self.butter.receive(path, diff, self.showProgress is True) | Return Context Manager for a file-like (stream) object to store a diff. |
def receiveVolumeInfo(self, paths):
path = self.selectReceivePath(paths)
path = path + Store.theInfoExtension
if Store.skipDryRun(logger, self.dryrun)("receive info to %s", path):
return None
return open(path, "w") | Return Context Manager for a file-like (stream) object to store volume info. |
def measureSize(self, diff, chunkSize):
    """Spend some time to get an accurate size.

    Streams the btrfs send output through a counting sink, then records
    the measured size in the volume's info file(s).

    :arg diff: the Store.Diff to measure.
    :arg chunkSize: read chunk size in bytes.
    """
    self._fileSystemSync()
    sendContext = self.butter.send(
        self.getSendPath(diff.toVol),
        self.getSendPath(diff.fromVol),
        diff,
        showProgress=self.showProgress is not False,
        allowDryRun=False,
    )

    class _Measure(io.RawIOBase):
        # Write-only stream that just counts bytes (and shows progress).

        def __init__(self, estimatedSize, showProgress):
            self.totalSize = None
            self.progress = progress.DisplayProgress(estimatedSize) if showProgress else None

        def __enter__(self):
            self.totalSize = 0
            if self.progress:
                self.progress.__enter__()
            return self

        def __exit__(self, exceptionType, exceptionValue, traceback):
            if self.progress:
                self.progress.__exit__(exceptionType, exceptionValue, traceback)
            return False  # Don't supress exception

        def writable(self):
            return True

        def write(self, bytes):
            self.totalSize += len(bytes)
            if self.progress:
                self.progress.update(self.totalSize)

    logger.info("Measuring %s", diff)
    measure = _Measure(diff.size, self.showProgress is not False)
    Store.transfer(sendContext, measure, chunkSize)
    diff.setSize(measure.totalSize, False)

    # Persist the measured size next to each path of the target volume.
    for path in self.getPaths(diff.toVol):
        path = self._fullPath(path) + Store.theInfoExtension
        with open(path, "a") as infoFile:
            diff.toVol.writeInfoLine(infoFile, diff.fromUUID, measure.totalSize)
def send(self, diff):
if not self.dryrun:
self._fileSystemSync()
return self.butter.send(
self.getSendPath(diff.toVol),
self.getSendPath(diff.fromVol),
diff,
self.showProgress is True,
) | Write the diff (toVol from fromVol) to the stream context manager. |
def keep(self, diff):
self._keepVol(diff.toVol)
self._keepVol(diff.fromVol) | Mark this diff (or volume) to be kept in path. |
def _keepVol(self, vol):
if vol is None:
return
if vol in self.extraVolumes:
del self.extraVolumes[vol]
return
if vol not in self.paths:
raise Exception("%s not in %s" % (vol, self))
paths = [os.path.basename(path) for path in self.paths[vol]]
newPath = self.selectReceivePath(paths)
if self._skipDryRun(logger, 'INFO')("Copy %s to %s", vol, newPath):
return
self.butterVolumes[vol.uuid].copy(newPath) | Mark this volume to be kept in path. |
def deletePartials(self, dryrun=False):
for (vol, path) in self.extraVolumes.items():
if not path.endswith(".part"):
continue
if self._skipDryRun(logger, 'INFO', dryrun=dryrun)("Delete subvolume %s", path):
continue
self.butterVolumes[vol.uuid].destroy() | Delete any old partial uploads/downloads in path. |
def _parseDefinition(typeDef, name, len=1, reader=None, writer=None):
if isinstance(typeDef, Structure):
return (name, typeDef.fmt, typeDef)
if len != 1:
size = struct.calcsize(typeDef)
if typeDef not in "xspP":
typeDef = 's'
typeDef = str(len * size) + typeDef
fmtChar = typeDef[-1:]
if fmtChar == 'x':
typeObj = Structure.skipType
else:
typeObj = _TypeWriter(Structure.defaults[fmtChar], reader, writer)
return (name, typeDef, typeObj) | Return (name, format, type) for field.
type.popValue() and type.yieldArgs() must be implemented. |
def yieldArgs(self, keyArgs):
try:
keyArgs = keyArgs._asdict() if keyArgs else {}
except AttributeError:
pass
logger.debug('Args: %s', keyArgs)
for (name, typeObj) in self._types.items():
logger.debug('Yielding %s: %s', name, typeObj)
for arg in typeObj.yieldArgs(keyArgs.get(name, None)):
yield arg | Take (nested) dict(s) of args to set, and return flat list of args. |
def write(self, keyArgs):
# bytearray doesn't work with fcntl
args = array.array('B', (0,) * self.size)
self._struct.pack_into(args, 0, *list(self.yieldArgs(keyArgs)))
return args | Write specified key arguments into data structure. |
def popValue(self, argList):
# return self._Tuple(*[name for (name, typeObj) in self._types.items()])
return self._Tuple(*[typeObj.popValue(argList) for (name, typeObj) in self._types.items()]) | Take a flat arglist, and pop relevent values and return as a value or tuple. |
def read(self, data, offset=0):
if isinstance(data, Buffer):
return data.read(self)
try:
args = list(self._struct.unpack_from(data, offset))
except TypeError as error:
# Working around struct.unpack_from issue #10212
logger.debug("error: %s", error)
args = list(self._struct.unpack_from(str(bytearray(data)), offset))
args.reverse()
return self.popValue(args) | Read data structure and return (nested) named tuple(s). |
def read(self, structure):
    """Read one structure at the current offset and advance past it."""
    begin = self.offset
    self.skip(structure.size)
    return structure.read(self.buf, begin)
def readView(self, newLength=None):
    """Return a view of the next newLength bytes (default: the rest), and skip it."""
    length = self.len if newLength is None else newLength
    view = self.peekView(length)
    self.skip(length)
    return view
def peekView(self, newLength):
    """Return a zero-copy view of the next newLength bytes without advancing.

    Note: In Python 2.7, memoryviews can't be written to by the struct
    module (known bug).
    """
    end = self.offset + newLength
    return memoryview(self.buf)[self.offset:end]
def readBuffer(self, newLength):
result = Buffer(self.buf, self.offset, newLength)
self.skip(newLength)
return result | Read next chunk as another buffer. |
def _IOC(cls, dir, op, structure=None):
control = cls(dir, op, structure)
def do(dev, **args):
return control(dev, **args)
return do | Encode an ioctl id. |
def IOWR(cls, op, structure):
return cls._IOC(READ | WRITE, op, structure) | Returns an ioctl Device method with READ and WRITE arguments. |
def bytes2uuid(b):
    """Return standard human-friendly UUID string, or None if all zero bytes.

    :arg b: raw 16-byte UUID.
    """
    import binascii
    # Strip NUL bytes with a bytes literal and hexlify via binascii so this
    # works on Python 2 and 3; str.encode('hex') and strip(chr(0)) on
    # bytes are Python-2-only.
    if not b.strip(b'\0'):
        return None
    s = binascii.hexlify(b).decode('ascii')
    return "%s-%s-%s-%s-%s" % (s[0:8], s[8:12], s[12:16], s[16:20], s[20:])
def _addLink(self, dirTree, dirID, dirSeq, dirPath, name):
logger.debug("Link %d-%d-%d '%s%s'", dirTree, dirID, dirSeq, dirPath, name)
# assert dirTree != 0, (dirTree, dirID, dirSeq, dirPath, name)
assert (dirTree, dirID, dirSeq) not in self.links, (dirTree, dirID, dirSeq)
self.links[(dirTree, dirID, dirSeq)] = (dirPath, name)
assert len(self.links) == 1, self.links # Cannot have multiple hardlinks to a directory
logger.debug("%s", self) | Add tree reference and name. (Hardlink). |
def fullPath(self):
for ((dirTree, dirID, dirSeq), (dirPath, name)) in self.links.items():
try:
path = self.fileSystem.volumes[dirTree].fullPath
if path is not None:
return path + ("/" if path[-1] != "/" else "") + dirPath + name
except Exception:
logging.debug("Haven't imported %d yet", dirTree)
if self.id == BTRFS_FS_TREE_OBJECTID:
return "/"
else:
return None | Return full butter path from butter root. |
def linuxPaths(self):
for ((dirTree, dirID, dirSeq), (dirPath, name)) in self.links.items():
for path in self.fileSystem.volumes[dirTree].linuxPaths:
yield path + "/" + dirPath + name
if self.fullPath in self.fileSystem.mounts:
yield self.fileSystem.mounts[self.fullPath] | Return full paths from linux root.
The first path returned will be the path through the top-most mount.
(Usually the root). |
def destroy(self):
path = next(iter(self.linuxPaths))
directory = _Directory(os.path.dirname(path))
with directory as device:
device.SNAP_DESTROY(name=str(os.path.basename(path)), ) | Delete this subvolume from the filesystem. |
def copy(self, path):
    """Make another snapshot of this volume into the given path.

    Creates a read-only snapshot, then stamps it with the original's
    received-UUID/transid so btrfs still treats it as the same received
    subvolume.
    """
    directoryPath = os.path.dirname(path)
    if not os.path.exists(directoryPath):
        os.makedirs(directoryPath)
    logger.debug('Create copy of %s in %s', os.path.basename(path), directoryPath)
    with self._snapshot() as source, _Directory(directoryPath) as dest:
        dest.SNAP_CREATE_V2(
            flags=BTRFS_SUBVOL_RDONLY,
            name=str(os.path.basename(path)),
            fd=source.fd,
        )
    with SnapShot(path) as destShot:
        # Temporarily clear read-only so SET_RECEIVED_SUBVOL is permitted.
        flags = destShot.SUBVOL_GETFLAGS()
        destShot.SUBVOL_SETFLAGS(flags=flags.flags & ~BTRFS_SUBVOL_RDONLY)
        destShot.SET_RECEIVED_SUBVOL(
            uuid=self.received_uuid or self.uuid,
            stransid=self.sent_gen or self.current_gen,
            stime=timeOrNone(self.info.stime) or timeOrNone(self.info.ctime) or 0,
            flags=0,
        )
        # Restore the original flags (typically read-only again).
        destShot.SUBVOL_SETFLAGS(flags=flags.flags)
def subvolumes(self):
    """Return subvolumes contained in this mount, sorted by full path."""
    self.SYNC()
    self._getDevices()
    self._getRoots()
    self._getMounts()
    self._getUsage()
    # The original did volumes = self.volumes.values(); volumes.sort(...),
    # which fails on Python 3 where .values() is a view; sorted() works
    # on both Python 2 and 3.
    return sorted(self.volumes.values(), key=lambda v: v.fullPath)
def _rescanSizes(self, force=True):
    """Zero and recalculate quota sizes so subvolume sizes will be correct.

    :arg force: start a rescan even when quota flags report no scan needed.
    """
    status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
    logger.debug("CTL Status: %s", hex(status))
    status = self.QUOTA_RESCAN_STATUS()
    logger.debug("RESCAN Status: %s", status)
    if not status.flags:
        if not force:
            return
        self.QUOTA_RESCAN()
    # logger.warn() is a deprecated alias; warning() is the supported
    # spelling.
    logger.warning("Waiting for btrfs quota usage scan...")
    self.QUOTA_RESCAN_WAIT()
def TLV_GET(attrs, attrNum, format):
    """Get a tag-length-value encoded attribute.

    :arg attrs: mapping of attribute number -> buffer view (buf/offset/len).
    :arg attrNum: which attribute to decode.
    :arg format: struct format; bare 's' means "the whole attribute".
    """
    view = attrs[attrNum]
    if format == 's':
        format = str(view.len) + format
    try:
        (result,) = struct.unpack_from(format, view.buf, view.offset)
    except TypeError:
        # Working around struct.unpack_from issue #10212
        (result,) = struct.unpack_from(format, str(bytearray(view.buf)), view.offset)
    return result
def TLV_PUT(attrs, attrNum, format, value):
    """Put a tag-length-value encoded attribute.

    Writes value into the attribute's buffer in place.
    """
    view = attrs[attrNum]
    if format == 's':
        format = str(view.len) + format
    struct.pack_into(format, view.buf, view.offset, value)
def command(name, mode):
def decorator(fn):
commands[name] = fn.__name__
_Client._addMethod(fn.__name__, name, mode)
return fn
return decorator | Label a method as a command with name. |
def diff(self, diff):
    """Serialize a diff to a plain dictionary; None passes through."""
    if diff is None:
        return None
    return {
        'toVol': diff.toUUID,
        'fromVol': diff.fromUUID,
        'size': diff.size,
        'sizeIsEstimated': diff.sizeIsEstimated,
    }
def _fillVolumesAndPaths(self, paths):
for (volDict, volPaths) in self._client.fillVolumesAndPaths():
vol = Store.Volume(**volDict)
paths[vol] = volPaths | Fill in paths.
:arg paths: = { Store.Volume: ["linux path",]} |
def getEdges(self, fromVol):
return [
self.toObj.diff(diff)
for diff in self._client.getEdges(self.toArg.vol(fromVol))
] | Return the edges available from fromVol. |
def measureSize(self, diff, chunkSize):
(toUUID, fromUUID) = self.toArg.diff(diff)
isInteractive = sys.stderr.isatty()
return self.toObj.diff(self._client.measureSize(
toUUID,
fromUUID,
diff.size,
chunkSize,
isInteractive,
)) | Spend some time to get an accurate size. |
def send(self, diff):
if Store.skipDryRun(logger, self.dryrun)("send %s", diff):
return None
(diffTo, diffFrom) = self.toArg.diff(diff)
self._client.send(diffTo, diffFrom)
progress = DisplayProgress(diff.size) if self.showProgress is True else None
return _SSHStream(self._client, progress) | Return Context Manager for a file-like (stream) object to send a diff. |
def receive(self, diff, paths):
path = self.selectReceivePath(paths)
path = self._relativePath(path)
if Store.skipDryRun(logger, self.dryrun)("receive to %s", path):
return None
(diffTo, diffFrom) = self.toArg.diff(diff)
self._client.receive(path, diffTo, diffFrom)
progress = DisplayProgress(diff.size) if self.showProgress is True else None
return _SSHStream(self._client, progress) | Return Context Manager for a file-like (stream) object to store a diff. |
def receiveVolumeInfo(self, paths):
path = self.selectReceivePath(paths)
path = path + Store.theInfoExtension
if Store.skipDryRun(logger, self.dryrun)("receive info to %s", path):
return None
self._client.receiveInfo(path)
return _SSHStream(self._client) | Return Context Manager for a file-like (stream) object to store volume info. |
def keep(self, diff):
(toUUID, fromUUID) = self.toArg.diff(diff)
self._client.keep(toUUID, fromUUID)
logger.debug("Kept %s", diff) | Mark this diff (or volume) to be kept in path. |
def deleteUnused(self):
    """Delete any old snapshots in path, if not kept."""
    # In dry-run mode just report what would be removed.
    action = self._client.listUnused if self.dryrun else self._client.deleteUnused
    action()
def deletePartials(self):
    """Delete any old partial uploads/downloads in path."""
    # Dry-run only lists the partial transfers without removing them.
    action = self._client.listPartials if self.dryrun else self._client.deletePartials
    action()
def _open(self):
if self._process is not None:
return
cmd = [
'ssh',
self._host,
'sudo',
'buttersink',
'--server',
'--mode',
self._mode,
self._directory
]
logger.debug("Connecting with: %s", cmd)
self._process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stderr=sys.stderr,
# stdout=sys.stdout,
stdout=subprocess.PIPE,
)
version = self.version()
logger.info("Remote version: %s", version) | Open connection to remote host. |
def _close(self):
if self._process is None:
return
self.quit()
self._process.stdin.close()
logger.debug("Waiting for ssh process to finish...")
self._process.wait() # Wait for ssh session to finish.
# self._process.terminate()
# self._process.kill()
self._process = None | Close connection to remote host. |
def run(self):
normalized = os.path.normpath(self.path) + ("/" if self.path.endswith("/") else "")
if self.path != normalized:
sys.stderr.write("Please use full path '%s'" % (normalized,))
return -1
self.butterStore = ButterStore.ButterStore(None, self.path, self.mode, dryrun=False)
# self.butterStore.ignoreExtraVolumes = True
self.toObj = _Arg2Obj(self.butterStore)
self.toDict = _Obj2Dict()
self.running = True
with self.butterStore:
with self:
while self.running:
self._processCommand()
return 0 | Run the server. Returns with system error code. |
def _sendResult(self, result):
    """Send parseable json result of a command to stdout.

    Falls back to an error report when result isn't JSON-serializable.
    """
    # logger.debug("Result: %s", result)
    try:
        result = json.dumps(result)
    except Exception as error:
        # NOTE(review): `command` is not defined in this scope -- this
        # fallback path looks like it would raise NameError; verify intent.
        result = json.dumps(self._errorInfo(command, error))
    sys.stdout.write(result)
    sys.stdout.write("\n")
    sys.stdout.flush()
def version(self):
return dict(
buttersink=theVersion,
btrfs=self.butterStore.butter.btrfsVersion,
linux=platform.platform(),
) | Return kernel and btrfs version. |
def send(self, diffTo, diffFrom):
diff = self.toObj.diff(diffTo, diffFrom)
self._open(self.butterStore.send(diff)) | Do a btrfs send. |
def receive(self, path, diffTo, diffFrom):
    """Receive a btrfs diff into *path*."""
    requested = self.toObj.diff(diffTo, diffFrom)
    self._open(self.butterStore.receive(requested, [path, ]))
def streamWrite(self, size):
    """Receive a chunk of data from stdin and write it to the stream.

    :arg size: Amount of data.  0 indicates EOT.
    """
    count = int(size)
    if count == 0:
        self._close()
        return
    # Acknowledge before the raw data transfer begins.
    self._sendResult(dict(message="writing...", stream=True, size=count))
    self.stream.write(sys.stdin.read(count))
def streamRead(self, size):
    """Read a chunk of data from the stream and send it on stdout.

    :arg size: Amount of data requested.
    """
    wanted = int(size)
    data = self.stream.read(wanted)
    actual = len(data)
    if actual == 0:
        self._close()
        return dict(message="Finished", size=0)
    # Announce the actual size before the raw data transfer begins.
    self._sendResult(dict(message="reading...", stream=True, size=actual))
    sys.stdout.write(data)
def fillVolumesAndPaths(self):
    """Get all volumes (as dicts) with their paths, for initialization."""
    volumePaths = self.butterStore.paths
    return [
        (self.toDict.vol(volume), mountPaths)
        for volume, mountPaths in volumePaths.items()
    ]
def getEdges(self, fromVol):
    """Return the edges available from fromVol, as dicts."""
    source = self.toObj.vol(fromVol)
    edges = self.butterStore.getEdges(source)
    return [self.toDict.diff(edge) for edge in edges]
def measureSize(self, diffTo, diffFrom, estimatedSize, chunkSize, isInteractive):
    """Spend some time measuring an accurate size for a diff."""
    diff = self.toObj.diff(diffTo, diffFrom, estimatedSize)
    interactive = self.toObj.bool(isInteractive)
    # Suppress progress display entirely when the client isn't interactive.
    self.butterStore.showProgress = None if interactive else False
    self.butterStore.measureSize(diff, int(chunkSize))
    return self.toDict.diff(diff)
def keep(self, diffTo, diffFrom):
    """Mark this diff (or volume) to be kept in path."""
    self.butterStore.keep(self.toObj.diff(diffTo, diffFrom))
def load_lists(keys=(), values=(), name='NT'):
    """Map namedtuples given a pair of key, value lists.

    :arg keys: iterable of field names.
    :arg values: iterable of corresponding values.
    :arg name: name for the generated namedtuple class.
    """
    # Immutable defaults replace the original mutable `[]` defaults
    # (harmless here since they are never mutated, but a classic pitfall).
    mapping = dict(zip(keys, values))
    return mapper(mapping, _nt_name=name)
def load_json(data=None, path=None, name='NT'):
    """Map namedtuples with json data.

    :arg data: a JSON document as a string.
    :arg path: path to a JSON file (mutually exclusive with *data*).
    :arg name: name for the generated namedtuple class.
    :raises ValueError: if both *data* and *path* are given.
    """
    if data and path:
        raise ValueError('expected one source and received two')
    if data:
        return mapper(json.loads(data), _nt_name=name)
    if path:
        # BUG FIX: the original passed the path string straight to
        # json.load(), which requires a file object.  Open the file
        # first, matching load_yaml()'s handling of *path*.
        with open(path, 'r') as f:
            return mapper(json.load(f), _nt_name=name)
def load_yaml(data=None, path=None, name='NT'):
    """Map namedtuples with yaml data.

    :arg data: a YAML document as a string.
    :arg path: path to a YAML file (mutually exclusive with *data*).
    :arg name: name for the generated namedtuple class.
    :raises ValueError: if both *data* and *path* are given.
    """
    if data and path:
        raise ValueError('expected one source and received two')
    # SECURITY NOTE: yaml.load() without an explicit Loader can construct
    # arbitrary Python objects from untrusted input; switch to
    # yaml.safe_load() if documents never use custom tags.
    if data:
        return mapper(yaml.load(data), _nt_name=name)
    if path:
        with open(path, 'r') as f:
            return mapper(yaml.load(f), _nt_name=name)
def load_env(keys=(), name='NT', use_getpass=False):
    """Return a namedtuple built from environment variables.

    For any variable not set in the shell, fall back to prompting the
    user with *input* (or *getpass* when hidden entry is wanted).

    :arg keys: iterable of environment variable names.
    :arg name: name for the generated namedtuple class.
    :arg use_getpass: hide typed input (for secrets).
    """
    # Immutable default replaces the original mutable `[]` default.
    NT = namedtuple(name, keys)
    prompt = getpass.getpass if use_getpass else input
    values = [os.getenv(key) or prompt(key) for key in keys]
    return NT(*values)
def mapper(mapping, _nt_name='NT'):
    """Convert mappings to namedtuples recursively.

    Mappings are converted depth-first and in place, then wrapped;
    AsDict instances are left as plain mappings.
    """
    if isinstance(mapping, list):
        return [mapper(item) for item in mapping]
    if isinstance(mapping, Mapping) and not isinstance(mapping, AsDict):
        # Snapshot the keys: the mapping is mutated during iteration.
        for key in list(mapping.keys()):
            mapping[key] = mapper(mapping[key])
        return namedtuple_wrapper(_nt_name, **mapping)
    return mapping
def ignore(mapping):
    """Use ignore to prevent a mapping from being mapped to a namedtuple."""
    if isinstance(mapping, list):
        return [ignore(item) for item in mapping]
    if isinstance(mapping, Mapping):
        return AsDict(mapping)
    return mapping
def ensure_dir(dir_path):
    """If dir_path does not exist, make it; failing that, raise Exception.

    Returns True if the directory already existed; False if it had to be
    made.
    """
    exists = dir_exists(dir_path)
    if not exists:
        try:
            os.makedirs(dir_path)
        # FIX: the original used the Python-2-only `except (...), e` form,
        # a syntax error on Python 3; `as` works on 2.6+ and 3.  The
        # redundant RuntimeError (already a subclass of Exception) is dropped.
        except Exception as e:
            raise Exception("Unable to create directory %s. Cause %s" %
                            (dir_path, e))
    return exists
def is_same_host(host1, host2):
    """Return True if host1 == host2 OR they map to the same host via DNS.

    Any resolution failure is logged and treated as "not the same".
    """
    try:
        if host1 == host2:
            return True
        # Compare resolved IP sets; any overlap means the same machine.
        ips1 = get_host_ips(host1)
        ips2 = get_host_ips(host2)
        return len(set(ips1) & set(ips2)) > 0
    # FIX: the original used the Python-2-only `except Exception, ex`
    # form, a syntax error on Python 3; `as` works on 2.6+ and 3.
    except Exception as ex:
        log_exception(ex)
        return False
def is_same_address(addr1, addr2):
    """Compare two "host:port" addresses.

    Returns True if the ports are equal and the hosts are the same
    according to is_same_host().
    """
    # FIX: rsplit on the final ':' so a host part containing extra colons
    # (e.g. an IPv6 literal) still yields the intended host/port split;
    # plain split(":") compared the wrong pieces in that case.
    host1, port1 = addr1.rsplit(":", 1)
    host2, port2 = addr2.rsplit(":", 1)
    return is_same_host(host1, host2) and port1 == port2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.