Code | Summary
---|---|
Please provide a description of the function:def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries) | [] |
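A minimal sketch of the nested chaining above, using a toy two-level hierarchy (values are invented for illustration): the first chain concatenates the levels, the second splits each interval into its endpoints.

import itertools
import numpy as np

# Toy two-level hierarchy: each level is an array of [start, end] pairs (seconds).
intervals_hier = [np.array([[0.0, 30.0], [30.0, 60.0]]),
                  np.array([[0.0, 15.0], [15.0, 30.0], [30.0, 45.0], [45.0, 60.0]])]
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
print(min(boundaries), max(boundaries))  # 0.0 60.0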
Please provide a description of the function:def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
`int_hier` and `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])] | [] |
Please provide a description of the function:def _lca(intervals_hier, frame_size):
'''Compute the (sparse) least-common-ancestor (LCA) matrix for a
hierarchical segmentation.
For any pair of frames ``(s, t)``, the LCA is the deepest level in
the hierarchy such that ``(s, t)`` are contained within a single
segment at that level.
Parameters
----------
intervals_hier : list of ndarray
An ordered list of segment interval arrays.
The list is assumed to be ordered by increasing specificity (depth).
frame_size : number
The length of the sample frames (in seconds)
Returns
-------
lca_matrix : scipy.sparse.csr_matrix
A sparse matrix such that ``lca_matrix[i, j]`` contains the depth
of the deepest segment containing frames ``i`` and ``j``.
'''
frame_size = float(frame_size)
# Figure out how many frames we need
n_start, n_end = _hierarchy_bounds(intervals_hier)
n = int((_round(n_end, frame_size) -
_round(n_start, frame_size)) / frame_size)
# Initialize the LCA matrix
lca_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
for level, intervals in enumerate(intervals_hier, 1):
for ival in (_round(np.asarray(intervals),
frame_size) / frame_size).astype(int):
idx = slice(ival[0], ival[1])
lca_matrix[idx, idx] = level
return lca_matrix.tocsr() | [] |
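A hedged sketch of the same construction on a toy hierarchy (1-second frames, values invented for illustration): frames inside the finer level-2 segments meet at depth 2, all other pairs only at depth 1.

import numpy as np
import scipy.sparse

frame_size = 1.0
intervals_hier = [np.array([[0, 4]]),            # level 1: one segment spanning everything
                  np.array([[0, 2], [2, 4]])]    # level 2: two finer segments
n = 4
lca = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
for level, intervals in enumerate(intervals_hier, 1):
    for start, end in (intervals / frame_size).astype(int):
        lca[start:end, start:end] = level        # deeper levels overwrite shallower ones
print(lca.toarray())
# [[2 2 1 1]
#  [2 2 1 1]
#  [1 1 2 2]
#  [1 1 2 2]]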
Please provide a description of the function:def _meet(intervals_hier, labels_hier, frame_size):
'''Compute the (sparse) label-agreement ("meet") matrix for a
hierarchical segmentation.
For any pair of frames ``(s, t)``, the meet is the deepest level of
the hierarchy at which ``(s, t)`` receive the same segment label.
Parameters
----------
intervals_hier : list of ndarray
An ordered list of segment interval arrays.
The list is assumed to be ordered by increasing specificity (depth).
labels_hier : list of list of str
``labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
frame_size : number
The length of the sample frames (in seconds)
Returns
-------
meet_matrix : scipy.sparse.csr_matrix
A sparse matrix such that ``meet_matrix[i, j]`` contains the depth
of the deepest segment label containing both ``i`` and ``j``.
'''
frame_size = float(frame_size)
# Figure out how many frames we need
n_start, n_end = _hierarchy_bounds(intervals_hier)
n = int((_round(n_end, frame_size) -
_round(n_start, frame_size)) / frame_size)
# Initialize the meet matrix
meet_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
for level, (intervals, labels) in enumerate(zip(intervals_hier,
labels_hier), 1):
# Encode the labels at this level
lab_enc = util.index_labels(labels)[0]
# Find unique agreements
int_agree = np.triu(np.equal.outer(lab_enc, lab_enc))
# Map intervals to frame indices
int_frames = (_round(intervals, frame_size) / frame_size).astype(int)
# For each pair of intervals (i, j) whose labels agree, update the meet matrix
for (seg_i, seg_j) in zip(*np.where(int_agree)):
idx_i = slice(*list(int_frames[seg_i]))
idx_j = slice(*list(int_frames[seg_j]))
meet_matrix[idx_i, idx_j] = level
if seg_i != seg_j:
meet_matrix[idx_j, idx_i] = level
return scipy.sparse.csr_matrix(meet_matrix) | [] |
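The key difference from ``_lca`` is that agreement is driven by labels rather than containment. A small sketch of the label-agreement step at a single level (toy labels, illustration only):

import numpy as np

labels = ['a', 'b', 'a']                          # segment labels at one level
lab_enc = np.unique(labels, return_inverse=True)[1]
int_agree = np.triu(np.equal.outer(lab_enc, lab_enc))
print(int_agree.astype(int))
# [[1 0 1]
#  [0 1 0]
#  [0 0 1]]  -> segments 0 and 2 share a label, so their frames meet at this level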
Please provide a description of the function:def _gauc(ref_lca, est_lca, transitive, window):
'''Generalized area under the curve (GAUC)
This function computes the normalized recall score for correctly
ordering triples ``(q, i, j)`` where frames ``(q, i)`` are closer than
``(q, j)`` in the reference annotation.
Parameters
----------
ref_lca : scipy.sparse
est_lca : scipy.sparse
The least common ancestor matrices for the reference and
estimated annotations
transitive : bool
If True, then transitive comparisons are counted, meaning that
``(q, i)`` and ``(q, j)`` can differ by any number of levels.
If False, then ``(q, i)`` and ``(q, j)`` can differ by exactly one
level.
window : number or None
The maximum number of frames to consider for each query.
If `None`, then all frames are considered.
Returns
-------
score : number [0, 1]
The fraction of reference triples correctly ordered by
the estimation.
Raises
------
ValueError
If ``ref_lca`` and ``est_lca`` have different shapes
'''
# Make sure we have the right number of frames
if ref_lca.shape != est_lca.shape:
raise ValueError('Estimated and reference hierarchies '
'must have the same shape.')
# How many frames?
n = ref_lca.shape[0]
# By default, the window covers the entire track
if window is None:
window = n
# Initialize the score
score = 0.0
# Iterate over query frames
num_frames = 0
for query in range(n):
# Find all pairs i,j such that ref_lca[q, i] > ref_lca[q, j]
results = slice(max(0, query - window), min(n, query + window))
ref_score = ref_lca[query, results]
est_score = est_lca[query, results]
# Densify the results
ref_score = ref_score.toarray().squeeze()
est_score = est_score.toarray().squeeze()
# Don't count the query as a result
# when query < window, query itself is the index within the slice
# otherwise, query is located at index `window`, the center of the slice
# (this also holds when the slice goes off the end of the array.)
idx = min(query, window)
ref_score = np.concatenate((ref_score[:idx], ref_score[idx+1:]))
est_score = np.concatenate((est_score[:idx], est_score[idx+1:]))
inversions, normalizer = _compare_frame_rankings(ref_score, est_score,
transitive=transitive)
if normalizer:
score += 1.0 - inversions / float(normalizer)
num_frames += 1
# Normalize by the number of frames counted.
# If no frames are counted, take the convention 0/0 -> 0
if num_frames:
score /= float(num_frames)
else:
score = 0.0
return score | [] |
Please provide a description of the function:def _count_inversions(a, b):
'''Count the number of inversions between two numpy arrays, i.e., the
number of index pairs ``(i, j)`` such that ``a[i] >= b[j]``.
Parameters
----------
a, b : np.ndarray, shape=(n,) (m,)
The arrays to be compared.
This implementation is optimized for arrays with many
repeated values.
Returns
-------
inversions : int
The number of detected inversions
'''
a, a_counts = np.unique(a, return_counts=True)
b, b_counts = np.unique(b, return_counts=True)
inversions = 0
i = 0
j = 0
while i < len(a) and j < len(b):
if a[i] < b[j]:
i += 1
elif a[i] >= b[j]:
inversions += np.sum(a_counts[i:]) * b_counts[j]
j += 1
return inversions | [] |
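A brute-force cross-check of what the merge-style loop above counts (toy arrays, illustration only):

import numpy as np

a = np.array([2, 2, 3])
b = np.array([1, 3])
# An "inversion" here is any pair (i, j) with a[i] >= b[j].
brute = sum(int(x >= y) for x in a for y in b)
print(brute)  # 4: two pairs (2 >= 1), one (3 >= 1), one (3 >= 3)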
Please provide a description of the function:def _compare_frame_rankings(ref, est, transitive=False):
'''Compute the number of ranking disagreements in two lists.
Parameters
----------
ref : np.ndarray, shape=(n,)
est : np.ndarray, shape=(n,)
Reference and estimate ranked lists.
`ref[i]` is the relevance score for point `i`.
transitive : bool
If true, all pairs of reference levels are compared.
If false, only adjacent pairs of reference levels are compared.
Returns
-------
inversions : int
The number of pairs of indices `i, j` where
`ref[i] < ref[j]` but `est[i] >= est[j]`.
normalizer : float
The total number of pairs (i, j) under consideration.
If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
If transitive=False, then this is |{(i, j) : ref[i] + 1 = ref[j]}|
'''
idx = np.argsort(ref)
ref_sorted = ref[idx]
est_sorted = est[idx]
# Find the break-points in ref_sorted
levels, positions, counts = np.unique(ref_sorted,
return_index=True,
return_counts=True)
positions = list(positions)
positions.append(len(ref_sorted))
index = collections.defaultdict(lambda: slice(0))
ref_map = collections.defaultdict(lambda: 0)
for level, cnt, start, end in zip(levels, counts,
positions[:-1], positions[1:]):
index[level] = slice(start, end)
ref_map[level] = cnt
# Now that we have values sorted, apply the inversion-counter to
# pairs of reference values
if transitive:
level_pairs = itertools.combinations(levels, 2)
else:
level_pairs = [(i, i+1) for i in levels]
level_pairs, lcounter = itertools.tee(level_pairs)
normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))
if normalizer == 0:
return 0, 0.0
inversions = 0
for level_1, level_2 in level_pairs:
inversions += _count_inversions(est_sorted[index[level_1]],
est_sorted[index[level_2]])
return inversions, float(normalizer) | [] |
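A brute-force sketch of the transitive case for intuition (toy rankings, illustration only): enumerate the comparable reference pairs and count how many of them the estimate orders incorrectly.

import numpy as np

ref = np.array([1, 2, 3])   # reference relevance per point
est = np.array([1, 3, 2])   # estimated relevance per point
pairs = [(i, j) for i in range(len(ref)) for j in range(len(ref)) if ref[i] < ref[j]]
inversions = sum(int(est[i] >= est[j]) for i, j in pairs)
print(inversions, len(pairs))  # 1 inversion out of 3 comparable pairs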
Please provide a description of the function:def validate_hier_intervals(intervals_hier):
'''Validate a hierarchical segment annotation.
Parameters
----------
intervals_hier : ordered list of segmentations
Raises
------
ValueError
If any segmentation does not span the full duration of the top-level
segmentation.
If any segmentation does not start at 0.
'''
# Synthesize a label array for the top layer.
label_top = util.generate_labels(intervals_hier[0])
boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))
for level, intervals in enumerate(intervals_hier[1:], 1):
# Make sure this level is consistent with the root
label_current = util.generate_labels(intervals)
validate_structure(intervals_hier[0], label_top,
intervals, label_current)
# Make sure all previous boundaries are accounted for
new_bounds = set(util.intervals_to_boundaries(intervals))
if boundaries - new_bounds:
warnings.warn('Segment hierarchy is inconsistent '
'at level {:d}'.format(level))
boundaries |= new_bounds | [] |
Please provide a description of the function:def tmeasure(reference_intervals_hier, estimated_intervals_hier,
transitive=False, window=15.0, frame_size=0.1, beta=1.0):
'''Computes the tree measures for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
estimated_intervals_hier : list of ndarray
Like ``reference_intervals_hier`` but for the estimated annotation
transitive : bool
whether to compute the t-measures using transitivity or not.
window : float > 0
size of the window (in seconds). For each query frame q,
result frames are only counted within q +- window.
frame_size : float > 0
length (in seconds) of frames. The frame size cannot be longer than
the window.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
t_precision : number [0, 1]
T-measure Precision
t_recall : number [0, 1]
T-measure Recall
t_measure : number [0, 1]
F-beta measure for ``(t_precision, t_recall)``
Raises
------
ValueError
If either of the input hierarchies is inconsistent
If the input hierarchies have different time durations
If ``frame_size > window`` or ``frame_size <= 0``
'''
# Compute the number of frames in the window
if frame_size <= 0:
raise ValueError('frame_size ({:.2f}) must be a positive '
'number.'.format(frame_size))
if window is None:
window_frames = None
else:
if frame_size > window:
raise ValueError('frame_size ({:.2f}) cannot exceed '
'window ({:.2f})'.format(frame_size, window))
window_frames = int(_round(window, frame_size) / frame_size)
# Validate the hierarchical segmentations
validate_hier_intervals(reference_intervals_hier)
validate_hier_intervals(estimated_intervals_hier)
# Build the least common ancestor matrices
ref_lca = _lca(reference_intervals_hier, frame_size)
est_lca = _lca(estimated_intervals_hier, frame_size)
# Compute precision and recall
t_recall = _gauc(ref_lca, est_lca, transitive, window_frames)
t_precision = _gauc(est_lca, ref_lca, transitive, window_frames)
t_measure = util.f_measure(t_precision, t_recall, beta=beta)
return t_precision, t_recall, t_measure | [] |
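A usage sketch with toy two-level hierarchies (interval values invented for illustration):

import numpy as np
import mir_eval

ref_i = [np.array([[0, 30], [30, 60]]),
         np.array([[0, 15], [15, 30], [30, 45], [45, 60]])]
est_i = [np.array([[0, 45], [45, 60]]),
         np.array([[0, 15], [15, 30], [30, 45], [45, 60]])]
t_prec, t_rec, t_f = mir_eval.hierarchy.tmeasure(ref_i, est_i, transitive=True)
print(t_prec, t_rec, t_f)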
Please provide a description of the function:def lmeasure(reference_intervals_hier, reference_labels_hier,
estimated_intervals_hier, estimated_labels_hier,
frame_size=0.1, beta=1.0):
'''Computes the label measures (L-measures) for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
reference_labels_hier : list of list of str
``reference_labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
estimated_intervals_hier : list of ndarray
estimated_labels_hier : list of ndarray
Like ``reference_intervals_hier`` and ``reference_labels_hier``
but for the estimated annotation
frame_size : float > 0
length (in seconds) of frames.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
l_precision : number [0, 1]
L-measure Precision
l_recall : number [0, 1]
L-measure Recall
l_measure : number [0, 1]
F-beta measure for ``(l_precision, l_recall)``
Raises
------
ValueError
If either of the input hierarchies is inconsistent
If the input hierarchies have different time durations
If ``frame_size <= 0``
'''
# Compute the number of frames in the window
if frame_size <= 0:
raise ValueError('frame_size ({:.2f}) must be a positive '
'number.'.format(frame_size))
# Validate the hierarchical segmentations
validate_hier_intervals(reference_intervals_hier)
validate_hier_intervals(estimated_intervals_hier)
# Build the least common ancestor matrices
ref_meet = _meet(reference_intervals_hier, reference_labels_hier,
frame_size)
est_meet = _meet(estimated_intervals_hier, estimated_labels_hier,
frame_size)
# Compute precision and recall
l_recall = _gauc(ref_meet, est_meet, True, None)
l_precision = _gauc(est_meet, ref_meet, True, None)
l_measure = util.f_measure(l_precision, l_recall, beta=beta)
return l_precision, l_recall, l_measure | [] |
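A matching usage sketch for the labelled variant (same toy hierarchy, labels invented for illustration):

import numpy as np
import mir_eval

ref_i = [np.array([[0, 30], [30, 60]]),
         np.array([[0, 15], [15, 30], [30, 45], [45, 60]])]
ref_l = [['A', 'B'], ['a', 'b', 'a', 'c']]
est_i = [np.array([[0, 45], [45, 60]]),
         np.array([[0, 15], [15, 30], [30, 45], [45, 60]])]
est_l = [['A', 'B'], ['a', 'a', 'b', 'b']]
l_prec, l_rec, l_f = mir_eval.hierarchy.lmeasure(ref_i, ref_l, est_i, est_l)
print(l_prec, l_rec, l_f)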
Please provide a description of the function:def evaluate(ref_intervals_hier, ref_labels_hier,
est_intervals_hier, est_labels_hier, **kwargs):
'''Compute all hierarchical structure metrics for the given reference and
estimated annotations.
Examples
--------
A toy example with two two-layer annotations
>>> ref_i = [[[0, 30], [30, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> est_i = [[[0, 45], [45, 60]], [[0, 15], [15, 30], [30, 45], [45, 60]]]
>>> ref_l = [ ['A', 'B'], ['a', 'b', 'a', 'c'] ]
>>> est_l = [ ['A', 'B'], ['a', 'a', 'b', 'b'] ]
>>> scores = mir_eval.hierarchy.evaluate(ref_i, ref_l, est_i, est_l)
>>> dict(scores)
{'T-Measure full': 0.94822745804853459,
'T-Measure reduced': 0.8732458222764804,
'T-Precision full': 0.96569179094693058,
'T-Precision reduced': 0.89939075137018787,
'T-Recall full': 0.93138358189386117,
'T-Recall reduced': 0.84857799953694923}
A more realistic example, using SALAMI pre-parsed annotations
>>> def load_salami(filename):
... "load SALAMI event format as labeled intervals"
... events, labels = mir_eval.io.load_labeled_events(filename)
... intervals = mir_eval.util.boundaries_to_intervals(events)[0]
... return intervals, labels[:len(intervals)]
>>> ref_files = ['data/10/parsed/textfile1_uppercase.txt',
... 'data/10/parsed/textfile1_lowercase.txt']
>>> est_files = ['data/10/parsed/textfile2_uppercase.txt',
... 'data/10/parsed/textfile2_lowercase.txt']
>>> ref = [load_salami(fname) for fname in ref_files]
>>> ref_int = [seg[0] for seg in ref]
>>> ref_lab = [seg[1] for seg in ref]
>>> est = [load_salami(fname) for fname in est_files]
>>> est_int = [seg[0] for seg in est]
>>> est_lab = [seg[1] for seg in est]
>>> scores = mir_eval.hierarchy.evaluate(ref_int, ref_lab,
... est_int, est_lab)
>>> dict(scores)
{'T-Measure full': 0.66029225561405358,
'T-Measure reduced': 0.62001868041578034,
'T-Precision full': 0.66844764668949885,
'T-Precision reduced': 0.63252297209957919,
'T-Recall full': 0.6523334654992341,
'T-Recall reduced': 0.60799919710921635}
Parameters
----------
ref_intervals_hier : list of list-like
ref_labels_hier : list of list of str
est_intervals_hier : list of list-like
est_labels_hier : list of list of str
Hierarchical annotations are encoded as an ordered list
of segmentations. Each segmentation itself is a list (or list-like)
of intervals (\*_intervals_hier) and a list of lists of labels
(\*_labels_hier).
kwargs
additional keyword arguments to the evaluation metrics.
Returns
-------
scores : OrderedDict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
T-measures are computed in both the "full" (``transitive=True``) and
"reduced" (``transitive=False``) modes.
Raises
------
ValueError
Thrown when the provided annotations are not valid.
'''
# First, find the maximum length of the reference
_, t_end = _hierarchy_bounds(ref_intervals_hier)
# Pre-process the intervals to match the range of the reference,
# and start at 0
ref_intervals_hier, ref_labels_hier = _align_intervals(ref_intervals_hier,
ref_labels_hier,
t_min=0.0,
t_max=None)
est_intervals_hier, est_labels_hier = _align_intervals(est_intervals_hier,
est_labels_hier,
t_min=0.0,
t_max=t_end)
scores = collections.OrderedDict()
# Force the transitivity setting
kwargs['transitive'] = False
(scores['T-Precision reduced'],
scores['T-Recall reduced'],
scores['T-Measure reduced']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
kwargs['transitive'] = True
(scores['T-Precision full'],
scores['T-Recall full'],
scores['T-Measure full']) = util.filter_kwargs(tmeasure,
ref_intervals_hier,
est_intervals_hier,
**kwargs)
(scores['L-Precision'],
scores['L-Recall'],
scores['L-Measure']) = util.filter_kwargs(lmeasure,
ref_intervals_hier,
ref_labels_hier,
est_intervals_hier,
est_labels_hier,
**kwargs)
return scores | [] |
Please provide a description of the function:def __expand_limits(ax, limits, which='x'):
'''Helper function to expand axis limits'''
if which == 'x':
getter, setter = ax.get_xlim, ax.set_xlim
elif which == 'y':
getter, setter = ax.get_ylim, ax.set_ylim
else:
raise ValueError('invalid axis: {}'.format(which))
old_lims = getter()
new_lims = list(limits)
# infinite limits occur on new axis objects with no data
if np.isfinite(old_lims[0]):
new_lims[0] = min(old_lims[0], limits[0])
if np.isfinite(old_lims[1]):
new_lims[1] = max(old_lims[1], limits[1])
setter(new_lims) | [] |
Please provide a description of the function:def __get_axes(ax=None, fig=None):
'''Get or construct the target axes object for a new plot.
Parameters
----------
ax : matplotlib.pyplot.axes, optional
If provided, return this axes object directly.
fig : matplotlib.figure.Figure, optional
The figure to query for axes.
By default, uses the current figure `plt.gcf()`.
Returns
-------
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
new_axes : bool
If `True`, the axis object was newly constructed.
If `False`, the axis object already existed.
'''
new_axes = False
if ax is not None:
return ax, new_axes
if fig is None:
import matplotlib.pyplot as plt
fig = plt.gcf()
if not fig.get_axes():
new_axes = True
return fig.gca(), new_axes | [] |
Please provide a description of the function:def segments(intervals, labels, base=None, height=None, text=False,
text_kw=None, ax=None, **kwargs):
'''Plot a segmentation as a set of disjoint rectangles.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
base : number
The vertical position of the base of the rectangles.
By default, this will be the bottom of the plot.
height : number
The height of the rectangles.
By default, this will be the top of the plot (minus ``base``).
text : bool
If true, each segment's label is displayed in its
upper-left corner
text_kw : dict
If ``text == True``, the properties of the text
object can be specified here.
See ``matplotlib.pyplot.Text`` for valid parameters
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to pass to
``matplotlib.patches.Rectangle``.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
seg_def_style = dict(linewidth=1)
ax, new_axes = __get_axes(ax=ax)
if new_axes:
ax.set_ylim([0, 1])
# Infer height
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
seg_map = dict()
for lab in labels:
if lab in seg_map:
continue
style = next(cycler)
seg_map[lab] = seg_def_style.copy()
seg_map[lab].update(style)
# Swap color -> facecolor here so we preserve edgecolor on rects
seg_map[lab]['facecolor'] = seg_map[lab].pop('color')
seg_map[lab].update(kwargs)
seg_map[lab]['label'] = lab
for ival, lab in zip(intervals, labels):
rect = Rectangle((ival[0], base), ival[1] - ival[0], height,
**seg_map[lab])
ax.add_patch(rect)
seg_map[lab].pop('label', None)
if text:
ann = ax.annotate(lab,
xy=(ival[0], height), xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
ann.set_clip_path(rect)
if new_axes:
ax.set_yticks([])
# Only expand if we have data
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax | [] |
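A usage sketch (hypothetical intervals and labels) plotting a flat segmentation as labelled rectangles:

import numpy as np
import matplotlib.pyplot as plt
import mir_eval.display

intervals = np.array([[0.0, 10.0], [10.0, 25.0], [25.0, 40.0]])
labels = ['verse', 'chorus', 'verse']
ax = mir_eval.display.segments(intervals, labels, text=True)
ax.set_xlabel('Time (s)')
ax.legend()
plt.show()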
Please provide a description of the function:def labeled_intervals(intervals, labels, label_set=None,
base=None, height=None, extend_labels=True,
ax=None, tick=True, **kwargs):
'''Plot labeled intervals with each label on its own row.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
label_set : list
An (ordered) list of labels to determine the plotting order.
If not provided, the labels will be inferred from
``ax.get_yticklabels()``.
If no ``yticklabels`` exist, then the sorted set of unique values
in ``labels`` is taken as the label set.
base : np.ndarray, shape=(n,), optional
Vertical positions of each label.
By default, labels are positioned at integers
``np.arange(len(labels))``.
height : scalar or np.ndarray, shape=(n,), optional
Height for each label.
If scalar, the same value is applied to all labels.
By default, each label has ``height=1``.
extend_labels : bool
If ``False``, only values of ``labels`` that also exist in
``label_set`` will be shown.
If ``True``, all labels are shown, with those in `labels` but
not in `label_set` appended to the top of the plot.
A horizontal line is drawn to indicate the separation between
values in or out of ``label_set``.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
tick : bool
If ``True``, sets tick positions and labels on the y-axis.
kwargs
Additional keyword arguments to pass to
`matplotlib.collection.BrokenBarHCollection`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Make sure we have a numpy array
intervals = np.atleast_2d(intervals)
if label_set is None:
# If we have non-empty pre-existing tick labels, use them
label_set = [_.get_text() for _ in ax.get_yticklabels()]
# If none of the label strings have content, treat it as empty
if not any(label_set):
label_set = []
else:
label_set = list(label_set)
# Put additional labels at the end, in order
if extend_labels:
ticks = label_set + sorted(set(labels) - set(label_set))
elif label_set:
ticks = label_set
else:
ticks = sorted(set(labels))
style = dict(linewidth=1)
style.update(next(ax._get_patches_for_fill.prop_cycler))
# Swap color -> facecolor here so we preserve edgecolor on rects
style['facecolor'] = style.pop('color')
style.update(kwargs)
if base is None:
base = np.arange(len(ticks))
if height is None:
height = 1
if np.isscalar(height):
height = height * np.ones_like(base)
seg_y = dict()
for ybase, yheight, lab in zip(base, height, ticks):
seg_y[lab] = (ybase, yheight)
xvals = defaultdict(list)
for ival, lab in zip(intervals, labels):
if lab not in seg_y:
continue
xvals[lab].append((ival[0], ival[1] - ival[0]))
for lab in seg_y:
ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab],
**style))
# Pop the label after the first time we see it, so we only get
# one legend entry
style.pop('label', None)
# Draw a line separating the new labels from pre-existing labels
if label_set != ticks:
ax.axhline(len(label_set), color='k', alpha=0.5)
if tick:
ax.grid(True, axis='y')
ax.set_yticks([])
ax.set_yticks(base)
ax.set_yticklabels(ticks, va='bottom')
ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks))
if base.size:
__expand_limits(ax, [base.min(), (base + height).max()], which='y')
if intervals.size:
__expand_limits(ax, [intervals.min(), intervals.max()], which='x')
return ax | [] |
Please provide a description of the function:def hierarchy(intervals_hier, labels_hier, levels=None, ax=None, **kwargs):
'''Plot a hierarchical segmentation
Parameters
----------
intervals_hier : list of np.ndarray
A list of segmentation intervals. Each element should be
an n-by-2 array of segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
Segmentations should be ordered by increasing specificity.
labels_hier : list of list-like
A list of segmentation labels. Each element should
be a list of labels for the corresponding element in
`intervals_hier`.
levels : list of string
Each element ``levels[i]`` is a label for the ``i`` th segmentation.
This is used in the legend to denote the levels in a segment hierarchy.
kwargs
Additional keyword arguments to `labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# This will break if a segment label exists in multiple levels
if levels is None:
levels = list(range(len(intervals_hier)))
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Count the pre-existing patches
n_patches = len(ax.patches)
for ints, labs, key in zip(intervals_hier[::-1],
labels_hier[::-1],
levels[::-1]):
labeled_intervals(ints, labs, label=key, ax=ax, **kwargs)
# Reverse the patch ordering for anything we've added.
# This way, intervals are listed in the legend from top to bottom
ax.patches[n_patches:] = ax.patches[n_patches:][::-1]
return ax | [] |
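A usage sketch with a hypothetical two-level annotation (interval values and level names are assumptions for illustration):

import numpy as np
import matplotlib.pyplot as plt
import mir_eval.display

intervals_hier = [np.array([[0, 30], [30, 60]]),
                  np.array([[0, 15], [15, 30], [30, 45], [45, 60]])]
labels_hier = [['A', 'B'], ['a', 'b', 'a', 'c']]
ax = mir_eval.display.hierarchy(intervals_hier, labels_hier,
                                levels=['sections', 'subsections'])
ax.legend()
plt.show()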
Please provide a description of the function:def events(times, labels=None, base=None, height=None, ax=None, text_kw=None,
**kwargs):
'''Plot event times as a set of vertical lines
Parameters
----------
times : np.ndarray, shape=(n,)
event times, in the format returned by
:func:`mir_eval.io.load_events` or
:func:`mir_eval.io.load_labeled_events`.
labels : list, shape=(n,), optional
event labels, in the format returned by
:func:`mir_eval.io.load_labeled_events`.
base : number
The vertical position of the base of the line.
By default, this will be the bottom of the plot.
height : number
The height of the lines.
By default, this will be the top of the plot (minus `base`).
ax : matplotlib.pyplot.axes
An axis handle on which to draw the segmentation.
If none is provided, a new set of axes is created.
text_kw : dict
If `labels` is provided, the properties of the text
objects can be specified here.
See `matplotlib.pyplot.Text` for valid parameters
kwargs
Additional keyword arguments to pass to
`matplotlib.pyplot.vlines`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if text_kw is None:
text_kw = dict()
text_kw.setdefault('va', 'top')
text_kw.setdefault('clip_on', True)
text_kw.setdefault('bbox', dict(boxstyle='round', facecolor='white'))
# make sure we have an array for times
times = np.asarray(times)
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# If we have fresh axes, set the limits
if new_axes:
# Infer base and height
if base is None:
base = 0
if height is None:
height = 1
ax.set_ylim([base, height])
else:
if base is None:
base = ax.get_ylim()[0]
if height is None:
height = ax.get_ylim()[1]
cycler = ax._get_patches_for_fill.prop_cycler
style = next(cycler).copy()
style.update(kwargs)
# If the user provided 'colors', don't override it with 'color'
if 'colors' in style:
style.pop('color', None)
lines = ax.vlines(times, base, base + height, **style)
if labels:
for path, lab in zip(lines.get_paths(), labels):
ax.annotate(lab,
xy=(path.vertices[0][0], height),
xycoords='data',
xytext=(8, -10), textcoords='offset points',
**text_kw)
if new_axes:
ax.set_yticks([])
__expand_limits(ax, [base, base + height], which='y')
if times.size:
__expand_limits(ax, [times.min(), times.max()], which='x')
return ax | [] |
Please provide a description of the function:def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
'''Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
# First, segment into contiguously voiced contours
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=np.float))
# Here are all the change-points
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
# Set up arrays of slices for voiced and unvoiced regions
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
# A region is voiced if its starting sample is voiced
# It's unvoiced if none of the samples in the region are voiced.
if voicings[start]:
v_slices.append(idx)
elif frequencies[idx].all():
u_slices.append(idx)
# Now we just need to plot the contour
style = dict()
style.update(next(ax._get_lines.prop_cycler))
style.update(kwargs)
if midi:
idx = frequencies > 0
frequencies[idx] = hz_to_midi(frequencies[idx])
# Tick at integer midi notes
ax.yaxis.set_minor_locator(MultipleLocator(1))
for idx in v_slices:
ax.plot(times[idx], frequencies[idx], **style)
style.pop('label', None)
# Plot the unvoiced portions
if unvoiced:
style['alpha'] = style.get('alpha', 1.0) * 0.5
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax | [] |
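A usage sketch with a synthetic contour; negative frequencies mark unvoiced frames (all values invented for illustration):

import numpy as np
import matplotlib.pyplot as plt
import mir_eval.display

times = np.arange(0, 5, 0.01)
freqs = 220.0 + 20.0 * np.sin(2 * np.pi * times)
freqs[200:300] *= -1          # mark one second of the contour as unvoiced
ax = mir_eval.display.pitch(times, freqs, midi=True, unvoiced=True)
plt.show()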
Please provide a description of the function:def multipitch(times, frequencies, midi=False, unvoiced=False, ax=None,
**kwargs):
'''Visualize multiple f0 measurements
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : list of np.ndarray
frequencies (in Hz) of the pitch measurements.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
`times` and `frequencies` should be in the format produced by
:func:`mir_eval.io.load_ragged_time_series`
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitches are plotted and indicated
by transparency.
Otherwise, unvoiced pitches are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `plt.scatter`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
# Get the axes handle
ax, _ = __get_axes(ax=ax)
# Set up a style for the plot
style_voiced = dict()
style_voiced.update(next(ax._get_lines.prop_cycler))
style_voiced.update(kwargs)
style_unvoiced = style_voiced.copy()
style_unvoiced.pop('label', None)
style_unvoiced['alpha'] = style_unvoiced.get('alpha', 1.0) * 0.5
# We'll collect all times and frequencies first, then plot them
voiced_times = []
voiced_freqs = []
unvoiced_times = []
unvoiced_freqs = []
for t, freqs in zip(times, frequencies):
if not len(freqs):
continue
freqs, voicings = freq_to_voicing(np.asarray(freqs, dtype=np.float))
# Discard all 0-frequency measurements
idx = freqs > 0
freqs = freqs[idx]
voicings = voicings[idx]
if midi:
freqs = hz_to_midi(freqs)
n_voiced = sum(voicings)
voiced_times.extend([t] * n_voiced)
voiced_freqs.extend(freqs[voicings])
unvoiced_times.extend([t] * (len(freqs) - n_voiced))
unvoiced_freqs.extend(freqs[~voicings])
# Plot the voiced frequencies
ax.scatter(voiced_times, voiced_freqs, **style_voiced)
# Plot the unvoiced frequencies
if unvoiced:
ax.scatter(unvoiced_times, unvoiced_freqs, **style_unvoiced)
# Tick at integer midi notes
if midi:
ax.yaxis.set_minor_locator(MultipleLocator(1))
return ax | [] |
Please provide a description of the function:def piano_roll(intervals, pitches=None, midi=None, ax=None, **kwargs):
'''Plot a quantized piano roll as intervals
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
timing intervals for notes
pitches : np.ndarray, shape=(n,), optional
pitches of notes (in Hz).
midi : np.ndarray, shape=(n,), optional
pitches of notes (in MIDI numbers).
At least one of ``pitches`` or ``midi`` must be provided.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the intervals.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to :func:`labeled_intervals`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes
'''
if midi is None:
if pitches is None:
raise ValueError('At least one of `midi` or `pitches` '
'must be provided.')
midi = hz_to_midi(pitches)
scale = np.arange(128)
ax = labeled_intervals(intervals, np.round(midi).astype(int),
label_set=scale,
tick=False,
ax=ax,
**kwargs)
# Minor tick at each semitone
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.axis('auto')
return ax | [] |
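A usage sketch with a few hypothetical notes given directly as MIDI numbers:

import numpy as np
import matplotlib.pyplot as plt
import mir_eval.display

intervals = np.array([[0.0, 0.5], [0.5, 1.0], [1.0, 2.0]])
midi_notes = np.array([60, 64, 67])       # C4, E4, G4
ax = mir_eval.display.piano_roll(intervals, midi=midi_notes)
mir_eval.display.ticker_notes(ax)
plt.show()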
Please provide a description of the function:def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
'''Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
'''
# Get the axes handle
ax, new_axes = __get_axes(ax=ax)
# Make sure we have at least two dimensions
sources = np.atleast_2d(sources)
if labels is None:
labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
kwargs.setdefault('scaling', 'spectrum')
# The cumulative spectrogram across sources
# is used to establish the reference power
# for each individual source
cumspec = None
specs = []
for i, src in enumerate(sources):
freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
specs.append(spec)
if cumspec is None:
cumspec = spec.copy()
else:
cumspec += spec
ref_max = cumspec.max()
ref_min = ref_max * 1e-6
color_conv = ColorConverter()
for i, spec in enumerate(specs):
# For each source, grab a new color from the cycler
# Then construct a colormap that interpolates from
# [transparent white -> new color]
color = next(ax._get_lines.prop_cycler)['color']
color = color_conv.to_rgba(color, alpha=alpha)
cmap = LinearSegmentedColormap.from_list(labels[i],
[(1.0, 1.0, 1.0, 0.0),
color])
ax.pcolormesh(times, freqs, spec,
cmap=cmap,
norm=LogNorm(vmin=ref_min, vmax=ref_max),
shading='gouraud',
label=labels[i])
# Attach a 0x0 rect to the axis with the corresponding label
# This way, it will show up in the legend
ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
if new_axes:
ax.axis('tight')
return ax | [] |
Please provide a description of the function:def __ticker_midi_note(x, pos):
'''A ticker function for midi notes.
Inputs x are interpreted as midi numbers, and converted
to [NOTE][OCTAVE]+[cents].
'''
NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
cents = float(np.mod(x, 1.0))
if cents >= 0.5:
cents = cents - 1.0
x = x + 0.5
idx = int(x % 12)
octave = int(x / 12) - 1
if cents == 0:
return '{:s}{:2d}'.format(NOTES[idx], octave)
return '{:s}{:2d}{:+02d}'.format(NOTES[idx], octave, int(cents * 100)) | [] |
Please provide a description of the function:def ticker_notes(ax=None):
'''Set the y-axis of the given axes to MIDI notes
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_NOTE)
# Get the tick labels and reset the vertical alignment
for tick in ax.yaxis.get_ticklabels():
tick.set_verticalalignment('baseline') | [] |
Please provide a description of the function:def ticker_pitch(ax=None):
'''Set the y-axis of the given axes to MIDI frequencies
Parameters
----------
ax : matplotlib.pyplot.axes
The axes handle to apply the ticker.
By default, uses the current axes handle.
'''
ax, _ = __get_axes(ax=ax)
ax.yaxis.set_major_formatter(FMT_MIDI_HZ) | [] |
Please provide a description of the function:def run(self, **import_params):
if self.file:
import_params["url"] = self.file
self.id_field = "id"
if "connection" in import_params:
self.fields.append("connector")
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
super(FileImportJob, self).run(params=import_params,
files=self.files) | [
"\n Actually creates the import job on the CARTO server\n\n :param import_params: To be send to the Import API, see CARTO's docs\n on Import API for an updated list of accepted\n params\n :type import_params: kwargs\n\n :return:\n\n .. note:: The import job is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO\n "
] |
Please provide a description of the function:def filter(self):
try:
response = self.send(self.get_collection_endpoint(), "get")
if self.json_collection_attribute is not None:
resource_ids = self.client.get_response_data(
response,
self.Meta.parse_json)[self.json_collection_attribute]
else:
resource_ids = self.client.get_response_data(
response, self.Meta.parse_json)
except Exception as e:
raise CartoException(e)
resources = []
for resource_id in resource_ids:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
setattr(resource, resource.Meta.id_field, resource_id)
resources.append(resource)
return resources | [
"\n Get a filtered list of file imports\n\n :return: A list of file imports, with only the id set (you need to\n refresh them if you want all the attributes to be filled in)\n :rtype: list of :class:`carto.file_import.FileImportJob`\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def send(self, relative_path, http_method, **requests_args):
try:
http_method, requests_args = self.prepare_send(http_method, **requests_args)
response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
except Exception as e:
raise CartoException(e)
if CartoRateLimitException.is_rate_limited(response):
raise CartoRateLimitException(response)
return response | [
"\n Makes an API-key-authorized request\n\n :param relative_path: URL path relative to self.base_url\n :param http_method: HTTP method\n :param requests_args: kwargs to be sent to requests\n :type relative_path: str\n :type http_method: str\n :type requests_args: kwargs\n\n :return:\n A request response object\n :raise:\n CartoException\n "
] |
Please provide a description of the function:def is_valid_api_key(self):
res = self.send('api/v3/api_keys', 'get')
return \
res.ok and \
self.api_key in (ak['token'] for ak in res.json()['result']) | [
"\n Checks validity. Right now, an API key is considered valid if it\n can list user API keys and the result contains that API key.\n This might change in the future.\n\n :return: True if the API key is considered valid for current user.\n "
] |
Please provide a description of the function:def run(self, **export_params):
export_params["visualization_id"] = self.visualization_id
return super(ExportJob, self).run(params=export_params) | [
"\n Make the actual request to the Import API (exporting is part of the\n Import API).\n\n :param export_params: Any additional parameters to be sent to the\n Import API\n :type export_params: kwargs\n\n :return:\n\n .. note:: The export is asynchronous, so you should take care of the progression, by calling the :func:`carto.resources.AsyncResource.refresh` method and check the export job :py:attr:`~state` attribute. See :func:`carto.visualizations.Visualization.export` method implementation for more details\n "
] |
Please provide a description of the function:def send(self, url, http_method, **client_args):
try:
client_args = client_args or {}
if "params" not in client_args:
client_args["params"] = {}
client_args["params"].update({"type": "table",
"exclude_shared": "true"})
return super(DatasetManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e) | [
"\n Sends an API request, taking into account that datasets are part of\n the visualization endpoint.\n\n :param url: Endpoint URL\n :param http_method: The method used to make the request to the API\n :param client_args: Arguments to be sent to the auth client\n :type url: str\n :type http_method: str\n :type client_args: kwargs\n\n :return: A request response object\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def is_sync_table(self, archive, interval, **import_args):
return (hasattr(archive, "startswith") and archive.startswith("http")
or "connection" in import_args) \
and interval is not None | [
"\n Checks if this is a request for a sync dataset.\n\n The condition for creating a sync dataset is to provide a URL or a\n connection to an external database and an interval in seconds\n\n :param archive: URL to the file (both remote URLs or local paths are\n supported) or StringIO object\n :param interval: Interval in seconds.\n :param import_args: Connection parameters for an external database\n :type url: str\n :type interval: int\n :type import_args: kwargs\n\n :return: True if it is a sync dataset\n\n "
] |
Please provide a description of the function:def create(self, archive, interval=None, **import_args):
archive = archive.lower() if hasattr(archive, "lower") else archive
if self.is_sync_table(archive, interval, **import_args):
manager = SyncTableJobManager(self.client)
else:
manager = FileImportJobManager(self.client)
import_job = manager.create(archive) if interval is None \
else manager.create(archive, interval)
import_job.run(**import_args)
if import_job.get_id() is None:
raise CartoException(_("Import API returned corrupt job details \
when creating dataset"))
import_job.refresh()
count = 0
while import_job.state in ("enqueued", "queued", "pending", "uploading",
"unpacking", "importing", "guessing") \
or (isinstance(manager, SyncTableJobManager)
and import_job.state == "created"):
if count >= MAX_NUMBER_OF_RETRIES:
raise CartoException(_("Maximum number of retries exceeded \
when polling the import API for \
dataset creation"))
time.sleep(INTERVAL_BETWEEN_RETRIES_S)
import_job.refresh()
count += 1
if import_job.state == "failure":
raise CartoException(_("Dataset creation was not successful \
because of failed import (error: {error}")
.format(error=json.dumps(
import_job.get_error_text)))
if (import_job.state != "complete" and import_job.state != "created"
and import_job.state != "success") \
or import_job.success is False:
raise CartoException(_("Dataset creation was not successful \
because of unknown import error"))
if hasattr(import_job, "visualization_id") \
and import_job.visualization_id is not None:
visualization_id = import_job.visualization_id
else:
table = TableManager(self.client).get(import_job.table_id)
visualization_id = table.table_visualization.get_id() \
if table is not None else None
try:
return self.get(visualization_id) if visualization_id is not None \
else None
except AttributeError:
raise CartoException(_("Dataset creation was not successful \
because of unknown error")) | [
"\n Creating a table means uploading a file or setting up a sync table\n\n :param archive: URL to the file (both remote URLs or local paths are\n supported) or StringIO object\n :param interval: Interval in seconds.\n If not None, CARTO will try to set up a sync table\n against the (remote) URL\n :param import_args: Arguments to be sent to the import job when run\n :type archive: str\n :type interval: int\n :type import_args: kwargs\n\n :return: New dataset object\n :rtype: Dataset\n\n :raise: CartoException\n "
] |
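A usage sketch (base URL, API key, and file path are placeholders) for importing a local file as a CARTO dataset:

from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager

# Placeholders: substitute your own CARTO base URL and API key.
auth_client = APIKeyAuthClient(base_url='https://myuser.carto.com/',
                               api_key='MY_API_KEY')
dataset_manager = DatasetManager(auth_client)
dataset = dataset_manager.create('path/to/local_file.csv')
print(dataset.name)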
Please provide a description of the function:def export(self):
export_job = ExportJob(self.client, self.get_id())
export_job.run()
export_job.refresh()
count = 0
while export_job.state in ("exporting", "enqueued", "pending"):
if count >= MAX_NUMBER_OF_RETRIES:
raise CartoException(_("Maximum number of retries exceeded \
when polling the import API for \
visualization export"))
time.sleep(INTERVAL_BETWEEN_RETRIES_S)
export_job.refresh()
count += 1
if export_job.state == "failure":
raise CartoException(_("Visualization export failed"))
if (export_job.state != "complete" and export_job.state != "created"):
raise CartoException(_("Unexpected problem on visualization export \
(state: {state})").
format(state=export_job.state))
return export_job.url | [
"\n Make the actual request to the Import API (exporting is part of the\n Import API) to export a map visualization as a .carto file\n\n :return: A URL pointing to the .carto file\n :rtype: str\n\n :raise: CartoException\n\n .. warning:: Non-public API. It may change with no previous notice\n\n .. note:: The export is asynchronous, but this method waits for the export to complete. See `MAX_NUMBER_OF_RETRIES` and `INTERVAL_BETWEEN_RETRIES_S`\n "
] |
Please provide a description of the function:def send(self, url, http_method, **client_args):
try:
client_args.setdefault('params', {})
client_args["params"].update({"type": "derived",
"exclude_shared": "true"})
return super(VisualizationManager, self).send(url,
http_method,
**client_args)
except Exception as e:
raise CartoException(e) | [
"\n Sends API request, taking into account that visualizations are only a\n subset of the resources available at the visualization endpoint\n\n :param url: Endpoint URL\n :param http_method: The method used to make the request to the API\n :param client_args: Arguments to be sent to the auth client\n :type url: str\n :type http_method: str\n :type client_args: kwargs\n\n :return:\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def is_rate_limited(response):
if (response.status_code == codes.too_many_requests and 'Retry-After' in response.headers and
int(response.headers['Retry-After']) >= 0):
return True
return False | [
"\n Checks if the response has been rate limited by CARTO APIs\n\n :param response: The response rate limited by CARTO APIs\n :type response: requests.models.Response class\n\n :return: Boolean\n "
] |
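A hedged sketch of how a caller might honor the Retry-After header that this check inspects (endpoint, query, and key are placeholders):

import time
import requests

response = requests.get('https://myuser.carto.com/api/v2/sql',
                        params={'q': 'SELECT 1', 'api_key': 'MY_API_KEY'})
if response.status_code == requests.codes.too_many_requests:
    wait_s = int(response.headers.get('Retry-After', 1))
    time.sleep(wait_s)   # back off for the advertised interval before retrying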
Please provide a description of the function:def get_tile_url(self, x, y, z, layer_id=None, feature_id=None,
filter=None, extension="png"):
base_url = self.client.base_url + self.Meta.collection_endpoint
template_id = self.template_id if hasattr(self, 'template_id') \
else self.layergroupid
if layer_id is not None and feature_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/attributes/{feature_id}"). \
format(template_id=template_id,
layer=layer_id,
feature_id=feature_id)
elif layer_id is not None and filter is not None:
url = urljoin(base_url,
"{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
filter=filter,
z=z, x=x, y=y,
extension=extension)
elif layer_id is not None:
url = urljoin(base_url,
"{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \
format(template_id=template_id,
layer=layer_id,
z=z, x=x, y=y,
extension=extension)
else:
url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \
format(
template_id=template_id,
z=z, x=x, y=y,
extension=extension)
if hasattr(self, 'auth') and self.auth is not None \
and len(self.auth['valid_tokens']) > 0:
url = urljoin(url, "?auth_token={auth_token}"). \
format(auth_token=self.auth['valid_tokens'][0])
return url | [
"\n Prepares a URL to get data (raster or vector) from a NamedMap or\n AnonymousMap\n\n :param x: The x tile\n :param y: The y tile\n :param z: The zoom level\n :param layer_id: Can be a number (referring to the # layer of your \\\n map), all layers of your map, or a list of layers.\n To show just the basemap layer, enter the value 0\n To show the first layer, enter the value 1\n To show all layers, enter the value 'all'\n To show a list of layers, enter the comma separated \\\n layer value as '0,1,2'\n :param feature_id: The id of the feature\n :param filter: The filter to be applied to the layer\n :param extension: The format of the data to be retrieved: png, mvt, ...\n :type x: int\n :type y: int\n :type z: int\n :type layer_id: str\n :type feature_id: str\n :type filter: str\n :type extension: str\n\n :return: A URL to download data\n :rtype: str\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def instantiate(self, params, auth=None):
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | [
"\n Allows you to fetch the map tiles of a created map\n\n :param params: The json with the styling info for the named map\n :param auth: The auth client\n :type params: dict\n :type auth: :class:`carto.auth.APIKeyAuthClient`\n\n :return:\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def update_from_dict(self, attribute_dict):
if 'template' in attribute_dict:
self.update_from_dict(attribute_dict['template'])
setattr(self,
self.Meta.id_field, attribute_dict['template']['name'])
return
try:
for k, v in attribute_dict.items():
setattr(self, k, v)
except Exception:
setattr(self, self.Meta.id_field, attribute_dict) | [
"\n Method overriden from the base class\n\n "
] |
Please provide a description of the function:def instantiate(self, params):
try:
self.send(self.Meta.collection_endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | [
"\n Allows you to fetch the map tiles of a created map\n\n :param params: The json with the styling info for the named map\n :type params: dict\n\n :return:\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def run(self, **client_params):
try:
self.send(self.get_collection_endpoint(),
http_method="POST",
**client_params)
except Exception as e:
raise CartoException(e) | [
"\n Actually creates the async job on the CARTO server\n\n\n :param client_params: To be send to the CARTO API. See CARTO's\n documentation depending on the subclass\n you are using\n :type client_params: kwargs\n\n\n :return:\n :raise: CartoException\n "
] |
Please provide a description of the function:def send(self, sql, parse_json=True, do_post=True, format=None, **request_args):
try:
params = {'q': sql}
if format:
params['format'] = format
if format not in ['json', 'geojson']:
parse_json = False
if request_args is not None:
for attr in request_args:
params[attr] = request_args[attr]
if len(sql) < MAX_GET_QUERY_LEN and do_post is False:
resp = self.auth_client.send(self.api_url,
'GET',
params=params)
else:
resp = self.auth_client.send(self.api_url, 'POST', data=params)
return self.auth_client.get_response_data(resp, parse_json)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | [
"\n Executes SQL query in a CARTO server\n\n :param sql: The SQL\n :param parse_json: Set it to False if you want raw reponse\n :param do_post: Set it to True to force post request\n :param format: Any of the data export formats allowed by CARTO's\n SQL API\n :param request_args: Additional parameters to send with the request\n :type sql: str\n :type parse_json: boolean\n :type do_post: boolean\n :type format: str\n :type request_args: dictionary\n\n :return: response data, either as json or as a regular\n response.content object\n :rtype: object\n\n :raise: CartoException\n "
] |
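A usage sketch of the SQL API wrapper (credentials and table name are placeholders):

from carto.auth import APIKeyAuthClient
from carto.sql import SQLClient

auth_client = APIKeyAuthClient(base_url='https://myuser.carto.com/',
                               api_key='MY_API_KEY')
sql = SQLClient(auth_client)
result = sql.send('SELECT count(*) AS n FROM my_table')
print(result['rows'])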
Please provide a description of the function:def update_from_dict(self, data_dict):
for k, v in data_dict.items():
setattr(self, k, v)
if "item_queue_id" in data_dict:
self.id = data_dict["item_queue_id"] | [
"\n :param data_dict: Dictionary to be mapped into object attributes\n :type data_dict: dict\n\n :return:\n "
] |
Please provide a description of the function:def send(self, url, http_method, json_body=None, http_header=None):
try:
data = self.client.send(url,
http_method=http_method,
headers=http_header,
json=json_body)
data_json = self.client.get_response_data(data)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return data_json | [
"\n Executes Batch SQL query in a CARTO server\n\n :param url: Endpoint url\n :param http_method: The method used to make the request to the API\n :param json_body: The information that needs to be sent, by default\n is set to None\n :param http_header: The header used to make write requests to the API,\n by default is none\n :type url: str\n :type http_method: str\n :type json_body: dict\n :type http_header: str\n\n :return: Response data, either as json or as a regular response.content\n object\n :rtype: object\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def create(self, sql_query):
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
return data | [
"\n Creates a new batch SQL query.\n\n Batch SQL jobs are asynchronous, once created you should call\n :func:`carto.sql.BatchSQLClient.read` method given the `job_id`\n to retrieve the state of the batch query\n\n :param sql_query: The SQL query to be used\n :type sql_query: str or list of str\n\n :return: Response data, either as json or as a regular response.content\n object\n :rtype: object\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def create_and_wait_for_completion(self, sql_query):
header = {'content-type': 'application/json'}
data = self.send(self.api_url,
http_method="POST",
json_body={"query": sql_query},
http_header=header)
warnings.warn('Batch SQL job created with job_id: {job_id}'.format(job_id=data['job_id']))
while data and data['status'] in BATCH_JOBS_PENDING_STATUSES:
time.sleep(BATCH_READ_STATUS_AFTER_SECONDS)
data = self.read(data['job_id'])
if data['status'] in BATCH_JOBS_FAILED_STATUSES:
raise CartoException(_("Batch SQL job failed with result: {data}".format(data=data)))
return data | [
"\n Creates a new batch SQL query and waits for its completion or failure\n\n Batch SQL jobs are asynchronous, once created this method\n automatically queries the job status until it's one of 'done',\n 'failed', 'canceled', 'unknown'\n\n :param sql_query: The SQL query to be used\n :type sql_query: str or list of str\n\n :return: Response data, either as json or as a regular response.content\n object\n :rtype: object\n\n :raise: CartoException when there's an exception in the BatchSQLJob execution or the batch job status is one of the BATCH_JOBS_FAILED_STATUSES ('failed', 'canceled', 'unknown')\n "
] |
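A hedged sketch of the batch client above; the statements and table name are invented, and the client is assumed to be built from the same kind of auth client as in the previous sketch. create() returns immediately, while create_and_wait_for_completion() polls read(job_id) until a terminal state.

from carto.auth import APIKeyAuthClient
from carto.sql import BatchSQLClient

auth_client = APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/',
                               api_key='YOUR_API_KEY')
batch = BatchSQLClient(auth_client)

# Fire-and-forget: poll the job state yourself with read(job_id).
job = batch.create(['UPDATE mytable SET processed = TRUE'])
print(job['job_id'], job['status'])

# Or block until the job reaches a terminal state ('done' or a failure).
finished = batch.create_and_wait_for_completion('VACUUM ANALYZE mytable')
print(finished['status'])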
Please provide a description of the function:def read(self, job_id):
data = self.send(self.api_url + job_id, http_method="GET")
return data | [
"\n Reads the information for a specific Batch API request\n\n :param job_id: The id of the job to be read from\n :type job_id: str\n\n :return: Response data, either as json or as a regular response.content\n object\n :rtype: object\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def update(self, job_id, sql_query):
header = {'content-type': 'application/json'}
data = self.send(self.api_url + job_id,
http_method="PUT",
json_body={"query": sql_query},
http_header=header)
return data | [
"\n Updates the sql query of a specific job\n\n :param job_id: The id of the job to be updated\n :param sql_query: The new SQL query for the job\n :type job_id: str\n :type sql_query: str\n\n :return: Response data, either as json or as a regular response.content\n object\n :rtype: object\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def cancel(self, job_id):
try:
confirmation = self.send(self.api_url + job_id, http_method="DELETE")
except CartoException as e:
if 'Cannot set status from done to cancelled' in e.args[0].args[0]:
return 'done'
else:
raise e
return confirmation['status'] | [
"\n Cancels a job\n\n :param job_id: The id of the job to be cancelled\n :type job_id: str\n\n :return: A status code depending on whether the cancel request was\n successful\n :rtype: str\n\n :raise CartoException:\n "
] |
Please provide a description of the function:def copyfrom(self, query, iterable_data, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
url = self.api_url + '/copyfrom'
headers = {
'Content-Type': 'application/octet-stream',
'Transfer-Encoding': 'chunked'
}
params = {'api_key': self.api_key, 'q': query}
if compress:
headers['Content-Encoding'] = 'gzip'
_iterable_data = self._compress_chunks(iterable_data,
compression_level)
else:
_iterable_data = iterable_data
try:
response = self.client.send(url,
http_method='POST',
params=params,
data=_iterable_data,
headers=headers,
stream=True)
response_json = self.client.get_response_data(response)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e)
return response_json | [
"\n Gets data from an iterable object into a table\n\n :param query: The \"COPY table_name [(column_name[, ...])]\n FROM STDIN [WITH(option[,...])]\" query to execute\n :type query: str\n\n :param iterable_data: An object that can be iterated\n to retrieve the data\n :type iterable_data: object\n\n :return: Response data as json\n :rtype: str\n\n :raise CartoException:\n "
] |
Please provide a description of the function:def copyfrom_file_object(self, query, file_object, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
chunk_generator = self._read_in_chunks(file_object)
return self.copyfrom(query, chunk_generator, compress,
compression_level) | [
"\n Gets data from a readable file object into a table\n\n :param query: The \"COPY table_name [(column_name[, ...])]\n FROM STDIN [WITH(option[,...])]\" query to execute\n :type query: str\n\n :param file_object: A file-like object.\n Normally the return value of open('file.ext', 'rb')\n :type file_object: file\n\n :return: Response data as json\n :rtype: str\n\n :raise CartoException:\n "
] |
Please provide a description of the function:def copyfrom_file_path(self, query, path, compress=True,
compression_level=DEFAULT_COMPRESSION_LEVEL):
with open(path, 'rb') as f:
result = self.copyfrom_file_object(query, f, compress,
compression_level)
return result | [
"\n Gets data from a readable file into a table\n\n :param query: The \"COPY table_name [(column_name[, ...])]\n FROM STDIN [WITH(option[,...])]\" query to execute\n :type query: str\n\n :param path: A path to a file\n :type path: str\n\n :return: Response data as json\n :rtype: str\n\n :raise CartoException:\n "
] |
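A sketch of a COPY FROM upload with the helpers above; the table, column list and CSV file are placeholders, and CopySQLClient is assumed to be the class these methods live on in carto-python.

from carto.auth import APIKeyAuthClient
from carto.sql import CopySQLClient

auth_client = APIKeyAuthClient(base_url='https://YOUR_USER.carto.com/',
                               api_key='YOUR_API_KEY')
copy_client = CopySQLClient(auth_client)

query = ('COPY mytable (name, value) '
         'FROM stdin WITH (FORMAT csv, HEADER true)')
result = copy_client.copyfrom_file_path(query, 'rows.csv')
print(result)  # a small JSON summary of the ingest; exact keys may vary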
Please provide a description of the function:def copyto(self, query):
url = self.api_url + '/copyto'
params = {'api_key': self.api_key, 'q': query}
try:
response = self.client.send(url,
http_method='GET',
params=params,
stream=True)
response.raise_for_status()
except CartoRateLimitException as e:
raise e
except HTTPError as e:
if 400 <= response.status_code < 500:
# Client error, provide better reason
reason = response.json()['error'][0]
error_msg = u'%s Client Error: %s' % (response.status_code,
reason)
raise CartoException(error_msg)
else:
raise CartoException(e)
except Exception as e:
raise CartoException(e)
return response | [
"\n Gets data from a table into a Response object that can be iterated\n\n :param query: The \"COPY { table_name [(column_name[, ...])] | (query) }\n TO STDOUT [WITH(option[,...])]\" query to execute\n :type query: str\n\n :return: response object\n :rtype: Response\n\n :raise CartoException:\n "
] |
Please provide a description of the function:def copyto_file_object(self, query, file_object):
response = self.copyto(query)
for block in response.iter_content(DEFAULT_CHUNK_SIZE):
file_object.write(block) | [
"\n Gets data from a table into a writable file object\n\n :param query: The \"COPY { table_name [(column_name[, ...])] | (query) }\n TO STDOUT [WITH(option[,...])]\" query to execute\n :type query: str\n\n :param file_object: A file-like object.\n Normally the return value of open('file.ext', 'wb')\n :type file_object: file\n\n :raise CartoException:\n "
] |
Please provide a description of the function:def copyto_file_path(self, query, path, append=False):
file_mode = 'wb' if not append else 'ab'
with open(path, file_mode) as f:
self.copyto_file_object(query, f) | [
"\n Gets data from a table into a writable file\n\n :param query: The \"COPY { table_name [(column_name[, ...])] | (query) }\n TO STDOUT [WITH(option[,...])]\" query to execute\n :type query: str\n\n :param path: A path to a writable file\n :type path: str\n\n :param append: Whether to append or not if the file already exists\n Default value is False\n :type append: bool\n\n :raise CartoException:\n "
] |
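And the matching COPY TO download, continuing the copy_client sketch above; the response is streamed to disk in DEFAULT_CHUNK_SIZE blocks, so large exports stay memory-friendly. Query and file names are placeholders.

query = 'COPY mytable TO stdout WITH (FORMAT csv, HEADER true)'
copy_client.copyto_file_path(query, 'export.csv')               # overwrite
copy_client.copyto_file_path(query, 'export.csv', append=True)  # append instead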
Please provide a description of the function:def run(self, **import_params):
import_params["url"] = self.url
import_params["interval"] = self.interval
if "connection" in import_params:
self.fields.append("connector")
import_params["connection"]["interval"] = self.interval
self.update_from_dict(import_params["connection"])
self.save(force_create=True)
else:
return super(SyncTableJob, self).run(params=import_params) | [
"\n Actually creates the job import on the CARTO server\n\n :param import_params: To be sent to the Import API, see CARTO's docs\n on Import API for an updated list of accepted\n params\n :type import_params: kwargs\n\n :return:\n\n .. note:: The sync table job is asynchronous, so you should track its progress by calling the :func:`carto.resources.AsyncResource.refresh` method and checking the import job :py:attr:`~state` attribute. See :func:`carto.datasets.DatasetManager.create` for a unified method to import files into CARTO\n "
] |
Please provide a description of the function:def force_sync(self):
try:
self.send(self.get_resource_endpoint(), "put")
except Exception as e:
raise CartoException(e) | [
"\n Forces to sync the SyncTableJob\n\n :return:\n\n :raise: CartoException\n "
] |
Please provide a description of the function:def load_data(flist, drop_duplicates=False):
'''
Usage: set train, target, and test key and feature files.
FEATURE_LIST_stage2 = {
'train':(
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
TEMP_PATH + 'v3_stage1_all_fold.csv',
),#target is not in 'train'
'target':(
INPUT_PATH + 'target.csv',
),#target is in 'target'
'test':(
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
TEMP_PATH + 'v3_stage1_test.csv',
),
}
'''
if (len(flist['train'])==0) or (len(flist['target'])==0) or (len(flist['test'])==0):
raise Exception('train, target, and test must each be given at least one file.')
X_train = pd.DataFrame()
test = pd.DataFrame()
print 'Reading train dataset'
for i in flist['train']:
X_train = pd.concat([X_train, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
print 'train dataset is created'
print 'Reading target data'
y_train = paratext.load_csv_to_pandas(PATH+flist['target'][0], allow_quoted_newlines=True)['target']
print 'Reading test dataset'
for i in flist['test']:
test = pd.concat([test, paratext.load_csv_to_pandas(PATH+i, allow_quoted_newlines=True)],axis=1)
#del test['t_id']
#print X_train.columns
#print test.columns
assert all(X_train.columns == test.columns)
print 'train shape :{}'.format(X_train.shape)
if drop_duplicates == True:
#delete identical columns
unique_col = X_train.T.drop_duplicates().T.columns
X_train = X_train[unique_col]
test = test[unique_col]
assert( all(X_train.columns == test.columns))
print 'train shape after concat and drop_duplicates :{}'.format(X_train.shape)
# drop constant features
#X_train = X_train.loc[:, (X_train != X_train.ix[0]).any()]
#test = test.loc[:, (test != test.ix[0]).any()]
#common_col = list(set(X_train.columns.tolist()) and set(test.columns.tolist()))
#X_train = X_train[common_col]
#test = test[common_col]
#print 'shape after dropping constant features: {}'.format(X_train.shape)
return X_train, y_train, test | [] |
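A usage sketch following the docstring above; TEMP_PATH, INPUT_PATH and the file names are the placeholders from that docstring and must point at real CSVs for the call to work.

FEATURE_LIST_stage2 = {
    'train': (TEMP_PATH + 'v1_stage1_all_fold.csv',),
    'target': (INPUT_PATH + 'target.csv',),
    'test': (TEMP_PATH + 'v1_stage1_test.csv',),
}
X_train, y_train, X_test = load_data(FEATURE_LIST_stage2, drop_duplicates=True)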
Please provide a description of the function:def set_prob_type(cls, problem_type, classification_type, eval_type):
assert problem_type in problem_type_list, 'Need to set Problem Type'
if problem_type == 'classification':
assert classification_type in classification_type_list,\
'Need to set Classification Type'
assert eval_type in eval_type_list, 'Need to set Evaluation Type'
cls.problem_type = problem_type
cls.classification_type = classification_type
cls.eval_type = eval_type
if cls.problem_type == 'classification':
print 'Setting Problem:{}, Type:{}, Eval:{}'.format(cls.problem_type,
cls.classification_type,
cls.eval_type)
elif cls.problem_type == 'regression':
print 'Setting Problem:{}, Eval:{}'.format(cls.problem_type,
cls.eval_type)
return | [
" Set problem type "
] |
Please provide a description of the function:def make_multi_cols(self, num_class, name):
'''make cols for multi-class predictions'''
cols = ['c' + str(i) + '_' for i in xrange(num_class)]
cols = map(lambda x: x + name, cols)
return cols | [] |
Please provide a description of the function:def parse(self, data):
# type: (bytes) -> None
'''
A method to parse an ISO9660 Path Table Record out of a string.
Parameters:
data - The string to parse.
Returns:
Nothing.
'''
(self.len_di, self.xattr_length, self.extent_location,
self.parent_directory_num) = struct.unpack_from(self.FMT, data[:8], 0)
if self.len_di % 2 != 0:
self.directory_identifier = data[8:-1]
else:
self.directory_identifier = data[8:]
self.dirrecord = None
self._initialized = True | [] |
Please provide a description of the function:def _record(self, ext_loc, parent_dir_num):
# type: (int, int) -> bytes
'''
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
'''
return struct.pack(self.FMT, self.len_di, self.xattr_length,
ext_loc, parent_dir_num) + self.directory_identifier + b'\x00' * (self.len_di % 2) | [] |
Please provide a description of the function:def record_little_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the little endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the little endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(self.extent_location, self.parent_directory_num) | [] |
Please provide a description of the function:def record_big_endian(self):
# type: () -> bytes
'''
A method to generate a string representing the big endian version of
this Path Table Record.
Parameters:
None.
Returns:
A string representing the big endian version of this Path Table Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
return self._record(utils.swab_32bit(self.extent_location),
utils.swab_16bit(self.parent_directory_num)) | [] |
Please provide a description of the function:def _new(self, name, parent_dir_num):
# type: (bytes, int) -> None
'''
An internal method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
parent_dir_num - The directory number of the parent of this Path Table
Record.
Returns:
Nothing.
'''
self.len_di = len(name)
self.xattr_length = 0 # FIXME: we don't support xattr for now
self.parent_directory_num = parent_dir_num
self.directory_identifier = name
self._initialized = True | [] |
Please provide a description of the function:def new_dir(self, name):
# type: (bytes) -> None
'''
A method to create a new Path Table Record.
Parameters:
name - The name for this Path Table Record.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record already initialized')
# Zero for the parent dir num is bogus, but that will get fixed later.
self._new(name, 0) | [] |
Please provide a description of the function:def update_extent_location(self, extent_loc):
# type: (int) -> None
'''
A method to update the extent location for this Path Table Record.
Parameters:
extent_loc - The new extent location.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.extent_location = extent_loc | [] |
Please provide a description of the function:def update_parent_directory_number(self, parent_dir_num):
# type: (int) -> None
'''
A method to update the parent directory number for this Path Table
Record from the directory record.
Parameters:
parent_dir_num - The new parent directory number to assign to this PTR.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')
self.parent_directory_num = parent_dir_num | [] |
Please provide a description of the function:def equal_to_be(self, be_record):
# type: (PathTableRecord) -> bool
'''
A method to compare a little-endian path table record to its
big-endian counterpart. This is used to ensure that the ISO is sane.
Parameters:
be_record - The big-endian object to compare with the little-endian
object.
Returns:
True if this record is equal to the big-endian record passed in,
False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')
if be_record.len_di != self.len_di or \
be_record.xattr_length != self.xattr_length or \
utils.swab_32bit(be_record.extent_location) != self.extent_location or \
utils.swab_16bit(be_record.parent_directory_num) != self.parent_directory_num or \
be_record.directory_identifier != self.directory_identifier:
return False
return True | [] |
Please provide a description of the function:def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len | [] |
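A self-contained sketch of copy_data's fallback path (the import path is assumed): BytesIO objects raise io.UnsupportedOperation from fileno(), so the chunked read/write loop is used instead of sendfile().

import io

from pycdlib.utils import copy_data  # assumed import path

src = io.BytesIO(b'x' * 10000)
dst = io.BytesIO()
copy_data(10000, 4096, src, dst)     # copied in 4096-byte chunks
assert dst.getvalue() == b'x' * 10000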
Please provide a description of the function:def encode_space_pad(instr, length, encoding):
# type: (bytes, int, str) -> bytes
'''
A function to pad out an input string with spaces to the length specified.
The space is first encoded into the specified encoding, then appended to
the input string until the length is reached.
Parameters:
instr - The input string to encode and pad.
length - The length to pad the input string to.
encoding - The encoding to use.
Returns:
The input string encoded in the encoding and padded with encoded spaces.
'''
output = instr.decode('utf-8').encode(encoding)
if len(output) > length:
raise pycdlibexception.PyCdlibInvalidInput('Input string too long!')
encoded_space = ' '.encode(encoding)
left = length - len(output)
while left > 0:
output += encoded_space
left -= len(encoded_space)
if left < 0:
output = output[:left]
return output | [] |
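A small sketch of encode_space_pad; 'utf-16-be' is used here since that is effectively the encoding of Joliet identifiers, and the import path is assumed.

from pycdlib.utils import encode_space_pad  # assumed import path

padded = encode_space_pad(b'CDROM', 32, 'utf-16-be')
assert len(padded) == 32   # 10 bytes of text plus 11 two-byte encoded spaces
assert padded.startswith('CDROM'.encode('utf-16-be'))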
Please provide a description of the function:def normpath(path):
# type: (str) -> bytes
'''
A method to normalize the path, eliminating double slashes, etc. This
method is a copy of the built-in python normpath, except we do *not* allow
double slashes at the start.
Parameters:
path - The path to normalize.
Returns:
The normalized path.
'''
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot.encode('utf-8')
initial_slashes = path.startswith(sep)
comps = path.split(sep)
new_comps = [] # type: List[str]
for comp in comps:
if comp in (empty, dot):
continue
if comp != dotdot or (not initial_slashes and not new_comps) or (new_comps and new_comps[-1] == dotdot):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
newpath = sep * initial_slashes + sep.join(new_comps)
if sys.version_info >= (3, 0):
newpath_bytes = newpath.encode('utf-8')
else:
newpath_bytes = newpath.decode('utf-8').encode('utf-8')
return newpath_bytes or dot.encode('utf-8') | [] |
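Behaviour sketch (import path assumed): unlike os.path.normpath, the result is bytes and a leading double slash collapses to a single one.

from pycdlib.utils import normpath  # assumed import path

assert normpath('/a//b/../c/') == b'/a/c'
assert normpath('') == b'.'
assert normpath('//foo/bar') == b'/foo/bar'  # no special case for '//'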
Please provide a description of the function:def gmtoffset_from_tm(tm, local):
# type: (float, time.struct_time) -> int
'''
A function to compute the GMT offset from the time in seconds since the epoch
and the local time object.
Parameters:
tm - The time in seconds since the epoch.
local - The struct_time object representing the local time.
Returns:
The gmtoffset.
'''
gmtime = time.gmtime(tm)
tmpyear = gmtime.tm_year - local.tm_year
tmpyday = gmtime.tm_yday - local.tm_yday
tmphour = gmtime.tm_hour - local.tm_hour
tmpmin = gmtime.tm_min - local.tm_min
if tmpyday < 0:
tmpyday = -1
else:
if tmpyear > 0:
tmpyday = 1
return -(tmpmin + 60 * (tmphour + 24 * tmpyday)) // 15 | [] |
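The return value is in the 15-minute units that ISO 9660 date fields use, so a UTC-5 local zone comes back as -20. A quick sketch, with the import path assumed:

import time

from pycdlib.utils import gmtoffset_from_tm  # assumed import path

now = time.time()
offset = gmtoffset_from_tm(now, time.localtime(now))
print(offset * 15, 'minutes east of UTC')  # e.g. -300 for a UTC-5 zone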
Please provide a description of the function:def zero_pad(fp, data_size, pad_size):
# type: (BinaryIO, int, int) -> None
'''
A function to write padding out from data_size up to pad_size
efficiently.
Parameters:
fp - The file object to use to write padding out to.
data_size - The current size of the data.
pad_size - The boundary size of data to pad out to.
Returns:
Nothing.
'''
padbytes = pad_size - (data_size % pad_size)
if padbytes == pad_size:
# Nothing to pad, get out.
return
fp.seek(padbytes - 1, os.SEEK_CUR)
fp.write(b'\x00') | [] |
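zero_pad seeks to the last pad byte and writes a single NUL so the file object fills the gap itself, which is cheaper than writing the whole run of zeros. A self-contained sketch (import path assumed):

import io

from pycdlib.utils import zero_pad  # assumed import path

buf = io.BytesIO()
buf.write(b'\x01' * 1000)   # 1000 bytes of data written so far
zero_pad(buf, 1000, 2048)   # pad out to the next 2048-byte boundary
assert len(buf.getvalue()) == 2048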
Please provide a description of the function:def file_object_supports_binary(fp):
# type: (BinaryIO) -> bool
'''
A function to check whether a file-like object supports binary mode.
Parameters:
fp - The file-like object to check for binary mode support.
Returns:
True if the file-like object supports binary mode, False otherwise.
'''
if hasattr(fp, 'mode'):
return 'b' in fp.mode
# Python 3
if sys.version_info >= (3, 0):
return isinstance(fp, (io.RawIOBase, io.BufferedIOBase))
# Python 2
return isinstance(fp, (cStringIO.OutputType, cStringIO.InputType, io.RawIOBase, io.BufferedIOBase)) | [] |
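A quick check of the mode-less branch (import path assumed): BytesIO and StringIO carry no mode attribute, so the decision falls back to their io base classes.

import io

from pycdlib.utils import file_object_supports_binary  # assumed import path

assert file_object_supports_binary(io.BytesIO(b''))       # BufferedIOBase
assert not file_object_supports_binary(io.StringIO(u''))  # text-only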
Please provide a description of the function:def parse(self, instr):
# type: (bytes) -> bool
'''
A method to parse ISO hybridization info out of an existing ISO.
Parameters:
instr - The data for the ISO hybridization.
Returns:
True if the data was parsed as an IsoHybrid record, False if it is not an IsoHybrid header.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is already initialized')
if len(instr) != 512:
raise pycdlibexception.PyCdlibInvalidISO('Invalid size of the instr')
if instr[0:32] == self.ORIG_HEADER:
self.header = self.ORIG_HEADER
elif instr[0:32] == self.MAC_AFP:
self.header = self.MAC_AFP
else:
# If we didn't see anything that we expected, then this is not an
# IsoHybrid ISO, so just quietly return False
return False
(self.mbr, self.rba, unused1, self.mbr_id,
unused2) = struct.unpack_from(self.FMT, instr[:32 + struct.calcsize(self.FMT)], 32)
if unused1 != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid IsoHybrid section')
if unused2 != 0:
raise pycdlibexception.PyCdlibInvalidISO('Invalid IsoHybrid section')
offset = 32 + struct.calcsize(self.FMT)
for i in range(1, 5):
if bytes(bytearray([instr[offset]])) == b'\x80':
self.part_entry = i
(const_unused, self.bhead, self.bsect, self.bcyle, self.ptype,
self.ehead, self.esect, self.ecyle, self.part_offset,
self.psize) = struct.unpack_from('=BBBBBBBBLL', instr[:offset + 16], offset)
break
offset += 16
else:
raise pycdlibexception.PyCdlibInvalidISO('No valid partition found in IsoHybrid!')
if bytes(bytearray([instr[-2]])) != b'\x55' or bytes(bytearray([instr[-1]])) != b'\xaa':
raise pycdlibexception.PyCdlibInvalidISO('Invalid tail on isohybrid section')
self.geometry_heads = self.ehead + 1
# FIXME: I can't see any way to compute the number of sectors from the
# available information. For now, we just hard-code this at 32 and
# hope for the best.
self.geometry_sectors = 32
self._initialized = True
return True | [] |
Please provide a description of the function:def new(self, mac, part_entry, mbr_id, part_offset,
geometry_sectors, geometry_heads, part_type):
# type: (bool, int, Optional[int], int, int, int, int) -> None
'''
A method to add ISO hybridization to an ISO.
Parameters:
mac - Whether this ISO should be made bootable for the Macintosh.
part_entry - The partition entry for the hybridization.
mbr_id - The mbr_id to use for the hybridization.
part_offset - The partition offset to use for the hybridization.
geometry_sectors - The number of sectors to use for the hybridization.
geometry_heads - The number of heads to use for the hybridization.
part_type - The partition type for the hybridization.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is already initialized')
if mac:
self.header = self.MAC_AFP
else:
self.header = self.ORIG_HEADER
isohybrid_data_hd0 = b'\x33\xed\xfa\x8e\xd5\xbc\x00\x7c\xfb\xfc\x66\x31\xdb\x66\x31\xc9\x66\x53\x66\x51\x06\x57\x8e\xdd\x8e\xc5\x52\xbe\x00\x7c\xbf\x00\x06\xb9\x00\x01\xf3\xa5\xea\x4b\x06\x00\x00\x52\xb4\x41\xbb\xaa\x55\x31\xc9\x30\xf6\xf9\xcd\x13\x72\x16\x81\xfb\x55\xaa\x75\x10\x83\xe1\x01\x74\x0b\x66\xc7\x06\xf1\x06\xb4\x42\xeb\x15\xeb\x00\x5a\x51\xb4\x08\xcd\x13\x83\xe1\x3f\x5b\x51\x0f\xb6\xc6\x40\x50\xf7\xe1\x53\x52\x50\xbb\x00\x7c\xb9\x04\x00\x66\xa1\xb0\x07\xe8\x44\x00\x0f\x82\x80\x00\x66\x40\x80\xc7\x02\xe2\xf2\x66\x81\x3e\x40\x7c\xfb\xc0\x78\x70\x75\x09\xfa\xbc\xec\x7b\xea\x44\x7c\x00\x00\xe8\x83\x00\x69\x73\x6f\x6c\x69\x6e\x75\x78\x2e\x62\x69\x6e\x20\x6d\x69\x73\x73\x69\x6e\x67\x20\x6f\x72\x20\x63\x6f\x72\x72\x75\x70\x74\x2e\x0d\x0a\x66\x60\x66\x31\xd2\x66\x03\x06\xf8\x7b\x66\x13\x16\xfc\x7b\x66\x52\x66\x50\x06\x53\x6a\x01\x6a\x10\x89\xe6\x66\xf7\x36\xe8\x7b\xc0\xe4\x06\x88\xe1\x88\xc5\x92\xf6\x36\xee\x7b\x88\xc6\x08\xe1\x41\xb8\x01\x02\x8a\x16\xf2\x7b\xcd\x13\x8d\x64\x10\x66\x61\xc3\xe8\x1e\x00\x4f\x70\x65\x72\x61\x74\x69\x6e\x67\x20\x73\x79\x73\x74\x65\x6d\x20\x6c\x6f\x61\x64\x20\x65\x72\x72\x6f\x72\x2e\x0d\x0a\x5e\xac\xb4\x0e\x8a\x3e\x62\x04\xb3\x07\xcd\x10\x3c\x0a\x75\xf1\xcd\x18\xf4\xeb\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
self.mbr = isohybrid_data_hd0
self.rba = 0 # This will be set later
self.mbr_id = mbr_id
if self.mbr_id is None:
self.mbr_id = random.getrandbits(32)
self.part_entry = part_entry
self.bhead = (part_offset // geometry_sectors) % geometry_heads
self.bsect = (part_offset % geometry_sectors) + 1
self.bcyle = part_offset // (geometry_heads * geometry_sectors)
self.bsect += (self.bcyle & 0x300) >> 2
self.bcyle &= 0xff
self.ptype = part_type
self.ehead = geometry_heads - 1
self.part_offset = part_offset
self.geometry_heads = geometry_heads
self.geometry_sectors = geometry_sectors
self._initialized = True | [] |
Please provide a description of the function:def _calc_cc(self, iso_size):
# type: (int) -> Tuple[int, int]
'''
A method to calculate the 'cc' and the 'padding' values for this
hybridization.
Parameters:
iso_size - The size of the ISO, excluding the hybridization.
Returns:
A tuple containing the cc value and the padding.
'''
cylsize = self.geometry_heads * self.geometry_sectors * 512
frac = iso_size % cylsize
padding = 0
if frac > 0:
padding = cylsize - frac
cc = (iso_size + padding) // cylsize
if cc > 1024:
cc = 1024
return (cc, padding) | [] |
Please provide a description of the function:def record(self, iso_size):
# type: (int) -> bytes
'''
A method to generate a string containing the ISO hybridization.
Parameters:
iso_size - The size of the ISO, excluding the hybridization.
Returns:
A string containing the ISO hybridization.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is not yet initialized')
outlist = [struct.pack('=32s400sLLLH', self.header, self.mbr, self.rba,
0, self.mbr_id, 0)]
for i in range(1, 5):
if i == self.part_entry:
cc, padding_unused = self._calc_cc(iso_size)
esect = self.geometry_sectors + (((cc - 1) & 0x300) >> 2)
ecyle = (cc - 1) & 0xff
psize = cc * self.geometry_heads * self.geometry_sectors - self.part_offset
outlist.append(struct.pack('=BBBBBBBBLL', 0x80, self.bhead,
self.bsect, self.bcyle, self.ptype,
self.ehead, esect, ecyle,
self.part_offset, psize))
else:
outlist.append(b'\x00' * 16)
outlist.append(b'\x55\xaa')
return b''.join(outlist) | [] |
Please provide a description of the function:def record_padding(self, iso_size):
# type: (int) -> bytes
'''
A method to record padding for the ISO hybridization.
Parameters:
iso_size - The size of the ISO, excluding the hybridization.
Returns:
A string of zeros the right size to pad the ISO.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is not yet initialized')
return b'\x00' * self._calc_cc(iso_size)[1] | [] |
Please provide a description of the function:def update_rba(self, current_extent):
# type: (int) -> None
'''
A method to update the current rba for the ISO hybridization.
Parameters:
current_extent - The new extent to set the RBA to.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is not yet initialized')
self.rba = current_extent | [] |
Please provide a description of the function:def parse(self, xastr):
# type: (bytes) -> None
'''
Parse an Extended Attribute Record out of a string.
Parameters:
xastr - The string to parse.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('This XARecord is already initialized!')
(self._group_id, self._user_id, self._attributes, signature, self._filenum,
unused) = struct.unpack_from(self.FMT, xastr, 0)
if signature != b'XA':
raise pycdlibexception.PyCdlibInvalidISO('Invalid signature on the XARecord!')
if unused != b'\x00\x00\x00\x00\x00':
raise pycdlibexception.PyCdlibInvalidISO('Unused fields should be 0')
self._initialized = True | [] |
Please provide a description of the function:def new(self):
# type: () -> None
'''
Create a new Extended Attribute Record.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('This XARecord is already initialized!')
# FIXME: we should allow the user to set these
self._group_id = 0
self._user_id = 0
self._attributes = 0
self._filenum = 0
self._initialized = True | [] |
Please provide a description of the function:def record(self):
# type: () -> bytes
'''
Record this Extended Attribute Record.
Parameters:
None.
Returns:
A string representing this Extended Attribute Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This XARecord is not yet initialized!')
return struct.pack(self.FMT, self._group_id, self._user_id,
self._attributes, b'XA', self._filenum, b'\x00' * 5) | [] |
Please provide a description of the function:def parse(self, vd, record, parent):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, Optional[DirectoryRecord]) -> str
'''
Parse a directory record out of a string.
Parameters:
vd - The Volume Descriptor this record is part of.
record - The string to parse for this record.
parent - The parent of this record.
Returns:
The Rock Ridge version as a string if this Directory Record has Rock
Ridge, or an empty string if it does not.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
if len(record) > 255:
# Since the length is supposed to be 8 bits, this should never
# happen.
raise pycdlibexception.PyCdlibInvalidISO('Directory record longer than 255 bytes!')
(self.dr_len, self.xattr_len, extent_location_le, extent_location_be,
data_length_le, data_length_be_unused, dr_date, self.file_flags,
self.file_unit_size, self.interleave_gap_size, seqnum_le, seqnum_be,
self.len_fi) = struct.unpack_from(self.FMT, record[:33], 0)
# In theory we should have a check here that checks to make sure that
# the length of the record we were passed in matches the data record
# length. However, we have seen ISOs in the wild where this is
# incorrect, so we elide the check here.
if extent_location_le != utils.swab_32bit(extent_location_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian (%d) and big-endian (%d) extent location disagree' % (extent_location_le, utils.swab_32bit(extent_location_be)))
self.orig_extent_loc = extent_location_le
# Theoretically, we should check to make sure that the little endian
# data length is the same as the big endian data length. In practice,
# though, we've seen ISOs where this is wrong. Skip the check, and just
# pick the little-endian as the 'actual' size, and hope for the best.
self.data_length = data_length_le
if seqnum_le != utils.swab_16bit(seqnum_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian seqnum disagree')
self.seqnum = seqnum_le
self.date = dates.DirectoryRecordDate()
self.date.parse(dr_date)
# OK, we've unpacked what we can from the beginning of the string. Now
# we have to use the len_fi to get the rest.
self.parent = parent
self.vd = vd
if self.parent is None:
self.is_root = True
# A root directory entry should always be exactly 34 bytes.
# However, we have seen ISOs in the wild that get this wrong, so we
# elide a check for it.
self.file_ident = bytes(bytearray([record[33]]))
# A root directory entry should always have 0 as the identifier.
# However, we have seen ISOs in the wild that don't have this set
# properly to 0. In that case, we override what we parsed out from
# the original with the correct value (\x00), and hope for the best.
if self.file_ident != b'\x00':
self.file_ident = b'\x00'
self.isdir = True
else:
record_offset = 33
self.file_ident = record[record_offset:record_offset + self.len_fi]
record_offset += self.len_fi
if self.file_flags & (1 << self.FILE_FLAG_DIRECTORY_BIT):
self.isdir = True
if self.len_fi % 2 == 0:
record_offset += 1
if len(record[record_offset:]) >= XARecord.length():
xa_rec = XARecord()
try:
xa_rec.parse(record[record_offset:record_offset + XARecord.length()])
self.xa_record = xa_rec
record_offset += XARecord.length()
except pycdlibexception.PyCdlibInvalidISO:
# We've seen some ISOs in the wild (Windows 98 SE) that
# put the XA record all the way at the back, with some
# padding. Try again from the back.
try:
xa_rec.parse(record[-XARecord.length():])
self.xa_record = xa_rec
self.xa_pad_size = len(record) - record_offset - XARecord.length()
record_offset = len(record)
except pycdlibexception.PyCdlibInvalidISO:
pass
if len(record[record_offset:]) >= 2 and record[record_offset:record_offset + 2] in (b'SP', b'RR', b'CE', b'PX', b'ER', b'ES', b'PN', b'SL', b'NM', b'CL', b'PL', b'TF', b'SF', b'RE'):
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = False
if self.parent.is_root:
if self.file_ident == b'\x00':
is_first_dir_record_of_root = True
bytes_to_skip = 0
else:
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent has no dot child')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child does not have Rock Ridge; ISO is corrupt')
bytes_to_skip = self.parent.children[0].rock_ridge.bytes_to_skip
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent does not have Rock Ridge; ISO is corrupt')
bytes_to_skip = self.parent.rock_ridge.bytes_to_skip
self.rock_ridge.parse(record[record_offset:],
is_first_dir_record_of_root,
bytes_to_skip,
False)
if self.xattr_len != 0:
if self.file_flags & (1 << self.FILE_FLAG_RECORD_BIT):
raise pycdlibexception.PyCdlibInvalidISO('Record Bit not allowed with Extended Attributes')
if self.file_flags & (1 << self.FILE_FLAG_PROTECTION_BIT):
raise pycdlibexception.PyCdlibInvalidISO('Protection Bit not allowed with Extended Attributes')
if self.rock_ridge is None:
ret = ''
else:
ret = self.rock_ridge.rr_version
if self.is_root:
self._printable_name = b'/'
elif self.file_ident == b'\x00':
self._printable_name = b'.'
elif self.file_ident == b'\x01':
self._printable_name = b'..'
else:
self._printable_name = self.file_ident
self._initialized = True
return ret | [] |
Please provide a description of the function:def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
rr_relocated, rr_relocated_parent, file_mode):
# type: (str, bytes, bytes, bool, bool, bool, int) -> None
'''
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
'''
if self.parent is None:
raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
bytes_to_skip = 0
if self.xa_record is not None:
bytes_to_skip = XARecord.length()
self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
file_mode, rr_symlink_target,
rr_version, rr_relocated_child,
rr_relocated, rr_relocated_parent,
bytes_to_skip, self.dr_len)
# For files, we are done
if not self.isdir:
return
# If this is a directory, we have to manipulate the file links
# appropriately.
if self.parent.is_root:
if self.file_ident == b'\x00' or self.file_ident == b'\x01':
# For the dot and dotdot children of the root, add one
# directly to their Rock Ridge links.
self.rock_ridge.add_to_file_links()
else:
# For all other children of the root, make sure to add one
# to each of the dot and dotdot entries.
if len(self.parent.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
if self.parent.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[1].rock_ridge.add_to_file_links()
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
if self.file_ident == b'\x00':
# If we are adding the dot directory, increment the parent
# file links and our file links.
self.parent.rock_ridge.add_to_file_links()
self.rock_ridge.add_to_file_links()
elif self.file_ident == b'\x01':
# If we are adding the dotdot directory, copy the file links
# from the dot directory of the grandparent.
if self.parent.parent is None:
raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
if not self.parent.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent dot entry did not have Rock Ridge; ISO is corrupt')
self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
else:
# For all other entries, increment the parents file links
# and the parents dot file links.
self.parent.rock_ridge.add_to_file_links()
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have a dot entry; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links() | [] |
Please provide a description of the function:def _new(self, vd, name, parent, seqnum, isdir, length, xa):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, Optional[DirectoryRecord], int, bool, int, bool) -> None
'''
Internal method to create a new Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number to associate with this directory record.
isdir - Whether this directory record represents a directory.
length - The length of the data for this directory record.
xa - True if this is an Extended Attribute record.
Returns:
Nothing.
'''
# Adding a new time should really be done when we are going to write
# the ISO (in record()). Ecma-119 9.1.5 says:
#
# 'This field shall indicate the date and the time of the day at which
# the information in the Extent described by the Directory Record was
# recorded.'
#
# We create it here just to have something in the field, but we'll
# redo the whole thing when we are mastering.
self.date = dates.DirectoryRecordDate()
self.date.new()
if length > 2**32 - 1:
raise pycdlibexception.PyCdlibInvalidInput('Maximum supported file length is 2^32-1')
self.data_length = length
self.file_ident = name
self.isdir = isdir
self.seqnum = seqnum
# For a new directory record entry, there is no original_extent_loc,
# so we leave it at None.
self.orig_extent_loc = None
self.len_fi = len(self.file_ident)
self.dr_len = struct.calcsize(self.FMT) + self.len_fi
# From Ecma-119, 9.1.6, the file flag bits are:
#
# Bit 0 - Existence - 0 for existence known, 1 for hidden
# Bit 1 - Directory - 0 for file, 1 for directory
# Bit 2 - Associated File - 0 for not associated, 1 for associated
# Bit 3 - Record - 0=structure not in xattr, 1=structure in xattr
# Bit 4 - Protection - 0=no owner and group, 1=owner and group in xattr
# Bit 5 - Reserved
# Bit 6 - Reserved
# Bit 7 - Multi-extent - 0=final directory record, 1=not final directory record
self.file_flags = 0
if self.isdir:
self.file_flags |= (1 << self.FILE_FLAG_DIRECTORY_BIT)
self.file_unit_size = 0 # FIXME: we don't support setting file unit size for now
self.interleave_gap_size = 0 # FIXME: we don't support setting interleave gap size for now
self.xattr_len = 0 # FIXME: we don't support xattrs for now
self.parent = parent
if parent is None:
# If no parent, then this is the root
self.is_root = True
if xa:
self.xa_record = XARecord()
self.xa_record.new()
self.dr_len += XARecord.length()
self.dr_len += (self.dr_len % 2)
if self.is_root:
self._printable_name = b'/'
elif self.file_ident == b'\x00':
self._printable_name = b'.'
elif self.file_ident == b'\x01':
self._printable_name = b'..'
else:
self._printable_name = self.file_ident
self.vd = vd
self._initialized = True | [] |
Please provide a description of the function:def new_symlink(self, vd, name, parent, rr_target, seqnum, rock_ridge,
rr_name, xa):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, DirectoryRecord, bytes, int, str, bytes, bool) -> None
'''
Create a new symlink Directory Record. This implies that the new
record will be Rock Ridge.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
rr_target - The symlink target for this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - The version of Rock Ridge to use for this directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, name, parent, seqnum, False, 0, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, rr_target, False, False, False,
0o0120555) | [] |
Please provide a description of the function:def new_file(self, vd, length, isoname, parent, seqnum, rock_ridge, rr_name,
xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, int, bytes, DirectoryRecord, int, str, bytes, bool, int) -> None
'''
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, isoname, parent, seqnum, False, length, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', False, False, False,
file_mode) | [] |
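These directory records are normally not built by hand; the top-level PyCdlib object creates them while mastering an image. A minimal sketch, assuming the public pycdlib API (new/add_fp/write):

import io

import pycdlib

iso = pycdlib.PyCdlib()
iso.new(rock_ridge='1.09')                    # fresh ISO with Rock Ridge
data = b'hello\n'
iso.add_fp(io.BytesIO(data), len(data),
           '/HELLO.;1', rr_name='hello.txt')  # creates a file DirectoryRecord
iso.write('new.iso')
iso.close()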
Please provide a description of the function:def new_root(self, vd, seqnum, log_block_size):
# type: (headervd.PrimaryOrSupplementaryVD, int, int) -> None
'''
Create a new root Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
seqnum - The sequence number for this directory record.
log_block_size - The logical block size to use.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x00', None, seqnum, True, log_block_size, False) | [] |
Please provide a description of the function:def new_dot(self, vd, parent, seqnum, rock_ridge, log_block_size, xa,
file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, int) -> None
'''
Create a new 'dot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x00', parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, b'', b'', False, False, False, file_mode) | [] |
Please provide a description of the function:def new_dotdot(self, vd, parent, seqnum, rock_ridge, log_block_size,
rr_relocated_parent, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, DirectoryRecord, int, str, int, bool, bool, int) -> None
'''
Create a new 'dotdot' Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
log_block_size - The logical block size to use.
rr_relocated_parent - True if this is a Rock Ridge relocated parent.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, b'\x01', parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, b'', b'', False, False, rr_relocated_parent, file_mode) | [] |
Please provide a description of the function:def new_dir(self, vd, name, parent, seqnum, rock_ridge, rr_name, log_block_size,
rr_relocated_child, rr_relocated, xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, bytes, DirectoryRecord, int, str, bytes, int, bool, bool, bool, int) -> None
'''
Create a new directory Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
name - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
log_block_size - The logical block size to use.
rr_relocated_child - True if this is a Rock Ridge relocated child.
rr_relocated - True if this is a Rock Ridge relocated entry.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode to set for this directory.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, name, parent, seqnum, True, log_block_size, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', rr_relocated_child,
rr_relocated, False, file_mode)
if rr_relocated_child and self.rock_ridge:
# Relocated Rock Ridge entries are not exactly treated as directories, so
# fix things up here.
self.isdir = False
self.file_flags = 0
self.rock_ridge.add_to_file_links() | [] |
Please provide a description of the function:def change_existence(self, is_hidden):
# type: (bool) -> None
'''
Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden, False otherwise.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if is_hidden:
self.file_flags |= (1 << self.FILE_FLAG_EXISTENCE_BIT)
else:
self.file_flags &= ~(1 << self.FILE_FLAG_EXISTENCE_BIT) | [] |
Please provide a description of the function:def _recalculate_extents_and_offsets(self, index, logical_block_size):
# type: (int, int) -> Tuple[int, int]
'''
Internal method to recalculate the extents and offsets associated with
children of this directory record.
Parameters:
index - The index at which to start the recalculation.
logical_block_size - The block size to use for comparisons.
Returns:
A tuple where the first element is the total number of extents required
by the children and where the second element is the offset into the
last extent currently being used.
'''
if index == 0:
dirrecord_offset = 0
num_extents = 1
else:
dirrecord_offset = self.children[index - 1].offset_to_here
num_extents = self.children[index - 1].extents_to_here
for i in range(index, len(self.children)):
c = self.children[i]
dirrecord_len = c.dr_len
if (dirrecord_offset + dirrecord_len) > logical_block_size:
num_extents += 1
dirrecord_offset = 0
dirrecord_offset += dirrecord_len
c.extents_to_here = num_extents
c.offset_to_here = dirrecord_offset
c.index_in_parent = i
return num_extents, dirrecord_offset | [] |
Please provide a description of the function:def _add_child(self, child, logical_block_size, allow_duplicate, check_overflow):
# type: (DirectoryRecord, int, bool, bool) -> bool
'''
An internal method to add a child to this object. Note that this is called both
during parsing and when adding a new object to the system, so it
shouldn't have any functionality that is not appropriate for both.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume descriptor.
allow_duplicate - Whether to allow duplicate names, as there are situations where duplicate children are allowed.
check_overflow - Whether to check for overflow; if we are parsing, we don't want to do this.
Returns:
True if adding this child caused the directory to overflow into another
extent, False otherwise.
'''
if not self.isdir:
raise pycdlibexception.PyCdlibInvalidInput('Trying to add a child to a record that is not a directory')
# First ensure that this is not a duplicate. For speed purposes, we
# recognize that bisect_left will always choose an index to the *left*
# of a duplicate child. Thus, to check for duplicates we only need to
# see if the child to be added is a duplicate with the entry that
# bisect_left returned.
index = bisect.bisect_left(self.children, child)
if index != len(self.children) and self.children[index].file_ident == child.file_ident:
if not self.children[index].is_associated_file() and not child.is_associated_file():
if not (self.rock_ridge is not None and self.file_identifier() == b'RR_MOVED'):
if not allow_duplicate:
raise pycdlibexception.PyCdlibInvalidInput('Failed adding duplicate name to parent')
else:
self.children[index].data_continuation = child
index += 1
self.children.insert(index, child)
if child.rock_ridge is not None and not child.is_dot() and not child.is_dotdot():
lo = 0
hi = len(self.rr_children)
while lo < hi:
mid = (lo + hi) // 2
rr = self.rr_children[mid].rock_ridge
if rr is not None:
if rr.name() < child.rock_ridge.name():
lo = mid + 1
else:
hi = mid
else:
raise pycdlibexception.PyCdlibInternalError('Expected all children to have Rock Ridge, but one did not')
rr_index = lo
self.rr_children.insert(rr_index, child)
# We now have to check if we need to add another logical block.
# We have to iterate over the entire list again, because where we
# placed this last entry may rearrange the empty spaces in the blocks
# that we've already allocated.
num_extents, offset_unused = self._recalculate_extents_and_offsets(index,
logical_block_size)
overflowed = False
if check_overflow and (num_extents * logical_block_size > self.data_length):
overflowed = True
# When we overflow our data length, we always add a full block.
self.data_length += logical_block_size
# We also have to make sure to update the length of the dot child,
# as that should always reflect the length.
self.children[0].data_length = self.data_length
# We also have to update all of the dotdot entries. If this is
# the root directory record (no parent), we first update the root
# dotdot entry. In all cases, we update the dotdot entry of all
# children that are directories.
if self.parent is None:
self.children[1].data_length = self.data_length
for c in self.children:
if not c.is_dir():
continue
if len(c.children) > 1:
c.children[1].data_length = self.data_length
return overflowed | [] |
Please provide a description of the function:def add_child(self, child, logical_block_size, allow_duplicate=False):
# type: (DirectoryRecord, int, bool) -> bool
'''
A method to add a new child to this directory record.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume descriptor.
allow_duplicate - Whether to allow duplicate names, as there are
situations where duplicate children are allowed.
Returns:
True if adding this child caused the directory to overflow into another
extent, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
return self._add_child(child, logical_block_size, allow_duplicate, True) | [] |
Please provide a description of the function:def track_child(self, child, logical_block_size, allow_duplicate=False):
# type: (DirectoryRecord, int, bool) -> None
'''
A method to track an existing child of this directory record.
Parameters:
child - The child directory record object to add.
logical_block_size - The size of a logical block for this volume descriptor.
allow_duplicate - Whether to allow duplicate names, as there are
situations where duplicate children are allowed.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
self._add_child(child, logical_block_size, allow_duplicate, False) | [] |
Please provide a description of the function:def remove_child(self, child, index, logical_block_size):
# type: (DirectoryRecord, int, int) -> bool
'''
A method to remove a child from this Directory Record.
Parameters:
child - The child DirectoryRecord object to remove.
index - The index of the child into this DirectoryRecord children list.
logical_block_size - The size of a logical block on this volume descriptor.
Returns:
True if removing this child caused an underflow, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if index < 0:
# This should never happen
raise pycdlibexception.PyCdlibInternalError('Invalid child index to remove')
# Unfortunately, Rock Ridge specifies that a CL 'directory' is replaced
# by a *file*, not another directory. Thus, we can't just depend on
# whether this child is marked as a directory by the file flags during
# parse time. Instead, we check if this is either a true directory,
# or a Rock Ridge CL entry, and in either case try to manipulate the
# file links.
if child.rock_ridge is not None:
if child.isdir or child.rock_ridge.child_link_record_exists():
if len(self.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected a dot and dotdot entry, but missing; ISO is corrupt')
if self.children[0].rock_ridge is None or self.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Missing Rock Ridge entry on dot or dotdot; ISO is corrupt')
if self.parent is None:
self.children[0].rock_ridge.remove_from_file_links()
self.children[1].rock_ridge.remove_from_file_links()
else:
if self.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Child has Rock Ridge, but parent does not; ISO is corrupt')
self.rock_ridge.remove_from_file_links()
self.children[0].rock_ridge.remove_from_file_links()
del self.children[index]
        # We now have to check whether we need to remove a logical block.
        # We have to iterate over the entire list again, because removing this
        # entry may have rearranged the free space in the blocks that we've
        # already allocated.
num_extents, dirrecord_offset = self._recalculate_extents_and_offsets(index,
logical_block_size)
underflow = False
total_size = (num_extents - 1) * logical_block_size + dirrecord_offset
if (self.data_length - total_size) > logical_block_size:
self.data_length -= logical_block_size
# We also have to make sure to update the length of the dot child,
# as that should always reflect the length.
self.children[0].data_length = self.data_length
# We also have to update all of the dotdot entries. If this is
# the root directory record (no parent), we first update the root
# dotdot entry. In all cases, we update the dotdot entry of all
# children that are directories.
if self.parent is None:
self.children[1].data_length = self.data_length
for c in self.children:
if not c.is_dir():
continue
if len(c.children) > 1:
c.children[1].data_length = self.data_length
underflow = True
return underflow | [] |
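The underflow test mirrors the overflow test in add_child: the directory only shrinks when a full logical block of slack remains after recomputing the extents. A minimal sketch of that comparison, with illustrative names rather than pycdlib's API:

# Illustrative underflow check, assuming 2048-byte logical blocks.
def would_underflow(num_extents, last_record_offset, data_length,
                    logical_block_size=2048):
    # Space actually needed by the remaining records: all full extents plus
    # the used portion of the last one.
    total_size = (num_extents - 1) * logical_block_size + last_record_offset
    return (data_length - total_size) > logical_block_size

# The remaining records fit in one extent, but the directory still owns two
# 2048-byte blocks, so one block can be released.
print(would_underflow(1, 300, 4096))  # True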
Please provide a description of the function:def record(self):
# type: () -> bytes
'''
        A method to generate the bytes representing this Directory Record.
Parameters:
None.
Returns:
        Bytes representing this Directory Record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
# Ecma-119 9.1.5 says the date should reflect the time when the
# record was written, so we make a new date now and use that to
# write out the record.
self.date = dates.DirectoryRecordDate()
self.date.new()
padlen = struct.calcsize(self.FMT) + self.len_fi
padstr = b'\x00' * (padlen % 2)
extent_loc = self._extent_location()
xa_rec = b''
if self.xa_record is not None:
xa_rec = b'\x00' * self.xa_pad_size + self.xa_record.record()
rr_rec = b''
if self.rock_ridge is not None:
rr_rec = self.rock_ridge.record_dr_entries()
outlist = [struct.pack(self.FMT, self.dr_len, self.xattr_len,
extent_loc, utils.swab_32bit(extent_loc),
self.data_length, utils.swab_32bit(self.data_length),
self.date.record(), self.file_flags,
self.file_unit_size, self.interleave_gap_size,
self.seqnum, utils.swab_16bit(self.seqnum),
self.len_fi) + self.file_ident + padstr + xa_rec + rr_rec]
outlist.append(b'\x00' * (len(outlist[0]) % 2))
return b''.join(outlist) | [] |
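The paired swab_32bit/swab_16bit arguments exist because ECMA-119 stores numeric directory record fields in both-byte-order form: a little-endian copy immediately followed by a big-endian copy. A standalone illustration of that encoding for a 32-bit field (this is not pycdlib's actual helper):

import struct

# Both-endian encoding of a 32-bit value, as used for fields such as the
# extent location and data length in an ECMA-119 directory record.
def both_endian_32(value):
    return struct.pack('<L', value) + struct.pack('>L', value)

# Eight bytes total: LSB-first copy followed by MSB-first copy.
print(both_endian_32(0x12345678).hex())  # 7856341212345678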
Please provide a description of the function:def is_associated_file(self):
# type: () -> bool
'''
A method to determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
return self.file_flags & (1 << self.FILE_FLAG_ASSOCIATED_FILE_BIT) | [] |
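The method returns the raw masked bit rather than a strict bool, which is still truthy or falsy in the expected way. Below is a small sketch of the flag test with the ECMA-119 9.1.6 bit positions written out; the constant names are illustrative:

# ECMA-119 9.1.6 file flag bit positions (illustrative constants).
FILE_FLAG_EXISTENCE_BIT = 0
FILE_FLAG_DIRECTORY_BIT = 1
FILE_FLAG_ASSOCIATED_FILE_BIT = 2
FILE_FLAG_RECORD_BIT = 3
FILE_FLAG_PROTECTION_BIT = 4
FILE_FLAG_MULTI_EXTENT_BIT = 7

def is_associated(file_flags):
    # bool() makes the truthiness explicit; the raw masked value would be
    # 4 for an associated file and 0 otherwise.
    return bool(file_flags & (1 << FILE_FLAG_ASSOCIATED_FILE_BIT))

print(is_associated(0b00000100))  # True
print(is_associated(0b00000010))  # False (directory bit only)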