Dataset schema (one Python function per row):

    id                int32     values 0 to 252k
    repo              string    lengths 7 to 55
    path              string    lengths 4 to 127
    func_name         string    lengths 1 to 88
    original_string   string    lengths 75 to 19.8k
    language          string    1 class (1 value)
    code              string    lengths 75 to 19.8k
    code_tokens       sequence
    docstring         string    lengths 3 to 17.3k
    docstring_tokens  sequence
    sha               string    lengths 40 to 40
    url               string    lengths 87 to 242

In every row shown here, original_string is a verbatim copy of code, and the
*_tokens columns are token-by-token splits of code and docstring, so each row
below lists the metadata fields once, followed by the code (which contains the
docstring) once.
id: 4,000
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/grid.py
func_name: _retain_centroids
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L178-L187
code:

    def _retain_centroids(numbers, thres):
        """Only keep one number for each cluster within thres of each other"""
        numbers.sort()
        prev = -1
        ret = []
        for n in numbers:
            if prev < 0 or n - prev > thres:
                ret.append(n)
                prev = n
        return ret
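A minimal usage sketch (illustration only, not a dataset row): values within
thres of the last kept anchor are dropped, so only cluster anchors survive.
Note that the input list is sorted in place.

    from pdftotree.utils.pdf.grid import _retain_centroids

    coords = [5.0, 1.0, 1.5, 5.8, 9.8]
    # 1.5 and 5.8 fall within thres=2 of an already-kept anchor
    print(_retain_centroids(coords, thres=2))  # -> [1.0, 5.0, 9.8]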
id: 4,001
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/grid.py
func_name: _split_vlines_hlines
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L190-L195
code:

    def _split_vlines_hlines(lines):
        """Separates lines into horizontal and vertical ones"""
        vlines, hlines = [], []
        for line in lines:
            (vlines if line.x1 - line.x0 < 0.1 else hlines).append(line)
        return vlines, hlines
id: 4,002
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/grid.py
func_name: _npiter
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L198-L203
code:

    def _npiter(arr):
        """Wrapper for iterating numpy array"""
        for a in np.nditer(arr, flags=["refs_ok"]):
            c = a.item()
            if c is not None:
                yield c
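A hedged usage sketch: _npiter is presumably meant for the object arrays
backing the grid, where empty cells are None; nditer with refs_ok walks the
references and the None holes are skipped.

    import numpy as np
    from pdftotree.utils.pdf.grid import _npiter

    cells = np.empty((2, 2), dtype=object)  # hypothetical 2x2 cell array
    cells[0, 0] = "a"
    cells[1, 1] = "b"
    print(list(_npiter(cells)))  # -> ['a', 'b']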
id: 4,003
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/grid.py
func_name: Grid.get_normalized_grid
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L118-L144
code:

    def get_normalized_grid(self):
        """
        Analyzes subcell structure
        """
        log = logging.getLogger(__name__)
        # Resolve multirow mentions, TODO: validate against all PDFs
        # subcol_count = 0
        mega_rows = []
        for row_id, row in enumerate(self._grid):
            # maps yc_grid -> [mentions]
            subrow_across_cell = defaultdict(list)
            for col_id, cell in enumerate(row):
                # Keep cell text in reading order
                cell.texts.sort(key=cmp_to_key(reading_order))
                log.debug("=" * 50)
                for m in cell.texts:
                    subrow_across_cell[m.yc_grid].append(m)
                # prev = m
            log.debug(pformat(dict(subrow_across_cell)))
            mega_rows.append(subrow_across_cell)
        # Multiline paragraph check
        # Subrow/Subcolumn
        return mega_rows
id: 4,004
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/grid.py
func_name: Grid._mark_grid_bounds
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/grid.py#L146-L170
code:

    def _mark_grid_bounds(self, plane, region_bbox):
        """
        Assume all lines define a complete grid over the region_bbox.
        Detect which lines are missing so that we can recover merged cells.
        """
        # Grid boundaries
        vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=np.bool)
        hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=np.bool)

        def closest_idx(arr, elem):
            left = bisect.bisect_left(arr, elem) - 1
            right = bisect.bisect_right(arr, elem) - 1
            return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right

        # Figure out which separating segments are missing, i.e. merge cells
        for row, (y0, y1) in enumerate(self.yranges):
            yc = (y0 + y1) // 2
            for l in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)):
                vbars[row, closest_idx(self.xs, l.xc)] = True
        for col, (x0, x1) in enumerate(self.xranges):
            xc = (x0 + x1) // 2
            for l in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)):
                hbars[closest_idx(self.ys, l.yc), col] = True
        return vbars, hbars
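A portability note on this row, not part of the dataset: dtype=np.bool relies
on an alias that NumPy deprecated in 1.20 and removed in 1.24, so the two
np.zeros calls above fail on current NumPy. A sketch of the modern equivalent:

    import numpy as np

    num_rows, num_cols = 3, 4  # hypothetical grid dimensions
    # the builtin bool (or np.bool_) replaces the removed np.bool alias
    vbars = np.zeros([num_rows, num_cols + 1], dtype=bool)
    hbars = np.zeros([num_rows + 1, num_cols], dtype=bool)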
id: 4,005
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: vectorize
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L36-L49
code:

    def vectorize(e, tolerance=0.1):
        """
        Vectorizes the pdf object's bounding box. tolerance is the width
        under which we consider it a line instead of a big rectangle.
        """
        tolerance = max(tolerance, e.linewidth)
        is_high = e.height > tolerance
        is_wide = e.width > tolerance
        # if skewed towards a line
        if is_wide and not is_high:
            return (e.width, 0.0)
        if is_high and not is_wide:
            return (0.0, e.height)
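A usage sketch with a hypothetical stand-in for a pdfminer shape object (only
width, height, and linewidth are read). Note the implicit third case: an
element that is both wide and high falls through and returns None.

    from pdftotree.utils.pdf.vector_utils import vectorize

    class Stroke:  # hypothetical stand-in, not a pdftotree class
        def __init__(self, width, height, linewidth=0.0):
            self.width, self.height, self.linewidth = width, height, linewidth

    print(vectorize(Stroke(120.0, 0.05)))  # wide and flat -> (120.0, 0.0)
    print(vectorize(Stroke(0.05, 80.0)))   # tall and thin -> (0.0, 80.0)
    print(vectorize(Stroke(50.0, 50.0)))   # a real rectangle -> None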
id: 4,006
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: aligned
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L52-L62
code:

    def aligned(e1, e2):
        """
        Alignment is determined by two boxes having one exactly the same
        attribute, which could mean parallel, perpendicularly forming a
        corner, etc.
        """
        return (
            any(close(c1, c2) for c1, c2 in zip(e1.bbox, e2.bbox))
            or x_center_aligned(e1, e2)
            or y_center_aligned(e1, e2)
        )
id: 4,007
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: bound_bboxes
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L106-L114
code:

    def bound_bboxes(bboxes):
        """
        Finds the minimal bbox that contains all given bboxes
        """
        group_x0 = min(map(lambda l: l[x0], bboxes))
        group_y0 = min(map(lambda l: l[y0], bboxes))
        group_x1 = max(map(lambda l: l[x1], bboxes))
        group_y1 = max(map(lambda l: l[y1], bboxes))
        return (group_x0, group_y0, group_x1, group_y1)
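The bare x0, y0, x1, y1 used as subscripts here are module-level index
constants in vector_utils, not locals. A usage sketch under the assumption
that they map a bbox tuple as (x0, y0, x1, y1) = (0, 1, 2, 3):

    from pdftotree.utils.pdf.vector_utils import bound_bboxes

    # assumes the module's index constants follow the conventional
    # (x0, y0, x1, y1) = (0, 1, 2, 3) bbox layout
    boxes = [(0, 0, 2, 2), (1, 1, 5, 3)]
    print(bound_bboxes(boxes))  # -> (0, 0, 5, 3)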
id: 4,008
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: bound_elems
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L117-L125
code:

    def bound_elems(elems):
        """
        Finds the minimal bbox that contains all given elems
        """
        group_x0 = min(map(lambda l: l.x0, elems))
        group_y0 = min(map(lambda l: l.y0, elems))
        group_x1 = max(map(lambda l: l.x1, elems))
        group_y1 = max(map(lambda l: l.y1, elems))
        return (group_x0, group_y0, group_x1, group_y1)
id: 4,009
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: intersect
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L128-L136
code:

    def intersect(a, b):
        """
        Check if two rectangles intersect
        """
        if a[x0] == a[x1] or a[y0] == a[y1]:
            return False
        if b[x0] == b[x1] or b[y0] == b[y1]:
            return False
        return a[x0] <= b[x1] and b[x0] <= a[x1] and a[y0] <= b[y1] and b[y0] <= a[y1]
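A sketch with the same assumed index constants (x0, y0, x1, y1 = 0, 1, 2, 3):
boxes sharing any area intersect, while degenerate boxes with zero width or
height are rejected up front.

    from pdftotree.utils.pdf.vector_utils import intersect

    a = (0, 0, 2, 2)
    b = (1, 1, 3, 3)  # overlaps a
    c = (5, 5, 6, 6)  # disjoint from a
    d = (1, 0, 1, 9)  # zero width, rejected by the degeneracy check
    print(intersect(a, b), intersect(a, c), intersect(a, d))
    # -> True False False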
id: 4,010
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: reading_order
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L155-L163
code:

    def reading_order(e1, e2):
        """
        A comparator to sort bboxes from top to bottom, left to right
        """
        b1 = e1.bbox
        b2 = e2.bbox
        if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]):
            return float_cmp(b1[x0], b2[x0])
        return float_cmp(b1[y0], b2[y0])
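reading_order is an old-style comparator, so callers go through
functools.cmp_to_key, as get_normalized_grid above does with
cell.texts.sort(key=cmp_to_key(reading_order)). A sketch with a hypothetical
bbox holder (float_cmp is resolved inside the same module):

    from functools import cmp_to_key
    from pdftotree.utils.pdf.vector_utils import reading_order

    class Box:  # hypothetical stand-in for a pdfminer layout object
        def __init__(self, *bbox):
            self.bbox = bbox  # (x0, y0, x1, y1)

    elems = [Box(50, 10, 60, 20), Box(10, 10.2, 20, 20), Box(10, 40, 20, 50)]
    elems.sort(key=cmp_to_key(reading_order))
    # the first two share a row after rounding y, so they sort left to right
    print([e.bbox[0] for e in elems])  # -> [10, 50, 10]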
id: 4,011
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: xy_reading_order
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L166-L174
code:

    def xy_reading_order(e1, e2):
        """
        A comparator to sort bboxes from left to right, top to bottom
        """
        b1 = e1.bbox
        b2 = e2.bbox
        if round(b1[x0]) == round(b2[x0]):
            return float_cmp(b1[y0], b2[y0])
        return float_cmp(b1[x0], b2[x0])
id: 4,012
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: column_order
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L177-L193
code:

    def column_order(b1, b2):
        """
        A comparator that sorts bboxes first by "columns", where a column is
        made up of all bboxes that overlap, then by vertical position in each
        column.
        b1 = [b1.type, b1.top, b1.left, b1.bottom, b1.right]
        b2 = [b2.type, b2.top, b2.left, b2.bottom, b2.right]
        """
        (top, left, bottom) = (1, 2, 3)
        # TODO(senwu): Reimplement the functionality of this comparator to
        # detect the number of columns, and sort those in reading order.
        # TODO: This is just a simple top to bottom, left to right comparator
        # for now.
        if round(b1[top]) == round(b2[top]) or round(b1[bottom]) == round(b2[bottom]):
            return float_cmp(b1[left], b2[left])
        return float_cmp(b1[top], b2[top])
id: 4,013
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/vector_utils.py
func_name: merge_intervals
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/vector_utils.py#L212-L235
code:

    def merge_intervals(elems, overlap_thres=2.0):
        """
        Project in x axis
        Sort by start
        Go through segments and keep max x1
        Return a list of non-overlapping intervals
        """
        overlap_thres = max(0.0, overlap_thres)
        ordered = sorted(elems, key=lambda e: e.x0)

        intervals = []
        cur = [-overlap_thres, -overlap_thres]
        for e in ordered:
            if e.x0 - cur[1] > overlap_thres:
                # Check interval validity
                if cur[1] > 0.0:
                    intervals.append(cur)
                cur = [e.x0, e.x1]
                continue
            cur[1] = max(cur[1], e.x1)
        intervals.append(cur)
        # Freeze the interval to tuples
        return map(tuple, intervals)
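A worked sketch with a hypothetical element type carrying just the x-extent
that merge_intervals projects. Gaps of at most overlap_thres are bridged, and
note that despite the docstring, on Python 3 the trailing map(tuple, ...) is a
lazy iterator, hence the explicit list() below.

    from pdftotree.utils.pdf.vector_utils import merge_intervals

    class Seg:  # hypothetical element with only the attributes read here
        def __init__(self, x0, x1):
            self.x0, self.x1 = x0, x1

    segs = [Seg(1, 4), Seg(5, 9), Seg(20, 25)]
    # 5 - 4 <= 2.0, so the first two segments fuse into one interval
    print(list(merge_intervals(segs)))  # -> [(1, 9), (20, 25)]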
id: 4,014
repo: HazyResearch/pdftotree
path: pdftotree/visual/visual_utils.py
func_name: predict_heatmap
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L11-L41
code:

    def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"):
        """
        Return an image corresponding to the page of the pdf document saved at
        pdf_path. If the image is not found in img_dir this function creates it
        and saves it in img_dir.

        :param pdf_path: path to the pdf document.
        :param page_num: page number to create image from in the pdf file.
        :return:
        """
        if not os.path.isdir(img_dir):
            print("\nCreating image folder at {}".format(img_dir))
            os.makedirs(img_dir)
        pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
        # TODO: add hashing function to make sure name is unique
        # TODO: add parallelization
        img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num))
        if not os.path.isfile(img_path):
            # create image for a page in the pdf document and save it in img_dir
            save_image(pdf_path, img_path, page_num)
        image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim))
        image = img_to_array(image, data_format=K.image_data_format())
        image = (
            image.reshape((img_dim, img_dim, 1))
            .repeat(3, axis=2)
            .reshape((1, img_dim, img_dim, 3))
        )
        return (
            image.astype(np.uint8).reshape((img_dim, img_dim, 3)),
            model.predict(image).reshape((img_dim, img_dim)),
        )
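A compatibility note, not part of the dataset: the grayscale=True flag of
load_img was deprecated in Keras 2.2 in favor of color_mode. A sketch of the
equivalent call under that API (the path below is hypothetical):

    from tensorflow.keras.preprocessing.image import load_img

    img_dim = 448
    img_path = "tmp/img/example-0.png"  # hypothetical page image
    image = load_img(img_path, color_mode="grayscale",
                     target_size=(img_dim, img_dim))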
id: 4,015
repo: HazyResearch/pdftotree
path: pdftotree/visual/visual_utils.py
func_name: do_intersect
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L63-L71
code:

    def do_intersect(bb1, bb2):
        """
        Helper function that returns True if two bounding boxes overlap.
        """
        if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
            return False
        if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
            return False
        return True
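A usage sketch: boxes here are (left, top, width, height) tuples, the same
region["rect"] format that get_bboxes in the next row passes in. Because the
comparisons are strict, boxes that merely touch at an edge still count as
overlapping.

    from pdftotree.visual.visual_utils import do_intersect

    bb1 = (0, 0, 10, 10)
    bb2 = (5, 5, 10, 10)  # overlaps bb1
    bb3 = (30, 30, 5, 5)  # disjoint from bb1
    print(do_intersect(bb1, bb2), do_intersect(bb1, bb3))  # -> True False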
id: 4,016
repo: HazyResearch/pdftotree
path: pdftotree/visual/visual_utils.py
func_name: get_bboxes
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/visual/visual_utils.py#L74-L141
code:

    def get_bboxes(
        img,
        mask,
        nb_boxes=100,
        score_thresh=0.5,
        iou_thresh=0.2,
        prop_size=0.09,
        prop_scale=1.2,
    ):
        """
        Uses selective search to generate candidate bounding boxes and keeps the
        ones that have the largest iou with the predicted mask.

        :param img: original image
        :param mask: predicted mask
        :param nb_boxes: max number of candidate bounding boxes
        :param score_thresh: score threshold to consider prediction is True
        :param iou_thresh: iou threshold to consider a candidate is a correct region
        :param prop_size: selective search parameter
        :param prop_scale: selective search parameter, larger prop_scale favors
            large bounding boxes
        :return: list of bounding boxes and ious, bounding boxes are tuples
            (left, top, width, height)
        """
        min_size = int(img.shape[0] * prop_size * img.shape[1] * prop_size)
        scale = int(img.shape[0] * prop_scale)
        # TODO: cross validate for multiple values of prop_size, prop_scale, and nb_bboxes
        img_lbl, regions = selectivesearch.selective_search(
            img, scale=scale, sigma=0.8, min_size=min_size
        )
        rect = [None] * nb_boxes
        max_iou = -1 * np.ones(nb_boxes)
        mask = 1.0 * (mask > score_thresh)
        # compute iou for each candidate bounding box and save top nb_bboxes
        for region in regions:
            left, top, width, height = region["rect"]
            intersection = mask[top : top + height, left : left + width].sum()
            union = height * width + mask.sum() - intersection
            iou = intersection / union
            idx = np.argmin(max_iou)
            if iou > max_iou[idx]:
                max_iou[idx] = iou
                rect[idx] = region["rect"]
        # Exclusive maximum
        remove_indexes = max_iou == -1
        bboxes = []
        filtered_ious = []
        for idx in np.argsort([-x for x in max_iou]):
            if remove_indexes[idx]:
                # no more table bounding boxes
                break
            if len(bboxes) == 0:
                # first candidate table bounding box
                if max_iou[idx] > iou_thresh:
                    bboxes += [rect[idx]]
                    filtered_ious += [max_iou[idx]]
                else:
                    # No tables in this document
                    break
            else:
                # If it doesn't intersect with any other bounding box
                if not any(
                    [do_intersect(rect[idx], bboxes[k]) for k in range(len(bboxes))]
                ):
                    if max_iou[idx] > iou_thresh:
                        bboxes += [rect[idx]]
                        filtered_ious += [max_iou[idx]]
        return bboxes, filtered_ious
id: 4,017
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: _print_dict
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L221-L229
code:

    def _print_dict(elem_dict):
        """
        Print a dict in a readable way
        """
        for key, value in sorted(elem_dict.iteritems()):
            if isinstance(value, collections.Iterable):
                print(key, len(value))
            else:
                print(key, value)
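A portability note, not part of the dataset: dict.iteritems() and
collections.Iterable are Python 2 idioms; the former is gone in Python 3 and
the latter moved to collections.abc (removed from collections in 3.10). A
sketch of the equivalent under Python 3 (the _py3 name is hypothetical):

    import collections.abc

    def _print_dict_py3(elem_dict):
        for key, value in sorted(elem_dict.items()):
            if isinstance(value, collections.abc.Iterable):
                print(key, len(value))
            else:
                print(key, value)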
id: 4,018
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: _font_of_mention
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L238-L246
code:

    def _font_of_mention(m):
        """
        Returns the font type and size of the first alphanumeric char in the
        text, or (None, 0) if there isn't any.
        """
        for ch in m:
            if isinstance(ch, LTChar) and ch.get_text().isalnum():
                return (ch.fontname, _font_size_of(ch))
        return (None, 0)
id: 4,019
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: _allowed_char
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L258-L269
code:

    def _allowed_char(c):
        """
        Returns whether the given unicode char is allowed in output
        """
        c = ord(c)
        if c < 0:
            return False
        if c < 128:
            return _ascii_allowed[c]
        # Generally allow unicodes, TODO: check for unicode control
        # characters
        return True
id: 4,020
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: keep_allowed_chars
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L272-L277
code:

    def keep_allowed_chars(text):
        """
        Cleans the text for output
        """
        # print ','.join(str(ord(c)) for c in text)
        return "".join(" " if c == "\n" else c for c in text.strip() if _allowed_char(c))
id: 4,021
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: CustomPDFPageAggregator.paint_path
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L53-L74
code:

    def paint_path(self, gstate, stroke, fill, evenodd, path):
        """
        Converts long paths to small segments, splitting each time we hit
        m=Move or h=ClosePath for a polygon
        """
        shape = "".join(x[0] for x in path)
        prev_split = 0
        for i in range(len(shape)):
            if shape[i] == "m" and prev_split != i:
                self.paint_single_path(
                    gstate, stroke, fill, evenodd, path[prev_split:i]
                )
                prev_split = i
            if shape[i] == "h":
                self.paint_single_path(
                    gstate, stroke, fill, evenodd, path[prev_split : i + 1]
                )
                prev_split = i + 1

        # clean up remaining segments
        if prev_split < len(shape):
            self.paint_single_path(gstate, stroke, fill, evenodd, path[prev_split:])
id: 4,022
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/pdf_utils.py
func_name: CustomPDFPageAggregator.paint_single_path
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_utils.py#L76-L107
code:

    def paint_single_path(self, gstate, stroke, fill, evenodd, path):
        """
        Converts a single path draw command into line and curve objects
        """
        if len(path) < 2:
            return
        shape = "".join(x[0] for x in path)

        pts = []
        for p in path:
            for i in range(1, len(p), 2):
                pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1])))

        # Line mode
        if self.line_only_shape.match(shape):
            # check for sloped lines first
            has_slope = False
            for i in range(len(pts) - 1):
                if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]:
                    has_slope = True
                    break
            if not has_slope:
                for i in range(len(pts) - 1):
                    self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1]))

                # Adding the closing line for a polygon, especially rectangles
                if shape.endswith("h"):
                    self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1]))
                return

        # Add the curve as an arbitrary polyline (Bezier curve info is lost here)
        self.cur_item.add(LTCurve(gstate.linewidth, pts))
id: 4,023
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/layout_utils.py
func_name: traverse_layout
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L17-L25
code:

    def traverse_layout(root, callback):
        """
        Tree walker that invokes the callback as it traverses the pdf object tree
        """
        callback(root)

        if isinstance(root, collections.Iterable):
            for child in root:
                traverse_layout(child, callback)
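The same Python 2/3 caveat as _print_dict above applies: isinstance checks
against collections.Iterable raise AttributeError from Python 3.10 on. A
sketch of a Python 3 variant (the _py3 name is hypothetical):

    from collections.abc import Iterable

    def traverse_layout_py3(root, callback):
        callback(root)
        if isinstance(root, Iterable):
            for child in root:
                traverse_layout_py3(child, callback)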
id: 4,024
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/layout_utils.py
func_name: get_near_items
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L28-L40
code:

    def get_near_items(tree, tree_key):
        """
        Check both possible neighbors for key in a binary tree
        """
        try:
            yield tree.floor_item(tree_key)
        except KeyError:
            pass

        try:
            yield tree.ceiling_item(tree_key)
        except KeyError:
            pass
id: 4,025
repo: HazyResearch/pdftotree
path: pdftotree/utils/pdf/layout_utils.py
func_name: align_add
language: python
sha: 5890d668b475d5d3058d1d886aafbfd83268c440
url: https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L43-L55
code:

    def align_add(tree, key, item, align_thres=2.0):
        """
        Adds the item object to a binary tree under the given key, while
        allowing for small key differences: keys within align_thres of an
        existing key are close enough to join that key's group.
        """
        for near_key, near_list in get_near_items(tree, key):
            if abs(key - near_key) < align_thres:
                near_list.append(item)
                return

        # Create a new group if no items are close
        tree[key] = [item]
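A usage sketch covering get_near_items and align_add together. The tree
argument is presumably a sorted tree map in the style of the bintrees package,
whose trees expose the floor_item/ceiling_item API used above; that import is
an assumption, not something these rows pin down.

    from bintrees import RBTree  # assumed tree type with floor_item/ceiling_item
    from pdftotree.utils.pdf.layout_utils import align_add

    tree = RBTree()
    align_add(tree, 10.0, "a")  # no near key: creates the 10.0 group
    align_add(tree, 11.0, "b")  # |11.0 - 10.0| < 2.0: joins that group
    align_add(tree, 20.0, "c")  # too far from any key: new group
    print(dict(tree))  # -> {10.0: ['a', 'b'], 20.0: ['c']}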
4,026
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
collect_table_content
def collect_table_content(table_bboxes, elems): """ Returns a list of elements that are contained inside the corresponding supplied bbox. """ # list of table content chars table_contents = [[] for _ in range(len(table_bboxes))] prev_content = None prev_bbox = None for cid, c in enumerate(elems): # Annotations should not fall outside alone if isinstance(c, LTAnno): if prev_content is not None: prev_content.append(c) continue # Generally speaking table contents should be included sequentially # and we can avoid checking all tables for elems inside # Elements only need to intersect the bbox for table as some # formatting of fonts may result in slightly out of bbox text if prev_bbox is not None and intersect(prev_bbox, c.bbox): prev_content.append(c) continue # Search the rest of the tables for membership when done with # the current one for table_id, table_bbox in enumerate(table_bboxes): if intersect(table_bbox, c.bbox): prev_bbox = table_bbox prev_content = table_contents[table_id] prev_content.append(c) break return table_contents
python
def collect_table_content(table_bboxes, elems): """ Returns a list of elements that are contained inside the corresponding supplied bbox. """ # list of table content chars table_contents = [[] for _ in range(len(table_bboxes))] prev_content = None prev_bbox = None for cid, c in enumerate(elems): # Annotations should not fall outside alone if isinstance(c, LTAnno): if prev_content is not None: prev_content.append(c) continue # Generally speaking table contents should be included sequentially # and we can avoid checking all tables for elems inside # Elements only need to intersect the bbox for table as some # formatting of fonts may result in slightly out of bbox text if prev_bbox is not None and intersect(prev_bbox, c.bbox): prev_content.append(c) continue # Search the rest of the tables for membership when done with # the current one for table_id, table_bbox in enumerate(table_bboxes): if intersect(table_bbox, c.bbox): prev_bbox = table_bbox prev_content = table_contents[table_id] prev_content.append(c) break return table_contents
[ "def", "collect_table_content", "(", "table_bboxes", ",", "elems", ")", ":", "# list of table content chars", "table_contents", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "table_bboxes", ")", ")", "]", "prev_content", "=", "None", "prev_bbox", "=", "None", "for", "cid", ",", "c", "in", "enumerate", "(", "elems", ")", ":", "# Annotations should not fall outside alone", "if", "isinstance", "(", "c", ",", "LTAnno", ")", ":", "if", "prev_content", "is", "not", "None", ":", "prev_content", ".", "append", "(", "c", ")", "continue", "# Generally speaking table contents should be included sequentially", "# and we can avoid checking all tables for elems inside", "# Elements only need to intersect the bbox for table as some", "# formatting of fonts may result in slightly out of bbox text", "if", "prev_bbox", "is", "not", "None", "and", "intersect", "(", "prev_bbox", ",", "c", ".", "bbox", ")", ":", "prev_content", ".", "append", "(", "c", ")", "continue", "# Search the rest of the tables for membership when done with", "# the current one", "for", "table_id", ",", "table_bbox", "in", "enumerate", "(", "table_bboxes", ")", ":", "if", "intersect", "(", "table_bbox", ",", "c", ".", "bbox", ")", ":", "prev_bbox", "=", "table_bbox", "prev_content", "=", "table_contents", "[", "table_id", "]", "prev_content", ".", "append", "(", "c", ")", "break", "return", "table_contents" ]
Returns a list of elements that are contained inside the corresponding supplied bbox.
[ "Returns", "a", "list", "of", "elements", "that", "are", "contained", "inside", "the", "corresponding", "supplied", "bbox", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L120-L150
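The membership test hinges on an intersect(bbox_a, bbox_b) predicate over (x0, y0, x1, y1) boxes; a common definition (pdftotree's own lives in layout_utils) looks like this:
def intersect(a, b):
    # Boxes overlap iff they overlap on both axes (touching edges count).
    return a[0] <= b[2] and b[0] <= a[2] and a[1] <= b[3] and b[1] <= a[3]

table_bboxes = [(0, 0, 100, 50), (0, 100, 100, 150)]
char_bbox = (10, 110, 14, 118)
print([i for i, t in enumerate(table_bboxes) if intersect(t, char_bbox)])  # [1]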
4,027
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
project_onto
def project_onto(objs, axis, min_gap_size=4.0): """ Projects object bboxes onto the axis and returns the unioned intervals and groups of objects in intervals. """ if axis == "x": axis = 0 if axis == "y": axis = 1 axis_end = axis + 2 if axis == 0: # if projecting onto X axis objs.sort(key=lambda o: o.x0) else: objs.sort(key=lambda o: o.y0) intervals = [] groups = [] start_i = 0 start = objs[0].bbox[axis] end = objs[0].bbox[axis_end] # Use _inf_bbox to trigger the last interval divide for o_i, o in enumerate(chain(objs, [_inf_bbox])): # Get current interval o_start = o.bbox[axis] o_end = o.bbox[axis_end] # start new interval when gap with previous end is big if o_start > end + min_gap_size: # Append new interval coordinates for children intervals.append((start, end)) # Append child object group on page groups.append(objs[start_i:o_i]) # Mark next obj list range start_i = o_i start = o_start # Always check to extend current interval to new end if o_end > end: end = o_end # else do nothing return intervals, groups
python
def project_onto(objs, axis, min_gap_size=4.0): """ Projects object bboxes onto the axis and returns the unioned intervals and groups of objects in intervals. """ if axis == "x": axis = 0 if axis == "y": axis = 1 axis_end = axis + 2 if axis == 0: # if projecting onto X axis objs.sort(key=lambda o: o.x0) else: objs.sort(key=lambda o: o.y0) intervals = [] groups = [] start_i = 0 start = objs[0].bbox[axis] end = objs[0].bbox[axis_end] # Use _inf_bbox to trigger the last interval divide for o_i, o in enumerate(chain(objs, [_inf_bbox])): # Get current interval o_start = o.bbox[axis] o_end = o.bbox[axis_end] # start new interval when gap with previous end is big if o_start > end + min_gap_size: # Append new interval coordinates for children intervals.append((start, end)) # Append child object group on page groups.append(objs[start_i:o_i]) # Mark next obj list range start_i = o_i start = o_start # Always check to extend current interval to new end if o_end > end: end = o_end # else do nothing return intervals, groups
[ "def", "project_onto", "(", "objs", ",", "axis", ",", "min_gap_size", "=", "4.0", ")", ":", "if", "axis", "==", "\"x\"", ":", "axis", "=", "0", "if", "axis", "==", "\"y\"", ":", "axis", "=", "1", "axis_end", "=", "axis", "+", "2", "if", "axis", "==", "0", ":", "# if projecting onto X axis", "objs", ".", "sort", "(", "key", "=", "lambda", "o", ":", "o", ".", "x0", ")", "else", ":", "objs", ".", "sort", "(", "key", "=", "lambda", "o", ":", "o", ".", "y0", ")", "intervals", "=", "[", "]", "groups", "=", "[", "]", "start_i", "=", "0", "start", "=", "objs", "[", "0", "]", ".", "bbox", "[", "axis", "]", "end", "=", "objs", "[", "0", "]", ".", "bbox", "[", "axis_end", "]", "# Use _inf_bbox to trigger the last interval divide", "for", "o_i", ",", "o", "in", "enumerate", "(", "chain", "(", "objs", ",", "[", "_inf_bbox", "]", ")", ")", ":", "# Get current interval", "o_start", "=", "o", ".", "bbox", "[", "axis", "]", "o_end", "=", "o", ".", "bbox", "[", "axis_end", "]", "# start new interval when gap with previous end is big", "if", "o_start", ">", "end", "+", "min_gap_size", ":", "# Append new interval coordinates for children", "intervals", ".", "append", "(", "(", "start", ",", "end", ")", ")", "# Append child object group on page", "groups", ".", "append", "(", "objs", "[", "start_i", ":", "o_i", "]", ")", "# Mark next obj list range", "start_i", "=", "o_i", "start", "=", "o_start", "# Always check to extend current interval to new end", "if", "o_end", ">", "end", ":", "end", "=", "o_end", "# else do nothing", "return", "intervals", ",", "groups" ]
Projects object bboxes onto the axis and returns the unioned intervals and groups of objects in intervals.
[ "Projects", "object", "bboxes", "onto", "the", "axis", "and", "returns", "the", "unioned", "intervals", "and", "groups", "of", "objects", "in", "intervals", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L168-L214
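A self-contained sketch of the x-axis case, with namedtuples standing in for pdfminer layout objects:
from collections import namedtuple

Box = namedtuple("Box", ["x0", "y0", "x1", "y1"])

def project_x(boxes, min_gap_size=4.0):
    # Union the boxes' x-extents; a gap wider than min_gap_size splits intervals.
    boxes = sorted(boxes, key=lambda b: b.x0)
    intervals = []
    start, end = boxes[0].x0, boxes[0].x1
    for b in boxes[1:]:
        if b.x0 > end + min_gap_size:
            intervals.append((start, end))
            start = b.x0
        end = max(end, b.x1)
    intervals.append((start, end))
    return intervals

print(project_x([Box(0, 0, 30, 10), Box(32, 0, 60, 10), Box(90, 0, 120, 10)]))
# [(0, 60), (90, 120)] -- two column bands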
4,028
HazyResearch/pdftotree
pdftotree/utils/pdf/render.py
Renderer.draw_rect
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values. Float bbox values are normalized to have non-zero area. """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
python
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values. Float bbox values are normalized to have non-zero area. """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
[ "def", "draw_rect", "(", "self", ",", "bbox", ",", "cell_val", ")", ":", "new_x0", "=", "int", "(", "bbox", "[", "x0", "]", ")", "new_y0", "=", "int", "(", "bbox", "[", "y0", "]", ")", "new_x1", "=", "max", "(", "new_x0", "+", "1", ",", "int", "(", "bbox", "[", "x1", "]", ")", ")", "new_y1", "=", "max", "(", "new_y0", "+", "1", ",", "int", "(", "bbox", "[", "y1", "]", ")", ")", "self", ".", "grid", "[", "new_x0", ":", "new_x1", ",", "new_y0", ":", "new_y1", "]", "=", "cell_val" ]
Fills the bbox with the content values. Float bbox values are normalized to have non-zero area.
[ "Fills", "the", "bbox", "with", "the", "content", "values", ".", "Float", "bbox", "values", "are", "normalized", "to", "have", "non", "-", "zero", "area", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/render.py#L57-L67
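A standalone NumPy sketch of the same normalization (x0/y0/x1/y1 are module-level index constants in the original; plain unpacking is used here):
import numpy as np

def draw_rect(grid, bbox, cell_val):
    x0, y0, x1, y1 = bbox
    nx0, ny0 = int(x0), int(y0)
    nx1 = max(nx0 + 1, int(x1))  # force at least one cell of width
    ny1 = max(ny0 + 1, int(y1))  # ... and at least one cell of height
    grid[nx0:nx1, ny0:ny1] = cell_val

grid = np.zeros((20, 20), dtype=int)
draw_rect(grid, (2.3, 2.9, 2.7, 3.1), 7)  # nearly zero-area float bbox
print(grid[2, 2])  # 7 -- still fills one cell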
4,029
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_parsers.py
parse_layout
def parse_layout(elems, font_stat, combine=False): """ Parses pdf texts into a hypergraph grouped into rows and columns and then outputs the result """ boxes_segments = elems.segments boxes_curves = elems.curves boxes_figures = elems.figures page_width = elems.layout.width # page_height = elems.layout.height boxes = elems.mentions avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat) width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves) char_width = get_char_width(boxes) grid_size = avg_font_pts / 2.0 for i, m in enumerate(boxes + elems.figures): m.id = i m.feats = defaultdict(bool) prefix = "" if isinstance(m, LTTextLine) and m.font_name: prefix = m.font_name + "-" + str(m.font_size) + "-" m.xc = (m.x0 + m.x1) / 2.0 m.yc = (m.y0 + m.y1) / 2.0 m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size tbls, tbl_features = cluster_vertically_aligned_boxes( boxes, elems.layout.bbox, avg_font_pts, width, char_width, boxes_segments, boxes_curves, boxes_figures, page_width, combine, ) return tbls, tbl_features
python
def parse_layout(elems, font_stat, combine=False): """ Parses pdf texts into a hypergraph grouped into rows and columns and then outputs the result """ boxes_segments = elems.segments boxes_curves = elems.curves boxes_figures = elems.figures page_width = elems.layout.width # page_height = elems.layout.height boxes = elems.mentions avg_font_pts = get_most_common_font_pts(elems.mentions, font_stat) width = get_page_width(boxes + boxes_segments + boxes_figures + boxes_curves) char_width = get_char_width(boxes) grid_size = avg_font_pts / 2.0 for i, m in enumerate(boxes + elems.figures): m.id = i m.feats = defaultdict(bool) prefix = "" if isinstance(m, LTTextLine) and m.font_name: prefix = m.font_name + "-" + str(m.font_size) + "-" m.xc = (m.x0 + m.x1) / 2.0 m.yc = (m.y0 + m.y1) / 2.0 m.feats[prefix + "x0"] = m.x0_grid = m.x0 // grid_size m.feats[prefix + "x1"] = m.x1_grid = m.x1 // grid_size m.feats[prefix + "xc"] = m.xc_grid = m.xc // grid_size m.feats[prefix + "yc"] = m.yc_grid = m.yc // grid_size tbls, tbl_features = cluster_vertically_aligned_boxes( boxes, elems.layout.bbox, avg_font_pts, width, char_width, boxes_segments, boxes_curves, boxes_figures, page_width, combine, ) return tbls, tbl_features
[ "def", "parse_layout", "(", "elems", ",", "font_stat", ",", "combine", "=", "False", ")", ":", "boxes_segments", "=", "elems", ".", "segments", "boxes_curves", "=", "elems", ".", "curves", "boxes_figures", "=", "elems", ".", "figures", "page_width", "=", "elems", ".", "layout", ".", "width", "# page_height = elems.layout.height", "boxes", "=", "elems", ".", "mentions", "avg_font_pts", "=", "get_most_common_font_pts", "(", "elems", ".", "mentions", ",", "font_stat", ")", "width", "=", "get_page_width", "(", "boxes", "+", "boxes_segments", "+", "boxes_figures", "+", "boxes_curves", ")", "char_width", "=", "get_char_width", "(", "boxes", ")", "grid_size", "=", "avg_font_pts", "/", "2.0", "for", "i", ",", "m", "in", "enumerate", "(", "boxes", "+", "elems", ".", "figures", ")", ":", "m", ".", "id", "=", "i", "m", ".", "feats", "=", "defaultdict", "(", "bool", ")", "prefix", "=", "\"\"", "if", "isinstance", "(", "m", ",", "LTTextLine", ")", "and", "m", ".", "font_name", ":", "prefix", "=", "m", ".", "font_name", "+", "\"-\"", "+", "str", "(", "m", ".", "font_size", ")", "+", "\"-\"", "m", ".", "xc", "=", "(", "m", ".", "x0", "+", "m", ".", "x1", ")", "/", "2.0", "m", ".", "yc", "=", "(", "m", ".", "y0", "+", "m", ".", "y1", ")", "/", "2.0", "m", ".", "feats", "[", "prefix", "+", "\"x0\"", "]", "=", "m", ".", "x0_grid", "=", "m", ".", "x0", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"x1\"", "]", "=", "m", ".", "x1_grid", "=", "m", ".", "x1", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"xc\"", "]", "=", "m", ".", "xc_grid", "=", "m", ".", "xc", "//", "grid_size", "m", ".", "feats", "[", "prefix", "+", "\"yc\"", "]", "=", "m", ".", "yc_grid", "=", "m", ".", "yc", "//", "grid_size", "tbls", ",", "tbl_features", "=", "cluster_vertically_aligned_boxes", "(", "boxes", ",", "elems", ".", "layout", ".", "bbox", ",", "avg_font_pts", ",", "width", ",", "char_width", ",", "boxes_segments", ",", "boxes_curves", ",", "boxes_figures", ",", "page_width", ",", "combine", ",", ")", "return", "tbls", ",", "tbl_features" ]
Parses pdf texts into a hypergraph grouped into rows and columns and then outputs the result
[ "Parses", "pdf", "texts", "into", "a", "hypergraph", "grouped", "into", "rows", "and", "columns", "and", "then", "outputs", "the", "result" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_parsers.py#L23-L63
4,030
HazyResearch/pdftotree
pdftotree/utils/pdf/pdf_parsers.py
merge_nodes
def merge_nodes(nodes, plane, page_stat, merge_indices): """ Merges overlapping nodes """ # Merge inner boxes to the best outer box # nodes.sort(key=Node.area) to_be_removed = set() for inner_idx in range(len(nodes)): inner = nodes[inner_idx] outers = [] outers_indices = [] for outer_idx in range(len(nodes)): outer = nodes[outer_idx] if outer is inner or outer in to_be_removed: continue if intersect(outer.bbox, inner.bbox): outers.append(outer) outers_indices.append(outer_idx) if not outers: continue # Best is defined as min L1 distance to outer center best_outer = min( outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox)) ) best_outer_idx = outers_indices[outers.index(best_outer)] to_be_removed.add(inner) best_outer.merge(inner) for cid_iter in range(len(merge_indices)): if merge_indices[cid_iter] == merge_indices[inner_idx]: merge_indices[cid_iter] = merge_indices[best_outer_idx] return nodes, merge_indices
python
def merge_nodes(nodes, plane, page_stat, merge_indices): """ Merges overlapping nodes """ # Merge inner boxes to the best outer box # nodes.sort(key=Node.area) to_be_removed = set() for inner_idx in range(len(nodes)): inner = nodes[inner_idx] outers = [] outers_indices = [] for outer_idx in range(len(nodes)): outer = nodes[outer_idx] if outer is inner or outer in to_be_removed: continue if intersect(outer.bbox, inner.bbox): outers.append(outer) outers_indices.append(outer_idx) if not outers: continue # Best is defined as min L1 distance to outer center best_outer = min( outers, key=lambda outer: l1(center(outer.bbox), center(inner.bbox)) ) best_outer_idx = outers_indices[outers.index(best_outer)] to_be_removed.add(inner) best_outer.merge(inner) for cid_iter in range(len(merge_indices)): if merge_indices[cid_iter] == merge_indices[inner_idx]: merge_indices[cid_iter] = merge_indices[best_outer_idx] return nodes, merge_indices
[ "def", "merge_nodes", "(", "nodes", ",", "plane", ",", "page_stat", ",", "merge_indices", ")", ":", "# Merge inner boxes to the best outer box", "# nodes.sort(key=Node.area)", "to_be_removed", "=", "set", "(", ")", "for", "inner_idx", "in", "range", "(", "len", "(", "nodes", ")", ")", ":", "inner", "=", "nodes", "[", "inner_idx", "]", "outers", "=", "[", "]", "outers_indices", "=", "[", "]", "for", "outer_idx", "in", "range", "(", "len", "(", "nodes", ")", ")", ":", "outer", "=", "nodes", "[", "outer_idx", "]", "if", "outer", "is", "inner", "or", "outer", "in", "to_be_removed", ":", "continue", "if", "intersect", "(", "outer", ".", "bbox", ",", "inner", ".", "bbox", ")", ":", "outers", ".", "append", "(", "outer", ")", "outers_indices", ".", "append", "(", "outer_idx", ")", "if", "not", "outers", ":", "continue", "# Best is defined as min L1 distance to outer center", "best_outer", "=", "min", "(", "outers", ",", "key", "=", "lambda", "outer", ":", "l1", "(", "center", "(", "outer", ".", "bbox", ")", ",", "center", "(", "inner", ".", "bbox", ")", ")", ")", "best_outer_idx", "=", "outers_indices", "[", "outers", ".", "index", "(", "best_outer", ")", "]", "to_be_removed", ".", "add", "(", "inner", ")", "best_outer", ".", "merge", "(", "inner", ")", "for", "cid_iter", "in", "range", "(", "len", "(", "merge_indices", ")", ")", ":", "if", "merge_indices", "[", "cid_iter", "]", "==", "merge_indices", "[", "inner_idx", "]", ":", "merge_indices", "[", "cid_iter", "]", "=", "merge_indices", "[", "best_outer_idx", "]", "return", "nodes", ",", "merge_indices" ]
Merges overlapping nodes
[ "Merges", "overlapping", "nodes" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/pdf_parsers.py#L1266-L1296
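The "best outer" criterion above relies on two tiny helpers; minimal stand-ins (pdftotree's own versions live in its bbox utilities):
def center(bbox):
    x0, y0, x1, y1 = bbox
    return ((x0 + x1) / 2.0, (y0 + y1) / 2.0)

def l1(p, q):
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

inner = (10, 10, 20, 20)
outers = [(0, 0, 100, 100), (5, 5, 30, 30)]
print(min(outers, key=lambda o: l1(center(o), center(inner))))
# (5, 5, 30, 30) -- the outer box whose center is closest wins the merge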
4,031
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
_get_cols
def _get_cols(row_content): """ Counting the number of columns based on the content of this row """ cols = [] subcell_col = [] prev_bar = None for _coord, item in row_content: if isinstance(item, LTTextLine): subcell_col.append(item) else: # bar, add column content # When there is no content, we count a None column if prev_bar: bar_ranges = (prev_bar, item) col_items = subcell_col if subcell_col else [None] cols.extend([bar_ranges, col_items]) prev_bar = item subcell_col = [] # Remove extra column before first bar return cols
python
def _get_cols(row_content): """ Counting the number of columns based on the content of this row """ cols = [] subcell_col = [] prev_bar = None for _coord, item in row_content: if isinstance(item, LTTextLine): subcell_col.append(item) else: # bar, add column content # When there is no content, we count a None column if prev_bar: bar_ranges = (prev_bar, item) col_items = subcell_col if subcell_col else [None] cols.extend([bar_ranges, col_items]) prev_bar = item subcell_col = [] # Remove extra column before first bar return cols
[ "def", "_get_cols", "(", "row_content", ")", ":", "cols", "=", "[", "]", "subcell_col", "=", "[", "]", "prev_bar", "=", "None", "for", "_coord", ",", "item", "in", "row_content", ":", "if", "isinstance", "(", "item", ",", "LTTextLine", ")", ":", "subcell_col", ".", "append", "(", "item", ")", "else", ":", "# bar, add column content", "# When there is no content, we count a None column", "if", "prev_bar", ":", "bar_ranges", "=", "(", "prev_bar", ",", "item", ")", "col_items", "=", "subcell_col", "if", "subcell_col", "else", "[", "None", "]", "cols", ".", "extend", "(", "[", "bar_ranges", ",", "col_items", "]", ")", "prev_bar", "=", "item", "subcell_col", "=", "[", "]", "# Remove extra column before first bar", "return", "cols" ]
Counting the number of columns based on the content of this row
[ "Counting", "the", "number", "of", "columns", "based", "on", "the", "content", "of", "this", "row" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L187-L206
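A runnable walkthrough of the alternating output shape, with strings standing in for text lines and vertical bars (illustrative only):
def get_cols_demo(row_content, is_text):
    # Alternate (prev_bar, bar) ranges with the cell contents between them.
    cols, cell, prev_bar = [], [], None
    for _coord, item in row_content:
        if is_text(item):
            cell.append(item)
        else:                        # a vertical bar closes the current cell
            if prev_bar is not None:
                cols.extend([(prev_bar, item), cell or [None]])
            prev_bar, cell = item, []
    return cols

row = [(0, "|a"), (1, "x"), (5, "|b"), (9, "|c")]
print(get_cols_demo(row, is_text=lambda s: not s.startswith("|")))
# [('|a', '|b'), ['x'], ('|b', '|c'), [None]] -- empty cells become [None]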
4,032
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
_one_contains_other
def _one_contains_other(s1, s2): """ Whether one set contains the other """ return min(len(s1), len(s2)) == len(s1 & s2)
python
def _one_contains_other(s1, s2): """ Whether one set contains the other """ return min(len(s1), len(s2)) == len(s1 & s2)
[ "def", "_one_contains_other", "(", "s1", ",", "s2", ")", ":", "return", "min", "(", "len", "(", "s1", ")", ",", "len", "(", "s2", ")", ")", "==", "len", "(", "s1", "&", "s2", ")" ]
Whether one set contains the other
[ "Whether", "one", "set", "contains", "the", "other" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L241-L245
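The identity behind the one-liner: the intersection can only be as large as the smaller set when the smaller set is a subset of the other. For example:
s1, s2 = {1, 2}, {1, 2, 3}
print(min(len(s1), len(s2)) == len(s1 & s2))          # True  ({1, 2} is inside {1, 2, 3})
print(min(len({1, 4}), len(s2)) == len({1, 4} & s2))  # False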
4,033
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
Node.is_table
def is_table(self): """ Count the node's number of mention alignments in both axes to determine if the node is a table. """ if self.type_counts["text"] < 6 or "figure" in self.type_counts: return False for e in self.elems: # Characters written as curve are usually small, discard diagrams here if elem_type(e) == "curve" and e.height * e.width > 100: return False # import re # space_re = '\\s+' # ws_arr = [] # whitespace_aligned = False # for elem in self.elems: # elem_ws = [] # for m in re.finditer(space_re, elem.get_text()): # elem_ws.append(m.start()) # # print elem, elem_ws # if(len(elem_ws)>0): # ws_arr.append(elem_ws) # # print ws_arr # if(len(ws_arr)>0): # count_arr = max([ws_arr.count(i) for i in ws_arr]) # if(float(count_arr)/len(ws_arr) > 0.75): # return True if ( self.sum_elem_bbox / (self.height * self.width) ) > self.table_area_threshold: return False has_many_x_align = False has_many_y_align = False for k, v in six.iteritems(self.feat_counts): font_key = k[0] if ( v >= 2 and "-" in font_key ): # Text row or column with more than 2 elements if font_key[-2] == "x": has_many_x_align = True if font_key[-2] == "y": has_many_y_align = True return has_many_x_align and has_many_y_align
python
def is_table(self): """ Count the node's number of mention alignments in both axes to determine if the node is a table. """ if self.type_counts["text"] < 6 or "figure" in self.type_counts: return False for e in self.elems: # Characters written as curve are usually small, discard diagrams here if elem_type(e) == "curve" and e.height * e.width > 100: return False # import re # space_re = '\\s+' # ws_arr = [] # whitespace_aligned = False # for elem in self.elems: # elem_ws = [] # for m in re.finditer(space_re, elem.get_text()): # elem_ws.append(m.start()) # # print elem, elem_ws # if(len(elem_ws)>0): # ws_arr.append(elem_ws) # # print ws_arr # if(len(ws_arr)>0): # count_arr = max([ws_arr.count(i) for i in ws_arr]) # if(float(count_arr)/len(ws_arr) > 0.75): # return True if ( self.sum_elem_bbox / (self.height * self.width) ) > self.table_area_threshold: return False has_many_x_align = False has_many_y_align = False for k, v in six.iteritems(self.feat_counts): font_key = k[0] if ( v >= 2 and "-" in font_key ): # Text row or column with more than 2 elements if font_key[-2] == "x": has_many_x_align = True if font_key[-2] == "y": has_many_y_align = True return has_many_x_align and has_many_y_align
[ "def", "is_table", "(", "self", ")", ":", "if", "self", ".", "type_counts", "[", "\"text\"", "]", "<", "6", "or", "\"figure\"", "in", "self", ".", "type_counts", ":", "return", "False", "for", "e", "in", "self", ".", "elems", ":", "# Characters written as curve are usually small, discard diagrams here", "if", "elem_type", "(", "e", ")", "==", "\"curve\"", "and", "e", ".", "height", "*", "e", ".", "width", ">", "100", ":", "return", "False", "# import re", "# space_re = '\\\\s+'", "# ws_arr = []", "# whitespace_aligned = False", "# for elem in self.elems:", "# elem_ws = []", "# for m in re.finditer(space_re, elem.get_text()):", "# elem_ws.append(m.start())", "# # print elem, elem_ws", "# if(len(elem_ws)>0):", "# ws_arr.append(elem_ws)", "# # print ws_arr", "# if(len(ws_arr)>0):", "# count_arr = max([ws_arr.count(i) for i in ws_arr])", "# if(float(count_arr)/len(ws_arr) > 0.75):", "# return True", "if", "(", "self", ".", "sum_elem_bbox", "/", "(", "self", ".", "height", "*", "self", ".", "width", ")", ")", ">", "self", ".", "table_area_threshold", ":", "return", "False", "has_many_x_align", "=", "False", "has_many_y_align", "=", "False", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "self", ".", "feat_counts", ")", ":", "font_key", "=", "k", "[", "0", "]", "if", "(", "v", ">=", "2", "and", "\"-\"", "in", "font_key", ")", ":", "# Text row or column with more than 2 elements", "if", "font_key", "[", "-", "2", "]", "==", "\"x\"", ":", "has_many_x_align", "=", "True", "if", "font_key", "[", "-", "2", "]", "==", "\"y\"", ":", "has_many_y_align", "=", "True", "return", "has_many_x_align", "and", "has_many_y_align" ]
Count the node's number of mention alignments in both axes to determine if the node is a table.
[ "Count", "the", "node", "s", "number", "of", "mention", "alignments", "in", "both", "axes", "to", "determine", "if", "the", "node", "is", "a", "table", "." ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L73-L115
4,034
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
Node.get_grid
def get_grid(self): """ Standardize the layout of the table into grids """ mentions, lines = _split_text_n_lines(self.elems) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions.sort(key=lambda m: (m.yc_grid, m.xc)) grid = Grid(mentions, lines, self) return grid
python
def get_grid(self): """ Standardize the layout of the table into grids """ mentions, lines = _split_text_n_lines(self.elems) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions.sort(key=lambda m: (m.yc_grid, m.xc)) grid = Grid(mentions, lines, self) return grid
[ "def", "get_grid", "(", "self", ")", ":", "mentions", ",", "lines", "=", "_split_text_n_lines", "(", "self", ".", "elems", ")", "# Sort mentions in reading order where y values are snapped to half", "# height-sized grid", "mentions", ".", "sort", "(", "key", "=", "lambda", "m", ":", "(", "m", ".", "yc_grid", ",", "m", ".", "xc", ")", ")", "grid", "=", "Grid", "(", "mentions", ",", "lines", ",", "self", ")", "return", "grid" ]
Standardize the layout of the table into grids
[ "Standardize", "the", "layout", "of", "the", "table", "into", "grids" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L118-L128
4,035
HazyResearch/pdftotree
pdftotree/utils/img_utils.py
lazy_load_font
def lazy_load_font(font_size=default_font_size): """ Lazy loading font according to system platform """ if font_size not in _font_cache: if _platform.startswith("darwin"): font_path = "/Library/Fonts/Arial.ttf" elif _platform.startswith("linux"): font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf" elif _platform.startswith("win32"): font_path = "C:\\Windows\\Fonts\\arial.ttf" _font_cache[font_size] = ImageFont.truetype(font_path, font_size) return _font_cache[font_size]
python
def lazy_load_font(font_size=default_font_size): """ Lazy loading font according to system platform """ if font_size not in _font_cache: if _platform.startswith("darwin"): font_path = "/Library/Fonts/Arial.ttf" elif _platform.startswith("linux"): font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf" elif _platform.startswith("win32"): font_path = "C:\\Windows\\Fonts\\arial.ttf" _font_cache[font_size] = ImageFont.truetype(font_path, font_size) return _font_cache[font_size]
[ "def", "lazy_load_font", "(", "font_size", "=", "default_font_size", ")", ":", "if", "font_size", "not", "in", "_font_cache", ":", "if", "_platform", ".", "startswith", "(", "\"darwin\"", ")", ":", "font_path", "=", "\"/Library/Fonts/Arial.ttf\"", "elif", "_platform", ".", "startswith", "(", "\"linux\"", ")", ":", "font_path", "=", "\"/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf\"", "elif", "_platform", ".", "startswith", "(", "\"win32\"", ")", ":", "font_path", "=", "\"C:\\\\Windows\\\\Fonts\\\\arial.ttf\"", "_font_cache", "[", "font_size", "]", "=", "ImageFont", ".", "truetype", "(", "font_path", ",", "font_size", ")", "return", "_font_cache", "[", "font_size", "]" ]
Lazy loading font according to system platform
[ "Lazy", "loading", "font", "according", "to", "system", "platform" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/img_utils.py#L24-L36
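Note that on an unrecognized platform font_path is never bound and the function raises NameError. A hedged variant with a bitmap fallback (assuming only Pillow's public ImageFont API; the helper name is hypothetical):
import sys
from PIL import ImageFont

_font_cache = {}

def lazy_load_font_safe(font_size=12):
    if font_size not in _font_cache:
        paths = {
            "darwin": "/Library/Fonts/Arial.ttf",
            "linux": "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf",
            "win32": "C:\\Windows\\Fonts\\arial.ttf",
        }
        path = next((p for k, p in paths.items() if sys.platform.startswith(k)), None)
        try:
            _font_cache[font_size] = ImageFont.truetype(path, font_size)
        except (OSError, TypeError):
            _font_cache[font_size] = ImageFont.load_default()  # portable fallback
    return _font_cache[font_size]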
4,036
HazyResearch/pdftotree
pdftotree/utils/img_utils.py
render_debug_img
def render_debug_img( file_name, page_num, elems, nodes=[], scaler=1, print_segments=False, print_curves=True, print_table_bbox=True, print_text_as_rect=True, ): """ Shows an image rendering of the pdf page along with debugging info printed """ # For debugging show the boolean pixels in black white grayscale height = scaler * int(elems.layout.height) width = scaler * int(elems.layout.width) debug_img, draw = create_img((0, 0, width, height)) font = lazy_load_font() large_font = lazy_load_font(24) if print_curves: for i, c in enumerate(elems.curves): if len(c.pts) > 1: draw.polygon(c.pts, outline=blue) draw.rectangle(c.bbox, fill=blue) # for fig in elems.figures: # draw.rectangle(fig.bbox, fill = blue) for i, m in enumerate(elems.mentions): if isinstance(m, LTAnno): continue if print_text_as_rect: fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green # fill = green draw.rectangle(m.bbox, fill=fill) # draw.text(center(m.bbox), str(i), black, font = font) # Draw id draw.text( m.bbox[:2], m.get_text(), black, font=font ) # Draw mention content else: draw.text(m.bbox[:2], m.get_text(), "black", font=font) if print_segments: # draw skeleton for all segments for i, s in enumerate(elems.segments): draw.line(s.bbox, fill="black") if print_table_bbox: for node in nodes: is_table = node.is_table() color = "red" if is_table else "green" draw.rectangle(node.bbox, outline=color) if is_table: # text = 'Borderless' if node.is_borderless() else 'Bordered' text = "Table" draw.rectangle(node.bbox, outline=color) draw.text(node.bbox[:2], text, red, font=large_font) # Water mark with file name so we can identify among multiple images if file_name and page_num is not None: water_mark = ( file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height) ) draw.text((10, 10), water_mark, black, font=font) debug_img.show() return debug_img
python
def render_debug_img( file_name, page_num, elems, nodes=[], scaler=1, print_segments=False, print_curves=True, print_table_bbox=True, print_text_as_rect=True, ): """ Shows an image rendering of the pdf page along with debugging info printed """ # For debugging show the boolean pixels in black white grayscale height = scaler * int(elems.layout.height) width = scaler * int(elems.layout.width) debug_img, draw = create_img((0, 0, width, height)) font = lazy_load_font() large_font = lazy_load_font(24) if print_curves: for i, c in enumerate(elems.curves): if len(c.pts) > 1: draw.polygon(c.pts, outline=blue) draw.rectangle(c.bbox, fill=blue) # for fig in elems.figures: # draw.rectangle(fig.bbox, fill = blue) for i, m in enumerate(elems.mentions): if isinstance(m, LTAnno): continue if print_text_as_rect: fill = "pink" if hasattr(m, "feats") and m.feats["is_cell"] else green # fill = green draw.rectangle(m.bbox, fill=fill) # draw.text(center(m.bbox), str(i), black, font = font) # Draw id draw.text( m.bbox[:2], m.get_text(), black, font=font ) # Draw mention content else: draw.text(m.bbox[:2], m.get_text(), "black", font=font) if print_segments: # draw skeleton for all segments for i, s in enumerate(elems.segments): draw.line(s.bbox, fill="black") if print_table_bbox: for node in nodes: is_table = node.is_table() color = "red" if is_table else "green" draw.rectangle(node.bbox, outline=color) if is_table: # text = 'Borderless' if node.is_borderless() else 'Bordered' text = "Table" draw.rectangle(node.bbox, outline=color) draw.text(node.bbox[:2], text, red, font=large_font) # Water mark with file name so we can identify among multiple images if file_name and page_num is not None: water_mark = ( file_name + ":page " + str(page_num + 1) + "@%dx%d" % (width, height) ) draw.text((10, 10), water_mark, black, font=font) debug_img.show() return debug_img
[ "def", "render_debug_img", "(", "file_name", ",", "page_num", ",", "elems", ",", "nodes", "=", "[", "]", ",", "scaler", "=", "1", ",", "print_segments", "=", "False", ",", "print_curves", "=", "True", ",", "print_table_bbox", "=", "True", ",", "print_text_as_rect", "=", "True", ",", ")", ":", "# For debugging show the boolean pixels in black white grayscale", "height", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "height", ")", "width", "=", "scaler", "*", "int", "(", "elems", ".", "layout", ".", "width", ")", "debug_img", ",", "draw", "=", "create_img", "(", "(", "0", ",", "0", ",", "width", ",", "height", ")", ")", "font", "=", "lazy_load_font", "(", ")", "large_font", "=", "lazy_load_font", "(", "24", ")", "if", "print_curves", ":", "for", "i", ",", "c", "in", "enumerate", "(", "elems", ".", "curves", ")", ":", "if", "len", "(", "c", ".", "pts", ")", ">", "1", ":", "draw", ".", "polygon", "(", "c", ".", "pts", ",", "outline", "=", "blue", ")", "draw", ".", "rectangle", "(", "c", ".", "bbox", ",", "fill", "=", "blue", ")", "# for fig in elems.figures:", "# draw.rectangle(fig.bbox, fill = blue)", "for", "i", ",", "m", "in", "enumerate", "(", "elems", ".", "mentions", ")", ":", "if", "isinstance", "(", "m", ",", "LTAnno", ")", ":", "continue", "if", "print_text_as_rect", ":", "fill", "=", "\"pink\"", "if", "hasattr", "(", "m", ",", "\"feats\"", ")", "and", "m", ".", "feats", "[", "\"is_cell\"", "]", "else", "green", "# fill = green", "draw", ".", "rectangle", "(", "m", ".", "bbox", ",", "fill", "=", "fill", ")", "# draw.text(center(m.bbox), str(i), black, font = font) # Draw id", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "black", ",", "font", "=", "font", ")", "# Draw mention content", "else", ":", "draw", ".", "text", "(", "m", ".", "bbox", "[", ":", "2", "]", ",", "m", ".", "get_text", "(", ")", ",", "\"black\"", ",", "font", "=", "font", ")", "if", "print_segments", ":", "# draw skeleton for all segments", "for", "i", ",", "s", "in", "enumerate", "(", "elems", ".", "segments", ")", ":", "draw", ".", "line", "(", "s", ".", "bbox", ",", "fill", "=", "\"black\"", ")", "if", "print_table_bbox", ":", "for", "node", "in", "nodes", ":", "is_table", "=", "node", ".", "is_table", "(", ")", "color", "=", "\"red\"", "if", "is_table", "else", "\"green\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "if", "is_table", ":", "# text = 'Borderless' if node.is_borderless() else 'Bordered'", "text", "=", "\"Table\"", "draw", ".", "rectangle", "(", "node", ".", "bbox", ",", "outline", "=", "color", ")", "draw", ".", "text", "(", "node", ".", "bbox", "[", ":", "2", "]", ",", "text", ",", "red", ",", "font", "=", "large_font", ")", "# Water mark with file name so we can identify among multiple images", "if", "file_name", "and", "page_num", "is", "not", "None", ":", "water_mark", "=", "(", "file_name", "+", "\":page \"", "+", "str", "(", "page_num", "+", "1", ")", "+", "\"@%dx%d\"", "%", "(", "width", ",", "height", ")", ")", "draw", ".", "text", "(", "(", "10", ",", "10", ")", ",", "water_mark", ",", "black", ",", "font", "=", "font", ")", "debug_img", ".", "show", "(", ")", "return", "debug_img" ]
Shows an image rendering of the pdf page along with debugging info printed
[ "Shows", "an", "image", "rendering", "of", "the", "pdf", "page", "along", "with", "debugging", "info", "printed" ]
5890d668b475d5d3058d1d886aafbfd83268c440
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/img_utils.py#L93-L160
4,037
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_partition_estimators
def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs if n_jobs == -1: n_jobs = min(cpu_count(), n_estimators) else: n_jobs = min(n_jobs, n_estimators) # Partition estimators between jobs n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=int) n_estimators_per_job[:n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
python
def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs if n_jobs == -1: n_jobs = min(cpu_count(), n_estimators) else: n_jobs = min(n_jobs, n_estimators) # Partition estimators between jobs n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=int) n_estimators_per_job[:n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
[ "def", "_partition_estimators", "(", "n_estimators", ",", "n_jobs", ")", ":", "# Compute the number of jobs", "if", "n_jobs", "==", "-", "1", ":", "n_jobs", "=", "min", "(", "cpu_count", "(", ")", ",", "n_estimators", ")", "else", ":", "n_jobs", "=", "min", "(", "n_jobs", ",", "n_estimators", ")", "# Partition estimators between jobs", "n_estimators_per_job", "=", "(", "n_estimators", "//", "n_jobs", ")", "*", "np", ".", "ones", "(", "n_jobs", ",", "dtype", "=", "np", ".", "int", ")", "n_estimators_per_job", "[", ":", "n_estimators", "%", "n_jobs", "]", "+=", "1", "starts", "=", "np", ".", "cumsum", "(", "n_estimators_per_job", ")", "return", "n_jobs", ",", "n_estimators_per_job", ".", "tolist", "(", ")", ",", "[", "0", "]", "+", "starts", ".", "tolist", "(", ")" ]
Private function used to partition estimators between jobs.
[ "Private", "function", "used", "to", "partition", "estimators", "between", "jobs", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L36-L51
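So job i trains estimators starts[i]:starts[i + 1], with any remainder spread over the first jobs. For example:
import numpy as np

n_estimators, n_jobs = 10, 3
per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=int)
per_job[:n_estimators % n_jobs] += 1
starts = [0] + np.cumsum(per_job).tolist()
print(per_job.tolist(), starts)  # [4, 3, 3] [0, 4, 7, 10]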
4,038
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_parallel_build_estimators
def _parallel_build_estimators(n_estimators, ensemble, X, y, cost_mat, seeds, verbose): """Private function used to build a batch of estimators within a job.""" # Retrieve settings n_samples, n_features = X.shape max_samples = ensemble.max_samples max_features = ensemble.max_features if (not isinstance(max_samples, (numbers.Integral, np.integer)) and (0.0 < max_samples <= 1.0)): max_samples = int(max_samples * n_samples) if (not isinstance(max_features, (numbers.Integral, np.integer)) and (0.0 < max_features <= 1.0)): max_features = int(max_features * n_features) bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features # Build estimators estimators = [] estimators_samples = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print(("building estimator %d of %d" % (i + 1, n_estimators))) random_state = check_random_state(seeds[i]) seed = check_random_state(random_state.randint(MAX_INT)) estimator = ensemble._make_estimator(append=False) try: # Not all estimators accept a random_state estimator.set_params(random_state=seed) except ValueError: pass # Draw features if bootstrap_features: features = random_state.randint(0, n_features, max_features) else: features = sample_without_replacement(n_features, max_features, random_state=random_state) # Draw samples, using a mask, and then fit if bootstrap: indices = random_state.randint(0, n_samples, max_samples) else: indices = sample_without_replacement(n_samples, max_samples, random_state=random_state) sample_counts = np.bincount(indices, minlength=n_samples) estimator.fit((X[indices])[:, features], y[indices], cost_mat[indices, :]) samples = sample_counts > 0. estimators.append(estimator) estimators_samples.append(samples) estimators_features.append(features) return estimators, estimators_samples, estimators_features
python
def _parallel_build_estimators(n_estimators, ensemble, X, y, cost_mat, seeds, verbose): """Private function used to build a batch of estimators within a job.""" # Retrieve settings n_samples, n_features = X.shape max_samples = ensemble.max_samples max_features = ensemble.max_features if (not isinstance(max_samples, (numbers.Integral, np.integer)) and (0.0 < max_samples <= 1.0)): max_samples = int(max_samples * n_samples) if (not isinstance(max_features, (numbers.Integral, np.integer)) and (0.0 < max_features <= 1.0)): max_features = int(max_features * n_features) bootstrap = ensemble.bootstrap bootstrap_features = ensemble.bootstrap_features # Build estimators estimators = [] estimators_samples = [] estimators_features = [] for i in range(n_estimators): if verbose > 1: print(("building estimator %d of %d" % (i + 1, n_estimators))) random_state = check_random_state(seeds[i]) seed = check_random_state(random_state.randint(MAX_INT)) estimator = ensemble._make_estimator(append=False) try: # Not all estimators accept a random_state estimator.set_params(random_state=seed) except ValueError: pass # Draw features if bootstrap_features: features = random_state.randint(0, n_features, max_features) else: features = sample_without_replacement(n_features, max_features, random_state=random_state) # Draw samples, using a mask, and then fit if bootstrap: indices = random_state.randint(0, n_samples, max_samples) else: indices = sample_without_replacement(n_samples, max_samples, random_state=random_state) sample_counts = np.bincount(indices, minlength=n_samples) estimator.fit((X[indices])[:, features], y[indices], cost_mat[indices, :]) samples = sample_counts > 0. estimators.append(estimator) estimators_samples.append(samples) estimators_features.append(features) return estimators, estimators_samples, estimators_features
[ "def", "_parallel_build_estimators", "(", "n_estimators", ",", "ensemble", ",", "X", ",", "y", ",", "cost_mat", ",", "seeds", ",", "verbose", ")", ":", "# Retrieve settings", "n_samples", ",", "n_features", "=", "X", ".", "shape", "max_samples", "=", "ensemble", ".", "max_samples", "max_features", "=", "ensemble", ".", "max_features", "if", "(", "not", "isinstance", "(", "max_samples", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", "and", "(", "0.0", "<", "max_samples", "<=", "1.0", ")", ")", ":", "max_samples", "=", "int", "(", "max_samples", "*", "n_samples", ")", "if", "(", "not", "isinstance", "(", "max_features", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", "and", "(", "0.0", "<", "max_features", "<=", "1.0", ")", ")", ":", "max_features", "=", "int", "(", "max_features", "*", "n_features", ")", "bootstrap", "=", "ensemble", ".", "bootstrap", "bootstrap_features", "=", "ensemble", ".", "bootstrap_features", "# Build estimators", "estimators", "=", "[", "]", "estimators_samples", "=", "[", "]", "estimators_features", "=", "[", "]", "for", "i", "in", "range", "(", "n_estimators", ")", ":", "if", "verbose", ">", "1", ":", "print", "(", "(", "\"building estimator %d of %d\"", "%", "(", "i", "+", "1", ",", "n_estimators", ")", ")", ")", "random_state", "=", "check_random_state", "(", "seeds", "[", "i", "]", ")", "seed", "=", "check_random_state", "(", "random_state", ".", "randint", "(", "MAX_INT", ")", ")", "estimator", "=", "ensemble", ".", "_make_estimator", "(", "append", "=", "False", ")", "try", ":", "# Not all estimator accept a random_state", "estimator", ".", "set_params", "(", "random_state", "=", "seed", ")", "except", "ValueError", ":", "pass", "# Draw features", "if", "bootstrap_features", ":", "features", "=", "random_state", ".", "randint", "(", "0", ",", "n_features", ",", "max_features", ")", "else", ":", "features", "=", "sample_without_replacement", "(", "n_features", ",", "max_features", ",", "random_state", "=", "random_state", ")", "# Draw samples, using a mask, and then fit", "if", "bootstrap", ":", "indices", "=", "random_state", ".", "randint", "(", "0", ",", "n_samples", ",", "max_samples", ")", "else", ":", "indices", "=", "sample_without_replacement", "(", "n_samples", ",", "max_samples", ",", "random_state", "=", "random_state", ")", "sample_counts", "=", "np", ".", "bincount", "(", "indices", ",", "minlength", "=", "n_samples", ")", "estimator", ".", "fit", "(", "(", "X", "[", "indices", "]", ")", "[", ":", ",", "features", "]", ",", "y", "[", "indices", "]", ",", "cost_mat", "[", "indices", ",", ":", "]", ")", "samples", "=", "sample_counts", ">", "0.", "estimators", ".", "append", "(", "estimator", ")", "estimators_samples", ".", "append", "(", "samples", ")", "estimators_features", ".", "append", "(", "features", ")", "return", "estimators", ",", "estimators_samples", ",", "estimators_features" ]
Private function used to build a batch of estimators within a job.
[ "Private", "function", "used", "to", "build", "a", "batch", "of", "estimators", "within", "a", "job", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L54-L116
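The bootstrap bookkeeping at the end is what later enables out-of-bag scoring; a minimal sketch of just that step:
import numpy as np

rng = np.random.RandomState(0)
n_samples = 8
indices = rng.randint(0, n_samples, n_samples)        # draw with replacement
sample_counts = np.bincount(indices, minlength=n_samples)
in_bag = sample_counts > 0
print(in_bag)    # True where a row was drawn at least once
print(~in_bag)   # out-of-bag rows, available for validation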
4,039
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_parallel_predict
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): """Private function used to compute predictions within a job.""" n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
python
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): """Private function used to compute predictions within a job.""" n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
[ "def", "_parallel_predict", "(", "estimators", ",", "estimators_features", ",", "X", ",", "n_classes", ",", "combination", ",", "estimators_weight", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "pred", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_classes", ")", ")", "n_estimators", "=", "len", "(", "estimators", ")", "for", "estimator", ",", "features", ",", "weight", "in", "zip", "(", "estimators", ",", "estimators_features", ",", "estimators_weight", ")", ":", "# Resort to voting", "predictions", "=", "estimator", ".", "predict", "(", "X", "[", ":", ",", "features", "]", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "if", "combination", "==", "'weighted_voting'", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "*", "weight", "else", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "return", "pred" ]
Private function used to compute predictions within a job.
[ "Private", "function", "used", "to", "compute", "predictions", "within", "a", "job", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L134-L150
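A small aggregation demo (hypothetical predictions and weights) showing how weighted_voting differs from plain majority voting:
import numpy as np

predictions = [np.array([0, 1, 1]), np.array([1, 1, 0]), np.array([1, 1, 1])]
weights = [0.4, 0.35, 0.25]

pred = np.zeros((3, 2))                    # n_samples x n_classes
for yhat, w in zip(predictions, weights):
    for i, c in enumerate(yhat):
        pred[i, int(c)] += w               # use += 1 instead for majority_voting
print(pred.argmax(axis=1))                 # [1 1 1]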
4,040
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
_create_stacking_set
def _create_stacking_set(estimators, estimators_features, estimators_weight, X, combination): """Private function used to create the stacking training set.""" n_samples = X.shape[0] valid_estimators = np.nonzero(estimators_weight)[0] n_valid_estimators = valid_estimators.shape[0] X_stacking = np.zeros((n_samples, n_valid_estimators)) for e in range(n_valid_estimators): if combination in ['stacking', 'stacking_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict(X[:, estimators_features[valid_estimators[e]]]) elif combination in ['stacking_proba', 'stacking_proba_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict_proba(X[:, estimators_features[valid_estimators[e]]])[:, 1] return X_stacking
python
def _create_stacking_set(estimators, estimators_features, estimators_weight, X, combination): """Private function used to create the stacking training set.""" n_samples = X.shape[0] valid_estimators = np.nonzero(estimators_weight)[0] n_valid_estimators = valid_estimators.shape[0] X_stacking = np.zeros((n_samples, n_valid_estimators)) for e in range(n_valid_estimators): if combination in ['stacking', 'stacking_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict(X[:, estimators_features[valid_estimators[e]]]) elif combination in ['stacking_proba', 'stacking_proba_bmr']: X_stacking[:, e] = estimators[valid_estimators[e]].predict_proba(X[:, estimators_features[valid_estimators[e]]])[:, 1] return X_stacking
[ "def", "_create_stacking_set", "(", "estimators", ",", "estimators_features", ",", "estimators_weight", ",", "X", ",", "combination", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "valid_estimators", "=", "np", ".", "nonzero", "(", "estimators_weight", ")", "[", "0", "]", "n_valid_estimators", "=", "valid_estimators", ".", "shape", "[", "0", "]", "X_stacking", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_valid_estimators", ")", ")", "for", "e", "in", "range", "(", "n_valid_estimators", ")", ":", "if", "combination", "in", "[", "'stacking'", ",", "'stacking_bmr'", "]", ":", "X_stacking", "[", ":", ",", "e", "]", "=", "estimators", "[", "valid_estimators", "[", "e", "]", "]", ".", "predict", "(", "X", "[", ":", ",", "estimators_features", "[", "valid_estimators", "[", "e", "]", "]", "]", ")", "elif", "combination", "in", "[", "'stacking_proba'", ",", "'stacking_proba_bmr'", "]", ":", "X_stacking", "[", ":", ",", "e", "]", "=", "estimators", "[", "valid_estimators", "[", "e", "]", "]", ".", "predict_proba", "(", "X", "[", ":", ",", "estimators_features", "[", "valid_estimators", "[", "e", "]", "]", "]", ")", "[", ":", ",", "1", "]", "return", "X_stacking" ]
Private function used to create the stacking training set.
[ "Private", "function", "used", "to", "create", "the", "stacking", "training", "set", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L153-L167
4,041
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaseBagging._fit_bmr_model
def _fit_bmr_model(self, X, y): """Private function used to fit the BayesMinimumRisk model.""" self.f_bmr = BayesMinimumRiskClassifier() X_bmr = self.predict_proba(X) self.f_bmr.fit(y, X_bmr) return self
python
def _fit_bmr_model(self, X, y): """Private function used to fit the BayesMinimumRisk model.""" self.f_bmr = BayesMinimumRiskClassifier() X_bmr = self.predict_proba(X) self.f_bmr.fit(y, X_bmr) return self
[ "def", "_fit_bmr_model", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "f_bmr", "=", "BayesMinimumRiskClassifier", "(", ")", "X_bmr", "=", "self", ".", "predict_proba", "(", "X", ")", "self", ".", "f_bmr", ".", "fit", "(", "y", ",", "X_bmr", ")", "return", "self" ]
Private function used to fit the BayesMinimumRisk model.
[ "Private", "function", "used", "to", "fit", "the", "BayesMinimumRisk", "model", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L295-L300
4,042
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaseBagging._fit_stacking_model
def _fit_stacking_model(self,X, y, cost_mat, max_iter=100): """Private function used to fit the stacking model.""" self.f_staking = CostSensitiveLogisticRegression(verbose=self.verbose, max_iter=max_iter) X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_, self.estimators_weight_, X, self.combination) self.f_staking.fit(X_stacking, y, cost_mat) return self
python
def _fit_stacking_model(self,X, y, cost_mat, max_iter=100): """Private function used to fit the stacking model.""" self.f_staking = CostSensitiveLogisticRegression(verbose=self.verbose, max_iter=max_iter) X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_, self.estimators_weight_, X, self.combination) self.f_staking.fit(X_stacking, y, cost_mat) return self
[ "def", "_fit_stacking_model", "(", "self", ",", "X", ",", "y", ",", "cost_mat", ",", "max_iter", "=", "100", ")", ":", "self", ".", "f_staking", "=", "CostSensitiveLogisticRegression", "(", "verbose", "=", "self", ".", "verbose", ",", "max_iter", "=", "max_iter", ")", "X_stacking", "=", "_create_stacking_set", "(", "self", ".", "estimators_", ",", "self", ".", "estimators_features_", ",", "self", ".", "estimators_weight_", ",", "X", ",", "self", ".", "combination", ")", "self", ".", "f_staking", ".", "fit", "(", "X_stacking", ",", "y", ",", "cost_mat", ")", "return", "self" ]
Private function used to fit the stacking model.
[ "Private", "function", "used", "to", "fit", "the", "stacking", "model", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L302-L308
4,043
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaseBagging._evaluate_oob_savings
def _evaluate_oob_savings(self, X, y, cost_mat): """Private function used to calculate the OOB Savings of each estimator.""" estimators_weight = [] for estimator, samples, features in zip(self.estimators_, self.estimators_samples_, self.estimators_features_): # Test if all examples were used for training if not np.any(~samples): # Then use training oob_pred = estimator.predict(X[:, features]) oob_savings = max(0, savings_score(y, oob_pred, cost_mat)) else: # Then use OOB oob_pred = estimator.predict((X[~samples])[:, features]) oob_savings = max(0, savings_score(y[~samples], oob_pred, cost_mat[~samples])) estimators_weight.append(oob_savings) # Control in case all weights are 0 if sum(estimators_weight) == 0: self.estimators_weight_ = np.ones(len(estimators_weight)) / len(estimators_weight) else: self.estimators_weight_ = (np.array(estimators_weight) / sum(estimators_weight)).tolist() return self
python
def _evaluate_oob_savings(self, X, y, cost_mat): """Private function used to calculate the OOB Savings of each estimator.""" estimators_weight = [] for estimator, samples, features in zip(self.estimators_, self.estimators_samples_, self.estimators_features_): # Test if all examples were used for training if not np.any(~samples): # Then use training oob_pred = estimator.predict(X[:, features]) oob_savings = max(0, savings_score(y, oob_pred, cost_mat)) else: # Then use OOB oob_pred = estimator.predict((X[~samples])[:, features]) oob_savings = max(0, savings_score(y[~samples], oob_pred, cost_mat[~samples])) estimators_weight.append(oob_savings) # Control in case all weights are 0 if sum(estimators_weight) == 0: self.estimators_weight_ = np.ones(len(estimators_weight)) / len(estimators_weight) else: self.estimators_weight_ = (np.array(estimators_weight) / sum(estimators_weight)).tolist() return self
[ "def", "_evaluate_oob_savings", "(", "self", ",", "X", ",", "y", ",", "cost_mat", ")", ":", "estimators_weight", "=", "[", "]", "for", "estimator", ",", "samples", ",", "features", "in", "zip", "(", "self", ".", "estimators_", ",", "self", ".", "estimators_samples_", ",", "self", ".", "estimators_features_", ")", ":", "# Test if all examples where used for training", "if", "not", "np", ".", "any", "(", "~", "samples", ")", ":", "# Then use training", "oob_pred", "=", "estimator", ".", "predict", "(", "X", "[", ":", ",", "features", "]", ")", "oob_savings", "=", "max", "(", "0", ",", "savings_score", "(", "y", ",", "oob_pred", ",", "cost_mat", ")", ")", "else", ":", "# Then use OOB", "oob_pred", "=", "estimator", ".", "predict", "(", "(", "X", "[", "~", "samples", "]", ")", "[", ":", ",", "features", "]", ")", "oob_savings", "=", "max", "(", "0", ",", "savings_score", "(", "y", "[", "~", "samples", "]", ",", "oob_pred", ",", "cost_mat", "[", "~", "samples", "]", ")", ")", "estimators_weight", ".", "append", "(", "oob_savings", ")", "# Control in case were all weights are 0", "if", "sum", "(", "estimators_weight", ")", "==", "0", ":", "self", ".", "estimators_weight_", "=", "np", ".", "ones", "(", "len", "(", "estimators_weight", ")", ")", "/", "len", "(", "estimators_weight", ")", "else", ":", "self", ".", "estimators_weight_", "=", "(", "np", ".", "array", "(", "estimators_weight", ")", "/", "sum", "(", "estimators_weight", ")", ")", ".", "tolist", "(", ")", "return", "self" ]
Private function used to calculate the OOB Savings of each estimator.
[ "Private", "function", "used", "to", "calculate", "the", "OOB", "Savings", "of", "each", "estimator", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L311-L334
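For intuition, the weight-normalization step above can be reproduced standalone. The sketch below (function name and inputs are hypothetical, not part of costcla) clips negative savings to zero and falls back to uniform weights when every estimator scores zero, mirroring the `sum(estimators_weight) == 0` branch:

import numpy as np

def normalize_oob_savings(oob_savings):
    # Mirror max(0, savings) from the loop above: negative savings contribute nothing
    w = np.maximum(np.asarray(oob_savings, dtype=float), 0.0)
    if w.sum() == 0:
        # Fallback: uniform weights when no estimator achieved positive OOB savings
        return np.ones_like(w) / len(w)
    return (w / w.sum()).tolist()

print(normalize_oob_savings([0.2, 0.0, 0.3]))   # [0.4, 0.0, 0.6]
print(normalize_oob_savings([-0.1, 0.0]))       # uniform fallback: [0.5 0.5]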
4,044
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaggingClassifier.predict
def predict(self, X, cost_mat=None):
    """Predict class for X.

    The predicted class of an input sample is computed as the class with
    the highest mean predicted probability. If base estimators do not
    implement a ``predict_proba`` method, then it resorts to voting.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    cost_mat : optional array-like of shape = [n_samples, 4], (default=None)
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.

    Returns
    -------
    pred : array of shape = [n_samples]
        The predicted classes.
    """
    # Check data
    # X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])  # Dont in version 0.15

    if self.n_features_ != X.shape[1]:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {0} and "
                         "input n_features is {1}."
                         "".format(self.n_features_, X.shape[1]))

    #TODO: check if combination in possible combinations
    if self.combination in ['stacking', 'stacking_proba']:
        X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
                                          self.estimators_weight_, X, self.combination)
        return self.f_staking.predict(X_stacking)

    elif self.combination in ['majority_voting', 'weighted_voting']:
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)

        all_pred = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_,
                self.combination,
                self.estimators_weight_[starts[i]:starts[i + 1]])
            for i in range(n_jobs))

        # Reduce
        pred = sum(all_pred) / self.n_estimators

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    elif self.combination in ['majority_bmr', 'weighted_bmr', 'stacking_bmr', 'stacking_proba_bmr']:
        #TODO: Add check if cost_mat == None
        X_bmr = self.predict_proba(X)
        return self.f_bmr.predict(X_bmr, cost_mat)
python
def predict(self, X, cost_mat=None):
    """Predict class for X.

    The predicted class of an input sample is computed as the class with
    the highest mean predicted probability. If base estimators do not
    implement a ``predict_proba`` method, then it resorts to voting.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    cost_mat : optional array-like of shape = [n_samples, 4], (default=None)
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.

    Returns
    -------
    pred : array of shape = [n_samples]
        The predicted classes.
    """
    # Check data
    # X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])  # Dont in version 0.15

    if self.n_features_ != X.shape[1]:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {0} and "
                         "input n_features is {1}."
                         "".format(self.n_features_, X.shape[1]))

    #TODO: check if combination in possible combinations
    if self.combination in ['stacking', 'stacking_proba']:
        X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
                                          self.estimators_weight_, X, self.combination)
        return self.f_staking.predict(X_stacking)

    elif self.combination in ['majority_voting', 'weighted_voting']:
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)

        all_pred = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_,
                self.combination,
                self.estimators_weight_[starts[i]:starts[i + 1]])
            for i in range(n_jobs))

        # Reduce
        pred = sum(all_pred) / self.n_estimators

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    elif self.combination in ['majority_bmr', 'weighted_bmr', 'stacking_bmr', 'stacking_proba_bmr']:
        #TODO: Add check if cost_mat == None
        X_bmr = self.predict_proba(X)
        return self.f_bmr.predict(X_bmr, cost_mat)
[ "def", "predict", "(", "self", ",", "X", ",", "cost_mat", "=", "None", ")", ":", "# Check data", "# X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # Dont in version 0.15", "if", "self", ".", "n_features_", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Number of features of the model must \"", "\"match the input. Model n_features is {0} and \"", "\"input n_features is {1}.\"", "\"\"", ".", "format", "(", "self", ".", "n_features_", ",", "X", ".", "shape", "[", "1", "]", ")", ")", "#TODO: check if combination in possible combinations", "if", "self", ".", "combination", "in", "[", "'stacking'", ",", "'stacking_proba'", "]", ":", "X_stacking", "=", "_create_stacking_set", "(", "self", ".", "estimators_", ",", "self", ".", "estimators_features_", ",", "self", ".", "estimators_weight_", ",", "X", ",", "self", ".", "combination", ")", "return", "self", ".", "f_staking", ".", "predict", "(", "X_stacking", ")", "elif", "self", ".", "combination", "in", "[", "'majority_voting'", ",", "'weighted_voting'", "]", ":", "# Parallel loop", "n_jobs", ",", "n_estimators", ",", "starts", "=", "_partition_estimators", "(", "self", ".", "n_estimators", ",", "self", ".", "n_jobs", ")", "all_pred", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "self", ".", "verbose", ")", "(", "delayed", "(", "_parallel_predict", ")", "(", "self", ".", "estimators_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ",", "self", ".", "estimators_features_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ",", "X", ",", "self", ".", "n_classes_", ",", "self", ".", "combination", ",", "self", ".", "estimators_weight_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ")", "for", "i", "in", "range", "(", "n_jobs", ")", ")", "# Reduce", "pred", "=", "sum", "(", "all_pred", ")", "/", "self", ".", "n_estimators", "return", "self", ".", "classes_", ".", "take", "(", "np", ".", "argmax", "(", "pred", ",", "axis", "=", "1", ")", ",", "axis", "=", "0", ")", "elif", "self", ".", "combination", "in", "[", "'majority_bmr'", ",", "'weighted_bmr'", ",", "'stacking_bmr'", ",", "'stacking_proba_bmr'", "]", ":", "#TODO: Add check if cost_mat == None", "X_bmr", "=", "self", ".", "predict_proba", "(", "X", ")", "return", "self", ".", "f_bmr", ".", "predict", "(", "X_bmr", ",", "cost_mat", ")" ]
Predict class for X.

The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.

Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
    The training input samples. Sparse matrices are accepted only if
    they are supported by the base estimator.

cost_mat : optional array-like of shape = [n_samples, 4], (default=None)
    Cost matrix of the classification problem
    Where the columns represents the costs of: false positives, false negatives,
    true positives and true negatives, for each example.

Returns
-------
pred : array of shape = [n_samples]
    The predicted classes.
[ "Predict", "class", "for", "X", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L491-L554
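To see what the 'majority_voting' reduce step computes, here is a toy re-enactment with hypothetical one-hot votes (three estimators, four samples, two classes); it sketches the arithmetic only, not the costcla API:

import numpy as np

votes = np.array([
    [[1, 0], [0, 1], [0, 1], [1, 0]],   # estimator 1's one-hot class votes
    [[1, 0], [1, 0], [0, 1], [1, 0]],   # estimator 2
    [[0, 1], [0, 1], [0, 1], [1, 0]],   # estimator 3
], dtype=float)

pred = votes.sum(axis=0) / votes.shape[0]             # mean vote share per class
classes = np.array([0, 1])
print(classes.take(np.argmax(pred, axis=1), axis=0))  # majority class per sample: [0 1 1 0]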
4,045
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
BaggingClassifier.predict_proba
def predict_proba(self, X):
    """Predict class probabilities for X.

    The predicted class probabilities of an input sample is computed as
    the mean predicted class probabilities of the base estimators in the
    ensemble. If base estimators do not implement a ``predict_proba``
    method, then it resorts to voting and the predicted class probabilities
    of a an input sample represents the proportion of estimators predicting
    each class.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    Returns
    -------
    p : array of shape = [n_samples, n_classes]
        The class probabilities of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
    """
    # Check data
    # X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])  # Dont in version 0.15

    if self.n_features_ != X.shape[1]:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {0} and "
                         "input n_features is {1}."
                         "".format(self.n_features_, X.shape[1]))

    # Parallel loop
    n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)

    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_predict_proba)(
            self.estimators_[starts[i]:starts[i + 1]],
            self.estimators_features_[starts[i]:starts[i + 1]],
            X,
            self.n_classes_,
            self.combination,
            self.estimators_weight_[starts[i]:starts[i + 1]])
        for i in range(n_jobs))

    # Reduce
    if self.combination in ['majority_voting', 'majority_bmr']:
        proba = sum(all_proba) / self.n_estimators
    elif self.combination in ['weighted_voting', 'weighted_bmr']:
        proba = sum(all_proba)
    elif self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']:
        X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
                                          self.estimators_weight_, X, self.combination)
        proba = self.f_staking.predict_proba(X_stacking)

    return proba
python
def predict_proba(self, X):
    """Predict class probabilities for X.

    The predicted class probabilities of an input sample is computed as
    the mean predicted class probabilities of the base estimators in the
    ensemble. If base estimators do not implement a ``predict_proba``
    method, then it resorts to voting and the predicted class probabilities
    of a an input sample represents the proportion of estimators predicting
    each class.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    Returns
    -------
    p : array of shape = [n_samples, n_classes]
        The class probabilities of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
    """
    # Check data
    # X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])  # Dont in version 0.15

    if self.n_features_ != X.shape[1]:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {0} and "
                         "input n_features is {1}."
                         "".format(self.n_features_, X.shape[1]))

    # Parallel loop
    n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)

    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_predict_proba)(
            self.estimators_[starts[i]:starts[i + 1]],
            self.estimators_features_[starts[i]:starts[i + 1]],
            X,
            self.n_classes_,
            self.combination,
            self.estimators_weight_[starts[i]:starts[i + 1]])
        for i in range(n_jobs))

    # Reduce
    if self.combination in ['majority_voting', 'majority_bmr']:
        proba = sum(all_proba) / self.n_estimators
    elif self.combination in ['weighted_voting', 'weighted_bmr']:
        proba = sum(all_proba)
    elif self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']:
        X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
                                          self.estimators_weight_, X, self.combination)
        proba = self.f_staking.predict_proba(X_stacking)

    return proba
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "# Check data", "# X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # Dont in version 0.15", "if", "self", ".", "n_features_", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Number of features of the model must \"", "\"match the input. Model n_features is {0} and \"", "\"input n_features is {1}.\"", "\"\"", ".", "format", "(", "self", ".", "n_features_", ",", "X", ".", "shape", "[", "1", "]", ")", ")", "# Parallel loop", "n_jobs", ",", "n_estimators", ",", "starts", "=", "_partition_estimators", "(", "self", ".", "n_estimators", ",", "self", ".", "n_jobs", ")", "all_proba", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "self", ".", "verbose", ")", "(", "delayed", "(", "_parallel_predict_proba", ")", "(", "self", ".", "estimators_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ",", "self", ".", "estimators_features_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ",", "X", ",", "self", ".", "n_classes_", ",", "self", ".", "combination", ",", "self", ".", "estimators_weight_", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ")", "for", "i", "in", "range", "(", "n_jobs", ")", ")", "# Reduce", "if", "self", ".", "combination", "in", "[", "'majority_voting'", ",", "'majority_bmr'", "]", ":", "proba", "=", "sum", "(", "all_proba", ")", "/", "self", ".", "n_estimators", "elif", "self", ".", "combination", "in", "[", "'weighted_voting'", ",", "'weighted_bmr'", "]", ":", "proba", "=", "sum", "(", "all_proba", ")", "elif", "self", ".", "combination", "in", "[", "'stacking'", ",", "'stacking_proba'", ",", "'stacking_bmr'", ",", "'stacking_proba_bmr'", "]", ":", "X_stacking", "=", "_create_stacking_set", "(", "self", ".", "estimators_", ",", "self", ".", "estimators_features_", ",", "self", ".", "estimators_weight_", ",", "X", ",", "self", ".", "combination", ")", "proba", "=", "self", ".", "f_staking", ".", "predict_proba", "(", "X_stacking", ")", "return", "proba" ]
Predict class probabilities for X.

The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of a an input sample represents the proportion of estimators predicting
each class.

Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
    The training input samples. Sparse matrices are accepted only if
    they are supported by the base estimator.

Returns
-------
p : array of shape = [n_samples, n_classes]
    The class probabilities of the input samples. The order of the
    classes corresponds to that in the attribute `classes_`.
[ "Predict", "class", "probabilities", "for", "X", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L556-L610
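The reduce branches differ only in how per-estimator probabilities are combined. A minimal numeric sketch with hypothetical arrays (for the 'weighted_*' path this assumes, as the code suggests, that the weights are already folded into each estimator's output inside _parallel_predict_proba, so the reduce is a plain sum):

import numpy as np

proba = [np.array([[0.8, 0.2], [0.4, 0.6]]),   # estimator 1's class probabilities
         np.array([[0.6, 0.4], [0.1, 0.9]])]   # estimator 2
weights = np.array([0.7, 0.3])                 # e.g. normalized OOB-savings weights

mean_proba = sum(proba) / len(proba)                          # 'majority_*' reduce
weighted_proba = sum(w * p for w, p in zip(weights, proba))   # 'weighted_*' combination
print(weighted_proba)   # rows still sum to 1 because the weights sum to 1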
4,046
albahnsen/CostSensitiveClassification
costcla/sampling/cost_sampling.py
cost_sampling
def cost_sampling(X, y, cost_mat, method='RejectionSampling', oversampling_norm=0.1, max_wc=97.5):
    """Cost-proportionate sampling.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    y : array-like of shape = [n_samples]
        Ground truth (correct) labels.

    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.

    method : str, optional (default = RejectionSampling)
        Method to perform the cost-proportionate sampling,
        either 'RejectionSampling' or 'OverSampling'.

    oversampling_norm: float, optional (default = 0.1)
        normalize value of wc, the smaller the biggest the data.

    max_wc: float, optional (default = 97.5)
        outlier adjustment for the cost.

    References
    ----------
    .. [1] B. Zadrozny, J. Langford, N. Naoki, "Cost-sensitive learning by
           cost-proportionate example weighting", in Proceedings of the Third IEEE
           International Conference on Data Mining, 435-442, 2003.

    .. [2] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth
           International Joint Conference on Artificial Intelligence, 973-978, 2001.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.cross_validation import train_test_split
    >>> from costcla.datasets import load_creditscoring1
    >>> from costcla.sampling import cost_sampling, undersampling
    >>> from costcla.metrics import savings_score
    >>> data = load_creditscoring1()
    >>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
    >>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
    >>> X_cps_o, y_cps_o, cost_mat_cps_o = cost_sampling(X_train, y_train, cost_mat_train, method='OverSampling')
    >>> X_cps_r, y_cps_r, cost_mat_cps_r = cost_sampling(X_train, y_train, cost_mat_train, method='RejectionSampling')
    >>> X_u, y_u, cost_mat_u = undersampling(X_train, y_train, cost_mat_train)
    >>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
    >>> y_pred_test_rf_cps_o = RandomForestClassifier(random_state=0).fit(X_cps_o, y_cps_o).predict(X_test)
    >>> y_pred_test_rf_cps_r = RandomForestClassifier(random_state=0).fit(X_cps_r, y_cps_r).predict(X_test)
    >>> y_pred_test_rf_u = RandomForestClassifier(random_state=0).fit(X_u, y_u).predict(X_test)
    >>> # Savings using only RandomForest
    >>> print(savings_score(y_test, y_pred_test_rf, cost_mat_test))
    0.12454256594
    >>> # Savings using RandomForest with cost-proportionate over-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_cps_o, cost_mat_test))
    0.192480226286
    >>> # Savings using RandomForest with cost-proportionate rejection-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_cps_r, cost_mat_test))
    0.465830173459
    >>> # Savings using RandomForest with under-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_u, cost_mat_test))
    0.466630646543
    >>> # Size of each training set
    >>> print(X_train.shape[0], X_cps_o.shape[0], X_cps_r.shape[0], X_u.shape[0])
    75653 109975 8690 10191
    >>> # Percentage of positives in each training set
    >>> print(y_train.mean(), y_cps_o.mean(), y_cps_r.mean(), y_u.mean())
    0.0668182358928 0.358054103205 0.436939010357 0.49602590521
    """
    #TODO: Check consistency of input
    # The methods are construct only for the misclassification costs, not the full cost matrix.
    cost_mis = cost_mat[:, 0]
    cost_mis[y == 1] = cost_mat[y == 1, 1]

    # wc = cost_mis / cost_mis.max()
    wc = np.minimum(cost_mis / np.percentile(cost_mis, max_wc), 1)

    n_samples = X.shape[0]
    filter_ = list(range(n_samples))
    if method == 'RejectionSampling':
        # under-sampling by rejection [1]
        #TODO: Add random state
        rej_rand = np.random.rand(n_samples)
        filter_ = rej_rand <= wc

    elif method == 'OverSampling':
        # over-sampling with normalized wn [2]
        wc_n = np.ceil(wc / oversampling_norm).astype(np.int)
        new_n = wc_n.sum()

        filter_ = np.ones(new_n, dtype=np.int)
        e = 0
        #TODO replace for
        for i in range(n_samples):
            filter_[e: e + wc_n[i]] = i
            e += wc_n[i]

    x_cps = X[filter_]
    y_cps = y[filter_]
    cost_mat_cps = cost_mat[filter_]

    return x_cps, y_cps, cost_mat_cps
python
def cost_sampling(X, y, cost_mat, method='RejectionSampling', oversampling_norm=0.1, max_wc=97.5):
    """Cost-proportionate sampling.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    y : array-like of shape = [n_samples]
        Ground truth (correct) labels.

    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.

    method : str, optional (default = RejectionSampling)
        Method to perform the cost-proportionate sampling,
        either 'RejectionSampling' or 'OverSampling'.

    oversampling_norm: float, optional (default = 0.1)
        normalize value of wc, the smaller the biggest the data.

    max_wc: float, optional (default = 97.5)
        outlier adjustment for the cost.

    References
    ----------
    .. [1] B. Zadrozny, J. Langford, N. Naoki, "Cost-sensitive learning by
           cost-proportionate example weighting", in Proceedings of the Third IEEE
           International Conference on Data Mining, 435-442, 2003.

    .. [2] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth
           International Joint Conference on Artificial Intelligence, 973-978, 2001.

    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.cross_validation import train_test_split
    >>> from costcla.datasets import load_creditscoring1
    >>> from costcla.sampling import cost_sampling, undersampling
    >>> from costcla.metrics import savings_score
    >>> data = load_creditscoring1()
    >>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
    >>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
    >>> X_cps_o, y_cps_o, cost_mat_cps_o = cost_sampling(X_train, y_train, cost_mat_train, method='OverSampling')
    >>> X_cps_r, y_cps_r, cost_mat_cps_r = cost_sampling(X_train, y_train, cost_mat_train, method='RejectionSampling')
    >>> X_u, y_u, cost_mat_u = undersampling(X_train, y_train, cost_mat_train)
    >>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
    >>> y_pred_test_rf_cps_o = RandomForestClassifier(random_state=0).fit(X_cps_o, y_cps_o).predict(X_test)
    >>> y_pred_test_rf_cps_r = RandomForestClassifier(random_state=0).fit(X_cps_r, y_cps_r).predict(X_test)
    >>> y_pred_test_rf_u = RandomForestClassifier(random_state=0).fit(X_u, y_u).predict(X_test)
    >>> # Savings using only RandomForest
    >>> print(savings_score(y_test, y_pred_test_rf, cost_mat_test))
    0.12454256594
    >>> # Savings using RandomForest with cost-proportionate over-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_cps_o, cost_mat_test))
    0.192480226286
    >>> # Savings using RandomForest with cost-proportionate rejection-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_cps_r, cost_mat_test))
    0.465830173459
    >>> # Savings using RandomForest with under-sampling
    >>> print(savings_score(y_test, y_pred_test_rf_u, cost_mat_test))
    0.466630646543
    >>> # Size of each training set
    >>> print(X_train.shape[0], X_cps_o.shape[0], X_cps_r.shape[0], X_u.shape[0])
    75653 109975 8690 10191
    >>> # Percentage of positives in each training set
    >>> print(y_train.mean(), y_cps_o.mean(), y_cps_r.mean(), y_u.mean())
    0.0668182358928 0.358054103205 0.436939010357 0.49602590521
    """
    #TODO: Check consistency of input
    # The methods are construct only for the misclassification costs, not the full cost matrix.
    cost_mis = cost_mat[:, 0]
    cost_mis[y == 1] = cost_mat[y == 1, 1]

    # wc = cost_mis / cost_mis.max()
    wc = np.minimum(cost_mis / np.percentile(cost_mis, max_wc), 1)

    n_samples = X.shape[0]
    filter_ = list(range(n_samples))
    if method == 'RejectionSampling':
        # under-sampling by rejection [1]
        #TODO: Add random state
        rej_rand = np.random.rand(n_samples)
        filter_ = rej_rand <= wc

    elif method == 'OverSampling':
        # over-sampling with normalized wn [2]
        wc_n = np.ceil(wc / oversampling_norm).astype(np.int)
        new_n = wc_n.sum()

        filter_ = np.ones(new_n, dtype=np.int)
        e = 0
        #TODO replace for
        for i in range(n_samples):
            filter_[e: e + wc_n[i]] = i
            e += wc_n[i]

    x_cps = X[filter_]
    y_cps = y[filter_]
    cost_mat_cps = cost_mat[filter_]

    return x_cps, y_cps, cost_mat_cps
[ "def", "cost_sampling", "(", "X", ",", "y", ",", "cost_mat", ",", "method", "=", "'RejectionSampling'", ",", "oversampling_norm", "=", "0.1", ",", "max_wc", "=", "97.5", ")", ":", "#TODO: Check consistency of input", "# The methods are construct only for the misclassification costs, not the full cost matrix.", "cost_mis", "=", "cost_mat", "[", ":", ",", "0", "]", "cost_mis", "[", "y", "==", "1", "]", "=", "cost_mat", "[", "y", "==", "1", ",", "1", "]", "# wc = cost_mis / cost_mis.max()", "wc", "=", "np", ".", "minimum", "(", "cost_mis", "/", "np", ".", "percentile", "(", "cost_mis", ",", "max_wc", ")", ",", "1", ")", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "filter_", "=", "list", "(", "range", "(", "n_samples", ")", ")", "if", "method", "==", "'RejectionSampling'", ":", "# under-sampling by rejection [1]", "#TODO: Add random state", "rej_rand", "=", "np", ".", "random", ".", "rand", "(", "n_samples", ")", "filter_", "=", "rej_rand", "<=", "wc", "elif", "method", "==", "'OverSampling'", ":", "# over-sampling with normalized wn [2]", "wc_n", "=", "np", ".", "ceil", "(", "wc", "/", "oversampling_norm", ")", ".", "astype", "(", "np", ".", "int", ")", "new_n", "=", "wc_n", ".", "sum", "(", ")", "filter_", "=", "np", ".", "ones", "(", "new_n", ",", "dtype", "=", "np", ".", "int", ")", "e", "=", "0", "#TODO replace for", "for", "i", "in", "range", "(", "n_samples", ")", ":", "filter_", "[", "e", ":", "e", "+", "wc_n", "[", "i", "]", "]", "=", "i", "e", "+=", "wc_n", "[", "i", "]", "x_cps", "=", "X", "[", "filter_", "]", "y_cps", "=", "y", "[", "filter_", "]", "cost_mat_cps", "=", "cost_mat", "[", "filter_", "]", "return", "x_cps", ",", "y_cps", ",", "cost_mat_cps" ]
Cost-proportionate sampling.

Parameters
----------
X : array-like of shape = [n_samples, n_features]
    The input samples.

y : array-like of shape = [n_samples]
    Ground truth (correct) labels.

cost_mat : array-like of shape = [n_samples, 4]
    Cost matrix of the classification problem
    Where the columns represents the costs of: false positives, false negatives,
    true positives and true negatives, for each example.

method : str, optional (default = RejectionSampling)
    Method to perform the cost-proportionate sampling,
    either 'RejectionSampling' or 'OverSampling'.

oversampling_norm: float, optional (default = 0.1)
    normalize value of wc, the smaller the biggest the data.

max_wc: float, optional (default = 97.5)
    outlier adjustment for the cost.

References
----------
.. [1] B. Zadrozny, J. Langford, N. Naoki, "Cost-sensitive learning by
       cost-proportionate example weighting", in Proceedings of the Third IEEE
       International Conference on Data Mining, 435-442, 2003.

.. [2] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth
       International Joint Conference on Artificial Intelligence, 973-978, 2001.

Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.sampling import cost_sampling, undersampling
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> X_cps_o, y_cps_o, cost_mat_cps_o = cost_sampling(X_train, y_train, cost_mat_train, method='OverSampling')
>>> X_cps_r, y_cps_r, cost_mat_cps_r = cost_sampling(X_train, y_train, cost_mat_train, method='RejectionSampling')
>>> X_u, y_u, cost_mat_u = undersampling(X_train, y_train, cost_mat_train)
>>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
>>> y_pred_test_rf_cps_o = RandomForestClassifier(random_state=0).fit(X_cps_o, y_cps_o).predict(X_test)
>>> y_pred_test_rf_cps_r = RandomForestClassifier(random_state=0).fit(X_cps_r, y_cps_r).predict(X_test)
>>> y_pred_test_rf_u = RandomForestClassifier(random_state=0).fit(X_u, y_u).predict(X_test)
>>> # Savings using only RandomForest
>>> print(savings_score(y_test, y_pred_test_rf, cost_mat_test))
0.12454256594
>>> # Savings using RandomForest with cost-proportionate over-sampling
>>> print(savings_score(y_test, y_pred_test_rf_cps_o, cost_mat_test))
0.192480226286
>>> # Savings using RandomForest with cost-proportionate rejection-sampling
>>> print(savings_score(y_test, y_pred_test_rf_cps_r, cost_mat_test))
0.465830173459
>>> # Savings using RandomForest with under-sampling
>>> print(savings_score(y_test, y_pred_test_rf_u, cost_mat_test))
0.466630646543
>>> # Size of each training set
>>> print(X_train.shape[0], X_cps_o.shape[0], X_cps_r.shape[0], X_u.shape[0])
75653 109975 8690 10191
>>> # Percentage of positives in each training set
>>> print(y_train.mean(), y_cps_o.mean(), y_cps_r.mean(), y_u.mean())
0.0668182358928 0.358054103205 0.436939010357 0.49602590521
[ "Cost", "-", "proportionate", "sampling", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/sampling/cost_sampling.py#L11-L123
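The rejection-sampling branch keeps each example with probability proportional to its capped misclassification cost. A self-contained sketch of that acceptance step with hypothetical costs follows; note as a caution that `cost_mat[:, 0]` in the function above is a NumPy view, so the subsequent in-place assignment modifies the caller's cost_mat, and that `np.int` was removed in NumPy 1.24:

import numpy as np

rng = np.random.RandomState(42)             # the library version has no random state yet (see its TODO)
cost_mis = np.array([1.0, 5.0, 10.0, 0.5])  # hypothetical per-example misclassification costs
wc = np.minimum(cost_mis / np.percentile(cost_mis, 97.5), 1)  # cap outliers, as max_wc does
keep = rng.rand(len(cost_mis)) <= wc        # accept example i with probability wc[i]
print(wc.round(3))
print(keep)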
4,047
albahnsen/CostSensitiveClassification
costcla/datasets/base.py
_creditscoring_costmat
def _creditscoring_costmat(income, debt, pi_1, cost_mat_parameters):
    """ Private function to calculate the cost matrix of credit scoring models.

    Parameters
    ----------
    income : array of shape = [n_samples]
        Monthly income of each example

    debt : array of shape = [n_samples]
        Debt ratio each example

    pi_1 : float
        Percentage of positives in the training set

    References
    ----------
    .. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
           "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
           in Proceedings of the International Conference on Machine Learning and Applications,
           , 2014.

    Returns
    -------
    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.
    """
    def calculate_a(cl_i, int_, n_term):
        """ Private function """
        return cl_i * ((int_ * (1 + int_) ** n_term) / ((1 + int_) ** n_term - 1))

    def calculate_pv(a, int_, n_term):
        """ Private function """
        return a / int_ * (1 - 1 / (1 + int_) ** n_term)

    #Calculate credit line Cl
    def calculate_cl(k, inc_i, cl_max, debt_i, int_r, n_term):
        """ Private function """
        cl_k = k * inc_i
        A = calculate_a(cl_k, int_r, n_term)
        Cl_debt = calculate_pv(inc_i * min(A / inc_i, 1 - debt_i), int_r, n_term)
        return min(cl_k, cl_max, Cl_debt)

    #calculate costs
    def calculate_cost_fn(cl_i, lgd):
        return cl_i * lgd

    def calculate_cost_fp(cl_i, int_r, n_term, int_cf, pi_1, lgd, cl_avg):
        a = calculate_a(cl_i, int_r, n_term)
        pv = calculate_pv(a, int_cf, n_term)
        r = pv - cl_i
        r_avg = calculate_pv(calculate_a(cl_avg, int_r, n_term), int_cf, n_term) - cl_avg
        cost_fp = r - (1 - pi_1) * r_avg + pi_1 * calculate_cost_fn(cl_avg, lgd)
        return max(0, cost_fp)

    v_calculate_cost_fp = np.vectorize(calculate_cost_fp)
    v_calculate_cost_fn = np.vectorize(calculate_cost_fn)
    v_calculate_cl = np.vectorize(calculate_cl)

    # Parameters
    k = cost_mat_parameters['k']
    int_r = cost_mat_parameters['int_r']
    n_term = cost_mat_parameters['n_term']
    int_cf = cost_mat_parameters['int_cf']
    lgd = cost_mat_parameters['lgd']
    cl_max = cost_mat_parameters['cl_max']

    cl = v_calculate_cl(k, income, cl_max, debt, int_r, n_term)
    cl_avg = cl.mean()

    n_samples = income.shape[0]
    cost_mat = np.zeros((n_samples, 4))  #cost_mat[FP,FN,TP,TN]
    cost_mat[:, 0] = v_calculate_cost_fp(cl, int_r, n_term, int_cf, pi_1, lgd, cl_avg)
    cost_mat[:, 1] = v_calculate_cost_fn(cl, lgd)
    cost_mat[:, 2] = 0.0
    cost_mat[:, 3] = 0.0

    return cost_mat
python
def _creditscoring_costmat(income, debt, pi_1, cost_mat_parameters):
    """ Private function to calculate the cost matrix of credit scoring models.

    Parameters
    ----------
    income : array of shape = [n_samples]
        Monthly income of each example

    debt : array of shape = [n_samples]
        Debt ratio each example

    pi_1 : float
        Percentage of positives in the training set

    References
    ----------
    .. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
           "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
           in Proceedings of the International Conference on Machine Learning and Applications,
           , 2014.

    Returns
    -------
    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.
    """
    def calculate_a(cl_i, int_, n_term):
        """ Private function """
        return cl_i * ((int_ * (1 + int_) ** n_term) / ((1 + int_) ** n_term - 1))

    def calculate_pv(a, int_, n_term):
        """ Private function """
        return a / int_ * (1 - 1 / (1 + int_) ** n_term)

    #Calculate credit line Cl
    def calculate_cl(k, inc_i, cl_max, debt_i, int_r, n_term):
        """ Private function """
        cl_k = k * inc_i
        A = calculate_a(cl_k, int_r, n_term)
        Cl_debt = calculate_pv(inc_i * min(A / inc_i, 1 - debt_i), int_r, n_term)
        return min(cl_k, cl_max, Cl_debt)

    #calculate costs
    def calculate_cost_fn(cl_i, lgd):
        return cl_i * lgd

    def calculate_cost_fp(cl_i, int_r, n_term, int_cf, pi_1, lgd, cl_avg):
        a = calculate_a(cl_i, int_r, n_term)
        pv = calculate_pv(a, int_cf, n_term)
        r = pv - cl_i
        r_avg = calculate_pv(calculate_a(cl_avg, int_r, n_term), int_cf, n_term) - cl_avg
        cost_fp = r - (1 - pi_1) * r_avg + pi_1 * calculate_cost_fn(cl_avg, lgd)
        return max(0, cost_fp)

    v_calculate_cost_fp = np.vectorize(calculate_cost_fp)
    v_calculate_cost_fn = np.vectorize(calculate_cost_fn)
    v_calculate_cl = np.vectorize(calculate_cl)

    # Parameters
    k = cost_mat_parameters['k']
    int_r = cost_mat_parameters['int_r']
    n_term = cost_mat_parameters['n_term']
    int_cf = cost_mat_parameters['int_cf']
    lgd = cost_mat_parameters['lgd']
    cl_max = cost_mat_parameters['cl_max']

    cl = v_calculate_cl(k, income, cl_max, debt, int_r, n_term)
    cl_avg = cl.mean()

    n_samples = income.shape[0]
    cost_mat = np.zeros((n_samples, 4))  #cost_mat[FP,FN,TP,TN]
    cost_mat[:, 0] = v_calculate_cost_fp(cl, int_r, n_term, int_cf, pi_1, lgd, cl_avg)
    cost_mat[:, 1] = v_calculate_cost_fn(cl, lgd)
    cost_mat[:, 2] = 0.0
    cost_mat[:, 3] = 0.0

    return cost_mat
[ "def", "_creditscoring_costmat", "(", "income", ",", "debt", ",", "pi_1", ",", "cost_mat_parameters", ")", ":", "def", "calculate_a", "(", "cl_i", ",", "int_", ",", "n_term", ")", ":", "\"\"\" Private function \"\"\"", "return", "cl_i", "*", "(", "(", "int_", "*", "(", "1", "+", "int_", ")", "**", "n_term", ")", "/", "(", "(", "1", "+", "int_", ")", "**", "n_term", "-", "1", ")", ")", "def", "calculate_pv", "(", "a", ",", "int_", ",", "n_term", ")", ":", "\"\"\" Private function \"\"\"", "return", "a", "/", "int_", "*", "(", "1", "-", "1", "/", "(", "1", "+", "int_", ")", "**", "n_term", ")", "#Calculate credit line Cl", "def", "calculate_cl", "(", "k", ",", "inc_i", ",", "cl_max", ",", "debt_i", ",", "int_r", ",", "n_term", ")", ":", "\"\"\" Private function \"\"\"", "cl_k", "=", "k", "*", "inc_i", "A", "=", "calculate_a", "(", "cl_k", ",", "int_r", ",", "n_term", ")", "Cl_debt", "=", "calculate_pv", "(", "inc_i", "*", "min", "(", "A", "/", "inc_i", ",", "1", "-", "debt_i", ")", ",", "int_r", ",", "n_term", ")", "return", "min", "(", "cl_k", ",", "cl_max", ",", "Cl_debt", ")", "#calculate costs", "def", "calculate_cost_fn", "(", "cl_i", ",", "lgd", ")", ":", "return", "cl_i", "*", "lgd", "def", "calculate_cost_fp", "(", "cl_i", ",", "int_r", ",", "n_term", ",", "int_cf", ",", "pi_1", ",", "lgd", ",", "cl_avg", ")", ":", "a", "=", "calculate_a", "(", "cl_i", ",", "int_r", ",", "n_term", ")", "pv", "=", "calculate_pv", "(", "a", ",", "int_cf", ",", "n_term", ")", "r", "=", "pv", "-", "cl_i", "r_avg", "=", "calculate_pv", "(", "calculate_a", "(", "cl_avg", ",", "int_r", ",", "n_term", ")", ",", "int_cf", ",", "n_term", ")", "-", "cl_avg", "cost_fp", "=", "r", "-", "(", "1", "-", "pi_1", ")", "*", "r_avg", "+", "pi_1", "*", "calculate_cost_fn", "(", "cl_avg", ",", "lgd", ")", "return", "max", "(", "0", ",", "cost_fp", ")", "v_calculate_cost_fp", "=", "np", ".", "vectorize", "(", "calculate_cost_fp", ")", "v_calculate_cost_fn", "=", "np", ".", "vectorize", "(", "calculate_cost_fn", ")", "v_calculate_cl", "=", "np", ".", "vectorize", "(", "calculate_cl", ")", "# Parameters", "k", "=", "cost_mat_parameters", "[", "'k'", "]", "int_r", "=", "cost_mat_parameters", "[", "'int_r'", "]", "n_term", "=", "cost_mat_parameters", "[", "'n_term'", "]", "int_cf", "=", "cost_mat_parameters", "[", "'int_cf'", "]", "lgd", "=", "cost_mat_parameters", "[", "'lgd'", "]", "cl_max", "=", "cost_mat_parameters", "[", "'cl_max'", "]", "cl", "=", "v_calculate_cl", "(", "k", ",", "income", ",", "cl_max", ",", "debt", ",", "int_r", ",", "n_term", ")", "cl_avg", "=", "cl", ".", "mean", "(", ")", "n_samples", "=", "income", ".", "shape", "[", "0", "]", "cost_mat", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "4", ")", ")", "#cost_mat[FP,FN,TP,TN]", "cost_mat", "[", ":", ",", "0", "]", "=", "v_calculate_cost_fp", "(", "cl", ",", "int_r", ",", "n_term", ",", "int_cf", ",", "pi_1", ",", "lgd", ",", "cl_avg", ")", "cost_mat", "[", ":", ",", "1", "]", "=", "v_calculate_cost_fn", "(", "cl", ",", "lgd", ")", "cost_mat", "[", ":", ",", "2", "]", "=", "0.0", "cost_mat", "[", ":", ",", "3", "]", "=", "0.0", "return", "cost_mat" ]
Private function to calculate the cost matrix of credit scoring models.

Parameters
----------
income : array of shape = [n_samples]
    Monthly income of each example

debt : array of shape = [n_samples]
    Debt ratio each example

pi_1 : float
    Percentage of positives in the training set

References
----------
.. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
       "Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
       in Proceedings of the International Conference on Machine Learning and Applications,
       , 2014.

Returns
-------
cost_mat : array-like of shape = [n_samples, 4]
    Cost matrix of the classification problem
    Where the columns represents the costs of: false positives, false negatives,
    true positives and true negatives, for each example.
[ "Private", "function", "to", "calculate", "the", "cost", "matrix", "of", "credit", "scoring", "models", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/datasets/base.py#L336-L415
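The nested helpers implement the standard annuity formulas, A = Cl * i(1+i)^n / ((1+i)^n - 1) for the installment and PV = A/i * (1 - (1+i)^-n) for its present value. A quick numeric sanity check with hypothetical figures:

def calculate_a(cl, i, n):
    # Monthly installment for a credit line cl at rate i over n terms
    return cl * (i * (1 + i) ** n) / ((1 + i) ** n - 1)

def calculate_pv(a, i, n):
    # Present value of an n-term annuity a discounted at rate i
    return a / i * (1 - 1 / (1 + i) ** n)

a = calculate_a(1000.0, 0.01, 24)
print(round(a, 2))                          # ~47.07 per month
print(round(calculate_pv(a, 0.01, 24), 2))  # 1000.0: discounting at the loan rate recovers Cl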
4,048
albahnsen/CostSensitiveClassification
costcla/probcal/probcal.py
ROCConvexHull.predict_proba
def predict_proba(self, p):
    """ Calculate the calibrated probabilities

    Parameters
    ----------
    y_prob : array-like of shape = [n_samples, 2]
        Predicted probabilities to be calibrated using calibration map

    Returns
    -------
    y_prob_cal : array-like of shape = [n_samples, 1]
        Predicted calibrated probabilities
    """
    # TODO: Check input
    if p.size != p.shape[0]:
        p = p[:, 1]

    calibrated_proba = np.zeros(p.shape[0])
    for i in range(self.calibration_map.shape[0]):
        calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p,
                                        self.calibration_map[i, 0] > p)] = \
            self.calibration_map[i, 2]

    # TODO: return 2D and refactor
    return calibrated_proba
python
def predict_proba(self, p):
    """ Calculate the calibrated probabilities

    Parameters
    ----------
    y_prob : array-like of shape = [n_samples, 2]
        Predicted probabilities to be calibrated using calibration map

    Returns
    -------
    y_prob_cal : array-like of shape = [n_samples, 1]
        Predicted calibrated probabilities
    """
    # TODO: Check input
    if p.size != p.shape[0]:
        p = p[:, 1]

    calibrated_proba = np.zeros(p.shape[0])
    for i in range(self.calibration_map.shape[0]):
        calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p,
                                        self.calibration_map[i, 0] > p)] = \
            self.calibration_map[i, 2]

    # TODO: return 2D and refactor
    return calibrated_proba
[ "def", "predict_proba", "(", "self", ",", "p", ")", ":", "# TODO: Check input", "if", "p", ".", "size", "!=", "p", ".", "shape", "[", "0", "]", ":", "p", "=", "p", "[", ":", ",", "1", "]", "calibrated_proba", "=", "np", ".", "zeros", "(", "p", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "range", "(", "self", ".", "calibration_map", ".", "shape", "[", "0", "]", ")", ":", "calibrated_proba", "[", "np", ".", "logical_and", "(", "self", ".", "calibration_map", "[", "i", ",", "1", "]", "<=", "p", ",", "self", ".", "calibration_map", "[", "i", ",", "0", "]", ">", "p", ")", "]", "=", "self", ".", "calibration_map", "[", "i", ",", "2", "]", "# TODO: return 2D and refactor", "return", "calibrated_proba" ]
Calculate the calibrated probabilities

Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
    Predicted probabilities to be calibrated using calibration map

Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
    Predicted calibrated probabilities
[ "Calculate", "the", "calibrated", "probabilities" ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/probcal/probcal.py#L137-L161
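The lookup above bins p into the half-open intervals [map[i, 1], map[i, 0]) and assigns the calibrated value map[i, 2]. A standalone re-enactment with a hypothetical two-bin map:

import numpy as np

# Columns: (upper bound, lower bound, calibrated value), matching the index order used above
calibration_map = np.array([[0.5, 0.0, 0.1],
                            [1.01, 0.5, 0.8]])
p = np.array([0.2, 0.7, 0.5])
calibrated = np.zeros_like(p)
for upper, lower, value in calibration_map:
    calibrated[np.logical_and(lower <= p, upper > p)] = value
print(calibrated)   # [0.1 0.8 0.8]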
4,049
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
cross_val_score
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)

    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                              train, test, verbose, None,
                                              fit_params)
                      for train, test in cv)
    return np.array(scores)[:, 0]
python
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)

    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                              train, test, verbose, None,
                                              fit_params)
                      for train, test in cv)
    return np.array(scores)[:, 0]
[ "def", "cross_val_score", "(", "estimator", ",", "X", ",", "y", "=", "None", ",", "scoring", "=", "None", ",", "cv", "=", "None", ",", "n_jobs", "=", "1", ",", "verbose", "=", "0", ",", "fit_params", "=", "None", ",", "pre_dispatch", "=", "'2*n_jobs'", ")", ":", "X", ",", "y", "=", "indexable", "(", "X", ",", "y", ")", "cv", "=", "_check_cv", "(", "cv", ",", "X", ",", "y", ",", "classifier", "=", "is_classifier", "(", "estimator", ")", ")", "scorer", "=", "check_scoring", "(", "estimator", ",", "scoring", "=", "scoring", ")", "# We clone the estimator to make sure that all the folds are", "# independent, and that it is pickle-able.", "parallel", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "verbose", ",", "pre_dispatch", "=", "pre_dispatch", ")", "scores", "=", "parallel", "(", "delayed", "(", "_fit_and_score", ")", "(", "clone", "(", "estimator", ")", ",", "X", ",", "y", ",", "scorer", ",", "train", ",", "test", ",", "verbose", ",", "None", ",", "fit_params", ")", "for", "train", ",", "test", "in", "cv", ")", "return", "np", ".", "array", "(", "scores", ")", "[", ":", ",", "0", "]" ]
Evaluate a score by cross-validation

Parameters
----------
estimator : estimator object implementing 'fit'
    The object to use to fit the data.

X : array-like
    The data to fit. Can be, for example a list, or an array at least 2d.

y : array-like, optional, default: None
    The target variable to try to predict in the case of
    supervised learning.

scoring : string, callable or None, optional, default: None
    A string (see model evaluation documentation) or
    a scorer callable object / function with signature
    ``scorer(estimator, X, y)``.

cv : cross-validation generator or int, optional, default: None
    A cross-validation generator to use. If int, determines
    the number of folds in StratifiedKFold if y is binary
    or multiclass and estimator is a classifier, or the number
    of folds in KFold otherwise. If None, it is equivalent to cv=3.

n_jobs : integer, optional
    The number of CPUs to use to do the computation. -1 means
    'all CPUs'.

verbose : integer, optional
    The verbosity level.

fit_params : dict, optional
    Parameters to pass to the fit method of the estimator.

pre_dispatch : int, or string, optional
    Controls the number of jobs that get dispatched during parallel
    execution. Reducing this number can be useful to avoid an
    explosion of memory consumption when more jobs get dispatched
    than CPUs can process. This parameter can be:

        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs

        - An int, giving the exact number of total jobs that are
          spawned

        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'

Returns
-------
scores : array of float, shape=(len(list(cv)),)
    Array of scores of the estimator for each run of the cross validation.
[ "Evaluate", "a", "score", "by", "cross", "-", "validation" ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1080-L1151
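This helper appears to mirror scikit-learn's cross_val_score of that era (the commented-out check_array call references version 0.15). The same contract survives in modern releases under sklearn.model_selection, so a usage sketch against that module behaves as documented above:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score   # modern home of the same API

X, y = load_iris(return_X_y=True)
scores = cross_val_score(LogisticRegression(max_iter=1000), X, y, cv=5)
print(scores.shape)   # (5,): one score per fold, as in the Returns section above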
4,050
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
_safe_split
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    if hasattr(estimator, 'kernel') and isinstance(estimator.kernel, collections.Callable):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)

    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None

    return X_subset, y_subset
python
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    if hasattr(estimator, 'kernel') and isinstance(estimator.kernel, collections.Callable):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)

    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None

    return X_subset, y_subset
[ "def", "_safe_split", "(", "estimator", ",", "X", ",", "y", ",", "indices", ",", "train_indices", "=", "None", ")", ":", "if", "hasattr", "(", "estimator", ",", "'kernel'", ")", "and", "isinstance", "(", "estimator", ".", "kernel", ",", "collections", ".", "Callable", ")", ":", "# cannot compute the kernel values with custom function", "raise", "ValueError", "(", "\"Cannot use a custom kernel function. \"", "\"Precompute the kernel matrix instead.\"", ")", "if", "not", "hasattr", "(", "X", ",", "\"shape\"", ")", ":", "if", "getattr", "(", "estimator", ",", "\"_pairwise\"", ",", "False", ")", ":", "raise", "ValueError", "(", "\"Precomputed kernels or affinity matrices have \"", "\"to be passed as arrays or sparse matrices.\"", ")", "X_subset", "=", "[", "X", "[", "idx", "]", "for", "idx", "in", "indices", "]", "else", ":", "if", "getattr", "(", "estimator", ",", "\"_pairwise\"", ",", "False", ")", ":", "# X is a precomputed square kernel matrix", "if", "X", ".", "shape", "[", "0", "]", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"X should be a square kernel matrix\"", ")", "if", "train_indices", "is", "None", ":", "X_subset", "=", "X", "[", "np", ".", "ix_", "(", "indices", ",", "indices", ")", "]", "else", ":", "X_subset", "=", "X", "[", "np", ".", "ix_", "(", "indices", ",", "train_indices", ")", "]", "else", ":", "X_subset", "=", "safe_indexing", "(", "X", ",", "indices", ")", "if", "y", "is", "not", "None", ":", "y_subset", "=", "safe_indexing", "(", "y", ",", "indices", ")", "else", ":", "y_subset", "=", "None", "return", "X_subset", ",", "y_subset" ]
Create subset of dataset and properly handle kernels.
[ "Create", "subset", "of", "dataset", "and", "properly", "handle", "kernels", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1258-L1287
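The pairwise branch is the subtle part: for a precomputed kernel, the test split must keep all training columns so the estimator can compare test points against training points. A minimal sketch with hypothetical data:

import numpy as np

X = np.arange(20.0).reshape(5, 4)
K = X @ X.T                           # hypothetical precomputed 5x5 kernel matrix
train, test = np.array([0, 1, 2]), np.array([3, 4])
K_test = K[np.ix_(test, train)]       # rows = test points, columns = training points
print(K_test.shape)                   # (2, 3)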
4,051
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
_score
def _score(estimator, X_test, y_test, scorer):
    """Compute the score of an estimator on a given test set."""
    if y_test is None:
        score = scorer(estimator, X_test)
    else:
        score = scorer(estimator, X_test, y_test)
    if not isinstance(score, numbers.Number):
        raise ValueError("scoring must return a number, got %s (%s) instead."
                         % (str(score), type(score)))
    return score
python
def _score(estimator, X_test, y_test, scorer):
    """Compute the score of an estimator on a given test set."""
    if y_test is None:
        score = scorer(estimator, X_test)
    else:
        score = scorer(estimator, X_test, y_test)
    if not isinstance(score, numbers.Number):
        raise ValueError("scoring must return a number, got %s (%s) instead."
                         % (str(score), type(score)))
    return score
[ "def", "_score", "(", "estimator", ",", "X_test", ",", "y_test", ",", "scorer", ")", ":", "if", "y_test", "is", "None", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ")", "else", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ",", "y_test", ")", "if", "not", "isinstance", "(", "score", ",", "numbers", ".", "Number", ")", ":", "raise", "ValueError", "(", "\"scoring must return a number, got %s (%s) instead.\"", "%", "(", "str", "(", "score", ")", ",", "type", "(", "score", ")", ")", ")", "return", "score" ]
Compute the score of an estimator on a given test set.
[ "Compute", "the", "score", "of", "an", "estimator", "on", "a", "given", "test", "set", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1290-L1299
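The isinstance check guards against scorers that return arrays or other non-scalars. A quick demonstration that a standard scorer satisfies it, using scikit-learn's public scorer API:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer

X, y = load_iris(return_X_y=True)
est = LogisticRegression(max_iter=1000).fit(X, y)
score = get_scorer("accuracy")(est, X, y)
print(score, isinstance(score, float))   # a single number, so the check above passes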
4,052
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
_shuffle
def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y eventually shuffle among same labels."""
    if labels is None:
        ind = random_state.permutation(len(y))
    else:
        ind = np.arange(len(labels))
        for label in np.unique(labels):
            this_mask = (labels == label)
            ind[this_mask] = random_state.permutation(ind[this_mask])
    return y[ind]
python
def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y eventually shuffle among same labels."""
    if labels is None:
        ind = random_state.permutation(len(y))
    else:
        ind = np.arange(len(labels))
        for label in np.unique(labels):
            this_mask = (labels == label)
            ind[this_mask] = random_state.permutation(ind[this_mask])
    return y[ind]
[ "def", "_shuffle", "(", "y", ",", "labels", ",", "random_state", ")", ":", "if", "labels", "is", "None", ":", "ind", "=", "random_state", ".", "permutation", "(", "len", "(", "y", ")", ")", "else", ":", "ind", "=", "np", ".", "arange", "(", "len", "(", "labels", ")", ")", "for", "label", "in", "np", ".", "unique", "(", "labels", ")", ":", "this_mask", "=", "(", "labels", "==", "label", ")", "ind", "[", "this_mask", "]", "=", "random_state", ".", "permutation", "(", "ind", "[", "this_mask", "]", ")", "return", "y", "[", "ind", "]" ]
Return a shuffled copy of y eventually shuffle among same labels.
[ "Return", "a", "shuffled", "copy", "of", "y", "eventually", "shuffle", "among", "same", "labels", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1311-L1320
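When labels are given, the permutation is applied independently inside each label group, so entries never cross group boundaries. A standalone re-enactment with hypothetical arrays:

import numpy as np

rng = np.random.RandomState(0)
y = np.array([10, 11, 12, 13])
labels = np.array([0, 0, 1, 1])
ind = np.arange(len(labels))
for label in np.unique(labels):
    mask = labels == label
    ind[mask] = rng.permutation(ind[mask])
print(y[ind])   # {10, 11} and {12, 13} are shuffled only among themselves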
4,053
albahnsen/CostSensitiveClassification
costcla/utils/cross_validation.py
check_cv
def check_cv(cv, X=None, y=None, classifier=False):
    """Input checker utility for building a CV in a user friendly way.

    Parameters
    ----------
    cv : int, a cv generator instance, or None
        The input specifying which cv generator to use. It can be an
        integer, in which case it is the number of folds in a KFold,
        None, in which case 3 fold is used, or another object, that
        will then be used as a cv generator.

    X : array-like
        The data the cross-val object will be applied on.

    y : array-like
        The target variable for a supervised learning problem.

    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance, whatever
        the input type.
    """
    return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
python
def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, a cv generator instance, or None The input specifying which cv generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. """ return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
[ "def", "check_cv", "(", "cv", ",", "X", "=", "None", ",", "y", "=", "None", ",", "classifier", "=", "False", ")", ":", "return", "_check_cv", "(", "cv", ",", "X", "=", "X", ",", "y", "=", "y", ",", "classifier", "=", "classifier", ",", "warn_mask", "=", "True", ")" ]
Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, a cv generator instance, or None The input specifying which cv generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3-fold is used, or another object that will then be used as a cv generator. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean, optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type.
[ "Input", "checker", "utility", "for", "building", "a", "CV", "in", "a", "user", "friendly", "way", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/utils/cross_validation.py#L1323-L1350
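A tiny sketch of the cv-argument convention described in the docstring (pure illustration; the actual resolution happens in the private _check_cv helper, which is not shown in this record):

def describe_cv(cv):
    # mirrors the documented convention: None -> 3-fold, int -> that many folds,
    # anything else -> used as a ready-made CV generator
    if cv is None:
        return "3-fold KFold (default)"
    if isinstance(cv, int):
        return "%d-fold KFold" % cv
    return "user-supplied CV generator"

print(describe_cv(None), describe_cv(5), sep=" | ")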
4,054
albahnsen/CostSensitiveClassification
costcla/sampling/_smote.py
_borderlineSMOTE
def _borderlineSMOTE(X, y, minority_target, N, k): """ Returns synthetic minority samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Holds the minority and majority samples y : array-like, shape = [n_samples] Holds the class targets for samples minority_target : value for minority class N : percentage of new synthetic samples: n_synthetic_samples = N/100 * n_minority_samples. Can be < 100. k : int. Number of nearest neighbours. h : high in random.uniform to scale the difference of the synthetic sample Returns ------- safe : Safe minorities synthetic : Synthetic sample of minorities in danger zone danger : Minorities of danger zone """ n_samples, _ = X.shape #Learn nearest neighbours on complete training set neigh = NearestNeighbors(n_neighbors = k) neigh.fit(X) safe_minority_indices = list() danger_minority_indices = list() for i in range(n_samples): if y[i] != minority_target: continue nn = neigh.kneighbors(X[i], return_distance=False) majority_neighbours = 0 for n in nn[0]: if y[n] != minority_target: majority_neighbours += 1 if majority_neighbours == len(nn): continue elif majority_neighbours < (len(nn)/2): logger.debug("Add sample to safe minorities.") safe_minority_indices.append(i) else: #DANGER zone danger_minority_indices.append(i) #SMOTE danger minority samples synthetic_samples = _SMOTE(X[danger_minority_indices], N, k, h = 0.5) return (X[safe_minority_indices], synthetic_samples, X[danger_minority_indices])
python
def _borderlineSMOTE(X, y, minority_target, N, k): """ Returns synthetic minority samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Holds the minority and majority samples y : array-like, shape = [n_samples] Holds the class targets for samples minority_target : value for minority class N : percentage of new synthetic samples: n_synthetic_samples = N/100 * n_minority_samples. Can be < 100. k : int. Number of nearest neighbours. h : high in random.uniform to scale the difference of the synthetic sample Returns ------- safe : Safe minorities synthetic : Synthetic sample of minorities in danger zone danger : Minorities of danger zone """ n_samples, _ = X.shape #Learn nearest neighbours on complete training set neigh = NearestNeighbors(n_neighbors = k) neigh.fit(X) safe_minority_indices = list() danger_minority_indices = list() for i in range(n_samples): if y[i] != minority_target: continue nn = neigh.kneighbors(X[i], return_distance=False) majority_neighbours = 0 for n in nn[0]: if y[n] != minority_target: majority_neighbours += 1 if majority_neighbours == len(nn): continue elif majority_neighbours < (len(nn)/2): logger.debug("Add sample to safe minorities.") safe_minority_indices.append(i) else: #DANGER zone danger_minority_indices.append(i) #SMOTE danger minority samples synthetic_samples = _SMOTE(X[danger_minority_indices], N, k, h = 0.5) return (X[safe_minority_indices], synthetic_samples, X[danger_minority_indices])
[ "def", "_borderlineSMOTE", "(", "X", ",", "y", ",", "minority_target", ",", "N", ",", "k", ")", ":", "n_samples", ",", "_", "=", "X", ".", "shape", "#Learn nearest neighbours on complete training set", "neigh", "=", "NearestNeighbors", "(", "n_neighbors", "=", "k", ")", "neigh", ".", "fit", "(", "X", ")", "safe_minority_indices", "=", "list", "(", ")", "danger_minority_indices", "=", "list", "(", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "if", "y", "[", "i", "]", "!=", "minority_target", ":", "continue", "nn", "=", "neigh", ".", "kneighbors", "(", "X", "[", "i", "]", ",", "return_distance", "=", "False", ")", "majority_neighbours", "=", "0", "for", "n", "in", "nn", "[", "0", "]", ":", "if", "y", "[", "n", "]", "!=", "minority_target", ":", "majority_neighbours", "+=", "1", "if", "majority_neighbours", "==", "len", "(", "nn", ")", ":", "continue", "elif", "majority_neighbours", "<", "(", "len", "(", "nn", ")", "/", "2", ")", ":", "logger", ".", "debug", "(", "\"Add sample to safe minorities.\"", ")", "safe_minority_indices", ".", "append", "(", "i", ")", "else", ":", "#DANGER zone", "danger_minority_indices", ".", "append", "(", "i", ")", "#SMOTE danger minority samples", "synthetic_samples", "=", "_SMOTE", "(", "X", "[", "danger_minority_indices", "]", ",", "N", ",", "k", ",", "h", "=", "0.5", ")", "return", "(", "X", "[", "safe_minority_indices", "]", ",", "synthetic_samples", ",", "X", "[", "danger_minority_indices", "]", ")" ]
Returns synthetic minority samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Holds the minority and majority samples y : array-like, shape = [n_samples] Holds the class targets for samples minority_target : value for minority class N : percentage of new synthetic samples: n_synthetic_samples = N/100 * n_minority_samples. Can be < 100. k : int. Number of nearest neighbours. h : high in random.uniform to scale the difference of the synthetic sample Returns ------- safe : Safe minorities synthetic : Synthetic sample of minorities in danger zone danger : Minorities of danger zone
[ "Returns", "synthetic", "minority", "samples", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/sampling/_smote.py#L91-L147
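A compact sketch of the safe/noise/danger split on toy data. It is illustrative only: the query point is reshaped to 2-D as recent scikit-learn requires, and the majority count is compared against the k-neighbour row nn[0], which appears to be the intent of the function above:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0], [0.1], [0.2], [5.0], [5.1], [5.2], [0.15]])
y = np.array([0, 0, 0, 1, 1, 1, 1])  # minority class = 1; the last point sits among the majority

neigh = NearestNeighbors(n_neighbors=3).fit(X)
for i in np.where(y == 1)[0]:
    nn = neigh.kneighbors(X[i].reshape(1, -1), return_distance=False)[0]
    majority = np.sum(y[nn] != 1)
    zone = "noise" if majority == len(nn) else ("safe" if majority < len(nn) / 2 else "danger")
    print(i, zone)  # points 3-5 are safe; point 6 lands in the danger zone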
4,055
albahnsen/CostSensitiveClassification
costcla/models/directcost.py
BayesMinimumRiskClassifier.fit
def fit(self,y_true_cal=None, y_prob_cal=None): """ If calibration, then train the calibration of probabilities Parameters ---------- y_true_cal : array-like of shape = [n_samples], optional default = None True class to be used for calibrating the probabilities y_prob_cal : array-like of shape = [n_samples, 2], optional default = None Predicted probabilities to be used for calibrating the probabilities Returns ------- self : object Returns self. """ if self.calibration: self.cal = ROCConvexHull() self.cal.fit(y_true_cal, y_prob_cal[:, 1])
python
def fit(self,y_true_cal=None, y_prob_cal=None): """ If calibration, then train the calibration of probabilities Parameters ---------- y_true_cal : array-like of shape = [n_samples], optional default = None True class to be used for calibrating the probabilities y_prob_cal : array-like of shape = [n_samples, 2], optional default = None Predicted probabilities to be used for calibrating the probabilities Returns ------- self : object Returns self. """ if self.calibration: self.cal = ROCConvexHull() self.cal.fit(y_true_cal, y_prob_cal[:, 1])
[ "def", "fit", "(", "self", ",", "y_true_cal", "=", "None", ",", "y_prob_cal", "=", "None", ")", ":", "if", "self", ".", "calibration", ":", "self", ".", "cal", "=", "ROCConvexHull", "(", ")", "self", ".", "cal", ".", "fit", "(", "y_true_cal", ",", "y_prob_cal", "[", ":", ",", "1", "]", ")" ]
If calibration is enabled, train the calibration of the probabilities Parameters ---------- y_true_cal : array-like of shape = [n_samples], optional (default = None) True class to be used for calibrating the probabilities y_prob_cal : array-like of shape = [n_samples, 2], optional (default = None) Predicted probabilities to be used for calibrating the probabilities Returns ------- self : object Returns self.
[ "If", "calibration", "then", "train", "the", "calibration", "of", "probabilities" ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/directcost.py#L56-L74
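For context, a minimal numpy sketch of the Bayes minimum risk decision this classifier applies after the (optional) calibration trained above, using the library's documented cost-matrix column order [FP, FN, TP, TN] (toy numbers, illustrative only):

import numpy as np

y_prob = np.array([[0.7, 0.3], [0.2, 0.8]])       # columns: P(y=0), P(y=1)
cost_mat = np.tile([2.0, 1.0, 0.0, 0.0], (2, 1))  # columns: FP, FN, TP, TN

risk_pred0 = y_prob[:, 1] * cost_mat[:, 1] + y_prob[:, 0] * cost_mat[:, 3]  # expected cost of predicting 0
risk_pred1 = y_prob[:, 1] * cost_mat[:, 2] + y_prob[:, 0] * cost_mat[:, 0]  # expected cost of predicting 1
y_pred = (risk_pred1 < risk_pred0).astype(int)
print(y_pred)  # [0 1]: predict positive only where its expected cost is lower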
4,056
albahnsen/CostSensitiveClassification
costcla/models/directcost.py
ThresholdingOptimization.fit
def fit(self, y_prob, cost_mat, y_true): """ Calculate the optimal threshold using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. y_true : array-like of shape = [n_samples] True class Returns ------- self """ #TODO: Check input if self.calibration: cal = ROCConvexHull() cal.fit(y_true, y_prob[:, 1]) y_prob[:, 1] = cal.predict_proba(y_prob[:, 1]) y_prob[:, 0] = 1 - y_prob[:, 1] thresholds = np.unique(y_prob) cost = np.zeros(thresholds.shape) for i in range(thresholds.shape[0]): pred = np.floor(y_prob[:, 1]+(1-thresholds[i])) cost[i] = cost_loss(y_true, pred, cost_mat) self.threshold_ = thresholds[np.argmin(cost)] return self
python
def fit(self, y_prob, cost_mat, y_true): """ Calculate the optimal threshold using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. y_true : array-like of shape = [n_samples] True class Returns ------- self """ #TODO: Check input if self.calibration: cal = ROCConvexHull() cal.fit(y_true, y_prob[:, 1]) y_prob[:, 1] = cal.predict_proba(y_prob[:, 1]) y_prob[:, 0] = 1 - y_prob[:, 1] thresholds = np.unique(y_prob) cost = np.zeros(thresholds.shape) for i in range(thresholds.shape[0]): pred = np.floor(y_prob[:, 1]+(1-thresholds[i])) cost[i] = cost_loss(y_true, pred, cost_mat) self.threshold_ = thresholds[np.argmin(cost)] return self
[ "def", "fit", "(", "self", ",", "y_prob", ",", "cost_mat", ",", "y_true", ")", ":", "#TODO: Check input", "if", "self", ".", "calibration", ":", "cal", "=", "ROCConvexHull", "(", ")", "cal", ".", "fit", "(", "y_true", ",", "y_prob", "[", ":", ",", "1", "]", ")", "y_prob", "[", ":", ",", "1", "]", "=", "cal", ".", "predict_proba", "(", "y_prob", "[", ":", ",", "1", "]", ")", "y_prob", "[", ":", ",", "0", "]", "=", "1", "-", "y_prob", "[", ":", ",", "1", "]", "thresholds", "=", "np", ".", "unique", "(", "y_prob", ")", "cost", "=", "np", ".", "zeros", "(", "thresholds", ".", "shape", ")", "for", "i", "in", "range", "(", "thresholds", ".", "shape", "[", "0", "]", ")", ":", "pred", "=", "np", ".", "floor", "(", "y_prob", "[", ":", ",", "1", "]", "+", "(", "1", "-", "thresholds", "[", "i", "]", ")", ")", "cost", "[", "i", "]", "=", "cost_loss", "(", "y_true", ",", "pred", ",", "cost_mat", ")", "self", ".", "threshold_", "=", "thresholds", "[", "np", ".", "argmin", "(", "cost", ")", "]", "return", "self" ]
Calculate the optimal threshold using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. y_true : array-like of shape = [n_samples] True class Returns ------- self
[ "Calculate", "the", "optimal", "threshold", "using", "the", "ThresholdingOptimization", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/directcost.py#L199-L238
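A toy end-to-end sketch of the threshold scan above, with a stand-in cost_loss written to match the documented column order [FP, FN, TP, TN] (all names and numbers here are illustrative, not the library's implementation):

import numpy as np

def toy_cost_loss(y_true, y_pred, cost_mat):
    # cost_mat columns: FP, FN, TP, TN
    fp = (y_pred == 1) & (y_true == 0)
    fn = (y_pred == 0) & (y_true == 1)
    tp = (y_pred == 1) & (y_true == 1)
    tn = (y_pred == 0) & (y_true == 0)
    return (fp * cost_mat[:, 0] + fn * cost_mat[:, 1]
            + tp * cost_mat[:, 2] + tn * cost_mat[:, 3]).sum()

y_true = np.array([0, 0, 1, 1])
p1 = np.array([0.2, 0.6, 0.4, 0.9])
cost_mat = np.tile([1.0, 5.0, 0.0, 0.0], (4, 1))

thresholds = np.unique(p1)
costs = [toy_cost_loss(y_true, np.floor(p1 + (1 - t)), cost_mat) for t in thresholds]
print(thresholds[int(np.argmin(costs))])  # 0.4: the threshold minimizing total cost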
4,057
albahnsen/CostSensitiveClassification
costcla/models/directcost.py
ThresholdingOptimization.predict
def predict(self, y_prob): """ Calculate the prediction using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. Returns ------- y_pred : array-like of shape = [n_samples] Predicted class """ y_pred = np.floor(y_prob[:, 1] + (1 - self.threshold_)) return y_pred
python
def predict(self, y_prob): """ Calculate the prediction using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. Returns ------- y_pred : array-like of shape = [n_samples] Predicted class """ y_pred = np.floor(y_prob[:, 1] + (1 - self.threshold_)) return y_pred
[ "def", "predict", "(", "self", ",", "y_prob", ")", ":", "y_pred", "=", "np", ".", "floor", "(", "y_prob", "[", ":", ",", "1", "]", "+", "(", "1", "-", "self", ".", "threshold_", ")", ")", "return", "y_pred" ]
Calculate the prediction using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. Returns ------- y_pred : array-like of shape = [n_samples] Predicted class
[ "Calculate", "the", "prediction", "using", "the", "ThresholdingOptimization", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/directcost.py#L240-L255
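The floor trick above is simply thresholding at threshold_; a two-line check (illustrative, valid for p and t in [0, 1] outside the degenerate p = 1, t = 0 corner):

import numpy as np

p = np.array([0.1, 0.5, 0.49, 0.51, 0.9])
t = 0.5
assert np.array_equal(np.floor(p + (1 - t)), (p >= t).astype(float))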
4,058
albahnsen/CostSensitiveClassification
costcla/sampling/sampling.py
undersampling
def undersampling(X, y, cost_mat=None, per=0.5): """Under-sampling. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array-like of shape = [n_samples] Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4], optional (default=None) Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. per: float, optional (default = 0.5) Percentage of the minority class in the under-sampled data """ n_samples = X.shape[0] #TODO: allow y different from (0, 1) num_y1 = y.sum() num_y0 = n_samples - num_y1 filter_rand = np.random.rand(int(num_y1 + num_y0)) #TODO: rewrite in a more readable way if num_y1 < num_y0: num_y0_new = num_y1 * 1.0 / per - num_y1 num_y0_new_per = num_y0_new * 1.0 / num_y0 filter_0 = np.logical_and(y == 0, filter_rand <= num_y0_new_per) filter_ = np.nonzero(np.logical_or(y == 1, filter_0))[0] else: num_y1_new = num_y0 * 1.0 / per - num_y0 num_y1_new_per = num_y1_new * 1.0 / num_y1 filter_1 = np.logical_and(y == 1, filter_rand <= num_y1_new_per) filter_ = np.nonzero(np.logical_or(y == 0, filter_1))[0] X_u = X[filter_, :] y_u = y[filter_] if not cost_mat is None: cost_mat_u = cost_mat[filter_, :] return X_u, y_u, cost_mat_u else: return X_u, y_u
python
def undersampling(X, y, cost_mat=None, per=0.5): """Under-sampling. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array-like of shape = [n_samples] Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4], optional (default=None) Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. per: float, optional (default = 0.5) Percentage of the minority class in the under-sampled data """ n_samples = X.shape[0] #TODO: allow y different from (0, 1) num_y1 = y.sum() num_y0 = n_samples - num_y1 filter_rand = np.random.rand(int(num_y1 + num_y0)) #TODO: rewrite in a more readable way if num_y1 < num_y0: num_y0_new = num_y1 * 1.0 / per - num_y1 num_y0_new_per = num_y0_new * 1.0 / num_y0 filter_0 = np.logical_and(y == 0, filter_rand <= num_y0_new_per) filter_ = np.nonzero(np.logical_or(y == 1, filter_0))[0] else: num_y1_new = num_y0 * 1.0 / per - num_y0 num_y1_new_per = num_y1_new * 1.0 / num_y1 filter_1 = np.logical_and(y == 1, filter_rand <= num_y1_new_per) filter_ = np.nonzero(np.logical_or(y == 0, filter_1))[0] X_u = X[filter_, :] y_u = y[filter_] if not cost_mat is None: cost_mat_u = cost_mat[filter_, :] return X_u, y_u, cost_mat_u else: return X_u, y_u
[ "def", "undersampling", "(", "X", ",", "y", ",", "cost_mat", "=", "None", ",", "per", "=", "0.5", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "#TODO: allow y different from (0, 1)", "num_y1", "=", "y", ".", "sum", "(", ")", "num_y0", "=", "n_samples", "-", "num_y1", "filter_rand", "=", "np", ".", "random", ".", "rand", "(", "int", "(", "num_y1", "+", "num_y0", ")", ")", "#TODO: rewrite in a more readable way", "if", "num_y1", "<", "num_y0", ":", "num_y0_new", "=", "num_y1", "*", "1.0", "/", "per", "-", "num_y1", "num_y0_new_per", "=", "num_y0_new", "*", "1.0", "/", "num_y0", "filter_0", "=", "np", ".", "logical_and", "(", "y", "==", "0", ",", "filter_rand", "<=", "num_y0_new_per", ")", "filter_", "=", "np", ".", "nonzero", "(", "np", ".", "logical_or", "(", "y", "==", "1", ",", "filter_0", ")", ")", "[", "0", "]", "else", ":", "num_y1_new", "=", "num_y0", "*", "1.0", "/", "per", "-", "num_y0", "num_y1_new_per", "=", "num_y1_new", "*", "1.0", "/", "num_y1", "filter_1", "=", "np", ".", "logical_and", "(", "y", "==", "1", ",", "filter_rand", "<=", "num_y1_new_per", ")", "filter_", "=", "np", ".", "nonzero", "(", "np", ".", "logical_or", "(", "y", "==", "0", ",", "filter_1", ")", ")", "[", "0", "]", "X_u", "=", "X", "[", "filter_", ",", ":", "]", "y_u", "=", "y", "[", "filter_", "]", "if", "not", "cost_mat", "is", "None", ":", "cost_mat_u", "=", "cost_mat", "[", "filter_", ",", ":", "]", "return", "X_u", ",", "y_u", ",", "cost_mat_u", "else", ":", "return", "X_u", ",", "y_u" ]
Under-sampling. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array-like of shape = [n_samples] Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4], optional (default=None) Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. per : float, optional (default = 0.5) Percentage of the minority class in the under-sampled data.
[ "Under", "-", "sampling", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/sampling/sampling.py#L11-L57
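The arithmetic behind per: to make the minority class a fraction per of the result, the number of majority samples kept on average must solve num_y1 / (num_y1 + n_keep) = per (the function applies this only in expectation, via the random filter). A quick check with toy counts:

num_y1, per = 100, 0.5
num_y0_new = num_y1 / per - num_y1            # majority samples to keep on average
assert num_y1 / (num_y1 + num_y0_new) == per  # minority fraction after sampling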
4,059
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._node_cost
def _node_cost(self, y_true, cost_mat): """ Private function to calculate the cost of a node. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(cost_loss : float, node prediction : int, node predicted probability : float) """ n_samples = len(y_true) # Evaluates the cost by predicting the node as positive and negative costs = np.zeros(2) costs[0] = cost_loss(y_true, np.zeros(y_true.shape), cost_mat) costs[1] = cost_loss(y_true, np.ones(y_true.shape), cost_mat) pi = np.array([1 - y_true.mean(), y_true.mean()]) if self.criterion == 'direct_cost': costs = costs elif self.criterion == 'pi_cost': costs *= pi elif self.criterion == 'gini_cost': costs *= pi ** 2 elif self.criterion in 'entropy_cost': if pi[0] == 0 or pi[1] == 0: costs *= 0 else: costs *= -np.log(pi) y_pred = np.argmin(costs) # Calculate the predicted probability of a node using laplace correction. n_positives = y_true.sum() y_prob = (n_positives + 1.0) / (n_samples + 2.0) return costs[y_pred], y_pred, y_prob
python
def _node_cost(self, y_true, cost_mat): """ Private function to calculate the cost of a node. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(cost_loss : float, node prediction : int, node predicted probability : float) """ n_samples = len(y_true) # Evaluates the cost by predicting the node as positive and negative costs = np.zeros(2) costs[0] = cost_loss(y_true, np.zeros(y_true.shape), cost_mat) costs[1] = cost_loss(y_true, np.ones(y_true.shape), cost_mat) pi = np.array([1 - y_true.mean(), y_true.mean()]) if self.criterion == 'direct_cost': costs = costs elif self.criterion == 'pi_cost': costs *= pi elif self.criterion == 'gini_cost': costs *= pi ** 2 elif self.criterion in 'entropy_cost': if pi[0] == 0 or pi[1] == 0: costs *= 0 else: costs *= -np.log(pi) y_pred = np.argmin(costs) # Calculate the predicted probability of a node using laplace correction. n_positives = y_true.sum() y_prob = (n_positives + 1.0) / (n_samples + 2.0) return costs[y_pred], y_pred, y_prob
[ "def", "_node_cost", "(", "self", ",", "y_true", ",", "cost_mat", ")", ":", "n_samples", "=", "len", "(", "y_true", ")", "# Evaluates the cost by predicting the node as positive and negative", "costs", "=", "np", ".", "zeros", "(", "2", ")", "costs", "[", "0", "]", "=", "cost_loss", "(", "y_true", ",", "np", ".", "zeros", "(", "y_true", ".", "shape", ")", ",", "cost_mat", ")", "costs", "[", "1", "]", "=", "cost_loss", "(", "y_true", ",", "np", ".", "ones", "(", "y_true", ".", "shape", ")", ",", "cost_mat", ")", "pi", "=", "np", ".", "array", "(", "[", "1", "-", "y_true", ".", "mean", "(", ")", ",", "y_true", ".", "mean", "(", ")", "]", ")", "if", "self", ".", "criterion", "==", "'direct_cost'", ":", "costs", "=", "costs", "elif", "self", ".", "criterion", "==", "'pi_cost'", ":", "costs", "*=", "pi", "elif", "self", ".", "criterion", "==", "'gini_cost'", ":", "costs", "*=", "pi", "**", "2", "elif", "self", ".", "criterion", "in", "'entropy_cost'", ":", "if", "pi", "[", "0", "]", "==", "0", "or", "pi", "[", "1", "]", "==", "0", ":", "costs", "*=", "0", "else", ":", "costs", "*=", "-", "np", ".", "log", "(", "pi", ")", "y_pred", "=", "np", ".", "argmin", "(", "costs", ")", "# Calculate the predicted probability of a node using laplace correction.", "n_positives", "=", "y_true", ".", "sum", "(", ")", "y_prob", "=", "(", "n_positives", "+", "1.0", ")", "/", "(", "n_samples", "+", "2.0", ")", "return", "costs", "[", "y_pred", "]", ",", "y_pred", ",", "y_prob" ]
Private function to calculate the cost of a node. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(cost_loss : float, node prediction : int, node predicted probability : float)
[ "Private", "function", "to", "calculate", "the", "cost", "of", "a", "node", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L138-L183
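A toy sketch of the two candidate node costs and the Laplace-corrected leaf probability computed above, expanding the stand-in cost logic directly (illustrative numbers; columns follow the documented [FP, FN, TP, TN] order):

import numpy as np

y = np.array([1, 1, 0, 0, 0])
cost_mat = np.tile([1.0, 5.0, 0.0, 0.0], (5, 1))  # columns: FP, FN, TP, TN

# cost of predicting the whole node negative (FNs on positives) vs positive (FPs on negatives)
cost_pred0 = cost_mat[y == 1, 1].sum() + cost_mat[y == 0, 3].sum()
cost_pred1 = cost_mat[y == 0, 0].sum() + cost_mat[y == 1, 2].sum()
y_pred = int(np.argmin([cost_pred0, cost_pred1]))
y_prob = (y.sum() + 1.0) / (len(y) + 2.0)         # Laplace correction
print(y_pred, y_prob)                             # 1, 3/7 = 0.428...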
4,060
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._calculate_gain
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split): """ Private function to calculate the gain in cost of using split in the current node. Parameters ---------- cost_base : float Cost of the naive prediction y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. split : tuple of len = 2 split[0] = feature to split = j split[1] = where to split = l Returns ------- tuple(gain : float, left node prediction : int) """ # Check if cost_base == 0, then no gain is possible #TODO: This must be check in _best_split if cost_base == 0.0: return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1) j, l = split filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples, n_features = X.shape # Check if one of the leafs is empty #TODO: This must be check in _best_split if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaft is empty return 0.0, 0.0 # Split X in Xl and Xr according to rule split Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :]) Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :]) if self.criterion_weight: n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] Xl_w = n_samples_Xl * 1.0 / n_samples Xr_w = 1 - Xl_w gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6) else: gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6) return gain, Xl_pred
python
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split): """ Private function to calculate the gain in cost of using split in the current node. Parameters ---------- cost_base : float Cost of the naive prediction y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. split : tuple of len = 2 split[0] = feature to split = j split[1] = where to split = l Returns ------- tuple(gain : float, left node prediction : int) """ # Check if cost_base == 0, then no gain is possible #TODO: This must be check in _best_split if cost_base == 0.0: return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1) j, l = split filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples, n_features = X.shape # Check if one of the leafs is empty #TODO: This must be check in _best_split if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaft is empty return 0.0, 0.0 # Split X in Xl and Xr according to rule split Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :]) Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :]) if self.criterion_weight: n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] Xl_w = n_samples_Xl * 1.0 / n_samples Xr_w = 1 - Xl_w gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6) else: gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6) return gain, Xl_pred
[ "def", "_calculate_gain", "(", "self", ",", "cost_base", ",", "y_true", ",", "X", ",", "cost_mat", ",", "split", ")", ":", "# Check if cost_base == 0, then no gain is possible", "#TODO: This must be check in _best_split", "if", "cost_base", "==", "0.0", ":", "return", "0.0", ",", "int", "(", "np", ".", "sign", "(", "y_true", ".", "mean", "(", ")", "-", "0.5", ")", "==", "1", ")", "# In case cost_b==0 and pi_1!=(0,1)", "j", ",", "l", "=", "split", "filter_Xl", "=", "(", "X", "[", ":", ",", "j", "]", "<=", "l", ")", "filter_Xr", "=", "~", "filter_Xl", "n_samples", ",", "n_features", "=", "X", ".", "shape", "# Check if one of the leafs is empty", "#TODO: This must be check in _best_split", "if", "np", ".", "nonzero", "(", "filter_Xl", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "in", "[", "0", ",", "n_samples", "]", ":", "# One leaft is empty", "return", "0.0", ",", "0.0", "# Split X in Xl and Xr according to rule split", "Xl_cost", ",", "Xl_pred", ",", "_", "=", "self", ".", "_node_cost", "(", "y_true", "[", "filter_Xl", "]", ",", "cost_mat", "[", "filter_Xl", ",", ":", "]", ")", "Xr_cost", ",", "_", ",", "_", "=", "self", ".", "_node_cost", "(", "y_true", "[", "filter_Xr", "]", ",", "cost_mat", "[", "filter_Xr", ",", ":", "]", ")", "if", "self", ".", "criterion_weight", ":", "n_samples_Xl", "=", "np", ".", "nonzero", "(", "filter_Xl", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "Xl_w", "=", "n_samples_Xl", "*", "1.0", "/", "n_samples", "Xr_w", "=", "1", "-", "Xl_w", "gain", "=", "round", "(", "(", "cost_base", "-", "(", "Xl_w", "*", "Xl_cost", "+", "Xr_w", "*", "Xr_cost", ")", ")", "/", "cost_base", ",", "6", ")", "else", ":", "gain", "=", "round", "(", "(", "cost_base", "-", "(", "Xl_cost", "+", "Xr_cost", ")", ")", "/", "cost_base", ",", "6", ")", "return", "gain", ",", "Xl_pred" ]
Private function to calculate the gain in cost of using split in the current node. Parameters ---------- cost_base : float Cost of the naive prediction y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. split : tuple of len = 2 split[0] = feature to split = j split[1] = where to split = l Returns ------- tuple(gain : float, left node prediction : int)
[ "Private", "function", "to", "calculate", "the", "gain", "in", "cost", "of", "using", "split", "in", "the", "current", "node", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L185-L242
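The weighted-gain formula above in one line, with toy leaf costs (illustrative numbers only):

cost_base, Xl_cost, Xr_cost = 10.0, 2.0, 1.0
Xl_w = 6 / 10                                   # fraction of samples sent to the left leaf
gain = round((cost_base - (Xl_w * Xl_cost + (1 - Xl_w) * Xr_cost)) / cost_base, 6)
print(gain)                                     # 0.84: relative cost saved by the split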
4,061
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._best_split
def _best_split(self, y_true, X, cost_mat): """ Private function to calculate the split that gives the best gain. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(split : tuple(j, l), gain : float, left node prediction : int, y_pred : int, y_prob : float) """ n_samples, n_features = X.shape num_pct = self.num_pct cost_base, y_pred, y_prob = self._node_cost(y_true, cost_mat) # Calculate the gain of all features each split in num_pct gains = np.zeros((n_features, num_pct)) pred = np.zeros((n_features, num_pct)) splits = np.zeros((n_features, num_pct)) # Selected features selected_features = np.arange(0, self.n_features_) # Add random state np.random.shuffle(selected_features) selected_features = selected_features[:self.max_features_] selected_features.sort() #TODO: # Skip the CPU intensive evaluation of the impurity criterion for # features that were already detected as constant (hence not suitable # for good splitting) by ancestor nodes and save the information on # newly discovered constant features to spare computation on descendant # nodes. # For each feature test all possible splits for j in selected_features: splits[j, :] = np.percentile(X[:, j], np.arange(0, 100, 100.0 / num_pct).tolist()) for l in range(num_pct): # Avoid repeated values, since np.percentile may return repeated values if l == 0 or (l > 0 and splits[j, l] != splits[j, l - 1]): split = (j, splits[j, l]) gains[j, l], pred[j, l] = self._calculate_gain(cost_base, y_true, X, cost_mat, split) best_split = np.unravel_index(gains.argmax(), gains.shape) return (best_split[0], splits[best_split]), gains.max(), pred[best_split], y_pred, y_prob
python
def _best_split(self, y_true, X, cost_mat): """ Private function to calculate the split that gives the best gain. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(split : tuple(j, l), gain : float, left node prediction : int, y_pred : int, y_prob : float) """ n_samples, n_features = X.shape num_pct = self.num_pct cost_base, y_pred, y_prob = self._node_cost(y_true, cost_mat) # Calculate the gain of all features each split in num_pct gains = np.zeros((n_features, num_pct)) pred = np.zeros((n_features, num_pct)) splits = np.zeros((n_features, num_pct)) # Selected features selected_features = np.arange(0, self.n_features_) # Add random state np.random.shuffle(selected_features) selected_features = selected_features[:self.max_features_] selected_features.sort() #TODO: # Skip the CPU intensive evaluation of the impurity criterion for # features that were already detected as constant (hence not suitable # for good splitting) by ancestor nodes and save the information on # newly discovered constant features to spare computation on descendant # nodes. # For each feature test all possible splits for j in selected_features: splits[j, :] = np.percentile(X[:, j], np.arange(0, 100, 100.0 / num_pct).tolist()) for l in range(num_pct): # Avoid repeated values, since np.percentile may return repeated values if l == 0 or (l > 0 and splits[j, l] != splits[j, l - 1]): split = (j, splits[j, l]) gains[j, l], pred[j, l] = self._calculate_gain(cost_base, y_true, X, cost_mat, split) best_split = np.unravel_index(gains.argmax(), gains.shape) return (best_split[0], splits[best_split]), gains.max(), pred[best_split], y_pred, y_prob
[ "def", "_best_split", "(", "self", ",", "y_true", ",", "X", ",", "cost_mat", ")", ":", "n_samples", ",", "n_features", "=", "X", ".", "shape", "num_pct", "=", "self", ".", "num_pct", "cost_base", ",", "y_pred", ",", "y_prob", "=", "self", ".", "_node_cost", "(", "y_true", ",", "cost_mat", ")", "# Calculate the gain of all features each split in num_pct", "gains", "=", "np", ".", "zeros", "(", "(", "n_features", ",", "num_pct", ")", ")", "pred", "=", "np", ".", "zeros", "(", "(", "n_features", ",", "num_pct", ")", ")", "splits", "=", "np", ".", "zeros", "(", "(", "n_features", ",", "num_pct", ")", ")", "# Selected features", "selected_features", "=", "np", ".", "arange", "(", "0", ",", "self", ".", "n_features_", ")", "# Add random state", "np", ".", "random", ".", "shuffle", "(", "selected_features", ")", "selected_features", "=", "selected_features", "[", ":", "self", ".", "max_features_", "]", "selected_features", ".", "sort", "(", ")", "#TODO: # Skip the CPU intensive evaluation of the impurity criterion for", "# features that were already detected as constant (hence not suitable", "# for good splitting) by ancestor nodes and save the information on", "# newly discovered constant features to spare computation on descendant", "# nodes.", "# For each feature test all possible splits", "for", "j", "in", "selected_features", ":", "splits", "[", "j", ",", ":", "]", "=", "np", ".", "percentile", "(", "X", "[", ":", ",", "j", "]", ",", "np", ".", "arange", "(", "0", ",", "100", ",", "100.0", "/", "num_pct", ")", ".", "tolist", "(", ")", ")", "for", "l", "in", "range", "(", "num_pct", ")", ":", "# Avoid repeated values, since np.percentile may return repeated values", "if", "l", "==", "0", "or", "(", "l", ">", "0", "and", "splits", "[", "j", ",", "l", "]", "!=", "splits", "[", "j", ",", "l", "-", "1", "]", ")", ":", "split", "=", "(", "j", ",", "splits", "[", "j", ",", "l", "]", ")", "gains", "[", "j", ",", "l", "]", ",", "pred", "[", "j", ",", "l", "]", "=", "self", ".", "_calculate_gain", "(", "cost_base", ",", "y_true", ",", "X", ",", "cost_mat", ",", "split", ")", "best_split", "=", "np", ".", "unravel_index", "(", "gains", ".", "argmax", "(", ")", ",", "gains", ".", "shape", ")", "return", "(", "best_split", "[", "0", "]", ",", "splits", "[", "best_split", "]", ")", ",", "gains", ".", "max", "(", ")", ",", "pred", "[", "best_split", "]", ",", "y_pred", ",", "y_prob" ]
Private function to calculate the split that gives the best gain. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- tuple(split : tuple(j, l), gain : float, left node prediction : int, y_pred : int, y_prob : float)
[ "Private", "function", "to", "calculate", "the", "split", "that", "gives", "the", "best", "gain", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L244-L302
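The candidate thresholds come from an evenly spaced percentile grid; a quick look at that grid on a tiny feature column (illustrative):

import numpy as np

x = np.array([1.0, 2.0, 2.0, 3.0, 10.0])
num_pct = 4
print(np.percentile(x, np.arange(0, 100, 100.0 / num_pct).tolist()))
# [1. 2. 2. 3.]: repeated values are why the duplicate-split guard above is needed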
4,062
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._tree_grow
def _tree_grow(self, y_true, X, cost_mat, level=0): """ Private recursive function to grow the decision tree. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- Tree : Object Container of the decision tree NOTE: it is not the same structure as the sklearn.tree.tree object """ #TODO: Find error, add min_samples_split if len(X.shape) == 1: tree = dict(y_pred=y_true, y_prob=0.5, level=level, split=-1, n_samples=1, gain=0) return tree # Calculate the best split of the current node split, gain, Xl_pred, y_pred, y_prob = self._best_split(y_true, X, cost_mat) n_samples, n_features = X.shape # Construct the tree object as a dictionary #TODO: Convert tree to be equal to sklearn.tree.tree object tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=n_samples, gain=gain) # Check the stopping criteria if gain < self.min_gain: return tree if self.max_depth is not None: if level >= self.max_depth: return tree if n_samples <= self.min_samples_split: return tree j, l = split filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0] if min(n_samples_Xl, n_samples_Xr) <= self.min_samples_leaf: return tree # No stooping criteria is met tree['split'] = split tree['node'] = self.tree_.n_nodes self.tree_.n_nodes += 1 tree['sl'] = self._tree_grow(y_true[filter_Xl], X[filter_Xl], cost_mat[filter_Xl], level + 1) tree['sr'] = self._tree_grow(y_true[filter_Xr], X[filter_Xr], cost_mat[filter_Xr], level + 1) return tree
python
def _tree_grow(self, y_true, X, cost_mat, level=0): """ Private recursive function to grow the decision tree. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- Tree : Object Container of the decision tree NOTE: it is not the same structure as the sklearn.tree.tree object """ #TODO: Find error, add min_samples_split if len(X.shape) == 1: tree = dict(y_pred=y_true, y_prob=0.5, level=level, split=-1, n_samples=1, gain=0) return tree # Calculate the best split of the current node split, gain, Xl_pred, y_pred, y_prob = self._best_split(y_true, X, cost_mat) n_samples, n_features = X.shape # Construct the tree object as a dictionary #TODO: Convert tree to be equal to sklearn.tree.tree object tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=n_samples, gain=gain) # Check the stopping criteria if gain < self.min_gain: return tree if self.max_depth is not None: if level >= self.max_depth: return tree if n_samples <= self.min_samples_split: return tree j, l = split filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0] if min(n_samples_Xl, n_samples_Xr) <= self.min_samples_leaf: return tree # No stooping criteria is met tree['split'] = split tree['node'] = self.tree_.n_nodes self.tree_.n_nodes += 1 tree['sl'] = self._tree_grow(y_true[filter_Xl], X[filter_Xl], cost_mat[filter_Xl], level + 1) tree['sr'] = self._tree_grow(y_true[filter_Xr], X[filter_Xr], cost_mat[filter_Xr], level + 1) return tree
[ "def", "_tree_grow", "(", "self", ",", "y_true", ",", "X", ",", "cost_mat", ",", "level", "=", "0", ")", ":", "#TODO: Find error, add min_samples_split", "if", "len", "(", "X", ".", "shape", ")", "==", "1", ":", "tree", "=", "dict", "(", "y_pred", "=", "y_true", ",", "y_prob", "=", "0.5", ",", "level", "=", "level", ",", "split", "=", "-", "1", ",", "n_samples", "=", "1", ",", "gain", "=", "0", ")", "return", "tree", "# Calculate the best split of the current node", "split", ",", "gain", ",", "Xl_pred", ",", "y_pred", ",", "y_prob", "=", "self", ".", "_best_split", "(", "y_true", ",", "X", ",", "cost_mat", ")", "n_samples", ",", "n_features", "=", "X", ".", "shape", "# Construct the tree object as a dictionary", "#TODO: Convert tree to be equal to sklearn.tree.tree object", "tree", "=", "dict", "(", "y_pred", "=", "y_pred", ",", "y_prob", "=", "y_prob", ",", "level", "=", "level", ",", "split", "=", "-", "1", ",", "n_samples", "=", "n_samples", ",", "gain", "=", "gain", ")", "# Check the stopping criteria", "if", "gain", "<", "self", ".", "min_gain", ":", "return", "tree", "if", "self", ".", "max_depth", "is", "not", "None", ":", "if", "level", ">=", "self", ".", "max_depth", ":", "return", "tree", "if", "n_samples", "<=", "self", ".", "min_samples_split", ":", "return", "tree", "j", ",", "l", "=", "split", "filter_Xl", "=", "(", "X", "[", ":", ",", "j", "]", "<=", "l", ")", "filter_Xr", "=", "~", "filter_Xl", "n_samples_Xl", "=", "np", ".", "nonzero", "(", "filter_Xl", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "n_samples_Xr", "=", "np", ".", "nonzero", "(", "filter_Xr", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "if", "min", "(", "n_samples_Xl", ",", "n_samples_Xr", ")", "<=", "self", ".", "min_samples_leaf", ":", "return", "tree", "# No stooping criteria is met", "tree", "[", "'split'", "]", "=", "split", "tree", "[", "'node'", "]", "=", "self", ".", "tree_", ".", "n_nodes", "self", ".", "tree_", ".", "n_nodes", "+=", "1", "tree", "[", "'sl'", "]", "=", "self", ".", "_tree_grow", "(", "y_true", "[", "filter_Xl", "]", ",", "X", "[", "filter_Xl", "]", ",", "cost_mat", "[", "filter_Xl", "]", ",", "level", "+", "1", ")", "tree", "[", "'sr'", "]", "=", "self", ".", "_tree_grow", "(", "y_true", "[", "filter_Xr", "]", ",", "X", "[", "filter_Xr", "]", ",", "cost_mat", "[", "filter_Xr", "]", ",", "level", "+", "1", ")", "return", "tree" ]
Private recursive function to grow the decision tree. Parameters ---------- y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- Tree : Object Container of the decision tree NOTE: it is not the same structure as the sklearn.tree.tree object
[ "Private", "recursive", "function", "to", "grow", "the", "decision", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L304-L369
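The grown tree is a plain nested dict, with split=(feature j, threshold l) on internal nodes and split=-1 on leaves. A hand-built two-leaf example of that layout (hypothetical values, illustrative only):

tree = dict(y_pred=0, y_prob=0.4, level=0, split=(0, 2.5), n_samples=10, gain=0.3,
            node=0,
            sl=dict(y_pred=0, y_prob=0.2, level=1, split=-1, n_samples=6, gain=0.0),
            sr=dict(y_pred=1, y_prob=0.8, level=1, split=-1, n_samples=4, gain=0.0))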
4,063
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._nodes
def _nodes(self, tree): """ Private function that find the number of nodes in a tree. Parameters ---------- tree : object Returns ------- nodes : array like of shape [n_nodes] """ def recourse(temp_tree_, nodes): if isinstance(temp_tree_, dict): if temp_tree_['split'] != -1: nodes.append(temp_tree_['node']) if temp_tree_['split'] != -1: for k in ['sl', 'sr']: recourse(temp_tree_[k], nodes) return None nodes_ = [] recourse(tree, nodes_) return nodes_
python
def _nodes(self, tree): """ Private function that find the number of nodes in a tree. Parameters ---------- tree : object Returns ------- nodes : array like of shape [n_nodes] """ def recourse(temp_tree_, nodes): if isinstance(temp_tree_, dict): if temp_tree_['split'] != -1: nodes.append(temp_tree_['node']) if temp_tree_['split'] != -1: for k in ['sl', 'sr']: recourse(temp_tree_[k], nodes) return None nodes_ = [] recourse(tree, nodes_) return nodes_
[ "def", "_nodes", "(", "self", ",", "tree", ")", ":", "def", "recourse", "(", "temp_tree_", ",", "nodes", ")", ":", "if", "isinstance", "(", "temp_tree_", ",", "dict", ")", ":", "if", "temp_tree_", "[", "'split'", "]", "!=", "-", "1", ":", "nodes", ".", "append", "(", "temp_tree_", "[", "'node'", "]", ")", "if", "temp_tree_", "[", "'split'", "]", "!=", "-", "1", ":", "for", "k", "in", "[", "'sl'", ",", "'sr'", "]", ":", "recourse", "(", "temp_tree_", "[", "k", "]", ",", "nodes", ")", "return", "None", "nodes_", "=", "[", "]", "recourse", "(", "tree", ",", "nodes_", ")", "return", "nodes_" ]
Private function that finds the ids of the split nodes in a tree. Parameters ---------- tree : object Returns ------- nodes : array-like of shape [n_nodes]
[ "Private", "function", "that", "find", "the", "number", "of", "nodes", "in", "a", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L444-L466
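Running the same recursive collection over the dict layout shown earlier (self-contained toy copy; illustrative only):

def nodes(tree, acc=None):
    # collect the ids of internal (split) nodes, like _nodes above
    acc = [] if acc is None else acc
    if isinstance(tree, dict) and tree['split'] != -1:
        acc.append(tree['node'])
        for k in ('sl', 'sr'):
            nodes(tree[k], acc)
    return acc

leaf = dict(split=-1)
tree = dict(split=(0, 2.5), node=0, sl=dict(split=(1, 0.0), node=1, sl=leaf, sr=leaf), sr=leaf)
print(nodes(tree))  # [0, 1]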
4,064
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._classify
def _classify(self, X, tree, proba=False): """ Private function that classify a dataset using tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. tree : object proba : bool, optional (default=False) If True then return probabilities else return class Returns ------- prediction : array of shape = [n_samples] If proba then return the predicted positive probabilities, else return the predicted class for each example in X """ n_samples, n_features = X.shape predicted = np.ones(n_samples) # Check if final node if tree['split'] == -1: if not proba: predicted = predicted * tree['y_pred'] else: predicted = predicted * tree['y_prob'] else: j, l = tree['split'] filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0] if n_samples_Xl == 0: # If left node is empty only continue with right predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba) elif n_samples_Xr == 0: # If right node is empty only continue with left predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba) else: predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba) predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba) return predicted
python
def _classify(self, X, tree, proba=False): """ Private function that classify a dataset using tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. tree : object proba : bool, optional (default=False) If True then return probabilities else return class Returns ------- prediction : array of shape = [n_samples] If proba then return the predicted positive probabilities, else return the predicted class for each example in X """ n_samples, n_features = X.shape predicted = np.ones(n_samples) # Check if final node if tree['split'] == -1: if not proba: predicted = predicted * tree['y_pred'] else: predicted = predicted * tree['y_prob'] else: j, l = tree['split'] filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0] if n_samples_Xl == 0: # If left node is empty only continue with right predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba) elif n_samples_Xr == 0: # If right node is empty only continue with left predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba) else: predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba) predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba) return predicted
[ "def", "_classify", "(", "self", ",", "X", ",", "tree", ",", "proba", "=", "False", ")", ":", "n_samples", ",", "n_features", "=", "X", ".", "shape", "predicted", "=", "np", ".", "ones", "(", "n_samples", ")", "# Check if final node", "if", "tree", "[", "'split'", "]", "==", "-", "1", ":", "if", "not", "proba", ":", "predicted", "=", "predicted", "*", "tree", "[", "'y_pred'", "]", "else", ":", "predicted", "=", "predicted", "*", "tree", "[", "'y_prob'", "]", "else", ":", "j", ",", "l", "=", "tree", "[", "'split'", "]", "filter_Xl", "=", "(", "X", "[", ":", ",", "j", "]", "<=", "l", ")", "filter_Xr", "=", "~", "filter_Xl", "n_samples_Xl", "=", "np", ".", "nonzero", "(", "filter_Xl", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "n_samples_Xr", "=", "np", ".", "nonzero", "(", "filter_Xr", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "if", "n_samples_Xl", "==", "0", ":", "# If left node is empty only continue with right", "predicted", "[", "filter_Xr", "]", "=", "self", ".", "_classify", "(", "X", "[", "filter_Xr", ",", ":", "]", ",", "tree", "[", "'sr'", "]", ",", "proba", ")", "elif", "n_samples_Xr", "==", "0", ":", "# If right node is empty only continue with left", "predicted", "[", "filter_Xl", "]", "=", "self", ".", "_classify", "(", "X", "[", "filter_Xl", ",", ":", "]", ",", "tree", "[", "'sl'", "]", ",", "proba", ")", "else", ":", "predicted", "[", "filter_Xl", "]", "=", "self", ".", "_classify", "(", "X", "[", "filter_Xl", ",", ":", "]", ",", "tree", "[", "'sl'", "]", ",", "proba", ")", "predicted", "[", "filter_Xr", "]", "=", "self", ".", "_classify", "(", "X", "[", "filter_Xr", ",", ":", "]", ",", "tree", "[", "'sr'", "]", ",", "proba", ")", "return", "predicted" ]
Private function that classifies a dataset using the tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. tree : object proba : bool, optional (default=False) If True then return probabilities else return class Returns ------- prediction : array of shape = [n_samples] If proba then return the predicted positive probabilities, else return the predicted class for each example in X
[ "Private", "function", "that", "classify", "a", "dataset", "using", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L468-L513
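A vectorized traversal of a one-split toy tree, mirroring the masking logic above (self-contained sketch, illustrative only):

import numpy as np

tree = dict(split=(0, 2.5),
            sl=dict(split=-1, y_pred=0, y_prob=0.2),
            sr=dict(split=-1, y_pred=1, y_prob=0.8))

def classify(X, tree, proba=False):
    if tree['split'] == -1:  # leaf: emit the node's class or probability
        return np.full(len(X), tree['y_prob'] if proba else tree['y_pred'], dtype=float)
    j, l = tree['split']
    out = np.empty(len(X))
    left = X[:, j] <= l
    out[left] = classify(X[left], tree['sl'], proba)
    out[~left] = classify(X[~left], tree['sr'], proba)
    return out

X = np.array([[1.0], [3.0]])
print(classify(X, tree), classify(X, tree, proba=True))  # [0. 1.] [0.2 0.8]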
4,065
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier.predict
def predict(self, X): """ Predict class of X. The predicted class for each sample in X is returned. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted classes, """ #TODO: Check consistency of X if self.pruned: tree_ = self.tree_.tree_pruned else: tree_ = self.tree_.tree return self._classify(X, tree_, proba=False)
python
def predict(self, X): """ Predict class of X. The predicted class for each sample in X is returned. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted classes, """ #TODO: Check consistency of X if self.pruned: tree_ = self.tree_.tree_pruned else: tree_ = self.tree_.tree return self._classify(X, tree_, proba=False)
[ "def", "predict", "(", "self", ",", "X", ")", ":", "#TODO: Check consistency of X", "if", "self", ".", "pruned", ":", "tree_", "=", "self", ".", "tree_", ".", "tree_pruned", "else", ":", "tree_", "=", "self", ".", "tree_", ".", "tree", "return", "self", ".", "_classify", "(", "X", ",", "tree_", ",", "proba", "=", "False", ")" ]
Predict class of X. The predicted class for each sample in X is returned. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted classes.
[ "Predict", "class", "of", "X", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L515-L537
4,066
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier.predict_proba
def predict_proba(self, X): """Predict class probabilities of the input samples X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- prob : array of shape = [n_samples, 2] The class probabilities of the input samples. """ #TODO: Check consistency of X n_samples, n_features = X.shape prob = np.zeros((n_samples, 2)) if self.pruned: tree_ = self.tree_.tree_pruned else: tree_ = self.tree_.tree prob[:, 1] = self._classify(X, tree_, proba=True) prob[:, 0] = 1 - prob[:, 1] return prob
python
def predict_proba(self, X): """Predict class probabilities of the input samples X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- prob : array of shape = [n_samples, 2] The class probabilities of the input samples. """ #TODO: Check consistency of X n_samples, n_features = X.shape prob = np.zeros((n_samples, 2)) if self.pruned: tree_ = self.tree_.tree_pruned else: tree_ = self.tree_.tree prob[:, 1] = self._classify(X, tree_, proba=True) prob[:, 0] = 1 - prob[:, 1] return prob
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "#TODO: Check consistency of X", "n_samples", ",", "n_features", "=", "X", ".", "shape", "prob", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "2", ")", ")", "if", "self", ".", "pruned", ":", "tree_", "=", "self", ".", "tree_", ".", "tree_pruned", "else", ":", "tree_", "=", "self", ".", "tree_", ".", "tree", "prob", "[", ":", ",", "1", "]", "=", "self", ".", "_classify", "(", "X", ",", "tree_", ",", "proba", "=", "True", ")", "prob", "[", ":", ",", "0", "]", "=", "1", "-", "prob", "[", ":", ",", "1", "]", "return", "prob" ]
Predict class probabilities of the input samples X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- prob : array of shape = [n_samples, 2] The class probabilities of the input samples.
[ "Predict", "class", "probabilities", "of", "the", "input", "samples", "X", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L539-L564
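A hedged end-to-end usage sketch covering predict and predict_proba together. The import path follows the repo layout shown above (costcla/models/cost_tree.py), and the fit signature taking cost_mat alongside X and y is inferred from the docstrings, so treat this as a sketch rather than verified API:

import numpy as np
from costcla.models import CostSensitiveDecisionTreeClassifier  # assumed import path

X = np.random.rand(200, 3)
y = (X[:, 0] > 0.5).astype(int)
cost_mat = np.tile([1.0, 5.0, 0.0, 0.0], (200, 1))  # columns: FP, FN, TP, TN

clf = CostSensitiveDecisionTreeClassifier()
clf.fit(X, y, cost_mat)          # assumed signature: fit(X, y, cost_mat)
print(clf.predict(X[:5]))        # predicted classes
print(clf.predict_proba(X[:5]))  # [:, 0] = P(y=0), [:, 1] = P(y=1)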
4,067
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._delete_node
def _delete_node(self, tree, node): """ Private function that eliminates a node from the tree. Parameters ---------- tree : object node : int node to be eliminated from the tree Returns ------- pruned_tree : object """ # Calculate gains temp_tree = copy.deepcopy(tree) def recourse(temp_tree_, del_node): if isinstance(temp_tree_, dict): if temp_tree_['split'] != -1: if temp_tree_['node'] == del_node: del temp_tree_['sr'] del temp_tree_['sl'] del temp_tree_['node'] temp_tree_['split'] = -1 else: for k in ['sl', 'sr']: recourse(temp_tree_[k], del_node) return None recourse(temp_tree, node) return temp_tree
python
def _delete_node(self, tree, node): """ Private function that eliminates a node from the tree. Parameters ---------- tree : object node : int node to be eliminated from the tree Returns ------- pruned_tree : object """ # Calculate gains temp_tree = copy.deepcopy(tree) def recourse(temp_tree_, del_node): if isinstance(temp_tree_, dict): if temp_tree_['split'] != -1: if temp_tree_['node'] == del_node: del temp_tree_['sr'] del temp_tree_['sl'] del temp_tree_['node'] temp_tree_['split'] = -1 else: for k in ['sl', 'sr']: recourse(temp_tree_[k], del_node) return None recourse(temp_tree, node) return temp_tree
[ "def", "_delete_node", "(", "self", ",", "tree", ",", "node", ")", ":", "# Calculate gains", "temp_tree", "=", "copy", ".", "deepcopy", "(", "tree", ")", "def", "recourse", "(", "temp_tree_", ",", "del_node", ")", ":", "if", "isinstance", "(", "temp_tree_", ",", "dict", ")", ":", "if", "temp_tree_", "[", "'split'", "]", "!=", "-", "1", ":", "if", "temp_tree_", "[", "'node'", "]", "==", "del_node", ":", "del", "temp_tree_", "[", "'sr'", "]", "del", "temp_tree_", "[", "'sl'", "]", "del", "temp_tree_", "[", "'node'", "]", "temp_tree_", "[", "'split'", "]", "=", "-", "1", "else", ":", "for", "k", "in", "[", "'sl'", ",", "'sr'", "]", ":", "recourse", "(", "temp_tree_", "[", "k", "]", ",", "del_node", ")", "return", "None", "recourse", "(", "temp_tree", ",", "node", ")", "return", "temp_tree" ]
Private function that eliminates a node from the tree. Parameters ---------- tree : object node : int node to be eliminated from the tree Returns ------- pruned_tree : object
[ "Private", "function", "that", "eliminate", "node", "from", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L566-L599
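A self-contained toy reproduction of the _delete_node recursion on the dict-based tree layout used above (keys 'node', 'split', 'sl', 'sr' as in the source; the tree values are invented for illustration):

import copy

tree = {'node': 0, 'split': 1,
        'sl': {'node': 1, 'split': -1},
        'sr': {'node': 2, 'split': 0,
               'sl': {'node': 3, 'split': -1},
               'sr': {'node': 4, 'split': -1}}}

def delete_node(tree, node):
    temp = copy.deepcopy(tree)          # never mutate the caller's tree
    def recourse(t, del_node):
        if isinstance(t, dict) and t['split'] != -1:
            if t['node'] == del_node:
                del t['sr'], t['sl'], t['node']
                t['split'] = -1         # the internal node collapses to a leaf
            else:
                for k in ('sl', 'sr'):
                    recourse(t[k], del_node)
    recourse(temp, node)
    return temp

pruned = delete_node(tree, 2)   # node 2 and its children become a single leaf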
4,068
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier._pruning
def _pruning(self, X, y_true, cost_mat): """ Private function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. """ # Calculate gains nodes = self._nodes(self.tree_.tree_pruned) n_nodes = len(nodes) gains = np.zeros(n_nodes) y_pred = self._classify(X, self.tree_.tree_pruned) cost_base = cost_loss(y_true, y_pred, cost_mat) for m, node in enumerate(nodes): # Create temporal tree by eliminating node from tree_pruned temp_tree = self._delete_node(self.tree_.tree_pruned, node) y_pred = self._classify(X, temp_tree) nodes_pruned = self._nodes(temp_tree) # Calculate %gain gain = (cost_base - cost_loss(y_true, y_pred, cost_mat)) / cost_base # Calculate %gain_size gain_size = (len(nodes) - len(nodes_pruned)) * 1.0 / len(nodes) # Calculate weighted gain gains[m] = gain * gain_size best_gain = np.max(gains) best_node = nodes[int(np.argmax(gains))] if best_gain > self.min_gain: self.tree_.tree_pruned = self._delete_node(self.tree_.tree_pruned, best_node) # If best tree is not root node, then recursively pruning the tree if best_node != 0: self._pruning(X, y_true, cost_mat)
python
def _pruning(self, X, y_true, cost_mat): """ Private function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. """ # Calculate gains nodes = self._nodes(self.tree_.tree_pruned) n_nodes = len(nodes) gains = np.zeros(n_nodes) y_pred = self._classify(X, self.tree_.tree_pruned) cost_base = cost_loss(y_true, y_pred, cost_mat) for m, node in enumerate(nodes): # Create temporal tree by eliminating node from tree_pruned temp_tree = self._delete_node(self.tree_.tree_pruned, node) y_pred = self._classify(X, temp_tree) nodes_pruned = self._nodes(temp_tree) # Calculate %gain gain = (cost_base - cost_loss(y_true, y_pred, cost_mat)) / cost_base # Calculate %gain_size gain_size = (len(nodes) - len(nodes_pruned)) * 1.0 / len(nodes) # Calculate weighted gain gains[m] = gain * gain_size best_gain = np.max(gains) best_node = nodes[int(np.argmax(gains))] if best_gain > self.min_gain: self.tree_.tree_pruned = self._delete_node(self.tree_.tree_pruned, best_node) # If best tree is not root node, then recursively pruning the tree if best_node != 0: self._pruning(X, y_true, cost_mat)
[ "def", "_pruning", "(", "self", ",", "X", ",", "y_true", ",", "cost_mat", ")", ":", "# Calculate gains", "nodes", "=", "self", ".", "_nodes", "(", "self", ".", "tree_", ".", "tree_pruned", ")", "n_nodes", "=", "len", "(", "nodes", ")", "gains", "=", "np", ".", "zeros", "(", "n_nodes", ")", "y_pred", "=", "self", ".", "_classify", "(", "X", ",", "self", ".", "tree_", ".", "tree_pruned", ")", "cost_base", "=", "cost_loss", "(", "y_true", ",", "y_pred", ",", "cost_mat", ")", "for", "m", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "# Create temporal tree by eliminating node from tree_pruned", "temp_tree", "=", "self", ".", "_delete_node", "(", "self", ".", "tree_", ".", "tree_pruned", ",", "node", ")", "y_pred", "=", "self", ".", "_classify", "(", "X", ",", "temp_tree", ")", "nodes_pruned", "=", "self", ".", "_nodes", "(", "temp_tree", ")", "# Calculate %gain", "gain", "=", "(", "cost_base", "-", "cost_loss", "(", "y_true", ",", "y_pred", ",", "cost_mat", ")", ")", "/", "cost_base", "# Calculate %gain_size", "gain_size", "=", "(", "len", "(", "nodes", ")", "-", "len", "(", "nodes_pruned", ")", ")", "*", "1.0", "/", "len", "(", "nodes", ")", "# Calculate weighted gain", "gains", "[", "m", "]", "=", "gain", "*", "gain_size", "best_gain", "=", "np", ".", "max", "(", "gains", ")", "best_node", "=", "nodes", "[", "int", "(", "np", ".", "argmax", "(", "gains", ")", ")", "]", "if", "best_gain", ">", "self", ".", "min_gain", ":", "self", ".", "tree_", ".", "tree_pruned", "=", "self", ".", "_delete_node", "(", "self", ".", "tree_", ".", "tree_pruned", ",", "best_node", ")", "# If best tree is not root node, then recursively pruning the tree", "if", "best_node", "!=", "0", ":", "self", ".", "_pruning", "(", "X", ",", "y_true", ",", "cost_mat", ")" ]
Private function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example.
[ "Private", "function", "that", "prune", "the", "decision", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L601-L652
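The pruning criterion above weights the relative cost reduction by the relative shrinkage of the tree; a small numeric sketch with assumed values:

# Assumed numbers, purely to illustrate the weighted gain computed in _pruning.
cost_base = 100.0       # cost of the current pruned tree on (X, y_true, cost_mat)
cost_candidate = 92.0   # cost after deleting one candidate node
n_nodes, n_nodes_after = 15, 12

gain = (cost_base - cost_candidate) / cost_base       # 0.08 relative cost saving
gain_size = (n_nodes - n_nodes_after) / n_nodes       # 0.2 of the nodes removed
weighted_gain = gain * gain_size                      # 0.016, compared to min_gain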
4,069
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
CostSensitiveDecisionTreeClassifier.pruning
def pruning(self, X, y, cost_mat): """ Function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. """ self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree) if self.tree_.n_nodes > 0: self._pruning(X, y, cost_mat) nodes_pruned = self._nodes(self.tree_.tree_pruned) self.tree_.n_nodes_pruned = len(nodes_pruned)
python
def pruning(self, X, y, cost_mat): """ Function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. """ self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree) if self.tree_.n_nodes > 0: self._pruning(X, y, cost_mat) nodes_pruned = self._nodes(self.tree_.tree_pruned) self.tree_.n_nodes_pruned = len(nodes_pruned)
[ "def", "pruning", "(", "self", ",", "X", ",", "y", ",", "cost_mat", ")", ":", "self", ".", "tree_", ".", "tree_pruned", "=", "copy", ".", "deepcopy", "(", "self", ".", "tree_", ".", "tree", ")", "if", "self", ".", "tree_", ".", "n_nodes", ">", "0", ":", "self", ".", "_pruning", "(", "X", ",", "y", ",", "cost_mat", ")", "nodes_pruned", "=", "self", ".", "_nodes", "(", "self", ".", "tree_", ".", "tree_pruned", ")", "self", ".", "tree_", ".", "n_nodes_pruned", "=", "len", "(", "nodes_pruned", ")" ]
Function that prunes the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example.
[ "Function", "that", "prune", "the", "decision", "tree", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L654-L676
4,070
albahnsen/CostSensitiveClassification
costcla/metrics/costs.py
cost_loss
def cost_loss(y_true, y_pred, cost_mat): #TODO: update description """Cost classification loss. This function calculates the cost of using y_pred on y_true with cost-matrix cost-mat. It differs from traditional classification evaluation measures, since measures such as accuracy assign the same cost to different errors. That is not the case in many real-world classification problems, which are example-dependent cost-sensitive in nature: the costs due to misclassification vary between examples. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- loss : float Cost of using y_pred on y_true with cost-matrix cost-mat References ---------- .. [1] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth International Joint Conference on Artificial Intelligence, 973-978, 2001. .. [2] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- savings_score Examples -------- >>> import numpy as np >>> from costcla.metrics import cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> cost_loss(y_true, y_pred, cost_mat) 3 """ #TODO: Check consistency of cost_mat y_true = column_or_1d(y_true) y_true = (y_true == 1).astype(np.float) y_pred = column_or_1d(y_pred) y_pred = (y_pred == 1).astype(np.float) cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2]) cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3]) return np.sum(cost)
python
def cost_loss(y_true, y_pred, cost_mat): #TODO: update description """Cost classification loss. This function calculates the cost of using y_pred on y_true with cost-matrix cost-mat. It differs from traditional classification evaluation measures, since measures such as accuracy assign the same cost to different errors. That is not the case in many real-world classification problems, which are example-dependent cost-sensitive in nature: the costs due to misclassification vary between examples. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- loss : float Cost of using y_pred on y_true with cost-matrix cost-mat References ---------- .. [1] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth International Joint Conference on Artificial Intelligence, 973-978, 2001. .. [2] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- savings_score Examples -------- >>> import numpy as np >>> from costcla.metrics import cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> cost_loss(y_true, y_pred, cost_mat) 3 """ #TODO: Check consistency of cost_mat y_true = column_or_1d(y_true) y_true = (y_true == 1).astype(np.float) y_pred = column_or_1d(y_pred) y_pred = (y_pred == 1).astype(np.float) cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2]) cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3]) return np.sum(cost)
[ "def", "cost_loss", "(", "y_true", ",", "y_pred", ",", "cost_mat", ")", ":", "#TODO: update description", "#TODO: Check consistency of cost_mat", "y_true", "=", "column_or_1d", "(", "y_true", ")", "y_true", "=", "(", "y_true", "==", "1", ")", ".", "astype", "(", "np", ".", "float", ")", "y_pred", "=", "column_or_1d", "(", "y_pred", ")", "y_pred", "=", "(", "y_pred", "==", "1", ")", ".", "astype", "(", "np", ".", "float", ")", "cost", "=", "y_true", "*", "(", "(", "1", "-", "y_pred", ")", "*", "cost_mat", "[", ":", ",", "1", "]", "+", "y_pred", "*", "cost_mat", "[", ":", ",", "2", "]", ")", "cost", "+=", "(", "1", "-", "y_true", ")", "*", "(", "y_pred", "*", "cost_mat", "[", ":", ",", "0", "]", "+", "(", "1", "-", "y_pred", ")", "*", "cost_mat", "[", ":", ",", "3", "]", ")", "return", "np", ".", "sum", "(", "cost", ")" ]
Cost classification loss. This function calculates the cost of using y_pred on y_true with cost-matrix cost-mat. It differs from traditional classification evaluation measures, since measures such as accuracy assign the same cost to different errors. That is not the case in many real-world classification problems, which are example-dependent cost-sensitive in nature: the costs due to misclassification vary between examples. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- loss : float Cost of using y_pred on y_true with cost-matrix cost-mat References ---------- .. [1] C. Elkan, "The foundations of Cost-Sensitive Learning", in Seventeenth International Joint Conference on Artificial Intelligence, 973-978, 2001. .. [2] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- savings_score Examples -------- >>> import numpy as np >>> from costcla.metrics import cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> cost_loss(y_true, y_pred, cost_mat) 3
[ "Cost", "classification", "loss", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/metrics/costs.py#L19-L81
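A plain-numpy re-derivation of the docstring example above, spelling out the two cost terms (cost_mat columns: false positives, false negatives, true positives, true negatives):

import numpy as np

y_true = np.array([0., 1., 1., 0.])
y_pred = np.array([0., 1., 0., 0.])
cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]], dtype=float)

# Positives: pay the FN cost when predicted 0, the TP cost when predicted 1.
cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2])
# Negatives: pay the FP cost when predicted 1, the TN cost when predicted 0.
cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3])
print(cost.sum())   # 3.0 -- only sample 2 is misclassified, a false negative costing 3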
4,071
albahnsen/CostSensitiveClassification
costcla/metrics/costs.py
savings_score
def savings_score(y_true, y_pred, cost_mat): #TODO: update description """Savings score. This function calculates the savings of using y_pred on y_true with cost-matrix cost-mat, as the relative difference between the cost_loss of y_pred and the cost_loss of a naive classification model. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- score : float Savings of using y_pred on y_true with cost-matrix cost-mat The best performance is 1. References ---------- .. [1] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- cost_loss Examples -------- >>> import numpy as np >>> from costcla.metrics import savings_score, cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> savings_score(y_true, y_pred, cost_mat) 0.5 """ #TODO: Check consistency of cost_mat y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) n_samples = len(y_true) # Calculate the cost of naive prediction cost_base = min(cost_loss(y_true, np.zeros(n_samples), cost_mat), cost_loss(y_true, np.ones(n_samples), cost_mat)) cost = cost_loss(y_true, y_pred, cost_mat) return 1.0 - cost / cost_base
python
def savings_score(y_true, y_pred, cost_mat): #TODO: update description """Savings score. This function calculates the savings of using y_pred on y_true with cost-matrix cost-mat, as the relative difference between the cost_loss of y_pred and the cost_loss of a naive classification model. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- score : float Savings of using y_pred on y_true with cost-matrix cost-mat The best performance is 1. References ---------- .. [1] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- cost_loss Examples -------- >>> import numpy as np >>> from costcla.metrics import savings_score, cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> savings_score(y_true, y_pred, cost_mat) 0.5 """ #TODO: Check consistency of cost_mat y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) n_samples = len(y_true) # Calculate the cost of naive prediction cost_base = min(cost_loss(y_true, np.zeros(n_samples), cost_mat), cost_loss(y_true, np.ones(n_samples), cost_mat)) cost = cost_loss(y_true, y_pred, cost_mat) return 1.0 - cost / cost_base
[ "def", "savings_score", "(", "y_true", ",", "y_pred", ",", "cost_mat", ")", ":", "#TODO: update description", "#TODO: Check consistency of cost_mat", "y_true", "=", "column_or_1d", "(", "y_true", ")", "y_pred", "=", "column_or_1d", "(", "y_pred", ")", "n_samples", "=", "len", "(", "y_true", ")", "# Calculate the cost of naive prediction", "cost_base", "=", "min", "(", "cost_loss", "(", "y_true", ",", "np", ".", "zeros", "(", "n_samples", ")", ",", "cost_mat", ")", ",", "cost_loss", "(", "y_true", ",", "np", ".", "ones", "(", "n_samples", ")", ",", "cost_mat", ")", ")", "cost", "=", "cost_loss", "(", "y_true", ",", "y_pred", ",", "cost_mat", ")", "return", "1.0", "-", "cost", "/", "cost_base" ]
Savings score. This function calculates the savings of using y_pred on y_true with cost-matrix cost-mat, as the relative difference between the cost_loss of y_pred and the cost_loss of a naive classification model. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted labels, as returned by a classifier. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represent the costs of: false positives, false negatives, true positives and true negatives, for each example. Returns ------- score : float Savings of using y_pred on y_true with cost-matrix cost-mat The best performance is 1. References ---------- .. [1] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten, `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining, 677-685, 2014. See also -------- cost_loss Examples -------- >>> import numpy as np >>> from costcla.metrics import savings_score, cost_loss >>> y_pred = [0, 1, 0, 0] >>> y_true = [0, 1, 1, 0] >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]]) >>> savings_score(y_true, y_pred, cost_mat) 0.5
[ "Savings", "score", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/metrics/costs.py#L84-L143
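A worked check of the savings computation on the same docstring example; the naive baseline is the cheaper of predicting all-negative or all-positive:

import numpy as np
from costcla.metrics import cost_loss

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])

cost_all0 = cost_loss(y_true, np.zeros(4), cost_mat)   # two false negatives: 3 + 3 = 6
cost_all1 = cost_loss(y_true, np.ones(4), cost_mat)    # two false positives: 4 + 2 = 6
cost_base = min(cost_all0, cost_all1)                  # 6
print(1.0 - cost_loss(y_true, y_pred, cost_mat) / cost_base)   # 1 - 3/6 = 0.5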
4,072
albahnsen/CostSensitiveClassification
costcla/metrics/costs.py
brier_score_loss
def brier_score_loss(y_true, y_prob): """Compute the Brier score The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set of N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from costcla.metrics import brier_score_loss >>> y_true = [0, 1, 1, 0] >>> y_prob = [0.1, 0.9, 0.8, 0.3] >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- http://en.wikipedia.org/wiki/Brier_score """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) return np.mean((y_true - y_prob) ** 2)
python
def brier_score_loss(y_true, y_prob): """Compute the Brier score The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set of N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from costcla.metrics import brier_score_loss >>> y_true = [0, 1, 1, 0] >>> y_prob = [0.1, 0.9, 0.8, 0.3] >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- http://en.wikipedia.org/wiki/Brier_score """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) return np.mean((y_true - y_prob) ** 2)
[ "def", "brier_score_loss", "(", "y_true", ",", "y_prob", ")", ":", "y_true", "=", "column_or_1d", "(", "y_true", ")", "y_prob", "=", "column_or_1d", "(", "y_prob", ")", "return", "np", ".", "mean", "(", "(", "y_true", "-", "y_prob", ")", "**", "2", ")" ]
Compute the Brier score The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set of N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from costcla.metrics import brier_score_loss >>> y_true = [0, 1, 1, 0] >>> y_prob = [0.1, 0.9, 0.8, 0.3] >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- http://en.wikipedia.org/wiki/Brier_score
[ "Compute", "the", "Brier", "score" ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/metrics/costs.py#L149-L200
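The docstring example above, expanded term by term:

import numpy as np

y_true = np.array([0, 1, 1, 0])
y_prob = np.array([0.1, 0.9, 0.8, 0.3])
# Squared errors: 0.01 + 0.01 + 0.04 + 0.09 = 0.15; mean over 4 samples = 0.0375
print(np.mean((y_true - y_prob) ** 2))   # 0.0375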
4,073
albahnsen/CostSensitiveClassification
costcla/models/regression.py
_logistic_cost_loss
def _logistic_cost_loss(w, X, y, cost_mat, alpha): """Computes the logistic loss. Parameters ---------- w : array-like, shape (n_w, n_features,) or (n_w, n_features + 1,) Coefficient vector or matrix of coefficient. X : array-like, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. alpha : float Regularization parameter. alpha is equal to 1 / C. Returns ------- out : float Logistic loss. """ if w.shape[0] == w.size: # Only evaluating one w return _logistic_cost_loss_i(w, X, y, cost_mat, alpha) else: # Evaluating a set of w n_w = w.shape[0] out = np.zeros(n_w) for i in range(n_w): out[i] = _logistic_cost_loss_i(w[i], X, y, cost_mat, alpha) return out
python
def _logistic_cost_loss(w, X, y, cost_mat, alpha): """Computes the logistic loss. Parameters ---------- w : array-like, shape (n_w, n_features,) or (n_w, n_features + 1,) Coefficient vector or matrix of coefficient. X : array-like, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. alpha : float Regularization parameter. alpha is equal to 1 / C. Returns ------- out : float Logistic loss. """ if w.shape[0] == w.size: # Only evaluating one w return _logistic_cost_loss_i(w, X, y, cost_mat, alpha) else: # Evaluating a set of w n_w = w.shape[0] out = np.zeros(n_w) for i in range(n_w): out[i] = _logistic_cost_loss_i(w[i], X, y, cost_mat, alpha) return out
[ "def", "_logistic_cost_loss", "(", "w", ",", "X", ",", "y", ",", "cost_mat", ",", "alpha", ")", ":", "if", "w", ".", "shape", "[", "0", "]", "==", "w", ".", "size", ":", "# Only evaluating one w", "return", "_logistic_cost_loss_i", "(", "w", ",", "X", ",", "y", ",", "cost_mat", ",", "alpha", ")", "else", ":", "# Evaluating a set of w", "n_w", "=", "w", ".", "shape", "[", "0", "]", "out", "=", "np", ".", "zeros", "(", "n_w", ")", "for", "i", "in", "range", "(", "n_w", ")", ":", "out", "[", "i", "]", "=", "_logistic_cost_loss_i", "(", "w", "[", "i", "]", ",", "X", ",", "y", ",", "cost_mat", ",", "alpha", ")", "return", "out" ]
Computes the logistic loss. Parameters ---------- w : array-like, shape (n_w, n_features,) or (n_w, n_features + 1,) Coefficient vector or matrix of coefficient. X : array-like, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. alpha : float Regularization parameter. alpha is equal to 1 / C. Returns ------- out : float Logistic loss.
[ "Computes", "the", "logistic", "loss", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/regression.py#L58-L98
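A sketch of the dispatch logic above: a single 1-D coefficient vector is scored once, while a 2-D stack of candidate vectors is scored row by row (loss_i is a stand-in for _logistic_cost_loss_i, not the real loss):

import numpy as np

def loss_i(w, X, y):                  # placeholder per-w loss for the sketch
    return float(np.sum(w ** 2))

def loss(w, X, y):
    if w.shape[0] == w.size:          # true only for a single 1-D vector
        return loss_i(w, X, y)
    return np.array([loss_i(wi, X, y) for wi in w])

X, y = np.zeros((3, 2)), np.zeros(3)
print(loss(np.array([1.0, 2.0]), X, y))                 # scalar: 5.0
print(loss(np.array([[1.0, 2.0], [0.0, 3.0]]), X, y))   # per-row: [5. 9.]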
4,074
albahnsen/CostSensitiveClassification
costcla/models/regression.py
CostSensitiveLogisticRegression.predict
def predict(self, X, cut_point=0.5): """Predicted class. Parameters ---------- X : array-like, shape = [n_samples, n_features] cut_point : float, optional (default=0.5) Threshold on the positive class probability above which a sample is assigned to class 1. Returns ------- T : array-like, shape = [n_samples] Returns the prediction of the samples. """ return np.floor(self.predict_proba(X)[:, 1] + (1 - cut_point))
python
def predict(self, X, cut_point=0.5): """Predicted class. Parameters ---------- X : array-like, shape = [n_samples, n_features] cut_point : float, optional (default=0.5) Threshold on the positive class probability above which a sample is assigned to class 1. Returns ------- T : array-like, shape = [n_samples] Returns the prediction of the samples. """ return np.floor(self.predict_proba(X)[:, 1] + (1 - cut_point))
[ "def", "predict", "(", "self", ",", "X", ",", "cut_point", "=", "0.5", ")", ":", "return", "np", ".", "floor", "(", "self", ".", "predict_proba", "(", "X", ")", "[", ":", ",", "1", "]", "+", "(", "1", "-", "cut_point", ")", ")" ]
Predicted class. Parameters ---------- X : array-like, shape = [n_samples, n_features] cut_point : float, optional (default=0.5) Threshold on the positive class probability above which a sample is assigned to class 1. Returns ------- T : array-like, shape = [n_samples] Returns the prediction of the samples.
[ "Predicted", "class", "." ]
75778ae32c70671c0cdde6c4651277b6a8b58871
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/regression.py#L278-L290
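The floor(p + (1 - cut_point)) expression above is a vectorized threshold: it returns 1 exactly when the positive-class probability p is at least cut_point:

import numpy as np

p = np.array([0.10, 0.49, 0.50, 0.90])
cut_point = 0.5
print(np.floor(p + (1 - cut_point)))   # [0. 0. 1. 1.]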
4,075
MozillaSecurity/laniakea
laniakea/core/userdata.py
UserData.list_tags
def list_tags(userdata): """List all used macros within a UserData script. :param userdata: The UserData script. :type userdata: str """ macros = re.findall('@(.*?)@', userdata) logging.info('List of available macros:') for macro in macros: logging.info('\t%r', macro)
python
def list_tags(userdata): """List all used macros within a UserData script. :param userdata: The UserData script. :type userdata: str """ macros = re.findall('@(.*?)@', userdata) logging.info('List of available macros:') for macro in macros: logging.info('\t%r', macro)
[ "def", "list_tags", "(", "userdata", ")", ":", "macros", "=", "re", ".", "findall", "(", "'@(.*?)@'", ",", "userdata", ")", "logging", ".", "info", "(", "'List of available macros:'", ")", "for", "macro", "in", "macros", ":", "logging", ".", "info", "(", "'\\t%r'", ",", "macro", ")" ]
List all used macros within a UserData script. :param userdata: The UserData script. :type userdata: str
[ "List", "all", "used", "macros", "within", "a", "UserData", "script", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/userdata.py#L54-L63
4,076
MozillaSecurity/laniakea
laniakea/core/userdata.py
UserData.handle_tags
def handle_tags(userdata, macros): """Insert macro values or auto export variables in UserData scripts. :param userdata: The UserData script. :type userdata: str :param macros: UserData macros as key value pair. :type macros: dict :return: UserData script with the macros replaced with their values. :rtype: str """ macro_vars = re.findall('@(.*?)@', userdata) for macro_var in macro_vars: if macro_var == '!all_macros_export': macro_var_export_list = [] for defined_macro in macros: macro_var_export_list.append('export %s="%s"' % (defined_macro, macros[defined_macro])) macro_var_exports = "\n".join(macro_var_export_list) userdata = userdata.replace('@%s@' % macro_var, macro_var_exports) elif macro_var == "!all_macros_docker": macro_var_export_list = [] for defined_macro in macros: macro_var_export_list.append("-e '%s=%s'" % (defined_macro, macros[defined_macro])) macro_var_exports = " ".join(macro_var_export_list) userdata = userdata.replace('@%s@' % macro_var, macro_var_exports) else: if "|" in macro_var: macro_var, default_value = macro_var.split('|') if macro_var not in macros: logging.warning('Using default variable value %s for @%s@ ', default_value, macro_var) value = default_value else: value = macros[macro_var] userdata = userdata.replace('@%s|%s@' % (macro_var, default_value), value) else: if macro_var not in macros: logging.error('Undefined variable @%s@ in UserData script', macro_var) return None userdata = userdata.replace('@%s@' % macro_var, macros[macro_var]) return userdata
python
def handle_tags(userdata, macros): """Insert macro values or auto export variables in UserData scripts. :param userdata: The UserData script. :type userdata: str :param macros: UserData macros as key value pair. :type macros: dict :return: UserData script with the macros replaced with their values. :rtype: str """ macro_vars = re.findall('@(.*?)@', userdata) for macro_var in macro_vars: if macro_var == '!all_macros_export': macro_var_export_list = [] for defined_macro in macros: macro_var_export_list.append('export %s="%s"' % (defined_macro, macros[defined_macro])) macro_var_exports = "\n".join(macro_var_export_list) userdata = userdata.replace('@%s@' % macro_var, macro_var_exports) elif macro_var == "!all_macros_docker": macro_var_export_list = [] for defined_macro in macros: macro_var_export_list.append("-e '%s=%s'" % (defined_macro, macros[defined_macro])) macro_var_exports = " ".join(macro_var_export_list) userdata = userdata.replace('@%s@' % macro_var, macro_var_exports) else: if "|" in macro_var: macro_var, default_value = macro_var.split('|') if macro_var not in macros: logging.warning('Using default variable value %s for @%s@ ', default_value, macro_var) value = default_value else: value = macros[macro_var] userdata = userdata.replace('@%s|%s@' % (macro_var, default_value), value) else: if macro_var not in macros: logging.error('Undefined variable @%s@ in UserData script', macro_var) return None userdata = userdata.replace('@%s@' % macro_var, macros[macro_var]) return userdata
[ "def", "handle_tags", "(", "userdata", ",", "macros", ")", ":", "macro_vars", "=", "re", ".", "findall", "(", "'@(.*?)@'", ",", "userdata", ")", "for", "macro_var", "in", "macro_vars", ":", "if", "macro_var", "==", "'!all_macros_export'", ":", "macro_var_export_list", "=", "[", "]", "for", "defined_macro", "in", "macros", ":", "macro_var_export_list", ".", "append", "(", "'export %s=\"%s\"'", "%", "(", "defined_macro", ",", "macros", "[", "defined_macro", "]", ")", ")", "macro_var_exports", "=", "\"\\n\"", ".", "join", "(", "macro_var_export_list", ")", "userdata", "=", "userdata", ".", "replace", "(", "'@%s@'", "%", "macro_var", ",", "macro_var_exports", ")", "elif", "macro_var", "==", "\"!all_macros_docker\"", ":", "macro_var_export_list", "=", "[", "]", "for", "defined_macro", "in", "macros", ":", "macro_var_export_list", ".", "append", "(", "\"-e '%s=%s'\"", "%", "(", "defined_macro", ",", "macros", "[", "defined_macro", "]", ")", ")", "macro_var_exports", "=", "\" \"", ".", "join", "(", "macro_var_export_list", ")", "userdata", "=", "userdata", ".", "replace", "(", "'@%s@'", "%", "macro_var", ",", "macro_var_exports", ")", "else", ":", "if", "\"|\"", "in", "macro_var", ":", "macro_var", ",", "default_value", "=", "macro_var", ".", "split", "(", "'|'", ")", "if", "macro_var", "not", "in", "macros", ":", "logging", ".", "warning", "(", "'Using default variable value %s for @%s@ '", ",", "default_value", ",", "macro_var", ")", "value", "=", "default_value", "else", ":", "value", "=", "macros", "[", "macro_var", "]", "userdata", "=", "userdata", ".", "replace", "(", "'@%s|%s@'", "%", "(", "macro_var", ",", "default_value", ")", ",", "value", ")", "else", ":", "if", "macro_var", "not", "in", "macros", ":", "logging", ".", "error", "(", "'Undefined variable @%s@ in UserData script'", ",", "macro_var", ")", "return", "None", "userdata", "=", "userdata", ".", "replace", "(", "'@%s@'", "%", "macro_var", ",", "macros", "[", "macro_var", "]", ")", "return", "userdata" ]
Insert macro values or auto export variables in UserData scripts. :param userdata: The UserData script. :type userdata: str :param macros: UserData macros as key value pair. :type macros: dict :return: UserData script with the macros replaced with their values. :rtype: str
[ "Insert", "macro", "values", "or", "auto", "export", "variables", "in", "UserData", "scripts", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/userdata.py#L66-L109
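An illustrative run of the macro handling above on a toy UserData script; the script text and macro values are invented, and the module path and static-method call are assumptions inferred from the file path and qualified name above:

from laniakea.core.userdata import UserData   # module path assumed from the file path

userdata = '#!/bin/bash\n@!all_macros_export@\necho "@GREETING|hello@ @NAME@"'
macros = {'NAME': 'laniakea'}

result = UserData.handle_tags(userdata, macros)
# '@!all_macros_export@' expands to:  export NAME="laniakea"
# '@GREETING|hello@'     falls back to its default value:  hello
# '@NAME@'               resolves to:  laniakea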
4,077
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.retry_on_ec2_error
def retry_on_ec2_error(self, func, *args, **kwargs): """ Call the given method with the given arguments, retrying if the call failed due to an EC2ResponseError. This method will wait at most 30 seconds and perform up to 6 retries. If the method still fails, it will propagate the error. :param func: Function to call :type func: function """ exception_retry_count = 6 while True: try: return func(*args, **kwargs) except (boto.exception.EC2ResponseError, ssl.SSLError) as msg: exception_retry_count -= 1 if exception_retry_count <= 0: raise msg time.sleep(5)
python
def retry_on_ec2_error(self, func, *args, **kwargs): """ Call the given method with the given arguments, retrying if the call failed due to an EC2ResponseError. This method will wait at most 30 seconds and perform up to 6 retries. If the method still fails, it will propagate the error. :param func: Function to call :type func: function """ exception_retry_count = 6 while True: try: return func(*args, **kwargs) except (boto.exception.EC2ResponseError, ssl.SSLError) as msg: exception_retry_count -= 1 if exception_retry_count <= 0: raise msg time.sleep(5)
[ "def", "retry_on_ec2_error", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "exception_retry_count", "=", "6", "while", "True", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "boto", ".", "exception", ".", "EC2ResponseError", ",", "ssl", ".", "SSLError", ")", "as", "msg", ":", "exception_retry_count", "-=", "1", "if", "exception_retry_count", "<=", "0", ":", "raise", "msg", "time", ".", "sleep", "(", "5", ")" ]
Call the given method with the given arguments, retrying if the call failed due to an EC2ResponseError. This method will wait at most 30 seconds and perform up to 6 retries. If the method still fails, it will propagate the error. :param func: Function to call :type func: function
[ "Call", "the", "given", "method", "with", "the", "given", "arguments", "retrying", "if", "the", "call", "failed", "due", "to", "an", "EC2ResponseError", ".", "This", "method", "will", "wait", "at", "most", "30", "seconds", "and", "perform", "up", "to", "6", "retries", ".", "If", "the", "method", "still", "fails", "it", "will", "propagate", "the", "error", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L36-L54
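A generic reproduction of the retry loop above with the boto-specific parts removed; the exception type here is a stand-in for boto.exception.EC2ResponseError / ssl.SSLError:

import time

def retry(func, *args, retries=6, delay=5, **kwargs):
    while True:
        try:
            return func(*args, **kwargs)
        except ConnectionError:       # stand-in exception for the sketch
            retries -= 1
            if retries <= 0:
                raise                 # exhausted: propagate the last error
            time.sleep(delay)         # 5 s between attempts, as in the source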
4,078
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.connect
def connect(self, region, **kw_params): """Connect to EC2. :param region: The name of the region to connect to. :type region: str :param kw_params: :type kw_params: dict """ self.ec2 = boto.ec2.connect_to_region(region, **kw_params) if not self.ec2: raise EC2ManagerException('Unable to connect to region "%s"' % region) self.remote_images.clear() if self.images and any(('image_name' in img and 'image_id' not in img) for img in self.images.values()): for img in self.images.values(): if 'image_name' in img and 'image_id' not in img: img['image_id'] = self.resolve_image_name(img.pop('image_name'))
python
def connect(self, region, **kw_params): """Connect to EC2. :param region: The name of the region to connect to. :type region: str :param kw_params: :type kw_params: dict """ self.ec2 = boto.ec2.connect_to_region(region, **kw_params) if not self.ec2: raise EC2ManagerException('Unable to connect to region "%s"' % region) self.remote_images.clear() if self.images and any(('image_name' in img and 'image_id' not in img) for img in self.images.values()): for img in self.images.values(): if 'image_name' in img and 'image_id' not in img: img['image_id'] = self.resolve_image_name(img.pop('image_name'))
[ "def", "connect", "(", "self", ",", "region", ",", "*", "*", "kw_params", ")", ":", "self", ".", "ec2", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "region", ",", "*", "*", "kw_params", ")", "if", "not", "self", ".", "ec2", ":", "raise", "EC2ManagerException", "(", "'Unable to connect to region \"%s\"'", "%", "region", ")", "self", ".", "remote_images", ".", "clear", "(", ")", "if", "self", ".", "images", "and", "any", "(", "(", "'image_name'", "in", "img", "and", "'image_id'", "not", "in", "img", ")", "for", "img", "in", "self", ".", "images", ".", "values", "(", ")", ")", ":", "for", "img", "in", "self", ".", "images", ".", "values", "(", ")", ":", "if", "'image_name'", "in", "img", "and", "'image_id'", "not", "in", "img", ":", "img", "[", "'image_id'", "]", "=", "self", ".", "resolve_image_name", "(", "img", ".", "pop", "(", "'image_name'", ")", ")" ]
Connect to EC2. :param region: The name of the region to connect to. :type region: str :param kw_params: :type kw_params: dict
[ "Connect", "to", "a", "EC2", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L56-L72
4,079
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.resolve_image_name
def resolve_image_name(self, image_name): """Look up an AMI for the connected region based on an image name. :param image_name: The name of the image to resolve. :type image_name: str :return: The AMI for the given image. :rtype: str """ # look at each scope in order of size scopes = ['self', 'amazon', 'aws-marketplace'] if image_name in self.remote_images: return self.remote_images[image_name] for scope in scopes: logger.info('Retrieving available AMIs owned by %s...', scope) remote_images = self.ec2.get_all_images(owners=[scope], filters={'name': image_name}) self.remote_images.update({ri.name: ri.id for ri in remote_images}) if image_name in self.remote_images: return self.remote_images[image_name] raise EC2ManagerException('Failed to resolve AMI name "%s" to an AMI' % image_name)
python
def resolve_image_name(self, image_name): """Look up an AMI for the connected region based on an image name. :param image_name: The name of the image to resolve. :type image_name: str :return: The AMI for the given image. :rtype: str """ # look at each scope in order of size scopes = ['self', 'amazon', 'aws-marketplace'] if image_name in self.remote_images: return self.remote_images[image_name] for scope in scopes: logger.info('Retrieving available AMIs owned by %s...', scope) remote_images = self.ec2.get_all_images(owners=[scope], filters={'name': image_name}) self.remote_images.update({ri.name: ri.id for ri in remote_images}) if image_name in self.remote_images: return self.remote_images[image_name] raise EC2ManagerException('Failed to resolve AMI name "%s" to an AMI' % image_name)
[ "def", "resolve_image_name", "(", "self", ",", "image_name", ")", ":", "# look at each scope in order of size", "scopes", "=", "[", "'self'", ",", "'amazon'", ",", "'aws-marketplace'", "]", "if", "image_name", "in", "self", ".", "remote_images", ":", "return", "self", ".", "remote_images", "[", "image_name", "]", "for", "scope", "in", "scopes", ":", "logger", ".", "info", "(", "'Retrieving available AMIs owned by %s...'", ",", "scope", ")", "remote_images", "=", "self", ".", "ec2", ".", "get_all_images", "(", "owners", "=", "[", "scope", "]", ",", "filters", "=", "{", "'name'", ":", "image_name", "}", ")", "self", ".", "remote_images", ".", "update", "(", "{", "ri", ".", "name", ":", "ri", ".", "id", "for", "ri", "in", "remote_images", "}", ")", "if", "image_name", "in", "self", ".", "remote_images", ":", "return", "self", ".", "remote_images", "[", "image_name", "]", "raise", "EC2ManagerException", "(", "'Failed to resolve AMI name \"%s\" to an AMI'", "%", "image_name", ")" ]
Look up an AMI for the connected region based on an image name. :param image_name: The name of the image to resolve. :type image_name: str :return: The AMI for the given image. :rtype: str
[ "Look", "up", "an", "AMI", "for", "the", "connected", "region", "based", "on", "an", "image", "name", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L74-L92
4,080
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.create_on_demand
def create_on_demand(self, instance_type='default', tags=None, root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False): """Create one or more EC2 on-demand instances. :param size: Size of root device :type size: int :param delete_on_termination: :type delete_on_termination: boolean :param vol_type: :type vol_type: str :param root_device_type: The type of the root device. :type root_device_type: str :param instance_type: A section name in amazon.json :type instance_type: str :param tags: :type tags: dict :return: List of instances created :rtype: list """ name, size = self._get_default_name_size(instance_type, size) if root_device_type == 'ebs': self.images[instance_type]['block_device_map'] = \ self._configure_ebs_volume(vol_type, name, size, delete_on_termination) reservation = self.ec2.run_instances(**self.images[instance_type]) logger.info('Creating requested tags...') for i in reservation.instances: self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {}) instances = [] logger.info('Waiting for instances to become ready...') while len(reservation.instances): # pylint: disable=len-as-condition for i in reservation.instances: if i.state == 'running': instances.append(i) reservation.instances.pop(reservation.instances.index(i)) logger.info('%s is %s at %s (%s)', i.id, i.state, i.public_dns_name, i.ip_address) else: self.retry_on_ec2_error(i.update) return instances
python
def create_on_demand(self, instance_type='default', tags=None, root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False): """Create one or more EC2 on-demand instances. :param size: Size of root device :type size: int :param delete_on_termination: :type delete_on_termination: boolean :param vol_type: :type vol_type: str :param root_device_type: The type of the root device. :type root_device_type: str :param instance_type: A section name in amazon.json :type instance_type: str :param tags: :type tags: dict :return: List of instances created :rtype: list """ name, size = self._get_default_name_size(instance_type, size) if root_device_type == 'ebs': self.images[instance_type]['block_device_map'] = \ self._configure_ebs_volume(vol_type, name, size, delete_on_termination) reservation = self.ec2.run_instances(**self.images[instance_type]) logger.info('Creating requested tags...') for i in reservation.instances: self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {}) instances = [] logger.info('Waiting for instances to become ready...') while len(reservation.instances): # pylint: disable=len-as-condition for i in reservation.instances: if i.state == 'running': instances.append(i) reservation.instances.pop(reservation.instances.index(i)) logger.info('%s is %s at %s (%s)', i.id, i.state, i.public_dns_name, i.ip_address) else: self.retry_on_ec2_error(i.update) return instances
[ "def", "create_on_demand", "(", "self", ",", "instance_type", "=", "'default'", ",", "tags", "=", "None", ",", "root_device_type", "=", "'ebs'", ",", "size", "=", "'default'", ",", "vol_type", "=", "'gp2'", ",", "delete_on_termination", "=", "False", ")", ":", "name", ",", "size", "=", "self", ".", "_get_default_name_size", "(", "instance_type", ",", "size", ")", "if", "root_device_type", "==", "'ebs'", ":", "self", ".", "images", "[", "instance_type", "]", "[", "'block_device_map'", "]", "=", "self", ".", "_configure_ebs_volume", "(", "vol_type", ",", "name", ",", "size", ",", "delete_on_termination", ")", "reservation", "=", "self", ".", "ec2", ".", "run_instances", "(", "*", "*", "self", ".", "images", "[", "instance_type", "]", ")", "logger", ".", "info", "(", "'Creating requested tags...'", ")", "for", "i", "in", "reservation", ".", "instances", ":", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "create_tags", ",", "[", "i", ".", "id", "]", ",", "tags", "or", "{", "}", ")", "instances", "=", "[", "]", "logger", ".", "info", "(", "'Waiting for instances to become ready...'", ")", "while", "len", "(", "reservation", ".", "instances", ")", ":", "# pylint: disable=len-as-condition", "for", "i", "in", "reservation", ".", "instances", ":", "if", "i", ".", "state", "==", "'running'", ":", "instances", ".", "append", "(", "i", ")", "reservation", ".", "instances", ".", "pop", "(", "reservation", ".", "instances", ".", "index", "(", "i", ")", ")", "logger", ".", "info", "(", "'%s is %s at %s (%s)'", ",", "i", ".", "id", ",", "i", ".", "state", ",", "i", ".", "public_dns_name", ",", "i", ".", "ip_address", ")", "else", ":", "self", ".", "retry_on_ec2_error", "(", "i", ".", "update", ")", "return", "instances" ]
Create one or more EC2 on-demand instances. :param size: Size of root device :type size: int :param delete_on_termination: :type delete_on_termination: boolean :param vol_type: :type vol_type: str :param root_device_type: The type of the root device. :type root_device_type: str :param instance_type: A section name in amazon.json :type instance_type: str :param tags: :type tags: dict :return: List of instances created :rtype: list
[ "Create", "one", "or", "more", "EC2", "on", "-", "demand", "instances", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L94-L144
4,081
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.create_spot_requests
def create_spot_requests(self, price, instance_type='default', root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False, timeout=None): """Request creation of one or more EC2 spot instances. :param size: :param vol_type: :param delete_on_termination: :param root_device_type: The type of the root device. :type root_device_type: str :param price: Max price to pay for spot instance per hour. :type price: float :param instance_type: A section name in amazon.json :type instance_type: str :param timeout: Seconds to keep the request open (cancelled if not fulfilled). :type timeout: int :return: List of requests created :rtype: list """ name, size = self._get_default_name_size(instance_type, size) if root_device_type == 'ebs': self.images[instance_type]['block_device_map'] = \ self._configure_ebs_volume(vol_type, name, size, delete_on_termination) valid_until = None if timeout is not None: valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat() requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type]) return [r.id for r in requests]
python
def create_spot_requests(self, price, instance_type='default', root_device_type='ebs', size='default', vol_type='gp2', delete_on_termination=False, timeout=None): """Request creation of one or more EC2 spot instances. :param size: :param vol_type: :param delete_on_termination: :param root_device_type: The type of the root device. :type root_device_type: str :param price: Max price to pay for spot instance per hour. :type price: float :param instance_type: A section name in amazon.json :type instance_type: str :param timeout: Seconds to keep the request open (cancelled if not fulfilled). :type timeout: int :return: List of requests created :rtype: list """ name, size = self._get_default_name_size(instance_type, size) if root_device_type == 'ebs': self.images[instance_type]['block_device_map'] = \ self._configure_ebs_volume(vol_type, name, size, delete_on_termination) valid_until = None if timeout is not None: valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat() requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type]) return [r.id for r in requests]
[ "def", "create_spot_requests", "(", "self", ",", "price", ",", "instance_type", "=", "'default'", ",", "root_device_type", "=", "'ebs'", ",", "size", "=", "'default'", ",", "vol_type", "=", "'gp2'", ",", "delete_on_termination", "=", "False", ",", "timeout", "=", "None", ")", ":", "name", ",", "size", "=", "self", ".", "_get_default_name_size", "(", "instance_type", ",", "size", ")", "if", "root_device_type", "==", "'ebs'", ":", "self", ".", "images", "[", "instance_type", "]", "[", "'block_device_map'", "]", "=", "self", ".", "_configure_ebs_volume", "(", "vol_type", ",", "name", ",", "size", ",", "delete_on_termination", ")", "valid_until", "=", "None", "if", "timeout", "is", "not", "None", ":", "valid_until", "=", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timeout", ")", ")", ".", "isoformat", "(", ")", "requests", "=", "self", ".", "ec2", ".", "request_spot_instances", "(", "price", ",", "valid_until", "=", "valid_until", ",", "*", "*", "self", ".", "images", "[", "instance_type", "]", ")", "return", "[", "r", ".", "id", "for", "r", "in", "requests", "]" ]
Request creation of one or more EC2 spot instances.

:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list
[ "Request", "creation", "of", "one", "or", "more", "EC2", "spot", "instances", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L146-L181
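The timeout handling above converts a relative number of seconds into the absolute ISO-8601 expiry that boto's request_spot_instances forwards to EC2 as ValidUntil. A standalone illustration of just that step (values are placeholders):

import datetime

timeout = 300  # keep the spot request open for five minutes
valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat()
print(valid_until)  # e.g. '2019-01-01T12:05:00.000000'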
4,082
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.check_spot_requests
def check_spot_requests(self, requests, tags=None):
    """Check status of one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list

    :param tags:
    :type tags: dict

    :return: List of boto.ec2.instance.Instance objects created, order corresponding
             to the requests param (None if a request is still open, the closed
             boto.ec2.spotinstancerequest.SpotInstanceRequest if it is no longer
             open but was not fulfilled)
    :rtype: list
    """
    instances = [None] * len(requests)
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        if req.instance_id:
            instance = self.retry_on_ec2_error(self.ec2.get_only_instances, req.instance_id)[0]
            if not instance:
                raise EC2ManagerException('Failed to get instance with id %s for %s request %s'
                                          % (req.instance_id, req.status.code, req.id))
            instances[requests.index(req.id)] = instance
            self.retry_on_ec2_error(self.ec2.create_tags, [instance.id], tags or {})
            logger.info('Request %s is %s and %s.', req.id, req.status.code, req.state)
            logger.info('%s is %s at %s (%s)',
                        instance.id,
                        instance.state,
                        instance.public_dns_name,
                        instance.ip_address)
        elif req.state != "open":
            # return the request so we don't try again
            instances[requests.index(req.id)] = req
    return instances
python
def check_spot_requests(self, requests, tags=None):
    """Check status of one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list

    :param tags:
    :type tags: dict

    :return: List of boto.ec2.instance.Instance objects created, order corresponding
             to the requests param (None if a request is still open, the closed
             boto.ec2.spotinstancerequest.SpotInstanceRequest if it is no longer
             open but was not fulfilled)
    :rtype: list
    """
    instances = [None] * len(requests)
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        if req.instance_id:
            instance = self.retry_on_ec2_error(self.ec2.get_only_instances, req.instance_id)[0]
            if not instance:
                raise EC2ManagerException('Failed to get instance with id %s for %s request %s'
                                          % (req.instance_id, req.status.code, req.id))
            instances[requests.index(req.id)] = instance
            self.retry_on_ec2_error(self.ec2.create_tags, [instance.id], tags or {})
            logger.info('Request %s is %s and %s.', req.id, req.status.code, req.state)
            logger.info('%s is %s at %s (%s)',
                        instance.id,
                        instance.state,
                        instance.public_dns_name,
                        instance.ip_address)
        elif req.state != "open":
            # return the request so we don't try again
            instances[requests.index(req.id)] = req
    return instances
[ "def", "check_spot_requests", "(", "self", ",", "requests", ",", "tags", "=", "None", ")", ":", "instances", "=", "[", "None", "]", "*", "len", "(", "requests", ")", "ec2_requests", "=", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "get_all_spot_instance_requests", ",", "request_ids", "=", "requests", ")", "for", "req", "in", "ec2_requests", ":", "if", "req", ".", "instance_id", ":", "instance", "=", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "get_only_instances", ",", "req", ".", "instance_id", ")", "[", "0", "]", "if", "not", "instance", ":", "raise", "EC2ManagerException", "(", "'Failed to get instance with id %s for %s request %s'", "%", "(", "req", ".", "instance_id", ",", "req", ".", "status", ".", "code", ",", "req", ".", "id", ")", ")", "instances", "[", "requests", ".", "index", "(", "req", ".", "id", ")", "]", "=", "instance", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "create_tags", ",", "[", "instance", ".", "id", "]", ",", "tags", "or", "{", "}", ")", "logger", ".", "info", "(", "'Request %s is %s and %s.'", ",", "req", ".", "id", ",", "req", ".", "status", ".", "code", ",", "req", ".", "state", ")", "logger", ".", "info", "(", "'%s is %s at %s (%s)'", ",", "instance", ".", "id", ",", "instance", ".", "state", ",", "instance", ".", "public_dns_name", ",", "instance", ".", "ip_address", ")", "elif", "req", ".", "state", "!=", "\"open\"", ":", "# return the request so we don't try again", "instances", "[", "requests", ".", "index", "(", "req", ".", "id", ")", "]", "=", "req", "return", "instances" ]
Check status of one or more EC2 spot instance requests.

:param requests: List of EC2 spot instance request IDs.
:type requests: list
:param tags:
:type tags: dict
:return: List of boto.ec2.instance.Instance objects created, order corresponding to
         the requests param (None if a request is still open, the closed
         boto.ec2.spotinstancerequest.SpotInstanceRequest if it is no longer open
         but was not fulfilled)
:rtype: list
[ "Check", "status", "of", "one", "or", "more", "EC2", "spot", "instance", "requests", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L183-L221
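A hedged polling sketch built on check_spot_requests; `manager` and `request_ids` are assumed to come from an earlier create_spot_requests() call:

import time

pending = list(request_ids)
while pending:
    time.sleep(5)
    # results are aligned with `pending`; a None entry means still open
    results = manager.check_spot_requests(pending, tags={'Name': 'fuzzer'})
    for req_id, result in zip(list(pending), results):
        if result is not None:      # fulfilled instance, or a closed request
            pending.remove(req_id)
print('all spot requests resolved')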
4,083
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.cancel_spot_requests
def cancel_spot_requests(self, requests):
    """Cancel one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list
    """
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        req.cancel()
python
def cancel_spot_requests(self, requests):
    """Cancel one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list
    """
    ec2_requests = self.retry_on_ec2_error(self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for req in ec2_requests:
        req.cancel()
[ "def", "cancel_spot_requests", "(", "self", ",", "requests", ")", ":", "ec2_requests", "=", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "get_all_spot_instance_requests", ",", "request_ids", "=", "requests", ")", "for", "req", "in", "ec2_requests", ":", "req", ".", "cancel", "(", ")" ]
Cancel one or more EC2 spot instance requests.

:param requests: List of EC2 spot instance request IDs.
:type requests: list
[ "Cancel", "one", "or", "more", "EC2", "spot", "instance", "requests", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L223-L232
4,084
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.create_spot
def create_spot(self, price, instance_type='default', tags=None, root_device_type='ebs', size='default',
                vol_type='gp2', delete_on_termination=False, timeout=None):
    """Create one or more EC2 spot instances.

    :param root_device_type:
    :param size:
    :param vol_type:
    :param delete_on_termination:
    :param timeout:

    :param price: Max price to pay for spot instance per hour.
    :type price: float

    :param instance_type: A section name in amazon.json
    :type instance_type: str

    :param tags:
    :type tags: dict

    :return: List of instances created
    :rtype: list
    """
    request_ids = self.create_spot_requests(price,
                                            instance_type=instance_type,
                                            root_device_type=root_device_type,
                                            size=size,
                                            vol_type=vol_type,
                                            delete_on_termination=delete_on_termination)
    instances = []
    logger.info('Waiting on fulfillment of requested spot instances.')
    poll_resolution = 5.0
    time_exceeded = False
    while request_ids:
        time.sleep(poll_resolution)
        new_instances = self.check_spot_requests(request_ids, tags=tags)
        if timeout is not None:
            timeout -= poll_resolution
            time_exceeded = timeout <= 0
        fulfilled = []
        for idx, instance in enumerate(new_instances):
            if instance.status.code == "bad-parameters":
                logging.error('Spot request for "%s" failed due to bad parameters.', instance.id)
                self.cancel_spot_requests([instance.id])
            if instance is not None:
                fulfilled.append(idx)
            if isinstance(instance, boto.ec2.instance.Instance):
                instances.append(instance)
        for idx in reversed(fulfilled):
            request_ids.pop(idx)
        if request_ids and time_exceeded:
            self.cancel_spot_requests(request_ids)
            break
    return instances
python
def create_spot(self, price, instance_type='default', tags=None, root_device_type='ebs', size='default',
                vol_type='gp2', delete_on_termination=False, timeout=None):
    """Create one or more EC2 spot instances.

    :param root_device_type:
    :param size:
    :param vol_type:
    :param delete_on_termination:
    :param timeout:

    :param price: Max price to pay for spot instance per hour.
    :type price: float

    :param instance_type: A section name in amazon.json
    :type instance_type: str

    :param tags:
    :type tags: dict

    :return: List of instances created
    :rtype: list
    """
    request_ids = self.create_spot_requests(price,
                                            instance_type=instance_type,
                                            root_device_type=root_device_type,
                                            size=size,
                                            vol_type=vol_type,
                                            delete_on_termination=delete_on_termination)
    instances = []
    logger.info('Waiting on fulfillment of requested spot instances.')
    poll_resolution = 5.0
    time_exceeded = False
    while request_ids:
        time.sleep(poll_resolution)
        new_instances = self.check_spot_requests(request_ids, tags=tags)
        if timeout is not None:
            timeout -= poll_resolution
            time_exceeded = timeout <= 0
        fulfilled = []
        for idx, instance in enumerate(new_instances):
            if instance.status.code == "bad-parameters":
                logging.error('Spot request for "%s" failed due to bad parameters.', instance.id)
                self.cancel_spot_requests([instance.id])
            if instance is not None:
                fulfilled.append(idx)
            if isinstance(instance, boto.ec2.instance.Instance):
                instances.append(instance)
        for idx in reversed(fulfilled):
            request_ids.pop(idx)
        if request_ids and time_exceeded:
            self.cancel_spot_requests(request_ids)
            break
    return instances
[ "def", "create_spot", "(", "self", ",", "price", ",", "instance_type", "=", "'default'", ",", "tags", "=", "None", ",", "root_device_type", "=", "'ebs'", ",", "size", "=", "'default'", ",", "vol_type", "=", "'gp2'", ",", "delete_on_termination", "=", "False", ",", "timeout", "=", "None", ")", ":", "request_ids", "=", "self", ".", "create_spot_requests", "(", "price", ",", "instance_type", "=", "instance_type", ",", "root_device_type", "=", "root_device_type", ",", "size", "=", "size", ",", "vol_type", "=", "vol_type", ",", "delete_on_termination", "=", "delete_on_termination", ")", "instances", "=", "[", "]", "logger", ".", "info", "(", "'Waiting on fulfillment of requested spot instances.'", ")", "poll_resolution", "=", "5.0", "time_exceeded", "=", "False", "while", "request_ids", ":", "time", ".", "sleep", "(", "poll_resolution", ")", "new_instances", "=", "self", ".", "check_spot_requests", "(", "request_ids", ",", "tags", "=", "tags", ")", "if", "timeout", "is", "not", "None", ":", "timeout", "-=", "poll_resolution", "time_exceeded", "=", "timeout", "<=", "0", "fulfilled", "=", "[", "]", "for", "idx", ",", "instance", "in", "enumerate", "(", "new_instances", ")", ":", "if", "instance", ".", "status", ".", "code", "==", "\"bad-parameters\"", ":", "logging", ".", "error", "(", "'Spot request for \"%s\" failed due to bad parameters.'", ",", "instance", ".", "id", ")", "self", ".", "cancel_spot_requests", "(", "[", "instance", ".", "id", "]", ")", "if", "instance", "is", "not", "None", ":", "fulfilled", ".", "append", "(", "idx", ")", "if", "isinstance", "(", "instance", ",", "boto", ".", "ec2", ".", "instance", ".", "Instance", ")", ":", "instances", ".", "append", "(", "instance", ")", "for", "idx", "in", "reversed", "(", "fulfilled", ")", ":", "request_ids", ".", "pop", "(", "idx", ")", "if", "request_ids", "and", "time_exceeded", ":", "self", ".", "cancel_spot_requests", "(", "request_ids", ")", "break", "return", "instances" ]
Create one or more EC2 spot instances.

:param root_device_type:
:param size:
:param vol_type:
:param delete_on_termination:
:param timeout:
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
[ "Create", "one", "or", "more", "EC2", "spot", "instances", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L234-L294
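One hazard in the fulfillment loop above: instance.status.code is read before the `instance is not None` check, so a still-open request (a None entry) raises AttributeError, and fulfilled boto Instance objects do not carry a spot-request status either. A guarded variant of the inner loop, offered as a sketch rather than the upstream fix:

for idx, instance in enumerate(new_instances):
    if instance is None:
        continue                        # request still open; poll again
    fulfilled.append(idx)
    if isinstance(instance, boto.ec2.instance.Instance):
        instances.append(instance)      # request fulfilled
    elif instance.status.code == 'bad-parameters':
        # here `instance` is the closed SpotInstanceRequest that
        # check_spot_requests returned in place of an Instance
        logging.error('Spot request for "%s" failed due to bad parameters.', instance.id)
        self.cancel_spot_requests([instance.id])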
4,085
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager._scale_down
def _scale_down(self, instances, count):
    """Return a list of |count| last created instances by launch time.

    :param instances: A list of instances.
    :type instances: list

    :param count: Number of instances to scale down.
    :type count: integer

    :return: List of instances to be scaled down.
    :rtype: list
    """
    i = sorted(instances, key=lambda i: i.launch_time, reverse=True)
    if not i:
        return []
    running = len(i)
    logger.info('%d instance/s are running.', running)
    logger.info('Scaling down %d instances of those.', count)
    if count > running:
        logger.info('Scale-down value is > than running instance/s - using maximum of %d!', running)
        count = running
    return i[:count]
python
def _scale_down(self, instances, count):
    """Return a list of |count| last created instances by launch time.

    :param instances: A list of instances.
    :type instances: list

    :param count: Number of instances to scale down.
    :type count: integer

    :return: List of instances to be scaled down.
    :rtype: list
    """
    i = sorted(instances, key=lambda i: i.launch_time, reverse=True)
    if not i:
        return []
    running = len(i)
    logger.info('%d instance/s are running.', running)
    logger.info('Scaling down %d instances of those.', count)
    if count > running:
        logger.info('Scale-down value is > than running instance/s - using maximum of %d!', running)
        count = running
    return i[:count]
[ "def", "_scale_down", "(", "self", ",", "instances", ",", "count", ")", ":", "i", "=", "sorted", "(", "instances", ",", "key", "=", "lambda", "i", ":", "i", ".", "launch_time", ",", "reverse", "=", "True", ")", "if", "not", "i", ":", "return", "[", "]", "running", "=", "len", "(", "i", ")", "logger", ".", "info", "(", "'%d instance/s are running.'", ",", "running", ")", "logger", ".", "info", "(", "'Scaling down %d instances of those.'", ",", "count", ")", "if", "count", ">", "running", ":", "logger", ".", "info", "(", "'Scale-down value is > than running instance/s - using maximum of %d!'", ",", "running", ")", "count", "=", "running", "return", "i", "[", ":", "count", "]" ]
Return a list of |count| last created instances by launch time.

:param instances: A list of instances.
:type instances: list
:param count: Number of instances to scale down.
:type count: integer
:return: List of instances to be scaled down.
:rtype: list
[ "Return", "a", "list", "of", "|count|", "last", "created", "instances", "by", "launch", "time", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L296-L315
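Because launch_time is an ISO-8601 string, lexicographic order matches chronological order, which is what the reverse sort relies on. A minimal illustration with stand-ins for boto instances:

from collections import namedtuple

FakeInstance = namedtuple('FakeInstance', 'id launch_time')
fleet = [
    FakeInstance('i-1', '2019-01-01T10:00:00.000Z'),
    FakeInstance('i-2', '2019-01-01T12:00:00.000Z'),
    FakeInstance('i-3', '2019-01-01T11:00:00.000Z'),
]
newest_first = sorted(fleet, key=lambda i: i.launch_time, reverse=True)
print([i.id for i in newest_first[:2]])  # ['i-2', 'i-3'] -- the two newest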
4,086
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager._configure_ebs_volume
def _configure_ebs_volume(self, vol_type, name, size, delete_on_termination):
    """Sets the desired root EBS size, otherwise the default EC2 value is used.

    :param vol_type: Type of EBS storage - gp2 (SSD), io1 or standard (magnetic)
    :type vol_type: str

    :param size: Desired root EBS size.
    :type size: int

    :param delete_on_termination: Toggle this flag to delete EBS volume on termination.
    :type delete_on_termination: bool

    :return: A BlockDeviceMapping object.
    :rtype: object
    """
    # From GitHub boto docs: http://git.io/veyDv
    root_dev = boto.ec2.blockdevicemapping.BlockDeviceType()
    root_dev.delete_on_termination = delete_on_termination
    root_dev.volume_type = vol_type
    if size != 'default':
        root_dev.size = size  # change root volume to desired size
    bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    bdm[name] = root_dev
    return bdm
python
def _configure_ebs_volume(self, vol_type, name, size, delete_on_termination):
    """Sets the desired root EBS size, otherwise the default EC2 value is used.

    :param vol_type: Type of EBS storage - gp2 (SSD), io1 or standard (magnetic)
    :type vol_type: str

    :param size: Desired root EBS size.
    :type size: int

    :param delete_on_termination: Toggle this flag to delete EBS volume on termination.
    :type delete_on_termination: bool

    :return: A BlockDeviceMapping object.
    :rtype: object
    """
    # From GitHub boto docs: http://git.io/veyDv
    root_dev = boto.ec2.blockdevicemapping.BlockDeviceType()
    root_dev.delete_on_termination = delete_on_termination
    root_dev.volume_type = vol_type
    if size != 'default':
        root_dev.size = size  # change root volume to desired size
    bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    bdm[name] = root_dev
    return bdm
[ "def", "_configure_ebs_volume", "(", "self", ",", "vol_type", ",", "name", ",", "size", ",", "delete_on_termination", ")", ":", "# From GitHub boto docs: http://git.io/veyDv", "root_dev", "=", "boto", ".", "ec2", ".", "blockdevicemapping", ".", "BlockDeviceType", "(", ")", "root_dev", ".", "delete_on_termination", "=", "delete_on_termination", "root_dev", ".", "volume_type", "=", "vol_type", "if", "size", "!=", "'default'", ":", "root_dev", ".", "size", "=", "size", "# change root volume to desired size", "bdm", "=", "boto", ".", "ec2", ".", "blockdevicemapping", ".", "BlockDeviceMapping", "(", ")", "bdm", "[", "name", "]", "=", "root_dev", "return", "bdm" ]
Sets the desired root EBS size, otherwise the default EC2 value is used.

:param vol_type: Type of EBS storage - gp2 (SSD), io1 or standard (magnetic)
:type vol_type: str
:param size: Desired root EBS size.
:type size: int
:param delete_on_termination: Toggle this flag to delete EBS volume on termination.
:type delete_on_termination: bool
:return: A BlockDeviceMapping object.
:rtype: object
[ "Sets", "the", "desired", "root", "EBS", "size", "otherwise", "the", "default", "EC2", "value", "is", "used", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L337-L357
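The same mapping can be built standalone with classic boto; the device name is AMI-specific and '/dev/sda1' below is only a common placeholder:

import boto.ec2.blockdevicemapping as blockdevicemapping

root_dev = blockdevicemapping.BlockDeviceType()
root_dev.volume_type = 'gp2'
root_dev.size = 100                   # GB; omit to keep the AMI default
root_dev.delete_on_termination = True

bdm = blockdevicemapping.BlockDeviceMapping()
bdm['/dev/sda1'] = root_dev
# bdm can now be passed as block_device_map to run_instances()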
4,087
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.stop
def stop(self, instances, count=0):
    """Stop each provided running instance.

    :param count:
    :param instances: A list of instances.
    :type instances: list
    """
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.stop_instances([i.id for i in instances])
python
def stop(self, instances, count=0):
    """Stop each provided running instance.

    :param count:
    :param instances: A list of instances.
    :type instances: list
    """
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.stop_instances([i.id for i in instances])
[ "def", "stop", "(", "self", ",", "instances", ",", "count", "=", "0", ")", ":", "if", "not", "instances", ":", "return", "if", "count", ">", "0", ":", "instances", "=", "self", ".", "_scale_down", "(", "instances", ",", "count", ")", "self", ".", "ec2", ".", "stop_instances", "(", "[", "i", ".", "id", "for", "i", "in", "instances", "]", ")" ]
Stop each provided running instance.

:param count:
:param instances: A list of instances.
:type instances: list
[ "Stop", "each", "provided", "running", "instance", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L359-L370
4,088
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.terminate
def terminate(self, instances, count=0):
    """Terminate each provided running or stopped instance.

    :param count:
    :param instances: A list of instances.
    :type instances: list
    """
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.terminate_instances([i.id for i in instances])
python
def terminate(self, instances, count=0):
    """Terminate each provided running or stopped instance.

    :param count:
    :param instances: A list of instances.
    :type instances: list
    """
    if not instances:
        return
    if count > 0:
        instances = self._scale_down(instances, count)
    self.ec2.terminate_instances([i.id for i in instances])
[ "def", "terminate", "(", "self", ",", "instances", ",", "count", "=", "0", ")", ":", "if", "not", "instances", ":", "return", "if", "count", ">", "0", ":", "instances", "=", "self", ".", "_scale_down", "(", "instances", ",", "count", ")", "self", ".", "ec2", ".", "terminate_instances", "(", "[", "i", ".", "id", "for", "i", "in", "instances", "]", ")" ]
Terminate each provided running or stopped instance.

:param count:
:param instances: A list of instances.
:type instances: list
[ "Terminate", "each", "provided", "running", "or", "stopped", "instance", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L372-L383
4,089
MozillaSecurity/laniakea
laniakea/core/providers/ec2/manager.py
EC2Manager.find
def find(self, instance_ids=None, filters=None):
    """Flatten list of reservations to a list of instances.

    :param instance_ids: A list of instance ids to filter by
    :type instance_ids: list

    :param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9
    :type filters: dict

    :return: A flattened list of filtered instances.
    :rtype: list
    """
    instances = []
    reservations = self.retry_on_ec2_error(self.ec2.get_all_instances, instance_ids=instance_ids, filters=filters)
    for reservation in reservations:
        instances.extend(reservation.instances)
    return instances
python
def find(self, instance_ids=None, filters=None):
    """Flatten list of reservations to a list of instances.

    :param instance_ids: A list of instance ids to filter by
    :type instance_ids: list

    :param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9
    :type filters: dict

    :return: A flattened list of filtered instances.
    :rtype: list
    """
    instances = []
    reservations = self.retry_on_ec2_error(self.ec2.get_all_instances, instance_ids=instance_ids, filters=filters)
    for reservation in reservations:
        instances.extend(reservation.instances)
    return instances
[ "def", "find", "(", "self", ",", "instance_ids", "=", "None", ",", "filters", "=", "None", ")", ":", "instances", "=", "[", "]", "reservations", "=", "self", ".", "retry_on_ec2_error", "(", "self", ".", "ec2", ".", "get_all_instances", ",", "instance_ids", "=", "instance_ids", ",", "filters", "=", "filters", ")", "for", "reservation", "in", "reservations", ":", "instances", ".", "extend", "(", "reservation", ".", "instances", ")", "return", "instances" ]
Flatten list of reservations to a list of instances.

:param instance_ids: A list of instance ids to filter by
:type instance_ids: list
:param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9
:type filters: dict
:return: A flattened list of filtered instances.
:rtype: list
[ "Flatten", "list", "of", "reservations", "to", "a", "list", "of", "instances", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L385-L399
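A hedged usage sketch: the filters dict is forwarded untouched to get_all_instances, so keys follow the EC2 DescribeInstances filter names; `manager` is an assumed EC2Manager instance:

running_fuzzers = manager.find(filters={
    'tag:Name': 'fuzzer',
    'instance-state-name': 'running',
})
for instance in running_fuzzers:
    print(instance.id, instance.ip_address)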
4,090
MozillaSecurity/laniakea
laniakea/core/common.py
ModuleLoader.load
def load(self, root, module_path, pkg_name):
    """Load modules dynamically.
    """
    root = os.path.join(root, module_path)
    import_name = os.path.join(pkg_name, module_path).replace(os.sep, '.')
    for (_, name, _) in pkgutil.iter_modules([root]):
        self.modules[name] = import_module('.' + name, package=import_name)
    return self.modules
python
def load(self, root, module_path, pkg_name):
    """Load modules dynamically.
    """
    root = os.path.join(root, module_path)
    import_name = os.path.join(pkg_name, module_path).replace(os.sep, '.')
    for (_, name, _) in pkgutil.iter_modules([root]):
        self.modules[name] = import_module('.' + name, package=import_name)
    return self.modules
[ "def", "load", "(", "self", ",", "root", ",", "module_path", ",", "pkg_name", ")", ":", "root", "=", "os", ".", "path", ".", "join", "(", "root", ",", "module_path", ")", "import_name", "=", "os", ".", "path", ".", "join", "(", "pkg_name", ",", "module_path", ")", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", "for", "(", "_", ",", "name", ",", "_", ")", "in", "pkgutil", ".", "iter_modules", "(", "[", "root", "]", ")", ":", "self", ".", "modules", "[", "name", "]", "=", "import_module", "(", "'.'", "+", "name", ",", "package", "=", "import_name", ")", "return", "self", ".", "modules" ]
Load modules dynamically.
[ "Load", "modules", "dynamically", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/common.py#L155-L162
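A simplified standalone version of the same pkgutil/import_module pattern, useful for seeing the mechanics outside the class:

import pkgutil
from importlib import import_module

def load_submodules(package_dir, package_name):
    """Import every module found directly under package_dir."""
    modules = {}
    for _, name, _ in pkgutil.iter_modules([package_dir]):
        modules[name] = import_module('.' + name, package=package_name)
    return modules

# e.g. load_submodules('laniakea/core/providers', 'laniakea.core.providers')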
4,091
MozillaSecurity/laniakea
laniakea/core/common.py
ModuleLoader.command_line_interfaces
def command_line_interfaces(self):
    """Return the CommandLine classes from each provider.
    """
    interfaces = []
    for _, module in self.modules.items():
        for entry in dir(module):
            if entry.endswith('CommandLine'):
                interfaces.append((module, entry))
    return interfaces
python
def command_line_interfaces(self):
    """Return the CommandLine classes from each provider.
    """
    interfaces = []
    for _, module in self.modules.items():
        for entry in dir(module):
            if entry.endswith('CommandLine'):
                interfaces.append((module, entry))
    return interfaces
[ "def", "command_line_interfaces", "(", "self", ")", ":", "interfaces", "=", "[", "]", "for", "_", ",", "module", "in", "self", ".", "modules", ".", "items", "(", ")", ":", "for", "entry", "in", "dir", "(", "module", ")", ":", "if", "entry", ".", "endswith", "(", "'CommandLine'", ")", ":", "interfaces", ".", "append", "(", "(", "module", ",", "entry", ")", ")", "return", "interfaces" ]
Return the CommandLine classes from each provider.
[ "Return", "the", "CommandLine", "classes", "from", "each", "provider", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/common.py#L164-L172
4,092
MozillaSecurity/laniakea
laniakea/core/common.py
Common.pluralize
def pluralize(item):
    """Nothing to see here.
    """
    assert isinstance(item, (int, list))
    if isinstance(item, int):
        return 's' if item > 1 else ''
    if isinstance(item, list):
        return 's' if len(item) > 1 else ''
    return ''
python
def pluralize(item):
    """Nothing to see here.
    """
    assert isinstance(item, (int, list))
    if isinstance(item, int):
        return 's' if item > 1 else ''
    if isinstance(item, list):
        return 's' if len(item) > 1 else ''
    return ''
[ "def", "pluralize", "(", "item", ")", ":", "assert", "isinstance", "(", "item", ",", "(", "int", ",", "list", ")", ")", "if", "isinstance", "(", "item", ",", "int", ")", ":", "return", "'s'", "if", "item", ">", "1", "else", "''", "if", "isinstance", "(", "item", ",", "list", ")", ":", "return", "'s'", "if", "len", "(", "item", ")", ">", "1", "else", "''", "return", "''" ]
Nothing to see here.
[ "Nothing", "to", "see", "here", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/common.py#L190-L198
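Since pluralize accepts either an int count or a list, a quick demonstration (assuming it is exposed as a staticmethod on Common, as its bare `item` signature suggests):

print('instance' + Common.pluralize(1))          # 'instance'
print('instance' + Common.pluralize(3))          # 'instances'
print('request' + Common.pluralize(['a', 'b']))  # 'requests'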
4,093
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketConfiguration.validate
def validate(self):
    """Perform some basic configuration validation.
    """
    if not self.conf.get('auth_token'):
        raise PacketManagerException('The auth token for Packet is not defined but required.')

    if not self.conf.get('projects'):
        raise PacketManagerException('Required "projects" section is missing.')

    projects = self.conf.get('projects')
    if not projects.keys():
        raise PacketManagerException('At least one project at Packet is required.')

    failure = False
    for project, identifier in projects.items():
        if not identifier:
            failure = True
            logging.error('Project "%s" has no valid identifier.', project)
    if failure:
        raise PacketManagerException('One or more projects are not setup appropriately.')
python
def validate(self):
    """Perform some basic configuration validation.
    """
    if not self.conf.get('auth_token'):
        raise PacketManagerException('The auth token for Packet is not defined but required.')

    if not self.conf.get('projects'):
        raise PacketManagerException('Required "projects" section is missing.')

    projects = self.conf.get('projects')
    if not projects.keys():
        raise PacketManagerException('At least one project at Packet is required.')

    failure = False
    for project, identifier in projects.items():
        if not identifier:
            failure = True
            logging.error('Project "%s" has no valid identifier.', project)
    if failure:
        raise PacketManagerException('One or more projects are not setup appropriately.')
[ "def", "validate", "(", "self", ")", ":", "if", "not", "self", ".", "conf", ".", "get", "(", "'auth_token'", ")", ":", "raise", "PacketManagerException", "(", "'The auth token for Packet is not defined but required.'", ")", "if", "not", "self", ".", "conf", ".", "get", "(", "'projects'", ")", ":", "raise", "PacketManagerException", "(", "'Required \"projects\" section is missing.'", ")", "projects", "=", "self", ".", "conf", ".", "get", "(", "'projects'", ")", "if", "not", "projects", ".", "keys", "(", ")", ":", "raise", "PacketManagerException", "(", "'At least one project at Packet is required.'", ")", "failure", "=", "False", "for", "project", ",", "identifier", "in", "projects", ".", "items", "(", ")", ":", "if", "not", "identifier", ":", "failure", "=", "True", "logging", ".", "error", "(", "'Project \"%s\" has no valid identifier.'", ",", "project", ")", "if", "failure", ":", "raise", "PacketManagerException", "(", "'One or more projects are not setup appropriately.'", ")" ]
Perform some basic configuration validation.
[ "Perform", "some", "basic", "configuration", "validation", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L31-L49
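A hedged sketch of a configuration dict that satisfies every check above; the key layout mirrors the conditions in validate(), and the identifiers are placeholders:

conf = {
    'auth_token': 'REDACTED-PACKET-TOKEN',
    'projects': {
        'fuzzing': '4b2c7350-0000-0000-0000-placeholder',
    },
}
# A missing auth_token, a missing or empty 'projects' section, or a project
# mapped to a falsy identifier would each raise PacketManagerException.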
4,094
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.print_projects
def print_projects(self, projects):
    """Print method for projects.
    """
    for project in projects:
        print('{}: {}'.format(project.name, project.id))
python
def print_projects(self, projects):
    """Print method for projects.
    """
    for project in projects:
        print('{}: {}'.format(project.name, project.id))
[ "def", "print_projects", "(", "self", ",", "projects", ")", ":", "for", "project", "in", "projects", ":", "print", "(", "'{}: {}'", ".", "format", "(", "project", ".", "name", ",", "project", ".", "id", ")", ")" ]
Print method for projects.
[ "Print", "method", "for", "projects", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L76-L80
4,095
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.print_operating_systems
def print_operating_systems(self, operating_systems):
    """Print method for operating systems.
    """
    for _os in operating_systems:
        print('{}: {}'.format(_os.name, _os.slug))
python
def print_operating_systems(self, operating_systems):
    """Print method for operating systems.
    """
    for _os in operating_systems:
        print('{}: {}'.format(_os.name, _os.slug))
[ "def", "print_operating_systems", "(", "self", ",", "operating_systems", ")", ":", "for", "_os", "in", "operating_systems", ":", "print", "(", "'{}: {}'", ".", "format", "(", "_os", ".", "name", ",", "_os", ".", "slug", ")", ")" ]
Print method for operating systems.
[ "Print", "method", "for", "operating", "systems", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L89-L93
4,096
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.print_plans
def print_plans(self, plans):
    """Print method for plans.
    """
    for plan in plans:
        print('Name: {} "{}" Price: {} USD'.format(plan.name, plan.slug, plan.pricing['hour']))
        self.pprint(plan.specs)
        print('\n')
python
def print_plans(self, plans):
    """Print method for plans.
    """
    for plan in plans:
        print('Name: {} "{}" Price: {} USD'.format(plan.name, plan.slug, plan.pricing['hour']))
        self.pprint(plan.specs)
        print('\n')
[ "def", "print_plans", "(", "self", ",", "plans", ")", ":", "for", "plan", "in", "plans", ":", "print", "(", "'Name: {} \"{}\" Price: {} USD'", ".", "format", "(", "plan", ".", "name", ",", "plan", ".", "slug", ",", "plan", ".", "pricing", "[", "'hour'", "]", ")", ")", "self", ".", "pprint", "(", "plan", ".", "specs", ")", "print", "(", "'\\n'", ")" ]
Print method for plans.
[ "Print", "method", "for", "plans", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L102-L108
4,097
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.print_facilities
def print_facilities(self, facilities):
    """Print method for facilities.
    """
    for facility in facilities:
        print('{} - ({}): {}'.format(facility.code, facility.name, ",".join(facility.features)))
python
def print_facilities(self, facilities):
    """Print method for facilities.
    """
    for facility in facilities:
        print('{} - ({}): {}'.format(facility.code, facility.name, ",".join(facility.features)))
[ "def", "print_facilities", "(", "self", ",", "facilities", ")", ":", "for", "facility", "in", "facilities", ":", "print", "(", "'{} - ({}): {}'", ".", "format", "(", "facility", ".", "code", ",", "facility", ".", "name", ",", "\",\"", ".", "join", "(", "facility", ".", "features", ")", ")", ")" ]
Print method for facilities.
[ "Print", "method", "for", "facilities", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L117-L121
4,098
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.list_devices
def list_devices(self, project_id, conditions=None, params=None):
    """Retrieve list of devices in a project by one or more conditions.
    """
    default_params = {'per_page': 1000}
    if params:
        default_params.update(params)
    data = self.api('projects/%s/devices' % project_id, params=default_params)
    devices = []
    for device in self.filter(conditions, data['devices']):
        devices.append(packet.Device(device, self.manager))
    return devices
python
def list_devices(self, project_id, conditions=None, params=None):
    """Retrieve list of devices in a project by one or more conditions.
    """
    default_params = {'per_page': 1000}
    if params:
        default_params.update(params)
    data = self.api('projects/%s/devices' % project_id, params=default_params)
    devices = []
    for device in self.filter(conditions, data['devices']):
        devices.append(packet.Device(device, self.manager))
    return devices
[ "def", "list_devices", "(", "self", ",", "project_id", ",", "conditions", "=", "None", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "'per_page'", ":", "1000", "}", "if", "params", ":", "default_params", ".", "update", "(", "params", ")", "data", "=", "self", ".", "api", "(", "'projects/%s/devices'", "%", "project_id", ",", "params", "=", "default_params", ")", "devices", "=", "[", "]", "for", "device", "in", "self", ".", "filter", "(", "conditions", ",", "data", "[", "'devices'", "]", ")", ":", "devices", ".", "append", "(", "packet", ".", "Device", "(", "device", ",", "self", ".", "manager", ")", ")", "return", "devices" ]
Retrieve list of devices in a project by one or more conditions.
[ "Retrieve", "list", "of", "devices", "in", "a", "project", "by", "one", "of", "more", "conditions", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L134-L144
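A hedged usage sketch: `conditions` is handed to self.filter() and `params` is merged over the per_page default; `manager` and the project identifier below are assumptions, not values from this record:

devices = manager.list_devices(
    project_id='4b2c7350-0000-0000-0000-placeholder',
    params={'per_page': 250},
)
manager.print_devices(devices)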
4,099
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
PacketManager.print_devices
def print_devices(self, devices):
    """Print method for devices.
    """
    for device in devices:
        print('ID: {} OS: {} IP: {} State: {} ({}) Tags: {}'
              .format(device.id,
                      device.operating_system.slug,
                      self.get_public_ip(device.ip_addresses),
                      device.state,
                      'spot' if device.spot_instance else 'on-demand',
                      device.tags))
python
def print_devices(self, devices):
    """Print method for devices.
    """
    for device in devices:
        print('ID: {} OS: {} IP: {} State: {} ({}) Tags: {}'
              .format(device.id,
                      device.operating_system.slug,
                      self.get_public_ip(device.ip_addresses),
                      device.state,
                      'spot' if device.spot_instance else 'on-demand',
                      device.tags))
[ "def", "print_devices", "(", "self", ",", "devices", ")", ":", "for", "device", "in", "devices", ":", "print", "(", "'ID: {} OS: {} IP: {} State: {} ({}) Tags: {}'", ".", "format", "(", "device", ".", "id", ",", "device", ".", "operating_system", ".", "slug", ",", "self", ".", "get_public_ip", "(", "device", ".", "ip_addresses", ")", ",", "device", ".", "state", ",", "'spot'", "if", "device", ".", "spot_instance", "else", "'on-demand'", ",", "device", ".", "tags", ")", ")" ]
Print method for devices.
[ "Print", "method", "for", "devices", "." ]
7e80adc6ae92c6c1332d4c08473bb271fb3b6833
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L146-L156