Please provide a description of the function:
def multiply(self, a, b):
    if a is None or b is None:
        return None
    # A is m x n, B is n x l (the flattened original read len(b[0]) for n, which is wrong)
    m, n, l = len(a), len(a[0]), len(b[0])
    if len(b) != n:
        raise Exception("A's column number must be equal to B's row number.")
    c = [[0 for _ in range(l)] for _ in range(m)]
    for i, row in enumerate(a):
        for k, eleA in enumerate(row):
            if eleA:
                for j, eleB in enumerate(b[k]):
                    if eleB:
                        c[i][j] += eleA * eleB
    return c
[ "\n :type A: List[List[int]]\n :type B: List[List[int]]\n :rtype: List[List[int]]\n " ]
Please provide a description of the function:
def multiply(self, a, b):
    if a is None or b is None:
        return None
    # A is m x n (the flattened original read len(b[0]) for n, which is wrong)
    m, n = len(a), len(a[0])
    if len(b) != n:
        raise Exception("A's column number must be equal to B's row number.")
    l = len(b[0])
    table_a, table_b = {}, {}
    for i, row in enumerate(a):
        for j, ele in enumerate(row):
            if ele:
                if i not in table_a:
                    table_a[i] = {}
                table_a[i][j] = ele
    for i, row in enumerate(b):
        for j, ele in enumerate(row):
            if ele:
                if i not in table_b:
                    table_b[i] = {}
                table_b[i][j] = ele
    c = [[0 for j in range(l)] for i in range(m)]
    for i in table_a:
        for k in table_a[i]:
            if k not in table_b:
                continue
            for j in table_b[k]:
                c[i][j] += table_a[i][k] * table_b[k][j]
    return c
[ "\n :type A: List[List[int]]\n :type B: List[List[int]]\n :rtype: List[List[int]]\n " ]
Please provide a description of the function:
def bitonic_sort(arr, reverse=False):
    def compare(arr, reverse):
        n = len(arr) // 2
        for i in range(n):
            if reverse != (arr[i] > arr[i + n]):
                arr[i], arr[i + n] = arr[i + n], arr[i]
        return arr

    def bitonic_merge(arr, reverse):
        n = len(arr)
        if n <= 1:
            return arr
        arr = compare(arr, reverse)
        left = bitonic_merge(arr[:n // 2], reverse)
        right = bitonic_merge(arr[n // 2:], reverse)
        return left + right
    # end of helper definitions (compare and bitonic_merge)

    n = len(arr)
    if n <= 1:
        return arr
    # checks that n is a power of two
    if not (n and (not (n & (n - 1)))):
        raise ValueError("the size of input should be power of two")
    left = bitonic_sort(arr[:n // 2], True)
    right = bitonic_sort(arr[n // 2:], False)
    arr = bitonic_merge(left + right, reverse)
    return arr
[ "\n bitonic sort is sorting algorithm to use multiple process, but this code not containing parallel process\n It can sort only array that sizes power of 2\n It can sort array in both increasing order and decreasing order by giving argument true(increasing) and false(decreasing)\n \n Worst-case in parallel: O(log(n)^2)\n Worst-case in non-parallel: O(nlog(n)^2)\n \n reference: https://en.wikipedia.org/wiki/Bitonic_sorter\n " ]
Please provide a description of the function:
def scc(graph):
    ''' Computes the strongly connected components of a graph '''
    # add_edge, dfs and dfs_transposed are helpers defined elsewhere in the module
    order = []
    vis = {vertex: False for vertex in graph}
    graph_transposed = {vertex: [] for vertex in graph}

    for (v, neighbours) in graph.items():  # iteritems() in the Python 2 original
        for u in neighbours:
            add_edge(graph_transposed, u, v)

    for v in graph:
        if not vis[v]:
            dfs_transposed(v, graph_transposed, order, vis)

    vis = {vertex: False for vertex in graph}
    vertex_scc = {}

    current_comp = 0
    for v in reversed(order):
        if not vis[v]:
            # Each dfs will visit exactly one component
            dfs(v, current_comp, vertex_scc, graph, vis)
            current_comp += 1

    return vertex_scc
[]
Please provide a description of the function:
def build_graph(formula):
    ''' Builds the implication graph from the formula '''
    graph = {}

    for clause in formula:
        for (lit, _) in clause:
            for neg in [False, True]:
                graph[(lit, neg)] = []

    for ((a_lit, a_neg), (b_lit, b_neg)) in formula:
        add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg))
        add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg))

    return graph
[]
Please provide a description of the function:
def unique_array_sum_combinations(A, B, C, target):
    # requires module-level imports: import itertools; from functools import partial
    def check_sum(n, *nums):
        if sum(x for x in nums) == n:
            return (True, nums)
        else:
            return (False, nums)

    pro = itertools.product(A, B, C)
    func = partial(check_sum, target)
    sums = list(itertools.starmap(func, pro))

    res = set()
    for s in sums:
        if s[0] is True and s[1] not in res:
            res.add(s[1])

    return list(res)
[ "\n 1. Sort all the arrays - a,b,c. - This improves average time complexity.\n 2. If c[i] < Sum, then look for Sum - c[i] in array a and b.\n When pair found, insert c[i], a[j] & b[k] into the result list.\n This can be done in O(n).\n 3. Keep on doing the above procedure while going through complete c array.\n\n Complexity: O(n(m+p))\n " ]
Please provide a description of the function:
def is_bst(root):
    stack = []
    pre = None

    while root or stack:
        while root:
            stack.append(root)
            root = root.left
        root = stack.pop()
        if pre and root.val <= pre.val:
            return False
        pre = root
        root = root.right

    return True
[ "\n :type root: TreeNode\n :rtype: bool\n " ]
Please provide a description of the function:
def __get_depth(root):
    if root is None:
        return 0
    left = __get_depth(root.left)
    right = __get_depth(root.right)
    if abs(left - right) > 1 or -1 in [left, right]:
        return -1
    return 1 + max(left, right)
[ "\n return 0 if unbalanced else depth + 1\n " ]
Please provide a description of the function:
def copy_random_pointer_v1(head):
    dic = dict()
    m = n = head
    while m:
        dic[m] = RandomListNode(m.label)
        m = m.next
    while n:
        dic[n].next = dic.get(n.next)
        dic[n].random = dic.get(n.random)
        n = n.next
    return dic.get(head)
[ "\n :type head: RandomListNode\n :rtype: RandomListNode\n " ]
Please provide a description of the function:
def copy_random_pointer_v2(head):
    # requires module-level import: from collections import defaultdict
    copy = defaultdict(lambda: RandomListNode(0))
    copy[None] = None
    node = head
    while node:
        copy[node].label = node.label
        copy[node].next = copy[node.next]
        copy[node].random = copy[node.random]
        node = node.next
    return copy[head]
[ "\n :type head: RandomListNode\n :rtype: RandomListNode\n " ]
Please provide a description of the function:
def get_factors(n):
    def factor(n, i, combi, res):
        while i * i <= n:
            if n % i == 0:
                res.append(combi + [i, n // i])
                # integer division keeps the recursion on ints
                # (the original used n/i, which yields floats in Python 3)
                factor(n // i, i, combi + [i], res)
            i += 1
        return res

    return factor(n, 2, [], [])
[ "[summary]\n \n Arguments:\n n {[int]} -- [to analysed number]\n \n Returns:\n [list of lists] -- [all factors of the number n]\n ", "[summary]\n helper function\n\n Arguments:\n n {[int]} -- [number]\n i {[int]} -- [to tested divisor]\n combi {[list]} -- [catch divisors]\n res {[list]} -- [all factors of the number n]\n \n Returns:\n [list] -- [res]\n " ]
Please provide a description of the function:
def get_factors_iterative1(n):
    todo, res = [(n, 2, [])], []
    while todo:
        n, i, combi = todo.pop()
        while i * i <= n:
            if n % i == 0:
                res.append(combi + [i, n // i])
                todo.append((n // i, i, combi + [i]))
            i += 1
    return res
[ "[summary]\n Computes all factors of n.\n Translated the function get_factors(...) in\n a call-stack modell.\n\n Arguments:\n n {[int]} -- [to analysed number]\n \n Returns:\n [list of lists] -- [all factors]\n " ]
Please provide a description of the function:
def get_factors_iterative2(n):
    ans, stack, x = [], [], 2
    while True:
        if x > n // x:
            if not stack:
                return ans
            ans.append(stack + [n])
            x = stack.pop()
            n *= x
            x += 1
        elif n % x == 0:
            stack.append(x)
            n //= x
        else:
            x += 1
[ "[summary]\n analog as above\n\n Arguments:\n n {[int]} -- [description]\n \n Returns:\n [list of lists] -- [all factors of n]\n " ]
Please provide a description of the function:
def longest_increasing_subsequence(sequence):
    length = len(sequence)
    counts = [1 for _ in range(length)]
    for i in range(1, length):
        for j in range(0, i):
            if sequence[i] > sequence[j]:
                counts[i] = max(counts[i], counts[j] + 1)
    # a stray debug print(counts) in the original has been removed
    return max(counts)
[ "\n Dynamic Programming Algorithm for\n counting the length of longest increasing subsequence\n type sequence: List[int]\n " ]
Please provide a description of the function:
def single_number3(nums):
    # isolate a^b from pairs using XOR
    ab = 0
    for n in nums:
        ab ^= n

    # isolate right most bit from a^b
    right_most = ab & (-ab)

    # isolate a and b from a^b
    a, b = 0, 0
    for n in nums:
        if n & right_most:
            a ^= n
        else:
            b ^= n
    return [a, b]
[ "\n :type nums: List[int]\n :rtype: List[int]\n " ]
Please provide a description of the function:
def distance(x, y):
    # requires module-level import: import math
    assert len(x) == len(y), "The vector must have same length"
    result = ()
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for i in range(len(x)):
        result += (x[i] - y[i],)
    for component in result:
        total += component ** 2
    return math.sqrt(total)
[ "[summary]\n HELPER-FUNCTION\n calculates the (eulidean) distance between vector x and y.\n\n Arguments:\n x {[tuple]} -- [vector]\n y {[tuple]} -- [vector]\n " ]
Please provide a description of the function:
def nearest_neighbor(x, tSet):
    assert isinstance(x, tuple) and isinstance(tSet, dict)
    current_key = ()
    min_d = float('inf')
    for key in tSet:
        d = distance(x, key)
        if d < min_d:
            min_d = d
            current_key = key
    return tSet[current_key]
[ "[summary]\n Implements the nearest neighbor algorithm\n\n Arguments:\n x {[tupel]} -- [vector]\n tSet {[dict]} -- [training set]\n\n Returns:\n [type] -- [result of the AND-function]\n " ]
Please provide a description of the function:
def is_strobogrammatic(num):
    comb = "00 11 88 69 96"
    i = 0
    j = len(num) - 1
    while i <= j:
        x = comb.find(num[i] + num[j])
        if x == -1:
            return False
        i += 1
        j -= 1
    return True
[ "\n :type num: str\n :rtype: bool\n " ]
Please provide a description of the function:
def merge_sort(arr):
    # Our recursive base case
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    # Perform merge_sort recursively on both halves
    left, right = merge_sort(arr[:mid]), merge_sort(arr[mid:])

    # Merge each side together
    return merge(left, right, arr.copy())
[ " Merge Sort\n Complexity: O(n log(n))\n " ]
Please provide a description of the function:
def merge(left, right, merged):
    left_cursor, right_cursor = 0, 0
    while left_cursor < len(left) and right_cursor < len(right):
        # Sort each one and place into the result
        if left[left_cursor] <= right[right_cursor]:
            merged[left_cursor + right_cursor] = left[left_cursor]
            left_cursor += 1
        else:
            merged[left_cursor + right_cursor] = right[right_cursor]
            right_cursor += 1

    # Add the left overs if there's any left to the result
    for left_cursor in range(left_cursor, len(left)):
        merged[left_cursor + right_cursor] = left[left_cursor]
    for right_cursor in range(right_cursor, len(right)):
        merged[left_cursor + right_cursor] = right[right_cursor]

    # Return result
    return merged
[ " Merge helper\n Complexity: O(n)\n " ]
Please provide a description of the function:
def bucket_sort(arr):
    ''' Bucket Sort
        Complexity: O(n^2)
        The complexity is dominated by next_sort
    '''
    # The number of buckets and make buckets
    num_buckets = len(arr)
    buckets = [[] for bucket in range(num_buckets)]

    # Assign values into bucket_sort
    for value in arr:
        index = value * num_buckets // (max(arr) + 1)
        buckets[index].append(value)

    # Sort each bucket; next_sort is a helper defined elsewhere in the module
    # (typically an insertion sort applied per bucket)
    sorted_list = []
    for i in range(num_buckets):
        sorted_list.extend(next_sort(buckets[i]))
    return sorted_list
[]
Please provide a description of the function:
def k_closest(points, k, origin=(0, 0)):
    # Time: O(k+(n-k)logk)
    # Space: O(k)
    # requires: from heapq import heapify, heappushpop, plus a distance() helper
    heap = [(-distance(p, origin), p) for p in points[:k]]
    heapify(heap)

    for p in points[k:]:
        d = distance(p, origin)
        heappushpop(heap, (-d, p))  # heappushpop does conditional check

    return [p for nd, p in heap]
[ "Initialize max heap with first k points.\n Python does not support a max heap; thus we can use the default min heap where the keys (distance) are negated.\n ", "\n For every point p in points[k:],\n check if p is smaller than the root of the max heap;\n if it is, add p to heap and remove root. Reheapify.\n ", "Same as:\n if d < -heap[0][0]:\n heappush(heap, (-d,p))\n heappop(heap)\n\n Note: heappushpop is more efficient than separate push and pop calls.\n Each heappushpop call takes O(logk) time.\n " ]
Please provide a description of the function:
def reverse_list(head):
    if not head or not head.next:
        return head
    prev = None
    while head:
        current = head
        head = head.next
        current.next = prev
        prev = current
    return prev
[ "\n :type head: ListNode\n :rtype: ListNode\n " ]
Please provide a description of the function:
def reverse_list_recursive(head):
    if head is None or head.next is None:
        return head
    p = head.next
    head.next = None
    revrest = reverse_list_recursive(p)
    p.next = head
    return revrest
[ "\n :type head: ListNode\n :rtype: ListNode\n " ]
Please provide a description of the function:
def has_path_sum(root, sum):
    if root is None:
        return False
    if root.left is None and root.right is None and root.val == sum:
        return True
    sum -= root.val
    return has_path_sum(root.left, sum) or has_path_sum(root.right, sum)
[ "\n :type root: TreeNode\n :type sum: int\n :rtype: bool\n " ]
Please provide a description of the function:
def int_to_base(n, base):
    # requires module-level import: import string
    is_negative = False
    if n == 0:
        return '0'
    elif n < 0:
        is_negative = True
        n *= -1
    digit = string.digits + string.ascii_uppercase
    res = ''
    while n > 0:
        res += digit[n % base]
        n //= base
    if is_negative:
        return '-' + res[::-1]
    else:
        return res[::-1]
[ "\n :type n: int\n :type base: int\n :rtype: str\n " ]
Please provide a description of the function:
def base_to_int(s, base):
    # requires module-level import: import string
    digit = {}
    for i, c in enumerate(string.digits + string.ascii_uppercase):
        digit[c] = i
    multiplier = 1
    res = 0
    for c in s[::-1]:
        res += digit[c] * multiplier
        multiplier *= base
    return res
[ "\n Note : You can use int() built-in function instread of this.\n :type s: str\n :type base: int\n :rtype: int\n " ]
Please provide a description of the function:
def is_cyclic(head):
    if not head:
        return False
    runner = head
    walker = head
    while runner.next and runner.next.next:
        runner = runner.next.next
        walker = walker.next
        if runner == walker:
            return True
    return False
[ "\n :type head: Node\n :rtype: bool\n " ]
Please provide a description of the function:
def decode_string(s):
    stack = []
    cur_num = 0
    cur_string = ''
    for c in s:
        if c == '[':
            stack.append((cur_string, cur_num))
            cur_string = ''
            cur_num = 0
        elif c == ']':
            prev_string, num = stack.pop()
            cur_string = prev_string + num * cur_string
        elif c.isdigit():
            cur_num = cur_num * 10 + int(c)
        else:
            cur_string += c
    return cur_string
[ "\n :type s: str\n :rtype: str\n " ]
Please provide a description of the function:
def palindromic_substrings_iter(s):
    if not s:
        yield []
        return
    for i in range(len(s), 0, -1):
        sub = s[:i]
        if sub == sub[::-1]:
            for rest in palindromic_substrings_iter(s[i:]):
                yield [sub] + rest
[ "\n A slightly more Pythonic approach with a recursive generator\n " ]
Please provide a description of the function:
def is_isomorphic(s, t):
    if len(s) != len(t):
        return False
    mapping = {}  # renamed from `dict` to avoid shadowing the builtin
    set_value = set()
    for i in range(len(s)):
        if s[i] not in mapping:
            if t[i] in set_value:
                return False
            mapping[s[i]] = t[i]
            set_value.add(t[i])
        else:
            if mapping[s[i]] != t[i]:
                return False
    return True
[ "\n :type s: str\n :type t: str\n :rtype: bool\n " ]
Please provide a description of the function:
def calc(n2, n1, operator):
    if operator == '-':
        return n1 - n2
    elif operator == '+':
        return n1 + n2
    elif operator == '*':
        return n1 * n2
    elif operator == '/':
        return n1 / n2
    elif operator == '^':
        return n1 ** n2
    return 0
[ "\r\n Calculate operation result\r\n\r\n n2 Number: Number 2\r\n n1 Number: Number 1\r\n operator Char: Operation to calculate\r\n " ]
Please provide a description of the function:
def apply_operation(op_stack, out_stack):
    out_stack.append(calc(out_stack.pop(), out_stack.pop(), op_stack.pop()))
[ "\r\n Apply operation to the first 2 items of the output queue\r\n\r\n op_stack Deque (reference)\r\n out_stack Deque (reference)\r\n " ]
Please provide a description of the function:
def parse(expression):
    # __operators__ and __parenthesis__ are module-level constants
    result = []
    current = ""
    for i in expression:
        if i.isdigit() or i == '.':
            current += i
        else:
            if len(current) > 0:
                result.append(current)
                current = ""
            if i in __operators__ or i in __parenthesis__:
                result.append(i)
            else:
                raise Exception("invalid syntax " + i)
    if len(current) > 0:
        result.append(current)
    return result
[ "\r\n Return array of parsed tokens in the expression\r\n\r\n expression String: Math expression to parse in infix notation\r\n " ]
Please provide a description of the function:
def evaluate(expression):
    # requires: from collections import deque, plus the module helpers
    # numeric_value (a compiled regex), is_operator and higher_priority
    op_stack = deque()   # operator stack
    out_stack = deque()  # output stack (values)
    tokens = parse(expression)  # calls the function only once!
    for token in tokens:
        if numeric_value.match(token):
            out_stack.append(float(token))
        elif token == '(':
            op_stack.append(token)
        elif token == ')':
            while len(op_stack) > 0 and op_stack[-1] != '(':
                apply_operation(op_stack, out_stack)
            op_stack.pop()  # Remove remaining '('
        else:  # is_operator(token)
            while len(op_stack) > 0 and is_operator(op_stack[-1]) and higher_priority(op_stack[-1], token):
                apply_operation(op_stack, out_stack)
            op_stack.append(token)
    while len(op_stack) > 0:
        apply_operation(op_stack, out_stack)
    return out_stack[-1]
[ "\r\n Calculate result of expression\r\n\r\n expression String: The expression\r\n type Type (optional): Number type [int, float]\r\n " ]
Please provide a description of the function:
def main():
    print("\t\tCalculator\n\n")
    while True:
        user_input = input("expression or exit: ")
        if user_input == "exit":
            break
        try:
            print("The result is {0}".format(evaluate(user_input)))
        except Exception:
            print("invalid syntax!")
            user_input = input("expression or exit: ")
    print("program end")
[ "\r\n simple user-interface\r\n " ]
Please provide a description of the function:
def closest_value(root, target):
    a = root.val
    kid = root.left if target < a else root.right
    if not kid:
        return a
    b = closest_value(kid, target)
    return min((a, b), key=lambda x: abs(target - x))
[ "\n :type root: TreeNode\n :type target: float\n :rtype: int\n " ]
Please provide a description of the function:
def get_primes(n):
    if n <= 0:
        raise ValueError("'n' must be a positive integer.")
    # If x is even, exclude x from list (-1):
    sieve_size = (n // 2 - 1) if n % 2 == 0 else (n // 2)
    sieve = [True for _ in range(sieve_size)]  # Sieve
    primes = []  # List of Primes
    if n >= 2:
        primes.append(2)  # 2 is prime by default
    for i in range(sieve_size):
        if sieve[i]:
            value_at_i = i * 2 + 3
            primes.append(value_at_i)
            for j in range(i, sieve_size, value_at_i):
                sieve[j] = False
    return primes
[ "Return list of all primes less than n,\n Using sieve of Eratosthenes.\n " ]
Please provide a description of the function:
def permute(elements):
    if len(elements) <= 1:
        return [elements]
    else:
        tmp = []
        for perm in permute(elements[1:]):
            for i in range(len(elements)):
                tmp.append(perm[:i] + elements[0:1] + perm[i:])
        return tmp
[ "\n returns a list with the permuations.\n " ]
Please provide a description of the function:
def permute_iter(elements):
    if len(elements) <= 1:
        yield elements
    else:
        for perm in permute_iter(elements[1:]):
            for i in range(len(elements)):
                yield perm[:i] + elements[0:1] + perm[i:]
[ "\n iterator: returns a perumation by each call.\n " ]
Please provide a description of the function:
def extended_gcd(a, b):
    old_s, s = 1, 0
    old_t, t = 0, 1
    old_r, r = a, b

    while r != 0:
        quotient = old_r // r  # integer division ('/' in the original yields floats in Python 3)
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t

    return old_s, old_t, old_r
[ "Extended GCD algorithm.\n Return s, t, g\n such that a * s + b * t = GCD(a, b)\n and s and t are co-prime.\n " ]
Please provide a description of the function:
def bin_tree_to_list(root):
    if not root:
        return root
    root = bin_tree_to_list_util(root)  # helper defined elsewhere in the module
    while root.left:
        root = root.left
    return root
[ "\n type root: root class\n " ]
Please provide a description of the function:
def add_operators(num, target):
    def dfs(res, path, num, target, pos, prev, multed):
        if pos == len(num):
            if target == prev:
                res.append(path)
            return
        for i in range(pos, len(num)):
            if i != pos and num[pos] == '0':  # all digits have to be used
                break
            cur = int(num[pos:i + 1])
            if pos == 0:
                dfs(res, path + str(cur), num, target, i + 1, cur, cur)
            else:
                dfs(res, path + "+" + str(cur), num, target, i + 1, prev + cur, cur)
                dfs(res, path + "-" + str(cur), num, target, i + 1, prev - cur, -cur)
                dfs(res, path + "*" + str(cur), num, target, i + 1,
                    prev - multed + multed * cur, multed * cur)

    res = []
    if not num:
        return res
    dfs(res, "", num, target, 0, 0, 0)
    return res
[ "\n :type num: str\n :type target: int\n :rtype: List[str]\n " ]
Please provide a description of the function:
def _init_rabit():
    if _LIB is not None:
        _LIB.RabitGetRank.restype = ctypes.c_int
        _LIB.RabitGetWorldSize.restype = ctypes.c_int
        _LIB.RabitIsDistributed.restype = ctypes.c_int
        _LIB.RabitVersionNumber.restype = ctypes.c_int
[ "internal library initializer." ]
Please provide a description of the function:
def init(args=None):
    if args is None:
        args = []
    arr = (ctypes.c_char_p * len(args))()
    arr[:] = args
    _LIB.RabitInit(len(arr), arr)
[ "Initialize the rabit library with arguments" ]
Please provide a description of the function:
def tracker_print(msg):
    if not isinstance(msg, STRING_TYPES):
        msg = str(msg)
    is_dist = _LIB.RabitIsDistributed()
    if is_dist != 0:
        _LIB.RabitTrackerPrint(c_str(msg))
    else:
        sys.stdout.write(msg)
        sys.stdout.flush()
[ "Print message to the tracker.\n\n This function can be used to communicate the information of\n the progress to the tracker\n\n Parameters\n ----------\n msg : str\n The message to be printed to tracker.\n " ]
Please provide a description of the function:
def get_processor_name():
    mxlen = 256
    length = ctypes.c_ulong()
    buf = ctypes.create_string_buffer(mxlen)
    _LIB.RabitGetProcessorName(buf, ctypes.byref(length), mxlen)
    return buf.value
[ "Get the processor name.\n\n Returns\n -------\n name : str\n the name of processor(host)\n " ]
Please provide a description of the function:
def broadcast(data, root):
    rank = get_rank()
    length = ctypes.c_ulong()
    if root == rank:
        assert data is not None, 'need to pass in data when broadcasting'
        s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
        length.value = len(s)
    # run first broadcast
    _LIB.RabitBroadcast(ctypes.byref(length),
                        ctypes.sizeof(ctypes.c_ulong), root)
    if root != rank:
        dptr = (ctypes.c_char * length.value)()
        # run second
        _LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
                            length.value, root)
        data = pickle.loads(dptr.raw)
        del dptr
    else:
        _LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
                            length.value, root)
        del s
    return data
[ "Broadcast object from one node to all other nodes.\n\n Parameters\n ----------\n data : any type that can be pickled\n Input data, if current rank does not equal root, this can be None\n root : int\n Rank of the node to broadcast data from.\n\n Returns\n -------\n object : int\n the result of broadcast.\n " ]
Please provide a description of the function:
def normpath(path):
    normalized = os.path.join(*path.split("/"))
    if os.path.isabs(path):
        return os.path.abspath("/") + normalized
    else:
        return normalized
[ "Normalize UNIX path to a native path." ]
Please provide a description of the function:
def _train_internal(params, dtrain,
                    num_boost_round=10, evals=(),
                    obj=None, feval=None,
                    xgb_model=None, callbacks=None):
    callbacks = [] if callbacks is None else callbacks

    evals = list(evals)
    if isinstance(params, dict) \
            and 'eval_metric' in params \
            and isinstance(params['eval_metric'], list):
        params = dict((k, v) for k, v in params.items())
        eval_metrics = params['eval_metric']
        params.pop("eval_metric", None)
        params = list(params.items())
        for eval_metric in eval_metrics:
            params += [('eval_metric', eval_metric)]

    bst = Booster(params, [dtrain] + [d[0] for d in evals])
    nboost = 0
    num_parallel_tree = 1

    if xgb_model is not None:
        if not isinstance(xgb_model, STRING_TYPES):
            xgb_model = xgb_model.save_raw()
        bst = Booster(params, [dtrain] + [d[0] for d in evals],
                      model_file=xgb_model)
        nboost = len(bst.get_dump())

    _params = dict(params) if isinstance(params, list) else params

    if 'num_parallel_tree' in _params:
        num_parallel_tree = _params['num_parallel_tree']
        nboost //= num_parallel_tree
    if 'num_class' in _params:
        nboost //= _params['num_class']

    # Distributed code: Load the checkpoint from rabit.
    version = bst.load_rabit_checkpoint()
    assert rabit.get_world_size() != 1 or version == 0
    rank = rabit.get_rank()
    start_iteration = int(version / 2)
    nboost += start_iteration

    callbacks_before_iter = [
        cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]
    callbacks_after_iter = [
        cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]

    for i in range(start_iteration, num_boost_round):
        for cb in callbacks_before_iter:
            cb(CallbackEnv(model=bst,
                           cvfolds=None,
                           iteration=i,
                           begin_iteration=start_iteration,
                           end_iteration=num_boost_round,
                           rank=rank,
                           evaluation_result_list=None))
        # Distributed code: need to resume to this point.
        # Skip the first update if it is a recovery step.
        if version % 2 == 0:
            bst.update(dtrain, i, obj)
            bst.save_rabit_checkpoint()
            version += 1

        assert rabit.get_world_size() == 1 or version == rabit.version_number()

        nboost += 1
        evaluation_result_list = []
        # check evaluation result.
        if evals:
            bst_eval_set = bst.eval_set(evals, i, feval)
            if isinstance(bst_eval_set, STRING_TYPES):
                msg = bst_eval_set
            else:
                msg = bst_eval_set.decode()
            res = [x.split(':') for x in msg.split()]
            evaluation_result_list = [(k, float(v)) for k, v in res[1:]]
        try:
            for cb in callbacks_after_iter:
                cb(CallbackEnv(model=bst,
                               cvfolds=None,
                               iteration=i,
                               begin_iteration=start_iteration,
                               end_iteration=num_boost_round,
                               rank=rank,
                               evaluation_result_list=evaluation_result_list))
        except EarlyStopException:
            break
        # do checkpoint after evaluation, in case evaluation also updates booster.
        bst.save_rabit_checkpoint()
        version += 1

    if bst.attr('best_score') is not None:
        bst.best_score = float(bst.attr('best_score'))
        bst.best_iteration = int(bst.attr('best_iteration'))
    else:
        bst.best_iteration = nboost - 1
    bst.best_ntree_limit = (bst.best_iteration + 1) * num_parallel_tree
    return bst
[ "internal training function" ]
Please provide a description of the function:
def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
          maximize=False, early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):
    # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init
    callbacks = [] if callbacks is None else callbacks

    # Most of legacy advanced options becomes callbacks
    if isinstance(verbose_eval, bool) and verbose_eval:
        callbacks.append(callback.print_evaluation())
    else:
        if isinstance(verbose_eval, int):
            callbacks.append(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=bool(verbose_eval)))
    if evals_result is not None:
        callbacks.append(callback.record_evaluation(evals_result))

    if learning_rates is not None:
        warnings.warn("learning_rates parameter is deprecated - use callback API instead",
                      DeprecationWarning)
        callbacks.append(callback.reset_learning_rate(learning_rates))

    return _train_internal(params, dtrain,
                           num_boost_round=num_boost_round,
                           evals=evals,
                           obj=obj, feval=feval,
                           xgb_model=xgb_model, callbacks=callbacks)
[ "Train a booster with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round: int\n Number of boosting iterations.\n evals: list of pairs (DMatrix, string)\n List of items to be evaluated during training, this allows user to watch\n performance on the validation set.\n obj : function\n Customized objective function.\n feval : function\n Customized evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. Validation error needs to decrease at least\n every **early_stopping_rounds** round(s) to continue training.\n Requires at least one item in **evals**.\n If there's more than one, will use the last.\n Returns the model from the last iteration (not the best one).\n If early stopping occurs, the model will have three additional fields:\n ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.\n (Use ``bst.best_ntree_limit`` to get the correct value if\n ``num_parallel_tree`` and/or ``num_class`` appears in the parameters)\n evals_result: dict\n This dictionary stores the evaluation results of all the items in watchlist.\n\n Example: with a watchlist containing\n ``[(dtest,'eval'), (dtrain,'train')]`` and\n a parameter containing ``('eval_metric': 'logloss')``,\n the **evals_result** returns\n\n .. code-block:: python\n\n {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}\n\n verbose_eval : bool or int\n Requires at least one item in **evals**.\n If **verbose_eval** is True then the evaluation metric on the validation set is\n printed at each boosting stage.\n If **verbose_eval** is an integer then the evaluation metric on the validation set\n is printed at every given **verbose_eval** boosting stage. The last boosting stage\n / the boosting stage found by using **early_stopping_rounds** is also printed.\n Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric\n is printed every 4 boosting stages, instead of every boosting stage.\n learning_rates: list or function (deprecated - use callback API instead)\n List of learning rate for each boosting round\n or a customized function that calculates eta in terms of\n current number of round and the total number of boosting round (e.g. yields\n learning rate decay)\n xgb_model : file name of stored xgb model or 'Booster' instance\n Xgb model to be loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n\n Returns\n -------\n Booster : a trained booster model\n " ]
Please provide a description of the function:
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None, stratified=False,
            folds=None, shuffle=True):
    evals = list(evals)
    np.random.seed(seed)

    if stratified is False and folds is None:
        # Do standard k-fold cross validation
        if shuffle is True:
            idx = np.random.permutation(dall.num_row())
        else:
            idx = np.arange(dall.num_row())
        out_idset = np.array_split(idx, nfold)
        in_idset = [
            np.concatenate([out_idset[i] for i in range(nfold) if k != i])
            for k in range(nfold)
        ]
    elif folds is not None:
        # Use user specified custom split using indices
        try:
            in_idset = [x[0] for x in folds]
            out_idset = [x[1] for x in folds]
        except TypeError:
            # Custom stratification using Sklearn KFoldSplit object
            splits = list(folds.split(X=dall.get_label(), y=dall.get_label()))
            in_idset = [x[0] for x in splits]
            out_idset = [x[1] for x in splits]
        nfold = len(out_idset)
    else:
        # Do standard stratified shuffle k-fold split
        sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)
        splits = list(sfk.split(X=dall.get_label(), y=dall.get_label()))
        in_idset = [x[0] for x in splits]
        out_idset = [x[1] for x in splits]
        nfold = len(out_idset)

    ret = []
    for k in range(nfold):
        dtrain = dall.slice(in_idset[k])
        dtest = dall.slice(out_idset[k])
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
        else:
            tparam = param
        plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
        ret.append(CVPack(dtrain, dtest, plst))
    return ret
[ "\n Make an n-fold list of CVPack from random indices.\n " ]
Please provide a description of the function:
def aggcv(rlist):
    # pylint: disable=invalid-name
    cvmap = {}
    idx = rlist[0].split()[0]
    for line in rlist:
        arr = line.split()
        assert idx == arr[0]
        for it in arr[1:]:
            if not isinstance(it, STRING_TYPES):
                it = it.decode()
            k, v = it.split(':')
            if k not in cvmap:
                cvmap[k] = []
            cvmap[k].append(float(v))
    msg = idx
    results = []
    for k, v in sorted(cvmap.items(), key=lambda x: (x[0].startswith('test'), x[0])):
        v = np.array(v)
        if not isinstance(msg, STRING_TYPES):
            msg = msg.decode()
        mean, std = np.mean(v), np.std(v)
        results.extend([(k, mean, std)])
    return results
[ "\n Aggregate cross-validation results.\n\n If verbose_eval is true, progress is displayed in every call. If\n verbose_eval is an integer, progress will only be displayed every\n `verbose_eval` trees, tracked via trial.\n " ]
Please provide a description of the function:
def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,
       metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,
       fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,
       seed=0, callbacks=None, shuffle=True):
    # pylint: disable = invalid-name
    if stratified is True and not SKLEARN_INSTALLED:
        raise XGBoostError('sklearn needs to be installed in order to use stratified cv')

    if isinstance(metrics, str):
        metrics = [metrics]

    if isinstance(params, list):
        _metrics = [x[1] for x in params if x[0] == 'eval_metric']
        params = dict(params)
        if 'eval_metric' in params:
            params['eval_metric'] = _metrics
    else:
        params = dict((k, v) for k, v in params.items())

    if (not metrics) and 'eval_metric' in params:
        if isinstance(params['eval_metric'], list):
            metrics = params['eval_metric']
        else:
            metrics = [params['eval_metric']]

    params.pop("eval_metric", None)

    results = {}
    cvfolds = mknfold(dtrain, nfold, params, seed,
                      metrics, fpreproc, stratified, folds, shuffle)

    # setup callbacks
    callbacks = [] if callbacks is None else callbacks
    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=False))

    if isinstance(verbose_eval, bool) and verbose_eval:
        callbacks.append(callback.print_evaluation(show_stdv=show_stdv))
    else:
        if isinstance(verbose_eval, int):
            callbacks.append(callback.print_evaluation(verbose_eval,
                                                       show_stdv=show_stdv))

    callbacks_before_iter = [
        cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]
    callbacks_after_iter = [
        cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]

    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(CallbackEnv(model=None,
                           cvfolds=cvfolds,
                           iteration=i,
                           begin_iteration=0,
                           end_iteration=num_boost_round,
                           rank=0,
                           evaluation_result_list=None))
        for fold in cvfolds:
            fold.update(i, obj)
        res = aggcv([f.eval(i, feval) for f in cvfolds])

        for key, mean, std in res:
            if key + '-mean' not in results:
                results[key + '-mean'] = []
            if key + '-std' not in results:
                results[key + '-std'] = []
            results[key + '-mean'].append(mean)
            results[key + '-std'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(CallbackEnv(model=None,
                               cvfolds=cvfolds,
                               iteration=i,
                               begin_iteration=0,
                               end_iteration=num_boost_round,
                               rank=0,
                               evaluation_result_list=res))
        except EarlyStopException as e:
            for k in results:
                results[k] = results[k][:(e.best_iteration + 1)]
            break

    if as_pandas:
        try:
            import pandas as pd
            results = pd.DataFrame.from_dict(results)
        except ImportError:
            pass

    return results
[ "Cross-validation with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round : int\n Number of boosting iterations.\n nfold : int\n Number of folds in CV.\n stratified : bool\n Perform stratified sampling.\n folds : a KFold or StratifiedKFold instance or list of fold indices\n Sklearn KFolds or StratifiedKFolds object.\n Alternatively may explicitly pass sample indices for each fold.\n For ``n`` folds, **folds** should be a length ``n`` list of tuples.\n Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used\n as the training samples for the ``n`` th fold and ``out`` is a list of\n indices to be used as the testing samples for the ``n`` th fold.\n metrics : string or list of strings\n Evaluation metrics to be watched in CV.\n obj : function\n Custom objective function.\n feval : function\n Custom evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. CV error needs to decrease at least\n every <early_stopping_rounds> round(s) to continue.\n Last entry in evaluation history is the one from best iteration.\n fpreproc : function\n Preprocessing function that takes (dtrain, dtest, param) and returns\n transformed versions of those.\n as_pandas : bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return np.ndarray\n verbose_eval : bool, int, or None, default None\n Whether to display the progress. If None, progress will be displayed\n when np.ndarray is returned. If True, progress will be displayed at\n boosting stage. If an integer is given, progress will be displayed\n at every given `verbose_eval` boosting stage.\n show_stdv : bool, default True\n Whether to display the standard deviation in progress.\n Results are not affected, and always contains std.\n seed : int\n Seed used to generate the folds (passed to numpy.random.seed).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n shuffle : bool\n Shuffle data before creating folds.\n\n Returns\n -------\n evaluation history : list(string)\n " ]
Please provide a description of the function:
def update(self, iteration, fobj):
    self.bst.update(self.dtrain, iteration, fobj)
[ "\"Update the boosters for one iteration" ]
Please provide a description of the function:
def eval(self, iteration, feval):
    return self.bst.eval_set(self.watchlist, iteration, feval)
[ "\"Evaluate the CVPack for one iteration." ]
Please provide a description of the function:
def _get_callback_context(env):
    if env.model is not None and env.cvfolds is None:
        context = 'train'
    elif env.model is None and env.cvfolds is not None:
        context = 'cv'
    return context
[ "return whether the current callback context is cv or train" ]
Please provide a description of the function:
def _fmt_metric(value, show_stdv=True):
    if len(value) == 2:
        return '%s:%g' % (value[0], value[1])
    if len(value) == 3:
        if show_stdv:
            return '%s:%g+%g' % (value[0], value[1], value[2])
        return '%s:%g' % (value[0], value[1])
    raise ValueError("wrong metric value")
[ "format metric string" ]
Please provide a description of the function:
def print_evaluation(period=1, show_stdv=True):
    def callback(env):
        if env.rank != 0 or (not env.evaluation_result_list) or period is False or period == 0:
            return
        i = env.iteration
        if i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration:
            msg = '\t'.join([_fmt_metric(x, show_stdv) for x in env.evaluation_result_list])
            rabit.tracker_print('[%d]\t%s\n' % (i, msg))
    return callback
[ "Create a callback that print evaluation result.\n\n We print the evaluation results every **period** iterations\n and on the first and the last iterations.\n\n Parameters\n ----------\n period : int\n The period to log the evaluation results\n\n show_stdv : bool, optional\n Whether show stdv if provided\n\n Returns\n -------\n callback : function\n A callback that print evaluation every period iterations.\n ", "internal function" ]
Please provide a description of the function:
def record_evaluation(eval_result):
    if not isinstance(eval_result, dict):
        raise TypeError('eval_result has to be a dictionary')
    eval_result.clear()

    def init(env):
        for k, _ in env.evaluation_result_list:
            pos = k.index('-')
            key = k[:pos]
            metric = k[pos + 1:]
            if key not in eval_result:
                eval_result[key] = {}
            if metric not in eval_result[key]:
                eval_result[key][metric] = []

    def callback(env):
        if not eval_result:
            init(env)
        for k, v in env.evaluation_result_list:
            pos = k.index('-')
            key = k[:pos]
            metric = k[pos + 1:]
            eval_result[key][metric].append(v)

    return callback
[ "Create a call back that records the evaluation history into **eval_result**.\n\n Parameters\n ----------\n eval_result : dict\n A dictionary to store the evaluation results.\n\n Returns\n -------\n callback : function\n The requested callback function.\n ", "internal function", "internal function" ]
Please provide a description of the function:
def reset_learning_rate(learning_rates):
    def get_learning_rate(i, n, learning_rates):
        if isinstance(learning_rates, list):
            if len(learning_rates) != n:
                raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
            new_learning_rate = learning_rates[i]
        else:
            new_learning_rate = learning_rates(i, n)
        return new_learning_rate

    def callback(env):
        context = _get_callback_context(env)

        if context == 'train':
            bst, i, n = env.model, env.iteration, env.end_iteration
            bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
        elif context == 'cv':
            i, n = env.iteration, env.end_iteration
            for cvpack in env.cvfolds:
                bst = cvpack.bst
                bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))

    callback.before_iteration = True
    return callback
[ "Reset learning rate after iteration 1\n\n NOTE: the initial learning rate will still take in-effect on first iteration.\n\n Parameters\n ----------\n learning_rates: list or function\n List of learning rate for each boosting round\n or a customized function that calculates eta in terms of\n current number of round and the total number of boosting round (e.g.\n yields learning rate decay)\n\n * list ``l``: ``eta = l[boosting_round]``\n * function ``f``: ``eta = f(boosting_round, num_boost_round)``\n\n Returns\n -------\n callback : function\n The requested callback function.\n ", "helper providing the learning rate", "internal function" ]
Please provide a description of the function:
def early_stop(stopping_rounds, maximize=False, verbose=True):
    state = {}

    def init(env):
        bst = env.model

        if not env.evaluation_result_list:
            raise ValueError('For early stopping you need at least one set in evals.')
        if len(env.evaluation_result_list) > 1 and verbose:
            msg = ("Multiple eval metrics have been passed: "
                   "'{0}' will be used for early stopping.\n\n")
            rabit.tracker_print(msg.format(env.evaluation_result_list[-1][0]))
        maximize_metrics = ('auc', 'aucpr', 'map', 'ndcg')
        maximize_at_n_metrics = ('auc@', 'aucpr@', 'map@', 'ndcg@')
        maximize_score = maximize
        metric_label = env.evaluation_result_list[-1][0]
        metric = metric_label.split('-', 1)[-1]

        if any(metric.startswith(x) for x in maximize_at_n_metrics):
            maximize_score = True
        if any(metric.split(":")[0] == x for x in maximize_metrics):
            maximize_score = True

        if verbose and env.rank == 0:
            msg = "Will train until {} hasn't improved in {} rounds.\n"
            rabit.tracker_print(msg.format(metric_label, stopping_rounds))

        state['maximize_score'] = maximize_score
        state['best_iteration'] = 0
        if maximize_score:
            state['best_score'] = float('-inf')
        else:
            state['best_score'] = float('inf')

        if bst is not None:
            if bst.attr('best_score') is not None:
                state['best_score'] = float(bst.attr('best_score'))
                state['best_iteration'] = int(bst.attr('best_iteration'))
                state['best_msg'] = bst.attr('best_msg')
            else:
                bst.set_attr(best_iteration=str(state['best_iteration']))
                bst.set_attr(best_score=str(state['best_score']))
        else:
            assert env.cvfolds is not None

    def callback(env):
        score = env.evaluation_result_list[-1][1]
        if not state:
            init(env)
        best_score = state['best_score']
        best_iteration = state['best_iteration']
        maximize_score = state['maximize_score']
        if (maximize_score and score > best_score) or \
                (not maximize_score and score < best_score):
            msg = '[%d]\t%s' % (
                env.iteration,
                '\t'.join([_fmt_metric(x) for x in env.evaluation_result_list]))
            state['best_msg'] = msg
            state['best_score'] = score
            state['best_iteration'] = env.iteration
            # save the property to attributes, so they will occur in checkpoint.
            if env.model is not None:
                env.model.set_attr(best_score=str(state['best_score']),
                                   best_iteration=str(state['best_iteration']),
                                   best_msg=state['best_msg'])
        elif env.iteration - best_iteration >= stopping_rounds:
            best_msg = state['best_msg']
            if verbose and env.rank == 0:
                msg = "Stopping. Best iteration:\n{}\n\n"
                rabit.tracker_print(msg.format(best_msg))
            raise EarlyStopException(best_iteration)

    return callback
[ "Create a callback that activates early stoppping.\n\n Validation error needs to decrease at least\n every **stopping_rounds** round(s) to continue training.\n Requires at least one item in **evals**.\n If there's more than one, will use the last.\n Returns the model from the last iteration (not the best one).\n If early stopping occurs, the model will have three additional fields:\n ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.\n (Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree``\n and/or ``num_class`` appears in the parameters)\n\n Parameters\n ----------\n stopp_rounds : int\n The stopping rounds before the trend occur.\n\n maximize : bool\n Whether to maximize evaluation metric.\n\n verbose : optional, bool\n Whether to print message about early stopping information.\n\n Returns\n -------\n callback : function\n The requested callback function.\n ", "internal function", "internal function" ]
Please provide a description of the function:
def run_doxygen(folder):
    try:
        retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("doxygen execution failed: %s" % e)
[ "Run the doxygen make command in the designated folder." ]
Please provide a description of the function:
def _objective_decorator(func):
    def inner(preds, dmatrix):
        labels = dmatrix.get_label()
        return func(labels, preds)
    return inner
[ "Decorate an objective function\n\n Converts an objective function using the typical sklearn metrics\n signature so that it is usable with ``xgboost.training.train``\n\n Parameters\n ----------\n func: callable\n Expects a callable with signature ``func(y_true, y_pred)``:\n\n y_true: array_like of shape [n_samples]\n The target values\n y_pred: array_like of shape [n_samples]\n The predicted values\n\n Returns\n -------\n new_func: callable\n The new objective function as expected by ``xgboost.training.train``.\n The signature is ``new_func(preds, dmatrix)``:\n\n preds: array_like, shape [n_samples]\n The predicted values\n dmatrix: ``DMatrix``\n The training set from which the labels will be extracted using\n ``dmatrix.get_label()``\n ", "internal function" ]
Please provide a description of the function:
def set_params(self, **params):
    if not params:
        # Simple optimization to gain speed (inspect is slow)
        return self
    for key, value in params.items():
        if hasattr(self, key):
            setattr(self, key, value)
        else:
            self.kwargs[key] = value
    return self
[ "Set the parameters of this estimator.\n Modification of the sklearn method to allow unknown kwargs. This allows using\n the full range of xgboost parameters that are not defined as member variables\n in sklearn grid search.\n Returns\n -------\n self\n " ]
Please provide a description of the function:
def get_params(self, deep=False):
    params = super(XGBModel, self).get_params(deep=deep)
    if isinstance(self.kwargs, dict):
        # if kwargs is a dict, update params accordingly
        params.update(self.kwargs)
    if params['missing'] is np.nan:
        params['missing'] = None  # sklearn doesn't handle nan. see #4725
    if not params.get('eval_metric', True):
        del params['eval_metric']  # don't give as None param to Booster
    return params
[ "Get parameters." ]
Please provide a description of the function:
def get_xgb_params(self):
    xgb_params = self.get_params()
    random_state = xgb_params.pop('random_state')
    if 'seed' in xgb_params and xgb_params['seed'] is not None:
        warnings.warn('The seed parameter is deprecated as of version .6.'
                      'Please use random_state instead.'
                      'seed is deprecated.', DeprecationWarning)
    else:
        xgb_params['seed'] = random_state
    n_jobs = xgb_params.pop('n_jobs')
    if 'nthread' in xgb_params and xgb_params['nthread'] is not None:
        warnings.warn('The nthread parameter is deprecated as of version .6.'
                      'Please use n_jobs instead.'
                      'nthread is deprecated.', DeprecationWarning)
    else:
        xgb_params['nthread'] = n_jobs

    if 'silent' in xgb_params and xgb_params['silent'] is not None:
        warnings.warn('The silent parameter is deprecated.'
                      'Please use verbosity instead.'
                      'silent is deprecated', DeprecationWarning)
        # TODO(canonizer): set verbosity explicitly if silent is removed from xgboost,
        # but remains in this API
    else:
        # silent=None shouldn't be passed to xgboost
        xgb_params.pop('silent', None)

    if xgb_params['nthread'] <= 0:
        xgb_params.pop('nthread', None)
    return xgb_params
[ "Get xgboost type parameters." ]
Please provide a description of the function:
def load_model(self, fname):
    if self._Booster is None:
        self._Booster = Booster({'nthread': self.n_jobs})
    self._Booster.load_model(fname)
[ "\n Load the model from a file.\n\n The model is loaded from an XGBoost internal binary format which is\n universal among the various XGBoost interfaces. Auxiliary attributes of\n the Python Booster object (such as feature names) will not be loaded.\n Label encodings (text labels to numeric labels) will be also lost.\n **If you are using only the Python interface, we recommend pickling the\n model object for best results.**\n\n Parameters\n ----------\n fname : string or a memory buffer\n Input file name or memory buffer(see also save_raw)\n " ]
Please provide a description of the function:
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
        early_stopping_rounds=None, verbose=True, xgb_model=None,
        sample_weight_eval_set=None, callbacks=None):
    # pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
    if sample_weight is not None:
        trainDmatrix = DMatrix(X, label=y, weight=sample_weight,
                               missing=self.missing, nthread=self.n_jobs)
    else:
        trainDmatrix = DMatrix(X, label=y, missing=self.missing,
                               nthread=self.n_jobs)

    evals_result = {}

    if eval_set is not None:
        if sample_weight_eval_set is None:
            sample_weight_eval_set = [None] * len(eval_set)
        evals = list(
            DMatrix(eval_set[i][0], label=eval_set[i][1], missing=self.missing,
                    weight=sample_weight_eval_set[i], nthread=self.n_jobs)
            for i in range(len(eval_set)))
        evals = list(zip(evals, ["validation_{}".format(i) for i in range(len(evals))]))
    else:
        evals = ()

    params = self.get_xgb_params()

    if callable(self.objective):
        obj = _objective_decorator(self.objective)
        params["objective"] = "reg:linear"
    else:
        obj = None

    feval = eval_metric if callable(eval_metric) else None
    if eval_metric is not None:
        if callable(eval_metric):
            eval_metric = None
        else:
            params.update({'eval_metric': eval_metric})

    self._Booster = train(params, trainDmatrix,
                          self.get_num_boosting_rounds(), evals=evals,
                          early_stopping_rounds=early_stopping_rounds,
                          evals_result=evals_result, obj=obj, feval=feval,
                          verbose_eval=verbose, xgb_model=xgb_model,
                          callbacks=callbacks)

    if evals_result:
        for val in evals_result.items():
            evals_result_key = list(val[1].keys())[0]
            evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
        self.evals_result_ = evals_result

    if early_stopping_rounds is not None:
        self.best_score = self._Booster.best_score
        self.best_iteration = self._Booster.best_iteration
        self.best_ntree_limit = self._Booster.best_ntree_limit
    return self
[ "\n Fit the gradient boosting model\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n instance weights\n eval_set : list, optional\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n sample_weight_eval_set : list, optional\n A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of\n instance weights on the i-th validation set.\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.rst. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. Returns the model from the last iteration\n (not the best one). If early stopping occurs, the model will\n have three additional fields: bst.best_score, bst.best_iteration\n and bst.best_ntree_limit.\n (Use bst.best_ntree_limit to get the correct value if num_parallel_tree\n and/or num_class appears in the parameters)\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n xgb_model : str\n file name of stored xgb model or 'Booster' instance Xgb model to be\n loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using :ref:`callback_api`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n " ]
Please provide a description of the function:
def predict(self, data, output_margin=False, ntree_limit=None, validate_features=True):
    # pylint: disable=missing-docstring,invalid-name
    test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
    # get ntree_limit to use - if none specified, default to
    # best_ntree_limit if defined, otherwise 0.
    if ntree_limit is None:
        ntree_limit = getattr(self, "best_ntree_limit", 0)
    return self.get_booster().predict(test_dmatrix,
                                      output_margin=output_margin,
                                      ntree_limit=ntree_limit,
                                      validate_features=validate_features)
[ "\n Predict with `data`.\n\n .. note:: This function is not thread safe.\n\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call ``xgb.copy()`` to make copies\n of model object and then call ``predict()``.\n\n .. note:: Using ``predict()`` with DART booster\n\n If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only\n some of the trees will be evaluated. This will produce incorrect results if ``data`` is\n not the training data. To obtain correct results on test sets, set ``ntree_limit`` to\n a nonzero value, e.g.\n\n .. code-block:: python\n\n preds = bst.predict(dtest, ntree_limit=num_round)\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n output_margin : bool\n Whether to output the raw untransformed margin value.\n ntree_limit : int\n Limit number of trees in the prediction; defaults to best_ntree_limit if defined\n (i.e. it has been trained with early stopping), otherwise 0 (use all trees).\n validate_features : bool\n When this is True, validate that the Booster's and data's feature_names are identical.\n Otherwise, it is assumed that the feature_names are the same.\n Returns\n -------\n prediction : numpy array\n " ]
Please provide a description of the function:
def apply(self, X, ntree_limit=0):
    test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
    return self.get_booster().predict(test_dmatrix,
                                      pred_leaf=True,
                                      ntree_limit=ntree_limit)
[ "Return the predicted leaf every tree for each sample.\n\n Parameters\n ----------\n X : array_like, shape=[n_samples, n_features]\n Input features matrix.\n\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n\n Returns\n -------\n X_leaves : array_like, shape=[n_samples, n_trees]\n For each datapoint x in X and for each tree, return the index of the\n leaf x ends up in. Leaves are numbered within\n ``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.\n " ]
Please provide a description of the function:
def feature_importances_(self):
    if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
        raise AttributeError('Feature importance is not defined for Booster type {}'
                             .format(self.booster))
    b = self.get_booster()
    score = b.get_score(importance_type=self.importance_type)
    all_features = [score.get(f, 0.) for f in b.feature_names]
    all_features = np.array(all_features, dtype=np.float32)
    return all_features / all_features.sum()
[ "\n Feature importances property\n\n .. note:: Feature importance is defined only for tree boosters\n\n Feature importance is only defined when the decision tree model is chosen as base\n learner (`booster=gbtree`). It is not defined for other base learner types, such\n as linear learners (`booster=gblinear`).\n\n Returns\n -------\n feature_importances_ : array of shape ``[n_features]``\n\n " ]
Please provide a description of the function:
def coef_(self):
    if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
        raise AttributeError('Coefficients are not defined for Booster type {}'
                             .format(self.booster))
    b = self.get_booster()
    coef = np.array(json.loads(b.get_dump(dump_format='json')[0])['weight'])
    # Logic for multiclass classification
    n_classes = getattr(self, 'n_classes_', None)
    if n_classes is not None:
        if n_classes > 2:
            assert len(coef.shape) == 1
            assert coef.shape[0] % n_classes == 0
            coef = coef.reshape((n_classes, -1))
    return coef
[ "\n Coefficients property\n\n .. note:: Coefficients are defined only for linear learners\n\n Coefficients are only defined when the linear model is chosen as base\n learner (`booster=gblinear`). It is not defined for other base learner types, such\n as tree learners (`booster=gbtree`).\n\n Returns\n -------\n coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``\n " ]
Please provide a description of the function:

def intercept_(self):
    if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
        raise AttributeError('Intercept (bias) is not defined for Booster type {}'
                             .format(self.booster))
    b = self.get_booster()
    return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias'])
[ "\n Intercept (bias) property\n\n .. note:: Intercept is defined only for linear learners\n\n Intercept (bias) is only defined when the linear model is chosen as base\n learner (`booster=gblinear`). It is not defined for other base learner types, such\n as tree learners (`booster=gbtree`).\n\n Returns\n -------\n intercept_ : array of shape ``(1,)`` or ``[n_classes]``\n " ]
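Both linear-model properties in one sketch (synthetic data; ``booster='gblinear'`` is required, otherwise the AttributeError above is raised):

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X, y = rng.randn(100, 4), rng.randn(100)
lin = xgb.XGBRegressor(booster='gblinear', n_estimators=50).fit(X, y)

print(lin.coef_)        # shape (4,): one weight per feature
print(lin.intercept_)   # shape (1,): the bias term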
Please provide a description of the function:

def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
        early_stopping_rounds=None, verbose=True, xgb_model=None,
        sample_weight_eval_set=None, callbacks=None):
    # pylint: disable = attribute-defined-outside-init,arguments-differ
    evals_result = {}
    self.classes_ = np.unique(y)
    self.n_classes_ = len(self.classes_)

    xgb_options = self.get_xgb_params()

    if callable(self.objective):
        obj = _objective_decorator(self.objective)
        # Use default value. Is it really not used ?
        xgb_options["objective"] = "binary:logistic"
    else:
        obj = None

    if self.n_classes_ > 2:
        # Switch to using a multiclass objective in the underlying XGB instance
        xgb_options["objective"] = "multi:softprob"
        xgb_options['num_class'] = self.n_classes_

    feval = eval_metric if callable(eval_metric) else None
    if eval_metric is not None:
        if callable(eval_metric):
            eval_metric = None
        else:
            xgb_options.update({"eval_metric": eval_metric})

    self._le = XGBLabelEncoder().fit(y)
    training_labels = self._le.transform(y)

    if eval_set is not None:
        if sample_weight_eval_set is None:
            sample_weight_eval_set = [None] * len(eval_set)
        evals = list(
            DMatrix(eval_set[i][0], label=self._le.transform(eval_set[i][1]),
                    missing=self.missing, weight=sample_weight_eval_set[i],
                    nthread=self.n_jobs)
            for i in range(len(eval_set))
        )
        nevals = len(evals)
        eval_names = ["validation_{}".format(i) for i in range(nevals)]
        evals = list(zip(evals, eval_names))
    else:
        evals = ()

    self._features_count = X.shape[1]

    if sample_weight is not None:
        train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight,
                                missing=self.missing, nthread=self.n_jobs)
    else:
        train_dmatrix = DMatrix(X, label=training_labels,
                                missing=self.missing, nthread=self.n_jobs)

    self._Booster = train(xgb_options, train_dmatrix, self.get_num_boosting_rounds(),
                          evals=evals, early_stopping_rounds=early_stopping_rounds,
                          evals_result=evals_result, obj=obj, feval=feval,
                          verbose_eval=verbose, xgb_model=xgb_model,
                          callbacks=callbacks)

    self.objective = xgb_options["objective"]
    if evals_result:
        for val in evals_result.items():
            evals_result_key = list(val[1].keys())[0]
            evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
        self.evals_result_ = evals_result

    if early_stopping_rounds is not None:
        self.best_score = self._Booster.best_score
        self.best_iteration = self._Booster.best_iteration
        self.best_ntree_limit = self._Booster.best_ntree_limit

    return self
[ "\n Fit gradient boosting classifier\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n Weight for each instance\n eval_set : list, optional\n A list of (X, y) pairs to use as a validation set for\n early-stopping\n sample_weight_eval_set : list, optional\n A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of\n instance weights on the i-th validation set.\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.rst. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int, optional\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. If early stopping occurs, the model will have\n three additional fields: bst.best_score, bst.best_iteration and\n bst.best_ntree_limit (bst.best_ntree_limit is the ntree_limit parameter\n default value in predict method if not any other value is specified).\n (Use bst.best_ntree_limit to get the correct value if num_parallel_tree\n and/or num_class appears in the parameters)\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n xgb_model : str\n file name of stored xgb model or 'Booster' instance Xgb model to be\n loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using :ref:`callback_api`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n " ]
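A sketch of the multiclass path (synthetic 3-class data, illustrative values): ``fit`` detects more than two classes and silently switches the objective to ``multi:softprob``:

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X, y = rng.randn(300, 5), rng.randint(0, 3, 300)
X_val, y_val = rng.randn(60, 5), rng.randint(0, 3, 60)

clf = xgb.XGBClassifier(n_estimators=200)
clf.fit(X, y, eval_set=[(X_val, y_val)], eval_metric='mlogloss',
        early_stopping_rounds=10, verbose=False)

print(clf.objective)        # 'multi:softprob' (switched automatically)
print(clf.best_iteration)   # set because early stopping was active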
Please provide a description of the function:

def predict(self, data, output_margin=False, ntree_limit=None, validate_features=True):
    test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
    if ntree_limit is None:
        ntree_limit = getattr(self, "best_ntree_limit", 0)
    class_probs = self.get_booster().predict(test_dmatrix,
                                             output_margin=output_margin,
                                             ntree_limit=ntree_limit,
                                             validate_features=validate_features)
    if output_margin:
        # If output_margin is active, simply return the scores
        return class_probs

    if len(class_probs.shape) > 1:
        column_indexes = np.argmax(class_probs, axis=1)
    else:
        column_indexes = np.repeat(0, class_probs.shape[0])
        column_indexes[class_probs > 0.5] = 1
    return self._le.inverse_transform(column_indexes)
[ "\n Predict with `data`.\n\n .. note:: This function is not thread safe.\n\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call ``xgb.copy()`` to make copies\n of model object and then call ``predict()``.\n\n .. note:: Using ``predict()`` with DART booster\n\n If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only\n some of the trees will be evaluated. This will produce incorrect results if ``data`` is\n not the training data. To obtain correct results on test sets, set ``ntree_limit`` to\n a nonzero value, e.g.\n\n .. code-block:: python\n\n preds = bst.predict(dtest, ntree_limit=num_round)\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n output_margin : bool\n Whether to output the raw untransformed margin value.\n ntree_limit : int\n Limit number of trees in the prediction; defaults to best_ntree_limit if defined\n (i.e. it has been trained with early stopping), otherwise 0 (use all trees).\n validate_features : bool\n When this is True, validate that the Booster's and data's feature_names are identical.\n Otherwise, it is assumed that the feature_names are the same.\n Returns\n -------\n prediction : numpy array\n " ]
Please provide a description of the function:

def predict_proba(self, data, ntree_limit=None, validate_features=True):
    test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
    if ntree_limit is None:
        ntree_limit = getattr(self, "best_ntree_limit", 0)
    class_probs = self.get_booster().predict(test_dmatrix,
                                             ntree_limit=ntree_limit,
                                             validate_features=validate_features)
    if self.objective == "multi:softprob":
        return class_probs
    classone_probs = class_probs
    classzero_probs = 1.0 - classone_probs
    return np.vstack((classzero_probs, classone_probs)).transpose()
[ "\n Predict the probability of each `data` example being of a given class.\n\n .. note:: This function is not thread safe\n\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call ``xgb.copy()`` to make copies\n of model object and then call predict\n\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n ntree_limit : int\n Limit number of trees in the prediction; defaults to best_ntree_limit if defined\n (i.e. it has been trained with early stopping), otherwise 0 (use all trees).\n validate_features : bool\n When this is True, validate that the Booster's and data's feature_names are identical.\n Otherwise, it is assumed that the feature_names are the same.\n\n Returns\n -------\n prediction : numpy array\n a numpy array with the probability of each data example being of a given class.\n " ]
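A binary-case sketch (synthetic data): because the default objective is not ``multi:softprob``, the method stacks ``1 - p`` and ``p`` into two columns:

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X, y = rng.randn(100, 4), rng.randint(0, 2, 100)
clf = xgb.XGBClassifier(n_estimators=20).fit(X, y)

proba = clf.predict_proba(X[:5])
print(proba.shape)         # (5, 2)
print(proba.sum(axis=1))   # each row sums to 1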
Please provide a description of the function:

def fit(self, X, y, group, sample_weight=None, eval_set=None,
        sample_weight_eval_set=None, eval_group=None, eval_metric=None,
        early_stopping_rounds=None, verbose=False, xgb_model=None,
        callbacks=None):
    # pylint: disable = attribute-defined-outside-init,arguments-differ
    # check if group information is provided
    if group is None:
        raise ValueError("group is required for ranking task")

    if eval_set is not None:
        if eval_group is None:
            raise ValueError("eval_group is required if eval_set is not None")
        if len(eval_group) != len(eval_set):
            raise ValueError("length of eval_group should match that of eval_set")
        if any(group is None for group in eval_group):
            raise ValueError("group is required for all eval datasets for ranking task")

    def _dmat_init(group, **params):
        ret = DMatrix(**params)
        ret.set_group(group)
        return ret

    if sample_weight is not None:
        train_dmatrix = _dmat_init(group, data=X, label=y, weight=sample_weight,
                                   missing=self.missing, nthread=self.n_jobs)
    else:
        train_dmatrix = _dmat_init(group, data=X, label=y,
                                   missing=self.missing, nthread=self.n_jobs)

    evals_result = {}

    if eval_set is not None:
        if sample_weight_eval_set is None:
            sample_weight_eval_set = [None] * len(eval_set)
        evals = [_dmat_init(eval_group[i], data=eval_set[i][0],
                            label=eval_set[i][1], missing=self.missing,
                            weight=sample_weight_eval_set[i],
                            nthread=self.n_jobs)
                 for i in range(len(eval_set))]
        nevals = len(evals)
        eval_names = ["eval_{}".format(i) for i in range(nevals)]
        evals = list(zip(evals, eval_names))
    else:
        evals = ()

    params = self.get_xgb_params()

    feval = eval_metric if callable(eval_metric) else None
    if eval_metric is not None:
        if callable(eval_metric):
            eval_metric = None
        else:
            params.update({'eval_metric': eval_metric})

    self._Booster = train(params, train_dmatrix,
                          self.n_estimators,
                          early_stopping_rounds=early_stopping_rounds,
                          evals=evals,
                          evals_result=evals_result, feval=feval,
                          verbose_eval=verbose, xgb_model=xgb_model,
                          callbacks=callbacks)

    self.objective = params["objective"]

    if evals_result:
        for val in evals_result.items():
            evals_result_key = list(val[1].keys())[0]
            evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
        self.evals_result = evals_result

    if early_stopping_rounds is not None:
        self.best_score = self._Booster.best_score
        self.best_iteration = self._Booster.best_iteration
        self.best_ntree_limit = self._Booster.best_ntree_limit

    return self
[ "\n Fit the gradient boosting model\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n group : array_like\n group size of training data\n sample_weight : array_like\n group weights\n\n .. note:: Weights are per-group for ranking tasks\n\n In ranking task, one weight is assigned to each group (not each data\n point). This is because we only care about the relative ordering of\n data points within each group, so it doesn't make sense to assign\n weights to individual data points.\n\n eval_set : list, optional\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n sample_weight_eval_set : list, optional\n A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of\n group weights on the i-th validation set.\n\n .. note:: Weights are per-group for ranking tasks\n\n In ranking task, one weight is assigned to each group (not each data\n point). This is because we only care about the relative ordering of\n data points within each group, so it doesn't make sense to assign\n weights to individual data points.\n\n eval_group : list of arrays, optional\n A list that contains the group size corresponds to each\n (X, y) pair in eval_set\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.rst. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. Returns the model from the last iteration\n (not the best one). If early stopping occurs, the model will\n have three additional fields: bst.best_score, bst.best_iteration\n and bst.best_ntree_limit.\n (Use bst.best_ntree_limit to get the correct value if num_parallel_tree\n and/or num_class appears in the parameters)\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n xgb_model : str\n file name of stored xgb model or 'Booster' instance Xgb model to be\n loaded before training (allows training continuation).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using :ref:`callback_api`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n " ]
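A sketch of the group encoding (synthetic data): ``group`` lists the number of rows per query, in order, and must sum to the number of rows in X:

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.randn(12, 6)
y = rng.randint(0, 3, 12)   # graded relevance labels
group = [4, 3, 5]           # three queries of 4, 3 and 5 documents

ranker = xgb.XGBRanker(n_estimators=10)
ranker.fit(X, y, group)
scores = ranker.predict(X)  # higher score = ranked earlier within its query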
Please provide a description of the function:

def from_pystr_to_cstr(data):
    if not isinstance(data, list):
        raise NotImplementedError
    pointers = (ctypes.c_char_p * len(data))()
    if PY3:
        data = [bytes(d, 'utf-8') for d in data]
    else:
        data = [d.encode('utf-8') if isinstance(d, unicode) else d  # pylint: disable=undefined-variable
                for d in data]
    pointers[:] = data
    return pointers
[ "Convert a list of Python str to C pointer\n\n Parameters\n ----------\n data : list\n list of str\n " ]
Please provide a description of the function:

def from_cstr_to_pystr(data, length):
    if PY3:
        res = []
        for i in range(length.value):
            try:
                res.append(str(data[i].decode('ascii')))
            except UnicodeDecodeError:
                res.append(str(data[i].decode('utf-8')))
    else:
        res = []
        for i in range(length.value):
            try:
                res.append(str(data[i].decode('ascii')))
            except UnicodeDecodeError:
                # pylint: disable=undefined-variable
                res.append(unicode(data[i].decode('utf-8')))
    return res
[ "Revert C pointer to Python str\n\n Parameters\n ----------\n data : ctypes pointer\n pointer to data\n length : ctypes pointer\n pointer to length of data\n " ]
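These two helpers are internal to ``xgboost.core`` (not public API), but a round-trip sketch clarifies the contract: the length argument must expose a ``.value`` attribute, matching the ctypes integer the C side fills in:

import ctypes
from xgboost.core import from_pystr_to_cstr, from_cstr_to_pystr

names = ['feat_a', 'feat_b']
pointers = from_pystr_to_cstr(names)    # ctypes array of c_char_p
length = ctypes.c_uint64(len(names))    # stand-in for a length returned by C
assert from_cstr_to_pystr(pointers, length) == names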
Please provide a description of the function:

def _load_lib():
    lib_paths = find_lib_path()
    if not lib_paths:
        return None
    try:
        pathBackup = os.environ['PATH'].split(os.pathsep)
    except KeyError:
        pathBackup = []
    lib_success = False
    os_error_list = []
    for lib_path in lib_paths:
        try:
            # needed when the lib is linked with non-system-available dependencies
            os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])
            lib = ctypes.cdll.LoadLibrary(lib_path)
            lib_success = True
        except OSError as e:
            os_error_list.append(str(e))
            continue
        finally:
            os.environ['PATH'] = os.pathsep.join(pathBackup)
    if not lib_success:
        libname = os.path.basename(lib_paths[0])
        raise XGBoostError(
            'XGBoost Library ({}) could not be loaded.\n'.format(libname) +
            'Likely causes:\n' +
            '  * OpenMP runtime is not installed ' +
            '(vcomp140.dll or libgomp-1.dll for Windows, ' +
            'libgomp.so for UNIX-like OSes)\n' +
            '  * You are running 32-bit Python on a 64-bit OS\n' +
            'Error message(s): {}\n'.format(os_error_list))
    lib.XGBGetLastError.restype = ctypes.c_char_p
    lib.callback = _get_log_callback_func()
    if lib.XGBRegisterLogCallback(lib.callback) != 0:
        raise XGBoostError(lib.XGBGetLastError())
    return lib
[ "Load xgboost Library." ]
Please provide a description of the function:

def ctypes2numpy(cptr, length, dtype):
    NUMPY_TO_CTYPES_MAPPING = {
        np.float32: ctypes.c_float,
        np.uint32: ctypes.c_uint,
    }
    if dtype not in NUMPY_TO_CTYPES_MAPPING:
        raise RuntimeError('Supported types: {}'.format(NUMPY_TO_CTYPES_MAPPING.keys()))
    ctype = NUMPY_TO_CTYPES_MAPPING[dtype]
    if not isinstance(cptr, ctypes.POINTER(ctype)):
        raise RuntimeError('expected {} pointer'.format(ctype))
    res = np.zeros(length, dtype=dtype)
    if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
        raise RuntimeError('memmove failed')
    return res
[ "Convert a ctypes pointer array to a numpy array.\n " ]
Please provide a description of the function:

def ctypes2buffer(cptr, length):
    if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
        raise RuntimeError('expected char pointer')
    res = bytearray(length)
    rptr = (ctypes.c_char * length).from_buffer(res)
    if not ctypes.memmove(rptr, cptr, length):
        raise RuntimeError('memmove failed')
    return res
[ "Convert ctypes pointer to buffer type." ]
Please provide a description of the function:

def c_array(ctype, values):
    if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
        return (ctype * len(values)).from_buffer_copy(values)
    return (ctype * len(values))(*values)
[ "Convert a python list of values to a ctypes array." ]
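A round-trip sketch over the ctypes helpers above (internal API, used here only for illustration): build a C array from a float32 ndarray, then read it back:

import ctypes
import numpy as np
from xgboost.core import c_array, ctypes2numpy

values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
carr = c_array(ctypes.c_float, values)             # copied via from_buffer_copy
ptr = ctypes.cast(carr, ctypes.POINTER(ctypes.c_float))
print(ctypes2numpy(ptr, len(values), np.float32))  # [1. 2. 3.]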
Please provide a description of the function:

def _maybe_pandas_data(data, feature_names, feature_types):
    if not isinstance(data, DataFrame):
        return data, feature_names, feature_types

    data_dtypes = data.dtypes
    if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
        bad_fields = [data.columns[i] for i, dtype in enumerate(data_dtypes)
                      if dtype.name not in PANDAS_DTYPE_MAPPER]

        msg = ('DataFrame.dtypes for data must be int, float or bool.\n'
               'Did not expect the data types in fields ')
        raise ValueError(msg + ', '.join(bad_fields))

    if feature_names is None:
        if isinstance(data.columns, MultiIndex):
            feature_names = [
                ' '.join([str(x) for x in i])
                for i in data.columns
            ]
        else:
            feature_names = data.columns.format()

    if feature_types is None:
        feature_types = [PANDAS_DTYPE_MAPPER[dtype.name] for dtype in data_dtypes]

    data = data.values.astype('float')

    return data, feature_names, feature_types
[ " Extract internal data from pd.DataFrame for DMatrix data " ]
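In practice this conversion runs automatically when a DataFrame is passed to the DMatrix constructor; a small sketch (made-up column names):

import pandas as pd
import xgboost as xgb

df = pd.DataFrame({'age': [31, 42, 23], 'income': [40.0, 82.5, 31.2]})
dtrain = xgb.DMatrix(df, label=[0, 1, 0])

print(dtrain.feature_names)   # ['age', 'income'], taken from the columns
print(dtrain.feature_types)   # ['int', 'float'], mapped from the dtypes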
Please provide a description of the function:

def _maybe_dt_data(data, feature_names, feature_types):
    if not isinstance(data, DataTable):
        return data, feature_names, feature_types

    data_types_names = tuple(lt.name for lt in data.ltypes)
    bad_fields = [data.names[i]
                  for i, type_name in enumerate(data_types_names)
                  if type_name not in DT_TYPE_MAPPER]
    if bad_fields:
        msg = ('DataFrame.types for data must be int, float or bool.\n'
               'Did not expect the data types in fields ')
        raise ValueError(msg + ', '.join(bad_fields))

    if feature_names is None:
        feature_names = data.names

        # always return stypes for dt ingestion
        if feature_types is not None:
            raise ValueError('DataTable has own feature types, cannot pass them in')
        feature_types = np.vectorize(DT_TYPE_MAPPER2.get)(data_types_names)

    return data, feature_names, feature_types
[ "\n Validate feature names and types if data table\n " ]
Please provide a description of the function:

def _maybe_dt_array(array):
    if not isinstance(array, DataTable) or array is None:
        return array

    if array.shape[1] > 1:
        raise ValueError('DataTable for label or weight cannot have multiple columns')

    # below requires new dt version
    # extract first column
    array = array.to_numpy()[:, 0].astype('float')

    return array
[ " Extract numpy array from single column data table " ]
Please provide a description of the function:

def _init_from_csr(self, csr):
    if len(csr.indices) != len(csr.data):
        raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
    handle = ctypes.c_void_p()
    _check_call(_LIB.XGDMatrixCreateFromCSREx(c_array(ctypes.c_size_t, csr.indptr),
                                              c_array(ctypes.c_uint, csr.indices),
                                              c_array(ctypes.c_float, csr.data),
                                              ctypes.c_size_t(len(csr.indptr)),
                                              ctypes.c_size_t(len(csr.data)),
                                              ctypes.c_size_t(csr.shape[1]),
                                              ctypes.byref(handle)))
    self.handle = handle
[ "\n Initialize data from a CSR matrix.\n " ]
Please provide a description of the function:

def _init_from_csc(self, csc):
    if len(csc.indices) != len(csc.data):
        raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
    handle = ctypes.c_void_p()
    _check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr),
                                              c_array(ctypes.c_uint, csc.indices),
                                              c_array(ctypes.c_float, csc.data),
                                              ctypes.c_size_t(len(csc.indptr)),
                                              ctypes.c_size_t(len(csc.data)),
                                              ctypes.c_size_t(csc.shape[0]),
                                              ctypes.byref(handle)))
    self.handle = handle
[ "\n Initialize data from a CSC matrix.\n " ]
Please provide a description of the function:

def _init_from_npy2d(self, mat, missing, nthread):
    if len(mat.shape) != 2:
        raise ValueError('Input numpy.ndarray must be 2 dimensional')
    # flatten the array by rows and ensure it is float32.
    # we try to avoid data copies if possible (reshape returns a view when possible
    # and we explicitly tell np.array to try and avoid copying)
    data = np.array(mat.reshape(mat.size), copy=False, dtype=np.float32)
    handle = ctypes.c_void_p()
    missing = missing if missing is not None else np.nan
    if nthread is None:
        _check_call(_LIB.XGDMatrixCreateFromMat(
            data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            c_bst_ulong(mat.shape[0]),
            c_bst_ulong(mat.shape[1]),
            ctypes.c_float(missing),
            ctypes.byref(handle)))
    else:
        _check_call(_LIB.XGDMatrixCreateFromMat_omp(
            data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            c_bst_ulong(mat.shape[0]),
            c_bst_ulong(mat.shape[1]),
            ctypes.c_float(missing),
            ctypes.byref(handle),
            nthread))
    self.handle = handle
[ "\n Initialize data from a 2-D numpy matrix.\n\n If ``mat`` does not have ``order='C'`` (aka row-major) or is not contiguous,\n a temporary copy will be made.\n\n If ``mat`` does not have ``dtype=numpy.float32``, a temporary copy will be made.\n\n So there could be as many as two temporary data copies; be mindful of input layout\n and type if memory use is a concern.\n " ]
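Given the copy behavior documented above, callers who care about memory can normalize the layout themselves; a sketch:

import numpy as np
import xgboost as xgb

mat = np.random.rand(100, 10)
view = mat[:, :3]   # non-contiguous column slice

# Converting to C-contiguous float32 up front avoids the hidden copies
# that _init_from_npy2d would otherwise make for this view.
tight = np.ascontiguousarray(view, dtype=np.float32)
dtrain = xgb.DMatrix(tight)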
Please provide a description of the function:

def _init_from_dt(self, data, nthread):
    ptrs = (ctypes.c_void_p * data.ncols)()
    if hasattr(data, "internal") and hasattr(data.internal, "column"):
        # datatable>0.8.0
        for icol in range(data.ncols):
            col = data.internal.column(icol)
            ptr = col.data_pointer
            ptrs[icol] = ctypes.c_void_p(ptr)
    else:
        # datatable<=0.8.0
        from datatable.internal import frame_column_data_r  # pylint: disable=no-name-in-module,import-error
        for icol in range(data.ncols):
            ptrs[icol] = frame_column_data_r(data, icol)

    # always return stypes for dt ingestion
    feature_type_strings = (ctypes.c_char_p * data.ncols)()
    for icol in range(data.ncols):
        feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))

    handle = ctypes.c_void_p()
    _check_call(_LIB.XGDMatrixCreateFromDT(
        ptrs, feature_type_strings,
        c_bst_ulong(data.shape[0]),
        c_bst_ulong(data.shape[1]),
        ctypes.byref(handle),
        nthread))
    self.handle = handle
[ "\n Initialize data from a datatable Frame.\n " ]
Please provide a description of the function:

def set_float_info(self, field, data):
    if getattr(data, 'base', None) is not None and \
       data.base is not None and isinstance(data, np.ndarray) \
       and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
        self.set_float_info_npy2d(field, data)
        return
    c_data = c_array(ctypes.c_float, data)
    _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
                                           c_str(field),
                                           c_data,
                                           c_bst_ulong(len(data))))
[ "Set float type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n " ]
Please provide a description of the function:

def set_float_info_npy2d(self, field, data):
    if getattr(data, 'base', None) is not None and \
       data.base is not None and isinstance(data, np.ndarray) \
       and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
        warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
                      "because it will generate extra copies and increase memory consumption")
        data = np.array(data, copy=True, dtype=np.float32)
    else:
        data = np.array(data, copy=False, dtype=np.float32)
    c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
    _check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
                                           c_str(field),
                                           c_data,
                                           c_bst_ulong(len(data))))
[ "Set float type property into the DMatrix\n for numpy 2d array input\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n " ]
Please provide a description of the function:

def set_uint_info(self, field, data):
    if getattr(data, 'base', None) is not None and \
       data.base is not None and isinstance(data, np.ndarray) \
       and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
        warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
                      "because it will generate extra copies and increase memory consumption")
        data = np.array(data, copy=True, dtype=ctypes.c_uint)
    else:
        data = np.array(data, copy=False, dtype=ctypes.c_uint)
    _check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
                                          c_str(field),
                                          c_array(ctypes.c_uint, data),
                                          c_bst_ulong(len(data))))
[ "Set uint type property into the DMatrix.\n\n Parameters\n ----------\n field: str\n The field name of the information\n\n data: numpy array\n The array of data to be set\n " ]
Please provide a description of the function:

def save_binary(self, fname, silent=True):
    _check_call(_LIB.XGDMatrixSaveBinary(self.handle,
                                         c_str(fname),
                                         ctypes.c_int(silent)))
[ "Save DMatrix to an XGBoost buffer. Saved binary can be later loaded\n by providing the path to :py:func:`xgboost.DMatrix` as input.\n\n Parameters\n ----------\n fname : string\n Name of the output buffer file.\n silent : bool (optional; default: True)\n If set, the output is suppressed.\n " ]
Please provide a description of the function:

def set_group(self, group):
    _check_call(_LIB.XGDMatrixSetGroup(self.handle,
                                       c_array(ctypes.c_uint, group),
                                       c_bst_ulong(len(group))))
[ "Set group size of DMatrix (used for ranking).\n\n Parameters\n ----------\n group : array like\n Group size of each group\n " ]
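A sketch of the expected encoding: like the ranker's ``group`` argument, the list holds consecutive group sizes, not group ids, and must sum to the row count:

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(10, 4), label=np.random.randint(0, 2, 10))
dtrain.set_group([6, 4])   # rows 0-5 form query 0, rows 6-9 form query 1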
Please provide a description of the function:

def feature_names(self):
    if self._feature_names is None:
        self._feature_names = ['f{0}'.format(i) for i in range(self.num_col())]
    return self._feature_names
[ "Get feature names (column labels).\n\n Returns\n -------\n feature_names : list or None\n " ]
Please provide a description of the function:

def feature_names(self, feature_names):
    if feature_names is not None:
        # validate feature name
        try:
            if not isinstance(feature_names, str):
                feature_names = [n for n in iter(feature_names)]
            else:
                feature_names = [feature_names]
        except TypeError:
            feature_names = [feature_names]

        if len(feature_names) != len(set(feature_names)):
            raise ValueError('feature_names must be unique')
        if len(feature_names) != self.num_col():
            msg = 'feature_names must have the same length as data'
            raise ValueError(msg)
        # prohibit symbols that may affect parsing, e.g. []<
        if not all(isinstance(f, STRING_TYPES) and
                   not any(x in f for x in set(('[', ']', '<')))
                   for f in feature_names):
            raise ValueError('feature_names may not contain [, ] or <')
    else:
        # reset feature_types also
        self.feature_types = None
    self._feature_names = feature_names
[ "Set feature names (column labels).\n\n Parameters\n ----------\n feature_names : list or None\n Labels for features. None will reset existing feature names\n " ]
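A sketch of the getter/setter pair (made-up names): unnamed columns default to f0, f1, ..., and assigning None resets both names and types:

import numpy as np
import xgboost as xgb

dtrain = xgb.DMatrix(np.random.rand(5, 3))
print(dtrain.feature_names)    # ['f0', 'f1', 'f2'] generated on first access

dtrain.feature_names = ['age', 'height', 'weight']   # unique, no '[', ']' or '<'
dtrain.feature_names = None    # reset; feature_types is cleared as well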
Please provide a description of the function:

def feature_types(self, feature_types):
    if feature_types is not None:
        if self._feature_names is None:
            msg = 'Unable to set feature types before setting names'
            raise ValueError(msg)

        if isinstance(feature_types, STRING_TYPES):
            # single string will be applied to all columns
            feature_types = [feature_types] * self.num_col()

        try:
            if not isinstance(feature_types, str):
                feature_types = [n for n in iter(feature_types)]
            else:
                feature_types = [feature_types]
        except TypeError:
            feature_types = [feature_types]

        if len(feature_types) != self.num_col():
            msg = 'feature_types must have the same length as data'
            raise ValueError(msg)

        valid = ('int', 'float', 'i', 'q')
        if not all(isinstance(f, STRING_TYPES) and f in valid
                   for f in feature_types):
            raise ValueError('All feature_types must be {int, float, i, q}')
    self._feature_types = feature_types
[ "Set feature types (column types).\n\n This is for displaying the results and unrelated\n to the learning process.\n\n Parameters\n ----------\n feature_types : list or None\n Types for features. None will reset existing feature types\n " ]
Please provide a description of the function:

def load_rabit_checkpoint(self):
    version = ctypes.c_int()
    _check_call(_LIB.XGBoosterLoadRabitCheckpoint(
        self.handle, ctypes.byref(version)))
    return version.value
[ "Initialize the model by load from rabit checkpoint.\n\n Returns\n -------\n version: integer\n The version number of the model.\n " ]