Pure implementation of the interpolation search algorithm in Python. Be careful: the collection must be sorted, otherwise the result will be unpredictable. :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search for :return: index of the found item or None if the item is not found
def interpolation_search(sorted_collection, item): """Pure implementation of interpolation search algorithm in Python Be careful collection must be sorted, otherwise result will be unpredictable :param sorted_collection: some sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found """ left = 0 right = len(sorted_collection) - 1 while left <= right: point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) #out of range check if point<0 or point>=len(sorted_collection): return None current_item = sorted_collection[point] if current_item == item: return point else: if item < current_item: right = point - 1 else: left = point + 1 return None
Pure implementation of the interpolation search algorithm in Python, by recursion. Be careful: the collection must be sorted, otherwise the result will be unpredictable. The first call should be made with left=0 and right=(len(sorted_collection)-1). :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search for :return: index of the found item or None if the item is not found
def interpolation_search_by_recursion(sorted_collection, item, left, right): """Pure implementation of interpolation search algorithm in Python by recursion Be careful collection must be sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) :param sorted_collection: some sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found """ point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) #out of range check if point<0 or point>=len(sorted_collection): return None if sorted_collection[point] == item: return point elif sorted_collection[point] > item: return interpolation_search_by_recursion(sorted_collection, item, left, point-1) else: return interpolation_search_by_recursion(sorted_collection, item, point+1, right)
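A minimal usage sketch for the two interpolation searches above, assuming both are defined in the same module. Both assume an ascending sorted input, and both divide by sorted_collection[right] - sorted_collection[left], so a collection whose first and last items are equal raises ZeroDivisionError.

collection = [5, 10, 12, 14, 17, 20, 21]
print(interpolation_search(collection, 20))   # 5
print(interpolation_search(collection, 4))    # None (interpolated index falls out of range)
print(interpolation_search_by_recursion(collection, 17, 0, len(collection) - 1))  # 4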
Check if collection is sorted, if not - raises :py:class:`ValueError` :param collection: collection :return: True if collection is sorted :raise: :py:class:`ValueError` if collection is not sorted Examples: >>> __assert_sorted([0, 1, 2, 4]) True >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): ... ValueError: Collection must be sorted
def __assert_sorted(collection): """Check if collection is sorted, if not - raises :py:class:`ValueError` :param collection: collection :return: True if collection is sorted :raise: :py:class:`ValueError` if collection is not sorted Examples: >>> __assert_sorted([0, 1, 2, 4]) True >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): ... ValueError: Collection must be sorted """ if collection != sorted(collection): raise ValueError('Collection must be sorted') return True
Pure implementation of the linear search algorithm in Python :param sequence: some sequence with comparable items :param target: item value to search for :return: index of the found item or None if the item is not found Examples: >>> linear_search([0, 5, 7, 10, 15], 0) 0 >>> linear_search([0, 5, 7, 10, 15], 15) 4 >>> linear_search([0, 5, 7, 10, 15], 5) 1 >>> linear_search([0, 5, 7, 10, 15], 6)
def linear_search(sequence, target):
    """Pure implementation of the linear search algorithm in Python

    :param sequence: some sequence with comparable items
    :param target: item value to search for
    :return: index of the found item or None if the item is not found

    Examples:
    >>> linear_search([0, 5, 7, 10, 15], 0)
    0
    >>> linear_search([0, 5, 7, 10, 15], 15)
    4
    >>> linear_search([0, 5, 7, 10, 15], 5)
    1
    >>> linear_search([0, 5, 7, 10, 15], 6)
    """
    for index, item in enumerate(sequence):
        if item == target:
            return index
    return None
Three-way partition the data into smaller, equal and greater lists, relative to the pivot :param data: The data to be sorted (a list) :param pivot: The value to partition the data on :return: Three lists: smaller, equal and greater
def _partition(data, pivot): """ Three way partition the data into smaller, equal and greater lists, in relationship to the pivot :param data: The data to be sorted (a list) :param pivot: The value to partition the data on :return: Three list: smaller, equal and greater """ less, equal, greater = [], [], [] for element in data: if element.address < pivot.address: less.append(element) elif element.address > pivot.address: greater.append(element) else: equal.append(element) return less, equal, greater
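_partition above compares the address attribute of its elements, so any objects exposing an address work. A hypothetical sketch (the Node type below is an assumption for illustration, not from the source):

from collections import namedtuple

Node = namedtuple('Node', 'address')           # hypothetical element type
data = [Node(7), Node(3), Node(9), Node(3)]
less, equal, greater = _partition(data, Node(3))
print([n.address for n in less], [n.address for n in equal], [n.address for n in greater])
# [] [3, 3] [7, 9]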
Pure implementation of sentinel linear search algorithm in Python :param sequence: some sequence with comparable items :param target: item value to search :return: index of found item or None if item is not found Examples: >>> sentinel_linear_search([0, 5, 7, 10, 15], 0) 0 >>> sentinel_linear_search([0, 5, 7, 10, 15], 15) 4 >>> sentinel_linear_search([0, 5, 7, 10, 15], 5) 1 >>> sentinel_linear_search([0, 5, 7, 10, 15], 6)
def sentinel_linear_search(sequence, target): """Pure implementation of sentinel linear search algorithm in Python :param sequence: some sequence with comparable items :param target: item value to search :return: index of found item or None if item is not found Examples: >>> sentinel_linear_search([0, 5, 7, 10, 15], 0) 0 >>> sentinel_linear_search([0, 5, 7, 10, 15], 15) 4 >>> sentinel_linear_search([0, 5, 7, 10, 15], 5) 1 >>> sentinel_linear_search([0, 5, 7, 10, 15], 6) """ sequence.append(target) index = 0 while sequence[index] != target: index += 1 sequence.pop() if index == len(sequence): return None return index
Pure implementation of generating a dictionary of neighbors and the cost to each neighbor, given a path to a file that contains a graph. :param path: The path to the .txt file that contains the graph (e.g. tabudata2.txt) :return dict_of_neighbours: Dictionary with each node as a key and, as the value, a list of lists with the node's neighbors and the cost (distance) to each neighbor. Example of dict_of_neighbours: >>> dict_of_neighbours[a] [[b,20],[c,18],[d,22],[e,26]] This indicates that node (city) 'a' has the neighbor 'b' at distance 20, the neighbor 'c' at distance 18, the neighbor 'd' at distance 22 and the neighbor 'e' at distance 26.
def generate_neighbours(path): """ Pure implementation of generating a dictionary of neighbors and the cost with each neighbor, given a path file that includes a graph. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :return dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. Example of dict_of_neighbours: >>> dict_of_neighbours[a] [[b,20],[c,18],[d,22],[e,26]] This indicates the neighbors of node (city) 'a', which has neighbor the node 'b' with distance 20, the node 'c' with distance 18, the node 'd' with distance 22 and the node 'e' with distance 26. """ dict_of_neighbours = {} with open(path) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _list = list() _list.append([line.split()[1], line.split()[2]]) dict_of_neighbours[line.split()[0]] = _list else: dict_of_neighbours[line.split()[0]].append([line.split()[1], line.split()[2]]) if line.split()[1] not in dict_of_neighbours: _list = list() _list.append([line.split()[0], line.split()[2]]) dict_of_neighbours[line.split()[1]] = _list else: dict_of_neighbours[line.split()[1]].append([line.split()[0], line.split()[2]]) return dict_of_neighbours
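A hypothetical usage sketch: generate_neighbours above expects the file to contain one edge per line as "node_a node_b distance" (whitespace-separated), and it keeps the distances as strings. The file name below is just the example name from the docstring.

with open('tabudata2.txt', 'w') as f:
    f.write('a b 20\na c 18\nb c 10\n')
print(generate_neighbours('tabudata2.txt'))
# {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']], 'c': [['a', '18'], ['b', '10']]}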
Pure implementation of generating the first solution for Tabu search to start from, with the redundant resolution strategy: we start from the starting node (e.g. node 'a'), go to the city nearest (lowest distance) to that node (say node 'c'), then to the city nearest to node 'c', and so on until we have visited all cities and returned to the starting node. :param path: The path to the .txt file that contains the graph (e.g. tabudata2.txt) :param dict_of_neighbours: Dictionary with each node as a key and, as the value, a list of lists with the node's neighbors and the cost (distance) to each neighbor. :return first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy, as a list. :return distance_of_first_solution: The total distance that the Travelling Salesman will travel if he follows the path in first_solution.
def generate_first_solution(path, dict_of_neighbours): """ Pure implementation of generating the first solution for the Tabu search to start, with the redundant resolution strategy. That means that we start from the starting node (e.g. node 'a'), then we go to the city nearest (lowest distance) to this node (let's assume is node 'c'), then we go to the nearest city of the node 'c', etc till we have visited all cities and return to the starting node. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :return distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. """ with open(path) as f: start_node = f.read(1) end_node = start_node first_solution = [] visiting = start_node distance_of_first_solution = 0 while visiting not in first_solution: minim = 10000 for k in dict_of_neighbours[visiting]: if int(k[1]) < int(minim) and k[0] not in first_solution: minim = k[1] best_node = k[0] first_solution.append(visiting) distance_of_first_solution = distance_of_first_solution + int(minim) visiting = best_node first_solution.append(end_node) position = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 distance_of_first_solution = distance_of_first_solution + int( dict_of_neighbours[first_solution[-2]][position][1]) - 10000 return first_solution, distance_of_first_solution
Pure implementation of generating the neighborhood (sorted by the total distance of each solution, from lowest to highest) of a solution with the 1-1 exchange method: we exchange each node in a solution with each other node, generating a set of solutions called the neighborhood. :param solution: The solution for which we want to find the neighborhood. :param dict_of_neighbours: Dictionary with each node as a key and, as the value, a list of lists with the node's neighbors and the cost (distance) to each neighbor. :return neighborhood_of_solution: A list of the solutions (each with its total distance appended) that are produced by 1-1 exchange from the input solution. Example: >>> find_neighborhood(['a','c','b','d','e','a'], dict_of_neighbours) [['a','e','b','d','c','a',90], ['a','c','d','b','e','a',90], ['a','d','b','c','e','a',93], ['a','c','b','e','d','a',102], ['a','c','e','d','b','a',113], ['a','b','c','d','e','a',93]]
def find_neighborhood(solution, dict_of_neighbours): """ Pure implementation of generating the neighborhood (sorted by total distance of each solution from lowest to highest) of a solution with 1-1 exchange method, that means we exchange each node in a solution with each other node and generating a number of solution named neighborhood. :param solution: The solution in which we want to find the neighborhood. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return neighborhood_of_solution: A list that includes the solutions and the total distance of each solution (in form of list) that are produced with 1-1 exchange from the solution that the method took as an input Example: >>> find_neighborhood(['a','c','b','d','e','a']) [['a','e','b','d','c','a',90], [['a','c','d','b','e','a',90],['a','d','b','c','e','a',93], ['a','c','b','e','d','a',102], ['a','c','e','d','b','a',113], ['a','b','c','d','e','a',93]] """ neighborhood_of_solution = [] for n in solution[1:-1]: idx1 = solution.index(n) for kn in solution[1:-1]: idx2 = solution.index(kn) if n == kn: continue _tmp = copy.deepcopy(solution) _tmp[idx1] = kn _tmp[idx2] = n distance = 0 for k in _tmp[:-1]: next_node = _tmp[_tmp.index(k) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: distance = distance + int(i[1]) _tmp.append(distance) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) indexOfLastItemInTheList = len(neighborhood_of_solution[0]) - 1 neighborhood_of_solution.sort(key=lambda x: x[indexOfLastItemInTheList]) return neighborhood_of_solution
Pure implementation of the Tabu search algorithm for a Travelling Salesman Problem in Python. :param first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy, as a list. :param distance_of_first_solution: The total distance that the Travelling Salesman will travel if he follows the path in first_solution. :param dict_of_neighbours: Dictionary with each node as a key and, as the value, a list of lists with the node's neighbors and the cost (distance) to each neighbor. :param iters: The number of iterations that Tabu search will execute. :param size: The size of the Tabu List. :return best_solution_ever: The solution with the lowest distance that occurred during the execution of Tabu search. :return best_cost: The total distance that the Travelling Salesman will travel if he follows the path in best_solution_ever.
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size): """ Pure implementation of Tabu search algorithm for a Travelling Salesman Problem in Python. :param first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :param distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :param iters: The number of iterations that Tabu search will execute. :param size: The size of Tabu List. :return best_solution_ever: The solution with the lowest distance that occured during the execution of Tabu search. :return best_cost: The total distance that Travelling Salesman will travel, if he follows the path in best_solution ever. """ count = 1 solution = first_solution tabu_list = list() best_cost = distance_of_first_solution best_solution_ever = solution while count <= iters: neighborhood = find_neighborhood(solution, dict_of_neighbours) index_of_best_solution = 0 best_solution = neighborhood[index_of_best_solution] best_cost_index = len(best_solution) - 1 found = False while found is False: i = 0 while i < len(best_solution): if best_solution[i] != solution[i]: first_exchange_node = best_solution[i] second_exchange_node = solution[i] break i = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [second_exchange_node, first_exchange_node] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node]) found = True solution = best_solution[:-1] cost = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: best_cost = cost best_solution_ever = solution else: index_of_best_solution = index_of_best_solution + 1 best_solution = neighborhood[index_of_best_solution] if len(tabu_list) >= size: tabu_list.pop(0) count = count + 1 return best_solution_ever, best_cost
Pure implementation of the bogosort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> bogosort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> bogosort([]) [] >>> bogosort([-2, -5, -45]) [-45, -5, -2]
def bogosort(collection): """Pure implementation of the bogosort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> bogosort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> bogosort([]) [] >>> bogosort([-2, -5, -45]) [-45, -5, -2] """ def isSorted(collection): if len(collection) < 2: return True for i in range(len(collection) - 1): if collection[i] > collection[i + 1]: return False return True while not isSorted(collection): random.shuffle(collection) return collection
Pure implementation of bubble sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> bubble_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> bubble_sort([]) [] >>> bubble_sort([-2, -5, -45]) [-45, -5, -2] >>> bubble_sort([-23, 0, 6, -4, 34]) [-23, -4, 0, 6, 34]
def bubble_sort(collection):
    """Pure implementation of bubble sort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
    comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> bubble_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    >>> bubble_sort([-2, -5, -45])
    [-45, -5, -2]
    >>> bubble_sort([-23, 0, 6, -4, 34])
    [-23, -4, 0, 6, 34]
    """
    length = len(collection)
    for i in range(length - 1):
        swapped = False
        for j in range(length - 1 - i):
            if collection[j] > collection[j + 1]:
                swapped = True
                collection[j], collection[j + 1] = collection[j + 1], collection[j]
        if not swapped:
            break  # Stop iteration if the collection is sorted.
    return collection
Pure implementation of the cocktail shaker sort algorithm in Python.
def cocktail_shaker_sort(unsorted):
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.
    Sorts `unsorted` in place and returns it.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest remaining value to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest remaining value to position i.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            return unsorted
    # Also return the list when the outer loop runs to completion.
    return unsorted
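A short usage sketch: the sort works in place and returns the list.

data = [4, 5, 2, 1, 2]
print(cocktail_shaker_sort(data))  # [1, 2, 2, 4, 5]
print(data)                        # [1, 2, 2, 4, 5] -- sorted in place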
Pure implementation of comb sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> comb_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> comb_sort([]) [] >>> comb_sort([-2, -5, -45]) [-45, -5, -2]
def comb_sort(data): """Pure implementation of comb sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> comb_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> comb_sort([]) [] >>> comb_sort([-2, -5, -45]) [-45, -5, -2] """ shrink_factor = 1.3 gap = len(data) swapped = True i = 0 while gap > 1 or swapped: # Update the gap value for a next comb gap = int(float(gap) / shrink_factor) swapped = False i = 0 while gap + i < len(data): if data[i] > data[i+gap]: # Swap values data[i], data[i+gap] = data[i+gap], data[i] swapped = True i += 1 return data
Pure implementation of counting sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> counting_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> counting_sort([]) [] >>> counting_sort([-2, -5, -45]) [-45, -5, -2]
def counting_sort(collection): """Pure implementation of counting sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> counting_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> counting_sort([]) [] >>> counting_sort([-2, -5, -45]) [-45, -5, -2] """ # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection coll_len = len(collection) coll_max = max(collection) coll_min = min(collection) # create the counting array counting_arr_length = coll_max + 1 - coll_min counting_arr = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1, counting_arr_length): counting_arr[i] = counting_arr[i] + counting_arr[i-1] # create the output collection ordered = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0, coll_len)): ordered[counting_arr[collection[i] - coll_min]-1] = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered
Pure implementation of the gnome sort algorithm in Python.
def gnome_sort(unsorted):
    """
    Pure implementation of the gnome sort algorithm in Python.
    Sorts `unsorted` in place and returns it.
    """
    if len(unsorted) <= 1:
        return unsorted
    i = 1
    while i < len(unsorted):
        if unsorted[i - 1] <= unsorted[i]:
            i += 1
        else:
            unsorted[i - 1], unsorted[i] = unsorted[i], unsorted[i - 1]
            i -= 1
            if i == 0:
                i = 1
    # Return the sorted list once the walk reaches the end.
    return unsorted
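A short usage sketch: like the other in-place sorts in this collection, it mutates its argument and returns it.

data = [0, 5, 3, 2, 2]
print(gnome_sort(data))  # [0, 2, 2, 3, 5]
print(data)              # [0, 2, 2, 3, 5] -- sorted in place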
Pure implementation of the heap sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> heap_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> heap_sort([]) [] >>> heap_sort([-2, -5, -45]) [-45, -5, -2]
def heap_sort(unsorted): ''' Pure implementation of the heap sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> heap_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> heap_sort([]) [] >>> heap_sort([-2, -5, -45]) [-45, -5, -2] ''' n = len(unsorted) for i in range(n // 2 - 1, -1, -1): heapify(unsorted, i, n) for i in range(n - 1, 0, -1): unsorted[0], unsorted[i] = unsorted[i], unsorted[0] heapify(unsorted, 0, i) return unsorted
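heap_sort above calls a heapify helper that is not shown in this row. A minimal max-heapify sketch matching that call signature (list, node index, effective heap size):

def heapify(unsorted, index, heap_size):
    # Sift the value at `index` down until the max-heap property holds
    # for the first `heap_size` elements of `unsorted`.
    largest = index
    left = 2 * index + 1
    right = 2 * index + 2
    if left < heap_size and unsorted[left] > unsorted[largest]:
        largest = left
    if right < heap_size and unsorted[right] > unsorted[largest]:
        largest = right
    if largest != index:
        unsorted[largest], unsorted[index] = unsorted[index], unsorted[largest]
        heapify(unsorted, largest, heap_size)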
Pure implementation of the insertion sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> insertion_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> insertion_sort([]) [] >>> insertion_sort([-2, -5, -45]) [-45, -5, -2]
def insertion_sort(collection): """Pure implementation of the insertion sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> insertion_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> insertion_sort([]) [] >>> insertion_sort([-2, -5, -45]) [-45, -5, -2] """ for index in range(1, len(collection)): while index > 0 and collection[index - 1] > collection[index]: collection[index], collection[index - 1] = collection[index - 1], collection[index] index -= 1 return collection
Pure implementation of the merge sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> merge_sort([]) [] >>> merge_sort([-2, -5, -45]) [-45, -5, -2]
def merge_sort(collection): """Pure implementation of the merge sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> merge_sort([]) [] >>> merge_sort([-2, -5, -45]) [-45, -5, -2] """ length = len(collection) if length > 1: midpoint = length // 2 left_half = merge_sort(collection[:midpoint]) right_half = merge_sort(collection[midpoint:]) i = 0 j = 0 k = 0 left_length = len(left_half) right_length = len(right_half) while i < left_length and j < right_length: if left_half[i] < right_half[j]: collection[k] = left_half[i] i += 1 else: collection[k] = right_half[j] j += 1 k += 1 while i < left_length: collection[k] = left_half[i] i += 1 k += 1 while j < right_length: collection[k] = right_half[j] j += 1 k += 1 return collection
Pure implementation of quick sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> quick_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> quick_sort([]) [] >>> quick_sort([-2, -5, -45]) [-45, -5, -2]
def quick_sort(ARRAY): """Pure implementation of quick sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> quick_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> quick_sort([]) [] >>> quick_sort([-2, -5, -45]) [-45, -5, -2] """ ARRAY_LENGTH = len(ARRAY) if( ARRAY_LENGTH <= 1): return ARRAY else: PIVOT = ARRAY[0] GREATER = [ element for element in ARRAY[1:] if element > PIVOT ] LESSER = [ element for element in ARRAY[1:] if element <= PIVOT ] return quick_sort(LESSER) + [PIVOT] + quick_sort(GREATER)
Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> selection_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> selection_sort([]) [] >>> selection_sort([-2, -5, -45]) [-45, -5, -2]
def selection_sort(collection): """Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> selection_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> selection_sort([]) [] >>> selection_sort([-2, -5, -45]) [-45, -5, -2] """ length = len(collection) for i in range(length - 1): least = i for k in range(i + 1, length): if collection[k] < collection[least]: least = k collection[least], collection[i] = ( collection[i], collection[least] ) return collection
Pure implementation of shell sort algorithm in Python :param collection: Some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending >>> shell_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> shell_sort([]) [] >>> shell_sort([-2, -5, -45]) [-45, -5, -2]
def shell_sort(collection): """Pure implementation of shell sort algorithm in Python :param collection: Some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending >>> shell_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> shell_sort([]) [] >>> shell_sort([-2, -5, -45]) [-45, -5, -2] """ # Marcin Ciura's gap sequence gaps = [701, 301, 132, 57, 23, 10, 4, 1] for gap in gaps: i = gap while i < len(collection): temp = collection[i] j = i while j >= gap and collection[j - gap] > temp: collection[j] = collection[j - gap] j -= gap collection[j] = temp i += 1 return collection
Perform topological sort on a directed acyclic graph.
def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    # Relies on module-level `edges` (adjacency dict) and `vertices` (node list).
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited, add current to sort
    sort.append(current)
    # if all vertices haven't been visited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
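A hypothetical usage sketch: topological_sort above reads the module-level vertices list and edges dict, so those have to exist first. The graph below is an example, not from the source.

vertices = ['a', 'b', 'c', 'd', 'e']                                   # example names
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}  # a->c, a->b, b->d, b->e
print(topological_sort('a', [], []))  # ['c', 'd', 'e', 'b', 'a']
# Vertices are appended in DFS postorder, so for this graph reversing the
# result gives a conventional topological order.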
The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text with complexity O(n + m) 1) Preprocess pattern to identify any suffixes that are identical to prefixes This tells us where to continue from if we get a mismatch between a character in our pattern and the text. 2) Step through the text one character at a time and compare it to a character in the pattern updating our location within the pattern if necessary
def kmp(pattern, text): """ The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text with complexity O(n + m) 1) Preprocess pattern to identify any suffixes that are identical to prefixes This tells us where to continue from if we get a mismatch between a character in our pattern and the text. 2) Step through the text one character at a time and compare it to a character in the pattern updating our location within the pattern if necessary """ # 1) Construct the failure array failure = get_failure_array(pattern) # 2) Step through text searching for pattern i, j = 0, 0 # index into text, pattern while i < len(text): if pattern[j] == text[i]: if j == (len(pattern) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: j = failure[j - 1] continue i += 1 return False
Calculates the new index we should go to if we fail a comparison :param pattern: the pattern being searched for :return: the failure array (for each prefix of the pattern, the length of the longest proper prefix that is also a suffix)
def get_failure_array(pattern): """ Calculates the new index we should go to if we fail a comparison :param pattern: :return: """ failure = [0] i = 0 j = 1 while j < len(pattern): if pattern[i] == pattern[j]: i += 1 elif i > 0: i = failure[i-1] continue j += 1 failure.append(i) return failure
Implementation of the levenshtein distance in Python. :param first_word: the first word to measure the difference. :param second_word: the second word to measure the difference. :return: the levenshtein distance between the two words. Examples: >>> levenshtein_distance("planet", "planetary") 3 >>> levenshtein_distance("", "test") 4 >>> levenshtein_distance("book", "back") 2 >>> levenshtein_distance("book", "book") 0 >>> levenshtein_distance("test", "") 4 >>> levenshtein_distance("", "") 0 >>> levenshtein_distance("orchestration", "container") 10
def levenshtein_distance(first_word, second_word): """Implementation of the levenshtein distance in Python. :param first_word: the first word to measure the difference. :param second_word: the second word to measure the difference. :return: the levenshtein distance between the two words. Examples: >>> levenshtein_distance("planet", "planetary") 3 >>> levenshtein_distance("", "test") 4 >>> levenshtein_distance("book", "back") 2 >>> levenshtein_distance("book", "book") 0 >>> levenshtein_distance("test", "") 4 >>> levenshtein_distance("", "") 0 >>> levenshtein_distance("orchestration", "container") 10 """ # The longer word should come first if len(first_word) < len(second_word): return levenshtein_distance(second_word, first_word) if len(second_word) == 0: return len(first_word) previous_row = range(len(second_word) + 1) for i, c1 in enumerate(first_word): current_row = [i + 1] for j, c2 in enumerate(second_word): # Calculate insertions, deletions and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 substitutions = previous_row[j] + (c1 != c2) # Get the minimum to append to the current row current_row.append(min(insertions, deletions, substitutions)) # Store the previous row previous_row = current_row # Returns the last element (distance) return previous_row[-1]
Manacher’s algorithm, which finds the Longest Palindromic Substring in linear time. 1. first this converts input_string("xyx") into new_string("x|y|x") where odd positions are the actual input characters. 2. for each character in new_string it finds the corresponding palindromic length and stores: a. max_length b. max_length's center 3. returns output_string from center - max_length to center + max_length with all "|" removed
def palindromic_string( input_string ): """ Manacher’s algorithm which finds Longest Palindromic Substring in linear time. 1. first this conver input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. 2. for each character in new_string it find corresponding length and store, a. max_length b. max_length's center 3. return output_string from center - max_length to center + max_length and remove all "|" """ max_length = 0 # if input_string is "aba" than new_input_string become "a|b|a" new_input_string = "" output_string = "" # append each character + "|" in new_string for range(0, length-1) for i in input_string[:len(input_string)-1] : new_input_string += i + "|" #append last character new_input_string += input_string[-1] # for each character in new_string find corresponding palindromic string for i in range(len(new_input_string)) : # get palindromic length from ith position length = palindromic_length(i, 1, new_input_string) # update max_length and start position if max_length < length : max_length = length start = i #create that string for i in new_input_string[start-max_length:start+max_length+1] : if i != "|": output_string += i return output_string
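palindromic_string above depends on a palindromic_length helper that is not included in this row. A sketch consistent with the call palindromic_length(i, 1, new_input_string):

def palindromic_length(center, diff, string):
    # Count how far the palindrome centered at `center` extends on each side.
    if (center - diff == -1 or center + diff == len(string)
            or string[center - diff] != string[center + diff]):
        return 0
    return 1 + palindromic_length(center, diff + 1, string)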
The Rabin-Karp Algorithm for finding a pattern within a piece of text with complexity O(nm); it is most efficient when used with multiple patterns, as it can check whether any of a set of patterns matches a section of text in O(1) given the precomputed hashes. This is the simple version, which assumes only one pattern is being searched for, but it's not hard to modify. 1) Calculate the pattern hash 2) Step through the text one character at a time, passing a window with the same length as the pattern, calculating the hash of the text within the window and comparing it with the hash of the pattern. Only test for equality if the hashes match
def rabin_karp(pattern, text): """ The Rabin-Karp Algorithm for finding a pattern within a piece of text with complexity O(nm), most efficient when it is used with multiple patterns as it is able to check if any of a set of patterns match a section of text in o(1) given the precomputed hashes. This will be the simple version which only assumes one pattern is being searched for but it's not hard to modify 1) Calculate pattern hash 2) Step through the text one character at a time passing a window with the same length as the pattern calculating the hash of the text within the window compare it with the hash of the pattern. Only testing equality if the hashes match """ p_len = len(pattern) p_hash = hash(pattern) for i in range(0, len(text) - (p_len - 1)): # written like this t text_hash = hash(text[i:i + p_len]) if text_hash == p_hash and \ text[i:i + p_len] == pattern: return True return False
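A short usage sketch; note that the version above re-hashes every window with Python's built-in hash() rather than maintaining a rolling hash.

print(rabin_karp("abc", "zabcdef"))  # True
print(rabin_karp("abq", "zabcdef"))  # False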
Yield write_script() argument tuples for a distribution's console_scripts and gui_scripts entry points.
def get_args(cls, dist, header=None): """ Yield write_script() argument tuples for a distribution's console_scripts and gui_scripts entry points. """ if header is None: header = cls.get_header() spec = str(dist.as_requirement()) for type_ in 'console', 'gui': group = type_ + '_scripts' for name, ep in dist.get_entry_map(group).items(): # ensure_safe_name if re.search(r'[\\/]', name): raise ValueError("Path separators not allowed in script names") script_text = TEMPLATE.format( ep.module_name, ep.attrs[0], '.'.join(ep.attrs), spec, group, name) args = cls._get_script_args(type_, name, header, script_text) for res in args: yield res
Adds `--enable-functional` argument.
def pytest_addoption(parser): """Adds `--enable-functional` argument.""" group = parser.getgroup("thefuck") group.addoption('--enable-functional', action="store_true", default=False, help="Enable functional tests")
Ensures that the function removes duplicates and sorts commands.
def test_organize_commands(): """Ensures that the function removes duplicates and sorts commands.""" commands = [CorrectedCommand('ls'), CorrectedCommand('ls -la', priority=9000), CorrectedCommand('ls -lh', priority=100), CorrectedCommand(u'echo café', priority=200), CorrectedCommand('ls -lh', priority=9999)] assert list(organize_commands(iter(commands))) \ == [CorrectedCommand('ls'), CorrectedCommand('ls -lh', priority=100), CorrectedCommand(u'echo café', priority=200), CorrectedCommand('ls -la', priority=9000)]
Ensures that command can be fixed when confirmation enabled.
def with_confirmation(proc, TIMEOUT): """Ensures that command can be fixed when confirmation enabled.""" _set_confirmation(proc, True) proc.sendline(u'ehco test') proc.sendline(u'fuck') assert proc.expect([TIMEOUT, u'echo test']) assert proc.expect([TIMEOUT, u'enter']) assert proc.expect_exact([TIMEOUT, u'ctrl+c']) proc.send('\n') assert proc.expect([TIMEOUT, u'test'])
Ensures that history changed.
def history_changed(proc, TIMEOUT, *to): """Ensures that history changed.""" proc.send('\033[A') pattern = [TIMEOUT] pattern.extend(to) assert proc.expect(pattern)
Ensures that history not changed.
def history_not_changed(proc, TIMEOUT): """Ensures that history not changed.""" proc.send('\033[A') assert proc.expect([TIMEOUT, u'fuck'])
Ensures that command can be selected with arrow keys.
def select_command_with_arrows(proc, TIMEOUT): """Ensures that command can be selected with arrow keys.""" _set_confirmation(proc, True) proc.sendline(u'git h') assert proc.expect([TIMEOUT, u"git: 'h' is not a git command."]) proc.sendline(u'fuck') assert proc.expect([TIMEOUT, u'git show']) proc.send('\033[B') assert proc.expect([TIMEOUT, u'git push']) proc.send('\033[B') assert proc.expect([TIMEOUT, u'git help', u'git hook']) proc.send('\033[A') assert proc.expect([TIMEOUT, u'git push']) proc.send('\033[B') assert proc.expect([TIMEOUT, u'git help', u'git hook']) proc.send('\n') assert proc.expect([TIMEOUT, u'usage', u'fatal: not a git repository'])
Ensures that fix can be refused when confirmation enabled.
def refuse_with_confirmation(proc, TIMEOUT): """Ensures that fix can be refused when confirmation enabled.""" _set_confirmation(proc, True) proc.sendline(u'ehco test') proc.sendline(u'fuck') assert proc.expect([TIMEOUT, u'echo test']) assert proc.expect([TIMEOUT, u'enter']) assert proc.expect_exact([TIMEOUT, u'ctrl+c']) proc.send('\003') assert proc.expect([TIMEOUT, u'Aborted'])
Ensures that command can be fixed when confirmation disabled.
def without_confirmation(proc, TIMEOUT): """Ensures that command can be fixed when confirmation disabled.""" _set_confirmation(proc, False) proc.sendline(u'ehco test') proc.sendline(u'fuck') assert proc.expect([TIMEOUT, u'echo test']) assert proc.expect([TIMEOUT, u'test'])
The character before 'grep' is Alt+Space, which happens frequently on the Mac when typing the pipe character (Alt+7), and holding the Alt key pressed for longer than necessary.
def test_match(): """The character before 'grep' is Alt+Space, which happens frequently on the Mac when typing the pipe character (Alt+7), and holding the Alt key pressed for longer than necessary. """ assert match(Command(u'ps -ef | grep foo', u'-bash:  grep: command not found')) assert not match(Command('ps -ef | grep foo', '')) assert not match(Command('', ''))
Replace the Alt+Space character by a simple space
def test_get_new_command(): """ Replace the Alt+Space character by a simple space """ assert (get_new_command(Command(u'ps -ef | grep foo', '')) == 'ps -ef | grep foo')
test goenv's specific output with quotes (')
def test_match_goenv_output_quote(): """test goenv's specific output with quotes (')""" assert match(Command('goenv list', output="goenv: no such command 'list'"))
Yields all available rules. :type rules_paths: [Path] :rtype: Iterable[Rule]
def get_loaded_rules(rules_paths): """Yields all available rules. :type rules_paths: [Path] :rtype: Iterable[Rule] """ for path in rules_paths: if path.name != '__init__.py': rule = Rule.from_path(path) if rule and rule.is_enabled: yield rule
Yields all rules import paths. :rtype: Iterable[Path]
def get_rules_import_paths(): """Yields all rules import paths. :rtype: Iterable[Path] """ # Bundled rules: yield Path(__file__).parent.joinpath('rules') # Rules defined by user: yield settings.user_dir.joinpath('rules') # Packages with third-party rules: for path in sys.path: for contrib_module in Path(path).glob('thefuck_contrib_*'): contrib_rules = contrib_module.joinpath('rules') if contrib_rules.is_dir(): yield contrib_rules
Returns all enabled rules. :rtype: [Rule]
def get_rules(): """Returns all enabled rules. :rtype: [Rule] """ paths = [rule_path for path in get_rules_import_paths() for rule_path in sorted(path.glob('*.py'))] return sorted(get_loaded_rules(paths), key=lambda rule: rule.priority)
Yields sorted commands without duplicates. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: Iterable[thefuck.types.CorrectedCommand]
def organize_commands(corrected_commands): """Yields sorted commands without duplicates. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: Iterable[thefuck.types.CorrectedCommand] """ try: first_command = next(corrected_commands) yield first_command except StopIteration: return without_duplicates = { command for command in sorted( corrected_commands, key=lambda command: command.priority) if command != first_command} sorted_commands = sorted( without_duplicates, key=lambda corrected_command: corrected_command.priority) logs.debug(u'Corrected commands: {}'.format( ', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands))) for command in sorted_commands: yield command
Returns generator with sorted and unique corrected commands. :type command: thefuck.types.Command :rtype: Iterable[thefuck.types.CorrectedCommand]
def get_corrected_commands(command): """Returns generator with sorted and unique corrected commands. :type command: thefuck.types.Command :rtype: Iterable[thefuck.types.CorrectedCommand] """ corrected_commands = ( corrected for rule in get_rules() if rule.is_match(command) for corrected in rule.get_corrected_commands(command)) return organize_commands(corrected_commands)
Utility that allows disabling colored output.
def color(color_):
    """Utility that allows disabling colored output."""
    if settings.no_colors:
        return ''
    else:
        return color_
Yields actions for pressed keys.
def read_actions(): """Yields actions for pressed keys.""" while True: key = get_key() # Handle arrows, j/k (qwerty), and n/e (colemak) if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'): yield const.ACTION_PREVIOUS elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'): yield const.ACTION_NEXT elif key in (const.KEY_CTRL_C, 'q'): yield const.ACTION_ABORT elif key in ('\n', '\r'): yield const.ACTION_SELECT
Returns: - the first command when confirmation disabled; - None when ctrl+c pressed; - selected command. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: thefuck.types.CorrectedCommand | None
def select_command(corrected_commands): """Returns: - the first command when confirmation disabled; - None when ctrl+c pressed; - selected command. :type corrected_commands: Iterable[thefuck.types.CorrectedCommand] :rtype: thefuck.types.CorrectedCommand | None """ try: selector = CommandSelector(corrected_commands) except NoRuleMatched: logs.failed('No fucks given' if get_alias() == 'fuck' else 'Nothing found') return if not settings.require_confirmation: logs.show_corrected_command(selector.value) return selector.value logs.confirm_text(selector.value) for action in read_actions(): if action == const.ACTION_SELECT: sys.stderr.write('\n') return selector.value elif action == const.ACTION_ABORT: logs.failed('\nAborted') return elif action == const.ACTION_PREVIOUS: selector.previous() logs.confirm_text(selector.value) elif action == const.ACTION_NEXT: selector.next() logs.confirm_text(selector.value)
Caches previous calls to the function.
def memoize(fn): """Caches previous calls to the function.""" memo = {} @wraps(fn) def wrapper(*args, **kwargs): if not memoize.disabled: key = pickle.dumps((args, kwargs)) if key not in memo: memo[key] = fn(*args, **kwargs) value = memo[key] else: # Memoize is disabled, call the function value = fn(*args, **kwargs) return value return wrapper
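The wrapper above consults a memoize.disabled attribute, so in a standalone sketch that flag has to be initialized before the first call (the example function below is hypothetical, and functools.wraps and pickle are assumed to be imported as in the original module):

memoize.disabled = False      # assumed flag; caching stays enabled

@memoize
def square(x):
    print('computing', x)
    return x * x

print(square(3))   # prints "computing 3", then 9
print(square(3))   # served from the cache: just 9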
Returns `program` path or `None`.
def which(program): """Returns `program` path or `None`.""" try: from shutil import which return which(program) except ImportError: def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None
Adds default values to settings if they are not present. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt)
def default_settings(params): """Adds default values to settings if it not presented. Usage: @default_settings({'apt': '/usr/bin/apt'}) def match(command): print(settings.apt) """ def _default_settings(fn, command): for k, w in params.items(): settings.setdefault(k, w) return fn(command) return decorator(_default_settings)
Returns closest match or just first from possibilities.
def get_closest(word, possibilities, cutoff=0.6, fallback_to_first=True): """Returns closest match or just first from possibilities.""" possibilities = list(possibilities) try: return difflib_get_close_matches(word, possibilities, 1, cutoff)[0] except IndexError: if fallback_to_first: return possibilities[0]
Overrides `difflib.get_close_matches` to control argument `n`.
def get_close_matches(word, possibilities, n=None, cutoff=0.6): """Overrides `difflib.get_close_match` to control argument `n`.""" if n is None: n = settings.num_close_matches return difflib_get_close_matches(word, possibilities, n, cutoff)
Replaces command line argument.
def replace_argument(script, from_, to): """Replaces command line argument.""" replaced_in_the_end = re.sub(u' {}$'.format(re.escape(from_)), u' {}'.format(to), script, count=1) if replaced_in_the_end != script: return replaced_in_the_end else: return script.replace( u' {} '.format(from_), u' {} '.format(to), 1)
Helper for *_no_command rules.
def replace_command(command, broken, matched): """Helper for *_no_command rules.""" new_cmds = get_close_matches(broken, matched, cutoff=0.1) return [replace_argument(command.script, broken, new_cmd.strip()) for new_cmd in new_cmds]
Returns `True` if command is call to one of passed app names.
def is_app(command, *app_names, **kwargs): """Returns `True` if command is call to one of passed app names.""" at_least = kwargs.pop('at_least', 0) if kwargs: raise TypeError("got an unexpected keyword argument '{}'".format(kwargs.keys())) if len(command.script_parts) > at_least: return os.path.basename(command.script_parts[0]) in app_names return False
Specifies that matching script is for one of app names.
def for_app(*app_names, **kwargs): """Specifies that matching script is for one of app names.""" def _for_app(fn, command): if is_app(command, *app_names, **kwargs): return fn(command) else: return False return decorator(_for_app)
Caches the function result in a temporary file. The cache expires when the modification date of any file in `depends_on` changes. Only functions should be wrapped in `cache`, not methods.
def cache(*depends_on): """Caches function result in temporary file. Cache will be expired when modification date of files from `depends_on` will be changed. Only functions should be wrapped in `cache`, not methods. """ def cache_decorator(fn): @memoize @wraps(fn) def wrapper(*args, **kwargs): if cache.disabled: return fn(*args, **kwargs) else: return _cache.get_value(fn, depends_on, args, kwargs) return wrapper return cache_decorator
Creates single script from a list of script parts. :type raw_script: [basestring] :rtype: basestring
def format_raw_script(raw_script): """Creates single script from a list of script parts. :type raw_script: [basestring] :rtype: basestring """ if six.PY2: script = ' '.join(arg.decode('utf-8') for arg in raw_script) else: script = ' '.join(raw_script) return script.lstrip()
Fixes previous command. Used when `thefuck` called without arguments.
def fix_command(known_args): """Fixes previous command. Used when `thefuck` called without arguments.""" settings.init(known_args) with logs.debug_time('Total'): logs.debug(u'Run with settings: {}'.format(pformat(settings))) raw_command = _get_raw_command(known_args) try: command = types.Command.from_raw_script(raw_command) except EmptyCommand: logs.debug('Empty command, nothing to do') return corrected_commands = get_corrected_commands(command) selected_command = select_command(corrected_commands) if selected_command: selected_command.run(command) else: sys.exit(1)
Returns parent process pid.
def _get_shell_pid(): """Returns parent process pid.""" proc = Process(os.getpid()) try: return proc.parent().pid except TypeError: return proc.parent.pid
Returns path of special file where we store latest shell pid.
def _get_not_configured_usage_tracker_path(): """Returns path of special file where we store latest shell pid.""" return Path(gettempdir()).joinpath(u'thefuck.last_not_configured_run_{}'.format( getpass.getuser(), ))
Records shell pid to tracker file.
def _record_first_run(): """Records shell pid to tracker file.""" info = {'pid': _get_shell_pid(), 'time': time.time()} mode = 'wb' if six.PY2 else 'w' with _get_not_configured_usage_tracker_path().open(mode) as tracker: json.dump(info, tracker)
Returns `True` when we know that `fuck` called second time.
def _is_second_run(): """Returns `True` when we know that `fuck` called second time.""" tracker_path = _get_not_configured_usage_tracker_path() if not tracker_path.exists(): return False current_pid = _get_shell_pid() with tracker_path.open('r') as tracker: try: info = json.load(tracker) except ValueError: return False if not (isinstance(info, dict) and info.get('pid') == current_pid): return False return (_get_previous_command() == 'fuck' or time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)
Returns `True` when alias already in shell config.
def _is_already_configured(configuration_details): """Returns `True` when alias already in shell config.""" path = Path(configuration_details.path).expanduser() with path.open('r') as shell_config: return configuration_details.content in shell_config.read()
Adds alias to shell config.
def _configure(configuration_details): """Adds alias to shell config.""" path = Path(configuration_details.path).expanduser() with path.open('a') as shell_config: shell_config.write(u'\n') shell_config.write(configuration_details.content) shell_config.write(u'\n')
Shows useful information about how to configure the alias on the first run and configures it automatically on the second. It is only visible when the user types fuck and the alias isn't configured.
def main(): """Shows useful information about how-to configure alias on a first run and configure automatically on a second. It'll be only visible when user type fuck and when alias isn't configured. """ settings.init() configuration_details = shell.how_to_configure() if ( configuration_details and configuration_details.can_configure_automatically ): if _is_already_configured(configuration_details): logs.already_configured(configuration_details) return elif _is_second_run(): _configure(configuration_details) logs.configured_successfully(configuration_details) return else: _record_first_run() logs.how_to_configure_alias(configuration_details)
Create a spawned process. Modified version of pty.spawn with terminal size support.
def _spawn(shell, master_read): """Create a spawned process. Modified version of pty.spawn with terminal size support. """ pid, master_fd = pty.fork() if pid == pty.CHILD: os.execlp(shell, shell) try: mode = tty.tcgetattr(pty.STDIN_FILENO) tty.setraw(pty.STDIN_FILENO) restore = True except tty.error: # This is the same as termios.error restore = False _set_pty_size(master_fd) signal.signal(signal.SIGWINCH, lambda *_: _set_pty_size(master_fd)) try: pty._copy(master_fd, master_read, pty._read) except OSError: if restore: tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode) os.close(master_fd) return os.waitpid(pid, 0)[1]
Logs shell output to the `output`. Works like unix script command with `-f` flag.
def shell_logger(output): """Logs shell output to the `output`. Works like unix script command with `-f` flag. """ if not os.environ.get('SHELL'): logs.warn("Shell logger doesn't support your platform.") sys.exit(1) fd = os.open(output, os.O_CREAT | os.O_TRUNC | os.O_RDWR) os.write(fd, b'\x00' * const.LOG_SIZE_IN_BYTES) buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_WRITE) return_code = _spawn(os.environ['SHELL'], partial(_read, buffer)) sys.exit(return_code)
Reads script output from log. :type script: str :rtype: str | None
def get_output(script): """Reads script output from log. :type script: str :rtype: str | None """ if six.PY2: logs.warn('Experimental instant mode is Python 3+ only') return None if 'THEFUCK_OUTPUT_LOG' not in os.environ: logs.warn("Output log isn't specified") return None if const.USER_COMMAND_MARK not in os.environ.get('PS1', ''): logs.warn( "PS1 doesn't contain user command mark, please ensure " "that PS1 is not changed after The Fuck alias initialization") return None try: with logs.debug_time(u'Read output from log'): fd = os.open(os.environ['THEFUCK_OUTPUT_LOG'], os.O_RDONLY) buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_READ) _skip_old_lines(buffer) lines = _get_output_lines(script, buffer) output = '\n'.join(lines).strip() logs.debug(u'Received output: {}'.format(output)) return output except OSError: logs.warn("Can't read output log") return None except ScriptNotInLog: logs.warn("Script not found in output log") return None
Tries to kill the process otherwise just logs a debug message, the process will be killed when thefuck terminates. :type proc: Process
def _kill_process(proc): """Tries to kill the process otherwise just logs a debug message, the process will be killed when thefuck terminates. :type proc: Process """ try: proc.kill() except AccessDenied: logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format( proc.pid, proc.exe()))
Returns `True` if we can get output of the command in the `settings.wait_command` time. Command will be killed if it wasn't finished in the time. :type popen: Popen :rtype: bool
def _wait_output(popen, is_slow): """Returns `True` if we can get output of the command in the `settings.wait_command` time. Command will be killed if it wasn't finished in the time. :type popen: Popen :rtype: bool """ proc = Process(popen.pid) try: proc.wait(settings.wait_slow_command if is_slow else settings.wait_command) return True except TimeoutExpired: for child in proc.children(recursive=True): _kill_process(child) _kill_process(proc) return False
Runs the script and obtains stdout/stderr. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str | None
def get_output(script, expanded): """Runs the script and obtains stdin/stderr. :type script: str :type expanded: str :rtype: str | None """ env = dict(os.environ) env.update(settings.env) if six.PY2: expanded = expanded.encode('utf-8') split_expand = shlex.split(expanded) is_slow = split_expand[0] in settings.slow_commands if split_expand else False with logs.debug_time(u'Call: {}; with env: {}; is slow: {}'.format( script, env, is_slow)): result = Popen(expanded, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=env) if _wait_output(result, is_slow): output = result.stdout.read().decode('utf-8', errors='replace') logs.debug(u'Received output: {}'.format(output)) return output else: logs.debug(u'Execution timed out!') return None
Returns `True` if the shell logger socket is available. :rtype: bool
def is_available():
    """Returns `True` if the shell logger socket is available.

    :rtype: bool

    """
    path = _get_socket_path()
    if not path:
        return False
    return os.path.exists(path)
Gets command output from shell logger.
def get_output(script): """Gets command output from shell logger.""" with logs.debug_time(u'Read output from external shell logger'): commands = _get_last_n(const.SHELL_LOGGER_LIMIT) for command in commands: if command['command'] == script: lines = _get_output_lines(command['output']) output = '\n'.join(lines).strip() return output else: logs.warn("Output isn't available in shell logger") return None
Get output of the script. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str
def get_output(script, expanded): """Get output of the script. :param script: Console script. :type script: str :param expanded: Console script with expanded aliases. :type expanded: str :rtype: str """ if shell_logger.is_available(): return shell_logger.get_output(script) if settings.instant_mode: return read_log.get_output(script) else: return rerun.get_output(script, expanded)
Get the default brew commands on the local environment
def _get_brew_commands(brew_path_prefix):
    """Get the default brew commands on the local environment"""
    brew_cmd_path = brew_path_prefix + BREW_CMD_PATH

    return [name[:-3] for name in os.listdir(brew_cmd_path)
            if name.endswith(('.rb', '.sh'))]
Get tap-specific commands
https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115
def _get_brew_tap_specific_commands(brew_path_prefix):
    """Get tap-specific commands
    https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115"""
    commands = []
    brew_taps_path = brew_path_prefix + TAP_PATH

    for user in _get_directory_names_only(brew_taps_path):
        taps = _get_directory_names_only(brew_taps_path + '/%s' % user)

        # Brew taps' naming rule
        # https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/brew-tap.md#naming-conventions-and-limitations
        taps = (tap for tap in taps if tap.startswith('homebrew-'))
        for tap in taps:
            tap_cmd_path = brew_taps_path + TAP_CMD_PATH % (user, tap)

            if os.path.isdir(tap_cmd_path):
                commands += (name.replace('brew-', '').replace('.rb', '')
                             for name in os.listdir(tap_cmd_path)
                             if _is_brew_tap_cmd_naming(name))

    return commands
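External tap commands follow the `brew-<name>.rb` naming convention, and only the `<name>` part is kept. Below is a minimal sketch of that transformation with made-up file names, assuming `_is_brew_tap_cmd_naming` accepts `brew-*.rb` files:

# Hypothetical listing of a tap's cmd directory.
names = ['brew-cask-outdated.rb', 'brew-services.rb', 'README.md']

commands = [name.replace('brew-', '').replace('.rb', '')
            for name in names
            if name.startswith('brew-') and name.endswith('.rb')]
print(commands)
# ['cask-outdated', 'services']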
Returns a list of the child directories of the given parent directory
def _get_sub_dirs(parent):
    """Returns a list of the child directories of the given parent directory"""
    return [child for child in os.listdir(parent)
            if os.path.isdir(os.path.join(parent, child))]
Match function copied from cd_mkdir.py
def match(command):
    """Match function copied from cd_mkdir.py"""
    return (
        command.script.startswith('cd ')
        and any((
            'no such file or directory' in command.output.lower(),
            'cd: can\'t cd to' in command.output.lower(),
            'does not exist' in command.output.lower()
        )))
Attempt to rebuild the path string by spellchecking the directories. If it fails (i.e. no directories are a close enough match), then it defaults to the rules of cd_mkdir. Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
def get_new_command(command):
    """
    Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    dest = command.script_parts[1].split(os.sep)
    if dest[-1] == '':
        dest = dest[:-1]

    if dest[0] == '':
        cwd = os.sep
        dest = dest[1:]
    elif six.PY2:
        cwd = os.getcwdu()
    else:
        cwd = os.getcwd()

    for directory in dest:
        if directory == ".":
            continue
        elif directory == "..":
            cwd = os.path.split(cwd)[0]
            continue
        best_matches = get_close_matches(directory, _get_sub_dirs(cwd),
                                         cutoff=MAX_ALLOWED_DIFF)
        if best_matches:
            cwd = os.path.join(cwd, best_matches[0])
        else:
            return cd_mkdir.get_new_command(command)
    return u'cd "{0}"'.format(cwd)
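For reference, the fuzzy matching above uses `difflib.get_close_matches` from the standard library. A small standalone sketch of how the 0.6 cutoff behaves (the directory names are made up for illustration):

from difflib import get_close_matches

sub_dirs = ['projects', 'downloads', 'pictures']  # hypothetical directory listing

# With the default cutoff of 0.6, a close typo is corrected...
print(get_close_matches('projcts', sub_dirs, cutoff=0.6))  # ['projects']
# ...while an unrelated name yields no match, which triggers the cd_mkdir fallback.
print(get_close_matches('musik', sub_dirs, cutoff=0.6))    # []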
Match a mistyped command
def match(command):
    """
    Match a mistyped command
    """
    return "Did you mean 'conda" in command.output
Matches a command's output with docker's output warning you that you need to remove a container before removing an image.
def match(command):
    '''
    Matches a command's output with docker's output
    warning you that you need to remove a container before removing an image.
    '''
    return 'image is being used by running container' in command.output
Prepends docker container rm -f {container ID} to the previous docker image rm {image ID} command
def get_new_command(command):
    '''
    Prepends docker container rm -f {container ID} to
    the previous docker image rm {image ID} command
    '''
    container_id = command.output.strip().split(' ')
    return shell.and_('docker container rm -f {}', '{}').format(container_id[-1], command.script)
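The rule simply takes the last space-separated token of the error output as the container ID. Here is a sketch of that parsing with a made-up error message in the shape docker usually produces (the exact wording varies between docker versions, and `shell.and_` is assumed to join commands with the shell's "and" operator, e.g. `&&` in bash):

# Hypothetical error output; the rule only relies on the container ID being the last token.
output = ('Error response from daemon: conflict: unable to delete 407515a5bf57 '
          '(cannot be forced) - image is being used by running container 59e2da65bc2b')
script = 'docker image rm 407515a5bf57'

container_id = output.strip().split(' ')[-1]
print('docker container rm -f {} && {}'.format(container_id, script))
# docker container rm -f 59e2da65bc2b && docker image rm 407515a5bf57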
Match a mistyped command
def match(command):
    '''
    Match a mistyped command
    '''
    return 'lfs' in command.script and 'Did you mean this?' in command.output
When the argument order is wrong, the first argument will be the destination.
def _get_destination(script_parts):
    """When the argument order is wrong, the first argument will be the destination."""
    for part in script_parts:
        if part not in {'ln', '-s', '--symbolic'} and os.path.exists(part):
            return part
What the `whois` command returns depends on the 'Whois server' it
contacted and is not consistent through different servers.
But there can be only two types of errors I can think of with `whois`:
- `whois https://en.wikipedia.org/` → `whois en.wikipedia.org`;
- `whois en.wikipedia.org` → `whois wikipedia.org`.
So we match any `whois` command and then:
- if there is a slash: keep only the FQDN;
- if there is no slash but there is a point: remove the left-most subdomain.
We cannot simply remove all subdomains, because we cannot know which part is
the subdomain and which is the domain; consider:
- www.google.fr → subdomain: www, domain: 'google.fr';
- google.co.uk → subdomain: None, domain: 'google.co.uk'.
def match(command):
    """
    What the `whois` command returns depends on the 'Whois server' it
    contacted and is not consistent through different servers.
    But there can be only two types of errors I can think of with `whois`:
        - `whois https://en.wikipedia.org/` → `whois en.wikipedia.org`;
        - `whois en.wikipedia.org` → `whois wikipedia.org`.
    So we match any `whois` command and then:
        - if there is a slash: keep only the FQDN;
        - if there is no slash but there is a point: remove the left-most
          subdomain.

    We cannot simply remove all subdomains, because we cannot know which part
    is the subdomain and which is the domain; consider:
        - www.google.fr → subdomain: www, domain: 'google.fr';
        - google.co.uk → subdomain: None, domain: 'google.co.uk'.
    """
    return True
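Below is a standalone sketch of the correction this matcher sets up — keep only the FQDN when a URL was passed, otherwise drop the left-most label. It uses only the standard library and is an illustration under those assumptions, not the project's actual `get_new_command`:

from urllib.parse import urlparse

def suggest_whois_target(argument):
    # Hypothetical helper, for illustration only.
    if '/' in argument:
        # `whois https://en.wikipedia.org/` -> keep only the host part.
        return urlparse(argument).netloc
    if '.' in argument:
        # `whois en.wikipedia.org` -> drop the left-most subdomain.
        return '.'.join(argument.split('.')[1:])
    return argument

print(suggest_whois_target('https://en.wikipedia.org/'))  # en.wikipedia.org
print(suggest_whois_target('en.wikipedia.org'))           # wikipedia.org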
Gets the packages that provide the given command using `pkgfile`. If the command is of the form `sudo foo`, searches for the `foo` command instead.
def get_pkgfile(command):
    """ Gets the packages that provide the given command using `pkgfile`.

    If the command is of the form `sudo foo`, searches for the `foo` command instead.
    """
    try:
        command = command.strip()

        if command.startswith('sudo '):
            command = command[5:]

        command = command.split(" ")[0]

        packages = subprocess.check_output(
            ['pkgfile', '-b', '-v', command],
            universal_newlines=True, stderr=utils.DEVNULL
        ).splitlines()

        return [package.split()[0] for package in packages]
    except subprocess.CalledProcessError as err:
        if err.returncode == 1 and err.output == "":
            return []
        else:
            raise err
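For context, `pkgfile -b -v <command>` prints one line per owning package, and the parsing above keeps only the first whitespace-separated field. A sketch of that step on made-up output lines (the exact columns depend on the pkgfile version):

# Hypothetical `pkgfile -b -v convert` output; only the first field is kept.
packages = [
    'extra/imagemagick 7.1.0.5-1\t/usr/bin/convert',
    'community/graphicsmagick 1.3.36-2\t/usr/bin/convert',
]
print([package.split()[0] for package in packages])
# ['extra/imagemagick', 'community/graphicsmagick']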
Get the brew path prefix
def get_brew_path_prefix():
    """Get the brew path prefix"""
    try:
        return subprocess.check_output(['brew', '--prefix'],
                                       universal_newlines=True).strip()
    except Exception:
        return None
Resolves git aliases and supports testing for both git and hub.
def git_support(fn, command):
    """Resolves git aliases and supports testing for both git and hub."""
    # supports GitHub's `hub` command
    # which is recommended to be used with `alias git=hub`
    # but at this point, shell aliases have already been resolved
    if not is_app(command, 'git', 'hub'):
        return False

    # perform git aliases expansion
    if command.output and 'trace: alias expansion:' in command.output:
        search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
                           command.output)
        alias = search.group(1)

        # by default git quotes everything, for example:
        #     'commit' '--amend'
        # which is surprising and does not allow to easily test for
        # eg. 'git commit'
        expansion = ' '.join(shell.quote(part)
                             for part in shell.split_command(search.group(2)))
        new_script = re.sub(r"\b{}\b".format(alias), expansion, command.script)

        command = command.update(script=new_script)

    return fn(command)
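A rough illustration of the rewrite performed above. The trace line format follows the regex in the code, the alias is made up, and the `shell.quote`/`shell.split_command` re-quoting step is skipped to keep the sketch standard-library only:

import re

output = 'trace: alias expansion: st => status'
script = 'git st'

search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)", output)
alias, expansion = search.group(1), search.group(2)
print(re.sub(r"\b{}\b".format(alias), expansion, script))
# git status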
Get custom npm scripts.
def get_scripts():
    """Get custom npm scripts."""
    proc = Popen(['npm', 'run-script'], stdout=PIPE)
    should_yield = False
    for line in proc.stdout.readlines():
        line = line.decode()
        if 'available via `npm run-script`:' in line:
            should_yield = True
            continue

        if should_yield and re.match(r'^ [^ ]+', line):
            yield line.strip().split(' ')[0]
Removes sudo before calling fn and adds it after.
def sudo_support(fn, command):
    """Removes sudo before calling fn and adds it after."""
    if not command.script.startswith('sudo '):
        return fn(command)

    result = fn(command.update(script=command.script[5:]))

    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
Method that filters a list
:param lst: list to be filtered
:return: new filtered list
def filter(lst):
    """
    Method that filters a list

    :param lst: list to be filtered
    :return: new filtered list
    """
    if lst is None:
        return []
    if not isinstance(lst, set):
        lst = set(lst)  # Remove duplicates.

    new_lst = []
    for item in lst:
        item = str(item)
        if (item[0].isalpha() or item[0].isdigit()) and ('xxx' not in item) and ('..' not in item):
            item = item.replace('252f', '').replace('2F', '').replace('2f', '')
            new_lst.append(item.lower())

    return new_lst
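A quick illustration of what the filter keeps and normalises, calling the function defined above (the input values are made up; the result is sorted because deduplication goes through a set, so ordering is not guaranteed):

result = filter(['Example.COM', 'xxx.tracker.net', '..broken', '252fsub.example.org'])
print(sorted(result))
# ['example.com', 'sub.example.org']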
Method that is used to generate a random delay
def get_delay() -> float:
    """Method that is used to generate a random delay"""
    # random.randint(1, 3) yields 1, 2 or 3, so the possible delays are 0.5, 1.5 or 2.5.
    return random.randint(1, 3) - 0.5
Serialize a network range in a constant format, 'x.x.x.x/y'.

Parameters
----------
ip: str.
    A serialized ip in the format 'x.x.x.x'.
    Extra information like port (':z') or subnet ('/n') will be ignored.
netmask: str.
    The subnet subdivision, represented by a 2 digit netmask.

Returns
-------
out: str.
    The network OSI address, like '192.168.0.0/24'.
def serialize_ip_range(ip: str, netmask: str = '24') -> str:
    """
    Serialize a network range in a constant format, 'x.x.x.x/y'.

    Parameters
    ----------
    ip: str.
        A serialized ip in the format 'x.x.x.x'.
        Extra information like port (':z') or subnet ('/n') will be ignored.
    netmask: str.
        The subnet subdivision, represented by a 2 digit netmask.

    Returns
    -------
    out: str.
        The network OSI address, like '192.168.0.0/24'.
    """
    __ip_matches = re.search(NETWORK_REGEX, ip, re.IGNORECASE)
    if __ip_matches and __ip_matches.groups():
        __ip = __ip_matches.group(1)
        __netmask = netmask if netmask else __ip_matches.group(3)

        if __ip and __netmask:
            return str(IPv4Network(f'{__ip}/{__netmask}', strict=False))
        elif __ip:
            return str(IPv4Network('{}/{}'.format(__ip, '24'), strict=False))

    # invalid input ip
    return ''
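For reference, the normalisation is done by `ipaddress.IPv4Network` with `strict=False`, which masks away the host bits instead of rejecting them; a standalone sketch of that step:

from ipaddress import IPv4Network

# With strict=False the host part is zeroed out rather than raising an error.
print(str(IPv4Network('192.168.0.37/24', strict=False)))  # 192.168.0.0/24
print(str(IPv4Network('10.0.5.1/16', strict=False)))      # 10.0.0.0/16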
List all the IPs in the range.

Parameters
----------
iprange: str.
    A serialized ip range, like '1.2.3.0/24'.
    The last digit can be set to anything, it will be ignored.

Returns
-------
out: list.
    The list of IPs in the range.
def list_ips_in_network_range(iprange: str) -> list[str]:
    """
    List all the IPs in the range.

    Parameters
    ----------
    iprange: str.
        A serialized ip range, like '1.2.3.0/24'.
        The last digit can be set to anything, it will be ignored.

    Returns
    -------
    out: list.
        The list of IPs in the range.
    """
    try:
        __network = IPv4Network(iprange, strict=False)
        return [__address.exploded for __address in __network.hosts()]
    except Exception:
        return []
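Note that `hosts()` excludes the network and broadcast addresses, so a /30 yields exactly two usable hosts; a quick standalone check:

from ipaddress import IPv4Network

network = IPv4Network('192.168.0.0/30', strict=False)
print([address.exploded for address in network.hosts()])
# ['192.168.0.1', '192.168.0.2']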
Display the current query in the console.

Parameters
----------
ip: str.
    Queried ip.

Returns
-------
out: None.
def log_query(ip: str) -> None:
    """
    Display the current query in the console.

    Parameters
    ----------
    ip: str.
        Queried ip.

    Returns
    -------
    out: None.
    """
    # ESC[2K clears the current line and ESC[G moves the cursor to column 1,
    # so successive queries overwrite each other on a single console line.
    sys.stdout.write(chr(27) + '[2K' + chr(27) + '[G')
    sys.stdout.write('\r' + ip + ' - ')
    sys.stdout.flush()
Display the query result in the console.

Parameters
----------
host: str.
    Host name returned by the DNS query.

Returns
-------
out: None.
def log_result(host: str) -> None:
    """
    Display the query result in the console.

    Parameters
    ----------
    host: str.
        Host name returned by the DNS query.

    Returns
    -------
    out: None.
    """
    if host:
        print(host)
Postprocess the query results asynchronously too, instead of waiting for the querying stage to be completely finished.

Parameters
----------
target: str.
    The domain wanted as TLD.
allhosts: List.
    A collection of all the subdomains -of target- found so far.

Returns
-------
out: Callable.
    A function that will update the collection of target subdomains when the query result is satisfying.
def generate_postprocessing_callback(target: str, **allhosts: list[str]) -> Callable:
    """
    Postprocess the query results asynchronously too, instead of waiting for
    the querying stage to be completely finished.

    Parameters
    ----------
    target: str.
        The domain wanted as TLD.
    allhosts: List.
        A collection of all the subdomains -of target- found so far.

    Returns
    -------
    out: Callable.
        A function that will update the collection of target subdomains
        when the query result is satisfying.
    """
    def append_matching_hosts(host: str) -> None:
        if host and target in host:
            for __name, __hosts in allhosts.items():
                if host not in __hosts:
                    __hosts.append(host)

    return append_matching_hosts
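A small usage sketch showing how the returned closure mutates the keyword collections in place (the names and hosts here are arbitrary):

found: list[str] = []
callback = generate_postprocessing_callback('example.com', found=found)

callback('mail.example.com')   # contains the target, gets appended
callback('mail.example.com')   # duplicate, ignored
callback('unrelated.org')      # does not contain the target, ignored

print(found)
# ['mail.example.com']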
Get a copy of the default config.
def get_cfg():
    """
    Get a copy of the default config.
    """
    return _assert_and_infer_cfg(_C.clone())