Columns: function (string, 18 to 3.86k characters) · intent_category (string, 5 to 24 characters)
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
divide_and_conquer
def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])
divide_and_conquer
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
divide_and_conquer
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # points are sorted on y; the closest pair in the strip must lie within
    # six positions of each other, so only a constant window is scanned
    for i in range(1, points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
divide_and_conquer
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion: split around the median x-coordinate
    mid = points_counts // 2
    mid_x = points_sorted_on_x[mid][0]
    left_x = points_sorted_on_x[:mid]
    right_x = points_sorted_on_x[mid:]
    closest_in_left = closest_pair_of_points_sqr(
        left_x, column_based_sort(left_x, 1), mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        right_x, column_based_sort(right_x, 1), points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points whose squared x-distance to the dividing line may beat the current
    # minimum, kept in y-sorted order for the strip scan
    cross_strip = [
        point
        for point in points_sorted_on_y
        if (point[0] - mid_x) ** 2 < closest_pair_dis
    ]
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)
divide_and_conquer
def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
divide_and_conquer
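A minimal usage sketch for the closest-pair helpers above; the point list is illustrative, not from the source:

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(closest_pair_of_points(points, len(points)))  # ~1.4142, between (2, 3) and (3, 4)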
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
divide_and_conquer
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
divide_and_conquer
def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
divide_and_conquer
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
divide_and_conquer
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
divide_and_conquer
def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
divide_and_conquer
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
divide_and_conquer
import math


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        raise Exception(
            "Unable to multiply these matrices, please check the dimensions. \n"
            f"Matrix A:{matrix1} \nMatrix B:{matrix2}"
        )
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(max(dimension1), max(dimension2))
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # copy the rows so the caller's matrices are not padded in place
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # pad the matrices with zeros so that both are square with a
    # power-of-2 dimension
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # remove the padding zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
divide_and_conquer
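A quick sanity check for the Strassen wrapper; the matrices are illustrative:

a = [[1, 2], [3, 4]]
b = [[5, 6], [7, 8]]
print_matrix(strassen(a, b))  # [19, 22] then [43, 50]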
from random import choice


def random_pivot(lst):
    return choice(lst)
divide_and_conquer
def kth_number(lst: list[int], k: int) -> int:
    # pick a pivot and partition around it (linear time)
    pivot = random_pivot(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    pivot_count = len(lst) - len(small) - len(big)  # handles duplicates of the pivot

    # small (elements smaller than the pivot)
    # + pivot occurrences
    # + big (elements larger than the pivot)
    if len(small) < k <= len(small) + pivot_count:
        # the kth element is one of the pivot copies
        return pivot
    elif k > len(small) + pivot_count:
        # the kth element lies among the larger elements
        return kth_number(big, k - len(small) - pivot_count)
    else:
        # the kth element lies among the smaller elements
        return kth_number(small, k)
divide_and_conquer
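A one-line usage sketch for the quickselect above (input illustrative):

print(kth_number([2, 1, 3, 4, 5], k=3))  # 3, the third-smallest element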
def merge(left_half: list, right_half: list) -> list:
    sorted_array = [None] * (len(right_half) + len(left_half))

    pointer1 = 0  # pointer to the current index of the left half
    pointer2 = 0  # pointer to the current index of the right half
    index = 0  # pointer to the current index of the sorted array

    while pointer1 < len(left_half) and pointer2 < len(right_half):
        if left_half[pointer1] < right_half[pointer2]:
            sorted_array[index] = left_half[pointer1]
            pointer1 += 1
        else:
            sorted_array[index] = right_half[pointer2]
            pointer2 += 1
        index += 1
    while pointer1 < len(left_half):
        sorted_array[index] = left_half[pointer1]
        pointer1 += 1
        index += 1
    while pointer2 < len(right_half):
        sorted_array[index] = right_half[pointer2]
        pointer2 += 1
        index += 1
    return sorted_array
divide_and_conquer
def merge_sort(array: list) -> list:
    if len(array) <= 1:
        return array
    # split the array at the middle index, recursively sort each half,
    # then merge the two sorted halves
    middle = len(array) // 2
    left_half = array[:middle]
    right_half = array[middle:]
    return merge(merge_sort(left_half), merge_sort(right_half))
divide_and_conquer
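Usage, with an illustrative input:

print(merge_sort([7, 3, 9, 1, 4]))  # [1, 3, 4, 7, 9]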
def actual_power(a: int, b: int):
    """Compute a**b (b >= 0) with O(log b) multiplications by repeated squaring."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half power once, then square it
    if b % 2 == 0:
        return half * half
    return a * half * half
divide_and_conquer
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
divide_and_conquer
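A quick check of the fast-power pair above:

print(power(2, 10))  # 1024
print(power(2, -3))  # 0.125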
from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
electronics
from math import pow, sqrt


def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
electronics
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
electronics
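A usage sketch for the three solvers above; each takes the unknown quantity as 0 (values illustrative):

print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electrical_impedance(3, 4, 0))                  # {'impedance': 5.0}
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}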
# module-level constant assumed by this snippet: the elementary charge in coulombs
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
electronics
# module-level constant assumed by this snippet: Coulomb's constant
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def couloumbs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
electronics
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # module-level constant assumed by this snippet: temperature in kelvin


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
electronics
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    else:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
electronics
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / float(resistor)
    return 1 / first_sum
electronics
def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
        sum_r += resistor
    return sum_r
electronics
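A quick check of the two resistor combiners (values illustrative):

print(resistor_parallel([3.0, 6.0]))  # 2.0
print(resistor_series([3.0, 6.0]))    # 9.0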
from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
electronics
from math import pi


def ind_reactance(
    inductance: float, frequency: float, reactance: float
) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
electronics
def __init__(self) -> None:
    self.first_signal = [2, 1, 2, -1]
    self.second_signal = [1, 2, 3, 4]
electronics
def circular_convolution(self) -> list[float]:
    length_first_signal = len(self.first_signal)
    length_second_signal = len(self.second_signal)
    max_length = max(length_first_signal, length_second_signal)

    # create a zero matrix of max_length x max_length
    matrix = [[0] * max_length for _ in range(max_length)]

    # pad the smaller signal with zeros to make both signals the same length
    if length_first_signal < length_second_signal:
        self.first_signal += [0] * (max_length - length_first_signal)
    elif length_first_signal > length_second_signal:
        self.second_signal += [0] * (max_length - length_second_signal)

    # fill the matrix with rotated copies of the second signal
    for i in range(max_length):
        rotated_signal = deque(self.second_signal)
        rotated_signal.rotate(i)
        for j, item in enumerate(rotated_signal):
            matrix[i][j] += item

    # multiply the matrix with the first signal
    final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

    # round off to two decimal places
    return [round(i, 2) for i in final_signal]
electronics
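A usage sketch, assuming the two methods above belong to a class (here called CircularConvolution) with deque and numpy imported at module level:

conv = CircularConvolution()        # hypothetical class wrapping the two methods above
print(conv.circular_convolution())  # [10.0, 10.0, 6.0, 14.0]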
import numpy as np


def mae(predict, actual):
    predict = np.array(predict)
    actual = np.array(actual)

    difference = abs(predict - actual)
    score = difference.mean()
    return score
machine_learning
import numpy as np


def mse(predict, actual):
    predict = np.array(predict)
    actual = np.array(actual)

    difference = predict - actual
    square_diff = np.square(difference)
    score = square_diff.mean()
    return score
machine_learning
import numpy as np


def rmse(predict, actual):
    predict = np.array(predict)
    actual = np.array(actual)

    difference = predict - actual
    square_diff = np.square(difference)
    mean_square_diff = square_diff.mean()
    score = np.sqrt(mean_square_diff)
    return score
machine_learning
import numpy as np


def rmsle(predict, actual):
    predict = np.array(predict)
    actual = np.array(actual)

    log_predict = np.log(predict + 1)
    log_actual = np.log(actual + 1)

    difference = log_predict - log_actual
    square_diff = np.square(difference)
    mean_square_diff = square_diff.mean()
    score = np.sqrt(mean_square_diff)
    return score
machine_learning
import numpy as np


def mbd(predict, actual):
    predict = np.array(predict)
    actual = np.array(actual)

    difference = predict - actual
    numerator = np.sum(difference) / len(predict)
    denominator = np.sum(actual) / len(predict)
    score = float(numerator) / denominator * 100
    return score
machine_learning
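A quick check of the regression metrics above (arrays illustrative):

y_pred = [1.0, 2.0, 3.0]
y_true = [1.0, 2.5, 2.0]
print(mae(y_pred, y_true))   # 0.5
print(mse(y_pred, y_true))   # ~0.4167
print(rmse(y_pred, y_true))  # ~0.6455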
def data_handling(data: dict) -> tuple:
    # split the dataset into features ("data") and target
    return (data["data"], data["target"])
machine_learning
import numpy as np
from xgboost import XGBClassifier


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
machine_learning
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split


def main() -> None:
    # load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # display the normalized confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
machine_learning
from random import gauss, seed


def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
    seed(1)
    return [gauss(mean, std_dev) for _ in range(instance_count)]
machine_learning
def y_generator(class_count: int, instance_count: list) -> list:
    return [k for k in range(class_count) for _ in range(instance_count[k])]
machine_learning
def calculate_mean(instance_count: int, items: list) -> float:
    # the sum of all items divided by number of instances
    return sum(items) / instance_count
machine_learning
def calculate_probabilities(instance_count: int, total_count: int) -> float:
    # number of instances in specific class divided by number of all instances
    return instance_count / total_count
machine_learning
def calculate_variance(items: list, means: list, total_count: int) -> float:
    squared_diff = []  # squared difference of each item from its class mean
    for i in range(len(items)):
        for j in range(len(items[i])):
            squared_diff.append((items[i][j] - means[i]) ** 2)

    # pooled variance: sum of squared differences divided by
    # (number of all instances - number of classes)
    n_classes = len(means)  # number of classes in the dataset
    return 1 / (total_count - n_classes) * sum(squared_diff)
machine_learning
from math import log


def predict_y_values(
    x_items: list, means: list, variance: float, probabilities: list
) -> list:
    # stores the discriminant values of all items in the dataset, one list per item
    results = []
    for i in range(len(x_items)):
        for j in range(len(x_items[i])):
            temp = []  # discriminant values of the current item, one per class
            for k in range(len(x_items)):
                temp.append(
                    x_items[i][j] * (means[k] / variance)
                    - (means[k] ** 2 / (2 * variance))
                    + log(probabilities[k])
                )
            results.append(temp)
    # the predicted class is the one with the largest discriminant value
    return [result.index(max(result)) for result in results]
machine_learning
def accuracy(actual_y: list, predicted_y: list) -> float:
    # a prediction is correct if the actual y value equals the predicted y value
    correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
    # accuracy is the number of correct predictions divided by the number of
    # all data points, multiplied by 100
    return (correct / len(actual_y)) * 100
machine_learning
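A minimal end-to-end sketch of the linear-discriminant helpers above; the class counts and parameters are illustrative:

counts = [20, 20]  # two classes of 20 one-dimensional points each
x_items = [
    gaussian_distribution(5.0, 1.0, counts[0]),
    gaussian_distribution(10.0, 1.0, counts[1]),
]
y = y_generator(2, counts)
means = [calculate_mean(counts[i], x_items[i]) for i in range(2)]
total = sum(counts)
variance = calculate_variance(x_items, means, total)
probabilities = [calculate_probabilities(counts[i], total) for i in range(2)]
predicted = predict_y_values(x_items, means, variance, probabilities)
print(accuracy(y, predicted))  # should be close to 100.0 for well-separated means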
from collections.abc import Callable
from typing import TypeVar

num = TypeVar("num")


def valid_input(
    input_type: Callable[[object], num],  # usually float or int
    input_msg: str,
    err_msg: str,
    condition: Callable[[num], bool] = lambda x: True,
    default: str | None = None,
) -> num:
    while True:
        raw = input(input_msg).strip() or default
        try:
            user_input = input_type(raw)
            if condition(user_input):
                return user_input
            else:
                print(f"{user_input}: {err_msg}")
                continue
        except ValueError:
            print(f"{raw}: Incorrect input type, expected {input_type.__name__!r}")
machine_learning
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
machine_learning
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]  # bias term
    return hyp_val
machine_learning
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
machine_learning
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
machine_learning
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)  # derivative w.r.t. the bias term
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
machine_learning
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
machine_learning
import numpy


def run_gradient_descent():
    global parameter_vector
    # tune these values to set a tolerance for the predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(f"Number of iterations: {j}")
machine_learning
def test_gradient_descent():
    for i in range(len(test_data)):
        print(f"Actual output value: {output(i, 'test')}")
        print(f"Hypothesis output: {calculate_hypothesis_value(i, 'test')}")
machine_learning
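The gradient-descent helpers above share module-level state. A sketch of the assumed globals, with illustrative data, so the two drivers can run (the names are taken from the functions; the values are placeholders):

# ((x1, x2, x3), y) training pairs; the hypothesis has one bias plus three weights
train_data = (((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8))
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009

run_gradient_descent()
test_gradient_descent()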
import math

import numpy as np


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
machine_learning
import numpy as np


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        raise ValueError(
            f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
            f"value_array : {value_array.ndim}"
        )

    try:
        if dataset.shape[1] != value_array.shape[1]:
            raise ValueError(
                f"Wrong input data's shape... dataset : {dataset.shape[1]}, "
                f"value_array : {value_array.shape[1]}"
            )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        raise TypeError(
            f"Input data have different datatype... dataset : {dataset.dtype}, "
            f"value_array : {value_array.dtype}"
        )

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
machine_learning
import numpy as np
from numpy.linalg import norm


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
machine_learning
import numpy as np


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))
machine_learning
from collections import Counter


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # distances of all training points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # choose the k points with the smallest distances
    votes = [i[1] for i in sorted(distances)[:k]]
    # the point is classified as the most commonly occurring class among them
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
machine_learning
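A usage sketch for the k-nearest-neighbours classifier above; the training set and class names are illustrative:

train_x = [[0, 0], [0, 1], [1, 0], [5, 5], [5, 6], [6, 5]]
train_y = [0, 0, 0, 1, 1, 1]
classes = ["near origin", "far from origin"]
print(classifier(train_x, train_y, classes, [1, 1], k=3))  # "near origin"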
def __init__(self, depth=5, min_leaf_size=5):
    self.depth = depth
    self.decision_boundary = 0
    self.left = None
    self.right = None
    self.min_leaf_size = min_leaf_size
    self.prediction = None
machine_learning
def mean_squared_error(self, labels, prediction):
    if labels.ndim != 1:
        print("Error: Input labels must be one dimensional")
    return np.mean((labels - prediction) ** 2)
machine_learning
def train(self, x, y):
    if x.ndim != 1:
        print("Error: Input data set must be one dimensional")
        return
    if len(x) != len(y):
        print("Error: X and y have different lengths")
        return
    if y.ndim != 1:
        print("Error: Data set labels must be one dimensional")
        return

    if len(x) < 2 * self.min_leaf_size:
        self.prediction = np.mean(y)
        return
    if self.depth == 1:
        self.prediction = np.mean(y)
        return

    best_split = 0
    min_error = self.mean_squared_error(y, np.mean(y)) * 2

    # try every split position that leaves both leaves large enough and keep
    # the one with the smallest summed squared error of the labels
    for i in range(len(x)):
        if len(x[:i]) < self.min_leaf_size:
            continue
        elif len(x[i:]) < self.min_leaf_size:
            continue
        else:
            error_left = self.mean_squared_error(y[:i], np.mean(y[:i]))
            error_right = self.mean_squared_error(y[i:], np.mean(y[i:]))
            error = error_left + error_right
            if error < min_error:
                best_split = i
                min_error = error

    if best_split != 0:
        left_x = x[:best_split]
        left_y = y[:best_split]
        right_x = x[best_split:]
        right_y = y[best_split:]

        self.decision_boundary = x[best_split]
        self.left = DecisionTree(depth=self.depth - 1, min_leaf_size=self.min_leaf_size)
        self.right = DecisionTree(depth=self.depth - 1, min_leaf_size=self.min_leaf_size)
        self.left.train(left_x, left_y)
        self.right.train(right_x, right_y)
    else:
        self.prediction = np.mean(y)
machine_learning
def predict(self, x):
    if self.prediction is not None:
        return self.prediction
    elif self.left is not None and self.right is not None:
        if x >= self.decision_boundary:
            return self.right.predict(x)
        else:
            return self.left.predict(x)
    else:
        print("Error: Decision tree not yet trained")
        return None
machine_learning
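A usage sketch for the DecisionTree regressor above, fitting a sine curve (data illustrative):

import numpy as np

x = np.arange(-1.0, 1.0, 0.005)
y = np.sin(x)
tree = DecisionTree(depth=10, min_leaf_size=10)
tree.train(x, y)
print(tree.predict(0.5))  # close to sin(0.5) ~ 0.479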
def __init__(self):
    self.position = (0, 0)
    self.parent = None
    self.g = 0
    self.h = 0
    self.f = 0
machine_learning
def __eq__(self, cell):
    return self.position == cell.position
machine_learning
def showcell(self):
    print(self.position)
machine_learning
def __init__(self, world_size=(5, 5)):
    self.w = np.zeros(world_size)
    self.world_x_limit = world_size[0]
    self.world_y_limit = world_size[1]
machine_learning
def show(self):
    print(self.w)
machine_learning
def get_neigbours(self, cell):
    # offsets of the 8 surrounding cells
    neighbour_coords = [
        (-1, -1),
        (-1, 0),
        (-1, 1),
        (0, -1),
        (0, 1),
        (1, -1),
        (1, 0),
        (1, 1),
    ]
    current_x = cell.position[0]
    current_y = cell.position[1]
    neighbours = []
    for n in neighbour_coords:
        x = current_x + n[0]
        y = current_y + n[1]
        if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
            c = Cell()
            c.position = (x, y)
            c.parent = cell
            neighbours.append(c)
    return neighbours
machine_learning
import numpy as np


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # skip neighbours that have already been expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip neighbours already queued with an equal or better score
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
machine_learning
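A usage sketch, assuming the world class the __init__/show/get_neigbours methods above belong to is called Gridworld and the node class is Cell:

world = Gridworld()
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(astar(world, start, goal))  # e.g. [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]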
import numpy as np


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
machine_learning
import numpy as np


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
machine_learning
import numpy as np


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
machine_learning
import numpy as np


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # update the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # print the loss every 100 iterations
    return theta
machine_learning
def predict_prob(x):
    # probability predicted by the trained logistic regression model,
    # using the module-level theta produced by logistic_reg
    return sigmoid_function(np.dot(x, theta))
machine_learning
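A usage sketch for the logistic-regression helpers above; the two Gaussian blobs are illustrative, and theta is assigned at module level so predict_prob can see it:

import numpy as np

rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
y = np.array([0] * 50 + [1] * 50)
theta = logistic_reg(alpha=0.1, x=x, y=y, max_iterations=1000)
print(predict_prob(x[:3]))  # probabilities near 0 for first-class points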
def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
    # the winning vector is the weight row with the smaller squared
    # distance to the sample
    d0 = 0.0
    d1 = 0.0
    for i in range(len(sample)):
        d0 += math.pow((sample[i] - weights[0][i]), 2)
        d1 += math.pow((sample[i] - weights[1][i]), 2)
    return 0 if d0 < d1 else 1
machine_learning
def update(
    self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
) -> list[list[int | float]]:
    # move every component of the winning weight vector toward the sample
    for i in range(len(weights[j])):
        weights[j][i] += alpha * (sample[i] - weights[j][i])
    return weights
machine_learning
def main() -> None:
    # training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify a test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
machine_learning
import string


def term_frequency(term: str, document: str) -> int:
    # strip all punctuation and newlines, then tokenize on spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")
    return len([word for word in tokenize_document if word.lower() == term.lower()])
machine_learning
import string


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    # strip all punctuation; each line of the corpus is treated as one document
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
machine_learning
from math import log10


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
machine_learning
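A worked sketch of the tf-idf helpers above; the three-line corpus is illustrative:

corpus = "this is the first document\nthis document is the second document\nand this is the third one"
tf = term_frequency("document", corpus.split("\n")[1])  # 2
df, n = document_frequency("document", corpus)          # (2, 3)
print(tf * inverse_document_frequency(df, n))           # 2 * 0.176 = 0.352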
import numpy as np
from numpy import ndarray


def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
machine_learning
def __init__(
    self,
    *,
    regularization: float = np.inf,
    kernel: str = "linear",
    gamma: float = 0,
) -> None:
    self.regularization = regularization
    self.gamma = gamma
    if kernel == "linear":
        self.kernel = self.__linear
    elif kernel == "rbf":
        if self.gamma == 0:
            raise ValueError("rbf kernel requires gamma")
        if not isinstance(self.gamma, (float, int)):
            raise ValueError("gamma must be float or int")
        if not self.gamma > 0:
            raise ValueError("gamma must be > 0")
        self.kernel = self.__rbf
        # in the future, there could be a default value like in sklearn, where
        # def_gamma = 1 / (n_features * X.var()); previously it was 1 / n_features
    else:
        raise ValueError(f"Unknown kernel: {kernel}")
machine_learning
def normalization(data: list, ndigits: int = 3) -> list:
    # min-max normalization: rescale the data to the range [0, 1]
    x_min = min(data)
    x_max = max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
machine_learning
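A one-line check of the min-max normalization above (input illustrative):

print(normalization([2, 4, 6, 8, 10]))  # [0.0, 0.25, 0.5, 0.75, 1.0]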
def __init__(
    self,
    train,
    kernel_func,
    alpha_list=None,
    cost=0.4,
    b=0.0,
    tolerance=0.001,
    auto_norm=True,
):
    self._init = True
    self._auto_norm = auto_norm
    self._c = np.float64(cost)
    self._b = np.float64(b)
    self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
    self.tags = train[:, 0]
    self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
    self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
    self.Kernel = kernel_func
    self._eps = 0.001
    self._all_samples = list(range(self.length))
    self._K_matrix = self._calculate_k_matrix()
    self._error = np.zeros(self.length)
    self._unbound = []
    self.choose_alpha = self._choose_alphas()
machine_learning
def fit(self):
    k = self._k
    state = None
    while True:
        # 1: find alpha1 and alpha2
        try:
            i1, i2 = self.choose_alpha.send(state)
            state = None
        except StopIteration:
            print("Optimization done!\nEvery sample satisfies the KKT condition!")
            break

        # 2: calculate the new alpha2 and alpha1
        y1, y2 = self.tags[i1], self.tags[i2]
        a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
        e1, e2 = self._e(i1), self._e(i2)
        args = (i1, i2, a1, a2, e1, e2, y1, y2)
        a1_new, a2_new = self._get_new_alpha(*args)
        if not a1_new and not a2_new:
            state = False
            continue
        self.alphas[i1], self.alphas[i2] = a1_new, a2_new

        # 3: update the threshold b
        b1_new = np.float64(
            -e1
            - y1 * k(i1, i1) * (a1_new - a1)
            - y2 * k(i2, i1) * (a2_new - a2)
            + self._b
        )
        b2_new = np.float64(
            -e2
            - y2 * k(i2, i2) * (a2_new - a2)
            - y1 * k(i1, i2) * (a1_new - a1)
            + self._b
        )
        if 0.0 < a1_new < self._c:
            b = b1_new
        if 0.0 < a2_new < self._c:
            b = b2_new
        if not (np.float64(0) < a2_new < self._c) and not (
            np.float64(0) < a1_new < self._c
        ):
            b = (b1_new + b2_new) / 2.0
        b_old = self._b
        self._b = b

        # 4: update the error values; only non-bound samples' errors are cached
        self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
        for s in self.unbound:
            if s in (i1, i2):
                continue
            self._error[s] += (
                y1 * (a1_new - a1) * k(i1, s)
                + y2 * (a2_new - a2) * k(i2, s)
                + (self._b - b_old)
            )
        # if i1 or i2 is non-bound, reset its error value to zero
        if self._is_unbound(i1):
            self._error[i1] = 0
        if self._is_unbound(i2):
            self._error[i2] = 0
machine_learning
def predict(self, test_samples, classify=True):
    if test_samples.shape[1] > self.samples.shape[1]:
        raise ValueError(
            "Test samples' feature length does not match that of the train samples"
        )

    if self._auto_norm:
        test_samples = self._norm(test_samples)

    results = []
    for test_sample in test_samples:
        result = self._predict(test_sample)
        if classify:
            results.append(1 if result > 0 else -1)
        else:
            results.append(result)
    return np.array(results)
machine_learning
def _check_obey_kkt(self, index):
    alphas = self.alphas
    tol = self._tol
    r = self._e(index) * self.tags[index]
    c = self._c
    return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
machine_learning
def _k(self, i1, i2):
    # for test samples, use the kernel function directly
    if isinstance(i2, np.ndarray):
        return self.Kernel(self.samples[i1], i2)
    # for train samples, kernel values have been cached in the matrix
    else:
        return self._K_matrix[i1, i2]
machine_learning
def _e(self, index):
    # fetch from the cached error data
    if self._is_unbound(index):
        return self._error[index]
    # otherwise compute g(xi) - yi
    else:
        gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
        yi = self.tags[index]
        return gx - yi
machine_learning
def _calculate_k_matrix(self):
    k_matrix = np.zeros([self.length, self.length])
    for i in self._all_samples:
        for j in self._all_samples:
            k_matrix[i, j] = np.float64(
                self.Kernel(self.samples[i, :], self.samples[j, :])
            )
    return k_matrix
machine_learning
def _predict(self, sample):
    k = self._k
    predicted_value = (
        np.sum(
            [
                self.alphas[i1] * self.tags[i1] * k(i1, sample)
                for i1 in self._all_samples
            ]
        )
        + self._b
    )
    return predicted_value
machine_learning
def _choose_alphas(self):
    locis = yield from self._choose_a1()
    if not locis:
        return None
    return locis
machine_learning
def _choose_a1(self):
    while True:
        all_not_obey = True
        # scan all samples
        print("scanning all samples!")
        for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
            all_not_obey = False
            yield from self._choose_a2(i1)

        # scan non-bound samples
        print("scanning non-bound samples!")
        while True:
            not_obey = True
            for i1 in [
                i
                for i in self._all_samples
                if self._check_obey_kkt(i) and self._is_unbound(i)
            ]:
                not_obey = False
                yield from self._choose_a2(i1)
            if not_obey:
                print("all non-bound samples fit the KKT condition!")
                break
        if all_not_obey:
            print("all samples fit the KKT condition! Optimization done!")
            break
    return False
machine_learning
def _choose_a2(self, i1):
    self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
    if len(self.unbound) > 0:
        tmp_error = self._error.copy().tolist()
        tmp_error_dict = {
            index: value
            for index, value in enumerate(tmp_error)
            if self._is_unbound(index)
        }
        if self._e(i1) >= 0:
            i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
        else:
            i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
        cmd = yield i1, i2
        if cmd is None:
            return
    for i2 in np.roll(self.unbound, np.random.choice(self.length)):
        cmd = yield i1, i2
        if cmd is None:
            return
    for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
        cmd = yield i1, i2
        if cmd is None:
            return
machine_learning
def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
    k = self._k
    if i1 == i2:
        return None, None

    # calculate L and H, which bound the new alpha2
    s = y1 * y2
    if s == -1:
        l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1)
    else:
        l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1)
    if l == h:
        return None, None

    # calculate eta
    k11 = k(i1, i1)
    k22 = k(i2, i2)
    k12 = k(i1, i2)

    # select the new alpha2 which could get the minimal objectives
    if (eta := k11 + k22 - 2.0 * k12) > 0.0:
        a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
        # a2_new has a boundary
        if a2_new_unc >= h:
            a2_new = h
        elif a2_new_unc <= l:
            a2_new = l
        else:
            a2_new = a2_new_unc
    else:
        b = self._b
        l1 = a1 + s * (a2 - l)
        h1 = a1 + s * (a2 - h)

        # way 1
        f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2)
        f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2)
        ol = (
            l1 * f1
            + l * f2
            + 1 / 2 * l1**2 * k(i1, i1)
            + 1 / 2 * l**2 * k(i2, i2)
            + s * l * l1 * k(i1, i2)
        )
        oh = (
            h1 * f1
            + h * f2
            + 1 / 2 * h1**2 * k(i1, i1)
            + 1 / 2 * h**2 * k(i2, i2)
            + s * h * h1 * k(i1, i2)
        )
        if ol < (oh - self._eps):
            a2_new = l
        elif ol > oh + self._eps:
            a2_new = h
        else:
            a2_new = a2

    # a1_new has a boundary too
    a1_new = a1 + s * (a2 - a2_new)
    if a1_new < 0:
        a2_new += s * a1_new
        a1_new = 0
    if a1_new > self._c:
        a2_new += s * (a1_new - self._c)
        a1_new = self._c
    return a1_new, a2_new
machine_learning
def _norm(self, data):
    # record the scaling parameters on the first call, then reuse them
    if self._init:
        self._min = np.min(data, axis=0)
        self._max = np.max(data, axis=0)
        self._init = False
    return (data - self._min) / (self._max - self._min)
machine_learning