INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Calculate TP,TN,FP,FN for each class. :param actual_vector: actual values :type actual_vector : list :param predict_vector: predict value :type predict_vector : list :param sample_weight : sample weights list :type sample_weight : list :return: [classes_list,table,TP,TN,FP,FN]
def matrix_params_calc(actual_vector, predict_vector, sample_weight):
    """
    Calculate TP,TN,FP,FN for each class.

    :param actual_vector: actual values
    :type actual_vector : list
    :param predict_vector: predict value
    :type predict_vector : list
    :param sample_weight : sample weights list
    :type sample_weight : list
    :return: [classes_list,table,TP,TN,FP,FN]
    """
    # normalize numpy arrays to plain lists up front
    if isinstance(actual_vector, numpy.ndarray):
        actual_vector = actual_vector.tolist()
    if isinstance(predict_vector, numpy.ndarray):
        predict_vector = predict_vector.tolist()
    classes = sorted(set(actual_vector).union(set(predict_vector)))
    # fresh inner dict per row so counts stay independent
    table = {actual: {predicted: 0 for predicted in classes}
             for actual in classes}
    weights = [1] * len(actual_vector)
    if isinstance(sample_weight, (list, numpy.ndarray)) and \
            len(sample_weight) == len(actual_vector):
        weights = sample_weight
    for position, actual in enumerate(actual_vector):
        table[actual][predict_vector[position]] += weights[position]
    [classes, table, TP_dict, TN_dict, FP_dict, FN_dict] = \
        matrix_params_from_table(table)
    return [classes, table, TP_dict, TN_dict, FP_dict, FN_dict]
Check if the dataset is imbalanced. :param P: condition positive :type P : dict :return: is_imbalanced as bool
def imbalance_check(P):
    """
    Check if the dataset is imbalanced.

    :param P: condition positive
    :type P : dict
    :return: is_imbalanced as bool
    """
    counts = list(P.values())
    largest = max(counts)
    smallest = min(counts)
    # guard against division by zero when some class never occurs
    if smallest > 0:
        balance_ratio = largest / smallest
    else:
        balance_ratio = largest
    return balance_ratio > BALANCE_RATIO_THRESHOLD
Check if the problem is a binary classification. :param classes: all classes name :type classes : list :return: is_binary as bool
def binary_check(classes):
    """
    Check if the problem is a binary classification.

    :param classes: all classes name
    :type classes : list
    :return: is_binary as bool
    """
    # binary classification <=> exactly two distinct classes
    return len(classes) == 2
Return recommend parameters which are more suitable due to the input dataset characteristics. :param classes: all classes name :type classes : list :param P: condition positive :type P : dict :return: recommendation_list as list
def statistic_recommend(classes, P):
    """
    Return recommend parameters which are more suitable due to the input dataset characteristics.

    :param classes: all classes name
    :type classes : list
    :param P: condition positive
    :type P : dict
    :return: recommendation_list as list
    """
    # imbalance takes priority; otherwise choose by class count
    if imbalance_check(P):
        return IMBALANCED_RECOMMEND
    return BINARY_RECOMMEND if binary_check(classes) else MULTICLASS_RECOMMEND
Print final result. :param failed: failed flag :type failed: bool :return: None
def print_result(failed=False):
    """
    Print final result.

    :param failed: failed flag
    :type failed: bool
    :return: None
    """
    message = "Version tag tests "
    outcome = "failed!" if failed else "passed!"
    print("\n" + message + outcome)
    # TEST_NUMBER and Failed are module-level counters maintained elsewhere
    print("Passed : " + str(TEST_NUMBER - Failed) + "/" + str(TEST_NUMBER))
Calculate C (Pearson's C). :param chi_square: chi squared :type chi_square : float :param POP: population :type POP : int :return: C as float
def pearson_C_calc(chi_square, POP):
    """
    Calculate C (Pearson's C).

    :param chi_square: chi squared
    :type chi_square : float
    :param POP: population
    :type POP : int
    :return: C as float
    """
    try:
        return math.sqrt(chi_square / (POP + chi_square))
    except Exception:
        # library-wide convention: return the string "None" on failure
        return "None"
Calculate AUNP. :param classes: classes :type classes : list :param P: condition positive :type P : dict :param POP: population :type POP : dict :param AUC_dict: AUC (Area under the ROC curve) for each class :type AUC_dict : dict :return: AUNP as float
def AUNP_calc(classes, P, POP, AUC_dict):
    """
    Calculate AUNP.

    :param classes: classes
    :type classes : list
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : dict
    :param AUC_dict: AUC (Area under the ROC curve) for each class
    :type AUC_dict : dict
    :return: AUNP as float
    """
    try:
        # prevalence-weighted average of the per-class AUC values
        return sum((P[c] / POP[c]) * AUC_dict[c] for c in classes)
    except Exception:
        return "None"
Calculate CBA (Class balance accuracy). :param classes: classes :type classes : list :param table: input matrix :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :return: CBA as float
def CBA_calc(classes, table, TOP, P):
    """
    Calculate CBA (Class balance accuracy).

    :param classes: classes
    :type classes : list
    :param table: input matrix
    :type table : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :return: CBA as float
    """
    try:
        # each class contributes its diagonal cell scaled by max(TOP, P)
        total = sum(table[c][c] / max(TOP[c], P[c]) for c in classes)
        return total / len(classes)
    except Exception:
        return "None"
Calculate RR (Global performance index). :param classes: classes :type classes : list :param TOP: test outcome positive :type TOP : dict :return: RR as float
def RR_calc(classes, TOP):
    """
    Calculate RR (Global performance index).

    :param classes: classes
    :type classes : list
    :param TOP: test outcome positive
    :type TOP : dict
    :return: RR as float
    """
    try:
        # mean number of predictions per class
        return sum(TOP.values()) / len(classes)
    except Exception:
        return "None"
Calculate Overall_MCC. :param classes: classes :type classes : list :param table: input matrix :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :return: Overall_MCC as float
def overall_MCC_calc(classes, table, TOP, P):
    """
    Calculate Overall_MCC.

    :param classes: classes
    :type classes : list
    :param table: input matrix
    :type table : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :return: Overall_MCC as float
    """
    try:
        matrix_total = sum(TOP.values())
        cov_xx = 0
        cov_yy = 0
        cov_xy = 0
        # accumulate the covariance terms of the multiclass MCC formula
        for c in classes:
            cov_xx += TOP[c] * (matrix_total - TOP[c])
            cov_yy += P[c] * (matrix_total - P[c])
            cov_xy += table[c][c] * matrix_total - P[c] * TOP[c]
        return cov_xy / math.sqrt(cov_yy * cov_xx)
    except Exception:
        return "None"
Calculate Overall_CEN coefficient. :param classes: classes :type classes : list :param TP: true Positive Dict For All Classes :type TP : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param class_name: reviewed class name :type class_name : any valid type :param modified : modified mode flag :type modified : bool :return: coefficient as float
def convex_combination(classes, TP, TOP, P, class_name, modified=False):
    """
    Calculate Overall_CEN coefficient.

    :param classes: classes
    :type classes : list
    :param TP: true Positive Dict For All Classes
    :type TP : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :param class_name: reviewed class name
    :type class_name : any valid type
    :param modified : modified mode flag
    :type modified : bool
    :return: coefficient as float
    """
    try:
        # binary case uses alpha = 0, multiclass uses alpha = 1
        alpha = 0 if len(classes) == 2 else 1
        matrix_sum = sum(TOP.values())
        TP_sum = sum(TP.values())
        numerator = TOP[class_name] + P[class_name]
        denominator = 2 * matrix_sum
        if modified:
            denominator -= alpha * TP_sum
            numerator -= TP[class_name]
        return numerator / denominator
    except Exception:
        return "None"
Calculate Overall_CEN (Overall confusion entropy). :param classes: classes :type classes : list :param TP: true positive dict for all classes :type TP : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param CEN_dict: CEN dictionary for each class :type CEN_dict : dict :param modified : modified mode flag :type modified : bool :return: Overall_CEN(MCEN) as float
def overall_CEN_calc(classes, TP, TOP, P, CEN_dict, modified=False):
    """
    Calculate Overall_CEN (Overall confusion entropy).

    :param classes: classes
    :type classes : list
    :param TP: true positive dict for all classes
    :type TP : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :param CEN_dict: CEN dictionary for each class
    :type CEN_dict : dict
    :param modified : modified mode flag
    :type modified : bool
    :return: Overall_CEN(MCEN) as float
    """
    try:
        # weight each per-class CEN by its convex-combination coefficient
        return sum(
            convex_combination(classes, TP, TOP, P, c, modified) * CEN_dict[c]
            for c in classes)
    except Exception:
        return "None"
Calculate n choose r. :param n: n :type n : int :param r: r :type r :int :return: n choose r as int
def ncr(n, r):
    """
    Calculate n choose r.

    :param n: n
    :type n : int
    :param r: r
    :type r :int
    :return: n choose r as int
    """
    # use the smaller of r and n-r so both products stay short
    r = min(r, n - r)
    numerator = 1
    for factor in range(n, n - r, -1):
        numerator *= factor
    denominator = 1
    for factor in range(1, r + 1):
        denominator *= factor
    return numerator // denominator
Calculate p_value. :param TP: true positive :type TP : dict :param POP: population :type POP : int :param NIR: no information rate :type NIR : float :return: p_value as float
def p_value_calc(TP, POP, NIR):
    """
    Calculate p_value.

    :param TP: true positive
    :type TP : dict
    :param POP: population
    :type POP : int
    :param NIR: no information rate
    :type NIR : float
    :return: p_value as float
    """
    try:
        successes = sum(TP.values())
        # binomial tail: 1 - P[X < successes] with X ~ Bin(POP, NIR)
        cumulative = 0
        for j in range(successes):
            cumulative += ncr(POP, j) * (NIR ** j) * ((1 - NIR) ** (POP - j))
        return 1 - cumulative
    except Exception:
        return "None"
Calculate NIR (No information rate). :param P: condition positive :type P : dict :param POP: population :type POP : int :return: NIR as float
def NIR_calc(P, POP):
    """
    Calculate NIR (No information rate).

    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : int
    :return: NIR as float
    """
    try:
        # accuracy achievable by always predicting the majority class
        return max(P.values()) / POP
    except Exception:
        return "None"
Calculate hamming loss. :param TP: true positive :type TP : dict :param POP: population :type POP : int :return: hamming loss as float
def hamming_calc(TP, POP):
    """
    Calculate hamming loss.

    :param TP: true positive
    :type TP : dict
    :param POP: population
    :type POP : int
    :return: hamming loss as float
    """
    try:
        # fraction of samples that were not classified correctly
        return (1 / POP) * (POP - sum(TP.values()))
    except Exception:
        return "None"
Calculate zero-one loss. :param TP: true Positive :type TP : dict :param POP: population :type POP : int :return: zero_one loss as integer
def zero_one_loss_calc(TP, POP):
    """
    Calculate zero-one loss.

    :param TP: true Positive
    :type TP : dict
    :param POP: population
    :type POP : int
    :return: zero_one loss as integer
    """
    try:
        # absolute count of misclassified samples
        return POP - sum(TP.values())
    except Exception:
        return "None"
Calculate reference and response likelihood. :param item : TOP or P :type item : dict :param POP: population :type POP : dict :return: reference or response likelihood as float
def entropy_calc(item, POP):
    """
    Calculate reference and response likelihood.

    :param item : TOP or P
    :type item : dict
    :param POP: population
    :type POP : dict
    :return: reference or response likelihood as float
    """
    try:
        total = 0
        for label, count in item.items():
            likelihood = count / POP[label]
            # skip zero likelihoods: lim p*log(p) = 0 as p -> 0
            if likelihood != 0:
                total += likelihood * math.log(likelihood, 2)
        return -total
    except Exception:
        return "None"
Calculate cross entropy. :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: cross entropy as float
def cross_entropy_calc(TOP, P, POP):
    """
    Calculate cross entropy.

    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : dict
    :return: cross entropy as float
    """
    try:
        total = 0
        for label in TOP.keys():
            reference = P[label] / POP[label]
            response = TOP[label] / POP[label]
            # both likelihoods must be non-zero for the log term to exist
            if response != 0 and reference != 0:
                total += reference * math.log(response, 2)
        return -total
    except Exception:
        return "None"
Calculate joint entropy. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param POP: population :type POP : dict :return: joint entropy as float
def joint_entropy_calc(classes, table, POP):
    """
    Calculate joint entropy.

    :param classes: confusion matrix classes
    :type classes : list
    :param table: confusion matrix table
    :type table : dict
    :param POP: population
    :type POP : dict
    :return: joint entropy as float
    """
    try:
        result = 0
        for i in classes:
            # fix: the inner loop used enumerate() but never used the index
            for j in classes:
                p_prime = table[i][j] / POP[i]
                # zero cells contribute nothing (lim p*log(p) = 0)
                if p_prime != 0:
                    result += p_prime * math.log(p_prime, 2)
        return -result
    except Exception:
        return "None"
Calculate conditional entropy. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: conditional entropy as float
def conditional_entropy_calc(classes, table, P, POP):
    """
    Calculate conditional entropy.

    :param classes: confusion matrix classes
    :type classes : list
    :param table: confusion matrix table
    :type table : dict
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : dict
    :return: conditional entropy as float
    """
    try:
        result = 0
        for i in classes:
            temp = 0
            # fix: the inner loop used enumerate() but never used the index
            for j in classes:
                p_prime = 0
                if P[i] != 0:
                    p_prime = table[i][j] / P[i]
                if p_prime != 0:
                    temp += p_prime * math.log(p_prime, 2)
            # weight each row's entropy by the class prevalence
            result += temp * (P[i] / POP[i])
        return -result
    except Exception:
        return "None"
Calculate Goodman and Kruskal's lambda B. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param TOP: test outcome positive :type TOP : dict :param POP: population :type POP : int :return: Goodman and Kruskal's lambda B as float
def lambda_B_calc(classes, table, TOP, POP):
    """
    Calculate Goodman and Kruskal's lambda B.

    :param classes: confusion matrix classes
    :type classes : list
    :param table: confusion matrix table
    :type table : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP : int
    :return: Goodman and Kruskal's lambda B as float
    """
    try:
        best_response = max(TOP.values())
        # sum of the per-row maxima of the confusion matrix
        row_maxima = sum(max(table[c].values()) for c in classes)
        return (row_maxima - best_response) / (POP - best_response)
    except Exception:
        return "None"
Calculate Goodman and Kruskal's lambda A. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param P: condition positive :type P : dict :param POP: population :type POP : int :return: Goodman and Kruskal's lambda A as float
def lambda_A_calc(classes, table, P, POP):
    """
    Calculate Goodman and Kruskal's lambda A.

    :param classes: confusion matrix classes
    :type classes : list
    :param table: confusion matrix table
    :type table : dict
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : int
    :return: Goodman and Kruskal's lambda A as float
    """
    try:
        best_reference = max(P.values())
        col_maxima = 0
        # per-column maxima: scan every row's entry for this class
        for c in classes:
            col_maxima += max(row[c] for row in table.values())
        return (col_maxima - best_reference) / (POP - best_reference)
    except Exception:
        return "None"
Calculate chi-squared. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: chi-squared as float
def chi_square_calc(classes, table, TOP, P, POP):
    """
    Calculate chi-squared.

    :param classes: confusion matrix classes
    :type classes : list
    :param table: confusion matrix table
    :type table : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : dict
    :return: chi-squared as float
    """
    try:
        result = 0
        for i in classes:
            # fix: the inner loop used enumerate() but never used the index
            for j in classes:
                # expected cell count under row/column independence
                expected = (TOP[j] * P[i]) / (POP[i])
                result += ((table[i][j] - expected) ** 2) / expected
        return result
    except Exception:
        return "None"
Calculate kappa standard error. :param PA: observed agreement among raters (overall accuracy) :type PA : float :param PE: hypothetical probability of chance agreement (random accuracy) :type PE : float :param POP: population :type POP:int :return: kappa standard error as float
def kappa_se_calc(PA, PE, POP):
    """
    Calculate kappa standard error.

    :param PA: observed agreement among raters (overall accuracy)
    :type PA : float
    :param PE: hypothetical probability of chance agreement (random accuracy)
    :type PE : float
    :param POP: population
    :type POP:int
    :return: kappa standard error as float
    """
    try:
        variance = (PA * (1 - PA)) / (POP * ((1 - PE) ** 2))
        return math.sqrt(variance)
    except Exception:
        return "None"
Calculate confidence interval. :param mean: mean of data :type mean : float :param SE: standard error of data :type SE : float :param CV: critical value :type CV:float :return: confidence interval as tuple
def CI_calc(mean, SE, CV=1.96):
    """
    Calculate confidence interval.

    :param mean: mean of data
    :type mean : float
    :param SE: standard error of data
    :type SE : float
    :param CV: critical value
    :type CV:float
    :return: confidence interval as tuple
    """
    try:
        # symmetric interval: mean +/- CV * SE
        delta = CV * SE
        return (mean - delta, mean + delta)
    except Exception:
        return ("None", "None")
Calculate PPV_Micro and TPR_Micro. :param TP: true positive :type TP:dict :param item: FN or FP :type item : dict :return: PPV_Micro or TPR_Micro as float
def micro_calc(TP, item):
    """
    Calculate PPV_Micro and TPR_Micro.

    :param TP: true positive
    :type TP:dict
    :param item: FN or FP
    :type item : dict
    :return: PPV_Micro or TPR_Micro as float
    """
    try:
        # pooled counts over all classes
        tp_total = sum(TP.values())
        return tp_total / (tp_total + sum(item.values()))
    except Exception:
        return "None"
Calculate PPV_Macro and TPR_Macro. :param item: PPV or TPR :type item:dict :return: PPV_Macro or TPR_Macro as float
def macro_calc(item):
    """
    Calculate PPV_Macro and TPR_Macro.

    :param item: PPV or TPR
    :type item:dict
    :return: PPV_Macro or TPR_Macro as float
    """
    try:
        # unweighted mean over classes; len(item) replaces the
        # needless len(item.values()) view construction
        return sum(item.values()) / len(item)
    except Exception:
        return "None"
Calculate percent chance agreement for Scott's Pi. :param P: condition positive :type P : dict :param TOP: test outcome positive :type TOP : dict :param POP: population :type POP:dict :return: percent chance agreement as float
def PC_PI_calc(P, TOP, POP):
    """
    Calculate percent chance agreement for Scott's Pi.

    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP:dict
    :return: percent chance agreement as float
    """
    try:
        # squared mean of reference/response marginals per class
        return sum(((P[c] + TOP[c]) / (2 * POP[c])) ** 2 for c in P.keys())
    except Exception:
        return "None"
Calculate percent chance agreement for Gwet's AC1. :param P: condition positive :type P : dict :param TOP: test outcome positive :type TOP : dict :param POP: population :type POP:dict :return: percent chance agreement as float
def PC_AC1_calc(P, TOP, POP):
    """
    Calculate percent chance agreement for Gwet's AC1.

    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP:dict
    :return: percent chance agreement as float
    """
    try:
        labels = list(P.keys())
        total = 0
        for c in labels:
            # mean marginal probability of class c
            pi = (P[c] + TOP[c]) / (2 * POP[c])
            total += pi * (1 - pi)
        return total / (len(labels) - 1)
    except Exception:
        return "None"
Calculate overall jaccard index. :param jaccard_list : list of jaccard index for each class :type jaccard_list : list :return: (jaccard_sum , jaccard_mean) as tuple
def overall_jaccard_index_calc(jaccard_list):
    """
    Calculate overall jaccard index.

    :param jaccard_list : list of jaccard index for each class
    :type jaccard_list : list
    :return: (jaccard_sum , jaccard_mean) as tuple
    """
    try:
        total = sum(jaccard_list)
        return (total, total / len(jaccard_list))
    except Exception:
        return "None"
Calculate overall accuracy. :param TP: true positive :type TP : dict :param POP: population :type POP:int :return: overall_accuracy as float
def overall_accuracy_calc(TP, POP):
    """
    Calculate overall accuracy.

    :param TP: true positive
    :type TP : dict
    :param POP: population
    :type POP:int
    :return: overall_accuracy as float
    """
    try:
        # correctly classified samples over the whole population
        return sum(TP.values()) / POP
    except Exception:
        return "None"
Return overall statistics. :param RACC: random accuracy :type RACC : dict :param TPR: sensitivity, recall, hit rate, or true positive rate :type TPR : dict :param PPV: precision or positive predictive value :type PPV : dict :param TP: true positive :type TP : dict :param FN: false negative :type FN : dict :param FP: false positive :type FP: dict :param POP: population :type POP:dict :param P: condition positive :type P : dict :param POP: population :type POP:dict :param TOP: test outcome positive :type TOP : dict :param jaccard_list : list of jaccard index for each class :type jaccard_list : list :param CEN_dict: CEN dictionary for each class :type CEN_dict : dict :param classes: confusion matrix classes :type classes : list :param table: input matrix :type table : dict :return: overall statistics as dict
def overall_statistics(
        RACC,
        RACCU,
        TPR,
        PPV,
        TP,
        FN,
        FP,
        POP,
        P,
        TOP,
        jaccard_list,
        CEN_dict,
        MCEN_dict,
        AUC_dict,
        classes,
        table):
    """
    Return overall statistics.

    :param RACC: random accuracy
    :type RACC : dict
    :param RACCU: random accuracy unbiased
    :type RACCU : dict
    :param TPR: sensitivity, recall, hit rate, or true positive rate
    :type TPR : dict
    :param PPV: precision or positive predictive value
    :type PPV : dict
    :param TP: true positive
    :type TP : dict
    :param FN: false negative
    :type FN : dict
    :param FP: false positive
    :type FP: dict
    :param POP: population
    :type POP:dict
    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param jaccard_list : list of jaccard index for each class
    :type jaccard_list : list
    :param CEN_dict: CEN dictionary for each class
    :type CEN_dict : dict
    :param MCEN_dict: MCEN dictionary for each class
    :type MCEN_dict : dict
    :param AUC_dict: AUC (Area under the ROC curve) for each class
    :type AUC_dict : dict
    :param classes: confusion matrix classes
    :type classes : list
    :param table: input matrix
    :type table : dict
    :return: overall statistics as dict
    """
    # POP maps every class to the same total; any single entry is the
    # whole population (assumes POP is non-empty — TODO confirm at caller)
    population = list(POP.values())[0]
    overall_accuracy = overall_accuracy_calc(TP, population)
    # chance-agreement baselines (unbiased and biased variants)
    overall_random_accuracy_unbiased = overall_random_accuracy_calc(RACCU)
    overall_random_accuracy = overall_random_accuracy_calc(RACC)
    overall_kappa = reliability_calc(overall_random_accuracy, overall_accuracy)
    # alternative chance-corrected agreement coefficients
    PC_PI = PC_PI_calc(P, TOP, POP)
    PC_AC1 = PC_AC1_calc(P, TOP, POP)
    PC_S = PC_S_calc(classes)
    PI = reliability_calc(PC_PI, overall_accuracy)
    AC1 = reliability_calc(PC_AC1, overall_accuracy)
    S = reliability_calc(PC_S, overall_accuracy)
    # kappa variants, standard error and confidence intervals
    kappa_SE = kappa_se_calc(
        overall_accuracy,
        overall_random_accuracy,
        population)
    kappa_unbiased = reliability_calc(
        overall_random_accuracy_unbiased,
        overall_accuracy)
    kappa_no_prevalence = kappa_no_prevalence_calc(overall_accuracy)
    kappa_CI = CI_calc(overall_kappa, kappa_SE)
    overall_accuracy_se = se_calc(overall_accuracy, population)
    overall_accuracy_CI = CI_calc(overall_accuracy, overall_accuracy_se)
    # association measures derived from the chi-squared statistic
    chi_squared = chi_square_calc(classes, table, TOP, P, POP)
    phi_squared = phi_square_calc(chi_squared, population)
    cramer_V = cramers_V_calc(phi_squared, classes)
    # information-theoretic measures
    response_entropy = entropy_calc(TOP, POP)
    reference_entropy = entropy_calc(P, POP)
    cross_entropy = cross_entropy_calc(TOP, P, POP)
    join_entropy = joint_entropy_calc(classes, table, POP)
    conditional_entropy = conditional_entropy_calc(classes, table, P, POP)
    mutual_information = mutual_information_calc(
        response_entropy, conditional_entropy)
    kl_divergence = kl_divergence_calc(P, TOP, POP)
    # Goodman-Kruskal lambdas and degrees of freedom
    lambda_B = lambda_B_calc(classes, table, TOP, population)
    lambda_A = lambda_A_calc(classes, table, P, population)
    DF = DF_calc(classes)
    overall_jaccard_index = overall_jaccard_index_calc(list(
        jaccard_list.values()))
    # loss metrics and significance
    hamming_loss = hamming_calc(TP, population)
    zero_one_loss = zero_one_loss_calc(TP, population)
    NIR = NIR_calc(P, population)
    p_value = p_value_calc(TP, population, NIR)
    # confusion-entropy and multiclass summary metrics
    overall_CEN = overall_CEN_calc(classes, TP, TOP, P, CEN_dict)
    overall_MCEN = overall_CEN_calc(classes, TP, TOP, P, MCEN_dict, True)
    overall_MCC = overall_MCC_calc(classes, table, TOP, P)
    RR = RR_calc(classes, TOP)
    CBA = CBA_calc(classes, table, TOP, P)
    AUNU = macro_calc(AUC_dict)
    AUNP = AUNP_calc(classes, P, POP, AUC_dict)
    RCI = RCI_calc(mutual_information, reference_entropy)
    C = pearson_C_calc(chi_squared, population)
    return {
        "Overall ACC": overall_accuracy,
        "Kappa": overall_kappa,
        "Overall RACC": overall_random_accuracy,
        "SOA1(Landis & Koch)": kappa_analysis_koch(overall_kappa),
        "SOA2(Fleiss)": kappa_analysis_fleiss(overall_kappa),
        "SOA3(Altman)": kappa_analysis_altman(overall_kappa),
        "SOA4(Cicchetti)": kappa_analysis_cicchetti(overall_kappa),
        "TPR Macro": macro_calc(TPR),
        "PPV Macro": macro_calc(PPV),
        "TPR Micro": micro_calc(
            TP=TP, item=FN),
        "PPV Micro": micro_calc(
            TP=TP, item=FP),
        "Scott PI": PI,
        "Gwet AC1": AC1,
        "Bennett S": S,
        "Kappa Standard Error": kappa_SE,
        "Kappa 95% CI": kappa_CI,
        "Chi-Squared": chi_squared,
        "Phi-Squared": phi_squared,
        "Cramer V": cramer_V,
        "Chi-Squared DF": DF,
        "95% CI": overall_accuracy_CI,
        "Standard Error": overall_accuracy_se,
        "Response Entropy": response_entropy,
        "Reference Entropy": reference_entropy,
        "Cross Entropy": cross_entropy,
        "Joint Entropy": join_entropy,
        "Conditional Entropy": conditional_entropy,
        "KL Divergence": kl_divergence,
        "Lambda B": lambda_B,
        "Lambda A": lambda_A,
        "Kappa Unbiased": kappa_unbiased,
        "Overall RACCU": overall_random_accuracy_unbiased,
        "Kappa No Prevalence": kappa_no_prevalence,
        "Mutual Information": mutual_information,
        "Overall J": overall_jaccard_index,
        "Hamming Loss": hamming_loss,
        "Zero-one Loss": zero_one_loss,
        "NIR": NIR,
        "P-Value": p_value,
        "Overall CEN": overall_CEN,
        "Overall MCEN": overall_MCEN,
        "Overall MCC": overall_MCC,
        "RR": RR,
        "CBA": CBA,
        "AUNU": AUNU,
        "AUNP": AUNP,
        "RCI": RCI,
        "Pearson C": C}
Analyze AUC with interpretation table. :param AUC: area under the ROC curve :type AUC : float :return: interpretation result as str
def AUC_analysis(AUC):
    """
    Analyze AUC with interpretation table.

    :param AUC: area under the ROC curve
    :type AUC : float
    :return: interpretation result as str
    """
    try:
        if AUC == "None":
            return "None"
        # earlier returns make the lower-bound re-checks unnecessary
        if AUC < 0.6:
            return "Poor"
        if AUC < 0.7:
            return "Fair"
        if AUC < 0.8:
            return "Good"
        if AUC < 0.9:
            return "Very Good"
        return "Excellent"
    except Exception:  # pragma: no cover
        return "None"
Analyze kappa number with Cicchetti benchmark. :param kappa: kappa number :type kappa : float :return: strength of agreement as str
def kappa_analysis_cicchetti(kappa):
    """
    Analyze kappa number with Cicchetti benchmark.

    :param kappa: kappa number
    :type kappa : float
    :return: strength of agreement as str
    """
    try:
        if kappa < 0.4:
            return "Poor"
        if kappa < 0.59:
            return "Fair"
        if kappa < 0.74:
            return "Good"
        if kappa <= 1:
            return "Excellent"
        # kappa above 1 is outside the benchmark's domain
        return "None"
    except Exception:  # pragma: no cover
        return "None"
Analyze kappa number with Landis-Koch benchmark. :param kappa: kappa number :type kappa : float :return: strength of agreement as str
def kappa_analysis_koch(kappa):
    """
    Analyze kappa number with Landis-Koch benchmark.

    :param kappa: kappa number
    :type kappa : float
    :return: strength of agreement as str
    """
    try:
        if kappa < 0:
            return "Poor"
        if kappa < 0.2:
            return "Slight"
        if kappa < 0.4:
            return "Fair"
        if kappa < 0.6:
            return "Moderate"
        if kappa < 0.8:
            return "Substantial"
        if kappa <= 1:
            return "Almost Perfect"
        # kappa above 1 is outside the benchmark's domain
        return "None"
    except Exception:  # pragma: no cover
        return "None"
Analyze kappa number with Altman benchmark. :param kappa: kappa number :type kappa : float :return: strength of agreement as str
def kappa_analysis_altman(kappa):
    """
    Analyze kappa number with Altman benchmark.

    :param kappa: kappa number
    :type kappa : float
    :return: strength of agreement as str
    """
    try:
        if kappa < 0.2:
            return "Poor"
        if kappa < 0.4:
            return "Fair"
        if kappa < 0.6:
            return "Moderate"
        if kappa < 0.8:
            return "Good"
        if kappa <= 1:
            return "Very Good"
        # kappa above 1 is outside the benchmark's domain
        return "None"
    except Exception:  # pragma: no cover
        return "None"
Read requirements.txt.
def get_requires():
    """
    Read requirements.txt.

    :return: requirement names as list of str
    """
    # fix: the file handle was never closed; the context manager
    # guarantees release even if read() raises
    with open("requirements.txt", "r") as requirements_file:
        requirements = requirements_file.read()
    return list(filter(lambda x: x != "", requirements.split()))
Read README.md and CHANGELOG.md.
def read_description():
    """
    Read README.md and CHANGELOG.md.

    :return: concatenated description text, or a built-in fallback
             summary when either file is missing/unreadable
    """
    try:
        with open("README.md") as r:
            description = "\n"
            description += r.read()
        with open("CHANGELOG.md") as c:
            description += "\n"
            description += c.read()
        return description
    except Exception:
        # any I/O failure falls back to this static project summary
        # NOTE(review): internal line breaks of this literal were lost in
        # the flattened source — confirm against the original file
        return '''
PyCM is a multi-class confusion matrix library written in Python that
supports both input data vectors and direct matrix, and a proper tool for
post-classification model evaluation that supports most classes and overall
statistics parameters.
PyCM is the swiss-army knife of confusion matrices, targeted mainly at
data scientists that need a broad array of metrics for predictive models
and an accurate evaluation of large variety of classifiers.'''
Print confusion matrix. :param one_vs_all : One-Vs-All mode flag :type one_vs_all : bool :param class_name : target class name for One-Vs-All mode :type class_name : any valid type :return: None
def print_matrix(self, one_vs_all=False, class_name=None):
    """
    Print confusion matrix.

    :param one_vs_all : One-Vs-All mode flag
    :type one_vs_all : bool
    :param class_name : target class name for One-Vs-All mode
    :type class_name : any valid type
    :return: None
    """
    classes = self.classes
    table = self.table
    if one_vs_all:
        # collapse the matrix to class_name-vs-rest before printing
        [classes, table] = one_vs_all_func(
            classes, table, self.TP, self.TN, self.FP, self.FN, class_name)
    print(table_print(classes, table))
Print statistical measures table. :param overall_param : overall parameters list for print, Example : ["Kappa","Scott PI"] :type overall_param : list :param class_param : class parameters list for print, Example : ["TPR","TNR","AUC"] :type class_param : list :param class_name : class name (sub set of classes), Example :[1,2,3] :type class_name : list :return: None
def stat(self, overall_param=None, class_param=None, class_name=None):
    """
    Print statistical measures table.

    :param overall_param : overall parameters list for print, Example : ["Kappa","Scott PI"]
    :type overall_param : list
    :param class_param : class parameters list for print, Example : ["TPR","TNR","AUC"]
    :type class_param : list
    :param class_name : class name (sub set of classes), Example :[1,2,3]
    :type class_name : list
    :return: None
    """
    selected_classes = class_filter(self.classes, class_name)
    rendered = stat_print(
        selected_classes,
        self.class_stat,
        self.overall_stat,
        self.digit,
        overall_param,
        class_param)
    print(rendered)
Save ConfusionMatrix in .pycm (flat file format). :param name: filename :type name : str :param address: flag for address return :type address : bool :param overall_param : overall parameters list for save, Example : ["Kappa","Scott PI"] :type overall_param : list :param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"] :type class_param : list :param class_name : class name (sub set of classes), Example :[1,2,3] :type class_name : list :return: saving Status as dict {"Status":bool , "Message":str}
def save_stat(
        self,
        name,
        address=True,
        overall_param=None,
        class_param=None,
        class_name=None):
    """
    Save ConfusionMatrix in .pycm (flat file format).

    :param name: filename
    :type name: str
    :param address: flag for address return
    :type address: bool
    :param overall_param: overall parameters list for save, Example: ["Kappa", "Scott PI"]
    :type overall_param: list
    :param class_param: class parameters list for save, Example: ["TPR", "TNR", "AUC"]
    :type class_param: list
    :param class_name: class name (sub set of classes), Example: [1, 2, 3]
    :type class_name: list
    :return: saving Status as dict {"Status":bool , "Message":str}
    """
    try:
        message = None
        # Build the full report text BEFORE touching the filesystem so a
        # failing helper cannot leave behind a half-written file.
        matrix = "Matrix : \n\n" + \
            table_print(self.classes, self.table) + "\n\n"
        normalized_matrix = "Normalized Matrix : \n\n" + \
            table_print(self.classes, self.normalized_table) + "\n\n"
        one_vs_all = "\nOne-Vs-All : \n\n"
        for c in self.classes:
            one_vs_all += str(c) + "-Vs-All : \n\n"
            [classes, table] = one_vs_all_func(self.classes, self.table,
                                               self.TP, self.TN,
                                               self.FP, self.FN, c)
            one_vs_all += table_print(classes, table) + "\n\n"
        classes = class_filter(self.classes, class_name)
        stat = stat_print(
            classes,
            self.class_stat,
            self.overall_stat,
            self.digit,
            overall_param,
            class_param)
        # "with" guarantees the handle is closed even if write() raises;
        # the original opened the file manually and leaked it on any error.
        with open(name + ".pycm", "w") as file:
            file.write(matrix + normalized_matrix + stat + one_vs_all)
        if address:
            message = os.path.join(os.getcwd(), name + ".pycm")
        return {"Status": True, "Message": message}
    except Exception as e:
        # pycm convention: report failure through the status dict.
        return {"Status": False, "Message": str(e)}
Save ConfusionMatrix in HTML file. :param name: filename :type name : str :param address: flag for address return :type address : bool :param overall_param : overall parameters list for save, Example : ["Kappa","Scott PI"] :type overall_param : list :param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"] :type class_param : list :param class_name : class name (sub set of classes), Example :[1,2,3] :type class_name : list :param color : matrix color (R,G,B) :type color : tuple :param normalize : save normalize matrix flag :type normalize : bool :return: saving Status as dict {"Status":bool , "Message":str}
def save_html(
        self,
        name,
        address=True,
        overall_param=None,
        class_param=None,
        class_name=None,
        color=(0, 0, 0),
        normalize=False):
    """
    Save ConfusionMatrix in HTML file.

    :param name: filename
    :type name: str
    :param address: flag for address return
    :type address: bool
    :param overall_param: overall parameters list for save, Example: ["Kappa", "Scott PI"]
    :type overall_param: list
    :param class_param: class parameters list for save, Example: ["TPR", "TNR", "AUC"]
    :type class_param: list
    :param class_name: class name (sub set of classes), Example: [1, 2, 3]
    :type class_name: list
    :param color: matrix color (R,G,B)
    :type color: tuple
    :param normalize: save normalize matrix flag
    :type normalize: bool
    :return: saving Status as dict {"Status":bool , "Message":str}
    """
    try:
        message = None
        table = self.table
        if normalize:
            table = self.normalized_table
        # "with" closes the handle even if one of the html_* helpers raises
        # mid-write; the original leaked the handle in that case.
        with open(name + ".html", "w") as html_file:
            html_file.write(html_init(name))
            html_file.write(html_dataset_type(self.binary, self.imbalance))
            html_file.write(html_table(self.classes, table, color, normalize))
            html_file.write(
                html_overall_stat(
                    self.overall_stat,
                    self.digit,
                    overall_param,
                    self.recommended_list))
            class_stat_classes = class_filter(self.classes, class_name)
            html_file.write(
                html_class_stat(
                    class_stat_classes,
                    self.class_stat,
                    self.digit,
                    class_param,
                    self.recommended_list))
            html_file.write(html_end(VERSION))
        if address:
            message = os.path.join(os.getcwd(), name + ".html")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
Save ConfusionMatrix in CSV file. :param name: filename :type name : str :param address: flag for address return :type address : bool :param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"] :type class_param : list :param class_name : class name (sub set of classes), Example :[1,2,3] :type class_name : list :param matrix_save : save matrix flag :type matrix_save : bool :param normalize : save normalize matrix flag :type normalize : bool :return: saving Status as dict {"Status":bool , "Message":str}
def save_csv(
        self,
        name,
        address=True,
        class_param=None,
        class_name=None,
        matrix_save=True,
        normalize=False):
    """
    Save ConfusionMatrix in CSV file.

    :param name: filename
    :type name: str
    :param address: flag for address return
    :type address: bool
    :param class_param: class parameters list for save, Example: ["TPR", "TNR", "AUC"]
    :type class_param: list
    :param class_name: class name (sub set of classes), Example: [1, 2, 3]
    :type class_name: list
    :param matrix_save: save matrix flag
    :type matrix_save: bool
    :param normalize: save normalize matrix flag
    :type normalize: bool
    :return: saving Status as dict {"Status":bool , "Message":str}
    """
    try:
        message = None
        classes = class_filter(self.classes, class_name)
        csv_data = csv_print(
            classes, self.class_stat, self.digit, class_param)
        # BUG FIX: the original never closed either file handle, so data
        # could stay unflushed until interpreter exit; "with" guarantees
        # flush + close.
        with open(name + ".csv", "w") as csv_file:
            csv_file.write(csv_data)
        if matrix_save:
            matrix = self.table
            if normalize:
                matrix = self.normalized_table
            csv_matrix_data = csv_matrix_print(self.classes, matrix)
            with open(name + "_matrix" + ".csv", "w") as csv_matrix_file:
                csv_matrix_file.write(csv_matrix_data)
        if address:
            message = os.path.join(os.getcwd(), name + ".csv")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
Save ConfusionMatrix in .obj file. :param name: filename :type name : str :param address: flag for address return :type address : bool :return: saving Status as dict {"Status":bool , "Message":str}
def save_obj(self, name, address=True):
    """
    Save ConfusionMatrix in .obj file.

    :param name: filename
    :type name: str
    :param address: flag for address return
    :type address: bool
    :return: saving Status as dict {"Status":bool , "Message":str}
    """
    try:
        message = None
        actual_vector_temp = self.actual_vector
        predict_vector_temp = self.predict_vector
        # Copy the matrix rows so serialization cannot alias internal
        # state; store rows as (class, items) pairs because JSON object
        # keys must be strings, while class labels need not be.
        matrix_temp = {k: self.table[k].copy() for k in self.classes}
        matrix_items = []
        for i in self.classes:
            matrix_items.append((i, list(matrix_temp[i].items())))
        # numpy arrays are not JSON-serializable; convert to plain lists.
        if isinstance(actual_vector_temp, numpy.ndarray):
            actual_vector_temp = actual_vector_temp.tolist()
        if isinstance(predict_vector_temp, numpy.ndarray):
            predict_vector_temp = predict_vector_temp.tolist()
        # BUG FIX: the original never closed the file, so the dumped JSON
        # could remain unflushed; "with" guarantees flush + close.
        with open(name + ".obj", "w") as obj_file:
            json.dump({"Actual-Vector": actual_vector_temp,
                       "Predict-Vector": predict_vector_temp,
                       "Matrix": matrix_items,
                       "Digit": self.digit,
                       "Sample-Weight": self.weights,
                       "Transpose": self.transpose}, obj_file)
        if address:
            message = os.path.join(os.getcwd(), name + ".obj")
        return {"Status": True, "Message": message}
    except Exception as e:
        return {"Status": False, "Message": str(e)}
Calculate FBeta score. :param beta: beta parameter :type beta : float :return: FBeta score for classes as dict
def F_beta(self, beta):
    """
    Calculate FBeta score.

    :param beta: beta parameter
    :type beta: float
    :return: FBeta score for classes as dict
    """
    try:
        # One F-beta value per class, keyed exactly like self.TP.
        return {
            label: F_calc(
                TP=self.TP[label],
                FP=self.FP[label],
                FN=self.FN[label],
                beta=beta)
            for label in self.TP.keys()}
    except Exception:
        # pycm convention: empty dict signals failure.
        return {}
Calculate IBA_alpha score. :param alpha: alpha parameter :type alpha: float :return: IBA_alpha score for classes as dict
def IBA_alpha(self, alpha):
    """
    Calculate IBA_alpha score.

    :param alpha: alpha parameter
    :type alpha: float
    :return: IBA_alpha score for classes as dict
    """
    try:
        # One IBA value per class, derived from sensitivity/specificity.
        return {
            label: IBA_calc(self.TPR[label], self.TNR[label], alpha=alpha)
            for label in self.classes}
    except Exception:
        # pycm convention: empty dict signals failure.
        return {}
Rename ConfusionMatrix classes. :param mapping: mapping dictionary :type mapping : dict :return: None
def relabel(self, mapping):
    """
    Rename ConfusionMatrix classes.

    :param mapping: mapping dictionary (old class name -> new class name)
    :type mapping : dict
    :return: None
    """
    # Validate: mapping must be a dict whose keys are exactly the current
    # classes in the SAME order (list comparison is order-sensitive).
    if not isinstance(mapping, dict):
        raise pycmMatrixError(MAPPING_FORMAT_ERROR)
    if self.classes != list(mapping.keys()):
        raise pycmMatrixError(MAPPING_CLASS_NAME_ERROR)
    # Rebuild each row of both matrices under the new column names, then
    # re-key the row itself (delete old key before inserting the new one).
    for row in self.classes:
        temp_dict = {}
        temp_dict_normalized = {}
        for col in self.classes:
            temp_dict[mapping[col]] = self.table[row][col]
            temp_dict_normalized[mapping[col]] = self.normalized_table[row][col]
        del self.table[row]
        self.table[mapping[row]] = temp_dict
        del self.normalized_table[row]
        self.normalized_table[mapping[row]] = temp_dict_normalized
    # matrix / normalized_matrix are aliases of the tables; refresh them.
    self.matrix = self.table
    self.normalized_matrix = self.normalized_table
    # Re-key every per-class statistic under the new class names.
    # NOTE: self.classes still holds the OLD names here by design.
    for param in self.class_stat.keys():
        temp_dict = {}
        for classname in self.classes:
            temp_dict[mapping[classname]] = self.class_stat[param][classname]
        self.class_stat[param] = temp_dict
    # Only now switch the class list over to the new names.
    self.classes = list(mapping.values())
    # Refresh the TP/TN/FP/FN shortcut attributes and re-run the
    # class-stat initializer so dependent attributes stay consistent.
    self.TP = self.class_stat["TP"]
    self.TN = self.class_stat["TN"]
    self.FP = self.class_stat["FP"]
    self.FN = self.class_stat["FN"]
    __class_stat_init__(self)
*Sets the client cert for the requests.* The cert is either a path to a .pem file, or a JSON array, or a list having the cert path and the key path. Values ``null`` and ``${None}`` can be used for clearing the cert. *Examples* | `Set Client Cert` | ${CURDIR}/client.pem | | `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] | | `Set Client Cert` | ${paths_list} |
def set_client_cert(self, cert):
    """*Sets the client cert for the requests.*

    The cert is either a path to a .pem file, or a JSON array, or a list
    having the cert path and the key path.

    Values ``null`` and ``${None}`` can be used for clearing the cert.

    *Examples*

    | `Set Client Cert` | ${CURDIR}/client.pem |
    | `Set Client Cert` | ["${CURDIR}/client.cert", "${CURDIR}/client.key"] |
    | `Set Client Cert` | ${paths_list} |
    """
    # Parse/validate once, store on the session, and echo back the
    # stored value so callers can inspect what was applied.
    parsed_cert = self._input_client_cert(cert)
    self.request["cert"] = parsed_cert
    return self.request["cert"]
*Sets new request headers or updates the existing.* ``headers``: The headers to add or update as a JSON object or a dictionary. *Examples* | `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} | | `Set Headers` | { "Accept-Encoding": "identity"} | | `Set Headers` | ${auth_dict} |
def set_headers(self, headers):
    """*Sets new request headers or updates the existing.*

    ``headers``: The headers to add or update as a JSON object or a
    dictionary.

    *Examples*

    | `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
    | `Set Headers` | { "Accept-Encoding": "identity"} |
    | `Set Headers` | ${auth_dict} |
    """
    # Merge into the session-wide headers; existing keys are overwritten.
    incoming = self._input_object(headers)
    self.request["headers"].update(incoming)
    return self.request["headers"]
*Sets the schema to validate the request properties* Expectations are effective for following requests in the test suite, or until they are reset or updated by using expectation keywords again. On the test suite level (suite setup), they are best used for expecting the endpoint wide properties that are common regardless of the tested HTTP method, and on the test case level (test setup) to merge in the HTTP method specific properties. `Expect Request` is intented to be used with tests that have some of the request properties, e.g. body or query parameters, randomized ("fuzzing") for validating that the sent values are within the expected scope. If the keyword is used, following HTTP keywords will fail when their request properties are not valid against the expected schema. If the keyword is not used, a new schema is generated for each following request for its ``body`` and ``query`` properties. Use `Output Schema` to output it and use it as an input to this keyword. *Options* ``merge``: Merges the new schema with the current instead of replacing it *Examples* | `Expect Request` | ${CURDIR}/valid_payload.json | | # See `Output Schema` | | `Expect Request` | { "body": { "required": ["id"] } } | merge=true |
def expect_request(self, schema, merge=False):
    """*Sets the schema to validate the request properties.*

    Expectations are effective for following requests in the test suite,
    or until they are reset or updated by using expectation keywords
    again. Intended for tests that randomize ("fuzz") request properties,
    to validate the sent values stay within the expected scope. If the
    keyword is used, following HTTP keywords will fail when their request
    properties are not valid against the expected schema. If not used, a
    new schema is generated per request for ``body`` and ``query``; use
    `Output Schema` to output it and feed it back to this keyword.

    *Options*

    ``merge``: Merges the new schema with the current instead of replacing it

    *Examples*

    | `Expect Request` | ${CURDIR}/valid_payload.json | | # See `Output Schema` |
    | `Expect Request` | { "body": { "required": ["id"] } } | merge=true |
    """
    parsed = self._input_object(schema)
    # Bare property maps are wrapped so the stored schema is always a
    # full JSON Schema object.
    if "properties" not in parsed:
        parsed = {"properties": parsed}
    if not self._input_boolean(merge):
        # Replace mode: discard whatever was expected before.
        self.schema["properties"]["request"] = parsed
    else:
        # Merge mode: fold the new schema into the existing expectation.
        builder = SchemaBuilder(schema_uri=False)
        builder.add_schema(self.schema["properties"]["request"])
        builder.add_schema(parsed)
        self.schema["properties"]["request"] = builder.to_schema()
    return self.schema["properties"]["request"]
*Updates the schema to validate the response body properties.* Expectations are effective for following requests in the test suite, or until they are reset or updated by using expectation keywords again. On the test suite level (suite setup), they are best used for expecting the endpoint wide properties that are common regardless of the tested HTTP method, and on the test case level (test setup) to merge in the HTTP method specific properties. `Expect Response Body` is intented to be used on the test case level, to validate that the response body has the expected properties for the particular HTTP method. Note that if something about response body has been already expected with `Expected Response`, using this keyword updates the expectations in terms of given response body properties. If the keyword is used, following HTTP keywords will fail if their response body is not valid against the expected schema. If the keyword is not used, and no schema is already expected with `Expect Response` for response ``body``, a new schema is inferred for it. Use `Output Schema` to output it and use it as an input to this keyword. *Tips* Regardless whether the HTTP method returns one (an object) or many (an array of objects), the validation of the object property types and features can usually be done to some extent on the test suite level with `Expect Response`, then extended on the test case level using this keyword. This helps in ensuring that the data model is unified between the different HTTP methods. *Examples* | `Expect Response Body` | ${CURDIR}/user_properties.json | # See `Output Schema` | | `Expect Response Body` | { "required": ["id", "token"] } | # Only these are required from this method | | `Expect Response Body` | { "additionalProperties": false } | # Nothing extra should be responded by this method |
def expect_response_body(self, schema):
    """*Updates the schema to validate the response body properties.*

    Expectations are effective for following requests in the test suite,
    or until they are reset or updated by using expectation keywords
    again. Intended for the test case level, to validate that the
    response body has the expected properties for the particular HTTP
    method. If something about the response body was already expected
    with `Expect Response`, this keyword updates those expectations for
    the given properties. If the keyword is used, following HTTP keywords
    will fail if their response body is not valid against the expected
    schema; otherwise a new schema is inferred. Use `Output Schema` to
    output it and feed it back to this keyword.

    *Examples*

    | `Expect Response Body` | ${CURDIR}/user_properties.json | # See `Output Schema` |
    | `Expect Response Body` | { "required": ["id", "token"] } | # Only these are required from this method |
    | `Expect Response Body` | { "additionalProperties": false } | # Nothing extra should be responded by this method |
    """
    props = self.schema["properties"]["response"]["properties"]
    body_schema = self._input_object(schema)
    if "body" not in props:
        # First expectation for the body: store as-is.
        props["body"] = body_schema
    else:
        # Merge into the existing body expectation, key by key.
        props["body"].update(body_schema)
    return props["body"]
*Sends a GET request to the endpoint.* The endpoint is joined with the URL given on library init (if any). If endpoint starts with ``http://`` or ``https://``, it is assumed an URL outside the tested API (which may affect logging). *Options* ``query``: Request query parameters as a JSON object or a dictionary. Alternatively, query parameters can be given as part of endpoint as well. ``timeout``: A number of seconds to wait for the response before failing the keyword. ``allow_redirects``: If false, do not follow any redirects. ``validate``: If false, skips any request and response validations set by expectation keywords and a spec given on library init. ``headers``: Headers as a JSON object to add or override for the request. *Examples* | `GET` | /users/1 | | `GET` | /users | timeout=2.5 | | `GET` | /users?_limit=2 | | `GET` | /users | _limit=2 | | `GET` | /users | { "_limit": "2" } | | `GET` | https://jsonplaceholder.typicode.com/users | headers={ "Authentication": "" } |
def get(
    self,
    endpoint,
    query=None,
    timeout=None,
    allow_redirects=None,
    validate=True,
    headers=None,
):
    """*Sends a GET request to the endpoint.*

    The endpoint is joined with the URL given on library init (if any).
    If endpoint starts with ``http://`` or ``https://``, it is assumed
    an URL outside the tested API (which may affect logging).

    *Options*

    ``query``: Request query parameters as a JSON object or a dictionary.
    Alternatively, query parameters can be given as part of endpoint as well.

    ``timeout``: A number of seconds to wait for the response before failing the keyword.

    ``allow_redirects``: If false, do not follow any redirects.

    ``validate``: If false, skips any request and response validations set
    by expectation keywords and a spec given on library init.

    ``headers``: Headers as a JSON object to add or override for the request.

    *Examples*

    | `GET` | /users/1 |
    | `GET` | /users | timeout=2.5 |
    | `GET` | /users?_limit=2 |
    | `GET` | /users | _limit=2 |
    | `GET` | /users | { "_limit": "2" } |
    | `GET` | https://jsonplaceholder.typicode.com/users | headers={ "Authentication": "" } |
    """
    endpoint = self._input_string(endpoint)
    request = deepcopy(self.request)
    request["method"] = "GET"
    request["query"] = OrderedDict()
    # Query parameters embedded in the endpoint take effect first, then
    # the ``query`` argument may override them.
    embedded_query = OrderedDict(parse_qsl(urlparse(endpoint).query))
    if embedded_query:
        request["query"].update(embedded_query)
        # Strip the query string so only the path is sent onward.
        endpoint = endpoint.rsplit("?", 1)[0]
    if query:
        request["query"].update(self._input_object(query))
    if allow_redirects is not None:
        request["allowRedirects"] = self._input_boolean(allow_redirects)
    if timeout is not None:
        request["timeout"] = self._input_timeout(timeout)
    validate = self._input_boolean(validate)
    if headers:
        request["headers"].update(self._input_object(headers))
    return self._request(endpoint, request, validate)["response"]
*Sends a POST request to the endpoint.* The endpoint is joined with the URL given on library init (if any). If endpoint starts with ``http://`` or ``https://``, it is assumed an URL outside the tested API (which may affect logging). *Options* ``body``: Request body parameters as a JSON object, file or a dictionary. ``timeout``: A number of seconds to wait for the response before failing the keyword. ``allow_redirects``: If false, do not follow any redirects. ``validate``: If false, skips any request and response validations set by expectation keywords and a spec given on library init. ``headers``: Headers as a JSON object to add or override for the request. *Examples* | `POST` | /users | { "id": 11, "name": "Gil Alexander" } | | `POST` | /users | ${CURDIR}/new_user.json |
def post(
    self,
    endpoint,
    body=None,
    timeout=None,
    allow_redirects=None,
    validate=True,
    headers=None,
):
    """*Sends a POST request to the endpoint.*

    The endpoint is joined with the URL given on library init (if any).
    If endpoint starts with ``http://`` or ``https://``, it is assumed
    an URL outside the tested API (which may affect logging).

    *Options*

    ``body``: Request body parameters as a JSON object, file or a dictionary.

    ``timeout``: A number of seconds to wait for the response before failing the keyword.

    ``allow_redirects``: If false, do not follow any redirects.

    ``validate``: If false, skips any request and response validations set
    by expectation keywords and a spec given on library init.

    ``headers``: Headers as a JSON object to add or override for the request.

    *Examples*

    | `POST` | /users | { "id": 11, "name": "Gil Alexander" } |
    | `POST` | /users | ${CURDIR}/new_user.json |
    """
    endpoint = self._input_string(endpoint)
    request = deepcopy(self.request)
    request["method"] = "POST"
    # ``body`` may be a JSON string, a file path, or a dict; input()
    # normalizes all of those.
    request["body"] = self.input(body)
    if allow_redirects is not None:
        request["allowRedirects"] = self._input_boolean(allow_redirects)
    if timeout is not None:
        request["timeout"] = self._input_timeout(timeout)
    validate = self._input_boolean(validate)
    if headers:
        request["headers"].update(self._input_object(headers))
    return self._request(endpoint, request, validate)["response"]
*Sends a DELETE request to the endpoint.* The endpoint is joined with the URL given on library init (if any). If endpoint starts with ``http://`` or ``https://``, it is assumed an URL outside the tested API (which may affect logging). *Options* ``timeout``: A number of seconds to wait for the response before failing the keyword. ``allow_redirects``: If false, do not follow any redirects. ``validate``: If false, skips any request and response validations set by expectation keywords and a spec given on library init. ``headers``: Headers as a JSON object to add or override for the request. *Examples* | `DELETE` | /users/6 | | `DELETE` | http://localhost:8273/state | validate=false |
def delete(
    self,
    endpoint,
    timeout=None,
    allow_redirects=None,
    validate=True,
    headers=None,
):
    """*Sends a DELETE request to the endpoint.*

    The endpoint is joined with the URL given on library init (if any).
    If endpoint starts with ``http://`` or ``https://``, it is assumed
    an URL outside the tested API (which may affect logging).

    *Options*

    ``timeout``: A number of seconds to wait for the response before failing the keyword.

    ``allow_redirects``: If false, do not follow any redirects.

    ``validate``: If false, skips any request and response validations set
    by expectation keywords and a spec given on library init.

    ``headers``: Headers as a JSON object to add or override for the request.

    *Examples*

    | `DELETE` | /users/6 |
    | `DELETE` | http://localhost:8273/state | validate=false |
    """
    endpoint = self._input_string(endpoint)
    request = deepcopy(self.request)
    request["method"] = "DELETE"
    if allow_redirects is not None:
        request["allowRedirects"] = self._input_boolean(allow_redirects)
    if timeout is not None:
        request["timeout"] = self._input_timeout(timeout)
    validate = self._input_boolean(validate)
    if headers:
        request["headers"].update(self._input_object(headers))
    return self._request(endpoint, request, validate)["response"]
*Asserts the field does not exist.* The field consists of parts separated by spaces, the parts being object property names or array indices starting from 0, and the root being the instance created by the last request (see `Output` for it). For asserting deeply nested properties or multiple objects at once, [http://goessner.net/articles/JsonPath|JSONPath] can be used with [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body. *Examples* | `GET` | /users/1 | # https://jsonplaceholder.typicode.com/users/1 | | `Missing` | response body password | | `Missing` | $.password | | `Missing` | $..geo.elevation | # response body address geo elevation | | `GET` | /users | # https://jsonplaceholder.typicode.com/users | | `Missing` | response body 0 password | | `Missing` | $[*].password | | `Missing` | $[*]..geo.elevation |
def missing(self, field):
    """*Asserts the field does not exist.*

    The field consists of parts separated by spaces, the parts being
    object property names or array indices starting from 0, and the root
    being the instance created by the last request (see `Output` for it).
    For asserting deeply nested properties or multiple objects at once,
    [http://goessner.net/articles/JsonPath|JSONPath] can be used, the
    root being the response body.

    *Examples*

    | `GET` | /users/1 | # https://jsonplaceholder.typicode.com/users/1 |
    | `Missing` | response body password |
    | `Missing` | $.password |
    | `Missing` | $..geo.elevation | # response body address geo elevation |
    """
    # A lookup failure is exactly what "missing" asserts: succeed quietly.
    try:
        matches = self._find_by_field(field, print_found=False)
    except AssertionError:
        return
    # Anything found contradicts the assertion: log every hit, then fail.
    for found in matches:
        self.log_json(
            found["reality"],
            "\n\nExpected '%s' to not exist, but it is:" % (field),
        )
    raise AssertionError(
        "Expected '%s' to not exist, but it does." % (field)
    )
*Asserts the field as JSON null.* The field consists of parts separated by spaces, the parts being object property names or array indices starting from 0, and the root being the instance created by the last request (see `Output` for it). For asserting deeply nested properties or multiple objects at once, [http://goessner.net/articles/JsonPath|JSONPath] can be used with [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body. *Validations* The JSON Schema validation keywords [https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types] can be used. Validations are optional but update the schema of the property (more accurate) if given. `Output Schema` can be used for the current schema of the field. The keyword will fail if any of the given validations fail. Given validations can be skipped altogether by adding ``skip=true``. When skipped, the schema is updated but the validations are not ran. Skip is intented mainly for debugging the updated schema before aborting. *Examples* | `PUT` | /users/1 | { "deactivated_at": null } | # https://jsonplaceholder.typicode.com/users/1 | | `Null` | response body deactivated_at | | | `Null` | $.deactivated_at | | # JSONPath alternative |
def null(self, field, **validations):
    """*Asserts the field as JSON null.*

    The field consists of parts separated by spaces, the parts being
    object property names or array indices starting from 0, and the root
    being the instance created by the last request (see `Output` for it).
    For asserting deeply nested properties or multiple objects at once,
    [http://goessner.net/articles/JsonPath|JSONPath] can be used, the
    root being the response body.

    *Validations*

    The JSON Schema validation keywords common for all types can be used.
    Validations are optional but update the schema of the property (more
    accurate) if given. The keyword fails if any validation fails; add
    ``skip=true`` to update the schema without running the validations.

    *Examples*

    | `PUT` | /users/1 | { "deactivated_at": null } | # https://jsonplaceholder.typicode.com/users/1 |
    | `Null` | response body deactivated_at | |
    | `Null` | $.deactivated_at | | # JSONPath alternative |
    """
    results = []
    for match in self._find_by_field(field):
        actual = match["reality"]
        # ``skip`` lets callers update the schema without asserting.
        if not self._input_boolean(validations.pop("skip", False)):
            self._assert_schema({"type": "null"}, actual)
        results.append(actual)
    return results
*Asserts the field as JSON boolean.* The field consists of parts separated by spaces, the parts being object property names or array indices starting from 0, and the root being the instance created by the last request (see `Output` for it). For asserting deeply nested properties or multiple objects at once, [http://goessner.net/articles/JsonPath|JSONPath] can be used with [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body. *Value* If given, the property value is validated in addition to the type. *Validations* The JSON Schema validation keywords [https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types] can be used. Validations are optional but update the schema of the property (more accurate) if given. `Output Schema` can be used for the current schema of the field. The keyword will fail if any of the given validations fail. Given validations can be skipped altogether by adding ``skip=true``. When skipped, the schema is updated but the validations are not ran. Skip is intented mainly for debugging the updated schema before aborting. *Examples* | `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 | | `Boolean` | response body verified_email | | | | # value is optional | | `Boolean` | response body verified_email | true | | `Boolean` | response body verified_email | ${True} | | | # same as above | | `Boolean` | $.verified_email | true | | | # JSONPath alternative | | `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass |
def boolean(self, field, value=None, **validations):
    """*Asserts the field as JSON boolean.*

    The field consists of parts separated by spaces, the parts being
    object property names or array indices starting from 0, and the root
    being the instance created by the last request (see `Output` for it).

    For asserting deeply nested properties or multiple objects at once,
    [http://goessner.net/articles/JsonPath|JSONPath] can be used with
    [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath
    expressions], the root being the response body.

    *Value*

    If given, the property value is validated in addition to the type.

    *Validations*

    The JSON Schema validation keywords
    [https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types]
    can be used. Validations are optional but update the schema of
    the property (more accurate) if given.
    `Output Schema` can be used for the current schema of the field.

    The keyword will fail if any of the given validations fail.
    Given validations can be skipped altogether by adding ``skip=true``.
    When skipped, the schema is updated but the validations are not run.
    Skip is intended mainly for debugging the updated schema before aborting.

    *Examples*

    | `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 |
    | `Boolean` | response body verified_email |         | | | # value is optional |
    | `Boolean` | response body verified_email | true    |
    | `Boolean` | response body verified_email | ${True} | | | # same as above |
    | `Boolean` | $.verified_email | true | |               | # JSONPath alternative |
    | `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass |
    """
    # Resolve ``skip`` once, before the loop: popping it per iteration
    # meant only the FIRST of several JSONPath matches honored skip=true.
    skip = self._input_boolean(validations.pop("skip", False))
    values = []
    for found in self._find_by_field(field):
        reality = found["reality"]
        schema = {"type": "boolean"}
        if value is not None:
            # Pin the exact expected value in addition to the type
            schema["enum"] = [self._input_boolean(value)]
        elif self._should_add_examples():
            schema["examples"] = [reality]
        if not skip:
            self._assert_schema(schema, reality)
        values.append(reality)
    return values
*Asserts the field as JSON integer.* The field consists of parts separated by spaces, the parts being object property names or array indices starting from 0, and the root being the instance created by the last request (see `Output` for it). For asserting deeply nested properties or multiple objects at once, [http://goessner.net/articles/JsonPath|JSONPath] can be used with [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body. *Enum* The allowed values for the property as zero or more arguments. If none given, the value of the property is not asserted. *Validations* The JSON Schema validation keywords [https://json-schema.org/understanding-json-schema/reference/numeric.html#integer|for numeric types] can be used. Validations are optional but update the schema of the property (more accurate) if given. `Output Schema` can be used for the current schema of the field. The keyword will fail if any of the given validations fail. Given validations can be skipped altogether by adding ``skip=true``. When skipped, the schema is updated but the validations are not run. Skip is intended mainly for debugging the updated schema before aborting. *Examples* | `GET` | /users/1 | | # https://jsonplaceholder.typicode.com/users/1 | | `Integer` | response body id | | # value is optional | | `Integer` | response body id | 1 | | `Integer` | response body id | ${1} | # same as above | | `Integer` | $.id | 1 | # JSONPath alternative | | `GET` | /users?_limit=10 | | | | # https://jsonplaceholder.typicode.com/users | | `Integer` | response body 0 id | 1 | | | | `Integer` | $[0].id | 1 | | | # same as above | | `Integer` | $[*].id | | minimum=1 | maximum=10 |
def integer(self, field, *enum, **validations):
    """*Asserts the field as JSON integer.*

    The field consists of parts separated by spaces, the parts being
    object property names or array indices starting from 0, and the root
    being the instance created by the last request (see `Output` for it).

    For asserting deeply nested properties or multiple objects at once,
    [http://goessner.net/articles/JsonPath|JSONPath] can be used with
    [https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath
    expressions], the root being the response body.

    *Enum*

    The allowed values for the property as zero or more arguments.
    If none given, the value of the property is not asserted.

    *Validations*

    The JSON Schema validation keywords
    [https://json-schema.org/understanding-json-schema/reference/numeric.html#integer|for numeric types]
    can be used. Validations are optional but update the schema of
    the property (more accurate) if given.
    `Output Schema` can be used for the current schema of the field.

    The keyword will fail if any of the given validations fail.
    Given validations can be skipped altogether by adding ``skip=true``.
    When skipped, the schema is updated but the validations are not run.
    Skip is intended mainly for debugging the updated schema before aborting.

    *Examples*

    | `GET`     | /users/1 |   | # https://jsonplaceholder.typicode.com/users/1 |
    | `Integer` | response body id |      | # value is optional |
    | `Integer` | response body id | 1    |
    | `Integer` | response body id | ${1} | # same as above |
    | `Integer` | $.id             | 1    | # JSONPath alternative |

    | `GET`     | /users?_limit=10   |   |           |            | # https://jsonplaceholder.typicode.com/users |
    | `Integer` | response body 0 id | 1 |           |            |
    | `Integer` | $[0].id            | 1 |           |            | # same as above |
    | `Integer` | $[*].id            |   | minimum=1 | maximum=10 |
    """
    # Resolve ``skip`` once, before the loop: popping it per iteration
    # meant only the FIRST of several JSONPath matches honored skip=true.
    skip = self._input_boolean(validations.pop("skip", False))
    values = []
    for found in self._find_by_field(field):
        schema = found["schema"]
        reality = found["reality"]
        # Merge the remaining keyword arguments as numeric validations
        self._set_type_validations("integer", schema, validations)
        if enum:
            if "enum" not in schema:
                schema["enum"] = []
            for value in enum:
                value = self._input_integer(value)
                if value not in schema["enum"]:
                    schema["enum"].append(value)
        elif self._should_add_examples():
            schema["examples"] = [reality]
        if not skip:
            self._assert_schema(schema, reality)
        values.append(reality)
    return values
*Converts the input to JSON and returns it.* Any of the following is accepted: - The path to JSON file - Any scalar that can be interpreted as JSON - A dictionary or a list *Examples* | ${payload} | `Input` | ${CURDIR}/payload.json | | ${object} | `Input` | { "name": "Julie Langford", "username": "jlangfor" } | | ${object} | `Input` | ${dict} | | ${array} | `Input` | ["name", "username"] | | ${array} | `Input` | ${list} | | ${boolean} | `Input` | true | | ${boolean} | `Input` | ${True} | | ${number} | `Input` | 2.0 | | ${number} | `Input` | ${2.0} | | ${string} | `Input` | Quotes are optional for strings |
def input(self, what):
    """*Converts the input to JSON and returns it.*

    Any of the following is accepted:

    - The path to JSON file
    - Any scalar that can be interpreted as JSON
    - A dictionary or a list

    *Examples*

    | ${payload} | `Input` | ${CURDIR}/payload.json |
    | ${object} | `Input` | { "name": "Julie Langford", "username": "jlangfor" } |
    | ${object} | `Input` | ${dict} |
    | ${array} | `Input` | ["name", "username"] |
    | ${array} | `Input` | ${list} |
    | ${boolean} | `Input` | true |
    | ${boolean} | `Input` | ${True} |
    | ${number} | `Input` | 2.0 |
    | ${number} | `Input` | ${2.0} |
    | ${string} | `Input` | Quotes are optional for strings |
    """
    # The order of these guards is significant: None passes through,
    # non-strings (dicts, lists, numbers, booleans) are converted directly,
    # an existing file path wins over parsing the string itself.
    if what is None:
        return None
    if not isinstance(what, STRING_TYPES):
        return self._input_json_from_non_string(what)
    if path.isfile(what):
        return self._input_json_from_file(what)
    try:
        # Try to parse the string as JSON first ...
        return self._input_json_as_string(what)
    except ValueError:
        # ... and fall back to treating it as a plain (unquoted) string
        return self._input_string(what)
*Outputs JSON Schema to terminal or a file.* By default, the schema is output for the last request and response. The output can be limited further by: - The property of the last instance, e.g. ``request`` or ``response`` - Any nested property that exists, similarly as for assertion keywords Also variables and values that can be converted to JSON are accepted, in which case the schema is generated for those instead. *Options* ``file_path``: The JSON Schema is written to a file instead of terminal. The file is created if it does not exist. ``append``: If true, the JSON Schema is appended to the given file instead of truncating it first. ``sort_keys``: If true, the JSON Schema is sorted alphabetically by property names before it is output. *Examples* | `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` | | `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` | | `Output Schema` | $.email | # only the schema for one response body property | | `Output Schema` | $..geo | # only the schema for the nested response body property |
def output_schema(
    self, what="", file_path=None, append=False, sort_keys=False
):
    """*Outputs JSON Schema to terminal or a file.*

    By default, the schema is output for the last request and response.

    The output can be limited further by:

    - The property of the last instance, e.g. ``request`` or ``response``
    - Any nested property that exists, similarly as for assertion keywords

    Also variables and values that can be converted to JSON are accepted,
    in which case the schema is generated for those instead.

    *Options*

    ``file_path``: The JSON Schema is written to a file instead of terminal.
    The file is created if it does not exist.

    ``append``: If true, the JSON Schema is appended to the given file
    instead of truncating it first.

    ``sort_keys``: If true, the JSON Schema is sorted alphabetically by
    property names before it is output.

    *Examples*

    | `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
    | `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
    | `Output Schema` | $.email | # only the schema for one response body property |
    | `Output Schema` | $..geo | # only the schema for the nested response body property |
    """
    if isinstance(what, (STRING_TYPES)):
        if what == "":
            # No argument: default to the schema of the last instance
            try:
                json = self._last_instance_or_error()["schema"]
            except IndexError:
                raise RuntimeError(no_instances_error)
        elif what.startswith(("request", "response", "$")):
            # A field of the last instance; calling this first makes the
            # keyword error out early when no requests have been made yet
            self._last_instance_or_error()
            matches = self._find_by_field(what)
            if len(matches) > 1:
                # JSONPath matched several fields: output a list of schemas
                json = [found["schema"] for found in matches]
            else:
                json = matches[0]["schema"]
        else:
            # Any other string: generate a fresh schema for the value,
            # parsing it as JSON first and falling back to a plain string
            try:
                json = self._new_schema(self._input_json_as_string(what))
            except ValueError:
                json = self._new_schema(self._input_string(what))
    else:
        # Non-string input (dict, list, number, ...): generate a schema
        json = self._new_schema(self._input_json_from_non_string(what))
    sort_keys = self._input_boolean(sort_keys)
    if not file_path:
        self.log_json(json, sort_keys=sort_keys)
    else:
        content = dumps(
            json,
            ensure_ascii=False,
            indent=4,
            separators=(",", ": "),
            sort_keys=sort_keys,
        )
        write_mode = "a" if self._input_boolean(append) else "w"
        try:
            # Relative paths are resolved against the current working
            # directory; path.join leaves absolute paths untouched
            with open(
                path.join(getcwd(), file_path), write_mode, encoding="utf-8"
            ) as file:
                if IS_PYTHON_2:
                    # Python 2 codecs-style file objects require unicode
                    content = unicode(content)
                file.write(content)
        except IOError as e:
            raise RuntimeError(
                "Error outputting to file '%s':\n%s" % (file_path, e)
            )
    return json
*Writes the instances as JSON to a file.* The instances are written to file as a JSON array of JSON objects, each object representing a single instance, and having three properties: - the request - the response - the schema for both, which have been updated according to the tests The file is created if it does not exist, otherwise it is truncated. *Options* ``sort_keys``: If true, the instances are sorted alphabetically by property names. *Examples* | `Rest Instances` | ${CURDIR}/log.json |
def rest_instances(self, file_path=None, sort_keys=False):
    """*Writes the instances as JSON to a file.*

    The instances are written to file as a JSON array of JSON objects,
    each object representing a single instance, and having three properties:

    - the request
    - the response
    - the schema for both, which have been updated according to the tests

    The file is created if it does not exist, otherwise it is truncated.

    *Options*

    ``sort_keys``: If true, the instances are sorted alphabetically by
    property names.

    *Examples*

    | `Rest Instances` | ${CURDIR}/log.json |
    """
    if not file_path:
        # Default target: <output dir>/<request netloc>.json, or
        # <output dir>/instances.json when no request has been made
        outputdir_path = BuiltIn().get_variable_value("${OUTPUTDIR}")
        basename = self.request["netloc"] or "instances"
        file_path = path.join(outputdir_path, basename) + ".json"
    content = dumps(
        self.instances,
        ensure_ascii=False,
        indent=4,
        separators=(",", ": "),
        sort_keys=self._input_boolean(sort_keys),
    )
    try:
        with open(file_path, "w", encoding="utf-8") as file:
            if IS_PYTHON_2:
                content = unicode(content)
            file.write(content)
    except IOError as e:
        raise RuntimeError(
            "Error exporting instances " +
            "to file '%s':\n%s" % (file_path, e)
        )
    return self.instances
Adds tools from self.toolbardata to self
def add_tools(self):
    """Adds tools from self.toolbardata to self

    Each entry of self.toolbardata is a tuple whose first element selects
    the tool type:

    * "T": simple tool -> (type, message type, label, tooltip)
    * "S": separator -> (type,)
    * "C": control -> (type, control, tooltip)
    * "O": check tool / option button -> (type, label, tooltip)

    Raises ValueError for any other type tag.
    """
    for data in self.toolbardata:
        # tool type is in data[0]
        if data[0] == "T":
            # Simple tool
            _, msg_type, label, tool_tip = data
            icon = icons[label]
            # Remember the id so other handlers can look the tool up by label
            self.label2id[label] = tool_id = wx.NewId()
            self.AddSimpleTool(tool_id, label, icon, short_help_string=tool_tip)
            # Map the tool id to the command event type posted by OnTool
            self.ids_msgs[tool_id] = msg_type
            self.parent.Bind(wx.EVT_TOOL, self.OnTool, id=tool_id)
        elif data[0] == "S":
            # Separator
            self.AddSeparator()
        elif data[0] == "C":
            # Control
            _, control, tool_tip = data
            self.AddControl(control, label=tool_tip)
        elif data[0] == "O":
            # Check tool / option button
            _, label, tool_tip = data
            icon = icons[label]
            self.label2id[label] = tool_id = wx.NewId()
            self.AddCheckTool(tool_id, label, icon, icon, tool_tip)
        else:
            raise ValueError("Unknown tooltype " + str(data[0]))
    self.SetCustomOverflowItems([], [])
    self.Realize()
    # Adjust Toolbar size
    self.SetSize(self.DoGetBestSize())
Toolbar event handler
def OnTool(self, event):
    """Toolbar event handler

    Posts the command event that was registered for the clicked tool
    in self.ids_msgs.
    """
    post_command_event(self, self.ids_msgs[event.GetId()])
Tool event handler
def OnToggleTool(self, event):
    """Tool event handler

    Persists the spell-checking toggle state in the configuration,
    mirrors it in the View menu and forces the grid to re-render.
    """
    # Config values are stored as strings
    config["check_spelling"] = str(event.IsChecked())
    toggle_id = self.parent.menubar.FindMenuItem(_("View"), _("Check spelling"))
    if toggle_id != -1:
        # Check may fail if translation is incomplete
        toggle_item = self.parent.menubar.FindItemById(toggle_id)
        toggle_item.Check(event.IsChecked())
    # Drop cached cell renderings so spelling marks appear/disappear
    self.parent.grid.grid_renderer.cell_cache.clear()
    self.parent.grid.ForceRefresh()
    event.Skip()
Updates the toolbar states
def OnUpdate(self, event): """Updates the toolbar states""" # Gray out undo and redo id not available undo_toolid = self.label2id["Undo"] redo_toolid = self.label2id["Redo"] self.EnableTool(undo_toolid, undo.stack().canundo()) self.EnableTool(redo_toolid, undo.stack().canredo()) # Set ToolTip strings to potential next undo / redo action undotext = undo.stack().undotext() undo_tool = self.FindTool(undo_toolid) if undotext is None: undo_tool.SetShortHelp(_("No undo actions available")) else: undo_tool.SetShortHelp(undotext) redotext = undo.stack().redotext() redo_tool = self.FindTool(redo_toolid) if redotext is None: redo_tool.SetShortHelp(_("No redo actions available")) else: redo_tool.SetShortHelp(redotext) self.Refresh() event.Skip()
Gets Button label from user and returns string
def _get_button_label(self):
    """Prompts the user for a button label and returns it as a string

    Returns an empty string if the dialog is cancelled.
    """
    dlg = wx.TextEntryDialog(self, _('Button label:'))
    label = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else ""
    dlg.Destroy()
    return label
Event handler for cell button toggle button
def OnButtonCell(self, event):
    """Event handler for cell button toggle button

    Toggling on asks the user for a label and posts it; toggling off
    posts ``text=False`` to remove the cell button.
    """
    if self.button_cell_button_id == event.GetId():
        text = self._get_button_label() if event.IsChecked() else False
        post_command_event(self, self.ButtonCellMsg, text=text)
    event.Skip()
Event handler for video cell toggle button
def OnVideoCell(self, event):
    """Event handler for video cell toggle button

    Toggling on asks the user for a media file and posts a VideoCellMsg
    with its path; toggling off posts the message with videofile=False.
    """
    if self.video_cell_button_id == event.GetId():
        if event.IsChecked():
            wildcard = _("Media files") + " (*.*)|*.*" videofile, __ = self.get_filepath_findex_from_user( wildcard, "Choose video or audio file", wx.OPEN) post_command_event(self, self.VideoCellMsg, videofile=videofile)
        else:
            post_command_event(self, self.VideoCellMsg, videofile=False)
    event.Skip()
Updates the toolbar states
def OnUpdate(self, event):
    """Updates the toolbar states

    Synchronizes the cell-button tool with the attributes in event.attr.
    """
    self._update_buttoncell(event.attr["button_cell"])
    self.Refresh()
    event.Skip()
Creates the search menu
def make_menu(self):
    """Creates and returns the recent-searches menu

    The first entry is a disabled header; the remaining entries are the
    search history strings, with their history index as menu item id.
    """
    menu = wx.Menu()
    header = menu.Append(-1, "Recent Searches")
    header.Enable(False)
    for item_id, text in enumerate(self.search_history):
        menu.Append(item_id, text)
    return menu
Search history has been selected
def OnMenu(self, event):
    """Search history has been selected

    Copies the chosen history entry into the search control; any other
    event source is passed on via Skip().
    """
    item_id = event.GetId()
    try:
        menuitem = event.GetEventObject().FindItemById(item_id)
        self.search.SetValue(menuitem.GetItemLabel())
    except AttributeError:
        # Not called by menu
        event.Skip()
Event handler for starting the search
def OnSearch(self, event):
    """Event handler for starting the search

    Records the search string in the (max. 10 entry) history, rebuilds
    the history menu and posts a FindMsg with the current options.
    """
    search_string = self.search.GetValue()
    if search_string not in self.search_history:
        self.search_history.append(search_string)
    # Keep only the ten most recent searches
    if len(self.search_history) > 10:
        self.search_history.pop(0)
    self.menu = self.make_menu()
    self.search.SetMenu(self.menu)
    post_command_event(self, self.FindMsg, text=search_string,
                       flags=self.search_options + ["FIND_NEXT"])
    self.search.SetFocus()
Event handler for search direction toggle button
def OnSearchDirectionButton(self, event):
    """Event handler for search direction toggle button

    Flips the direction flag in self.search_options between UP and DOWN.
    """
    if "DOWN" in self.search_options:
        self.search_options[self.search_options.index("DOWN")] = "UP"
    elif "UP" in self.search_options:
        self.search_options[self.search_options.index("UP")] = "DOWN"
    else:
        raise AttributeError(_("Neither UP nor DOWN in search_flags"))
    event.Skip()
Event handler for search flag toggle buttons
def OnSearchFlag(self, event):
    """Event handler for search flag toggle buttons

    Adds the toggled flag to self.search_options or removes it again,
    depending on the button state.
    """
    for label in self.search_options_buttons:
        if self.label2id[label] == event.GetId():
            if event.IsChecked():
                self.search_options.append(label)
            else:
                # Equivalent to index() + pop(): drops the first occurrence
                self.search_options.remove(label)
    event.Skip()
Creates font choice combo box
def _create_font_choice_combo(self):
    """Creates font choice combo box

    Populates self.fonts with the system font list, adds a read-only
    combo box for it and binds selection and toolbar-update handlers.
    """
    self.fonts = get_font_list()
    self.font_choice_combo = \
        _widgets.FontChoiceCombobox(self, choices=self.fonts,
                                    style=wx.CB_READONLY, size=(125, -1))
    self.font_choice_combo.SetToolTipString(_(u"Text font"))
    self.AddControl(self.font_choice_combo)
    self.Bind(wx.EVT_COMBOBOX, self.OnTextFont, self.font_choice_combo)
    self.parent.Bind(self.EVT_CMD_TOOLBAR_UPDATE, self.OnUpdate)
Creates font size combo box
def _create_font_size_combo(self):
    """Creates font size combo box

    The combo box is editable (wx.CB_DROPDOWN) and also fires on Enter,
    so both handlers are bound to OnTextSize.
    """
    self.std_font_sizes = config["font_default_sizes"]
    # Initial value: point size of the system default font
    font_size = str(get_default_font().GetPointSize())
    self.font_size_combo = \
        wx.ComboBox(self, -1, value=font_size, size=(60, -1),
                    choices=map(unicode, self.std_font_sizes),
                    style=wx.CB_DROPDOWN | wx.TE_PROCESS_ENTER)
    self.font_size_combo.SetToolTipString(_(u"Text size\n(points)"))
    self.AddControl(self.font_size_combo)
    self.Bind(wx.EVT_COMBOBOX, self.OnTextSize, self.font_size_combo)
    self.Bind(wx.EVT_TEXT_ENTER, self.OnTextSize, self.font_size_combo)
Creates font face buttons
def _create_font_face_buttons(self):
    """Creates font face buttons

    Each tuple holds (tool id, handler name, icon name, help string).
    NOTE: the tool ids reuse wx font flag constants; Freeze, Lock and
    Markup piggyback on otherwise unrelated constants (FONTFLAG_MASK,
    FONTFLAG_NOT_ANTIALIASED, FONTFAMILY_DECORATIVE) so that the update
    code can toggle them by the same ids.
    """
    font_face_buttons = [
        (wx.FONTFLAG_BOLD, "OnBold", "FormatTextBold", _("Bold")),
        (wx.FONTFLAG_ITALIC, "OnItalics", "FormatTextItalic",
         _("Italics")),
        (wx.FONTFLAG_UNDERLINED, "OnUnderline", "FormatTextUnderline",
         _("Underline")),
        (wx.FONTFLAG_STRIKETHROUGH, "OnStrikethrough",
         "FormatTextStrikethrough", _("Strikethrough")),
        (wx.FONTFLAG_MASK, "OnFreeze", "Freeze", _("Freeze")),
        (wx.FONTFLAG_NOT_ANTIALIASED, "OnLock", "Lock", _("Lock cell")),
        (wx.FONTFAMILY_DECORATIVE, "OnMarkup", "Markup", _("Markup")),
    ]
    for __id, method, iconname, helpstring in font_face_buttons:
        bmp = icons[iconname]
        self.AddCheckTool(__id, iconname, bmp, bmp,
                          short_help_string=helpstring)
        self.Bind(wx.EVT_TOOL, getattr(self, method), id=__id)
Create text rotation toggle button
def _create_textrotation_button(self):
    """Create text rotation toggle button

    Cycles through the four rotation states via a bitmap toggle button.
    """
    icon_names = ["TextRotate270", "TextRotate0",
                  "TextRotate90", "TextRotate180"]
    bitmaps = [icons[name] for name in icon_names]
    self.rotation_tb = _widgets.BitmapToggleButton(self, bitmaps)
    self.rotation_tb.SetToolTipString(_(u"Cell text rotation"))
    self.Bind(wx.EVT_BUTTON, self.OnRotate, self.rotation_tb)
    self.AddControl(self.rotation_tb)
Creates horizontal justification button
def _create_justification_button(self):
    """Creates horizontal justification button

    A bitmap toggle button cycling through left, center and right.
    """
    icon_names = ["JustifyLeft", "JustifyCenter", "JustifyRight"]
    bitmaps = [icons[name] for name in icon_names]
    self.justify_tb = _widgets.BitmapToggleButton(self, bitmaps)
    self.justify_tb.SetToolTipString(_(u"Justification"))
    self.Bind(wx.EVT_BUTTON, self.OnJustification, self.justify_tb)
    self.AddControl(self.justify_tb)
Creates vertical alignment button
def _create_alignment_button(self):
    """Creates vertical alignment button

    A bitmap toggle button cycling through top, center and bottom.
    """
    icon_names = ["AlignTop", "AlignCenter", "AlignBottom"]
    bitmaps = [icons[name] for name in icon_names]
    self.alignment_tb = _widgets.BitmapToggleButton(self, bitmaps)
    self.alignment_tb.SetToolTipString(_(u"Alignment"))
    self.Bind(wx.EVT_BUTTON, self.OnAlignment, self.alignment_tb)
    self.AddControl(self.alignment_tb)
Create border choice combo box
def _create_borderchoice_combo(self):
    """Create border choice combo box

    The selection determines which borders are affected by subsequent
    width and color changes; the state is kept in self.borderstate.
    """
    choices = [c[0] for c in self.border_toggles]
    self.borderchoice_combo = \
        _widgets.BorderEditChoice(self, choices=choices,
                                  style=wx.CB_READONLY, size=(50, -1))
    self.borderchoice_combo.SetToolTipString(
        _(u"Choose borders for which attributes are changed"))
    # Default border state: first entry of border_toggles
    self.borderstate = self.border_toggles[0][0]
    self.AddControl(self.borderchoice_combo)
    self.Bind(wx.EVT_COMBOBOX, self.OnBorderChoice,
              self.borderchoice_combo)
    self.borderchoice_combo.SetValue("AllBorders")
Create pen width combo box
def _create_penwidth_combo(self):
    """Create pen width combo box

    Offers border line widths 0..11 (Python 2: map/xrange return the
    choices as unicode strings).
    """
    choices = map(unicode, xrange(12))
    self.pen_width_combo = \
        _widgets.PenWidthComboBox(self, choices=choices,
                                  style=wx.CB_READONLY, size=(50, -1))
    self.pen_width_combo.SetToolTipString(_(u"Border width"))
    self.AddControl(self.pen_width_combo)
    self.Bind(wx.EVT_COMBOBOX, self.OnLineWidth, self.pen_width_combo)
Create color choice buttons
def _create_color_buttons(self):
    """Create color choice buttons

    Adds three ColourSelect buttons: border line color, cell background
    and text color. The line color button is labeled with the Unicode
    box-drawing character U+2500; on ANSI wxPython builds this raises
    UnicodeEncodeError and a plain "-" is used instead.
    """
    button_size = (30, 30)
    button_style = wx.NO_BORDER
    try:
        self.linecolor_choice = \
            csel.ColourSelect(self, -1, unichr(0x2500), (0, 0, 0),
                              size=button_size, style=button_style)
    except UnicodeEncodeError:
        # ANSI wxPython installed
        self.linecolor_choice = \
            csel.ColourSelect(self, -1, "-", (0, 0, 0),
                              size=button_size, style=button_style)
    self.bgcolor_choice = \
        csel.ColourSelect(self, -1, "", (255, 255, 255),
                          size=button_size, style=button_style)
    self.textcolor_choice = \
        csel.ColourSelect(self, -1, "A", (0, 0, 0),
                          size=button_size, style=button_style)
    self.linecolor_choice.SetToolTipString(_(u"Border line color"))
    self.bgcolor_choice.SetToolTipString(_(u"Cell background"))
    self.textcolor_choice.SetToolTipString(_(u"Text color"))
    self.AddControl(self.linecolor_choice)
    self.AddControl(self.bgcolor_choice)
    self.AddControl(self.textcolor_choice)
    self.linecolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnLineColor)
    self.bgcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnBGColor)
    self.textcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnTextColor)
Create merge button
def _create_merge_button(self):
    """Create merge button

    Adds a check tool for merging cells and binds it to OnMerge.
    """
    bmp = icons["Merge"]
    self.mergetool_id = wx.NewId()
    self.AddCheckTool(self.mergetool_id, "Merge", bmp, bmp,
                      short_help_string=_("Merge cells"))
    self.Bind(wx.EVT_TOOL, self.OnMerge, id=self.mergetool_id)
Updates text font widget Parameters ---------- textfont: String \tFont name
def _update_font(self, textfont):
    """Updates text font widget

    Selects *textfont* in the font choice combo box, falling back to
    the first font if it is not in the font list.

    Parameters
    ----------
    textfont: String
    \tFont name
    """
    if textfont in self.fonts:
        fontface_id = self.fonts.index(textfont)
    else:
        fontface_id = 0
    self.font_choice_combo.Select(fontface_id)
Updates font weight widget Parameters ---------- font_weight: Integer \tButton down iff font_weight == wx.FONTWEIGHT_BOLD
def _update_font_weight(self, font_weight):
    """Updates font weight widget

    Parameters
    ----------
    font_weight: Integer
    \tButton down iff font_weight has wx.FONTWEIGHT_BOLD set
    """
    is_bold = font_weight & wx.FONTWEIGHT_BOLD == wx.FONTWEIGHT_BOLD
    self.ToggleTool(wx.FONTFLAG_BOLD, is_bold)
Updates font style widget Parameters ---------- font_style: Integer \tButton down iff font_style == wx.FONTSTYLE_ITALIC
def _update_font_style(self, font_style):
    """Updates font style widget

    Parameters
    ----------
    font_style: Integer
    \tButton down iff font_style has wx.FONTSTYLE_ITALIC set
    """
    is_italic = font_style & wx.FONTSTYLE_ITALIC == wx.FONTSTYLE_ITALIC
    self.ToggleTool(wx.FONTFLAG_ITALIC, is_italic)
Updates frozen cell widget Parameters ---------- frozen: Bool or string \tUntoggled iff False
def _update_frozencell(self, frozen):
    """Updates frozen cell widget

    Parameters
    ----------
    frozen: Bool or string
    \tUntoggled iff False
    """
    # FONTFLAG_MASK doubles as the freeze tool id
    self.ToggleTool(wx.FONTFLAG_MASK, frozen is not False)
Updates text rotation toggle button
def _update_textrotation(self, angle):
    """Updates text rotation toggle button

    Maps *angle* (degrees) to one of four button states; any other
    angle falls back to state 0.
    """
    states = {0: 0, -90: 1, 180: 2, 90: 3}
    self.rotation_tb.state = states.get(round(angle), 0)
    self.rotation_tb.toggle(None)
    self.rotation_tb.Refresh()
Updates horizontal text justification button Parameters ---------- justification: String in ["left", "center", "right"] \tJustification state that the toggle button is switched to
def _update_justification(self, justification):
    """Updates horizontal text justification button

    Parameters
    ----------
    justification: String in ["left", "center", "right"]
    \tJustification state that the toggle button is switched to
    """
    states = {"left": 2, "center": 0, "right": 1}
    self.justify_tb.state = states[justification]
    self.justify_tb.toggle(None)
    self.justify_tb.Refresh()
Updates vertical text alignment button Parameters ---------- alignment: String in ["top", "middle", "bottom"] \tAlignment state that the toggle button is switched to
def _update_alignment(self, alignment):
    """Updates vertical text alignment button

    Parameters
    ----------
    alignment: String in ["top", "middle", "bottom"]
    \tAlignment state that the toggle button is switched to
    """
    states = {"top": 2, "middle": 0, "bottom": 1}
    self.alignment_tb.state = states[alignment]
    self.alignment_tb.toggle(None)
    self.alignment_tb.Refresh()
Updates text font color button Parameters ---------- fontcolor: Integer \tText color in integer RGB format
def _update_fontcolor(self, fontcolor):
    """Updates text font color button

    Parameters
    ----------
    fontcolor: Integer
    \tText color in integer RGB format
    """
    color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT)
    color.SetRGB(fontcolor)
    self.textcolor_choice.SetColour(color)
Updates background color
def _update_bgbrush(self, bgcolor):
    """Updates cell background color button

    Parameters
    ----------
    bgcolor: Integer
    \tBackground color in integer RGB format
    """
    color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)
    color.SetRGB(bgcolor)
    self.bgcolor_choice.SetColour(color)
Updates border line color
def _update_bordercolor(self, bordercolor):
    """Updates border line color button

    Parameters
    ----------
    bordercolor: Integer
    \tBorder color in integer RGB format
    """
    border_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVEBORDER)
    border_color.SetRGB(bordercolor)
    self.linecolor_choice.SetColour(border_color)
Updates the toolbar states
def OnUpdate(self, event):
    """Updates the toolbar states

    Synchronizes every widget of the attributes toolbar with the cell
    attribute dict carried in ``event.attr``, then refreshes the toolbar.
    """
    attributes = event.attr
    self._update_font(attributes["textfont"])
    self._update_pointsize(attributes["pointsize"])
    self._update_font_weight(attributes["fontweight"])
    self._update_font_style(attributes["fontstyle"])
    self._update_frozencell(attributes["frozen"])
    self._update_lockedcell(attributes["locked"])
    self._update_markupcell(attributes["markup"])
    self._update_underline(attributes["underline"])
    self._update_strikethrough(attributes["strikethrough"])
    self._update_justification(attributes["justification"])
    self._update_alignment(attributes["vertical_align"])
    self._update_fontcolor(attributes["textcolor"])
    self._update_merge(attributes["merge_area"] is not None)
    self._update_textrotation(attributes["angle"])
    self._update_bgbrush(attributes["bgcolor"])
    # Border widgets show the bottom border's attributes
    self._update_bordercolor(attributes["bordercolor_bottom"])
    self._update_borderwidth(attributes["borderwidth_bottom"])
    self.Refresh()
    event.Skip()
Change the borders that are affected by color and width changes
def OnBorderChoice(self, event):
    """Change the borders that are affected by color and width changes

    Stores the chosen combo box entry in self.borderstate.
    """
    items = event.GetEventObject().GetItems()
    self.borderstate = items[event.GetInt()]
Line width choice event handler
def OnLineWidth(self, event):
    """Line width choice event handler

    Posts a BorderWidthMsg with the chosen width for the currently
    selected borders.
    """
    combo = event.GetEventObject()
    width = int(combo.GetString(event.GetInt()))
    post_command_event(self, self.BorderWidthMsg, width=width,
                       borders=self.bordermap[self.borderstate])
Line color choice event handler
def OnLineColor(self, event):
    """Line color choice event handler

    Posts a BorderColorMsg with the chosen color for the currently
    selected borders.
    """
    post_command_event(self, self.BorderColorMsg,
                       color=event.GetValue().GetRGB(),
                       borders=self.bordermap[self.borderstate])
Background color choice event handler
def OnBGColor(self, event):
    """Background color choice event handler

    Posts a BackgroundColorMsg with the chosen color.
    """
    post_command_event(self, self.BackgroundColorMsg,
                       color=event.GetValue().GetRGB())