Dataset columns (types and value ranges as shown in the dataset viewer):
id: int32, values 0 to 252k
repo: string, lengths 7 to 55
path: string, lengths 4 to 127
func_name: string, lengths 1 to 88
original_string: string, lengths 75 to 19.8k
language: string, 1 distinct value
code: string, lengths 51 to 19.8k
code_tokens: sequence
docstring: string, lengths 3 to 17.3k
docstring_tokens: sequence
sha: string, lengths 40 to 40
url: string, lengths 87 to 242
3,600
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.clip
python
def clip(self, lower=None, upper=None):
    '''
    Trim values at input thresholds using pandas function
    '''
    df = self.export_df()
    df = df.clip(lower=lower, upper=upper)
    self.load_df(df)
[ "def", "clip", "(", "self", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "df", "=", "self", ".", "export_df", "(", ")", "df", "=", "df", ".", "clip", "(", "lower", "=", "lower", ",", "upper", "=", "upper", ")", "self", ".", "load_df", "(", "df", ")" ]
Trim values at input thresholds using pandas function
[ "Trim", "values", "at", "input", "thresholds", "using", "pandas", "function" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L321-L327
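A minimal usage sketch for the record above, assuming the pre-built net object that clustergrammer2 exposes and a toy DataFrame; the loading workflow shown here is an assumption, not part of this record:

# hypothetical setup: load a small matrix, then clamp values to [-2, 2]
import pandas as pd
from clustergrammer2 import net  # assumed import of the pre-built Network instance

df_toy = pd.DataFrame([[5.0, -8.0], [0.5, 2.0]], index=['r1', 'r2'], columns=['c1', 'c2'])
net.load_df(df_toy)
net.clip(lower=-2, upper=2)  # values outside [-2, 2] are trimmed to the thresholds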
3,601
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.random_sample
python
def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row'):
    '''
    Return random sample of matrix.
    '''
    if df is None:
        df = self.dat_to_df()

    if axis == 'row':
        axis = 0
    if axis == 'col':
        axis = 1

    df = self.export_df()
    df = df.sample(n=num_samples, replace=replace, weights=weights, random_state=random_state, axis=axis)

    self.load_df(df)
[ "def", "random_sample", "(", "self", ",", "num_samples", ",", "df", "=", "None", ",", "replace", "=", "False", ",", "weights", "=", "None", ",", "random_state", "=", "100", ",", "axis", "=", "'row'", ")", ":", "if", "df", "is", "None", ":", "df", "=", "self", ".", "dat_to_df", "(", ")", "if", "axis", "==", "'row'", ":", "axis", "=", "0", "if", "axis", "==", "'col'", ":", "axis", "=", "1", "df", "=", "self", ".", "export_df", "(", ")", "df", "=", "df", ".", "sample", "(", "n", "=", "num_samples", ",", "replace", "=", "replace", ",", "weights", "=", "weights", ",", "random_state", "=", "random_state", ",", "axis", "=", "axis", ")", "self", ".", "load_df", "(", "df", ")" ]
Return random sample of matrix.
[ "Return", "random", "sample", "of", "matrix", "." ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L342-L358
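A hedged follow-on sketch using the same assumed net object; the parameter names come from the signature in the record above:

# keep a reproducible random subset of 100 columns (axis='col') of the loaded matrix
net.random_sample(num_samples=100, axis='col', replace=False, random_state=99)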
3,602
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.load_gene_exp_to_df
python
def load_gene_exp_to_df(inst_path):
    '''
    Loads gene expression data from 10x in sparse matrix format and returns a
    Pandas dataframe
    '''
    import pandas as pd
    from scipy import io
    from scipy import sparse
    from ast import literal_eval as make_tuple

    # matrix
    Matrix = io.mmread( inst_path + 'matrix.mtx')
    mat = Matrix.todense()

    # genes
    filename = inst_path + 'genes.tsv'
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()

    # # add unique id to all genes
    # genes = []
    # unique_id = 0
    # for inst_line in lines:
    #     inst_line = inst_line.strip().split()
    #     if len(inst_line) > 1:
    #         inst_gene = inst_line[1]
    #     else:
    #         inst_gene = inst_line[0]
    #     genes.append(inst_gene + '_' + str(unique_id))
    #     unique_id = unique_id + 1

    # add unique id only to duplicate genes
    ini_genes = []
    for inst_line in lines:
        inst_line = inst_line.strip().split()
        if len(inst_line) > 1:
            inst_gene = inst_line[1]
        else:
            inst_gene = inst_line[0]
        ini_genes.append(inst_gene)

    gene_name_count = pd.Series(ini_genes).value_counts()
    duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()

    dup_index = {}
    genes = []
    for inst_row in ini_genes:
        # add index to non-unique genes
        if inst_row in duplicate_genes:
            # calc_non-unque index
            if inst_row not in dup_index:
                dup_index[inst_row] = 1
            else:
                dup_index[inst_row] = dup_index[inst_row] + 1
            new_row = inst_row + '_' + str(dup_index[inst_row])
        else:
            new_row = inst_row
        genes.append(new_row)

    # barcodes
    filename = inst_path + 'barcodes.tsv'
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()

    cell_barcodes = []
    for inst_bc in lines:
        inst_bc = inst_bc.strip().split('\t')

        # remove dash from barcodes if necessary
        if '-' in inst_bc[0]:
            inst_bc[0] = inst_bc[0].split('-')[0]
        cell_barcodes.append(inst_bc[0])

    # parse tuples if necessary
    try:
        cell_barcodes = [make_tuple(x) for x in cell_barcodes]
    except:
        pass

    try:
        genes = [make_tuple(x) for x in genes]
    except:
        pass

    # make dataframe
    df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)

    return df
[ "def", "load_gene_exp_to_df", "(", "inst_path", ")", ":", "import", "pandas", "as", "pd", "from", "scipy", "import", "io", "from", "scipy", "import", "sparse", "from", "ast", "import", "literal_eval", "as", "make_tuple", "# matrix", "Matrix", "=", "io", ".", "mmread", "(", "inst_path", "+", "'matrix.mtx'", ")", "mat", "=", "Matrix", ".", "todense", "(", ")", "# genes", "filename", "=", "inst_path", "+", "'genes.tsv'", "f", "=", "open", "(", "filename", ",", "'r'", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "# # add unique id to all genes", "# genes = []", "# unique_id = 0", "# for inst_line in lines:", "# inst_line = inst_line.strip().split()", "# if len(inst_line) > 1:", "# inst_gene = inst_line[1]", "# else:", "# inst_gene = inst_line[0]", "# genes.append(inst_gene + '_' + str(unique_id))", "# unique_id = unique_id + 1", "# add unique id only to duplicate genes", "ini_genes", "=", "[", "]", "for", "inst_line", "in", "lines", ":", "inst_line", "=", "inst_line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "inst_line", ")", ">", "1", ":", "inst_gene", "=", "inst_line", "[", "1", "]", "else", ":", "inst_gene", "=", "inst_line", "[", "0", "]", "ini_genes", ".", "append", "(", "inst_gene", ")", "gene_name_count", "=", "pd", ".", "Series", "(", "ini_genes", ")", ".", "value_counts", "(", ")", "duplicate_genes", "=", "gene_name_count", "[", "gene_name_count", ">", "1", "]", ".", "index", ".", "tolist", "(", ")", "dup_index", "=", "{", "}", "genes", "=", "[", "]", "for", "inst_row", "in", "ini_genes", ":", "# add index to non-unique genes", "if", "inst_row", "in", "duplicate_genes", ":", "# calc_non-unque index", "if", "inst_row", "not", "in", "dup_index", ":", "dup_index", "[", "inst_row", "]", "=", "1", "else", ":", "dup_index", "[", "inst_row", "]", "=", "dup_index", "[", "inst_row", "]", "+", "1", "new_row", "=", "inst_row", "+", "'_'", "+", "str", "(", "dup_index", "[", "inst_row", "]", ")", "else", ":", "new_row", "=", "inst_row", "genes", ".", "append", "(", "new_row", ")", "# barcodes", "filename", "=", "inst_path", "+", "'barcodes.tsv'", "f", "=", "open", "(", "filename", ",", "'r'", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "cell_barcodes", "=", "[", "]", "for", "inst_bc", "in", "lines", ":", "inst_bc", "=", "inst_bc", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "# remove dash from barcodes if necessary", "if", "'-'", "in", "inst_bc", "[", "0", "]", ":", "inst_bc", "[", "0", "]", "=", "inst_bc", "[", "0", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", "cell_barcodes", ".", "append", "(", "inst_bc", "[", "0", "]", ")", "# parse tuples if necessary", "try", ":", "cell_barcodes", "=", "[", "make_tuple", "(", "x", ")", "for", "x", "in", "cell_barcodes", "]", "except", ":", "pass", "try", ":", "genes", "=", "[", "make_tuple", "(", "x", ")", "for", "x", "in", "genes", "]", "except", ":", "pass", "# make dataframe", "df", "=", "pd", ".", "DataFrame", "(", "mat", ",", "index", "=", "genes", ",", "columns", "=", "cell_barcodes", ")", "return", "df" ]
Loads gene expression data from 10x in sparse matrix format and returns a Pandas dataframe
[ "Loads", "gene", "expression", "data", "from", "10x", "in", "sparse", "matrix", "format", "and", "returns", "a", "Pandas", "dataframe" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L453-L551
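A usage sketch for the 10x loader above; the directory path is hypothetical and must end with a slash, since the function concatenates 'matrix.mtx', 'genes.tsv', and 'barcodes.tsv' directly onto inst_path. Because the signature takes no self, it can be called through the class:

from clustergrammer2 import Network  # assumed import path

df_gex = Network.load_gene_exp_to_df('data/filtered_gene_bc_matrices/hg19/')
print(df_gex.shape)  # genes x cell barcodes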
3,603
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.sim_same_and_diff_category_samples
python
def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine', equal_var=False, plot_roc=True, precalc_dist=False, calc_roc=True):
    '''
    Calculate the similarity of samples from the same and different categories. The
    cat_index gives the index of the category, where 1 in the first category
    '''
    cols = df.columns.tolist()

    if type(precalc_dist) == bool:
        # compute distnace between rows (transpose to get cols as rows)
        dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
    else:
        dist_arr = precalc_dist

    # generate sample names with categories
    sample_combos = list(combinations(range(df.shape[1]), 2))

    sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)]

    ser_dist = pd.Series(data=dist_arr, index=sample_names)

    # find same-cat sample comparisons
    same_cat = [x for x in sample_names if x.split('_')[1] == 'same']

    # find diff-cat sample comparisons
    diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']

    # make series of same and diff category sample comparisons
    ser_same = ser_dist[same_cat]
    ser_same.name = 'Same Category'
    ser_diff = ser_dist[diff_cat]
    ser_diff.name = 'Different Category'

    sim_dict = {}
    roc_data = {}
    sim_data = {}

    sim_dict['same'] = ser_same
    sim_dict['diff'] = ser_diff

    pval_dict = {}
    ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var)
    ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)

    if calc_roc:
        # calc AUC
        true_index = list(np.ones(sim_dict['same'].shape[0]))
        false_index = list(np.zeros(sim_dict['diff'].shape[0]))
        y_true = true_index + false_index

        true_val = list(sim_dict['same'].get_values())
        false_val = list(sim_dict['diff'].get_values())
        y_score = true_val + false_val

        fpr, tpr, thresholds = roc_curve(y_true, y_score)

        inst_auc = auc(fpr, tpr)

        if plot_roc:
            plt.figure()
            plt.plot(fpr, tpr)
            plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
            plt.figure(figsize=(10, 10))

            print('AUC', inst_auc)

        roc_data['true'] = y_true
        roc_data['score'] = y_score
        roc_data['fpr'] = fpr
        roc_data['tpr'] = tpr
        roc_data['thresholds'] = thresholds
        roc_data['auc'] = inst_auc

    sim_data['sim_dict'] = sim_dict
    sim_data['pval_dict'] = pval_dict
    sim_data['roc_data'] = roc_data

    return sim_data
[ "def", "sim_same_and_diff_category_samples", "(", "self", ",", "df", ",", "cat_index", "=", "1", ",", "dist_type", "=", "'cosine'", ",", "equal_var", "=", "False", ",", "plot_roc", "=", "True", ",", "precalc_dist", "=", "False", ",", "calc_roc", "=", "True", ")", ":", "cols", "=", "df", ".", "columns", ".", "tolist", "(", ")", "if", "type", "(", "precalc_dist", ")", "==", "bool", ":", "# compute distnace between rows (transpose to get cols as rows)", "dist_arr", "=", "1", "-", "pdist", "(", "df", ".", "transpose", "(", ")", ",", "metric", "=", "dist_type", ")", "else", ":", "dist_arr", "=", "precalc_dist", "# generate sample names with categories", "sample_combos", "=", "list", "(", "combinations", "(", "range", "(", "df", ".", "shape", "[", "1", "]", ")", ",", "2", ")", ")", "sample_names", "=", "[", "str", "(", "ind", ")", "+", "'_same'", "if", "cols", "[", "x", "[", "0", "]", "]", "[", "cat_index", "]", "==", "cols", "[", "x", "[", "1", "]", "]", "[", "cat_index", "]", "else", "str", "(", "ind", ")", "+", "'_different'", "for", "ind", ",", "x", "in", "enumerate", "(", "sample_combos", ")", "]", "ser_dist", "=", "pd", ".", "Series", "(", "data", "=", "dist_arr", ",", "index", "=", "sample_names", ")", "# find same-cat sample comparisons", "same_cat", "=", "[", "x", "for", "x", "in", "sample_names", "if", "x", ".", "split", "(", "'_'", ")", "[", "1", "]", "==", "'same'", "]", "# find diff-cat sample comparisons", "diff_cat", "=", "[", "x", "for", "x", "in", "sample_names", "if", "x", ".", "split", "(", "'_'", ")", "[", "1", "]", "==", "'different'", "]", "# make series of same and diff category sample comparisons", "ser_same", "=", "ser_dist", "[", "same_cat", "]", "ser_same", ".", "name", "=", "'Same Category'", "ser_diff", "=", "ser_dist", "[", "diff_cat", "]", "ser_diff", ".", "name", "=", "'Different Category'", "sim_dict", "=", "{", "}", "roc_data", "=", "{", "}", "sim_data", "=", "{", "}", "sim_dict", "[", "'same'", "]", "=", "ser_same", "sim_dict", "[", "'diff'", "]", "=", "ser_diff", "pval_dict", "=", "{", "}", "ttest_stat", ",", "pval_dict", "[", "'ttest'", "]", "=", "ttest_ind", "(", "ser_diff", ",", "ser_same", ",", "equal_var", "=", "equal_var", ")", "ttest_stat", ",", "pval_dict", "[", "'mannwhitney'", "]", "=", "mannwhitneyu", "(", "ser_diff", ",", "ser_same", ")", "if", "calc_roc", ":", "# calc AUC", "true_index", "=", "list", "(", "np", ".", "ones", "(", "sim_dict", "[", "'same'", "]", ".", "shape", "[", "0", "]", ")", ")", "false_index", "=", "list", "(", "np", ".", "zeros", "(", "sim_dict", "[", "'diff'", "]", ".", "shape", "[", "0", "]", ")", ")", "y_true", "=", "true_index", "+", "false_index", "true_val", "=", "list", "(", "sim_dict", "[", "'same'", "]", ".", "get_values", "(", ")", ")", "false_val", "=", "list", "(", "sim_dict", "[", "'diff'", "]", ".", "get_values", "(", ")", ")", "y_score", "=", "true_val", "+", "false_val", "fpr", ",", "tpr", ",", "thresholds", "=", "roc_curve", "(", "y_true", ",", "y_score", ")", "inst_auc", "=", "auc", "(", "fpr", ",", "tpr", ")", "if", "plot_roc", ":", "plt", ".", "figure", "(", ")", "plt", ".", "plot", "(", "fpr", ",", "tpr", ")", "plt", ".", "plot", "(", "[", "0", ",", "1", "]", ",", "[", "0", ",", "1", "]", ",", "color", "=", "'navy'", ",", "linestyle", "=", "'--'", ")", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "10", ")", ")", "print", "(", "'AUC'", ",", "inst_auc", ")", "roc_data", "[", "'true'", "]", "=", "y_true", "roc_data", "[", "'score'", "]", "=", "y_score", "roc_data", "[", "'fpr'", "]", "=", 
"fpr", "roc_data", "[", "'tpr'", "]", "=", "tpr", "roc_data", "[", "'thresholds'", "]", "=", "thresholds", "roc_data", "[", "'auc'", "]", "=", "inst_auc", "sim_data", "[", "'sim_dict'", "]", "=", "sim_dict", "sim_data", "[", "'pval_dict'", "]", "=", "pval_dict", "sim_data", "[", "'roc_data'", "]", "=", "roc_data", "return", "sim_data" ]
Calculate the similarity of samples from the same and different categories. The cat_index gives the index of the category, where 1 in the first category
[ "Calculate", "the", "similarity", "of", "samples", "from", "the", "same", "and", "different", "categories", ".", "The", "cat_index", "gives", "the", "index", "of", "the", "category", "where", "1", "in", "the", "first", "category" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L583-L663
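An illustrative call, assuming a DataFrame df_cat whose column labels are tuples carrying a category string at cat_index 1 (the column naming is an assumption inferred from how the method indexes cols[x][cat_index]):

sim_data = net.sim_same_and_diff_category_samples(df_cat, cat_index=1, dist_type='cosine', plot_roc=False)
print(sim_data['pval_dict'])        # t-test and Mann-Whitney p-values
print(sim_data['roc_data']['auc'])  # AUC for separating same- vs different-category pairs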
3,604
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.generate_signatures
python
def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05, num_top_dims=False, verbose=True, equal_var=False):
    ''' Generate signatures for column categories '''
    df_t = df_ini.transpose()

    # remove columns with constant values
    df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]

    df = self.row_tuple_to_multiindex(df_t)

    cell_types = sorted(list(set(df.index.get_level_values(category_level).tolist())))

    keep_genes = []
    keep_genes_dict = {}
    gene_pval_dict = {}
    all_fold_info = {}

    for inst_ct in cell_types:
        inst_ct_mat = df.xs(key=inst_ct, level=category_level)
        inst_other_mat = df.drop(inst_ct, level=category_level)

        # save mean values and fold change
        fold_info = {}
        fold_info['cluster_mean'] = inst_ct_mat.mean()
        fold_info['other_mean'] = inst_other_mat.mean()
        fold_info['log2_fold'] = fold_info['cluster_mean']/fold_info['other_mean']
        fold_info['log2_fold'] = fold_info['log2_fold'].apply(np.log2)
        all_fold_info[inst_ct] = fold_info

        inst_stats, inst_pvals = ttest_ind(inst_ct_mat, inst_other_mat, axis=0, equal_var=equal_var)

        ser_pval = pd.Series(data=inst_pvals, index=df.columns.tolist()).sort_values()

        if num_top_dims == False:
            ser_pval_keep = ser_pval[ser_pval < pval_cutoff]
        else:
            ser_pval_keep = ser_pval[:num_top_dims]

        gene_pval_dict[inst_ct] = ser_pval_keep

        inst_keep = ser_pval_keep.index.tolist()
        keep_genes.extend(inst_keep)
        keep_genes_dict[inst_ct] = inst_keep

    keep_genes = sorted(list(set(keep_genes)))

    df_gbm = df.groupby(level=category_level).mean().transpose()
    cols = df_gbm.columns.tolist()
    new_cols = []
    for inst_col in cols:
        new_col = (inst_col, category_level + ': ' + inst_col)
        new_cols.append(new_col)
    df_gbm.columns = new_cols

    df_sig = df_gbm.ix[keep_genes]

    if len(keep_genes) == 0 and verbose:
        print('found no informative dimensions')

    df_gene_pval = pd.concat(gene_pval_dict, axis=1, sort=False)

    return df_sig, keep_genes_dict, df_gene_pval, all_fold_info
[ "def", "generate_signatures", "(", "self", ",", "df_ini", ",", "category_level", ",", "pval_cutoff", "=", "0.05", ",", "num_top_dims", "=", "False", ",", "verbose", "=", "True", ",", "equal_var", "=", "False", ")", ":", "df_t", "=", "df_ini", ".", "transpose", "(", ")", "# remove columns with constant values", "df_t", "=", "df_t", ".", "loc", "[", ":", ",", "(", "df_t", "!=", "df_t", ".", "iloc", "[", "0", "]", ")", ".", "any", "(", ")", "]", "df", "=", "self", ".", "row_tuple_to_multiindex", "(", "df_t", ")", "cell_types", "=", "sorted", "(", "list", "(", "set", "(", "df", ".", "index", ".", "get_level_values", "(", "category_level", ")", ".", "tolist", "(", ")", ")", ")", ")", "keep_genes", "=", "[", "]", "keep_genes_dict", "=", "{", "}", "gene_pval_dict", "=", "{", "}", "all_fold_info", "=", "{", "}", "for", "inst_ct", "in", "cell_types", ":", "inst_ct_mat", "=", "df", ".", "xs", "(", "key", "=", "inst_ct", ",", "level", "=", "category_level", ")", "inst_other_mat", "=", "df", ".", "drop", "(", "inst_ct", ",", "level", "=", "category_level", ")", "# save mean values and fold change", "fold_info", "=", "{", "}", "fold_info", "[", "'cluster_mean'", "]", "=", "inst_ct_mat", ".", "mean", "(", ")", "fold_info", "[", "'other_mean'", "]", "=", "inst_other_mat", ".", "mean", "(", ")", "fold_info", "[", "'log2_fold'", "]", "=", "fold_info", "[", "'cluster_mean'", "]", "/", "fold_info", "[", "'other_mean'", "]", "fold_info", "[", "'log2_fold'", "]", "=", "fold_info", "[", "'log2_fold'", "]", ".", "apply", "(", "np", ".", "log2", ")", "all_fold_info", "[", "inst_ct", "]", "=", "fold_info", "inst_stats", ",", "inst_pvals", "=", "ttest_ind", "(", "inst_ct_mat", ",", "inst_other_mat", ",", "axis", "=", "0", ",", "equal_var", "=", "equal_var", ")", "ser_pval", "=", "pd", ".", "Series", "(", "data", "=", "inst_pvals", ",", "index", "=", "df", ".", "columns", ".", "tolist", "(", ")", ")", ".", "sort_values", "(", ")", "if", "num_top_dims", "==", "False", ":", "ser_pval_keep", "=", "ser_pval", "[", "ser_pval", "<", "pval_cutoff", "]", "else", ":", "ser_pval_keep", "=", "ser_pval", "[", ":", "num_top_dims", "]", "gene_pval_dict", "[", "inst_ct", "]", "=", "ser_pval_keep", "inst_keep", "=", "ser_pval_keep", ".", "index", ".", "tolist", "(", ")", "keep_genes", ".", "extend", "(", "inst_keep", ")", "keep_genes_dict", "[", "inst_ct", "]", "=", "inst_keep", "keep_genes", "=", "sorted", "(", "list", "(", "set", "(", "keep_genes", ")", ")", ")", "df_gbm", "=", "df", ".", "groupby", "(", "level", "=", "category_level", ")", ".", "mean", "(", ")", ".", "transpose", "(", ")", "cols", "=", "df_gbm", ".", "columns", ".", "tolist", "(", ")", "new_cols", "=", "[", "]", "for", "inst_col", "in", "cols", ":", "new_col", "=", "(", "inst_col", ",", "category_level", "+", "': '", "+", "inst_col", ")", "new_cols", ".", "append", "(", "new_col", ")", "df_gbm", ".", "columns", "=", "new_cols", "df_sig", "=", "df_gbm", ".", "ix", "[", "keep_genes", "]", "if", "len", "(", "keep_genes", ")", "==", "0", "and", "verbose", ":", "print", "(", "'found no informative dimensions'", ")", "df_gene_pval", "=", "pd", ".", "concat", "(", "gene_pval_dict", ",", "axis", "=", "1", ",", "sort", "=", "False", ")", "return", "df_sig", ",", "keep_genes_dict", ",", "df_gene_pval", ",", "all_fold_info" ]
Generate signatures for column categories
[ "Generate", "signatures", "for", "column", "categories" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L665-L729
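A hedged sketch of generating per-category signatures; df_cat and the category level name are illustrative placeholders for a matrix with categorical column tuples, not values taken from this record:

df_sig, keep_genes_dict, df_gene_pval, fold_info = net.generate_signatures(
    df_cat,
    category_level='Cell Type',   # level name is hypothetical
    pval_cutoff=0.01,
    num_top_dims=50)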
3,605
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.predict_cats_from_sigs
python
def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category', truth_level=1, unknown_thresh=-1):
    ''' Predict category using signature '''
    keep_rows = df_sig_ini.index.tolist()
    data_rows = df_data_ini.index.tolist()

    common_rows = list(set(data_rows).intersection(keep_rows))

    df_data = deepcopy(df_data_ini.ix[common_rows])
    df_sig = deepcopy(df_sig_ini.ix[common_rows])

    # calculate sim_mat of df_data and df_sig
    cell_types = df_sig.columns.tolist()
    barcodes = df_data.columns.tolist()
    sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
    df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()

    # get the top column value (most similar signature)
    df_sim_top = df_sim.idxmax(axis=1)

    # get the maximum similarity of a cell to a cell type definition
    max_sim = df_sim.max(axis=1)

    unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()

    # assign unknown cells (need category of same name)
    df_sim_top[unknown_cells] = 'Unknown'

    # add predicted category name to top list
    top_list = df_sim_top.get_values()
    top_list = [ predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]

    # add cell type category to input data
    df_cat = deepcopy(df_data)
    cols = df_cat.columns.tolist()
    new_cols = []

    # check whether the columns have the true category available
    has_truth = False
    if type(cols[0]) is tuple:
        has_truth = True

    if has_truth:
        new_cols = [tuple(list(a) + [b]) for a, b in zip(cols, top_list)]
    else:
        new_cols = [tuple([a] + [b]) for a, b in zip(cols, top_list)]

    # transfer new categories
    df_cat.columns = new_cols

    # keep track of true and predicted labels
    y_info = {}
    y_info['true'] = []
    y_info['pred'] = []

    if has_truth:
        y_info['true'] = [x[truth_level].split(': ')[1] for x in cols]
        y_info['pred'] = [x.split(': ')[1] for x in top_list]

    return df_cat, df_sim.transpose(), y_info
[ "def", "predict_cats_from_sigs", "(", "self", ",", "df_data_ini", ",", "df_sig_ini", ",", "dist_type", "=", "'cosine'", ",", "predict_level", "=", "'Predict Category'", ",", "truth_level", "=", "1", ",", "unknown_thresh", "=", "-", "1", ")", ":", "keep_rows", "=", "df_sig_ini", ".", "index", ".", "tolist", "(", ")", "data_rows", "=", "df_data_ini", ".", "index", ".", "tolist", "(", ")", "common_rows", "=", "list", "(", "set", "(", "data_rows", ")", ".", "intersection", "(", "keep_rows", ")", ")", "df_data", "=", "deepcopy", "(", "df_data_ini", ".", "ix", "[", "common_rows", "]", ")", "df_sig", "=", "deepcopy", "(", "df_sig_ini", ".", "ix", "[", "common_rows", "]", ")", "# calculate sim_mat of df_data and df_sig", "cell_types", "=", "df_sig", ".", "columns", ".", "tolist", "(", ")", "barcodes", "=", "df_data", ".", "columns", ".", "tolist", "(", ")", "sim_mat", "=", "1", "-", "pairwise_distances", "(", "df_sig", ".", "transpose", "(", ")", ",", "df_data", ".", "transpose", "(", ")", ",", "metric", "=", "dist_type", ")", "df_sim", "=", "pd", ".", "DataFrame", "(", "data", "=", "sim_mat", ",", "index", "=", "cell_types", ",", "columns", "=", "barcodes", ")", ".", "transpose", "(", ")", "# get the top column value (most similar signature)", "df_sim_top", "=", "df_sim", ".", "idxmax", "(", "axis", "=", "1", ")", "# get the maximum similarity of a cell to a cell type definition", "max_sim", "=", "df_sim", ".", "max", "(", "axis", "=", "1", ")", "unknown_cells", "=", "max_sim", "[", "max_sim", "<", "unknown_thresh", "]", ".", "index", ".", "tolist", "(", ")", "# assign unknown cells (need category of same name)", "df_sim_top", "[", "unknown_cells", "]", "=", "'Unknown'", "# add predicted category name to top list", "top_list", "=", "df_sim_top", ".", "get_values", "(", ")", "top_list", "=", "[", "predict_level", "+", "': '", "+", "x", "[", "0", "]", "if", "type", "(", "x", ")", "is", "tuple", "else", "predict_level", "+", "': '", "+", "x", "for", "x", "in", "top_list", "]", "# add cell type category to input data", "df_cat", "=", "deepcopy", "(", "df_data", ")", "cols", "=", "df_cat", ".", "columns", ".", "tolist", "(", ")", "new_cols", "=", "[", "]", "# check whether the columns have the true category available", "has_truth", "=", "False", "if", "type", "(", "cols", "[", "0", "]", ")", "is", "tuple", ":", "has_truth", "=", "True", "if", "has_truth", ":", "new_cols", "=", "[", "tuple", "(", "list", "(", "a", ")", "+", "[", "b", "]", ")", "for", "a", ",", "b", "in", "zip", "(", "cols", ",", "top_list", ")", "]", "else", ":", "new_cols", "=", "[", "tuple", "(", "[", "a", "]", "+", "[", "b", "]", ")", "for", "a", ",", "b", "in", "zip", "(", "cols", ",", "top_list", ")", "]", "# transfer new categories", "df_cat", ".", "columns", "=", "new_cols", "# keep track of true and predicted labels", "y_info", "=", "{", "}", "y_info", "[", "'true'", "]", "=", "[", "]", "y_info", "[", "'pred'", "]", "=", "[", "]", "if", "has_truth", ":", "y_info", "[", "'true'", "]", "=", "[", "x", "[", "truth_level", "]", ".", "split", "(", "': '", ")", "[", "1", "]", "for", "x", "in", "cols", "]", "y_info", "[", "'pred'", "]", "=", "[", "x", ".", "split", "(", "': '", ")", "[", "1", "]", "for", "x", "in", "top_list", "]", "return", "df_cat", ",", "df_sim", ".", "transpose", "(", ")", ",", "y_info" ]
Predict category using signature
[ "Predict", "category", "using", "signature" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L731-L791
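A hedged sketch chaining a signature matrix into prediction for a new dataset; df_gex and df_sig stand in for the (assumed) expression and signature matrices from the records above:

df_pred_cat, df_sim, y_info = net.predict_cats_from_sigs(
    df_gex, df_sig,
    predict_level='Predict Category',  # default shown in the record
    unknown_thresh=0.25)               # cells below this max similarity become 'Unknown'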
3,606
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
Network.confusion_matrix_and_correct_series
python
def confusion_matrix_and_correct_series(self, y_info):
    ''' Generate confusion matrix from y_info '''

    a = deepcopy(y_info['true'])
    true_count = dict((i, a.count(i)) for i in set(a))

    a = deepcopy(y_info['pred'])
    pred_count = dict((i, a.count(i)) for i in set(a))

    sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
    conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
    df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)

    total_correct = np.trace(df_conf)
    total_pred = df_conf.sum().sum()
    fraction_correct = total_correct/float(total_pred)

    # calculate ser_correct
    correct_list = []
    cat_counts = df_conf.sum(axis=1)
    all_cols = df_conf.columns.tolist()
    for inst_cat in all_cols:
        inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
        correct_list.append(inst_correct)

    ser_correct = pd.Series(data=correct_list, index=all_cols)

    populations = {}
    populations['true'] = true_count
    populations['pred'] = pred_count

    return df_conf, populations, ser_correct, fraction_correct
[ "def", "confusion_matrix_and_correct_series", "(", "self", ",", "y_info", ")", ":", "a", "=", "deepcopy", "(", "y_info", "[", "'true'", "]", ")", "true_count", "=", "dict", "(", "(", "i", ",", "a", ".", "count", "(", "i", ")", ")", "for", "i", "in", "set", "(", "a", ")", ")", "a", "=", "deepcopy", "(", "y_info", "[", "'pred'", "]", ")", "pred_count", "=", "dict", "(", "(", "i", ",", "a", ".", "count", "(", "i", ")", ")", "for", "i", "in", "set", "(", "a", ")", ")", "sorted_cats", "=", "sorted", "(", "list", "(", "set", "(", "y_info", "[", "'true'", "]", "+", "y_info", "[", "'pred'", "]", ")", ")", ")", "conf_mat", "=", "confusion_matrix", "(", "y_info", "[", "'true'", "]", ",", "y_info", "[", "'pred'", "]", ",", "sorted_cats", ")", "df_conf", "=", "pd", ".", "DataFrame", "(", "conf_mat", ",", "index", "=", "sorted_cats", ",", "columns", "=", "sorted_cats", ")", "total_correct", "=", "np", ".", "trace", "(", "df_conf", ")", "total_pred", "=", "df_conf", ".", "sum", "(", ")", ".", "sum", "(", ")", "fraction_correct", "=", "total_correct", "/", "float", "(", "total_pred", ")", "# calculate ser_correct", "correct_list", "=", "[", "]", "cat_counts", "=", "df_conf", ".", "sum", "(", "axis", "=", "1", ")", "all_cols", "=", "df_conf", ".", "columns", ".", "tolist", "(", ")", "for", "inst_cat", "in", "all_cols", ":", "inst_correct", "=", "df_conf", "[", "inst_cat", "]", ".", "loc", "[", "inst_cat", "]", "/", "cat_counts", "[", "inst_cat", "]", "correct_list", ".", "append", "(", "inst_correct", ")", "ser_correct", "=", "pd", ".", "Series", "(", "data", "=", "correct_list", ",", "index", "=", "all_cols", ")", "populations", "=", "{", "}", "populations", "[", "'true'", "]", "=", "true_count", "populations", "[", "'pred'", "]", "=", "pred_count", "return", "df_conf", ",", "populations", ",", "ser_correct", ",", "fraction_correct" ]
Generate confusion matrix from y_info
[ "Generate", "confusion", "matrix", "from", "y_info" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L793-L825
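The y_info dictionary returned by predict_cats_from_sigs (when truth labels are present) can be passed straight to the helper above; a minimal sketch:

df_conf, populations, ser_correct, fraction_correct = net.confusion_matrix_and_correct_series(y_info)
print('fraction correct:', fraction_correct)
print(ser_correct.sort_values(ascending=False))  # per-category accuracy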
3,607
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/load_data.py
load_data_to_net
python
def load_data_to_net(net, inst_net):
    ''' load data into nodes and mat, also convert mat to numpy array'''
    net.dat['nodes'] = inst_net['nodes']
    net.dat['mat'] = inst_net['mat']
    data_formats.mat_to_numpy_arr(net)
[ "def", "load_data_to_net", "(", "net", ",", "inst_net", ")", ":", "net", ".", "dat", "[", "'nodes'", "]", "=", "inst_net", "[", "'nodes'", "]", "net", ".", "dat", "[", "'mat'", "]", "=", "inst_net", "[", "'mat'", "]", "data_formats", ".", "mat_to_numpy_arr", "(", "net", ")" ]
load data into nodes and mat, also convert mat to numpy array
[ "load", "data", "into", "nodes", "and", "mat", "also", "convert", "mat", "to", "numpy", "array" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/load_data.py#L96-L100
3,608
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/export_data.py
export_net_json
python
def export_net_json(net, net_type, indent='no-indent'):
    ''' export json string of dat '''
    import json
    from copy import deepcopy

    if net_type == 'dat':
        exp_dict = deepcopy(net.dat)

        if type(exp_dict['mat']) is not list:
            exp_dict['mat'] = exp_dict['mat'].tolist()

        if 'mat_orig' in exp_dict:
            exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()

    elif net_type == 'viz':
        exp_dict = net.viz

    elif net_type == 'sim_row':
        exp_dict = net.sim['row']

    elif net_type == 'sim_col':
        exp_dict = net.sim['col']

    # make json
    if indent == 'indent':
        exp_json = json.dumps(exp_dict, indent=2)
    else:
        exp_json = json.dumps(exp_dict)

    return exp_json
[ "def", "export_net_json", "(", "net", ",", "net_type", ",", "indent", "=", "'no-indent'", ")", ":", "import", "json", "from", "copy", "import", "deepcopy", "if", "net_type", "==", "'dat'", ":", "exp_dict", "=", "deepcopy", "(", "net", ".", "dat", ")", "if", "type", "(", "exp_dict", "[", "'mat'", "]", ")", "is", "not", "list", ":", "exp_dict", "[", "'mat'", "]", "=", "exp_dict", "[", "'mat'", "]", ".", "tolist", "(", ")", "if", "'mat_orig'", "in", "exp_dict", ":", "exp_dict", "[", "'mat_orig'", "]", "=", "exp_dict", "[", "'mat_orig'", "]", ".", "tolist", "(", ")", "elif", "net_type", "==", "'viz'", ":", "exp_dict", "=", "net", ".", "viz", "elif", "net_type", "==", "'sim_row'", ":", "exp_dict", "=", "net", ".", "sim", "[", "'row'", "]", "elif", "net_type", "==", "'sim_col'", ":", "exp_dict", "=", "net", ".", "sim", "[", "'col'", "]", "# make json", "if", "indent", "==", "'indent'", ":", "exp_json", "=", "json", ".", "dumps", "(", "exp_dict", ",", "indent", "=", "2", ")", "else", ":", "exp_json", "=", "json", ".", "dumps", "(", "exp_dict", ")", "return", "exp_json" ]
export json string of dat
[ "export", "json", "string", "of", "dat" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/export_data.py#L1-L29
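A short sketch of exporting the 'viz' JSON with indentation, based only on the branches visible in the function above; how net was clustered beforehand is assumed, and the output filename is hypothetical:

viz_json = export_net_json(net, 'viz', indent='indent')
with open('mult_view.json', 'w') as f:
    f.write(viz_json)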
3,609
ismms-himc/clustergrammer2
setupbase.py
install_npm
python
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):

    class NPM(BaseCommand):
        description = 'install package.json dependencies using npm'

        def run(self):
            if skip_npm:
                log.info('Skipping npm-installation')
                return

            node_package = path or HERE
            node_modules = pjoin(node_package, 'node_modules')
            is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))

            npm_cmd = npm

            if npm is None:
                if is_yarn:
                    npm_cmd = ['yarn']
                else:
                    npm_cmd = ['npm']

            if not which(npm_cmd[0]):
                log.error("`{0}` unavailable. If you're running this command "
                          "using sudo, make sure `{0}` is availble to sudo"
                          .format(npm_cmd[0]))
                return

            if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
                log.info('Installing build dependencies with npm. This may '
                         'take a while...')
                run(npm_cmd + ['install'], cwd=node_package)

            if build_dir and source_dir and not force:
                should_build = is_stale(build_dir, source_dir)
            else:
                should_build = True

            if should_build:
                run(npm_cmd + ['run', build_cmd], cwd=node_package)

    return NPM
[ "def", "install_npm", "(", "path", "=", "None", ",", "build_dir", "=", "None", ",", "source_dir", "=", "None", ",", "build_cmd", "=", "'build'", ",", "force", "=", "False", ",", "npm", "=", "None", ")", ":", "class", "NPM", "(", "BaseCommand", ")", ":", "description", "=", "'install package.json dependencies using npm'", "def", "run", "(", "self", ")", ":", "if", "skip_npm", ":", "log", ".", "info", "(", "'Skipping npm-installation'", ")", "return", "node_package", "=", "path", "or", "HERE", "node_modules", "=", "pjoin", "(", "node_package", ",", "'node_modules'", ")", "is_yarn", "=", "os", ".", "path", ".", "exists", "(", "pjoin", "(", "node_package", ",", "'yarn.lock'", ")", ")", "npm_cmd", "=", "npm", "if", "npm", "is", "None", ":", "if", "is_yarn", ":", "npm_cmd", "=", "[", "'yarn'", "]", "else", ":", "npm_cmd", "=", "[", "'npm'", "]", "if", "not", "which", "(", "npm_cmd", "[", "0", "]", ")", ":", "log", ".", "error", "(", "\"`{0}` unavailable. If you're running this command \"", "\"using sudo, make sure `{0}` is availble to sudo\"", ".", "format", "(", "npm_cmd", "[", "0", "]", ")", ")", "return", "if", "force", "or", "is_stale", "(", "node_modules", ",", "pjoin", "(", "node_package", ",", "'package.json'", ")", ")", ":", "log", ".", "info", "(", "'Installing build dependencies with npm. This may '", "'take a while...'", ")", "run", "(", "npm_cmd", "+", "[", "'install'", "]", ",", "cwd", "=", "node_package", ")", "if", "build_dir", "and", "source_dir", "and", "not", "force", ":", "should_build", "=", "is_stale", "(", "build_dir", ",", "source_dir", ")", "else", ":", "should_build", "=", "True", "if", "should_build", ":", "run", "(", "npm_cmd", "+", "[", "'run'", ",", "build_cmd", "]", ",", "cwd", "=", "node_package", ")", "return", "NPM" ]
Return a Command for managing an npm installation.

Note: The command is skipped if the `--skip-npm` flag is used.

Parameters
----------
path: str, optional
    The base path of the node package. Defaults to the repo root.
build_dir: str, optional
    The target build directory. If this and source_dir are given,
    the JavaScript will only be build if necessary.
source_dir: str, optional
    The source code directory.
build_cmd: str, optional
    The npm command to build assets to the build_dir.
npm: str or list, optional.
    The npm executable name, or a tuple of ['node', executable].
[ "Return", "a", "Command", "for", "managing", "an", "npm", "installation", "." ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L321-L377
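A hedged example of how this command factory is typically wired into setup.py alongside the other setupbase helpers; create_cmdclass, the 'jsdeps' name, and the js paths are assumptions drawn from the usual Jupyter-widget project layout, not from this record:

# in setup.py (illustrative)
cmdclass = create_cmdclass('jsdeps')
cmdclass['jsdeps'] = install_npm(
    HERE,
    build_cmd='build:all',
    source_dir=pjoin(HERE, 'js'),
    build_dir=pjoin(HERE, 'js', 'dist'))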
3,610
ismms-himc/clustergrammer2
setupbase.py
_glob_pjoin
python
def _glob_pjoin(*parts):
    if parts[0] in ('.', ''):
        parts = parts[1:]
    return pjoin(*parts).replace(os.sep, '/')
[ "def", "_glob_pjoin", "(", "*", "parts", ")", ":", "if", "parts", "[", "0", "]", "in", "(", "'.'", ",", "''", ")", ":", "parts", "=", "parts", "[", "1", ":", "]", "return", "pjoin", "(", "*", "parts", ")", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")" ]
Join paths for glob processing
[ "Join", "paths", "for", "glob", "processing" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L507-L511
3,611
ismms-himc/clustergrammer2
setupbase.py
_get_data_files
python
def _get_data_files(data_specs, existing, top=HERE):
    # Extract the existing data files into a staging object.
    file_data = defaultdict(list)
    for (path, files) in existing or []:
        file_data[path] = files

    # Extract the files and assign them to the proper data
    # files path.
    for (path, dname, pattern) in data_specs or []:
        if os.path.isabs(dname):
            dname = os.path.relpath(dname, top)
        dname = dname.replace(os.sep, '/')
        offset = 0 if dname in ('.', '') else len(dname) + 1

        files = _get_files(_glob_pjoin(dname, pattern), top=top)
        for fname in files:
            # Normalize the path.
            root = os.path.dirname(fname)
            full_path = _glob_pjoin(path, root[offset:])
            print(dname, root, full_path, offset)
            if full_path.endswith('/'):
                full_path = full_path[:-1]
            file_data[full_path].append(fname)

    # Construct the data files spec.
    data_files = []
    for (path, files) in file_data.items():
        data_files.append((path, files))
    return data_files
[ "def", "_get_data_files", "(", "data_specs", ",", "existing", ",", "top", "=", "HERE", ")", ":", "# Extract the existing data files into a staging object.", "file_data", "=", "defaultdict", "(", "list", ")", "for", "(", "path", ",", "files", ")", "in", "existing", "or", "[", "]", ":", "file_data", "[", "path", "]", "=", "files", "# Extract the files and assign them to the proper data", "# files path.", "for", "(", "path", ",", "dname", ",", "pattern", ")", "in", "data_specs", "or", "[", "]", ":", "if", "os", ".", "path", ".", "isabs", "(", "dname", ")", ":", "dname", "=", "os", ".", "path", ".", "relpath", "(", "dname", ",", "top", ")", "dname", "=", "dname", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")", "offset", "=", "0", "if", "dname", "in", "(", "'.'", ",", "''", ")", "else", "len", "(", "dname", ")", "+", "1", "files", "=", "_get_files", "(", "_glob_pjoin", "(", "dname", ",", "pattern", ")", ",", "top", "=", "top", ")", "for", "fname", "in", "files", ":", "# Normalize the path.", "root", "=", "os", ".", "path", ".", "dirname", "(", "fname", ")", "full_path", "=", "_glob_pjoin", "(", "path", ",", "root", "[", "offset", ":", "]", ")", "print", "(", "dname", ",", "root", ",", "full_path", ",", "offset", ")", "if", "full_path", ".", "endswith", "(", "'/'", ")", ":", "full_path", "=", "full_path", "[", ":", "-", "1", "]", "file_data", "[", "full_path", "]", ".", "append", "(", "fname", ")", "# Construct the data files spec.", "data_files", "=", "[", "]", "for", "(", "path", ",", "files", ")", "in", "file_data", ".", "items", "(", ")", ":", "data_files", ".", "append", "(", "(", "path", ",", "files", ")", ")", "return", "data_files" ]
Expand data file specs into valid data files metadata.

Parameters
----------
data_specs: list of tuples
    See [create_cmdclass] for description.
existing: list of tuples
    The existing distrubution data_files metadata.

Returns
-------
A valid list of data_files items.
[ "Expand", "data", "file", "specs", "into", "valid", "data", "files", "metadata", "." ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L514-L554
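An illustrative data_specs triple of (install path, source directory, glob pattern), matching the unpacking in the loop above; the concrete paths are hypothetical:

data_files_spec = [
    ('share/jupyter/nbextensions/clustergrammer2', 'clustergrammer2/nbextension', '**'),
]
data_files = _get_data_files(data_files_spec, existing=None)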
3,612
ismms-himc/clustergrammer2
setupbase.py
_get_files
python
def _get_files(file_patterns, top=HERE):
    if not isinstance(file_patterns, (list, tuple)):
        file_patterns = [file_patterns]

    for i, p in enumerate(file_patterns):
        if os.path.isabs(p):
            file_patterns[i] = os.path.relpath(p, top)

    matchers = [_compile_pattern(p) for p in file_patterns]

    files = set()

    for root, dirnames, filenames in os.walk(top):
        # Don't recurse into node_modules
        if 'node_modules' in dirnames:
            dirnames.remove('node_modules')
        for m in matchers:
            for filename in filenames:
                fn = os.path.relpath(_glob_pjoin(root, filename), top)
                fn = fn.replace(os.sep, '/')
                if m(fn):
                    files.add(fn.replace(os.sep, '/'))

    return list(files)
[ "def", "_get_files", "(", "file_patterns", ",", "top", "=", "HERE", ")", ":", "if", "not", "isinstance", "(", "file_patterns", ",", "(", "list", ",", "tuple", ")", ")", ":", "file_patterns", "=", "[", "file_patterns", "]", "for", "i", ",", "p", "in", "enumerate", "(", "file_patterns", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "p", ")", ":", "file_patterns", "[", "i", "]", "=", "os", ".", "path", ".", "relpath", "(", "p", ",", "top", ")", "matchers", "=", "[", "_compile_pattern", "(", "p", ")", "for", "p", "in", "file_patterns", "]", "files", "=", "set", "(", ")", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "top", ")", ":", "# Don't recurse into node_modules", "if", "'node_modules'", "in", "dirnames", ":", "dirnames", ".", "remove", "(", "'node_modules'", ")", "for", "m", "in", "matchers", ":", "for", "filename", "in", "filenames", ":", "fn", "=", "os", ".", "path", ".", "relpath", "(", "_glob_pjoin", "(", "root", ",", "filename", ")", ",", "top", ")", "fn", "=", "fn", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")", "if", "m", "(", "fn", ")", ":", "files", ".", "add", "(", "fn", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")", ")", "return", "list", "(", "files", ")" ]
Expand file patterns to a list of paths.

Parameters
-----------
file_patterns: list or str
    A list of glob patterns for the data file locations.
    The globs can be recursive if they include a `**`.
    They should be relative paths from the top directory or
    absolute paths.
top: str
    the directory to consider for data files

Note: Files in `node_modules` are ignored.
[ "Expand", "file", "patterns", "to", "a", "list", "of", "paths", "." ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L557-L595
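A minimal standalone sketch of how the walk-and-match loop in _get_files behaves; fnmatch.fnmatch stands in here for the package's _compile_pattern helper (the real helper additionally understands recursive `**` globs), and the temporary directory is purely illustrative:

import fnmatch
import os
import tempfile

# hypothetical stand-in for _compile_pattern (no '**' support in this sketch)
def compile_pattern(pattern):
    return lambda path: fnmatch.fnmatch(path, pattern)

top = tempfile.mkdtemp()
os.makedirs(os.path.join(top, 'pkg', 'node_modules'))
for rel in ('pkg/a.js', 'pkg/node_modules/b.js'):
    open(os.path.join(top, *rel.split('/')), 'w').close()

matcher = compile_pattern('pkg/*.js')
found = set()
for root, dirnames, filenames in os.walk(top):
    if 'node_modules' in dirnames:
        dirnames.remove('node_modules')  # pruned, exactly as in _get_files
    for filename in filenames:
        rel = os.path.relpath(os.path.join(root, filename), top).replace(os.sep, '/')
        if matcher(rel):
            found.add(rel)
print(found)  # {'pkg/a.js'} -- the node_modules directory is never even visited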
3,613
ismms-himc/clustergrammer2
setupbase.py
_get_package_data
def _get_package_data(root, file_patterns=None): """Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from `HERE`. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored. """ if file_patterns is None: file_patterns = ['*'] return _get_files(file_patterns, _glob_pjoin(HERE, root))
python
def _get_package_data(root, file_patterns=None): if file_patterns is None: file_patterns = ['*'] return _get_files(file_patterns, _glob_pjoin(HERE, root))
[ "def", "_get_package_data", "(", "root", ",", "file_patterns", "=", "None", ")", ":", "if", "file_patterns", "is", "None", ":", "file_patterns", "=", "[", "'*'", "]", "return", "_get_files", "(", "file_patterns", ",", "_glob_pjoin", "(", "HERE", ",", "root", ")", ")" ]
Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from `HERE`. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored.
[ "Expand", "file", "patterns", "to", "a", "list", "of", "package_data", "paths", "." ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L598-L616
3,614
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/run_filter.py
df_filter_row_sum
def df_filter_row_sum(df, threshold, take_abs=True): ''' filter rows in matrix at some threshold and remove columns that have a sum below this threshold ''' from copy import deepcopy from .__init__ import Network net = Network() if take_abs is True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) ini_rows = df_copy.index.values.tolist() df_copy = df_copy.transpose() tmp_sum = df_copy.sum(axis=0) tmp_sum = tmp_sum.abs() tmp_sum.sort_values(inplace=True, ascending=False) tmp_sum = tmp_sum[tmp_sum > threshold] keep_rows = sorted(tmp_sum.index.values.tolist()) if len(keep_rows) < len(ini_rows): df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_rows) if 'mat_up' in df: df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_rows) df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_rows) if 'mat_orig' in df: df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_rows) return df
python
def df_filter_row_sum(df, threshold, take_abs=True): ''' filter rows in matrix at some threshold and remove columns that have a sum below this threshold ''' from copy import deepcopy from .__init__ import Network net = Network() if take_abs is True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) ini_rows = df_copy.index.values.tolist() df_copy = df_copy.transpose() tmp_sum = df_copy.sum(axis=0) tmp_sum = tmp_sum.abs() tmp_sum.sort_values(inplace=True, ascending=False) tmp_sum = tmp_sum[tmp_sum > threshold] keep_rows = sorted(tmp_sum.index.values.tolist()) if len(keep_rows) < len(ini_rows): df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_rows) if 'mat_up' in df: df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_rows) df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_rows) if 'mat_orig' in df: df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_rows) return df
[ "def", "df_filter_row_sum", "(", "df", ",", "threshold", ",", "take_abs", "=", "True", ")", ":", "from", "copy", "import", "deepcopy", "from", ".", "__init__", "import", "Network", "net", "=", "Network", "(", ")", "if", "take_abs", "is", "True", ":", "df_copy", "=", "deepcopy", "(", "df", "[", "'mat'", "]", ".", "abs", "(", ")", ")", "else", ":", "df_copy", "=", "deepcopy", "(", "df", "[", "'mat'", "]", ")", "ini_rows", "=", "df_copy", ".", "index", ".", "values", ".", "tolist", "(", ")", "df_copy", "=", "df_copy", ".", "transpose", "(", ")", "tmp_sum", "=", "df_copy", ".", "sum", "(", "axis", "=", "0", ")", "tmp_sum", "=", "tmp_sum", ".", "abs", "(", ")", "tmp_sum", ".", "sort_values", "(", "inplace", "=", "True", ",", "ascending", "=", "False", ")", "tmp_sum", "=", "tmp_sum", "[", "tmp_sum", ">", "threshold", "]", "keep_rows", "=", "sorted", "(", "tmp_sum", ".", "index", ".", "values", ".", "tolist", "(", ")", ")", "if", "len", "(", "keep_rows", ")", "<", "len", "(", "ini_rows", ")", ":", "df", "[", "'mat'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat'", "]", ",", "keep_rows", "=", "keep_rows", ")", "if", "'mat_up'", "in", "df", ":", "df", "[", "'mat_up'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_up'", "]", ",", "keep_rows", "=", "keep_rows", ")", "df", "[", "'mat_dn'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_dn'", "]", ",", "keep_rows", "=", "keep_rows", ")", "if", "'mat_orig'", "in", "df", ":", "df", "[", "'mat_orig'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_orig'", "]", ",", "keep_rows", "=", "keep_rows", ")", "return", "df" ]
filter rows in matrix at some threshold and remove columns that have a sum below this threshold
[ "filter", "rows", "in", "matrix", "at", "some", "threshold", "and", "remove", "columns", "that", "have", "a", "sum", "below", "this", "threshold" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L1-L33
3,615
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/run_filter.py
df_filter_col_sum
def df_filter_col_sum(df, threshold, take_abs=True): ''' filter columns in matrix at some threshold and remove rows that have all zero values ''' from copy import deepcopy from .__init__ import Network net = Network() if take_abs is True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) df_copy = df_copy.transpose() df_copy = df_copy[df_copy.sum(axis=1) > threshold] df_copy = df_copy.transpose() df_copy = df_copy[df_copy.sum(axis=1) > 0] if take_abs is True: inst_rows = df_copy.index.tolist() inst_cols = df_copy.columns.tolist() df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols) if 'mat_up' in df: df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols) df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols) if 'mat_orig' in df: df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols) else: df['mat'] = df_copy return df
python
def df_filter_col_sum(df, threshold, take_abs=True): ''' filter columns in matrix at some threshold and remove rows that have all zero values ''' from copy import deepcopy from .__init__ import Network net = Network() if take_abs is True: df_copy = deepcopy(df['mat'].abs()) else: df_copy = deepcopy(df['mat']) df_copy = df_copy.transpose() df_copy = df_copy[df_copy.sum(axis=1) > threshold] df_copy = df_copy.transpose() df_copy = df_copy[df_copy.sum(axis=1) > 0] if take_abs is True: inst_rows = df_copy.index.tolist() inst_cols = df_copy.columns.tolist() df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols) if 'mat_up' in df: df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols) df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols) if 'mat_orig' in df: df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols) else: df['mat'] = df_copy return df
[ "def", "df_filter_col_sum", "(", "df", ",", "threshold", ",", "take_abs", "=", "True", ")", ":", "from", "copy", "import", "deepcopy", "from", ".", "__init__", "import", "Network", "net", "=", "Network", "(", ")", "if", "take_abs", "is", "True", ":", "df_copy", "=", "deepcopy", "(", "df", "[", "'mat'", "]", ".", "abs", "(", ")", ")", "else", ":", "df_copy", "=", "deepcopy", "(", "df", "[", "'mat'", "]", ")", "df_copy", "=", "df_copy", ".", "transpose", "(", ")", "df_copy", "=", "df_copy", "[", "df_copy", ".", "sum", "(", "axis", "=", "1", ")", ">", "threshold", "]", "df_copy", "=", "df_copy", ".", "transpose", "(", ")", "df_copy", "=", "df_copy", "[", "df_copy", ".", "sum", "(", "axis", "=", "1", ")", ">", "0", "]", "if", "take_abs", "is", "True", ":", "inst_rows", "=", "df_copy", ".", "index", ".", "tolist", "(", ")", "inst_cols", "=", "df_copy", ".", "columns", ".", "tolist", "(", ")", "df", "[", "'mat'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat'", "]", ",", "inst_rows", ",", "inst_cols", ")", "if", "'mat_up'", "in", "df", ":", "df", "[", "'mat_up'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_up'", "]", ",", "inst_rows", ",", "inst_cols", ")", "df", "[", "'mat_dn'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_dn'", "]", ",", "inst_rows", ",", "inst_cols", ")", "if", "'mat_orig'", "in", "df", ":", "df", "[", "'mat_orig'", "]", "=", "grab_df_subset", "(", "df", "[", "'mat_orig'", "]", ",", "inst_rows", ",", "inst_cols", ")", "else", ":", "df", "[", "'mat'", "]", "=", "df_copy", "return", "df" ]
filter columns in matrix at some threshold and remove rows that have all zero values
[ "filter", "columns", "in", "matrix", "at", "some", "threshold", "and", "remove", "rows", "that", "have", "all", "zero", "values" ]
5acea9bff7eda546cf0647b9e3647f631eb6f5f5
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/run_filter.py#L35-L68
3,616
vstinner/bytecode
bytecode/flags.py
infer_flags
def infer_flags(bytecode, is_async=False): """Infer the proper flags for a bytecode based on the instructions. """ flags = CompilerFlags(0) if not isinstance(bytecode, (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph)): msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph ' 'instance not %s') raise ValueError(msg % bytecode) instructions = (bytecode.get_instructions() if isinstance(bytecode, _bytecode.ControlFlowGraph) else bytecode) instr_names = {i.name for i in instructions if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))} if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}): flags |= CompilerFlags.OPTIMIZED flags |= bytecode.flags & (CompilerFlags.NEWLOCALS | CompilerFlags.VARARGS | CompilerFlags.VARKEYWORDS | CompilerFlags.NESTED) if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}: if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR: flags |= CompilerFlags.GENERATOR else: flags |= CompilerFlags.ASYNC_GENERATOR if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF', 'DELETE_DEREF', 'LOAD_CLASSDEREF'}): flags |= CompilerFlags.NOFREE if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or flags & CompilerFlags.ASYNC_GENERATOR) and (instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT', 'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or bytecode.flags & CompilerFlags.COROUTINE)): flags |= CompilerFlags.COROUTINE flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP if ([bool(flags & getattr(CompilerFlags, k)) for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR', 'ASYNC_GENERATOR')].count(True) > 1): raise ValueError("Code should not have more than one of the " "following flag set : generator, coroutine, " "iterable coroutine and async generator, got:" "%s" % flags) return flags
python
def infer_flags(bytecode, is_async=False): flags = CompilerFlags(0) if not isinstance(bytecode, (_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph)): msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph ' 'instance not %s') raise ValueError(msg % bytecode) instructions = (bytecode.get_instructions() if isinstance(bytecode, _bytecode.ControlFlowGraph) else bytecode) instr_names = {i.name for i in instructions if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))} if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}): flags |= CompilerFlags.OPTIMIZED flags |= bytecode.flags & (CompilerFlags.NEWLOCALS | CompilerFlags.VARARGS | CompilerFlags.VARKEYWORDS | CompilerFlags.NESTED) if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}: if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR: flags |= CompilerFlags.GENERATOR else: flags |= CompilerFlags.ASYNC_GENERATOR if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF', 'DELETE_DEREF', 'LOAD_CLASSDEREF'}): flags |= CompilerFlags.NOFREE if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or flags & CompilerFlags.ASYNC_GENERATOR) and (instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT', 'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or bytecode.flags & CompilerFlags.COROUTINE)): flags |= CompilerFlags.COROUTINE flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP if ([bool(flags & getattr(CompilerFlags, k)) for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR', 'ASYNC_GENERATOR')].count(True) > 1): raise ValueError("Code should not have more than one of the " "following flag set : generator, coroutine, " "iterable coroutine and async generator, got:" "%s" % flags) return flags
[ "def", "infer_flags", "(", "bytecode", ",", "is_async", "=", "False", ")", ":", "flags", "=", "CompilerFlags", "(", "0", ")", "if", "not", "isinstance", "(", "bytecode", ",", "(", "_bytecode", ".", "Bytecode", ",", "_bytecode", ".", "ConcreteBytecode", ",", "_bytecode", ".", "ControlFlowGraph", ")", ")", ":", "msg", "=", "(", "'Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '", "'instance not %s'", ")", "raise", "ValueError", "(", "msg", "%", "bytecode", ")", "instructions", "=", "(", "bytecode", ".", "get_instructions", "(", ")", "if", "isinstance", "(", "bytecode", ",", "_bytecode", ".", "ControlFlowGraph", ")", "else", "bytecode", ")", "instr_names", "=", "{", "i", ".", "name", "for", "i", "in", "instructions", "if", "not", "isinstance", "(", "i", ",", "(", "_bytecode", ".", "SetLineno", ",", "_bytecode", ".", "Label", ")", ")", "}", "if", "not", "(", "instr_names", "&", "{", "'STORE_NAME'", ",", "'LOAD_NAME'", ",", "'DELETE_NAME'", "}", ")", ":", "flags", "|=", "CompilerFlags", ".", "OPTIMIZED", "flags", "|=", "bytecode", ".", "flags", "&", "(", "CompilerFlags", ".", "NEWLOCALS", "|", "CompilerFlags", ".", "VARARGS", "|", "CompilerFlags", ".", "VARKEYWORDS", "|", "CompilerFlags", ".", "NESTED", ")", "if", "instr_names", "&", "{", "'YIELD_VALUE'", ",", "'YIELD_FROM'", "}", ":", "if", "not", "is_async", "and", "not", "bytecode", ".", "flags", "&", "CompilerFlags", ".", "ASYNC_GENERATOR", ":", "flags", "|=", "CompilerFlags", ".", "GENERATOR", "else", ":", "flags", "|=", "CompilerFlags", ".", "ASYNC_GENERATOR", "if", "not", "(", "instr_names", "&", "{", "'LOAD_CLOSURE'", ",", "'LOAD_DEREF'", ",", "'STORE_DEREF'", ",", "'DELETE_DEREF'", ",", "'LOAD_CLASSDEREF'", "}", ")", ":", "flags", "|=", "CompilerFlags", ".", "NOFREE", "if", "(", "not", "(", "bytecode", ".", "flags", "&", "CompilerFlags", ".", "ITERABLE_COROUTINE", "or", "flags", "&", "CompilerFlags", ".", "ASYNC_GENERATOR", ")", "and", "(", "instr_names", "&", "{", "'GET_AWAITABLE'", ",", "'GET_AITER'", ",", "'GET_ANEXT'", ",", "'BEFORE_ASYNC_WITH'", ",", "'SETUP_ASYNC_WITH'", "}", "or", "bytecode", ".", "flags", "&", "CompilerFlags", ".", "COROUTINE", ")", ")", ":", "flags", "|=", "CompilerFlags", ".", "COROUTINE", "flags", "|=", "bytecode", ".", "flags", "&", "CompilerFlags", ".", "ITERABLE_COROUTINE", "flags", "|=", "bytecode", ".", "flags", "&", "CompilerFlags", ".", "FUTURE_GENERATOR_STOP", "if", "(", "[", "bool", "(", "flags", "&", "getattr", "(", "CompilerFlags", ",", "k", ")", ")", "for", "k", "in", "(", "'COROUTINE'", ",", "'ITERABLE_COROUTINE'", ",", "'GENERATOR'", ",", "'ASYNC_GENERATOR'", ")", "]", ".", "count", "(", "True", ")", ">", "1", ")", ":", "raise", "ValueError", "(", "\"Code should not have more than one of the \"", "\"following flag set : generator, coroutine, \"", "\"iterable coroutine and async generator, got:\"", "\"%s\"", "%", "flags", ")", "return", "flags" ]
Infer the proper flags for a bytecode based on the instructions.
[ "Infer", "the", "proper", "flags", "for", "a", "bytecode", "based", "on", "the", "instructions", "." ]
e2a27287a464a10557c89c7959f3c4c4ac3cb8bf
https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/flags.py#L33-L89
3,617
vstinner/bytecode
bytecode/cfg.py
ControlFlowGraph.to_bytecode
def to_bytecode(self): """Convert to Bytecode.""" used_blocks = set() for block in self: target_block = block.get_jump() if target_block is not None: used_blocks.add(id(target_block)) labels = {} jumps = [] instructions = [] for block in self: if id(block) in used_blocks: new_label = Label() labels[id(block)] = new_label instructions.append(new_label) for instr in block: # don't copy SetLineno objects if isinstance(instr, (Instr, ConcreteInstr)): instr = instr.copy() if isinstance(instr.arg, BasicBlock): jumps.append(instr) instructions.append(instr) # Map to new labels for instr in jumps: instr.arg = labels[id(instr.arg)] bytecode = _bytecode.Bytecode() bytecode._copy_attr_from(self) bytecode.argnames = list(self.argnames) bytecode[:] = instructions return bytecode
python
def to_bytecode(self): used_blocks = set() for block in self: target_block = block.get_jump() if target_block is not None: used_blocks.add(id(target_block)) labels = {} jumps = [] instructions = [] for block in self: if id(block) in used_blocks: new_label = Label() labels[id(block)] = new_label instructions.append(new_label) for instr in block: # don't copy SetLineno objects if isinstance(instr, (Instr, ConcreteInstr)): instr = instr.copy() if isinstance(instr.arg, BasicBlock): jumps.append(instr) instructions.append(instr) # Map to new labels for instr in jumps: instr.arg = labels[id(instr.arg)] bytecode = _bytecode.Bytecode() bytecode._copy_attr_from(self) bytecode.argnames = list(self.argnames) bytecode[:] = instructions return bytecode
[ "def", "to_bytecode", "(", "self", ")", ":", "used_blocks", "=", "set", "(", ")", "for", "block", "in", "self", ":", "target_block", "=", "block", ".", "get_jump", "(", ")", "if", "target_block", "is", "not", "None", ":", "used_blocks", ".", "add", "(", "id", "(", "target_block", ")", ")", "labels", "=", "{", "}", "jumps", "=", "[", "]", "instructions", "=", "[", "]", "for", "block", "in", "self", ":", "if", "id", "(", "block", ")", "in", "used_blocks", ":", "new_label", "=", "Label", "(", ")", "labels", "[", "id", "(", "block", ")", "]", "=", "new_label", "instructions", ".", "append", "(", "new_label", ")", "for", "instr", "in", "block", ":", "# don't copy SetLineno objects", "if", "isinstance", "(", "instr", ",", "(", "Instr", ",", "ConcreteInstr", ")", ")", ":", "instr", "=", "instr", ".", "copy", "(", ")", "if", "isinstance", "(", "instr", ".", "arg", ",", "BasicBlock", ")", ":", "jumps", ".", "append", "(", "instr", ")", "instructions", ".", "append", "(", "instr", ")", "# Map to new labels", "for", "instr", "in", "jumps", ":", "instr", ".", "arg", "=", "labels", "[", "id", "(", "instr", ".", "arg", ")", "]", "bytecode", "=", "_bytecode", ".", "Bytecode", "(", ")", "bytecode", ".", "_copy_attr_from", "(", "self", ")", "bytecode", ".", "argnames", "=", "list", "(", "self", ".", "argnames", ")", "bytecode", "[", ":", "]", "=", "instructions", "return", "bytecode" ]
Convert to Bytecode.
[ "Convert", "to", "Bytecode", "." ]
e2a27287a464a10557c89c7959f3c4c4ac3cb8bf
https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/cfg.py#L279-L315
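Assuming the bytecode package is installed, a quick way to see infer_flags in action is to run it over the bytecode of a small generator function; the exact flag set can vary slightly across Python versions, but it should report GENERATOR (and typically OPTIMIZED, NEWLOCALS and NOFREE as well):

from bytecode import Bytecode
from bytecode.flags import CompilerFlags, infer_flags

def gen():
    yield 1

bc = Bytecode.from_code(gen.__code__)
flags = infer_flags(bc)
print(bool(flags & CompilerFlags.GENERATOR))  # True
print(flags)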
3,618
vstinner/bytecode
bytecode/cfg.py
ControlFlowGraph.to_code
def to_code(self, stacksize=None): """Convert to code.""" if stacksize is None: stacksize = self.compute_stacksize() bc = self.to_bytecode() return bc.to_code(stacksize=stacksize)
python
def to_code(self, stacksize=None): if stacksize is None: stacksize = self.compute_stacksize() bc = self.to_bytecode() return bc.to_code(stacksize=stacksize)
[ "def", "to_code", "(", "self", ",", "stacksize", "=", "None", ")", ":", "if", "stacksize", "is", "None", ":", "stacksize", "=", "self", ".", "compute_stacksize", "(", ")", "bc", "=", "self", ".", "to_bytecode", "(", ")", "return", "bc", ".", "to_code", "(", "stacksize", "=", "stacksize", ")" ]
Convert to code.
[ "Convert", "to", "code", "." ]
e2a27287a464a10557c89c7959f3c4c4ac3cb8bf
https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/cfg.py#L317-L322
3,619
vstinner/bytecode
bytecode/instr.py
Instr.set
def set(self, name, arg=UNSET): """Modify the instruction in-place. Replace name and arg attributes. Don't modify lineno. """ self._set(name, arg, self._lineno)
python
def set(self, name, arg=UNSET): self._set(name, arg, self._lineno)
[ "def", "set", "(", "self", ",", "name", ",", "arg", "=", "UNSET", ")", ":", "self", ".", "_set", "(", "name", ",", "arg", ",", "self", ".", "_lineno", ")" ]
Modify the instruction in-place. Replace name and arg attributes. Don't modify lineno.
[ "Modify", "the", "instruction", "in", "-", "place", "." ]
e2a27287a464a10557c89c7959f3c4c4ac3cb8bf
https://github.com/vstinner/bytecode/blob/e2a27287a464a10557c89c7959f3c4c4ac3cb8bf/bytecode/instr.py#L248-L253
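The two conversion methods above compose into a simple round trip: decompose a function into a control-flow graph, rebuild a code object from it, and patch it back onto the function. A minimal sketch, again assuming the bytecode package is installed:

from bytecode import Bytecode, ControlFlowGraph

def add_one(x):
    return x + 1

cfg = ControlFlowGraph.from_bytecode(Bytecode.from_code(add_one.__code__))
add_one.__code__ = cfg.to_code()  # stacksize is recomputed when not supplied
print(add_one(41))                # 42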
3,620
riga/tfdeploy
tfdeploy.py
LinSpace
def LinSpace(start, stop, num): """ Linspace op. """ return np.linspace(start, stop, num=num, dtype=np.float32),
python
def LinSpace(start, stop, num): return np.linspace(start, stop, num=num, dtype=np.float32),
[ "def", "LinSpace", "(", "start", ",", "stop", ",", "num", ")", ":", "return", "np", ".", "linspace", "(", "start", ",", "stop", ",", "num", "=", "num", ",", "dtype", "=", "np", ".", "float32", ")", "," ]
Linspace op.
[ "Linspace", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1003-L1007
3,621
riga/tfdeploy
tfdeploy.py
Range
def Range(start, limit, delta): """ Range op. """ return np.arange(start, limit, delta, dtype=np.int32),
python
def Range(start, limit, delta): return np.arange(start, limit, delta, dtype=np.int32),
[ "def", "Range", "(", "start", ",", "limit", ",", "delta", ")", ":", "return", "np", ".", "arange", "(", "start", ",", "limit", ",", "delta", ",", "dtype", "=", "np", ".", "int32", ")", "," ]
Range op.
[ "Range", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1011-L1015
3,622
riga/tfdeploy
tfdeploy.py
RandomUniformInt
def RandomUniformInt(shape, minval, maxval, seed): """ Random uniform int op. """ if seed: np.random.seed(seed) return np.random.randint(minval, maxval, size=shape),
python
def RandomUniformInt(shape, minval, maxval, seed): if seed: np.random.seed(seed) return np.random.randint(minval, maxval, size=shape),
[ "def", "RandomUniformInt", "(", "shape", ",", "minval", ",", "maxval", ",", "seed", ")", ":", "if", "seed", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "return", "np", ".", "random", ".", "randint", "(", "minval", ",", "maxval", ",", "size", "=", "shape", ")", "," ]
Random uniform int op.
[ "Random", "uniform", "int", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1060-L1066
3,623
riga/tfdeploy
tfdeploy.py
Rank
def Rank(a): """ Rank op. """ return np.array([len(a.shape)], dtype=np.int32),
python
def Rank(a): return np.array([len(a.shape)], dtype=np.int32),
[ "def", "Rank", "(", "a", ")", ":", "return", "np", ".", "array", "(", "[", "len", "(", "a", ".", "shape", ")", "]", ",", "dtype", "=", "np", ".", "int32", ")", "," ]
Rank op.
[ "Rank", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1114-L1118
3,624
riga/tfdeploy
tfdeploy.py
Squeeze
def Squeeze(a, squeeze_dims): """ Squeeze op, i.e. removes singular axes. """ if not squeeze_dims: squeeze_dims = list(range(len(a.shape))) slices = [(0 if (dim == 1 and i in squeeze_dims) else slice(None)) \ for i, dim in enumerate(a.shape)] return np.copy(a)[slices],
python
def Squeeze(a, squeeze_dims): if not squeeze_dims: squeeze_dims = list(range(len(a.shape))) slices = [(0 if (dim == 1 and i in squeeze_dims) else slice(None)) \ for i, dim in enumerate(a.shape)] return np.copy(a)[slices],
[ "def", "Squeeze", "(", "a", ",", "squeeze_dims", ")", ":", "if", "not", "squeeze_dims", ":", "squeeze_dims", "=", "list", "(", "range", "(", "len", "(", "a", ".", "shape", ")", ")", ")", "slices", "=", "[", "(", "0", "if", "(", "dim", "==", "1", "and", "i", "in", "squeeze_dims", ")", "else", "slice", "(", "None", ")", ")", "for", "i", ",", "dim", "in", "enumerate", "(", "a", ".", "shape", ")", "]", "return", "np", ".", "copy", "(", "a", ")", "[", "slices", "]", "," ]
Squeeze op, i.e. removes singular axes.
[ "Squeeze", "op", "i", ".", "e", ".", "removes", "singular", "axes", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1130-L1138
3,625
riga/tfdeploy
tfdeploy.py
ExpandDims
def ExpandDims(a, dim): """ Expand dim op, i.e. add singular axis at dim. """ shape = list(a.shape) if dim >= 0: shape.insert(dim, 1) else: shape.insert(len(shape) + dim + 1, 1) return np.copy(a).reshape(*shape),
python
def ExpandDims(a, dim): shape = list(a.shape) if dim >= 0: shape.insert(dim, 1) else: shape.insert(len(shape) + dim + 1, 1) return np.copy(a).reshape(*shape),
[ "def", "ExpandDims", "(", "a", ",", "dim", ")", ":", "shape", "=", "list", "(", "a", ".", "shape", ")", "if", "dim", ">=", "0", ":", "shape", ".", "insert", "(", "dim", ",", "1", ")", "else", ":", "shape", ".", "insert", "(", "len", "(", "shape", ")", "+", "dim", "+", "1", ",", "1", ")", "return", "np", ".", "copy", "(", "a", ")", ".", "reshape", "(", "*", "shape", ")", "," ]
Expand dim op, i.e. add singular axis at dim.
[ "Expand", "dim", "op", "i", ".", "e", ".", "add", "singular", "axis", "at", "dim", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1142-L1151
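The slicing trick in Squeeze can be reproduced standalone; note that newer NumPy requires the index to be a tuple rather than a list for multi-dimensional indexing, so the sketch below uses a tuple. The behaviour is otherwise the same as np.squeeze on the selected axes:

import numpy as np

a = np.arange(6).reshape(1, 3, 1, 2)
squeeze_dims = [0, 2]
slices = tuple(0 if (dim == 1 and i in squeeze_dims) else slice(None)
               for i, dim in enumerate(a.shape))
print(a[slices].shape)                    # (3, 2)
print(np.squeeze(a, axis=(0, 2)).shape)   # (3, 2) -- same result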
3,626
riga/tfdeploy
tfdeploy.py
Slice
def Slice(a, begin, size): """ Slicing op. """ return np.copy(a)[[slice(*tpl) for tpl in zip(begin, begin+size)]],
python
def Slice(a, begin, size): return np.copy(a)[[slice(*tpl) for tpl in zip(begin, begin+size)]],
[ "def", "Slice", "(", "a", ",", "begin", ",", "size", ")", ":", "return", "np", ".", "copy", "(", "a", ")", "[", "[", "slice", "(", "*", "tpl", ")", "for", "tpl", "in", "zip", "(", "begin", ",", "begin", "+", "size", ")", "]", "]", "," ]
Slicing op.
[ "Slicing", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1159-L1163
3,627
riga/tfdeploy
tfdeploy.py
Split
def Split(axis, a, n): """ Split op with n splits. """ return tuple(np.split(np.copy(a), n, axis=axis))
python
def Split(axis, a, n): return tuple(np.split(np.copy(a), n, axis=axis))
[ "def", "Split", "(", "axis", ",", "a", ",", "n", ")", ":", "return", "tuple", "(", "np", ".", "split", "(", "np", ".", "copy", "(", "a", ")", ",", "n", ",", "axis", "=", "axis", ")", ")" ]
Split op with n splits.
[ "Split", "op", "with", "n", "splits", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1167-L1171
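Slice follows TensorFlow's begin/size convention rather than Python's start/stop; a small standalone check (again using a tuple of slices, which newer NumPy insists on):

import numpy as np

a = np.arange(24).reshape(4, 6)
begin = np.array([1, 2])
size = np.array([2, 3])
out = a[tuple(slice(*tpl) for tpl in zip(begin, begin + size))]
print(out.shape)  # (2, 3) -- rows 1:3, columns 2:5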
3,628
riga/tfdeploy
tfdeploy.py
SplitV
def SplitV(a, splits, axis): """ Split op with multiple split sizes. """ return tuple(np.split(np.copy(a), np.cumsum(splits), axis=axis))
python
def SplitV(a, splits, axis): return tuple(np.split(np.copy(a), np.cumsum(splits), axis=axis))
[ "def", "SplitV", "(", "a", ",", "splits", ",", "axis", ")", ":", "return", "tuple", "(", "np", ".", "split", "(", "np", ".", "copy", "(", "a", ")", ",", "np", ".", "cumsum", "(", "splits", ")", ",", "axis", "=", "axis", ")", ")" ]
Split op with multiple split sizes.
[ "Split", "op", "with", "multiple", "split", "sizes", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1175-L1179
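A quick look at what np.split does when handed the full cumulative sum of the split sizes, as SplitV does above; because the last cumulative index equals the array length, a trailing empty chunk is produced:

import numpy as np

a = np.arange(10)
splits = [2, 3, 5]
parts = np.split(a, np.cumsum(splits))
print([p.tolist() for p in parts])
# [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9], []]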
3,629
riga/tfdeploy
tfdeploy.py
ConcatV2
def ConcatV2(inputs): """ Concat op. """ axis = inputs.pop() return np.concatenate(inputs, axis=axis),
python
def ConcatV2(inputs): axis = inputs.pop() return np.concatenate(inputs, axis=axis),
[ "def", "ConcatV2", "(", "inputs", ")", ":", "axis", "=", "inputs", ".", "pop", "(", ")", "return", "np", ".", "concatenate", "(", "inputs", ",", "axis", "=", "axis", ")", "," ]
Concat op.
[ "Concat", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1199-L1204
3,630
riga/tfdeploy
tfdeploy.py
Unpack
def Unpack(a, num, axis): """ Unpack op. """ return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis))
python
def Unpack(a, num, axis): return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis))
[ "def", "Unpack", "(", "a", ",", "num", ",", "axis", ")", ":", "return", "tuple", "(", "np", ".", "squeeze", "(", "b", ",", "axis", "=", "axis", ")", "for", "b", "in", "np", ".", "split", "(", "a", ",", "num", ",", "axis", "=", "axis", ")", ")" ]
Unpack op.
[ "Unpack", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1216-L1220
3,631
riga/tfdeploy
tfdeploy.py
ReverseSequence
def ReverseSequence(a, seq_lengths, seq_dim, batch_dim): """ Sequential reverse op. """ r = np.copy(a) invidxs = (len(r.shape) - 1) * [slice(None)] if seq_dim < batch_dim: invidxs[seq_dim] = slice(None, None, -1) else: invidxs[seq_dim - 1] = slice(None, None, -1) _invidxs = tuple(invidxs) selidxs = len(r.shape) * [slice(None)] for i, l in enumerate(seq_lengths): if not l: continue selidxs[batch_dim] = i selidxs[seq_dim] = slice(0, l) _selidxs = tuple(selidxs) r[_selidxs] = a[_selidxs][_invidxs] return r,
python
def ReverseSequence(a, seq_lengths, seq_dim, batch_dim): r = np.copy(a) invidxs = (len(r.shape) - 1) * [slice(None)] if seq_dim < batch_dim: invidxs[seq_dim] = slice(None, None, -1) else: invidxs[seq_dim - 1] = slice(None, None, -1) _invidxs = tuple(invidxs) selidxs = len(r.shape) * [slice(None)] for i, l in enumerate(seq_lengths): if not l: continue selidxs[batch_dim] = i selidxs[seq_dim] = slice(0, l) _selidxs = tuple(selidxs) r[_selidxs] = a[_selidxs][_invidxs] return r,
[ "def", "ReverseSequence", "(", "a", ",", "seq_lengths", ",", "seq_dim", ",", "batch_dim", ")", ":", "r", "=", "np", ".", "copy", "(", "a", ")", "invidxs", "=", "(", "len", "(", "r", ".", "shape", ")", "-", "1", ")", "*", "[", "slice", "(", "None", ")", "]", "if", "seq_dim", "<", "batch_dim", ":", "invidxs", "[", "seq_dim", "]", "=", "slice", "(", "None", ",", "None", ",", "-", "1", ")", "else", ":", "invidxs", "[", "seq_dim", "-", "1", "]", "=", "slice", "(", "None", ",", "None", ",", "-", "1", ")", "_invidxs", "=", "tuple", "(", "invidxs", ")", "selidxs", "=", "len", "(", "r", ".", "shape", ")", "*", "[", "slice", "(", "None", ")", "]", "for", "i", ",", "l", "in", "enumerate", "(", "seq_lengths", ")", ":", "if", "not", "l", ":", "continue", "selidxs", "[", "batch_dim", "]", "=", "i", "selidxs", "[", "seq_dim", "]", "=", "slice", "(", "0", ",", "l", ")", "_selidxs", "=", "tuple", "(", "selidxs", ")", "r", "[", "_selidxs", "]", "=", "a", "[", "_selidxs", "]", "[", "_invidxs", "]", "return", "r", "," ]
Sequential reverse op.
[ "Sequential", "reverse", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1224-L1243
3,632
riga/tfdeploy
tfdeploy.py
ReverseV2
def ReverseV2(a, axes): """ Reverse op. """ idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape))) return np.copy(a[idxs]),
python
def ReverseV2(a, axes): idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape))) return np.copy(a[idxs]),
[ "def", "ReverseV2", "(", "a", ",", "axes", ")", ":", "idxs", "=", "tuple", "(", "slice", "(", "None", ",", "None", ",", "2", "*", "int", "(", "i", "not", "in", "axes", ")", "-", "1", ")", "for", "i", "in", "range", "(", "len", "(", "a", ".", "shape", ")", ")", ")", "return", "np", ".", "copy", "(", "a", "[", "idxs", "]", ")", "," ]
Reverse op.
[ "Reverse", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1247-L1252
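ReverseSequence is easier to grasp from its effect than from the index bookkeeping: for each batch entry i, the first seq_lengths[i] elements along the sequence axis are reversed and the rest are left untouched. A standalone sketch of that semantics for batch_dim=0, seq_dim=1:

import numpy as np

a = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8]])
seq_lengths = [2, 3]
r = a.copy()
for i, l in enumerate(seq_lengths):
    r[i, :l] = a[i, :l][::-1]   # reverse only the first l entries of row i
print(r)
# [[2 1 3 4]
#  [7 6 5 8]]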
3,633
riga/tfdeploy
tfdeploy.py
Betainc
def Betainc(a, b, x): """ Complemented, incomplete gamma op. """ return sp.special.betainc(a, b, x),
python
def Betainc(a, b, x): return sp.special.betainc(a, b, x),
[ "def", "Betainc", "(", "a", ",", "b", ",", "x", ")", ":", "return", "sp", ".", "special", ".", "betainc", "(", "a", ",", "b", ",", "x", ")", "," ]
Complemented, incomplete gamma op.
[ "Complemented", "incomplete", "gamma", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1588-L1592
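The step trick in ReverseV2 builds a per-axis step of -1 for reversed axes and +1 for all others via 2 * int(i not in axes) - 1; a tiny standalone check:

import numpy as np

a = np.arange(6).reshape(2, 3)
axes = [1]
idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(a.ndim))
print(idxs)     # (slice(None, None, 1), slice(None, None, -1))
print(a[idxs])
# [[2 1 0]
#  [5 4 3]]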
3,634
riga/tfdeploy
tfdeploy.py
Diag
def Diag(a): """ Diag op. """ r = np.zeros(2 * a.shape, dtype=a.dtype) for idx, v in np.ndenumerate(a): r[2 * idx] = v return r,
python
def Diag(a): r = np.zeros(2 * a.shape, dtype=a.dtype) for idx, v in np.ndenumerate(a): r[2 * idx] = v return r,
[ "def", "Diag", "(", "a", ")", ":", "r", "=", "np", ".", "zeros", "(", "2", "*", "a", ".", "shape", ",", "dtype", "=", "a", ".", "dtype", ")", "for", "idx", ",", "v", "in", "np", ".", "ndenumerate", "(", "a", ")", ":", "r", "[", "2", "*", "idx", "]", "=", "v", "return", "r", "," ]
Diag op.
[ "Diag", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1600-L1607
3,635
riga/tfdeploy
tfdeploy.py
MatrixDiagPart
def MatrixDiagPart(a): """ Batched diag op that returns only the diagonal elements. """ r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),)) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.diagonal(a[pos]) return r,
python
def MatrixDiagPart(a): r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),)) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.diagonal(a[pos]) return r,
[ "def", "MatrixDiagPart", "(", "a", ")", ":", "r", "=", "np", ".", "zeros", "(", "a", ".", "shape", "[", ":", "-", "2", "]", "+", "(", "min", "(", "a", ".", "shape", "[", "-", "2", ":", "]", ")", ",", ")", ")", "for", "coord", "in", "np", ".", "ndindex", "(", "a", ".", "shape", "[", ":", "-", "2", "]", ")", ":", "pos", "=", "coord", "+", "(", "Ellipsis", ",", ")", "r", "[", "pos", "]", "=", "np", ".", "diagonal", "(", "a", "[", "pos", "]", ")", "return", "r", "," ]
Batched diag op that returns only the diagonal elements.
[ "Batched", "diag", "op", "that", "returns", "only", "the", "diagonal", "elements", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1619-L1627
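The `r[2 * idx] = v` line in Diag relies on tuple repetition: for a 1-D input, idx is a tuple like (i,), so 2 * idx is (i, i), i.e. the matching diagonal position. A minimal check:

import numpy as np

a = np.array([1., 2., 3.])
r = np.zeros(2 * a.shape, dtype=a.dtype)    # 2 * (3,) == (3, 3)
for idx, v in np.ndenumerate(a):
    r[2 * idx] = v                          # 2 * (i,) == (i, i)
print(np.array_equal(r, np.diag(a)))        # True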
3,636
riga/tfdeploy
tfdeploy.py
MatMul
def MatMul(a, b, transpose_a, transpose_b): """ Matrix multiplication op. """ return np.dot(a if not transpose_a else np.transpose(a), b if not transpose_b else np.transpose(b)),
python
def MatMul(a, b, transpose_a, transpose_b): return np.dot(a if not transpose_a else np.transpose(a), b if not transpose_b else np.transpose(b)),
[ "def", "MatMul", "(", "a", ",", "b", ",", "transpose_a", ",", "transpose_b", ")", ":", "return", "np", ".", "dot", "(", "a", "if", "not", "transpose_a", "else", "np", ".", "transpose", "(", "a", ")", ",", "b", "if", "not", "transpose_b", "else", "np", ".", "transpose", "(", "b", ")", ")", "," ]
Matrix multiplication op.
[ "Matrix", "multiplication", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1631-L1636
3,637
riga/tfdeploy
tfdeploy.py
MatrixInverse
def MatrixInverse(a, adj): """ Matrix inversion op. """ return np.linalg.inv(a if not adj else _adjoint(a)),
python
def MatrixInverse(a, adj): return np.linalg.inv(a if not adj else _adjoint(a)),
[ "def", "MatrixInverse", "(", "a", ",", "adj", ")", ":", "return", "np", ".", "linalg", ".", "inv", "(", "a", "if", "not", "adj", "else", "_adjoint", "(", "a", ")", ")", "," ]
Matrix inversion op.
[ "Matrix", "inversion", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1648-L1652
3,638
riga/tfdeploy
tfdeploy.py
MatrixSolve
def MatrixSolve(a, rhs, adj): """ Matrix solve op. """ return np.linalg.solve(a if not adj else _adjoint(a), rhs),
python
def MatrixSolve(a, rhs, adj): return np.linalg.solve(a if not adj else _adjoint(a), rhs),
[ "def", "MatrixSolve", "(", "a", ",", "rhs", ",", "adj", ")", ":", "return", "np", ".", "linalg", ".", "solve", "(", "a", "if", "not", "adj", "else", "_adjoint", "(", "a", ")", ",", "rhs", ")", "," ]
Matrix solve op.
[ "Matrix", "solve", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1664-L1668
3,639
riga/tfdeploy
tfdeploy.py
MatrixTriangularSolve
def MatrixTriangularSolve(a, rhs, lower, adj): """ Matrix triangular solve op. """ trans = 0 if not adj else 2 r = np.empty(rhs.shape).astype(a.dtype) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.conj(a[pos]), rhs[pos], trans=trans, lower=lower) return r,
python
def MatrixTriangularSolve(a, rhs, lower, adj): trans = 0 if not adj else 2 r = np.empty(rhs.shape).astype(a.dtype) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.conj(a[pos]), rhs[pos], trans=trans, lower=lower) return r,
[ "def", "MatrixTriangularSolve", "(", "a", ",", "rhs", ",", "lower", ",", "adj", ")", ":", "trans", "=", "0", "if", "not", "adj", "else", "2", "r", "=", "np", ".", "empty", "(", "rhs", ".", "shape", ")", ".", "astype", "(", "a", ".", "dtype", ")", "for", "coord", "in", "np", ".", "ndindex", "(", "a", ".", "shape", "[", ":", "-", "2", "]", ")", ":", "pos", "=", "coord", "+", "(", "Ellipsis", ",", ")", "r", "[", "pos", "]", "=", "sp", ".", "linalg", ".", "solve_triangular", "(", "a", "[", "pos", "]", "if", "not", "adj", "else", "np", ".", "conj", "(", "a", "[", "pos", "]", ")", ",", "rhs", "[", "pos", "]", ",", "trans", "=", "trans", ",", "lower", "=", "lower", ")", "return", "r", "," ]
Matrix triangular solve op.
[ "Matrix", "triangular", "solve", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1672-L1684
3,640
riga/tfdeploy
tfdeploy.py
MatrixSolveLs
def MatrixSolveLs(a, rhs, l2_reg): """ Matrix least-squares solve op. """ r = np.empty(rhs.shape).astype(a.dtype) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0] return r,
python
def MatrixSolveLs(a, rhs, l2_reg): r = np.empty(rhs.shape).astype(a.dtype) for coord in np.ndindex(a.shape[:-2]): pos = coord + (Ellipsis,) r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0] return r,
[ "def", "MatrixSolveLs", "(", "a", ",", "rhs", ",", "l2_reg", ")", ":", "r", "=", "np", ".", "empty", "(", "rhs", ".", "shape", ")", ".", "astype", "(", "a", ".", "dtype", ")", "for", "coord", "in", "np", ".", "ndindex", "(", "a", ".", "shape", "[", ":", "-", "2", "]", ")", ":", "pos", "=", "coord", "+", "(", "Ellipsis", ",", ")", "r", "[", "pos", "]", "=", "np", ".", "linalg", ".", "lstsq", "(", "a", "[", "pos", "]", ",", "rhs", "[", "pos", "]", ")", "[", "0", "]", "return", "r", "," ]
Matrix least-squares solve op.
[ "Matrix", "least", "-", "squares", "solve", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1688-L1697
3,641
riga/tfdeploy
tfdeploy.py
SelfAdjointEig
def SelfAdjointEig(a): """ Eigen decomp op. """ shape = list(a.shape) shape[-2] += 1 return np.append(*np.linalg.eig(a)).reshape(*shape),
python
def SelfAdjointEig(a): shape = list(a.shape) shape[-2] += 1 return np.append(*np.linalg.eig(a)).reshape(*shape),
[ "def", "SelfAdjointEig", "(", "a", ")", ":", "shape", "=", "list", "(", "a", ".", "shape", ")", "shape", "[", "-", "2", "]", "+=", "1", "return", "np", ".", "append", "(", "*", "np", ".", "linalg", ".", "eig", "(", "a", ")", ")", ".", "reshape", "(", "*", "shape", ")", "," ]
Eigen decomp op.
[ "Eigen", "decomp", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1701-L1707
3,642
riga/tfdeploy
tfdeploy.py
Svd
def Svd(a, uv, full): """ Singular value decomp op. """ u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv) return s, u, v
python
def Svd(a, uv, full): u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv) return s, u, v
[ "def", "Svd", "(", "a", ",", "uv", ",", "full", ")", ":", "u", ",", "s", ",", "v", "=", "np", ".", "linalg", ".", "svd", "(", "a", ",", "full_matrices", "=", "full", ",", "compute_uv", "=", "uv", ")", "return", "s", ",", "u", ",", "v" ]
Singular value decomp op.
[ "Single", "value", "decomp", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1719-L1724
3,643
riga/tfdeploy
tfdeploy.py
Sum
def Sum(a, axis, keep_dims): """ Sum reduction op. """ return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Sum(a, axis, keep_dims): return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Sum", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "sum", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Sum reduction op.
[ "Sum", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1804-L1809
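Sum and the reduction ops that follow (Prod, Min, Max, Mean, All, Any) all convert an ndarray of axes into a tuple before handing it to NumPy, because NumPy wants an int or a tuple of ints for multi-axis reductions. A short check of the pattern:

import numpy as np

a = np.ones((2, 3, 4))
axis = np.array([0, 2])                                   # as delivered by the graph
print(np.sum(a, axis=tuple(axis), keepdims=True).shape)   # (1, 3, 1)
print(np.sum(a, axis=tuple(axis), keepdims=False).shape)  # (3,)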
3,644
riga/tfdeploy
tfdeploy.py
Prod
def Prod(a, axis, keep_dims): """ Prod reduction op. """ return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Prod(a, axis, keep_dims): return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Prod", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "prod", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Prod reduction op.
[ "Prod", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1813-L1818
3,645
riga/tfdeploy
tfdeploy.py
Min
def Min(a, axis, keep_dims): """ Min reduction op. """ return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Min(a, axis, keep_dims): return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Min", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "amin", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Min reduction op.
[ "Min", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1822-L1827
3,646
riga/tfdeploy
tfdeploy.py
Max
def Max(a, axis, keep_dims): """ Max reduction op. """ return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Max(a, axis, keep_dims): return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Max", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "amax", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Max reduction op.
[ "Max", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1831-L1836
3,647
riga/tfdeploy
tfdeploy.py
Mean
def Mean(a, axis, keep_dims): """ Mean reduction op. """ return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Mean(a, axis, keep_dims): return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Mean", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "mean", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Mean reduction op.
[ "Mean", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1840-L1845
3,648
riga/tfdeploy
tfdeploy.py
All
def All(a, axis, keep_dims): """ All reduction op. """ return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def All(a, axis, keep_dims): return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "All", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "all", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
All reduction op.
[ "All", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1849-L1854
3,649
riga/tfdeploy
tfdeploy.py
Any
def Any(a, axis, keep_dims): """ Any reduction op. """ return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
python
def Any(a, axis, keep_dims): return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Any", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "any", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Any reduction op.
[ "Any", "reduction", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1858-L1863
3,650
riga/tfdeploy
tfdeploy.py
SegmentSum
def SegmentSum(a, ids, *args): """ Segmented sum op. """ func = lambda idxs: reduce(np.add, a[idxs]) return seg_map(func, a, ids),
python
def SegmentSum(a, ids, *args): func = lambda idxs: reduce(np.add, a[idxs]) return seg_map(func, a, ids),
[ "def", "SegmentSum", "(", "a", ",", "ids", ",", "*", "args", ")", ":", "func", "=", "lambda", "idxs", ":", "reduce", "(", "np", ".", "add", ",", "a", "[", "idxs", "]", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented sum op.
[ "Segmented", "sum", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1881-L1886
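seg_map is a helper defined elsewhere in tfdeploy that groups row indices by segment id and applies the reduction per group; the effect of SegmentSum can be reproduced standalone like this (a sketch of the semantics, not of the helper itself):

import numpy as np

a = np.array([1., 2., 3., 4., 5.])
ids = np.array([0, 0, 1, 1, 1])
out = np.array([a[ids == s].sum() for s in np.unique(ids)])
print(out)   # [ 3. 12.]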
3,651
riga/tfdeploy
tfdeploy.py
SegmentProd
def SegmentProd(a, ids): """ Segmented prod op. """ func = lambda idxs: reduce(np.multiply, a[idxs]) return seg_map(func, a, ids),
python
def SegmentProd(a, ids): func = lambda idxs: reduce(np.multiply, a[idxs]) return seg_map(func, a, ids),
[ "def", "SegmentProd", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "reduce", "(", "np", ".", "multiply", ",", "a", "[", "idxs", "]", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented prod op.
[ "Segmented", "prod", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1890-L1895
3,652
riga/tfdeploy
tfdeploy.py
SegmentMin
def SegmentMin(a, ids): """ Segmented min op. """ func = lambda idxs: np.amin(a[idxs], axis=0) return seg_map(func, a, ids),
python
def SegmentMin(a, ids): func = lambda idxs: np.amin(a[idxs], axis=0) return seg_map(func, a, ids),
[ "def", "SegmentMin", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "np", ".", "amin", "(", "a", "[", "idxs", "]", ",", "axis", "=", "0", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented min op.
[ "Segmented", "min", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1899-L1904
3,653
riga/tfdeploy
tfdeploy.py
SegmentMax
def SegmentMax(a, ids): """ Segmented max op. """ func = lambda idxs: np.amax(a[idxs], axis=0) return seg_map(func, a, ids),
python
def SegmentMax(a, ids): func = lambda idxs: np.amax(a[idxs], axis=0) return seg_map(func, a, ids),
[ "def", "SegmentMax", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "np", ".", "amax", "(", "a", "[", "idxs", "]", ",", "axis", "=", "0", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented max op.
[ "Segmented", "max", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1908-L1913
3,654
riga/tfdeploy
tfdeploy.py
SegmentMean
def SegmentMean(a, ids): """ Segmented mean op. """ func = lambda idxs: np.mean(a[idxs], axis=0) return seg_map(func, a, ids),
python
def SegmentMean(a, ids): func = lambda idxs: np.mean(a[idxs], axis=0) return seg_map(func, a, ids),
[ "def", "SegmentMean", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "np", ".", "mean", "(", "a", "[", "idxs", "]", ",", "axis", "=", "0", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
Segmented mean op.
[ "Segmented", "mean", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1917-L1922
3,655
riga/tfdeploy
tfdeploy.py
ListDiff
def ListDiff(a, b): """ List diff op. """ d = np.setdiff1d(a, b) return d, np.searchsorted(a, d).astype(np.int32)
python
def ListDiff(a, b): d = np.setdiff1d(a, b) return d, np.searchsorted(a, d).astype(np.int32)
[ "def", "ListDiff", "(", "a", ",", "b", ")", ":", "d", "=", "np", ".", "setdiff1d", "(", "a", ",", "b", ")", "return", "d", ",", "np", ".", "searchsorted", "(", "a", ",", "d", ")", ".", "astype", "(", "np", ".", "int32", ")" ]
List diff op.
[ "List", "diff", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1971-L1976
3,656
riga/tfdeploy
tfdeploy.py
Unique
def Unique(a, t): """ Unique op. """ _, idxs, inv = np.unique(a, return_index=True, return_inverse=True) return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t])
python
def Unique(a, t): _, idxs, inv = np.unique(a, return_index=True, return_inverse=True) return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t])
[ "def", "Unique", "(", "a", ",", "t", ")", ":", "_", ",", "idxs", ",", "inv", "=", "np", ".", "unique", "(", "a", ",", "return_index", "=", "True", ",", "return_inverse", "=", "True", ")", "return", "np", ".", "copy", "(", "a", ")", "[", "np", ".", "sort", "(", "idxs", ")", "]", ",", "idxs", "[", "inv", "]", ".", "astype", "(", "dtype_map", "[", "t", "]", ")" ]
Unique op.
[ "Unique", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1988-L1993
3,657
riga/tfdeploy
tfdeploy.py
Elu
def Elu(a): """ Elu op. """ return np.where(a < 0, np.subtract(np.exp(a), 1), a),
python
def Elu(a): return np.where(a < 0, np.subtract(np.exp(a), 1), a),
[ "def", "Elu", "(", "a", ")", ":", "return", "np", ".", "where", "(", "a", "<", "0", ",", "np", ".", "subtract", "(", "np", ".", "exp", "(", "a", ")", ",", "1", ")", ",", "a", ")", "," ]
Elu op.
[ "Elu", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2037-L2041
3,658
riga/tfdeploy
tfdeploy.py
Softsign
def Softsign(a): """ Softsign op. """ return np.divide(a, np.add(np.abs(a), 1)),
python
def Softsign(a): return np.divide(a, np.add(np.abs(a), 1)),
[ "def", "Softsign", "(", "a", ")", ":", "return", "np", ".", "divide", "(", "a", ",", "np", ".", "add", "(", "np", ".", "abs", "(", "a", ")", ",", "1", ")", ")", "," ]
Softsign op.
[ "Softsign", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2053-L2057
3,659
riga/tfdeploy
tfdeploy.py
Softmax
def Softmax(a): """ Softmax op. """ e = np.exp(a) return np.divide(e, np.sum(e, axis=-1, keepdims=True)),
python
def Softmax(a): e = np.exp(a) return np.divide(e, np.sum(e, axis=-1, keepdims=True)),
[ "def", "Softmax", "(", "a", ")", ":", "e", "=", "np", ".", "exp", "(", "a", ")", "return", "np", ".", "divide", "(", "e", ",", "np", ".", "sum", "(", "e", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", ")", "," ]
Softmax op.
[ "Softmax", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2077-L2082
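The Softmax record exponentiates the raw logits directly; the usual numerically safer variant subtracts the per-row maximum first, which leaves the result unchanged but avoids overflow for large inputs. A small sketch of that variant, not part of the record:

import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [1000.0, 1001.0, 1002.0]])   # the second row would overflow np.exp directly
e = np.exp(a - np.amax(a, axis=-1, keepdims=True))
print(e / np.sum(e, axis=-1, keepdims=True))
# both rows give [0.09003057, 0.24472847, 0.66524096]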
3,660
riga/tfdeploy
tfdeploy.py
Conv1D
def Conv1D(a, f, strides, padding, data_format): """ 1D conv op. """ if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii")) conv = np.sum(patches, axis=tuple(range(-f.ndim, -1))) if data_format.decode("ascii") == "NCHW": conv = np.rollaxis(conv, -1, 1) return conv,
python
def Conv1D(a, f, strides, padding, data_format): if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii")) conv = np.sum(patches, axis=tuple(range(-f.ndim, -1))) if data_format.decode("ascii") == "NCHW": conv = np.rollaxis(conv, -1, 1) return conv,
[ "def", "Conv1D", "(", "a", ",", "f", ",", "strides", ",", "padding", ",", "data_format", ")", ":", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "a", "=", "np", ".", "rollaxis", "(", "a", ",", "1", ",", "-", "1", ")", ",", "patches", "=", "_conv_patches", "(", "a", ",", "f", ",", "3", "*", "[", "strides", "]", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "conv", "=", "np", ".", "sum", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "f", ".", "ndim", ",", "-", "1", ")", ")", ")", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "conv", "=", "np", ".", "rollaxis", "(", "conv", ",", "-", "1", ",", "1", ")", "return", "conv", "," ]
1D conv op.
[ "1D", "conv", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2125-L2138
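One detail worth noting when reading Conv1D (and the AvgPool/MaxPool records further down): the NCHW branch ends its assignment with a comma, `a = np.rollaxis(a, 1, -1),`, which makes `a` a one-element tuple rather than an array. The presumably intended line is simply:

a = np.rollaxis(a, 1, -1)   # no trailing comma -- keep `a` an ndarray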
3,661
riga/tfdeploy
tfdeploy.py
Conv3D
def Conv3D(a, f, strides, padding): """ 3D conv op. """ patches = _conv_patches(a, f, strides, padding.decode("ascii")) return np.sum(patches, axis=tuple(range(-f.ndim, -1))),
python
def Conv3D(a, f, strides, padding): patches = _conv_patches(a, f, strides, padding.decode("ascii")) return np.sum(patches, axis=tuple(range(-f.ndim, -1))),
[ "def", "Conv3D", "(", "a", ",", "f", ",", "strides", ",", "padding", ")", ":", "patches", "=", "_conv_patches", "(", "a", ",", "f", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "return", "np", ".", "sum", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "f", ".", "ndim", ",", "-", "1", ")", ")", ")", "," ]
3D conv op.
[ "3D", "conv", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2159-L2164
3,662
riga/tfdeploy
tfdeploy.py
AvgPool
def AvgPool(a, k, strides, padding, data_format): """ Average pooling op. """ if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _pool_patches(a, k, strides, padding.decode("ascii")) pool = np.average(patches, axis=tuple(range(-len(k), 0))) if data_format.decode("ascii") == "NCHW": pool = np.rollaxis(pool, -1, 1) return pool,
python
def AvgPool(a, k, strides, padding, data_format): if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _pool_patches(a, k, strides, padding.decode("ascii")) pool = np.average(patches, axis=tuple(range(-len(k), 0))) if data_format.decode("ascii") == "NCHW": pool = np.rollaxis(pool, -1, 1) return pool,
[ "def", "AvgPool", "(", "a", ",", "k", ",", "strides", ",", "padding", ",", "data_format", ")", ":", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "a", "=", "np", ".", "rollaxis", "(", "a", ",", "1", ",", "-", "1", ")", ",", "patches", "=", "_pool_patches", "(", "a", ",", "k", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "pool", "=", "np", ".", "average", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "len", "(", "k", ")", ",", "0", ")", ")", ")", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "pool", "=", "np", ".", "rollaxis", "(", "pool", ",", "-", "1", ",", "1", ")", "return", "pool", "," ]
Average pooling op.
[ "Average", "pooling", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2190-L2203
3,663
riga/tfdeploy
tfdeploy.py
MaxPool
def MaxPool(a, k, strides, padding, data_format): """ Maximum pooling op. """ if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _pool_patches(a, k, strides, padding.decode("ascii")) pool = np.amax(patches, axis=tuple(range(-len(k), 0))) if data_format.decode("ascii") == "NCHW": pool = np.rollaxis(pool, -1, 1) return pool,
python
def MaxPool(a, k, strides, padding, data_format): if data_format.decode("ascii") == "NCHW": a = np.rollaxis(a, 1, -1), patches = _pool_patches(a, k, strides, padding.decode("ascii")) pool = np.amax(patches, axis=tuple(range(-len(k), 0))) if data_format.decode("ascii") == "NCHW": pool = np.rollaxis(pool, -1, 1) return pool,
[ "def", "MaxPool", "(", "a", ",", "k", ",", "strides", ",", "padding", ",", "data_format", ")", ":", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "a", "=", "np", ".", "rollaxis", "(", "a", ",", "1", ",", "-", "1", ")", ",", "patches", "=", "_pool_patches", "(", "a", ",", "k", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "pool", "=", "np", ".", "amax", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "len", "(", "k", ")", ",", "0", ")", ")", ")", "if", "data_format", ".", "decode", "(", "\"ascii\"", ")", "==", "\"NCHW\"", ":", "pool", "=", "np", ".", "rollaxis", "(", "pool", ",", "-", "1", ",", "1", ")", "return", "pool", "," ]
Maximum pooling op.
[ "Maximum", "pooling", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2207-L2220
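The pooling ops above likewise rely on _pool_patches, which is not shown. As a rough stand-alone illustration of the semantics, this is non-overlapping 2x2 max pooling on a batch-height-width-channels tensor using only reshapes (a sketch under the assumption kernel size == stride == 2, not the general helper):

import numpy as np

def max_pool_2x2(a):
    # a: (batch, height, width, channels); non-overlapping 2x2 windows
    b, h, w, c = a.shape
    h2, w2 = h // 2, w // 2
    a = a[:, :h2 * 2, :w2 * 2, :]          # drop any odd trailing row/column
    a = a.reshape(b, h2, 2, w2, 2, c)      # expose the 2x2 windows
    return a.max(axis=(2, 4))              # reduce over each window

x = np.arange(2 * 4 * 4 * 1, dtype=float).reshape(2, 4, 4, 1)
print(max_pool_2x2(x).shape)  # (2, 2, 2, 1)

Swapping np.max for np.mean over the same windows gives the AvgPool variant.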
3,664
riga/tfdeploy
tfdeploy.py
AvgPool3D
def AvgPool3D(a, k, strides, padding): """ Average 3D pooling op. """ patches = _pool_patches(a, k, strides, padding.decode("ascii")) return np.average(patches, axis=tuple(range(-len(k), 0))),
python
def AvgPool3D(a, k, strides, padding): patches = _pool_patches(a, k, strides, padding.decode("ascii")) return np.average(patches, axis=tuple(range(-len(k), 0))),
[ "def", "AvgPool3D", "(", "a", ",", "k", ",", "strides", ",", "padding", ")", ":", "patches", "=", "_pool_patches", "(", "a", ",", "k", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "return", "np", ".", "average", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "len", "(", "k", ")", ",", "0", ")", ")", ")", "," ]
Average 3D pooling op.
[ "Average", "3D", "pooling", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2224-L2229
3,665
riga/tfdeploy
tfdeploy.py
MaxPool3D
def MaxPool3D(a, k, strides, padding): """ Maximum 3D pooling op. """ patches = _pool_patches(a, k, strides, padding.decode("ascii")) return np.amax(patches, axis=tuple(range(-len(k), 0))),
python
def MaxPool3D(a, k, strides, padding): patches = _pool_patches(a, k, strides, padding.decode("ascii")) return np.amax(patches, axis=tuple(range(-len(k), 0))),
[ "def", "MaxPool3D", "(", "a", ",", "k", ",", "strides", ",", "padding", ")", ":", "patches", "=", "_pool_patches", "(", "a", ",", "k", ",", "strides", ",", "padding", ".", "decode", "(", "\"ascii\"", ")", ")", "return", "np", ".", "amax", "(", "patches", ",", "axis", "=", "tuple", "(", "range", "(", "-", "len", "(", "k", ")", ",", "0", ")", ")", ")", "," ]
Maximum 3D pooling op.
[ "Maximum", "3D", "pooling", "op", "." ]
8481f657d6e3a51d76185a195b993e45f448828a
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2233-L2238
3,666
adafruit/Adafruit_CircuitPython_MatrixKeypad
adafruit_matrixkeypad.py
Matrix_Keypad.pressed_keys
def pressed_keys(self): """An array containing all detected keys that are pressed from the initalized list-of-lists passed in during creation""" # make a list of all the keys that are detected pressed = [] # set all pins pins to be inputs w/pullups for pin in self.row_pins+self.col_pins: pin.direction = Direction.INPUT pin.pull = Pull.UP for row in range(len(self.row_pins)): # set one row low at a time self.row_pins[row].direction = Direction.OUTPUT self.row_pins[row].value = False # check the column pins, which ones are pulled down for col in range(len(self.col_pins)): if not self.col_pins[col].value: pressed.append(self.keys[row][col]) # reset the pin to be an input self.row_pins[row].direction = Direction.INPUT self.row_pins[row].pull = Pull.UP return pressed
python
def pressed_keys(self): # make a list of all the keys that are detected pressed = [] # set all pins pins to be inputs w/pullups for pin in self.row_pins+self.col_pins: pin.direction = Direction.INPUT pin.pull = Pull.UP for row in range(len(self.row_pins)): # set one row low at a time self.row_pins[row].direction = Direction.OUTPUT self.row_pins[row].value = False # check the column pins, which ones are pulled down for col in range(len(self.col_pins)): if not self.col_pins[col].value: pressed.append(self.keys[row][col]) # reset the pin to be an input self.row_pins[row].direction = Direction.INPUT self.row_pins[row].pull = Pull.UP return pressed
[ "def", "pressed_keys", "(", "self", ")", ":", "# make a list of all the keys that are detected", "pressed", "=", "[", "]", "# set all pins pins to be inputs w/pullups", "for", "pin", "in", "self", ".", "row_pins", "+", "self", ".", "col_pins", ":", "pin", ".", "direction", "=", "Direction", ".", "INPUT", "pin", ".", "pull", "=", "Pull", ".", "UP", "for", "row", "in", "range", "(", "len", "(", "self", ".", "row_pins", ")", ")", ":", "# set one row low at a time", "self", ".", "row_pins", "[", "row", "]", ".", "direction", "=", "Direction", ".", "OUTPUT", "self", ".", "row_pins", "[", "row", "]", ".", "value", "=", "False", "# check the column pins, which ones are pulled down", "for", "col", "in", "range", "(", "len", "(", "self", ".", "col_pins", ")", ")", ":", "if", "not", "self", ".", "col_pins", "[", "col", "]", ".", "value", ":", "pressed", ".", "append", "(", "self", ".", "keys", "[", "row", "]", "[", "col", "]", ")", "# reset the pin to be an input", "self", ".", "row_pins", "[", "row", "]", ".", "direction", "=", "Direction", ".", "INPUT", "self", ".", "row_pins", "[", "row", "]", ".", "pull", "=", "Pull", ".", "UP", "return", "pressed" ]
An array containing all detected keys that are pressed from the initalized list-of-lists passed in during creation
[ "An", "array", "containing", "all", "detected", "keys", "that", "are", "pressed", "from", "the", "initalized", "list", "-", "of", "-", "lists", "passed", "in", "during", "creation" ]
f530b1a920a40ef09ec1394b7760f243a243045a
https://github.com/adafruit/Adafruit_CircuitPython_MatrixKeypad/blob/f530b1a920a40ef09ec1394b7760f243a243045a/adafruit_matrixkeypad.py#L69-L91
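A hypothetical CircuitPython usage sketch for the pressed_keys scan above; the board pin names and the 4x3 key layout are assumptions, and the constructor arguments mirror the row/column/keys attributes the method reads:

import time
import board
import digitalio
import adafruit_matrixkeypad

rows = [digitalio.DigitalInOut(p) for p in (board.D1, board.D2, board.D3, board.D4)]
cols = [digitalio.DigitalInOut(p) for p in (board.D5, board.D6, board.D7)]
keys = ((1, 2, 3),
        (4, 5, 6),
        (7, 8, 9),
        ('*', 0, '#'))

keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
while True:
    pressed = keypad.pressed_keys  # one row driven low at a time, columns read back
    if pressed:
        print("Pressed:", pressed)
    time.sleep(0.1)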
3,667
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.get_load_times
def get_load_times(self, asset_type): """ Just a ``list`` of the load times of a certain asset type for each page :param asset_type: ``str`` of the asset type to return load times for """ load_times = [] search_str = '{0}_load_time'.format(asset_type) for har_page in self.pages: val = getattr(har_page, search_str, None) if val is not None: load_times.append(val) return load_times
python
def get_load_times(self, asset_type): load_times = [] search_str = '{0}_load_time'.format(asset_type) for har_page in self.pages: val = getattr(har_page, search_str, None) if val is not None: load_times.append(val) return load_times
[ "def", "get_load_times", "(", "self", ",", "asset_type", ")", ":", "load_times", "=", "[", "]", "search_str", "=", "'{0}_load_time'", ".", "format", "(", "asset_type", ")", "for", "har_page", "in", "self", ".", "pages", ":", "val", "=", "getattr", "(", "har_page", ",", "search_str", ",", "None", ")", "if", "val", "is", "not", "None", ":", "load_times", ".", "append", "(", "val", ")", "return", "load_times" ]
Just a ``list`` of the load times of a certain asset type for each page :param asset_type: ``str`` of the asset type to return load times for
[ "Just", "a", "list", "of", "the", "load", "times", "of", "a", "certain", "asset", "type", "for", "each", "page" ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L41-L53
3,668
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.get_stdev
def get_stdev(self, asset_type): """ Returns the standard deviation for a set of a certain asset type. :param asset_type: ``str`` of the asset type to calculate standard deviation for. :returns: A ``int`` or ``float`` of standard deviation, depending on the self.decimal_precision """ load_times = [] # Handle edge cases like TTFB if asset_type == 'ttfb': for page in self.pages: if page.time_to_first_byte is not None: load_times.append(page.time_to_first_byte) elif asset_type not in self.asset_types and asset_type != 'page': raise ValueError('asset_type must be one of:\nttfb\n{0}'.format( '\n'.join(self.asset_types))) else: load_times = self.get_load_times(asset_type) if not load_times or not sum(load_times): return 0 return round(stdev(load_times), self.decimal_precision)
python
def get_stdev(self, asset_type): load_times = [] # Handle edge cases like TTFB if asset_type == 'ttfb': for page in self.pages: if page.time_to_first_byte is not None: load_times.append(page.time_to_first_byte) elif asset_type not in self.asset_types and asset_type != 'page': raise ValueError('asset_type must be one of:\nttfb\n{0}'.format( '\n'.join(self.asset_types))) else: load_times = self.get_load_times(asset_type) if not load_times or not sum(load_times): return 0 return round(stdev(load_times), self.decimal_precision)
[ "def", "get_stdev", "(", "self", ",", "asset_type", ")", ":", "load_times", "=", "[", "]", "# Handle edge cases like TTFB", "if", "asset_type", "==", "'ttfb'", ":", "for", "page", "in", "self", ".", "pages", ":", "if", "page", ".", "time_to_first_byte", "is", "not", "None", ":", "load_times", ".", "append", "(", "page", ".", "time_to_first_byte", ")", "elif", "asset_type", "not", "in", "self", ".", "asset_types", "and", "asset_type", "!=", "'page'", ":", "raise", "ValueError", "(", "'asset_type must be one of:\\nttfb\\n{0}'", ".", "format", "(", "'\\n'", ".", "join", "(", "self", ".", "asset_types", ")", ")", ")", "else", ":", "load_times", "=", "self", ".", "get_load_times", "(", "asset_type", ")", "if", "not", "load_times", "or", "not", "sum", "(", "load_times", ")", ":", "return", "0", "return", "round", "(", "stdev", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns the standard deviation for a set of a certain asset type. :param asset_type: ``str`` of the asset type to calculate standard deviation for. :returns: A ``int`` or ``float`` of standard deviation, depending on the self.decimal_precision
[ "Returns", "the", "standard", "deviation", "for", "a", "set", "of", "a", "certain", "asset", "type", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L55-L79
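A hypothetical end-to-end sketch for the MultiHarParser aggregation methods above; the file names are placeholders, and the har_data keyword is assumed from how the per-file HarParser objects are built in the pages property:

import json
from haralyzer import MultiHarParser

har_dicts = []
for path in ('run1.har', 'run2.har', 'run3.har'):   # placeholder file names
    with open(path) as f:
        har_dicts.append(json.loads(f.read()))

multi = MultiHarParser(har_data=har_dicts)
print(multi.get_load_times('image'))  # one load time per captured page
print(multi.get_stdev('image'))       # spread across the runs
print(multi.get_stdev('ttfb'))        # handled as a special case above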
3,669
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.pages
def pages(self): """ The aggregate pages of all the parser objects. """ pages = [] for har_dict in self.har_data: har_parser = HarParser(har_data=har_dict) if self.page_id: for page in har_parser.pages: if page.page_id == self.page_id: pages.append(page) else: pages = pages + har_parser.pages return pages
python
def pages(self): pages = [] for har_dict in self.har_data: har_parser = HarParser(har_data=har_dict) if self.page_id: for page in har_parser.pages: if page.page_id == self.page_id: pages.append(page) else: pages = pages + har_parser.pages return pages
[ "def", "pages", "(", "self", ")", ":", "pages", "=", "[", "]", "for", "har_dict", "in", "self", ".", "har_data", ":", "har_parser", "=", "HarParser", "(", "har_data", "=", "har_dict", ")", "if", "self", ".", "page_id", ":", "for", "page", "in", "har_parser", ".", "pages", ":", "if", "page", ".", "page_id", "==", "self", ".", "page_id", ":", "pages", ".", "append", "(", "page", ")", "else", ":", "pages", "=", "pages", "+", "har_parser", ".", "pages", "return", "pages" ]
The aggregate pages of all the parser objects.
[ "The", "aggregate", "pages", "of", "all", "the", "parser", "objects", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L82-L95
3,670
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.time_to_first_byte
def time_to_first_byte(self): """ The aggregate time to first byte for all pages. """ ttfb = [] for page in self.pages: if page.time_to_first_byte is not None: ttfb.append(page.time_to_first_byte) return round(mean(ttfb), self.decimal_precision)
python
def time_to_first_byte(self): ttfb = [] for page in self.pages: if page.time_to_first_byte is not None: ttfb.append(page.time_to_first_byte) return round(mean(ttfb), self.decimal_precision)
[ "def", "time_to_first_byte", "(", "self", ")", ":", "ttfb", "=", "[", "]", "for", "page", "in", "self", ".", "pages", ":", "if", "page", ".", "time_to_first_byte", "is", "not", "None", ":", "ttfb", ".", "append", "(", "page", ".", "time_to_first_byte", ")", "return", "round", "(", "mean", "(", "ttfb", ")", ",", "self", ".", "decimal_precision", ")" ]
The aggregate time to first byte for all pages.
[ "The", "aggregate", "time", "to", "first", "byte", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L105-L113
3,671
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.js_load_time
def js_load_time(self): """ Returns aggregate javascript load time. """ load_times = self.get_load_times('js') return round(mean(load_times), self.decimal_precision)
python
def js_load_time(self): load_times = self.get_load_times('js') return round(mean(load_times), self.decimal_precision)
[ "def", "js_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'js'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate javascript load time.
[ "Returns", "aggregate", "javascript", "load", "time", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L124-L129
3,672
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.css_load_time
def css_load_time(self): """ Returns aggregate css load time for all pages. """ load_times = self.get_load_times('css') return round(mean(load_times), self.decimal_precision)
python
def css_load_time(self): load_times = self.get_load_times('css') return round(mean(load_times), self.decimal_precision)
[ "def", "css_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'css'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate css load time for all pages.
[ "Returns", "aggregate", "css", "load", "time", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L132-L137
3,673
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.image_load_time
def image_load_time(self): """ Returns aggregate image load time for all pages. """ load_times = self.get_load_times('image') return round(mean(load_times), self.decimal_precision)
python
def image_load_time(self): load_times = self.get_load_times('image') return round(mean(load_times), self.decimal_precision)
[ "def", "image_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'image'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate image load time for all pages.
[ "Returns", "aggregate", "image", "load", "time", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L140-L145
3,674
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.html_load_time
def html_load_time(self): """ Returns aggregate html load time for all pages. """ load_times = self.get_load_times('html') return round(mean(load_times), self.decimal_precision)
python
def html_load_time(self): load_times = self.get_load_times('html') return round(mean(load_times), self.decimal_precision)
[ "def", "html_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'html'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate html load time for all pages.
[ "Returns", "aggregate", "html", "load", "time", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L148-L153
3,675
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.audio_load_time
def audio_load_time(self): """ Returns aggregate audio load time for all pages. """ load_times = self.get_load_times('audio') return round(mean(load_times), self.decimal_precision)
python
def audio_load_time(self): load_times = self.get_load_times('audio') return round(mean(load_times), self.decimal_precision)
[ "def", "audio_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'audio'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate audio load time for all pages.
[ "Returns", "aggregate", "audio", "load", "time", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L156-L161
3,676
mrname/haralyzer
haralyzer/multihar.py
MultiHarParser.video_load_time
def video_load_time(self): """ Returns aggregate video load time for all pages. """ load_times = self.get_load_times('video') return round(mean(load_times), self.decimal_precision)
python
def video_load_time(self): load_times = self.get_load_times('video') return round(mean(load_times), self.decimal_precision)
[ "def", "video_load_time", "(", "self", ")", ":", "load_times", "=", "self", ".", "get_load_times", "(", "'video'", ")", "return", "round", "(", "mean", "(", "load_times", ")", ",", "self", ".", "decimal_precision", ")" ]
Returns aggregate video load time for all pages.
[ "Returns", "aggregate", "video", "load", "time", "for", "all", "pages", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L164-L169
3,677
mrname/haralyzer
haralyzer/assets.py
HarParser.match_headers
def match_headers(self, entry, header_type, header, value, regex=True): """ Function to match headers. Since the output of headers might use different case, like: 'content-type' vs 'Content-Type' This function is case-insensitive :param entry: entry object :param header_type: ``str`` of header type. Valid values: * 'request' * 'response' :param header: ``str`` of the header to search for :param value: ``str`` of value to search for :param regex: ``bool`` indicating whether to use regex or exact match :returns: a ``bool`` indicating whether a match was found """ if header_type not in entry: raise ValueError('Invalid header_type, should be either:\n\n' '* \'request\'\n*\'response\'') # TODO - headers are empty in some HAR data.... need fallbacks here for h in entry[header_type]['headers']: if h['name'].lower() == header.lower() and h['value'] is not None: if regex and re.search(value, h['value'], flags=re.IGNORECASE): return True elif value == h['value']: return True return False
python
def match_headers(self, entry, header_type, header, value, regex=True): if header_type not in entry: raise ValueError('Invalid header_type, should be either:\n\n' '* \'request\'\n*\'response\'') # TODO - headers are empty in some HAR data.... need fallbacks here for h in entry[header_type]['headers']: if h['name'].lower() == header.lower() and h['value'] is not None: if regex and re.search(value, h['value'], flags=re.IGNORECASE): return True elif value == h['value']: return True return False
[ "def", "match_headers", "(", "self", ",", "entry", ",", "header_type", ",", "header", ",", "value", ",", "regex", "=", "True", ")", ":", "if", "header_type", "not", "in", "entry", ":", "raise", "ValueError", "(", "'Invalid header_type, should be either:\\n\\n'", "'* \\'request\\'\\n*\\'response\\''", ")", "# TODO - headers are empty in some HAR data.... need fallbacks here", "for", "h", "in", "entry", "[", "header_type", "]", "[", "'headers'", "]", ":", "if", "h", "[", "'name'", "]", ".", "lower", "(", ")", "==", "header", ".", "lower", "(", ")", "and", "h", "[", "'value'", "]", "is", "not", "None", ":", "if", "regex", "and", "re", ".", "search", "(", "value", ",", "h", "[", "'value'", "]", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ":", "return", "True", "elif", "value", "==", "h", "[", "'value'", "]", ":", "return", "True", "return", "False" ]
Function to match headers. Since the output of headers might use different case, like: 'content-type' vs 'Content-Type' This function is case-insensitive :param entry: entry object :param header_type: ``str`` of header type. Valid values: * 'request' * 'response' :param header: ``str`` of the header to search for :param value: ``str`` of value to search for :param regex: ``bool`` indicating whether to use regex or exact match :returns: a ``bool`` indicating whether a match was found
[ "Function", "to", "match", "headers", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L40-L73
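A small sketch of the case-insensitive header matching above, using a hand-built entry. Because match_headers never touches self, it is called through the class here (Python 3 only) purely for illustration; normal use would go through a HarParser built from real HAR data:

from haralyzer import HarParser

entry = {
    'request': {'headers': []},
    'response': {'headers': [
        {'name': 'Content-Type', 'value': 'text/html; charset=utf-8'},
    ]},
}

# regex mode (default): header name compared case-insensitively, value searched as a regex
print(HarParser.match_headers(None, entry, 'response', 'content-type', 'text/html'))   # True
# exact mode: the value must equal the full header value string
print(HarParser.match_headers(None, entry, 'response', 'Content-Type', 'text/html', regex=False))  # False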
3,678
mrname/haralyzer
haralyzer/assets.py
HarParser.match_content_type
def match_content_type(entry, content_type, regex=True): """ Matches the content type of a request using the mimeType metadata. :param entry: ``dict`` of a single entry from a HarPage :param content_type: ``str`` of regex to use for finding content type :param regex: ``bool`` indicating whether to use regex or exact match. """ mimeType = entry['response']['content']['mimeType'] if regex and re.search(content_type, mimeType, flags=re.IGNORECASE): return True elif content_type == mimeType: return True return False
python
def match_content_type(entry, content_type, regex=True): mimeType = entry['response']['content']['mimeType'] if regex and re.search(content_type, mimeType, flags=re.IGNORECASE): return True elif content_type == mimeType: return True return False
[ "def", "match_content_type", "(", "entry", ",", "content_type", ",", "regex", "=", "True", ")", ":", "mimeType", "=", "entry", "[", "'response'", "]", "[", "'content'", "]", "[", "'mimeType'", "]", "if", "regex", "and", "re", ".", "search", "(", "content_type", ",", "mimeType", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ":", "return", "True", "elif", "content_type", "==", "mimeType", ":", "return", "True", "return", "False" ]
Matches the content type of a request using the mimeType metadata. :param entry: ``dict`` of a single entry from a HarPage :param content_type: ``str`` of regex to use for finding content type :param regex: ``bool`` indicating whether to use regex or exact match.
[ "Matches", "the", "content", "type", "of", "a", "request", "using", "the", "mimeType", "metadata", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L76-L92
3,679
mrname/haralyzer
haralyzer/assets.py
HarParser.match_status_code
def match_status_code(self, entry, status_code, regex=True): """ Helper function that returns entries with a status code matching then given `status_code` argument. NOTE: This is doing a STRING comparison NOT NUMERICAL :param entry: entry object to analyze :param status_code: ``str`` of status code to search for :param request_type: ``regex`` of request type to match """ if regex: return re.search(status_code, str(entry['response']['status'])) is not None else: return str(entry['response']['status']) == status_code
python
def match_status_code(self, entry, status_code, regex=True): if regex: return re.search(status_code, str(entry['response']['status'])) is not None else: return str(entry['response']['status']) == status_code
[ "def", "match_status_code", "(", "self", ",", "entry", ",", "status_code", ",", "regex", "=", "True", ")", ":", "if", "regex", ":", "return", "re", ".", "search", "(", "status_code", ",", "str", "(", "entry", "[", "'response'", "]", "[", "'status'", "]", ")", ")", "is", "not", "None", "else", ":", "return", "str", "(", "entry", "[", "'response'", "]", "[", "'status'", "]", ")", "==", "status_code" ]
Helper function that returns entries with a status code matching then given `status_code` argument. NOTE: This is doing a STRING comparison NOT NUMERICAL :param entry: entry object to analyze :param status_code: ``str`` of status code to search for :param request_type: ``regex`` of request type to match
[ "Helper", "function", "that", "returns", "entries", "with", "a", "status", "code", "matching", "then", "given", "status_code", "argument", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L126-L141
3,680
mrname/haralyzer
haralyzer/assets.py
HarParser.pages
def pages(self): """ This is a list of HarPage objects, each of which represents a page from the HAR file. """ # Start with a page object for unknown entries if the HAR data has # any entries with no page ID pages = [] if any('pageref' not in entry for entry in self.har_data['entries']): pages.append(HarPage('unknown', har_parser=self)) for har_page in self.har_data['pages']: page = HarPage(har_page['id'], har_parser=self) pages.append(page) return pages
python
def pages(self): # Start with a page object for unknown entries if the HAR data has # any entries with no page ID pages = [] if any('pageref' not in entry for entry in self.har_data['entries']): pages.append(HarPage('unknown', har_parser=self)) for har_page in self.har_data['pages']: page = HarPage(har_page['id'], har_parser=self) pages.append(page) return pages
[ "def", "pages", "(", "self", ")", ":", "# Start with a page object for unknown entries if the HAR data has", "# any entries with no page ID", "pages", "=", "[", "]", "if", "any", "(", "'pageref'", "not", "in", "entry", "for", "entry", "in", "self", ".", "har_data", "[", "'entries'", "]", ")", ":", "pages", ".", "append", "(", "HarPage", "(", "'unknown'", ",", "har_parser", "=", "self", ")", ")", "for", "har_page", "in", "self", ".", "har_data", "[", "'pages'", "]", ":", "page", "=", "HarPage", "(", "har_page", "[", "'id'", "]", ",", "har_parser", "=", "self", ")", "pages", ".", "append", "(", "page", ")", "return", "pages" ]
This is a list of HarPage objects, each of which represents a page from the HAR file.
[ "This", "is", "a", "list", "of", "HarPage", "objects", "each", "of", "which", "represents", "a", "page", "from", "the", "HAR", "file", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L174-L188
3,681
mrname/haralyzer
haralyzer/assets.py
HarPage.filter_entries
def filter_entries(self, request_type=None, content_type=None, status_code=None, http_version=None, regex=True): """ Returns a ``list`` of entry objects based on the filter criteria. :param request_type: ``str`` of request type (i.e. - GET or POST) :param content_type: ``str`` of regex to use for finding content type :param status_code: ``int`` of the desired status code :param http_version: ``str`` of HTTP version of request :param regex: ``bool`` indicating whether to use regex or exact match. """ results = [] for entry in self.entries: """ So yea... this is a bit ugly. We are looking for: * The request type using self._match_request_type() * The content type using self._match_headers() * The HTTP response status code using self._match_status_code() * The HTTP version using self._match_headers() Oh lords of python.... please forgive my soul """ valid_entry = True p = self.parser if request_type is not None and not p.match_request_type( entry, request_type, regex=regex): valid_entry = False if content_type is not None: if not self.parser.match_content_type(entry, content_type, regex=regex): valid_entry = False if status_code is not None and not p.match_status_code( entry, status_code, regex=regex): valid_entry = False if http_version is not None and not p.match_http_version( entry, http_version, regex=regex): valid_entry = False if valid_entry: results.append(entry) return results
python
def filter_entries(self, request_type=None, content_type=None, status_code=None, http_version=None, regex=True): results = [] for entry in self.entries: """ So yea... this is a bit ugly. We are looking for: * The request type using self._match_request_type() * The content type using self._match_headers() * The HTTP response status code using self._match_status_code() * The HTTP version using self._match_headers() Oh lords of python.... please forgive my soul """ valid_entry = True p = self.parser if request_type is not None and not p.match_request_type( entry, request_type, regex=regex): valid_entry = False if content_type is not None: if not self.parser.match_content_type(entry, content_type, regex=regex): valid_entry = False if status_code is not None and not p.match_status_code( entry, status_code, regex=regex): valid_entry = False if http_version is not None and not p.match_http_version( entry, http_version, regex=regex): valid_entry = False if valid_entry: results.append(entry) return results
[ "def", "filter_entries", "(", "self", ",", "request_type", "=", "None", ",", "content_type", "=", "None", ",", "status_code", "=", "None", ",", "http_version", "=", "None", ",", "regex", "=", "True", ")", ":", "results", "=", "[", "]", "for", "entry", "in", "self", ".", "entries", ":", "\"\"\"\n So yea... this is a bit ugly. We are looking for:\n\n * The request type using self._match_request_type()\n * The content type using self._match_headers()\n * The HTTP response status code using self._match_status_code()\n * The HTTP version using self._match_headers()\n\n Oh lords of python.... please forgive my soul\n \"\"\"", "valid_entry", "=", "True", "p", "=", "self", ".", "parser", "if", "request_type", "is", "not", "None", "and", "not", "p", ".", "match_request_type", "(", "entry", ",", "request_type", ",", "regex", "=", "regex", ")", ":", "valid_entry", "=", "False", "if", "content_type", "is", "not", "None", ":", "if", "not", "self", ".", "parser", ".", "match_content_type", "(", "entry", ",", "content_type", ",", "regex", "=", "regex", ")", ":", "valid_entry", "=", "False", "if", "status_code", "is", "not", "None", "and", "not", "p", ".", "match_status_code", "(", "entry", ",", "status_code", ",", "regex", "=", "regex", ")", ":", "valid_entry", "=", "False", "if", "http_version", "is", "not", "None", "and", "not", "p", ".", "match_http_version", "(", "entry", ",", "http_version", ",", "regex", "=", "regex", ")", ":", "valid_entry", "=", "False", "if", "valid_entry", ":", "results", ".", "append", "(", "entry", ")", "return", "results" ]
Returns a ``list`` of entry objects based on the filter criteria. :param request_type: ``str`` of request type (i.e. - GET or POST) :param content_type: ``str`` of regex to use for finding content type :param status_code: ``int`` of the desired status code :param http_version: ``str`` of HTTP version of request :param regex: ``bool`` indicating whether to use regex or exact match.
[ "Returns", "a", "list", "of", "entry", "objects", "based", "on", "the", "filter", "criteria", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L308-L351
3,682
mrname/haralyzer
haralyzer/assets.py
HarPage.get_load_time
def get_load_time(self, request_type=None, content_type=None, status_code=None, asynchronous=True, **kwargs): """ This method can return the TOTAL load time for the assets or the ACTUAL load time, the difference being that the actual load time takes asynchronous transactions into account. So, if you want the total load time, set asynchronous=False. EXAMPLE: I want to know the load time for images on a page that has two images, each of which took 2 seconds to download, but the browser downloaded them at the same time. self.get_load_time(content_types=['image']) (returns 2) self.get_load_time(content_types=['image'], asynchronous=False) (returns 4) """ entries = self.filter_entries( request_type=request_type, content_type=content_type, status_code=status_code ) if "async" in kwargs: asynchronous = kwargs['async'] if not asynchronous: time = 0 for entry in entries: time += entry['time'] return time else: return len(self.parser.create_asset_timeline(entries))
python
def get_load_time(self, request_type=None, content_type=None, status_code=None, asynchronous=True, **kwargs): entries = self.filter_entries( request_type=request_type, content_type=content_type, status_code=status_code ) if "async" in kwargs: asynchronous = kwargs['async'] if not asynchronous: time = 0 for entry in entries: time += entry['time'] return time else: return len(self.parser.create_asset_timeline(entries))
[ "def", "get_load_time", "(", "self", ",", "request_type", "=", "None", ",", "content_type", "=", "None", ",", "status_code", "=", "None", ",", "asynchronous", "=", "True", ",", "*", "*", "kwargs", ")", ":", "entries", "=", "self", ".", "filter_entries", "(", "request_type", "=", "request_type", ",", "content_type", "=", "content_type", ",", "status_code", "=", "status_code", ")", "if", "\"async\"", "in", "kwargs", ":", "asynchronous", "=", "kwargs", "[", "'async'", "]", "if", "not", "asynchronous", ":", "time", "=", "0", "for", "entry", "in", "entries", ":", "time", "+=", "entry", "[", "'time'", "]", "return", "time", "else", ":", "return", "len", "(", "self", ".", "parser", ".", "create_asset_timeline", "(", "entries", ")", ")" ]
This method can return the TOTAL load time for the assets or the ACTUAL load time, the difference being that the actual load time takes asynchronous transactions into account. So, if you want the total load time, set asynchronous=False. EXAMPLE: I want to know the load time for images on a page that has two images, each of which took 2 seconds to download, but the browser downloaded them at the same time. self.get_load_time(content_types=['image']) (returns 2) self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
[ "This", "method", "can", "return", "the", "TOTAL", "load", "time", "for", "the", "assets", "or", "the", "ACTUAL", "load", "time", "the", "difference", "being", "that", "the", "actual", "load", "time", "takes", "asynchronous", "transactions", "into", "account", ".", "So", "if", "you", "want", "the", "total", "load", "time", "set", "asynchronous", "=", "False", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L353-L384
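A hypothetical HarPage sketch tying together filter_entries and get_load_time above; the file name and page ID are placeholders:

import json
from haralyzer import HarPage

with open('capture.har') as f:
    page = HarPage('page_1', har_data=json.loads(f.read()))

# successfully fetched images, using the default regex matching
images = page.filter_entries(request_type='GET', content_type='image', status_code='2.*')
print(len(images))

# overlap-aware ("actual") vs. summed ("total") image load time, per the docstring above
print(page.get_load_time(content_type='image'))
print(page.get_load_time(content_type='image', asynchronous=False))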
3,683
mrname/haralyzer
haralyzer/assets.py
HarPage.get_total_size
def get_total_size(self, entries): """ Returns the total size of a collection of entries. :param entries: ``list`` of entries to calculate the total size of. """ size = 0 for entry in entries: if entry['response']['bodySize'] > 0: size += entry['response']['bodySize'] return size
python
def get_total_size(self, entries): size = 0 for entry in entries: if entry['response']['bodySize'] > 0: size += entry['response']['bodySize'] return size
[ "def", "get_total_size", "(", "self", ",", "entries", ")", ":", "size", "=", "0", "for", "entry", "in", "entries", ":", "if", "entry", "[", "'response'", "]", "[", "'bodySize'", "]", ">", "0", ":", "size", "+=", "entry", "[", "'response'", "]", "[", "'bodySize'", "]", "return", "size" ]
Returns the total size of a collection of entries. :param entries: ``list`` of entries to calculate the total size of.
[ "Returns", "the", "total", "size", "of", "a", "collection", "of", "entries", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L386-L396
3,684
mrname/haralyzer
haralyzer/assets.py
HarPage.get_total_size_trans
def get_total_size_trans(self, entries): """ Returns the total size of a collection of entries - transferred. NOTE: use with har file generated with chrome-har-capturer :param entries: ``list`` of entries to calculate the total size of. """ size = 0 for entry in entries: if entry['response']['_transferSize'] > 0: size += entry['response']['_transferSize'] return size
python
def get_total_size_trans(self, entries): size = 0 for entry in entries: if entry['response']['_transferSize'] > 0: size += entry['response']['_transferSize'] return size
[ "def", "get_total_size_trans", "(", "self", ",", "entries", ")", ":", "size", "=", "0", "for", "entry", "in", "entries", ":", "if", "entry", "[", "'response'", "]", "[", "'_transferSize'", "]", ">", "0", ":", "size", "+=", "entry", "[", "'response'", "]", "[", "'_transferSize'", "]", "return", "size" ]
Returns the total size of a collection of entries - transferred. NOTE: use with har file generated with chrome-har-capturer :param entries: ``list`` of entries to calculate the total size of.
[ "Returns", "the", "total", "size", "of", "a", "collection", "of", "entries", "-", "transferred", "." ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L398-L410
3,685
mrname/haralyzer
haralyzer/assets.py
HarPage.time_to_first_byte
def time_to_first_byte(self): """ Time to first byte of the page request in ms """ # The unknown page is just a placeholder for entries with no page ID. # As such, it would not have a TTFB if self.page_id == 'unknown': return None ttfb = 0 for entry in self.entries: if entry['response']['status'] == 200: for k, v in iteritems(entry['timings']): if k != 'receive': if v > 0: ttfb += v break else: ttfb += entry['time'] return ttfb
python
def time_to_first_byte(self): # The unknown page is just a placeholder for entries with no page ID. # As such, it would not have a TTFB if self.page_id == 'unknown': return None ttfb = 0 for entry in self.entries: if entry['response']['status'] == 200: for k, v in iteritems(entry['timings']): if k != 'receive': if v > 0: ttfb += v break else: ttfb += entry['time'] return ttfb
[ "def", "time_to_first_byte", "(", "self", ")", ":", "# The unknown page is just a placeholder for entries with no page ID.", "# As such, it would not have a TTFB", "if", "self", ".", "page_id", "==", "'unknown'", ":", "return", "None", "ttfb", "=", "0", "for", "entry", "in", "self", ".", "entries", ":", "if", "entry", "[", "'response'", "]", "[", "'status'", "]", "==", "200", ":", "for", "k", ",", "v", "in", "iteritems", "(", "entry", "[", "'timings'", "]", ")", ":", "if", "k", "!=", "'receive'", ":", "if", "v", ">", "0", ":", "ttfb", "+=", "v", "break", "else", ":", "ttfb", "+=", "entry", "[", "'time'", "]", "return", "ttfb" ]
Time to first byte of the page request in ms
[ "Time", "to", "first", "byte", "of", "the", "page", "request", "in", "ms" ]
5ef38b8cfc044d2dfeacf2dd4d1efb810228309d
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L449-L468
3,686
kako-nawao/django-group-by
django_group_by/group.py
AggregatedGroup._data
def _data(self): """ Cached data built from instance raw _values as a dictionary. """ d = {} # Iterate all keys and values for k, v in self._row_values.items(): # Split related model fields attrs = k.rsplit('__', 1) # Set value depending case if len(attrs) == 2: # Related model field, store nested fk, fn = attrs if fk not in d: d[fk] = {} d[fk][fn] = v else: # Own model field, store directly d[k] = v # Return (+cache) data return d
python
def _data(self): d = {} # Iterate all keys and values for k, v in self._row_values.items(): # Split related model fields attrs = k.rsplit('__', 1) # Set value depending case if len(attrs) == 2: # Related model field, store nested fk, fn = attrs if fk not in d: d[fk] = {} d[fk][fn] = v else: # Own model field, store directly d[k] = v # Return (+cache) data return d
[ "def", "_data", "(", "self", ")", ":", "d", "=", "{", "}", "# Iterate all keys and values", "for", "k", ",", "v", "in", "self", ".", "_row_values", ".", "items", "(", ")", ":", "# Split related model fields", "attrs", "=", "k", ".", "rsplit", "(", "'__'", ",", "1", ")", "# Set value depending case", "if", "len", "(", "attrs", ")", "==", "2", ":", "# Related model field, store nested", "fk", ",", "fn", "=", "attrs", "if", "fk", "not", "in", "d", ":", "d", "[", "fk", "]", "=", "{", "}", "d", "[", "fk", "]", "[", "fn", "]", "=", "v", "else", ":", "# Own model field, store directly", "d", "[", "k", "]", "=", "v", "# Return (+cache) data", "return", "d" ]
Cached data built from instance raw _values as a dictionary.
[ "Cached", "data", "built", "from", "instance", "raw", "_values", "as", "a", "dictionary", "." ]
0d901513620acebc736722b040cff83d6483789a
https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L19-L43
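A stand-alone illustration of the key-splitting that _data performs above, with a hand-written _row_values dict:

row_values = {'id': 3, 'title': 'Dune', 'author__id': 7, 'author__name': 'Herbert'}

d = {}
for k, v in row_values.items():
    attrs = k.rsplit('__', 1)
    if len(attrs) == 2:
        fk, fn = attrs              # related model prefix and field name
        d.setdefault(fk, {})[fn] = v
    else:
        d[k] = v                    # own model field

print(d)  # {'id': 3, 'title': 'Dune', 'author': {'id': 7, 'name': 'Herbert'}}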
3,687
kako-nawao/django-group-by
django_group_by/group.py
AggregatedGroup._set_values
def _set_values(self): """ Populate instance with given. """ # Iterate all keys and values in data for k, v in self._data.items(): # If it's a dict, process it (it's probably instance data) if isinstance(v, dict): try: # Get related model from field (follow path) rel_model = self._model for attr in k.split('__'): rel_model = getattr(rel_model, attr).field.related_model except AttributeError: # Not a model, maybe it is a dict field (?) pass else: # Model, first shorten field name k = k.replace('__', '_') # Now init instance if required (not if we got ID None) if 'id' in v and v['id'] is None: # This means we grouped by ID, if it's none then FK is None v = None else: # Either we have ID or we didn't group by ID, use instance v = rel_model(**v) # Set value setattr(self, k, v)
python
def _set_values(self): # Iterate all keys and values in data for k, v in self._data.items(): # If it's a dict, process it (it's probably instance data) if isinstance(v, dict): try: # Get related model from field (follow path) rel_model = self._model for attr in k.split('__'): rel_model = getattr(rel_model, attr).field.related_model except AttributeError: # Not a model, maybe it is a dict field (?) pass else: # Model, first shorten field name k = k.replace('__', '_') # Now init instance if required (not if we got ID None) if 'id' in v and v['id'] is None: # This means we grouped by ID, if it's none then FK is None v = None else: # Either we have ID or we didn't group by ID, use instance v = rel_model(**v) # Set value setattr(self, k, v)
[ "def", "_set_values", "(", "self", ")", ":", "# Iterate all keys and values in data", "for", "k", ",", "v", "in", "self", ".", "_data", ".", "items", "(", ")", ":", "# If it's a dict, process it (it's probably instance data)", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "try", ":", "# Get related model from field (follow path)", "rel_model", "=", "self", ".", "_model", "for", "attr", "in", "k", ".", "split", "(", "'__'", ")", ":", "rel_model", "=", "getattr", "(", "rel_model", ",", "attr", ")", ".", "field", ".", "related_model", "except", "AttributeError", ":", "# Not a model, maybe it is a dict field (?)", "pass", "else", ":", "# Model, first shorten field name", "k", "=", "k", ".", "replace", "(", "'__'", ",", "'_'", ")", "# Now init instance if required (not if we got ID None)", "if", "'id'", "in", "v", "and", "v", "[", "'id'", "]", "is", "None", ":", "# This means we grouped by ID, if it's none then FK is None", "v", "=", "None", "else", ":", "# Either we have ID or we didn't group by ID, use instance", "v", "=", "rel_model", "(", "*", "*", "v", ")", "# Set value", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
Populate instance with given.
[ "Populate", "instance", "with", "given", "." ]
0d901513620acebc736722b040cff83d6483789a
https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L45-L77
3,688
kako-nawao/django-group-by
django_group_by/mixin.py
GroupByMixin._expand_group_by_fields
def _expand_group_by_fields(cls, model, fields): """ Expand FK fields into all related object's fields to avoid future lookups. :param fields: fields to "group by" :return: expanded fields """ # Containers for resulting fields and related model fields res = [] related = {} # Add own fields and populate related fields for field_name in fields: if '__' in field_name: # Related model field: append to related model's fields fk_field_name, related_field = field_name.split('__', 1) if fk_field_name not in related: related[fk_field_name] = [related_field] else: related[fk_field_name].append(related_field) else: # Simple field, get the field instance model_field = model._meta.get_field(field_name) if isinstance(model_field, (ForeignKey, ManyToManyField)): # It's a related field, get model related_model = model_field.related_model # Append all its fields with the correct prefix res.extend('{}__{}'.format(field_name, f.column) for f in related_model._meta.fields) else: # It's a common field, just append it res.append(field_name) # Resolve all related fields for fk_field_name, field_names in related.items(): # Get field fk = model._meta.get_field(fk_field_name) # Get all fields for that related model related_fields = cls._expand_group_by_fields(fk.related_model, field_names) # Append them with the correct prefix res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields) # Return all fields return res
python
def _expand_group_by_fields(cls, model, fields): # Containers for resulting fields and related model fields res = [] related = {} # Add own fields and populate related fields for field_name in fields: if '__' in field_name: # Related model field: append to related model's fields fk_field_name, related_field = field_name.split('__', 1) if fk_field_name not in related: related[fk_field_name] = [related_field] else: related[fk_field_name].append(related_field) else: # Simple field, get the field instance model_field = model._meta.get_field(field_name) if isinstance(model_field, (ForeignKey, ManyToManyField)): # It's a related field, get model related_model = model_field.related_model # Append all its fields with the correct prefix res.extend('{}__{}'.format(field_name, f.column) for f in related_model._meta.fields) else: # It's a common field, just append it res.append(field_name) # Resolve all related fields for fk_field_name, field_names in related.items(): # Get field fk = model._meta.get_field(fk_field_name) # Get all fields for that related model related_fields = cls._expand_group_by_fields(fk.related_model, field_names) # Append them with the correct prefix res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields) # Return all fields return res
[ "def", "_expand_group_by_fields", "(", "cls", ",", "model", ",", "fields", ")", ":", "# Containers for resulting fields and related model fields", "res", "=", "[", "]", "related", "=", "{", "}", "# Add own fields and populate related fields", "for", "field_name", "in", "fields", ":", "if", "'__'", "in", "field_name", ":", "# Related model field: append to related model's fields", "fk_field_name", ",", "related_field", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "if", "fk_field_name", "not", "in", "related", ":", "related", "[", "fk_field_name", "]", "=", "[", "related_field", "]", "else", ":", "related", "[", "fk_field_name", "]", ".", "append", "(", "related_field", ")", "else", ":", "# Simple field, get the field instance", "model_field", "=", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "if", "isinstance", "(", "model_field", ",", "(", "ForeignKey", ",", "ManyToManyField", ")", ")", ":", "# It's a related field, get model", "related_model", "=", "model_field", ".", "related_model", "# Append all its fields with the correct prefix", "res", ".", "extend", "(", "'{}__{}'", ".", "format", "(", "field_name", ",", "f", ".", "column", ")", "for", "f", "in", "related_model", ".", "_meta", ".", "fields", ")", "else", ":", "# It's a common field, just append it", "res", ".", "append", "(", "field_name", ")", "# Resolve all related fields", "for", "fk_field_name", ",", "field_names", "in", "related", ".", "items", "(", ")", ":", "# Get field", "fk", "=", "model", ".", "_meta", ".", "get_field", "(", "fk_field_name", ")", "# Get all fields for that related model", "related_fields", "=", "cls", ".", "_expand_group_by_fields", "(", "fk", ".", "related_model", ",", "field_names", ")", "# Append them with the correct prefix", "res", ".", "extend", "(", "'{}__{}'", ".", "format", "(", "fk_field_name", ",", "f", ")", "for", "f", "in", "related_fields", ")", "# Return all fields", "return", "res" ]
Expand FK fields into all related object's fields to avoid future lookups. :param fields: fields to "group by" :return: expanded fields
[ "Expand", "FK", "fields", "into", "all", "related", "object", "s", "fields", "to", "avoid", "future", "lookups", "." ]
0d901513620acebc736722b040cff83d6483789a
https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/mixin.py#L23-L74
3,689
SiLab-Bonn/basil
basil/TL/Dummy.py
Dummy.write
def write(self, addr, data): '''Write to dummy memory Parameters ---------- addr : int The register address. data : list, tuple Data (byte array) to be written. Returns ------- nothing ''' logger.debug( "Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data)) for curr_addr, d in enumerate(data, start=addr): self.mem[curr_addr] = array.array('B', [d])[0]
python
def write(self, addr, data): '''Write to dummy memory Parameters ---------- addr : int The register address. data : list, tuple Data (byte array) to be written. Returns ------- nothing ''' logger.debug( "Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data)) for curr_addr, d in enumerate(data, start=addr): self.mem[curr_addr] = array.array('B', [d])[0]
[ "def", "write", "(", "self", ",", "addr", ",", "data", ")", ":", "logger", ".", "debug", "(", "\"Dummy SiTransferLayer.write addr: %s data: %s\"", "%", "(", "hex", "(", "addr", ")", ",", "data", ")", ")", "for", "curr_addr", ",", "d", "in", "enumerate", "(", "data", ",", "start", "=", "addr", ")", ":", "self", ".", "mem", "[", "curr_addr", "]", "=", "array", ".", "array", "(", "'B'", ",", "[", "d", "]", ")", "[", "0", "]" ]
Write to dummy memory Parameters ---------- addr : int The register address. data : list, tuple Data (byte array) to be written. Returns ------- nothing
[ "Write", "to", "dummy", "memory" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Dummy.py#L36-L53
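A stand-alone illustration of the byte-wise write loop above, using a plain dict in place of self.mem; the address and payload are arbitrary:

import array

mem = {}
addr, data = 0x1000, [0x01, 0x02, 0xFF]
for curr_addr, d in enumerate(data, start=addr):
    # each value is forced through an unsigned-byte array, as in the method above
    mem[curr_addr] = array.array('B', [d])[0]

print({hex(a): v for a, v in mem.items()})  # {'0x1000': 1, '0x1001': 2, '0x1002': 255}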
3,690
SiLab-Bonn/basil
basil/utils/BitLogic.py
BitLogic._swap_slice_indices
def _swap_slice_indices(self, slc, make_slice=False): '''Swap slice indices Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. ''' try: start = slc.start stop = slc.stop slc_step = slc.step except AttributeError: if make_slice: if slc < 0: slc += self.length() return slice(slc, slc + 1) else: return slc else: if not start and start != 0: slc_stop = self.length() elif start < 0: slc_stop = self.length() + start + 1 else: slc_stop = start + 1 if not stop and stop != 0: slc_start = 0 elif stop < 0: slc_start = self.length() + stop else: slc_start = stop return slice(slc_start, slc_stop, slc_step)
python
def _swap_slice_indices(self, slc, make_slice=False): '''Swap slice indices Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. ''' try: start = slc.start stop = slc.stop slc_step = slc.step except AttributeError: if make_slice: if slc < 0: slc += self.length() return slice(slc, slc + 1) else: return slc else: if not start and start != 0: slc_stop = self.length() elif start < 0: slc_stop = self.length() + start + 1 else: slc_stop = start + 1 if not stop and stop != 0: slc_start = 0 elif stop < 0: slc_start = self.length() + stop else: slc_start = stop return slice(slc_start, slc_stop, slc_step)
[ "def", "_swap_slice_indices", "(", "self", ",", "slc", ",", "make_slice", "=", "False", ")", ":", "try", ":", "start", "=", "slc", ".", "start", "stop", "=", "slc", ".", "stop", "slc_step", "=", "slc", ".", "step", "except", "AttributeError", ":", "if", "make_slice", ":", "if", "slc", "<", "0", ":", "slc", "+=", "self", ".", "length", "(", ")", "return", "slice", "(", "slc", ",", "slc", "+", "1", ")", "else", ":", "return", "slc", "else", ":", "if", "not", "start", "and", "start", "!=", "0", ":", "slc_stop", "=", "self", ".", "length", "(", ")", "elif", "start", "<", "0", ":", "slc_stop", "=", "self", ".", "length", "(", ")", "+", "start", "+", "1", "else", ":", "slc_stop", "=", "start", "+", "1", "if", "not", "stop", "and", "stop", "!=", "0", ":", "slc_start", "=", "0", "elif", "stop", "<", "0", ":", "slc_start", "=", "self", ".", "length", "(", ")", "+", "stop", "else", ":", "slc_start", "=", "stop", "return", "slice", "(", "slc_start", ",", "slc_stop", ",", "slc_step", ")" ]
Swap slice indices Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
[ "Swap", "slice", "indices" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L107-L136
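A stand-alone check of the index swap performed above for the plain non-negative case: a Verilog-style slice [msb:lsb] (both bounds inclusive) becomes the Python slice(lsb, msb + 1):

length = 16
msb, lsb = 15, 8                    # Verilog-style bl[15:8]
py = slice(lsb, msb + 1)            # slc_start = stop, slc_stop = start + 1 above
print(py)                           # slice(8, 16)
print(list(range(length))[py])      # bit positions 8..15, eight bits in total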
3,691
SiLab-Bonn/basil
examples/lx9/host/lx9.py
Pixel._clear_strobes
def _clear_strobes(self): """ Resets the "enable" and "load" output streams to all 0. """ #reset some stuff self['SEQ']['GLOBAL_SHIFT_EN'].setall(False) self['SEQ']['GLOBAL_CTR_LD'].setall(False) self['SEQ']['GLOBAL_DAC_LD'].setall(False) self['SEQ']['PIXEL_SHIFT_EN'].setall(False) self['SEQ']['INJECTION'].setall(False)
python
def _clear_strobes(self): #reset some stuff self['SEQ']['GLOBAL_SHIFT_EN'].setall(False) self['SEQ']['GLOBAL_CTR_LD'].setall(False) self['SEQ']['GLOBAL_DAC_LD'].setall(False) self['SEQ']['PIXEL_SHIFT_EN'].setall(False) self['SEQ']['INJECTION'].setall(False)
[ "def", "_clear_strobes", "(", "self", ")", ":", "#reset some stuff", "self", "[", "'SEQ'", "]", "[", "'GLOBAL_SHIFT_EN'", "]", ".", "setall", "(", "False", ")", "self", "[", "'SEQ'", "]", "[", "'GLOBAL_CTR_LD'", "]", ".", "setall", "(", "False", ")", "self", "[", "'SEQ'", "]", "[", "'GLOBAL_DAC_LD'", "]", ".", "setall", "(", "False", ")", "self", "[", "'SEQ'", "]", "[", "'PIXEL_SHIFT_EN'", "]", ".", "setall", "(", "False", ")", "self", "[", "'SEQ'", "]", "[", "'INJECTION'", "]", ".", "setall", "(", "False", ")" ]
Resets the "enable" and "load" output streams to all 0.
[ "Resets", "the", "enable", "and", "load", "output", "streams", "to", "all", "0", "." ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/lx9/host/lx9.py#L91-L101
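For illustration, clearing the strobes amounts to zeroing one bit vector per output track. The rough standalone sketch below uses the bitarray package; the dict, track length, and track names are chosen for illustration and are not basil's SEQ driver.

# Illustrative sketch: each strobe track is a bit vector, cleared with setall(False).
from bitarray import bitarray

tracks = {name: bitarray(16) for name in
          ('GLOBAL_SHIFT_EN', 'GLOBAL_CTR_LD', 'GLOBAL_DAC_LD', 'PIXEL_SHIFT_EN', 'INJECTION')}
for track in tracks.values():
    track.setall(False)        # drive every step of the pattern low

print(tracks['INJECTION'])     # bitarray('0000000000000000')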
3,692
SiLab-Bonn/basil
basil/HL/spi.py
spi.set_data
def set_data(self, data, addr=0):
    '''
    Sets data for outgoing stream
    '''
    if self._mem_bytes < len(data):
        raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
    self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data)
python
def set_data(self, data, addr=0):
    '''
    Sets data for outgoing stream
    '''
    if self._mem_bytes < len(data):
        raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
    self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data)
[ "def", "set_data", "(", "self", ",", "data", ",", "addr", "=", "0", ")", ":", "if", "self", ".", "_mem_bytes", "<", "len", "(", "data", ")", ":", "raise", "ValueError", "(", "'Size of data (%d bytes) is too big for memory (%d bytes)'", "%", "(", "len", "(", "data", ")", ",", "self", ".", "_mem_bytes", ")", ")", "self", ".", "_intf", ".", "write", "(", "self", ".", "_conf", "[", "'base_addr'", "]", "+", "self", ".", "_spi_mem_offset", "+", "addr", ",", "data", ")" ]
Sets data for outgoing stream
[ "Sets", "data", "for", "outgoing", "stream" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L107-L113
3,693
SiLab-Bonn/basil
basil/HL/spi.py
spi.get_data
def get_data(self, size=None, addr=None):
    '''
    Gets data for incoming stream
    '''
    # readback memory offset
    if addr is None:
        addr = self._mem_bytes
    if size and self._mem_bytes < size:
        raise ValueError('Size is too big')
    if size is None:
        return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, self._mem_bytes)
    else:
        return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, size)
python
def get_data(self, size=None, addr=None):
    '''
    Gets data for incoming stream
    '''
    # readback memory offset
    if addr is None:
        addr = self._mem_bytes
    if size and self._mem_bytes < size:
        raise ValueError('Size is too big')
    if size is None:
        return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, self._mem_bytes)
    else:
        return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, size)
[ "def", "get_data", "(", "self", ",", "size", "=", "None", ",", "addr", "=", "None", ")", ":", "# readback memory offset", "if", "addr", "is", "None", ":", "addr", "=", "self", ".", "_mem_bytes", "if", "size", "and", "self", ".", "_mem_bytes", "<", "size", ":", "raise", "ValueError", "(", "'Size is too big'", ")", "if", "size", "is", "None", ":", "return", "self", ".", "_intf", ".", "read", "(", "self", ".", "_conf", "[", "'base_addr'", "]", "+", "self", ".", "_spi_mem_offset", "+", "addr", ",", "self", ".", "_mem_bytes", ")", "else", ":", "return", "self", ".", "_intf", ".", "read", "(", "self", ".", "_conf", "[", "'base_addr'", "]", "+", "self", ".", "_spi_mem_offset", "+", "addr", ",", "size", ")" ]
Gets data for incoming stream
[ "Gets", "data", "for", "incoming", "stream" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L116-L130
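As a usage sketch, set_data() and get_data() pair up for a write/readback of the SPI memory. The 'dut' object and the 'SPI' key below are assumptions about a concrete basil Dut configuration, and triggering the transfer / waiting for completion is deliberately left out.

# Hedged usage sketch for the set_data()/get_data() pair shown above.
out_bytes = [0xCA, 0xFE, 0xBA, 0xBE]
dut['SPI'].set_data(out_bytes)                        # raises ValueError if the pattern exceeds the SPI memory
# ... trigger the transfer and wait for it to finish (omitted here) ...
in_bytes = dut['SPI'].get_data(size=len(out_bytes))   # read back the incoming stream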
3,694
SiLab-Bonn/basil
basil/HL/GPAC.py
GPAC.read_eeprom_calibration
def read_eeprom_calibration(self):  # use default values for temperature, EEPROM values are usually not calibrated and random
    '''Reading EEPROM calibration for sources and regulators
    '''
    header = self.get_format()
    if header == self.HEADER_GPAC:
        data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
        for idx, channel in enumerate(self._ch_cal.iterkeys()):
            ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
            values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
            self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)])  # values[0].strip()
            self._ch_cal[channel]['default'] = values[1]
            self._ch_cal[channel]['min'] = values[2]
            self._ch_cal[channel]['max'] = values[3]
            self._ch_cal[channel]['ADCI']['gain'] = -values[4]  # fix gain sign
            self._ch_cal[channel]['ADCI']['offset'] = values[5]
            self._ch_cal[channel]['ADCV']['gain'] = values[6]
            self._ch_cal[channel]['ADCV']['offset'] = values[7]
            self._ch_cal[channel]['DAC']['gain'] = values[8]
            self._ch_cal[channel]['DAC']['offset'] = values[9]
            self._ch_cal[channel]['limit'] = values[10]
    else:
        raise ValueError('EEPROM data format not supported (header: %s)' % header)
python
def read_eeprom_calibration(self):  # use default values for temperature, EEPROM values are usually not calibrated and random
    '''Reading EEPROM calibration for sources and regulators
    '''
    header = self.get_format()
    if header == self.HEADER_GPAC:
        data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
        for idx, channel in enumerate(self._ch_cal.iterkeys()):
            ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
            values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
            self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)])  # values[0].strip()
            self._ch_cal[channel]['default'] = values[1]
            self._ch_cal[channel]['min'] = values[2]
            self._ch_cal[channel]['max'] = values[3]
            self._ch_cal[channel]['ADCI']['gain'] = -values[4]  # fix gain sign
            self._ch_cal[channel]['ADCI']['offset'] = values[5]
            self._ch_cal[channel]['ADCV']['gain'] = values[6]
            self._ch_cal[channel]['ADCV']['offset'] = values[7]
            self._ch_cal[channel]['DAC']['gain'] = values[8]
            self._ch_cal[channel]['DAC']['offset'] = values[9]
            self._ch_cal[channel]['limit'] = values[10]
    else:
        raise ValueError('EEPROM data format not supported (header: %s)' % header)
[ "def", "read_eeprom_calibration", "(", "self", ")", ":", "# use default values for temperature, EEPROM values are usually not calibrated and random", "header", "=", "self", ".", "get_format", "(", ")", "if", "header", "==", "self", ".", "HEADER_GPAC", ":", "data", "=", "self", ".", "_read_eeprom", "(", "self", ".", "CAL_DATA_ADDR", ",", "size", "=", "calcsize", "(", "self", ".", "CAL_DATA_GPAC_FORMAT", ")", ")", "for", "idx", ",", "channel", "in", "enumerate", "(", "self", ".", "_ch_cal", ".", "iterkeys", "(", ")", ")", ":", "ch_data", "=", "data", "[", "idx", "*", "calcsize", "(", "self", ".", "CAL_DATA_CH_GPAC_FORMAT", ")", ":", "(", "idx", "+", "1", ")", "*", "calcsize", "(", "self", ".", "CAL_DATA_CH_GPAC_FORMAT", ")", "]", "values", "=", "unpack_from", "(", "self", ".", "CAL_DATA_CH_GPAC_FORMAT", ",", "ch_data", ")", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'name'", "]", "=", "\"\"", ".", "join", "(", "[", "c", "for", "c", "in", "values", "[", "0", "]", "if", "(", "c", "in", "string", ".", "printable", ")", "]", ")", "# values[0].strip()", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'default'", "]", "=", "values", "[", "1", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'min'", "]", "=", "values", "[", "2", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'max'", "]", "=", "values", "[", "3", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'gain'", "]", "=", "-", "values", "[", "4", "]", "# fix gain sign", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'offset'", "]", "=", "values", "[", "5", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCV'", "]", "[", "'gain'", "]", "=", "values", "[", "6", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCV'", "]", "[", "'offset'", "]", "=", "values", "[", "7", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DAC'", "]", "[", "'gain'", "]", "=", "values", "[", "8", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DAC'", "]", "[", "'offset'", "]", "=", "values", "[", "9", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'limit'", "]", "=", "values", "[", "10", "]", "else", ":", "raise", "ValueError", "(", "'EEPROM data format not supported (header: %s)'", "%", "header", ")" ]
Reading EEPROM calibration for sources and regulators
[ "Reading", "EEPROM", "calibration", "for", "sources", "and", "regulators" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L716-L737
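The parsing pattern above (slice the blob into fixed-size per-channel records, then unpack each with a struct format) can be demonstrated standalone. The format string and field meanings in the sketch below are made up for illustration; they are not the real GPAC EEPROM layout.

# Standalone sketch of fixed-layout record parsing with calcsize()/unpack_from().
from struct import calcsize, pack, unpack_from

CH_FORMAT = '<8s2d'                       # hypothetical: 8-byte name, default, limit
blob = pack(CH_FORMAT, b'VDDA1   ', 1.2, 2.0) + pack(CH_FORMAT, b'VDDD1   ', 1.5, 1.0)

rec_size = calcsize(CH_FORMAT)
for idx in range(len(blob) // rec_size):
    name, default, limit = unpack_from(CH_FORMAT, blob, idx * rec_size)
    print(name.rstrip(), default, limit)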
3,695
SiLab-Bonn/basil
basil/HL/GPAC.py
GPAC.get_over_current
def get_over_current(self, channel):
    '''Reading over current status of power channel
    '''
    try:
        bit = self._ch_map[channel]['GPIOOC']['bit']
    except KeyError:
        raise ValueError('get_over_current() not supported for channel %s' % channel)
    return not self._get_power_gpio_value(bit)
python
def get_over_current(self, channel):
    '''Reading over current status of power channel
    '''
    try:
        bit = self._ch_map[channel]['GPIOOC']['bit']
    except KeyError:
        raise ValueError('get_over_current() not supported for channel %s' % channel)
    return not self._get_power_gpio_value(bit)
[ "def", "get_over_current", "(", "self", ",", "channel", ")", ":", "try", ":", "bit", "=", "self", ".", "_ch_map", "[", "channel", "]", "[", "'GPIOOC'", "]", "[", "'bit'", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'get_over_current() not supported for channel %s'", "%", "channel", ")", "return", "not", "self", ".", "_get_power_gpio_value", "(", "bit", ")" ]
Reading over current status of power channel
[ "Reading", "over", "current", "status", "of", "power", "channel" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L830-L837
3,696
SiLab-Bonn/basil
basil/HL/GPAC.py
GPAC.set_current
def set_current(self, channel, value, unit='A'):
    '''Setting current of current source
    '''
    dac_offset = self._ch_cal[channel]['DAC']['offset']
    dac_gain = self._ch_cal[channel]['DAC']['gain']

    if unit == 'raw':
        value = value
    elif unit == 'A':
        value = int((-value * 1000000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'mA':
        value = int((-value * 1000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'uA':
        value = int((-value - dac_offset) / dac_gain)  # fix sign of output
    else:
        raise TypeError("Invalid unit type.")

    self._set_dac_value(channel=channel, value=value)
python
def set_current(self, channel, value, unit='A'):
    '''Setting current of current source
    '''
    dac_offset = self._ch_cal[channel]['DAC']['offset']
    dac_gain = self._ch_cal[channel]['DAC']['gain']

    if unit == 'raw':
        value = value
    elif unit == 'A':
        value = int((-value * 1000000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'mA':
        value = int((-value * 1000 - dac_offset) / dac_gain)  # fix sign of output
    elif unit == 'uA':
        value = int((-value - dac_offset) / dac_gain)  # fix sign of output
    else:
        raise TypeError("Invalid unit type.")

    self._set_dac_value(channel=channel, value=value)
[ "def", "set_current", "(", "self", ",", "channel", ",", "value", ",", "unit", "=", "'A'", ")", ":", "dac_offset", "=", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DAC'", "]", "[", "'offset'", "]", "dac_gain", "=", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DAC'", "]", "[", "'gain'", "]", "if", "unit", "==", "'raw'", ":", "value", "=", "value", "elif", "unit", "==", "'A'", ":", "value", "=", "int", "(", "(", "-", "value", "*", "1000000", "-", "dac_offset", ")", "/", "dac_gain", ")", "# fix sign of output", "elif", "unit", "==", "'mA'", ":", "value", "=", "int", "(", "(", "-", "value", "*", "1000", "-", "dac_offset", ")", "/", "dac_gain", ")", "# fix sign of output", "elif", "unit", "==", "'uA'", ":", "value", "=", "int", "(", "(", "-", "value", "-", "dac_offset", ")", "/", "dac_gain", ")", "# fix sign of output", "else", ":", "raise", "TypeError", "(", "\"Invalid unit type.\"", ")", "self", ".", "_set_dac_value", "(", "channel", "=", "channel", ",", "value", "=", "value", ")" ]
Setting current of current source
[ "Setting", "current", "of", "current", "source" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L858-L874
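The unit handling above converts the requested current to microamperes, flips the sign, and maps it to a raw DAC code through the channel's offset/gain calibration. A worked sketch of just that conversion follows; the function name and the calibration constants are made up for illustration.

# Illustrative sketch of the current-to-raw-DAC conversion (fake calibration values).
def current_to_dac_raw(value, unit, dac_offset=0.0, dac_gain=-2.0):
    scale = {'A': 1e6, 'mA': 1e3, 'uA': 1.0}
    if unit == 'raw':
        return int(value)
    if unit not in scale:
        raise TypeError("Invalid unit type.")
    return int((-value * scale[unit] - dac_offset) / dac_gain)

print(current_to_dac_raw(0.5, 'mA'))   # 0.5 mA -> 500 uA -> raw 250 with these fake constants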
3,697
SiLab-Bonn/basil
basil/HL/bram_fifo.py
bram_fifo.get_data
def get_data(self):
    ''' Reading data in BRAM.

    Returns
    -------
    array : numpy.ndarray
        Array of unsigned integers (32 bit).
    '''
    fifo_int_size_1 = self.FIFO_INT_SIZE
    fifo_int_size_2 = self.FIFO_INT_SIZE
    if fifo_int_size_1 > fifo_int_size_2:
        fifo_int_size = fifo_int_size_2  # use smaller chunk
        logger.warning("Reading wrong FIFO size. Expected: %d <= %d" % (fifo_int_size_1, fifo_int_size_2))
    else:
        fifo_int_size = fifo_int_size_1  # use smaller chunk
    return np.frombuffer(self._intf.read(self._conf['base_data_addr'], size=4 * fifo_int_size), dtype=np.dtype('<u4'))
python
def get_data(self):
    ''' Reading data in BRAM.

    Returns
    -------
    array : numpy.ndarray
        Array of unsigned integers (32 bit).
    '''
    fifo_int_size_1 = self.FIFO_INT_SIZE
    fifo_int_size_2 = self.FIFO_INT_SIZE
    if fifo_int_size_1 > fifo_int_size_2:
        fifo_int_size = fifo_int_size_2  # use smaller chunk
        logger.warning("Reading wrong FIFO size. Expected: %d <= %d" % (fifo_int_size_1, fifo_int_size_2))
    else:
        fifo_int_size = fifo_int_size_1  # use smaller chunk
    return np.frombuffer(self._intf.read(self._conf['base_data_addr'], size=4 * fifo_int_size), dtype=np.dtype('<u4'))
[ "def", "get_data", "(", "self", ")", ":", "fifo_int_size_1", "=", "self", ".", "FIFO_INT_SIZE", "fifo_int_size_2", "=", "self", ".", "FIFO_INT_SIZE", "if", "fifo_int_size_1", ">", "fifo_int_size_2", ":", "fifo_int_size", "=", "fifo_int_size_2", "# use smaller chunk", "logger", ".", "warning", "(", "\"Reading wrong FIFO size. Expected: %d <= %d\"", "%", "(", "fifo_int_size_1", ",", "fifo_int_size_2", ")", ")", "else", ":", "fifo_int_size", "=", "fifo_int_size_1", "# use smaller chunk", "return", "np", ".", "frombuffer", "(", "self", ".", "_intf", ".", "read", "(", "self", ".", "_conf", "[", "'base_data_addr'", "]", ",", "size", "=", "4", "*", "fifo_int_size", ")", ",", "dtype", "=", "np", ".", "dtype", "(", "'<u4'", ")", ")" ]
Reading data in BRAM.

Returns
-------
array : numpy.ndarray
    Array of unsigned integers (32 bit).
[ "Reading", "data", "in", "BRAM", "." ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/bram_fifo.py#L60-L75
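The final step above reinterprets the raw FIFO bytes as little-endian unsigned 32-bit words. A minimal standalone sketch of that conversion, with a hand-made byte buffer standing in for the FIFO read:

# np.frombuffer with '<u4' turns raw little-endian bytes into 32-bit words.
import numpy as np

raw = bytes([0x01, 0x00, 0x00, 0x00,    # 0x00000001
             0xEF, 0xBE, 0xAD, 0xDE])   # 0xDEADBEEF
words = np.frombuffer(raw, dtype=np.dtype('<u4'))
print([hex(int(w)) for w in words])     # ['0x1', '0xdeadbeef']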
3,698
SiLab-Bonn/basil
basil/HL/FEI4AdapterCard.py
Fei4Dcs.set_default
def set_default(self, channels=None):
    '''Setting default voltage
    '''
    if not channels:
        channels = self._ch_cal.keys()
    for channel in channels:
        self.set_voltage(channel, self._ch_cal[channel]['default'], unit='V')
python
def set_default(self, channels=None):
    '''Setting default voltage
    '''
    if not channels:
        channels = self._ch_cal.keys()
    for channel in channels:
        self.set_voltage(channel, self._ch_cal[channel]['default'], unit='V')
[ "def", "set_default", "(", "self", ",", "channels", "=", "None", ")", ":", "if", "not", "channels", ":", "channels", "=", "self", ".", "_ch_cal", ".", "keys", "(", ")", "for", "channel", "in", "channels", ":", "self", ".", "set_voltage", "(", "channel", ",", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'default'", "]", ",", "unit", "=", "'V'", ")" ]
Setting default voltage
[ "Setting", "default", "voltage" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/FEI4AdapterCard.py#L140-L146
3,699
SiLab-Bonn/basil
basil/utils/sim/Protocol.py
PickleInterface.send
def send(self, obj):
    """Prepend a 4-byte length to the string"""
    assert isinstance(obj, ProtocolBase)
    string = pickle.dumps(obj)
    length = len(string)
    self.sock.sendall(struct.pack("<I", length) + string)
python
def send(self, obj):
    assert isinstance(obj, ProtocolBase)
    string = pickle.dumps(obj)
    length = len(string)
    self.sock.sendall(struct.pack("<I", length) + string)
[ "def", "send", "(", "self", ",", "obj", ")", ":", "assert", "isinstance", "(", "obj", ",", "ProtocolBase", ")", "string", "=", "pickle", ".", "dumps", "(", "obj", ")", "length", "=", "len", "(", "string", ")", "self", ".", "sock", ".", "sendall", "(", "struct", ".", "pack", "(", "\"<I\"", ",", "length", ")", "+", "string", ")" ]
Prepend a 4-byte length to the string
[ "Prepend", "a", "4", "-", "byte", "length", "to", "the", "string" ]
99052482d9334dd1f5598eb2d2fb4d5399a32291
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L57-L62
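send() frames each pickled object with a 4-byte little-endian length prefix, so the matching receive side reads the prefix first and then exactly that many bytes. The sketch below shows one way to do that; 'recv_exact' and 'recv_obj' are illustrative helpers, not part of basil's PickleInterface.

# Hedged sketch of the receive side for the length-prefixed pickle framing above.
import pickle
import struct

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed while reading')
        buf += chunk
    return buf

def recv_obj(sock):
    (length,) = struct.unpack("<I", recv_exact(sock, 4))   # 4-byte little-endian length
    return pickle.loads(recv_exact(sock, length))          # then the pickled payload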