_id (string, 2–7 chars) | title (string, 1–88 chars) | partition (3 classes) | text (string, 31–13.1k chars) | language (1 class) | meta_information (dict)
---|---|---|---|---|---
q3600
|
Network.clip
|
train
|
def clip(self, lower=None, upper=None):
'''
Trim values at input thresholds using pandas function
'''
df = self.export_df()
|
python
|
{
"resource": ""
}
|
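The `Network.clip` row above is truncated right after `df = self.export_df()`. A minimal standalone sketch of the trimming step it describes, assuming pandas' `DataFrame.clip` is the intended call (the `export_df` helper itself is not shown here):

```python
import pandas as pd

def clip_df(df, lower=None, upper=None):
    # pandas DataFrame.clip trims values at the given thresholds
    return df.clip(lower=lower, upper=upper)

df = pd.DataFrame({'a': [-5, 0, 5], 'b': [10, -10, 2]})
print(clip_df(df, lower=-3, upper=3))
```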
q3601
|
Network.random_sample
|
train
|
def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row'):
'''
Return random sample of matrix.
'''
if df is None:
df = self.dat_to_df()
if axis == 'row':
axis = 0
if axis == 'col':
|
python
|
{
"resource": ""
}
|
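The `random_sample` row breaks off while normalizing the `axis` argument. A hedged sketch of how the sampling could be done with `DataFrame.sample`, reusing the same parameter names (whether the original delegates to `sample` is an assumption):

```python
import numpy as np
import pandas as pd

def random_sample(df, num_samples, replace=False, weights=None,
                  random_state=100, axis='row'):
    # map the snippet's 'row'/'col' strings onto pandas axis numbers
    axis = 0 if axis == 'row' else 1
    return df.sample(n=num_samples, replace=replace, weights=weights,
                     random_state=random_state, axis=axis)

df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc'))
print(random_sample(df, num_samples=2, axis='row'))
```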
q3602
|
Network.load_gene_exp_to_df
|
train
|
def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
|
python
|
{
"resource": ""
}
|
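The `load_gene_exp_to_df` row stops inside the duplicate-gene handling. One plausible completion of just that renaming step, written as a standalone helper (the `_1`, `_2` suffix scheme is an assumption):

```python
import pandas as pd

def dedupe_gene_names(ini_genes):
    # append a running index only to gene names that occur more than once
    counts = pd.Series(ini_genes).value_counts()
    duplicates = set(counts[counts > 1].index)
    dup_index, genes = {}, []
    for name in ini_genes:
        if name in duplicates:
            dup_index[name] = dup_index.get(name, 0) + 1
            genes.append(name + '_' + str(dup_index[name]))
        else:
            genes.append(name)
    return genes

print(dedupe_gene_names(['TP53', 'ACTB', 'TP53']))  # ['TP53_1', 'ACTB', 'TP53_2']
```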
q3603
|
Network.sim_same_and_diff_category_samples
|
train
|
def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
equal_var=False, plot_roc=True,
precalc_dist=False, calc_roc=True):
'''
Calculate the similarity of samples from the same and different categories. The
cat_index gives the index of the category, where 1 is the first category
'''
cols = df.columns.tolist()
if type(precalc_dist) == bool:
# compute distance between rows (transpose to get cols as rows)
dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
else:
dist_arr = precalc_dist
# generate sample names with categories
sample_combos = list(combinations(range(df.shape[1]),2))
sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)]
ser_dist = pd.Series(data=dist_arr, index=sample_names)
# find same-cat sample comparisons
same_cat = [x for x in sample_names if x.split('_')[1] == 'same']
# find diff-cat sample comparisons
diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']
# make series of same and diff category sample comparisons
ser_same = ser_dist[same_cat]
ser_same.name = 'Same Category'
ser_diff = ser_dist[diff_cat]
ser_diff.name = 'Different Category'
sim_dict = {}
roc_data = {}
sim_data = {}
sim_dict['same'] = ser_same
sim_dict['diff'] = ser_diff
pval_dict = {}
ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var)
|
python
|
{
"resource": ""
}
|
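For the similarity row above, a toy example of the `1 - pdist(..., metric='cosine')` step it builds on; only the distance call itself is taken from the snippet, the data is illustrative:

```python
import numpy as np
import pandas as pd
from itertools import combinations
from scipy.spatial.distance import pdist

# 3-sample toy matrix: similarity = 1 - cosine distance between columns
df = pd.DataFrame(np.random.rand(5, 3), columns=['s1', 's2', 's3'])
dist_arr = 1 - pdist(df.transpose(), metric='cosine')
pairs = list(combinations(range(df.shape[1]), 2))
print(len(pairs), dist_arr.shape)  # C(3, 2) = 3 pairwise similarities
```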
q3604
|
Network.generate_signatures
|
train
|
def generate_signatures(self, df_ini, category_level, pval_cutoff=0.05,
num_top_dims=False, verbose=True, equal_var=False):
''' Generate signatures for column categories '''
df_t = df_ini.transpose()
# remove columns with constant values
df_t = df_t.loc[:, (df_t != df_t.iloc[0]).any()]
df = self.row_tuple_to_multiindex(df_t)
cell_types = sorted(list(set(df.index.get_level_values(category_level).tolist())))
keep_genes = []
keep_genes_dict = {}
gene_pval_dict = {}
all_fold_info = {}
for inst_ct in cell_types:
inst_ct_mat = df.xs(key=inst_ct, level=category_level)
inst_other_mat = df.drop(inst_ct, level=category_level)
# save mean values and fold change
fold_info = {}
fold_info['cluster_mean'] = inst_ct_mat.mean()
fold_info['other_mean'] = inst_other_mat.mean()
fold_info['log2_fold'] = fold_info['cluster_mean']/fold_info['other_mean']
fold_info['log2_fold'] = fold_info['log2_fold'].apply(np.log2)
all_fold_info[inst_ct] = fold_info
inst_stats, inst_pvals = ttest_ind(inst_ct_mat, inst_other_mat, axis=0, equal_var=equal_var)
ser_pval = pd.Series(data=inst_pvals, index=df.columns.tolist()).sort_values()
if num_top_dims == False:
ser_pval_keep = ser_pval[ser_pval < pval_cutoff]
else:
ser_pval_keep = ser_pval[:num_top_dims]
gene_pval_dict[inst_ct]
|
python
|
{
"resource": ""
}
|
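The `generate_signatures` row is cut off at `gene_pval_dict[inst_ct]`. A toy sketch of the per-category t-test and p-value filtering it performs (group sizes and the cutoff are illustrative, not taken from the snippet):

```python
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind

# two toy groups of samples (rows) over 4 features (columns)
group_a = np.random.rand(10, 4)
group_b = np.random.rand(12, 4) + 0.5
_, pvals = ttest_ind(group_a, group_b, axis=0, equal_var=False)
ser_pval = pd.Series(pvals, index=['g1', 'g2', 'g3', 'g4']).sort_values()
print(ser_pval[ser_pval < 0.05])  # features kept under the p-value cutoff
```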
q3605
|
Network.predict_cats_from_sigs
|
train
|
def predict_cats_from_sigs(self, df_data_ini, df_sig_ini, dist_type='cosine', predict_level='Predict Category',
truth_level=1, unknown_thresh=-1):
''' Predict category using signature '''
keep_rows = df_sig_ini.index.tolist()
data_rows = df_data_ini.index.tolist()
common_rows = list(set(data_rows).intersection(keep_rows))
df_data = deepcopy(df_data_ini.loc[common_rows])
df_sig = deepcopy(df_sig_ini.loc[common_rows])
# calculate sim_mat of df_data and df_sig
cell_types = df_sig.columns.tolist()
barcodes = df_data.columns.tolist()
sim_mat = 1 - pairwise_distances(df_sig.transpose(), df_data.transpose(), metric=dist_type)
df_sim = pd.DataFrame(data=sim_mat, index=cell_types, columns=barcodes).transpose()
# get the top column value (most similar signature)
df_sim_top = df_sim.idxmax(axis=1)
# get the maximum similarity of a cell to a cell type definition
max_sim = df_sim.max(axis=1)
unknown_cells = max_sim[max_sim < unknown_thresh].index.tolist()
# assign unknown cells (need category of same name)
df_sim_top[unknown_cells] = 'Unknown'
# add predicted category name to top list
top_list = df_sim_top.values
top_list = [ predict_level + ': ' + x[0] if type(x) is tuple else predict_level + ': ' + x for x in top_list]
# add cell type category to input data
df_cat = deepcopy(df_data)
|
python
|
{
"resource": ""
}
|
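A self-contained sketch of the similarity-based prediction in `predict_cats_from_sigs`: cosine similarity between signature and data columns, then `idxmax` per sample. The toy shapes and labels are assumptions:

```python
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances

genes = ['g%d' % i for i in range(20)]
df_sig = pd.DataFrame(np.random.rand(20, 3), index=genes,
                      columns=['T cell', 'B cell', 'NK'])
df_data = pd.DataFrame(np.random.rand(20, 5), index=genes,
                       columns=['c%d' % i for i in range(5)])
sim_mat = 1 - pairwise_distances(df_sig.T, df_data.T, metric='cosine')
df_sim = pd.DataFrame(sim_mat, index=df_sig.columns,
                      columns=df_data.columns).T
print(df_sim.idxmax(axis=1))  # most similar signature per cell
```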
q3606
|
Network.confusion_matrix_and_correct_series
|
train
|
def confusion_matrix_and_correct_series(self, y_info):
''' Generate confusion matrix from y_info '''
a = deepcopy(y_info['true'])
true_count = dict((i, a.count(i)) for i in set(a))
a = deepcopy(y_info['pred'])
pred_count = dict((i, a.count(i)) for i in set(a))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct/float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
|
python
|
{
"resource": ""
}
|
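The confusion-matrix row ends before `ser_correct` is built. A compact example of the fraction-correct computation it has already set up (`labels=` is passed by keyword here, which newer scikit-learn requires):

```python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

y_true = ['a', 'b', 'a', 'b', 'b']
y_pred = ['a', 'b', 'b', 'b', 'b']
cats = sorted(set(y_true + y_pred))
df_conf = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=cats),
                       index=cats, columns=cats)
print(np.trace(df_conf) / float(df_conf.sum().sum()))  # fraction correct
```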
q3607
|
load_data_to_net
|
train
|
def load_data_to_net(net, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
net.dat['nodes'] = inst_net['nodes']
|
python
|
{
"resource": ""
}
|
q3608
|
export_net_json
|
train
|
def export_net_json(net, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(net.dat)
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
if 'mat_orig' in exp_dict:
exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()
elif net_type == 'viz':
exp_dict = net.viz
|
python
|
{
"resource": ""
}
|
q3609
|
install_npm
|
train
|
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):
"""Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be built if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable].
"""
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or HERE
node_modules = pjoin(node_package, 'node_modules')
is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))
npm_cmd = npm
if npm is None:
if is_yarn:
npm_cmd = ['yarn']
else:
npm_cmd = ['npm']
if not which(npm_cmd[0]):
log.error("`{0}` unavailable. If you're running this command "
"using sudo,
|
python
|
{
"resource": ""
}
|
q3610
|
_glob_pjoin
|
train
|
def _glob_pjoin(*parts):
"""Join paths for glob processing"""
if parts[0] in ('.', ''):
parts =
|
python
|
{
"resource": ""
}
|
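The `_glob_pjoin` row is truncated at `parts =`. One plausible completion, assuming the helper drops a leading `'.'`/`''` component and normalizes separators for glob patterns:

```python
import os
from os.path import join as pjoin

def glob_pjoin(*parts):
    """Join paths for glob processing (sketch of the truncated helper)."""
    if parts[0] in ('.', ''):
        parts = parts[1:]               # drop a leading '.' or '' component
    return pjoin(*parts).replace(os.sep, '/')  # glob wants forward slashes

print(glob_pjoin('.', 'src', '**', '*.js'))  # src/**/*.js
```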
q3611
|
_get_data_files
|
train
|
def _get_data_files(data_specs, existing, top=HERE):
"""Expand data file specs into valid data files metadata.
Parameters
----------
data_specs: list of tuples
See [create_cmdclass] for description.
existing: list of tuples
The existing distribution data_files metadata.
Returns
-------
A valid list of data_files items.
"""
# Extract the existing data files into a staging object.
file_data = defaultdict(list)
for (path, files) in existing or []:
file_data[path] = files
# Extract the files and assign them to the proper data
|
python
|
{
"resource": ""
}
|
q3612
|
_get_files
|
train
|
def _get_files(file_patterns, top=HERE):
"""Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
"""
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
|
python
|
{
"resource": ""
}
|
q3613
|
_get_package_data
|
train
|
def _get_package_data(root, file_patterns=None):
"""Expand file patterns to a list of `package_data` paths.
Parameters
-----------
root: str
The relative path to the package root from `HERE`.
file_patterns: list or str, optional
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should
|
python
|
{
"resource": ""
}
|
q3614
|
df_filter_row_sum
|
train
|
def df_filter_row_sum(df, threshold, take_abs=True):
''' filter rows in matrix at some threshold
and remove columns that have a sum below this threshold '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
df_copy = df_copy.transpose()
tmp_sum = df_copy.sum(axis=0)
tmp_sum = tmp_sum.abs()
tmp_sum.sort_values(inplace=True, ascending=False)
tmp_sum = tmp_sum[tmp_sum > threshold]
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
|
python
|
{
"resource": ""
}
|
q3615
|
df_filter_col_sum
|
train
|
def df_filter_col_sum(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > 0]
if take_abs is True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
|
python
|
{
"resource": ""
}
|
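A toy illustration of the sum-threshold filtering performed by `df_filter_row_sum` and `df_filter_col_sum` above (the exact post-filter steps in the originals are truncated, so this only shows the pattern):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 0, 5], 'b': [0, 0, 1], 'c': [3, 0, 0]})
threshold = 2
df_cols = df.loc[:, df.abs().sum(axis=0) > threshold]  # drop low-sum columns
df_cols = df_cols[df_cols.sum(axis=1) > 0]             # then drop all-zero rows
print(df_cols)
```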
q3616
|
infer_flags
|
train
|
def infer_flags(bytecode, is_async=False):
"""Infer the proper flags for a bytecode based on the instructions.
"""
flags = CompilerFlags(0)
if not isinstance(bytecode, (_bytecode.Bytecode,
_bytecode.ConcreteBytecode,
_bytecode.ControlFlowGraph)):
msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '
'instance not %s')
raise ValueError(msg % bytecode)
instructions = (bytecode.get_instructions()
if isinstance(bytecode, _bytecode.ControlFlowGraph) else
bytecode)
instr_names = {i.name for i in instructions
if not isinstance(i, (_bytecode.SetLineno,
_bytecode.Label))}
if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}):
flags |= CompilerFlags.OPTIMIZED
flags |= bytecode.flags & (CompilerFlags.NEWLOCALS |
CompilerFlags.VARARGS |
CompilerFlags.VARKEYWORDS |
CompilerFlags.NESTED)
if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}:
if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
flags |= CompilerFlags.GENERATOR
else:
flags |= CompilerFlags.ASYNC_GENERATOR
if
|
python
|
{
"resource": ""
}
|
q3617
|
ControlFlowGraph.to_bytecode
|
train
|
def to_bytecode(self):
"""Convert to Bytecode."""
used_blocks = set()
for block in self:
target_block = block.get_jump()
if target_block is not None:
used_blocks.add(id(target_block))
labels = {}
jumps = []
instructions = []
for block in self:
if id(block) in used_blocks:
new_label = Label()
labels[id(block)] = new_label
instructions.append(new_label)
for instr in block:
# don't copy SetLineno objects
if isinstance(instr, (Instr, ConcreteInstr)):
instr = instr.copy()
if isinstance(instr.arg, BasicBlock):
|
python
|
{
"resource": ""
}
|
q3618
|
ControlFlowGraph.to_code
|
train
|
def to_code(self, stacksize=None):
"""Convert to code."""
if stacksize is None:
stacksize = self.compute_stacksize()
|
python
|
{
"resource": ""
}
|
q3619
|
Instr.set
|
train
|
def set(self, name, arg=UNSET):
"""Modify the instruction in-place.
Replace name
|
python
|
{
"resource": ""
}
|
q3620
|
LinSpace
|
train
|
def LinSpace(start, stop, num):
"""
Linspace op.
"""
|
python
|
{
"resource": ""
}
|
q3621
|
Range
|
train
|
def Range(start, limit, delta):
"""
Range op.
"""
|
python
|
{
"resource": ""
}
|
q3622
|
RandomUniformInt
|
train
|
def RandomUniformInt(shape, minval, maxval, seed):
"""
Random uniform int op.
"""
if seed:
|
python
|
{
"resource": ""
}
|
q3623
|
Rank
|
train
|
def Rank(a):
"""
Rank op.
"""
|
python
|
{
"resource": ""
}
|
q3624
|
Squeeze
|
train
|
def Squeeze(a, squeeze_dims):
"""
Squeeze op, i.e. removes singular axes.
"""
if not squeeze_dims:
squeeze_dims =
|
python
|
{
"resource": ""
}
|
q3625
|
ExpandDims
|
train
|
def ExpandDims(a, dim):
"""
Expand dim op, i.e. add singular axis at dim.
"""
shape = list(a.shape)
|
python
|
{
"resource": ""
}
|
q3626
|
Slice
|
train
|
def Slice(a, begin, size):
"""
Slicing op.
|
python
|
{
"resource": ""
}
|
q3627
|
Split
|
train
|
def Split(axis, a, n):
"""
Split op with n splits.
"""
|
python
|
{
"resource": ""
}
|
q3628
|
SplitV
|
train
|
def SplitV(a, splits, axis):
"""
Split op with multiple split sizes.
"""
|
python
|
{
"resource": ""
}
|
q3629
|
ConcatV2
|
train
|
def ConcatV2(inputs):
"""
Concat op.
"""
axis = inputs.pop()
|
python
|
{
"resource": ""
}
|
q3630
|
Unpack
|
train
|
def Unpack(a, num, axis):
"""
Unpack op.
"""
|
python
|
{
"resource": ""
}
|
q3631
|
ReverseSequence
|
train
|
def ReverseSequence(a, seq_lengths, seq_dim, batch_dim):
"""
Sequential reverse op.
"""
r = np.copy(a)
invidxs = (len(r.shape) - 1) * [slice(None)]
if seq_dim < batch_dim:
invidxs[seq_dim] = slice(None, None, -1)
else:
invidxs[seq_dim - 1] = slice(None, None, -1)
_invidxs = tuple(invidxs)
selidxs = len(r.shape) * [slice(None)]
|
python
|
{
"resource": ""
}
|
q3632
|
ReverseV2
|
train
|
def ReverseV2(a, axes):
"""
Reverse op.
"""
idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1)
|
python
|
{
"resource": ""
}
|
q3633
|
Betainc
|
train
|
def Betainc(a, b, x):
"""
Regularized incomplete beta op.
"""
|
python
|
{
"resource": ""
}
|
q3634
|
Diag
|
train
|
def Diag(a):
"""
Diag op.
"""
r = np.zeros(2 * a.shape, dtype=a.dtype)
for idx, v in
|
python
|
{
"resource": ""
}
|
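The `Diag` row stops inside its scatter loop. A plausible completion using `np.ndenumerate`, relying on the fact that `2 * shape_tuple` and `2 * index_tuple` simply repeat the tuples:

```python
import numpy as np

def Diag(a):
    # scatter each value of `a` onto the "doubled" index of a zero tensor
    r = np.zeros(2 * a.shape, dtype=a.dtype)   # tuple * 2 repeats the shape
    for idx, v in np.ndenumerate(a):
        r[2 * idx] = v                          # e.g. (i,) -> (i, i)
    return r

print(Diag(np.array([1, 2, 3])))  # 3x3 matrix with [1, 2, 3] on the diagonal
```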
q3635
|
MatrixDiagPart
|
train
|
def MatrixDiagPart(a):
"""
Batched diag op that returns only the diagonal elements.
"""
r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
for coord in
|
python
|
{
"resource": ""
}
|
q3636
|
MatMul
|
train
|
def MatMul(a, b, transpose_a, transpose_b):
"""
Matrix multiplication op.
"""
return np.dot(a if
|
python
|
{
"resource": ""
}
|
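The `MatMul` row is cut mid-expression. A hedged completion that transposes each operand on request before `np.dot`:

```python
import numpy as np

def MatMul(a, b, transpose_a=False, transpose_b=False):
    # transpose each operand on request, then take the dot product
    return np.dot(a.T if transpose_a else a, b.T if transpose_b else b)

a = np.ones((2, 3))
b = np.ones((2, 3))
print(MatMul(a, b, transpose_b=True).shape)  # (2, 2)
```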
q3637
|
MatrixInverse
|
train
|
def MatrixInverse(a, adj):
"""
Matrix inversion op.
"""
|
python
|
{
"resource": ""
}
|
q3638
|
MatrixSolve
|
train
|
def MatrixSolve(a, rhs, adj):
"""
Matrix solve op.
"""
|
python
|
{
"resource": ""
}
|
q3639
|
MatrixTriangularSolve
|
train
|
def MatrixTriangularSolve(a, rhs, lower, adj):
"""
Matrix triangular solve op.
"""
trans = 0 if not adj else 2
r = np.empty(rhs.shape).astype(a.dtype)
|
python
|
{
"resource": ""
}
|
q3640
|
MatrixSolveLs
|
train
|
def MatrixSolveLs(a, rhs, l2_reg):
"""
Matrix least-squares solve op.
"""
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos =
|
python
|
{
"resource": ""
}
|
q3641
|
SelfAdjointEig
|
train
|
def SelfAdjointEig(a):
"""
Eigen decomp op.
"""
shape = list(a.shape)
shape[-2] += 1
|
python
|
{
"resource": ""
}
|
q3642
|
Svd
|
train
|
def Svd(a, uv, full):
"""
Singular value decomposition op.
"""
u, s, v
|
python
|
{
"resource": ""
}
|
q3643
|
Sum
|
train
|
def Sum(a, axis, keep_dims):
"""
Sum reduction op.
"""
return np.sum(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3644
|
Prod
|
train
|
def Prod(a, axis, keep_dims):
"""
Prod reduction op.
"""
return np.prod(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3645
|
Min
|
train
|
def Min(a, axis, keep_dims):
"""
Min reduction op.
"""
return np.amin(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3646
|
Max
|
train
|
def Max(a, axis, keep_dims):
"""
Max reduction op.
"""
return np.amax(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3647
|
Mean
|
train
|
def Mean(a, axis, keep_dims):
"""
Mean reduction op.
"""
return np.mean(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3648
|
All
|
train
|
def All(a, axis, keep_dims):
"""
All reduction op.
"""
return np.all(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
q3649
|
Any
|
train
|
def Any(a, axis, keep_dims):
"""
Any reduction op.
"""
return np.any(a, axis=axis if not
|
python
|
{
"resource": ""
}
|
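The seven reduction rows above (`Sum` through `Any`) all break off inside the same `axis=axis if not ...` expression. A generic sketch of the pattern they appear to share, shown for the sum case; the list/array axis handling is an assumption:

```python
import numpy as np

def Sum(a, axis, keep_dims):
    # accept scalar, list or array axes; keepdims preserves reduced dims
    if isinstance(axis, (list, np.ndarray)):
        axis = tuple(int(x) for x in axis)
    return np.sum(a, axis=axis, keepdims=keep_dims)

a = np.arange(6).reshape(2, 3)
print(Sum(a, axis=1, keep_dims=True))        # shape (2, 1)
print(Sum(a, axis=[0, 1], keep_dims=False))  # scalar 15
```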
q3650
|
SegmentSum
|
train
|
def SegmentSum(a, ids, *args):
"""
Segmented sum op.
"""
|
python
|
{
"resource": ""
}
|
q3651
|
SegmentProd
|
train
|
def SegmentProd(a, ids):
"""
Segmented prod op.
"""
|
python
|
{
"resource": ""
}
|
q3652
|
SegmentMin
|
train
|
def SegmentMin(a, ids):
"""
Segmented min op.
"""
func = lambda idxs:
|
python
|
{
"resource": ""
}
|
q3653
|
SegmentMax
|
train
|
def SegmentMax(a, ids):
"""
Segmented max op.
"""
func = lambda idxs:
|
python
|
{
"resource": ""
}
|
q3654
|
SegmentMean
|
train
|
def SegmentMean(a, ids):
"""
Segmented mean op.
"""
func = lambda
|
python
|
{
"resource": ""
}
|
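The segment-op rows are truncated at their `func = lambda idxs:` line. A simple sketch of segmented reduction over contiguous ids, shown for the sum case (the other segment ops would swap in `np.prod`, `np.min`, and so on):

```python
import numpy as np

def SegmentSum(a, ids):
    # reduce the rows of `a` that share each segment id
    ids = np.asarray(ids)
    return np.stack([np.sum(a[ids == i], axis=0) for i in np.unique(ids)])

a = np.arange(8).reshape(4, 2)
print(SegmentSum(a, ids=[0, 0, 1, 1]))  # [[2, 4], [10, 12]]
```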
q3655
|
ListDiff
|
train
|
def ListDiff(a, b):
"""
List diff op.
"""
d = np.setdiff1d(a, b)
|
python
|
{
"resource": ""
}
|
q3656
|
Unique
|
train
|
def Unique(a, t):
"""
Unique op.
"""
_, idxs, inv = np.unique(a,
|
python
|
{
"resource": ""
}
|
q3657
|
Elu
|
train
|
def Elu(a):
"""
Elu op.
"""
|
python
|
{
"resource": ""
}
|
q3658
|
Softsign
|
train
|
def Softsign(a):
"""
Softsign op.
"""
|
python
|
{
"resource": ""
}
|
q3659
|
Softmax
|
train
|
def Softmax(a):
"""
Softmax op.
"""
e = np.exp(a)
return
|
python
|
{
"resource": ""
}
|
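The `Elu`, `Softsign`, and `Softmax` rows end at or before their return statements. Minimal NumPy versions of the standard definitions; the max-shift in `Softmax` is a numerical-stability addition not present in the snippet:

```python
import numpy as np

def Elu(a):
    # identity for positive inputs, exp(a) - 1 otherwise
    return np.where(a > 0, a, np.exp(a) - 1)

def Softsign(a):
    return a / (1 + np.abs(a))

def Softmax(a):
    e = np.exp(a - np.max(a, axis=-1, keepdims=True))  # shift for stability
    return e / np.sum(e, axis=-1, keepdims=True)

x = np.array([-1.0, 0.0, 2.0])
print(Elu(x), Softsign(x), Softmax(x))
```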
q3660
|
Conv1D
|
train
|
def Conv1D(a, f, strides, padding, data_format):
"""
1D conv op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1),
patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii"))
conv
|
python
|
{
"resource": ""
}
|
q3661
|
Conv3D
|
train
|
def Conv3D(a, f, strides, padding):
"""
3D conv op.
"""
patches = _conv_patches(a, f, strides,
|
python
|
{
"resource": ""
}
|
q3662
|
AvgPool
|
train
|
def AvgPool(a, k, strides, padding, data_format):
"""
Average pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1),
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool
|
python
|
{
"resource": ""
}
|
q3663
|
MaxPool
|
train
|
def MaxPool(a, k, strides, padding, data_format):
"""
Maximum pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1),
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool
|
python
|
{
"resource": ""
}
|
q3664
|
AvgPool3D
|
train
|
def AvgPool3D(a, k, strides, padding):
"""
Average 3D pooling op.
"""
patches = _pool_patches(a,
|
python
|
{
"resource": ""
}
|
q3665
|
MaxPool3D
|
train
|
def MaxPool3D(a, k, strides, padding):
"""
Maximum 3D pooling op.
"""
|
python
|
{
"resource": ""
}
|
q3666
|
Matrix_Keypad.pressed_keys
|
train
|
def pressed_keys(self):
"""An array containing all detected keys that are pressed from the initalized
list-of-lists passed in during creation"""
# make a list of all the keys that are detected
pressed = []
# set all pins to be inputs w/pullups
for pin in self.row_pins+self.col_pins:
pin.direction = Direction.INPUT
pin.pull = Pull.UP
for row in range(len(self.row_pins)):
# set one row low at a time
self.row_pins[row].direction = Direction.OUTPUT
self.row_pins[row].value = False
|
python
|
{
"resource": ""
}
|
q3667
|
MultiHarParser.get_load_times
|
train
|
def get_load_times(self, asset_type):
"""
Just a ``list`` of the load times of a certain asset type for each page
:param asset_type: ``str`` of the asset type to return load times for
"""
load_times = []
search_str = '{0}_load_time'.format(asset_type)
|
python
|
{
"resource": ""
}
|
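`MultiHarParser.get_load_times` is truncated after building `search_str`. A plausible completion that reads the matching attribute from each page object; the `getattr` lookup and the stand-in page class are assumptions:

```python
def get_load_times(pages, asset_type):
    # collect '<asset_type>_load_time' from each page object (assumed attribute)
    search_str = '{0}_load_time'.format(asset_type)
    return [getattr(page, search_str) for page in pages]

class FakePage:                      # stand-in for a HarPage, for illustration
    image_load_time = 120

print(get_load_times([FakePage(), FakePage()], 'image'))  # [120, 120]
```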
q3668
|
MultiHarParser.get_stdev
|
train
|
def get_stdev(self, asset_type):
"""
Returns the standard deviation for a set of a certain asset type.
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: A ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision
"""
load_times = []
# Handle edge cases like TTFB
if asset_type == 'ttfb':
for page in self.pages:
if page.time_to_first_byte is not None:
load_times.append(page.time_to_first_byte)
|
python
|
{
"resource": ""
}
|
q3669
|
MultiHarParser.pages
|
train
|
def pages(self):
"""
The aggregate pages of all the parser objects.
"""
pages = []
for har_dict in self.har_data:
har_parser = HarParser(har_data=har_dict)
if self.page_id:
for page in har_parser.pages:
|
python
|
{
"resource": ""
}
|
q3670
|
MultiHarParser.time_to_first_byte
|
train
|
def time_to_first_byte(self):
"""
The aggregate time to first byte for all pages.
"""
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
|
python
|
{
"resource": ""
}
|
q3671
|
MultiHarParser.js_load_time
|
train
|
def js_load_time(self):
"""
Returns aggregate javascript load time.
"""
load_times
|
python
|
{
"resource": ""
}
|
q3672
|
MultiHarParser.css_load_time
|
train
|
def css_load_time(self):
"""
Returns aggregate css load time for all pages.
"""
load_times
|
python
|
{
"resource": ""
}
|
q3673
|
MultiHarParser.image_load_time
|
train
|
def image_load_time(self):
"""
Returns aggregate image load time for all pages.
"""
|
python
|
{
"resource": ""
}
|
q3674
|
MultiHarParser.html_load_time
|
train
|
def html_load_time(self):
"""
Returns aggregate html load time for all pages.
"""
|
python
|
{
"resource": ""
}
|
q3675
|
MultiHarParser.audio_load_time
|
train
|
def audio_load_time(self):
"""
Returns aggregate audio load time for all pages.
"""
|
python
|
{
"resource": ""
}
|
q3676
|
MultiHarParser.video_load_time
|
train
|
def video_load_time(self):
"""
Returns aggregate video load time for all pages.
"""
|
python
|
{
"resource": ""
}
|
q3677
|
HarParser.match_headers
|
train
|
def match_headers(self, entry, header_type, header, value, regex=True):
"""
Function to match headers.
Since the output of headers might use different case, like:
'content-type' vs 'Content-Type'
This function is case-insensitive
:param entry: entry object
:param header_type: ``str`` of header type. Valid values:
* 'request'
* 'response'
:param header: ``str`` of the header to search for
:param value: ``str`` of value to search for
:param regex: ``bool`` indicating whether to use regex or exact match
:returns: a ``bool`` indicating whether a match was found
"""
if header_type not in entry:
|
python
|
{
"resource": ""
}
|
q3678
|
HarParser.match_content_type
|
train
|
def match_content_type(entry, content_type, regex=True):
"""
Matches the content type of a request using the mimeType metadata.
:param entry: ``dict`` of a single entry from a HarPage
:param content_type: ``str`` of regex to use for finding content type
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
|
python
|
{
"resource": ""
}
|
q3679
|
HarParser.match_status_code
|
train
|
def match_status_code(self, entry, status_code, regex=True):
"""
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param request_type: ``regex`` of request type
|
python
|
{
"resource": ""
}
|
q3680
|
HarParser.pages
|
train
|
def pages(self):
"""
This is a list of HarPage objects, each of which represents a page
from the HAR file.
"""
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for entry in self.har_data['entries']):
|
python
|
{
"resource": ""
}
|
q3681
|
HarPage.filter_entries
|
train
|
def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True):
"""
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
results = []
for entry in self.entries:
"""
So yea... this is a bit ugly. We are looking for:
* The request type using self._match_request_type()
* The content type using self._match_headers()
* The HTTP response status code using self._match_status_code()
* The HTTP version using self._match_headers()
Oh lords of python.... please forgive my soul
"""
valid_entry = True
p = self.parser
if request_type
|
python
|
{
"resource": ""
}
|
q3682
|
HarPage.get_load_time
|
train
|
def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
"""
|
python
|
{
"resource": ""
}
|
q3683
|
HarPage.get_total_size
|
train
|
def get_total_size(self, entries):
"""
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry
|
python
|
{
"resource": ""
}
|
q3684
|
HarPage.get_total_size_trans
|
train
|
def get_total_size_trans(self, entries):
"""
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
"""
|
python
|
{
"resource": ""
}
|
q3685
|
HarPage.time_to_first_byte
|
train
|
def time_to_first_byte(self):
"""
Time to first byte of the page request in ms
"""
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for entry in self.entries:
if entry['response']['status'] == 200:
for k, v in
|
python
|
{
"resource": ""
}
|
q3686
|
AggregatedGroup._data
|
train
|
def _data(self):
"""
Cached data built from instance raw _values as a dictionary.
"""
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depending case
if len(attrs) == 2:
# Related model field, store nested
|
python
|
{
"resource": ""
}
|
q3687
|
AggregatedGroup._set_values
|
train
|
def _set_values(self):
"""
Populate instance with given data.
"""
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
# Get related model from field (follow path)
rel_model = self._model
for attr in k.split('__'):
rel_model = getattr(rel_model, attr).field.related_model
except AttributeError:
# Not a model, maybe it is a dict field (?)
pass
else:
# Model, first shorten field name
k = k.replace('__', '_')
|
python
|
{
"resource": ""
}
|
q3688
|
GroupByMixin._expand_group_by_fields
|
train
|
def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res = []
related = {}
# Add own fields and populate related fields
for field_name in fields:
if '__' in field_name:
# Related model field: append to related model's fields
fk_field_name, related_field = field_name.split('__', 1)
if fk_field_name not in related:
related[fk_field_name] = [related_field]
else:
related[fk_field_name].append(related_field)
else:
|
python
|
{
"resource": ""
}
|
q3689
|
Dummy.write
|
train
|
def write(self, addr, data):
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
|
python
|
{
"resource": ""
}
|
q3690
|
BitLogic._swap_slice_indices
|
train
|
def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
|
python
|
{
"resource": ""
}
|
q3691
|
Pixel._clear_strobes
|
train
|
def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
|
python
|
{
"resource": ""
}
|
q3692
|
spi.set_data
|
train
|
def set_data(self, data, addr=0):
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is
|
python
|
{
"resource": ""
}
|
q3693
|
spi.get_data
|
train
|
def get_data(self, size=None, addr=None):
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
|
python
|
{
"resource": ""
}
|
q3694
|
GPAC.read_eeprom_calibration
|
train
|
def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
|
python
|
{
"resource": ""
}
|
q3695
|
GPAC.get_over_current
|
train
|
def get_over_current(self, channel):
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
|
python
|
{
"resource": ""
}
|
q3696
|
GPAC.set_current
|
train
|
def set_current(self, channel, value, unit='A'):
'''Setting current of current source
'''
dac_offset = self._ch_cal[channel]['DAC']['offset']
dac_gain = self._ch_cal[channel]['DAC']['gain']
if unit == 'raw':
value = value
elif unit == 'A':
value = int((-value * 1000000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'mA':
value = int((-value * 1000 - dac_offset) / dac_gain) # fix sign of output
|
python
|
{
"resource": ""
}
|
q3697
|
bram_fifo.get_data
|
train
|
def get_data(self):
''' Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_int_size_1 = self.FIFO_INT_SIZE
fifo_int_size_2 = self.FIFO_INT_SIZE
if fifo_int_size_1 > fifo_int_size_2:
fifo_int_size = fifo_int_size_2 # use smaller chunk
logger.warning("Reading
|
python
|
{
"resource": ""
}
|
q3698
|
Fei4Dcs.set_default
|
train
|
def set_default(self, channels=None):
'''Setting default voltage
'''
if not channels:
channels = self._ch_cal.keys()
|
python
|
{
"resource": ""
}
|
q3699
|
PickleInterface.send
|
train
|
def send(self, obj):
"""Prepend a 4-byte length to the string"""
assert isinstance(obj, ProtocolBase)
string = pickle.dumps(obj)
|
python
|
{
"resource": ""
}
|
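`PickleInterface.send` stops right after pickling. A hedged sketch of length-prefixed framing over a socket; the 4-byte big-endian prefix and the matching receive side are assumptions beyond what the docstring states:

```python
import pickle
import struct

def send_with_length(sock, obj):
    # 4-byte big-endian length prefix followed by the pickled payload
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack('>I', len(payload)) + payload)

def recv_with_length(sock):
    (length,) = struct.unpack('>I', sock.recv(4))
    return pickle.loads(sock.recv(length))
```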