_id: string (length 2–7)
title: string (length 1–88)
partition: string (3 classes)
text: string (length 75–19.8k)
language: string (1 class)
meta_information: dict
q1000
store
train
def store(data, arr, start=0, stop=None, offset=0, blen=None):
    """Copy `data` block-wise into `arr`."""

    # setup
    blen = _util.get_blen_array(data, blen)
    if stop is None:
        stop = len(data)
    else:
        stop = min(stop, len(data))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    for bi in range(start, stop, blen):
        bj = min(bi + blen, stop)
        bl = bj - bi
        arr[offset:offset+bl] = data[bi:bj]
        offset += bl
python
{ "resource": "" }
q1001
copy
train
def copy(data, start=0, stop=None, blen=None, storage=None, create='array',
         **kwargs):
    """Copy `data` block-wise into a new array."""

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    if stop is None:
        stop = len(data)
    else:
        stop = min(stop, len(data))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    out = None
    for i in range(start, stop, blen):
        j = min(i + blen, stop)
        block = data[i:j]
        if out is None:
            out = getattr(storage, create)(block, expectedlen=length,
                                           **kwargs)
        else:
            out.append(block)

    return out
python
{ "resource": "" }
q1002
copy_table
train
def copy_table(tbl, start=0, stop=None, blen=None, storage=None,
               create='table', **kwargs):
    """Copy `tbl` block-wise into a new table."""

    # setup
    names, columns = _util.check_table_like(tbl)
    storage = _util.get_storage(storage)
    blen = _util.get_blen_table(tbl, blen)
    if stop is None:
        stop = len(columns[0])
    else:
        stop = min(stop, len(columns[0]))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    out = None
    for i in range(start, stop, blen):
        j = min(i + blen, stop)
        res = [c[i:j] for c in columns]
        if out is None:
            out = getattr(storage, create)(res, names=names,
                                           expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
python
{ "resource": "" }
q1003
map_blocks
train
def map_blocks(data, f, blen=None, storage=None, create='array', **kwargs):
    """Apply function `f` block-wise over `data`."""

    # setup
    storage = _util.get_storage(storage)
    if isinstance(data, tuple):
        blen = max(_util.get_blen_array(d, blen) for d in data)
    else:
        blen = _util.get_blen_array(data, blen)
    if isinstance(data, tuple):
        _util.check_equal_length(*data)
        length = len(data[0])
    else:
        length = len(data)

    # block-wise iteration
    out = None
    for i in range(0, length, blen):
        j = min(i + blen, length)

        # obtain blocks
        if isinstance(data, tuple):
            blocks = [d[i:j] for d in data]
        else:
            blocks = [data[i:j]]

        # map
        res = f(*blocks)

        # store
        if out is None:
            out = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
python
{ "resource": "" }
q1004
reduce_axis
train
def reduce_axis(data, reducer, block_reducer, mapper=None, axis=None,
                blen=None, storage=None, create='array', **kwargs):
    """Apply an operation to `data` that reduces over one or more axes."""

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    # normalise axis arg
    if isinstance(axis, int):
        axis = (axis,)

    # deal with 'out' kwarg if supplied, can arise if a chunked array is
    # passed as an argument to numpy.sum(), see also
    # https://github.com/cggh/scikit-allel/issues/66
    kwarg_out = kwargs.pop('out', None)
    if kwarg_out is not None:
        raise ValueError('keyword argument "out" is not supported')

    if axis is None or 0 in axis:
        # two-step reduction
        out = None
        for i in range(0, length, blen):
            j = min(i + blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            res = reducer(block, axis=axis)
            if out is None:
                out = res
            else:
                out = block_reducer(out, res)
        if np.isscalar(out):
            return out
        elif len(out.shape) == 0:
            return out[()]
        else:
            return getattr(storage, create)(out, **kwargs)

    else:
        # first dimension is preserved, no need to reduce blocks
        out = None
        for i in range(0, length, blen):
            j = min(i + blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            r = reducer(block, axis=axis)
            if out is None:
                out = getattr(storage, create)(r, expectedlen=length,
                                               **kwargs)
            else:
                out.append(r)
        return out
python
{ "resource": "" }
q1005
amax
train
def amax(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the maximum value."""
    return reduce_axis(data, axis=axis, reducer=np.amax,
                       block_reducer=np.maximum, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
python
{ "resource": "" }
q1006
amin
train
def amin(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the minimum value."""
    return reduce_axis(data, axis=axis, reducer=np.amin,
                       block_reducer=np.minimum, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
python
{ "resource": "" }
q1007
asum
train
def asum(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the sum."""
    return reduce_axis(data, axis=axis, reducer=np.sum,
                       block_reducer=np.add, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
python
{ "resource": "" }
q1008
count_nonzero
train
def count_nonzero(data, mapper=None, blen=None, storage=None,
                  create='array', **kwargs):
    """Count the number of non-zero elements."""
    return reduce_axis(data, reducer=np.count_nonzero,
                       block_reducer=np.add, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
python
{ "resource": "" }
q1009
subset
train
def subset(data, sel0=None, sel1=None, blen=None, storage=None,
           create='array', **kwargs):
    """Return selected rows and columns of an array."""

    # TODO refactor sel0 and sel1 normalization with ndarray.subset

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    if sel0 is not None:
        sel0 = np.asanyarray(sel0)
    if sel1 is not None:
        sel1 = np.asanyarray(sel1)

    # ensure boolean array for dim 0
    if sel0 is not None and sel0.dtype.kind != 'b':
        # assume indices, convert to boolean condition
        tmp = np.zeros(length, dtype=bool)
        tmp[sel0] = True
        sel0 = tmp

    # ensure indices for dim 1
    if sel1 is not None and sel1.dtype.kind == 'b':
        # assume boolean condition, convert to indices
        sel1, = np.nonzero(sel1)

    # shortcuts
    if sel0 is None and sel1 is None:
        return copy(data, blen=blen, storage=storage, create=create,
                    **kwargs)
    elif sel1 is None:
        return compress(sel0, data, axis=0, blen=blen, storage=storage,
                        create=create, **kwargs)
    elif sel0 is None:
        return take(data, sel1, axis=1, blen=blen, storage=storage,
                    create=create, **kwargs)

    # build output
    sel0_nnz = count_nonzero(sel0)
    out = None
    for i in range(0, length, blen):
        j = min(i + blen, length)
        bsel0 = sel0[i:j]
        # don't access data unless we have to
        if np.any(bsel0):
            block = data[i:j]
            res = _numpy_subset(block, bsel0, sel1)
            if out is None:
                out = getattr(storage, create)(res, expectedlen=sel0_nnz,
                                               **kwargs)
            else:
                out.append(res)

    return out
python
{ "resource": "" }
q1010
binary_op
train
def binary_op(data, op, other, blen=None, storage=None, create='array',
              **kwargs):
    """Compute a binary operation block-wise over `data`."""

    # normalise scalars
    if hasattr(other, 'shape') and len(other.shape) == 0:
        other = other[()]

    if np.isscalar(other):

        def f(block):
            return op(block, other)

        return map_blocks(data, f, blen=blen, storage=storage,
                          create=create, **kwargs)

    elif len(data) == len(other):

        def f(a, b):
            return op(a, b)

        return map_blocks((data, other), f, blen=blen, storage=storage,
                          create=create, **kwargs)

    else:
        raise NotImplementedError('argument type not supported')
python
{ "resource": "" }
q1011
eval_table
train
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
               create='array', vm_kwargs=None, **kwargs):
    """Evaluate `expression` against columns of a table."""

    # setup
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    if vm_kwargs is None:
        vm_kwargs = dict()

    # setup vm
    if vm == 'numexpr':
        import numexpr
        evaluate = numexpr.evaluate
    elif vm == 'python':
        # noinspection PyUnusedLocal
        def evaluate(expr, local_dict=None, **kw):
            # takes no keyword arguments
            return eval(expr, dict(), local_dict)
    else:
        raise ValueError('expected vm either "numexpr" or "python"')

    # compile expression and get required columns
    variables = _get_expression_variables(expression, vm)
    required_columns = {v: columns[names.index(v)] for v in variables}

    # determine block size for evaluation
    blen = _util.get_blen_table(required_columns, blen=blen)

    # build output
    out = None
    for i in range(0, length, blen):
        j = min(i + blen, length)
        blocals = {v: c[i:j] for v, c in required_columns.items()}
        res = evaluate(expression, local_dict=blocals, **vm_kwargs)
        if out is None:
            out = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
python
{ "resource": "" }
q1012
create_allele_mapping
train
def create_allele_mapping(ref, alt, alleles, dtype='i1'):
    """Create an array mapping variant alleles into a different allele
    index system.

    Parameters
    ----------
    ref : array_like, S1, shape (n_variants,)
        Reference alleles.
    alt : array_like, S1, shape (n_variants, n_alt_alleles)
        Alternate alleles.
    alleles : array_like, S1, shape (n_variants, n_alleles)
        Alleles defining the new allele indexing.
    dtype : dtype, optional
        Output dtype.

    Returns
    -------
    mapping : ndarray, int8, shape (n_variants, n_alt_alleles + 1)

    Examples
    --------
    Example with biallelic variants::

        >>> import allel
        >>> ref = [b'A', b'C', b'T', b'G']
        >>> alt = [b'T', b'G', b'C', b'A']
        >>> alleles = [[b'A', b'T'],  # no transformation
        ...            [b'G', b'C'],  # swap
        ...            [b'T', b'A'],  # 1 missing
        ...            [b'A', b'C']]  # 1 missing
        >>> mapping = allel.create_allele_mapping(ref, alt, alleles)
        >>> mapping
        array([[ 0,  1],
               [ 1,  0],
               [ 0, -1],
               [-1,  0]], dtype=int8)

    Example with multiallelic variants::

        >>> ref = [b'A', b'C', b'T']
        >>> alt = [[b'T', b'G'],
        ...        [b'A', b'T'],
        ...        [b'G', b'.']]
        >>> alleles = [[b'A', b'T'],
        ...            [b'C', b'T'],
        ...            [b'G', b'A']]
        >>> mapping = allel.create_allele_mapping(ref, alt, alleles)
        >>> mapping
        array([[ 0,  1, -1],
               [ 0, -1,  1],
               [-1,  0, -1]], dtype=int8)

    See Also
    --------
    GenotypeArray.map_alleles, HaplotypeArray.map_alleles,
    AlleleCountsArray.map_alleles

    """
    ref = asarray_ndim(ref, 1)
    alt = asarray_ndim(alt, 1, 2)
    alleles = asarray_ndim(alleles, 1, 2)
    check_dim0_aligned(ref, alt, alleles)

    # reshape for convenience
    ref = ref[:, None]
    if alt.ndim == 1:
        alt = alt[:, None]
    if alleles.ndim == 1:
        alleles = alleles[:, None]
    source_alleles = np.append(ref, alt, axis=1)

    # setup output array
    out = np.empty(source_alleles.shape, dtype=dtype)
    out.fill(-1)

    # find matches
    for ai in range(source_alleles.shape[1]):
        match = source_alleles[:, ai, None] == alleles
        match_i, match_j = match.nonzero()
        out[match_i, ai] = match_j

    return out
python
{ "resource": "" }
q1013
locate_fixed_differences
train
def locate_fixed_differences(ac1, ac2):
    """Locate variants with no shared alleles between two populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants,)

    See Also
    --------
    allel.stats.diversity.windowed_df

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
    ...                          [[0, 1], [0, 1], [0, 1], [0, 1]],
    ...                          [[0, 1], [0, 1], [1, 1], [1, 1]],
    ...                          [[0, 0], [0, 0], [1, 1], [2, 2]],
    ...                          [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
    >>> ac1 = g.count_alleles(subpop=[0, 1])
    >>> ac2 = g.count_alleles(subpop=[2, 3])
    >>> loc_df = allel.locate_fixed_differences(ac1, ac2)
    >>> loc_df
    array([ True, False, False,  True,  True])

    """
    # check inputs
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    check_dim0_aligned(ac1, ac2)
    ac1, ac2 = ensure_dim1_aligned(ac1, ac2)

    # stack allele counts for convenience
    pac = np.dstack([ac1, ac2])

    # count numbers of alleles called in each population
    pan = np.sum(pac, axis=1)

    # count the numbers of populations with each allele
    npa = np.sum(pac > 0, axis=2)

    # locate variants with allele calls in both populations
    non_missing = np.all(pan > 0, axis=1)

    # locate variants where all alleles are only found in a single
    # population
    no_shared_alleles = np.all(npa <= 1, axis=1)

    return non_missing & no_shared_alleles
python
{ "resource": "" }
q1014
locate_private_alleles
train
def locate_private_alleles(*acs):
    """Locate alleles that are found only in a single population.

    Parameters
    ----------
    *acs : array_like, int, shape (n_variants, n_alleles)
        Allele counts arrays from each population.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants, n_alleles)
        Boolean array where elements are True if allele is private to a
        single population.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
    ...                          [[0, 1], [0, 1], [0, 1], [0, 1]],
    ...                          [[0, 1], [0, 1], [1, 1], [1, 1]],
    ...                          [[0, 0], [0, 0], [1, 1], [2, 2]],
    ...                          [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
    >>> ac1 = g.count_alleles(subpop=[0, 1])
    >>> ac2 = g.count_alleles(subpop=[2])
    >>> ac3 = g.count_alleles(subpop=[3])
    >>> loc_private_alleles = allel.locate_private_alleles(ac1, ac2, ac3)
    >>> loc_private_alleles
    array([[ True, False, False],
           [False, False, False],
           [ True, False, False],
           [ True,  True,  True],
           [ True,  True, False]])
    >>> loc_private_variants = np.any(loc_private_alleles, axis=1)
    >>> loc_private_variants
    array([ True, False,  True,  True,  True])

    """
    # check inputs
    acs = [asarray_ndim(ac, 2) for ac in acs]
    check_dim0_aligned(*acs)
    acs = ensure_dim1_aligned(*acs)

    # stack allele counts for convenience
    pac = np.dstack(acs)

    # count the numbers of populations with each allele
    npa = np.sum(pac > 0, axis=2)

    # locate alleles found only in a single population
    loc_pa = npa == 1

    return loc_pa
python
{ "resource": "" }
q1015
patterson_fst
train
def patterson_fst(aca, acb):
    """Estimator of differentiation between populations A and B based on
    the F2 parameter.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.

    Returns
    -------
    num : ndarray, shape (n_variants,), float
        Numerator.
    den : ndarray, shape (n_variants,), float
        Denominator.

    Notes
    -----
    See Patterson (2012), Appendix A.

    TODO check if this is numerically equivalent to Hudson's estimator.

    """
    from allel.stats.admixture import patterson_f2, h_hat
    num = patterson_f2(aca, acb)
    den = num + h_hat(aca) + h_hat(acb)
    return num, den
python
{ "resource": "" }
q1016
average_weir_cockerham_fst
train
def average_weir_cockerham_fst(g, subpops, blen, max_allele=None):
    """Estimate average Fst and standard error using the block-jackknife.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    blen : int
        Block size (number of variants).
    max_allele : int, optional
        The highest allele index to consider.

    Returns
    -------
    fst : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.

    """
    # calculate per-variant values
    a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)

    # calculate overall estimate
    a_sum = np.nansum(a)
    b_sum = np.nansum(b)
    c_sum = np.nansum(c)
    fst = a_sum / (a_sum + b_sum + c_sum)

    # compute the numerator and denominator within each block
    num_bsum = moving_statistic(a, statistic=np.nansum, size=blen)
    den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen)

    # calculate the statistic values in each block
    vb = num_bsum / den_bsum

    # estimate standard error
    _, se, vj = jackknife((num_bsum, den_bsum),
                          statistic=lambda n, d: np.sum(n) / np.sum(d))

    return fst, se, vb, vj
python
{ "resource": "" }
q1017
plot_pairwise_ld
train
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None):
    """Plot a matrix of genotype linkage disequilibrium values between
    all pairs of variants.

    Parameters
    ----------
    m : array_like
        Array of linkage disequilibrium values in condensed form.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    import matplotlib.pyplot as plt

    # check inputs
    m_square = ensure_square(m)

    # blank out lower triangle and flip up/down
    m_square = np.tril(m_square)[::-1, :]

    # set up axes
    if ax is None:
        # make a square figure with enough pixels to represent each variant
        x = m_square.shape[0] / plt.rcParams['figure.dpi']
        x = max(x, plt.rcParams['figure.figsize'][0])
        fig, ax = plt.subplots(figsize=(x, x))
        fig.tight_layout(pad=0)

    # setup imshow arguments
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('cmap', 'Greys')
    imshow_kwargs.setdefault('vmin', 0)
    imshow_kwargs.setdefault('vmax', 1)

    # plot as image
    im = ax.imshow(m_square, **imshow_kwargs)

    # tidy up
    ax.set_xticks([])
    ax.set_yticks([])
    for s in 'bottom', 'right':
        ax.spines[s].set_visible(False)
    if colorbar:
        plt.gcf().colorbar(im, shrink=.5, pad=0)

    return ax
python
{ "resource": "" }
q1018
array_to_hdf5
train
def array_to_hdf5(a, parent, name, **kwargs):
    """Write a Numpy array to an HDF5 dataset.

    Parameters
    ----------
    a : ndarray
        Data to write.
    parent : string or h5py group
        Parent HDF5 file or group. If a string, will be treated as HDF5
        file name.
    name : string
        Name or path of dataset to write data into.
    kwargs : keyword arguments
        Passed through to h5py require_dataset() function.

    Returns
    -------
    h5d : h5py dataset

    """
    import h5py

    h5f = None

    if isinstance(parent, str):
        h5f = h5py.File(parent, mode='a')
        parent = h5f

    try:

        kwargs.setdefault('chunks', True)  # auto-chunking
        kwargs.setdefault('dtype', a.dtype)
        kwargs.setdefault('compression', 'gzip')
        h5d = parent.require_dataset(name, shape=a.shape, **kwargs)
        h5d[...] = a
        return h5d

    finally:
        if h5f is not None:
            h5f.close()
python
{ "resource": "" }
q1019
recarray_from_hdf5_group
train
def recarray_from_hdf5_group(*args, **kwargs):
    """Load a recarray from columns stored as separate datasets within an
    HDF5 group.

    Either provide an h5py group as a single positional argument, or
    provide two positional arguments giving the HDF5 file path and the
    group node path within the file.

    The following optional parameters may be given.

    Parameters
    ----------
    start : int, optional
        Index to start loading from.
    stop : int, optional
        Index to finish loading at.
    condition : array_like, bool, optional
        A 1-dimensional boolean array of the same length as the columns of
        the table to load, indicating a selection of rows to load.

    """
    import h5py

    h5f = None

    if len(args) == 1:
        group = args[0]

    elif len(args) == 2:
        file_path, node_path = args
        h5f = h5py.File(file_path, mode='r')
        try:
            group = h5f[node_path]
        except Exception as e:
            h5f.close()
            raise e

    else:
        raise ValueError('bad arguments; expected group or (file_path, '
                         'node_path), found %s' % repr(args))

    try:

        if not isinstance(group, h5py.Group):
            raise ValueError('expected group, found %r' % group)

        # determine dataset names to load
        available_dataset_names = [n for n in group.keys()
                                   if isinstance(group[n], h5py.Dataset)]
        names = kwargs.pop('names', available_dataset_names)
        names = [str(n) for n in names]  # needed for PY2
        for n in names:
            if n not in set(group.keys()):
                raise ValueError('name not found: %s' % n)
            if not isinstance(group[n], h5py.Dataset):
                raise ValueError('name does not refer to a dataset: %s, %r'
                                 % (n, group[n]))

        # check datasets are aligned
        datasets = [group[n] for n in names]
        length = datasets[0].shape[0]
        for d in datasets[1:]:
            if d.shape[0] != length:
                raise ValueError('datasets must be of equal length')

        # determine start and stop parameters for load
        start = kwargs.pop('start', 0)
        stop = kwargs.pop('stop', length)

        # check condition
        condition = kwargs.pop('condition', None)  # type: np.ndarray
        condition = asarray_ndim(condition, 1, allow_none=True)
        if condition is not None and condition.size != length:
            raise ValueError('length of condition does not match length '
                             'of datasets')

        # setup output data
        dtype = [(n, d.dtype, d.shape[1:]) for n, d in zip(names, datasets)]
        # size the output for the rows actually loaded
        if condition is not None:
            n_rows = np.count_nonzero(condition[start:stop])
        else:
            n_rows = stop - start
        ra = np.empty(n_rows, dtype=dtype)

        for n, d in zip(names, datasets):
            a = d[start:stop]
            if condition is not None:
                a = np.compress(condition[start:stop], a, axis=0)
            ra[n] = a

        return ra

    finally:
        if h5f is not None:
            h5f.close()
python
{ "resource": "" }
q1020
recarray_to_hdf5_group
train
def recarray_to_hdf5_group(ra, parent, name, **kwargs):
    """Write each column in a recarray to a dataset in an HDF5 group.

    Parameters
    ----------
    ra : recarray
        Numpy recarray to store.
    parent : string or h5py group
        Parent HDF5 file or group. If a string, will be treated as HDF5
        file name.
    name : string
        Name or path of group to write data into.
    kwargs : keyword arguments
        Passed through to h5py require_dataset() function.

    Returns
    -------
    h5g : h5py group

    """
    import h5py

    h5f = None

    if isinstance(parent, str):
        h5f = h5py.File(parent, mode='a')
        parent = h5f

    try:

        h5g = parent.require_group(name)
        for n in ra.dtype.names:
            array_to_hdf5(ra[n], h5g, n, **kwargs)

        return h5g

    finally:
        if h5f is not None:
            h5f.close()
python
{ "resource": "" }
q1021
subset
train
def subset(data, sel0, sel1):
    """Apply selections on first and second axes."""

    # check inputs
    data = np.asarray(data)
    if data.ndim < 2:
        raise ValueError('data must have 2 or more dimensions')
    sel0 = asarray_ndim(sel0, 1, allow_none=True)
    sel1 = asarray_ndim(sel1, 1, allow_none=True)

    # ensure indices
    if sel0 is not None and sel0.dtype.kind == 'b':
        sel0, = np.nonzero(sel0)
    if sel1 is not None and sel1.dtype.kind == 'b':
        sel1, = np.nonzero(sel1)

    # ensure leading dimension indices can be broadcast correctly
    if sel0 is not None and sel1 is not None:
        sel0 = sel0[:, np.newaxis]

    # deal with None arguments
    if sel0 is None:
        sel0 = _total_slice
    if sel1 is None:
        sel1 = _total_slice

    return data[sel0, sel1]
python
{ "resource": "" }
q1022
NumpyRecArrayWrapper.eval
train
def eval(self, expression, vm='python'):
    """Evaluate an expression against the table columns.

    Parameters
    ----------
    expression : string
        Expression to evaluate.
    vm : {'numexpr', 'python'}
        Virtual machine to use.

    Returns
    -------
    result : ndarray

    """
    if vm == 'numexpr':
        import numexpr as ne
        return ne.evaluate(expression, local_dict=self)
    else:
        if PY2:
            # locals must be a mapping
            m = {k: self[k] for k in self.dtype.names}
        else:
            m = self
        return eval(expression, dict(), m)
python
{ "resource": "" }
q1023
NumpyRecArrayWrapper.query
train
def query(self, expression, vm='python'):
    """Evaluate expression and then use it to extract rows from the table.

    Parameters
    ----------
    expression : string
        Expression to evaluate.
    vm : {'numexpr', 'python'}
        Virtual machine to use.

    Returns
    -------
    result : structured array

    """
    condition = self.eval(expression, vm=vm)
    return self.compress(condition)
python
{ "resource": "" }
q1024
Genotypes.fill_masked
train
def fill_masked(self, value=-1, copy=True):
    """Fill masked genotype calls with a given value.

    Parameters
    ----------
    value : int, optional
        The fill value.
    copy : bool, optional
        If False, modify the array in place.

    Returns
    -------
    g : GenotypeArray

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[0, 2], [-1, -1]]], dtype='i1')
    >>> mask = [[True, False], [False, True], [False, False]]
    >>> g.mask = mask
    >>> g.fill_masked().values
    array([[[-1, -1],
            [ 0,  1]],
           [[ 0,  1],
            [-1, -1]],
           [[ 0,  2],
            [-1, -1]]], dtype=int8)

    """
    if self.mask is None:
        raise ValueError('no mask is set')

    # apply the mask
    data = np.array(self.values, copy=copy)
    data[self.mask, ...] = value

    if copy:
        out = type(self)(data)  # wrap
        out.is_phased = self.is_phased
        # don't set mask because it has been filled in
    else:
        out = self
        out.mask = None  # reset mask

    return out
python
{ "resource": "" }
q1025
Genotypes.is_called
train
def is_called(self):
    """Find non-missing genotype calls.

    Returns
    -------
    out : ndarray, bool, shape (n_variants, n_samples)
        Array where elements are True if the genotype call matches the
        condition.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[0, 2], [-1, -1]]])
    >>> g.is_called()
    array([[ True,  True],
           [ True,  True],
           [ True, False]])
    >>> v = g[:, 1]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/1 1/1 ./.
    >>> v.is_called()
    array([ True,  True, False])

    """
    out = np.all(self.values >= 0, axis=-1)

    # handle mask
    if self.mask is not None:
        out &= ~self.mask

    return out
python
{ "resource": "" }
q1026
Genotypes.is_missing
train
def is_missing(self):
    """Find missing genotype calls.

    Returns
    -------
    out : ndarray, bool, shape (n_variants, n_samples)
        Array where elements are True if the genotype call matches the
        condition.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[0, 2], [-1, -1]]])
    >>> g.is_missing()
    array([[False, False],
           [False, False],
           [False,  True]])
    >>> v = g[:, 1]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/1 1/1 ./.
    >>> v.is_missing()
    array([False, False,  True])

    """
    out = np.any(self.values < 0, axis=-1)

    # handle mask
    if self.mask is not None:
        out |= self.mask

    return out
python
{ "resource": "" }
q1027
Genotypes.is_hom
train
def is_hom(self, allele=None):
    """Find genotype calls that are homozygous.

    Parameters
    ----------
    allele : int, optional
        Allele index.

    Returns
    -------
    out : ndarray, bool, shape (n_variants, n_samples)
        Array where elements are True if the genotype call matches the
        condition.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> g.is_hom()
    array([[ True, False],
           [False,  True],
           [ True, False]])
    >>> g.is_hom(allele=1)
    array([[False, False],
           [False,  True],
           [False, False]])
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/0 0/1 2/2
    >>> v.is_hom()
    array([ True, False,  True])

    """
    if allele is None:
        allele1 = self.values[..., 0, np.newaxis]
        other_alleles = self.values[..., 1:]
        tmp = (allele1 >= 0) & (allele1 == other_alleles)
        out = np.all(tmp, axis=-1)
    else:
        out = np.all(self.values == allele, axis=-1)

    # handle mask
    if self.mask is not None:
        out &= ~self.mask

    return out
python
{ "resource": "" }
q1028
Genotypes.is_het
train
def is_het(self, allele=None):
    """Find genotype calls that are heterozygous.

    Parameters
    ----------
    allele : int, optional
        Heterozygous allele.

    Returns
    -------
    out : ndarray, bool, shape (n_variants, n_samples)
        Array where elements are True if the genotype call matches the
        condition.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[0, 2], [-1, -1]]])
    >>> g.is_het()
    array([[False,  True],
           [ True, False],
           [ True, False]])
    >>> g.is_het(2)
    array([[False, False],
           [False, False],
           [ True, False]])
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/0 0/1 0/2
    >>> v.is_het()
    array([False,  True,  True])

    """
    allele1 = self.values[..., 0, np.newaxis]  # type: np.ndarray
    other_alleles = self.values[..., 1:]  # type: np.ndarray
    out = np.all(self.values >= 0, axis=-1) & \
        np.any(allele1 != other_alleles, axis=-1)
    if allele is not None:
        out &= np.any(self.values == allele, axis=-1)

    # handle mask
    if self.mask is not None:
        out &= ~self.mask

    return out
python
{ "resource": "" }
q1029
Genotypes.is_call
train
def is_call(self, call):
    """Locate genotypes with a given call.

    Parameters
    ----------
    call : array_like, int, shape (ploidy,)
        The genotype call to find.

    Returns
    -------
    out : ndarray, bool, shape (n_variants, n_samples)
        Array where elements are True if the genotype is `call`.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[0, 2], [-1, -1]]])
    >>> g.is_call((0, 2))
    array([[False, False],
           [False, False],
           [ True, False]])
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/0 0/1 0/2
    >>> v.is_call((0, 2))
    array([False, False,  True])

    """
    # guard conditions
    if not len(call) == self.shape[-1]:
        raise ValueError('invalid call ploidy: %s' % repr(call))

    if self.ndim == 2:
        call = np.asarray(call)[np.newaxis, :]
    else:
        call = np.asarray(call)[np.newaxis, np.newaxis, :]

    out = np.all(self.values == call, axis=-1)

    # handle mask
    if self.mask is not None:
        out &= ~self.mask

    return out
python
{ "resource": "" }
q1030
Genotypes.count_called
train
def count_called(self, axis=None):
    """Count called genotypes.

    Parameters
    ----------
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_called()
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1031
Genotypes.count_missing
train
def count_missing(self, axis=None):
    """Count missing genotypes.

    Parameters
    ----------
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_missing()
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1032
Genotypes.count_hom
train
def count_hom(self, allele=None, axis=None):
    """Count homozygous genotypes.

    Parameters
    ----------
    allele : int, optional
        Allele index.
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_hom(allele=allele)
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1033
Genotypes.count_hom_ref
train
def count_hom_ref(self, axis=None):
    """Count homozygous reference genotypes.

    Parameters
    ----------
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_hom_ref()
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1034
Genotypes.count_hom_alt
train
def count_hom_alt(self, axis=None):
    """Count homozygous alternate genotypes.

    Parameters
    ----------
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_hom_alt()
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1035
Genotypes.count_het
train
def count_het(self, allele=None, axis=None):
    """Count heterozygous genotypes.

    Parameters
    ----------
    allele : int, optional
        Allele index.
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_het(allele=allele)
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1036
Genotypes.count_call
train
def count_call(self, call, axis=None):
    """Count genotypes with a given call.

    Parameters
    ----------
    call : array_like, int, shape (ploidy,)
        The genotype call to find.
    axis : int, optional
        Axis over which to count, or None to perform overall count.

    """
    b = self.is_call(call=call)
    return np.sum(b, axis=axis)
python
{ "resource": "" }
q1037
Genotypes.to_n_ref
train
def to_n_ref(self, fill=0, dtype='i1'):
    """Transform each genotype call into the number of reference alleles.

    Parameters
    ----------
    fill : int, optional
        Use this value to represent missing calls.
    dtype : dtype, optional
        Output dtype.

    Returns
    -------
    out : ndarray, int8, shape (n_variants, n_samples)
        Array of ref alleles per genotype call.

    Notes
    -----
    By default this function returns 0 for missing genotype calls
    **and** for homozygous non-reference genotype calls. Use the `fill`
    argument to change how missing calls are represented.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> g.to_n_ref()
    array([[2, 1],
           [1, 0],
           [0, 0]], dtype=int8)
    >>> g.to_n_ref(fill=-1)
    array([[ 2,  1],
           [ 1,  0],
           [ 0, -1]], dtype=int8)
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/0 0/2 2/2
    >>> v.to_n_ref()
    array([2, 1, 0], dtype=int8)

    """
    # count number of reference alleles
    out = np.empty(self.shape[:-1], dtype=dtype)
    np.sum(self.values == 0, axis=-1, out=out)

    # fill missing calls
    if fill != 0:
        m = self.is_missing()
        out[m] = fill

    # handle mask
    if self.mask is not None:
        out[self.mask] = fill

    return out
python
{ "resource": "" }
q1038
Genotypes.to_allele_counts
train
def to_allele_counts(self, max_allele=None, dtype='u1'):
    """Transform genotype calls into allele counts per call.

    Parameters
    ----------
    max_allele : int, optional
        Highest allele index. Provide this value to speed up computation.
    dtype : dtype, optional
        Output dtype.

    Returns
    -------
    out : ndarray, uint8, shape (n_variants, n_samples, len(alleles))
        Array of allele counts per call.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> g.to_allele_counts()
    <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8>
    2:0:0 1:1:0
    1:0:1 0:2:0
    0:0:2 0:0:0
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(3, 2) dtype=int64>
    0/0 0/2 2/2
    >>> v.to_allele_counts()
    <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8>
    2:0:0 1:0:1 0:0:2

    """
    # determine alleles to count
    if max_allele is None:
        max_allele = self.max()
    alleles = list(range(max_allele + 1))

    # set up output array
    outshape = self.shape[:-1] + (len(alleles),)
    out = np.zeros(outshape, dtype=dtype)

    for allele in alleles:
        # count alleles along ploidy dimension
        allele_match = self.values == allele
        if self.mask is not None:
            allele_match &= ~self.mask[..., np.newaxis]
        np.sum(allele_match, axis=-1, out=out[..., allele])

    if self.ndim == 2:
        out = GenotypeAlleleCountsVector(out)
    elif self.ndim == 3:
        out = GenotypeAlleleCountsArray(out)

    return out
python
{ "resource": "" }
q1039
Genotypes.to_gt
train
def to_gt(self, max_allele=None):
    """Convert genotype calls to VCF-style string representation.

    Parameters
    ----------
    max_allele : int, optional
        Highest allele index. Provide this value to speed up computation.

    Returns
    -------
    gt : ndarray, string, shape (n_variants, n_samples)

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[1, 2], [2, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> g.to_gt()
    chararray([[b'0/0', b'0/1'],
               [b'0/2', b'1/1'],
               [b'1/2', b'2/1'],
               [b'2/2', b'./.']], dtype='|S3')
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(4, 2) dtype=int64>
    0/0 0/2 1/2 2/2
    >>> v.to_gt()
    chararray([b'0/0', b'0/2', b'1/2', b'2/2'], dtype='|S3')
    >>> g.is_phased = np.ones(g.shape[:-1], dtype=bool)
    >>> g.to_gt()
    chararray([[b'0|0', b'0|1'],
               [b'0|2', b'1|1'],
               [b'1|2', b'2|1'],
               [b'2|2', b'.|.']], dtype='|S3')
    >>> v = g[:, 0]
    >>> v
    <GenotypeVector shape=(4, 2) dtype=int64>
    0|0 0|2 1|2 2|2
    >>> v.to_gt()
    chararray([b'0|0', b'0|2', b'1|2', b'2|2'], dtype='|S3')

    """
    # how many characters needed per allele call?
    if max_allele is None:
        max_allele = np.max(self)
    if max_allele <= 0:
        max_allele = 1
    nchar = int(np.floor(np.log10(max_allele))) + 1

    # convert to string
    a = self.astype((np.string_, nchar)).view(np.chararray)

    # recode missing alleles
    a[self < 0] = b'.'
    if self.mask is not None:
        a[self.mask] = b'.'

    # determine allele call separator
    if self.is_phased is None:
        sep = b'/'
    else:
        sep = np.empty(self.shape[:-1], dtype='S1').view(np.chararray)
        sep[self.is_phased] = b'|'
        sep[~self.is_phased] = b'/'

    # join via separator, coping with any ploidy
    gt = a[..., 0]
    for i in range(1, self.ploidy):
        gt = gt + sep + a[..., i]

    return gt
python
{ "resource": "" }
q1040
GenotypeArray.to_packed
train
def to_packed(self, boundscheck=True):
    """Pack diploid genotypes into a single byte for each genotype,
    using the left-most 4 bits for the first allele and the right-most
    4 bits for the second allele. Allows single byte encoding of diploid
    genotypes for variants with up to 15 alleles.

    Parameters
    ----------
    boundscheck : bool, optional
        If False, do not check that minimum and maximum alleles are
        compatible with bit-packing.

    Returns
    -------
    packed : ndarray, uint8, shape (n_variants, n_samples)
        Bit-packed genotype array.

    Notes
    -----
    If a mask has been set, it is ignored by this function.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[2, 2], [-1, -1]]], dtype='i1')
    >>> g.to_packed()
    array([[  0,   1],
           [  2,  17],
           [ 34, 239]], dtype=uint8)

    """
    check_ploidy(self.ploidy, 2)

    if boundscheck:
        amx = self.max()
        if amx > 14:
            raise ValueError('max allele for packing is 14, found %s' % amx)
        amn = self.min()
        if amn < -1:
            raise ValueError('min allele for packing is -1, found %s' % amn)

    # pack data
    values = memoryview_safe(self.values)
    packed = genotype_array_pack_diploid(values)

    return packed
python
{ "resource": "" }
q1041
GenotypeArray.from_packed
train
def from_packed(cls, packed):
    """Unpack diploid genotypes that have been bit-packed into single
    bytes.

    Parameters
    ----------
    packed : ndarray, uint8, shape (n_variants, n_samples)
        Bit-packed diploid genotype array.

    Returns
    -------
    g : GenotypeArray, shape (n_variants, n_samples, 2)
        Genotype array.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> packed = np.array([[0, 1],
    ...                    [2, 17],
    ...                    [34, 239]], dtype='u1')
    >>> allel.GenotypeArray.from_packed(packed)
    <GenotypeArray shape=(3, 2, 2) dtype=int8>
    0/0 0/1
    0/2 1/1
    2/2 ./.

    """
    # check arguments
    packed = np.asarray(packed)
    check_ndim(packed, 2)
    check_dtype(packed, 'u1')
    packed = memoryview_safe(packed)

    data = genotype_array_unpack_diploid(packed)
    return cls(data)
python
{ "resource": "" }
q1042
GenotypeArray.from_sparse
train
def from_sparse(m, ploidy, order=None, out=None):
    """Construct a genotype array from a sparse matrix.

    Parameters
    ----------
    m : scipy.sparse.spmatrix
        Sparse matrix
    ploidy : int
        The sample ploidy.
    order : {'C', 'F'}, optional
        Whether to store data in C (row-major) or Fortran (column-major)
        order in memory.
    out : ndarray, shape (n_variants, n_samples), optional
        Use this array as the output buffer.

    Returns
    -------
    g : GenotypeArray, shape (n_variants, n_samples, ploidy)
        Genotype array.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> import scipy.sparse
    >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8)
    >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32)
    >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32)
    >>> m = scipy.sparse.csr_matrix((data, indices, indptr))
    >>> g = allel.GenotypeArray.from_sparse(m, ploidy=2)
    >>> g
    <GenotypeArray shape=(4, 2, 2) dtype=int8>
    0/0 0/0
    0/1 0/1
    1/1 0/0
    0/0 ./.

    """
    h = HaplotypeArray.from_sparse(m, order=order, out=out)
    g = h.to_genotypes(ploidy=ploidy)
    return g
python
{ "resource": "" }
q1043
GenotypeArray.haploidify_samples
train
def haploidify_samples(self):
    """Construct a pseudo-haplotype for each sample by randomly
    selecting an allele from each genotype call.

    Returns
    -------
    h : HaplotypeArray

    Notes
    -----
    If a mask has been set, it is ignored by this function.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> np.random.seed(42)
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[1, 2], [2, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> g.haploidify_samples()
    <HaplotypeArray shape=(4, 2) dtype=int64>
    0 1
    0 1
    1 1
    2 .
    >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]],
    ...                          [[0, 1, 1], [1, 1, 1]],
    ...                          [[0, 1, 2], [-1, -1, -1]]])
    >>> g.haploidify_samples()
    <HaplotypeArray shape=(3, 2) dtype=int64>
    0 0
    1 1
    2 .

    """
    # N.B., this implementation is obscure and uses more memory than
    # necessary, TODO review

    # define the range of possible indices, e.g., diploid => (0, 1)
    index_range = np.arange(0, self.ploidy, dtype='u1')

    # create a random index for each genotype call
    indices = np.random.choice(index_range, size=self.n_calls,
                               replace=True)

    # reshape genotype data so it's suitable for passing to np.choose
    # by merging the variants and samples dimensions
    choices = self.reshape(-1, self.ploidy).T

    # now use random indices to haploidify
    data = np.choose(indices, choices)

    # reshape the haploidified data to restore the variants and samples
    # dimensions
    data = data.reshape((self.n_variants, self.n_samples))

    # view as haplotype array
    h = HaplotypeArray(data, copy=False)

    return h
python
{ "resource": "" }
q1044
GenotypeArray.count_alleles_subpops
train
def count_alleles_subpops(self, subpops, max_allele=None):
    """Count alleles for multiple subpopulations simultaneously.

    Parameters
    ----------
    subpops : dict (string -> sequence of ints)
        Mapping of subpopulation names to sample indices.
    max_allele : int, optional
        The highest allele index to count. Alleles above this will be
        ignored.

    Returns
    -------
    out : dict (string -> AlleleCountsArray)
        A mapping of subpopulation names to allele counts arrays.

    """
    if max_allele is None:
        max_allele = self.max()

    out = {name: self.count_alleles(max_allele=max_allele, subpop=subpop)
           for name, subpop in subpops.items()}

    return out
python
{ "resource": "" }
q1045
HaplotypeArray.subset
train
def subset(self, sel0=None, sel1=None):
    """Make a sub-selection of variants and haplotypes.

    Parameters
    ----------
    sel0 : array_like
        Boolean array or array of indices selecting variants.
    sel1 : array_like
        Boolean array or array of indices selecting haplotypes.

    Returns
    -------
    out : HaplotypeArray

    See Also
    --------
    HaplotypeArray.take, HaplotypeArray.compress

    """
    return subset_haplotype_array(self, sel0, sel1, cls=type(self),
                                  subset=subset)
python
{ "resource": "" }
q1046
HaplotypeArray.concatenate
train
def concatenate(self, others, axis=0):
    """Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    others : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.

    Returns
    -------
    res : ndarray
        The concatenated array.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 1],
    ...                           [0, 1, 1, 1],
    ...                           [0, 2, -1, -1]], dtype='i1')
    >>> h.concatenate([h], axis=0)
    <HaplotypeArray shape=(6, 4) dtype=int8>
    0 0 0 1
    0 1 1 1
    0 2 . .
    0 0 0 1
    0 1 1 1
    0 2 . .
    >>> h.concatenate([h], axis=1)
    <HaplotypeArray shape=(3, 8) dtype=int8>
    0 0 0 1 0 0 0 1
    0 1 1 1 0 1 1 1
    0 2 . . 0 2 . .

    """
    return concatenate_haplotype_array(self, others, axis=axis,
                                       cls=type(self),
                                       concatenate=np.concatenate)
python
{ "resource": "" }
q1047
HaplotypeArray.to_genotypes
train
def to_genotypes(self, ploidy, copy=False):
    """Reshape a haplotype array to view it as genotypes by restoring
    the ploidy dimension.

    Parameters
    ----------
    ploidy : int
        The sample ploidy.
    copy : bool, optional
        If True, make a copy of the data.

    Returns
    -------
    g : ndarray, int, shape (n_variants, n_samples, ploidy)
        Genotype array (sharing same underlying buffer).

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 1],
    ...                           [0, 1, 1, 1],
    ...                           [0, 2, -1, -1]], dtype='i1')
    >>> h.to_genotypes(ploidy=2)
    <GenotypeArray shape=(3, 2, 2) dtype=int8>
    0/0 0/1
    0/1 1/1
    0/2 ./.

    """
    # check ploidy is compatible
    if (self.shape[1] % ploidy) > 0:
        raise ValueError('incompatible ploidy')

    # reshape
    newshape = (self.shape[0], -1, ploidy)
    data = self.reshape(newshape)

    # wrap
    g = GenotypeArray(data, copy=copy)

    return g
python
{ "resource": "" }
q1048
HaplotypeArray.from_sparse
train
def from_sparse(m, order=None, out=None):
    """Construct a haplotype array from a sparse matrix.

    Parameters
    ----------
    m : scipy.sparse.spmatrix
        Sparse matrix
    order : {'C', 'F'}, optional
        Whether to store data in C (row-major) or Fortran (column-major)
        order in memory.
    out : ndarray, shape (n_variants, n_samples), optional
        Use this array as the output buffer.

    Returns
    -------
    h : HaplotypeArray, shape (n_variants, n_haplotypes)
        Haplotype array.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> import scipy.sparse
    >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8)
    >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32)
    >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32)
    >>> m = scipy.sparse.csr_matrix((data, indices, indptr))
    >>> h = allel.HaplotypeArray.from_sparse(m)
    >>> h
    <HaplotypeArray shape=(4, 4) dtype=int8>
    0 0 0 0
    0 1 0 1
    1 1 0 0
    0 0 . .

    """
    import scipy.sparse

    # check arguments
    if not scipy.sparse.isspmatrix(m):
        raise ValueError('not a sparse matrix: %r' % m)

    # convert to dense array
    data = m.toarray(order=order, out=out)

    # wrap
    h = HaplotypeArray(data)

    return h
python
{ "resource": "" }
q1049
HaplotypeArray.distinct
train
def distinct(self):
    """Return sets of indices for each distinct haplotype."""

    # setup collection
    d = collections.defaultdict(set)

    # iterate over haplotypes
    for i in range(self.shape[1]):

        # hash the haplotype
        k = hash(self.values[:, i].tobytes())

        # collect
        d[k].add(i)

    # extract sets, sorted by most common
    return sorted(d.values(), key=len, reverse=True)
python
{ "resource": "" }
q1050
HaplotypeArray.distinct_counts
train
def distinct_counts(self):
    """Return counts for each distinct haplotype."""

    # hash the haplotypes
    k = [hash(self.values[:, i].tobytes()) for i in range(self.shape[1])]

    # count and sort
    # noinspection PyArgumentList
    counts = sorted(collections.Counter(k).values(), reverse=True)

    return np.asarray(counts)
python
{ "resource": "" }
q1051
HaplotypeArray.distinct_frequencies
train
def distinct_frequencies(self):
    """Return frequencies for each distinct haplotype."""
    c = self.distinct_counts()
    n = self.shape[1]
    return c / n
python
{ "resource": "" }
q1052
AlleleCountsArray.to_frequencies
train
def to_frequencies(self, fill=np.nan):
    """Compute allele frequencies.

    Parameters
    ----------
    fill : float, optional
        Value to use when number of allele calls is 0.

    Returns
    -------
    af : ndarray, float, shape (n_variants, n_alleles)

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> ac.to_frequencies()
    array([[0.75, 0.25, 0.  ],
           [0.25, 0.5 , 0.25],
           [0.  , 0.  , 1.  ]])

    """
    an = np.sum(self, axis=1)[:, None]
    with ignore_invalid():
        af = np.where(an > 0, self / an, fill)
    return af
python
{ "resource": "" }
q1053
AlleleCountsArray.max_allele
train
def max_allele(self):
    """Return the highest allele index for each variant.

    Returns
    -------
    n : ndarray, int, shape (n_variants,)
        Allele index array.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
    ...                          [[0, 2], [1, 1]],
    ...                          [[2, 2], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> ac.max_allele()
    array([1, 2, 2], dtype=int8)

    """
    out = np.empty(self.shape[0], dtype='i1')
    out.fill(-1)
    for i in range(self.shape[1]):
        d = self.values[:, i] > 0
        out[d] = i
    return out
python
{ "resource": "" }
q1054
SortedIndex.is_unique
train
def is_unique(self):
    """True if no duplicate entries."""
    if self._is_unique is None:
        t = self.values[:-1] == self.values[1:]  # type: np.ndarray
        self._is_unique = ~np.any(t)
    return self._is_unique
python
{ "resource": "" }
q1055
SortedIndex.intersect
train
def intersect(self, other):
    """Intersect with `other` sorted index.

    Parameters
    ----------
    other : array_like, int
        Array of values to intersect with.

    Returns
    -------
    out : SortedIndex
        Values in common.

    Examples
    --------
    >>> import allel
    >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> idx2 = allel.SortedIndex([4, 6, 20, 39])
    >>> idx1.intersect(idx2)
    <SortedIndex shape=(2,) dtype=int64>
    [6, 20]

    """
    loc = self.locate_keys(other, strict=False)
    return self.compress(loc, axis=0)
python
{ "resource": "" }
q1056
SortedIndex.locate_intersection_ranges
train
def locate_intersection_ranges(self, starts, stops):
    """Locate the intersection with a set of ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.
    loc_ranges : ndarray, bool
        Boolean array with location of ranges containing one or more
        entries.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc, loc_ranges = idx.locate_intersection_ranges(starts, stops)
    >>> loc
    array([False,  True,  True, False,  True])
    >>> loc_ranges
    array([False,  True, False,  True, False])
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]
    >>> ranges[loc_ranges]
    array([[ 6, 17],
           [31, 35]])

    """
    # check inputs
    starts = asarray_ndim(starts, 1)
    stops = asarray_ndim(stops, 1)
    check_dim0_aligned(starts, stops)

    # find indices of start and stop values in idx
    start_indices = np.searchsorted(self, starts)
    stop_indices = np.searchsorted(self, stops, side='right')

    # find intervals overlapping at least one value
    loc_ranges = start_indices < stop_indices

    # find values within at least one interval
    # N.B., use the builtin bool dtype; np.bool was removed from numpy
    loc = np.zeros(self.shape, dtype=bool)
    for i, j in zip(start_indices[loc_ranges], stop_indices[loc_ranges]):
        loc[i:j] = True

    return loc, loc_ranges
python
{ "resource": "" }
q1057
SortedIndex.locate_ranges
train
def locate_ranges(self, starts, stops, strict=True):
    """Locate items within the given ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.
    strict : bool, optional
        If True, raise KeyError if any ranges contain no entries.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc = idx.locate_ranges(starts, stops, strict=False)
    >>> loc
    array([False,  True,  True, False,  True])
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]

    """
    loc, found = self.locate_intersection_ranges(starts, stops)

    if strict and np.any(~found):
        raise KeyError(starts[~found], stops[~found])

    return loc
python
{ "resource": "" }
q1058
SortedIndex.intersect_ranges
train
def intersect_ranges(self, starts, stops):
    """Intersect with a set of ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.

    Returns
    -------
    idx : SortedIndex

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> idx.intersect_ranges(starts, stops)
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]

    """
    loc = self.locate_ranges(starts, stops, strict=False)
    return self.compress(loc, axis=0)
python
{ "resource": "" }
q1059
VariantTable.set_index
train
def set_index(self, index):
    """Set or reset the index.

    Parameters
    ----------
    index : string or pair of strings, optional
        Names of columns to use for positional index, e.g., 'POS' if
        table contains a 'POS' column and records from a single
        chromosome/contig, or ('CHROM', 'POS') if table contains records
        from multiple chromosomes/contigs.

    """
    if index is None:
        pass
    elif isinstance(index, str):
        index = SortedIndex(self[index], copy=False)
    elif isinstance(index, (tuple, list)) and len(index) == 2:
        index = SortedMultiIndex(self[index[0]], self[index[1]],
                                 copy=False)
    else:
        raise ValueError('invalid index argument, expected string or '
                         'pair of strings, found %s' % repr(index))
    self.index = index
python
{ "resource": "" }
q1060
VariantTable.query_position
train
def query_position(self, chrom=None, position=None):
    """Query the table, returning row or rows matching the given genomic
    position.

    Parameters
    ----------
    chrom : string, optional
        Chromosome/contig.
    position : int, optional
        Position (1-based).

    Returns
    -------
    result : row or VariantTable

    """
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_key(position)
    else:
        loc = self.index.locate_key(chrom, position)
    return self[loc]
python
{ "resource": "" }
q1061
VariantTable.query_region
train
def query_region(self, chrom=None, start=None, stop=None):
    """Query the table, returning row or rows within the given genomic
    region.

    Parameters
    ----------
    chrom : string, optional
        Chromosome/contig.
    start : int, optional
        Region start position (1-based).
    stop : int, optional
        Region stop position (1-based).

    Returns
    -------
    result : VariantTable

    """
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_range(start, stop)
    else:
        loc = self.index.locate_range(chrom, start, stop)
    return self[loc]
python
{ "resource": "" }
q1062
FeatureTable.to_mask
train
def to_mask(self, size, start_name='start', stop_name='end'):
    """Construct a mask array where elements are True if they fall within
    features in the table.

    Parameters
    ----------
    size : int
        Size of chromosome/contig.
    start_name : string, optional
        Name of column with start coordinates.
    stop_name : string, optional
        Name of column with stop coordinates.

    Returns
    -------
    mask : ndarray, bool

    """
    m = np.zeros(size, dtype=bool)
    for start, stop in self[[start_name, stop_name]]:
        m[start-1:stop] = True
    return m
python
{ "resource": "" }
q1063
FeatureTable.from_gff3
train
def from_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', dtype=None):
    """Read a feature table from a GFF3 format file.

    Parameters
    ----------
    path : string
        File path.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    dtype : numpy dtype, optional
        Manually specify a dtype.

    Returns
    -------
    ft : FeatureTable

    """
    a = gff3_to_recarray(path, attributes=attributes, region=region,
                         score_fill=score_fill, phase_fill=phase_fill,
                         attributes_fill=attributes_fill, dtype=dtype)
    if a is None:
        return None
    else:
        return FeatureTable(a, copy=False)
python
{ "resource": "" }
q1064
pcoa
train
def pcoa(dist):
    """Perform principal coordinate analysis of a distance matrix,
    a.k.a. classical multi-dimensional scaling.

    Parameters
    ----------
    dist : array_like
        Distance matrix in condensed form.

    Returns
    -------
    coords : ndarray, shape (n_samples, n_dimensions)
        Transformed coordinates for the samples.
    explained_ratio : ndarray, shape (n_dimensions)
        Variance explained by each dimension.

    """
    import scipy.linalg

    # This implementation is based on the skbio.math.stats.ordination.PCoA
    # implementation, with some minor adjustments.

    # check inputs
    dist = ensure_square(dist)

    # perform scaling
    e_matrix = (dist ** 2) / -2
    row_means = np.mean(e_matrix, axis=1, keepdims=True)
    col_means = np.mean(e_matrix, axis=0, keepdims=True)
    matrix_mean = np.mean(e_matrix)
    f_matrix = e_matrix - row_means - col_means + matrix_mean
    eigvals, eigvecs = scipy.linalg.eigh(f_matrix)

    # deal with eigvals close to zero
    close_to_zero = np.isclose(eigvals, 0)
    eigvals[close_to_zero] = 0

    # sort descending
    idxs = eigvals.argsort()[::-1]
    eigvals = eigvals[idxs]
    eigvecs = eigvecs[:, idxs]

    # keep only positive eigenvalues
    keep = eigvals >= 0
    eigvecs = eigvecs[:, keep]
    eigvals = eigvals[keep]

    # compute coordinates
    coords = eigvecs * np.sqrt(eigvals)

    # compute ratio explained
    explained_ratio = eigvals / eigvals.sum()

    return coords, explained_ratio
python
{ "resource": "" }
q1065
condensed_coords
train
def condensed_coords(i, j, n):
    """Transform square distance matrix coordinates to the corresponding
    index into a condensed, 1D form of the matrix.

    Parameters
    ----------
    i : int
        Row index.
    j : int
        Column index.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    ix : int

    """
    # guard conditions
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))

    # normalise order
    i, j = sorted([i, j])

    # calculate number of items in rows before this one (sum of arithmetic
    # progression)
    x = i * ((2 * n) - i - 1) / 2

    # add on previous items in current row
    ix = x + j - i - 1

    return int(ix)
python
{ "resource": "" }
q1066
condensed_coords_within
train
def condensed_coords_within(pop, n):
    """Return indices into a condensed distance matrix for all
    pairwise comparisons within the given population.

    Parameters
    ----------
    pop : array_like, int
        Indices of samples or haplotypes within the population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """
    return [condensed_coords(i, j, n)
            for i, j in itertools.combinations(sorted(pop), 2)]
python
{ "resource": "" }
q1067
condensed_coords_between
train
def condensed_coords_between(pop1, pop2, n):
    """Return indices into a condensed distance matrix for all pairwise
    comparisons between two populations.

    Parameters
    ----------
    pop1 : array_like, int
        Indices of samples or haplotypes within the first population.
    pop2 : array_like, int
        Indices of samples or haplotypes within the second population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : ndarray, int

    """
    return [condensed_coords(i, j, n)
            for i, j in itertools.product(sorted(pop1), sorted(pop2))]
python
{ "resource": "" }
q1068
plot_pairwise_distance
train
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None,
                           imshow_kwargs=None):
    """Plot a pairwise distance matrix.

    Parameters
    ----------
    dist : array_like
        The distance matrix in condensed form.
    labels : sequence of strings, optional
        Sample labels for the axes.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    import matplotlib.pyplot as plt

    # check inputs
    dist_square = ensure_square(dist)

    # set up axes
    if ax is None:
        # make a square figure
        x = plt.rcParams['figure.figsize'][0]
        fig, ax = plt.subplots(figsize=(x, x))
        fig.tight_layout()

    # setup imshow arguments
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('cmap', 'jet')
    imshow_kwargs.setdefault('vmin', np.min(dist))
    imshow_kwargs.setdefault('vmax', np.max(dist))

    # plot as image
    im = ax.imshow(dist_square, **imshow_kwargs)

    # tidy up
    if labels:
        ax.set_xticks(range(len(labels)))
        ax.set_yticks(range(len(labels)))
        ax.set_xticklabels(labels, rotation=90)
        ax.set_yticklabels(labels, rotation=0)
    else:
        ax.set_xticks([])
        ax.set_yticks([])
    if colorbar:
        plt.gcf().colorbar(im, shrink=.5)

    return ax
python
{ "resource": "" }
q1069
jackknife
train
def jackknife(values, statistic):
    """Estimate standard error for `statistic` computed over `values` using
    the jackknife.

    Parameters
    ----------
    values : array_like or tuple of array_like
        Input array, or tuple of input arrays.
    statistic : function
        The statistic to compute.

    Returns
    -------
    m : float
        Mean of jackknife values.
    se : float
        Estimate of standard error.
    vj : ndarray
        Statistic values computed for each jackknife iteration.

    """
    if isinstance(values, tuple):
        # multiple input arrays
        n = len(values[0])
        masked_values = [np.ma.asarray(v) for v in values]
        for m in masked_values:
            assert m.ndim == 1, 'only 1D arrays supported'
            assert m.shape[0] == n, 'input arrays not of equal length'
            m.mask = np.zeros(m.shape, dtype=bool)
    else:
        n = len(values)
        masked_values = np.ma.asarray(values)
        assert masked_values.ndim == 1, 'only 1D arrays supported'
        masked_values.mask = np.zeros(masked_values.shape, dtype=bool)

    # values of the statistic calculated in each jackknife iteration
    vj = list()

    for i in range(n):

        if isinstance(values, tuple):
            # multiple input arrays
            for m in masked_values:
                m.mask[i] = True
            x = statistic(*masked_values)
            for m in masked_values:
                m.mask[i] = False
        else:
            masked_values.mask[i] = True
            x = statistic(masked_values)
            masked_values.mask[i] = False

        vj.append(x)

    # convert to array for convenience
    vj = np.array(vj)

    # compute mean of jackknife values
    m = vj.mean()

    # compute standard error
    sv = ((n - 1) / n) * np.sum((vj - m) ** 2)
    se = np.sqrt(sv)

    return m, se, vj
python
{ "resource": "" }
q1070
tabulate_state_transitions
train
def tabulate_state_transitions(x, states, pos=None):
    """Construct a dataframe where each row provides information about a
    state transition.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Notes
    -----
    The resulting dataframe includes one row at the start representing the
    first state observation and one row at the end representing the last
    state observation.

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2})
    >>> df
       lstate  rstate  lidx  ridx
    0      -1       1    -1     0
    1       1       2     4     5
    2       2       1     8     9
    3       1      -1    10    -1
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos)
    >>> df
       lstate  rstate  lidx  ridx  lpos  rpos
    0      -1       1    -1     0    -1     2
    1       1       2     4     5    10    14
    2       2       1     8     9    28    30
    3       1      -1    10    -1    31    -1

    """
    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, _ = state_transitions(x, states)

    # start to build a dataframe
    items = [('lstate', transitions[:, 0]),
             ('rstate', transitions[:, 1]),
             ('lidx', switch_points[:, 0]),
             ('ridx', switch_points[:, 1])]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # find switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # add columns into dataframe
        items += [('lpos', switch_positions[:, 0]),
                  ('rpos', switch_positions[:, 1])]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
python
{ "resource": "" }
q1071
tabulate_state_blocks
train
def tabulate_state_blocks(x, states, pos=None):
    """Construct a dataframe where each row provides information about
    continuous state blocks.

    Parameters
    ----------
    x : array_like, int
        1-dimensional array of state values.
    states : set
        Set of states of interest. Any state value not in this set will be
        ignored.
    pos : array_like, int, optional
        Array of positions corresponding to values in `x`.

    Returns
    -------
    df : DataFrame

    Examples
    --------
    >>> import allel
    >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2})
    >>> df
       state  support  start_lidx  ...  size_min  size_max  is_marginal
    0      1        4          -1  ...         5        -1         True
    1      2        3           4  ...         4         4        False
    2      1        2           8  ...         2        -1         True
    [3 rows x 9 columns]
    >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31]
    >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos)
    >>> df
       state  support  start_lidx  ...  stop_rpos  length_min  length_max
    0      1        4          -1  ...         14           9          -1
    1      2        3           4  ...         30          15          19
    2      1        2           8  ...         -1           2          -1
    [3 rows x 15 columns]

    """
    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)

    # find state transitions
    switch_points, transitions, observations = state_transitions(x, states)

    # setup some helpers
    t = transitions[1:, 0]
    o = observations[1:]
    s1 = switch_points[:-1]
    s2 = switch_points[1:]
    is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0)
    size_min = s2[:, 0] - s1[:, 1] + 1
    size_max = s2[:, 1] - s1[:, 0] - 1
    size_max[is_marginal] = -1

    # start to build a dataframe
    items = [
        ('state', t),
        ('support', o),
        ('start_lidx', s1[:, 0]),
        ('start_ridx', s1[:, 1]),
        ('stop_lidx', s2[:, 0]),
        ('stop_ridx', s2[:, 1]),
        ('size_min', size_min),
        ('size_max', size_max),
        ('is_marginal', is_marginal)
    ]

    # deal with optional positions
    if pos is not None:
        pos = asarray_ndim(pos, 1)
        check_dim0_aligned(x, pos)
        check_integer_dtype(pos)

        # obtain switch positions
        switch_positions = np.take(pos, switch_points)
        # deal with boundary transitions
        switch_positions[0, 0] = -1
        switch_positions[-1, 1] = -1

        # setup helpers
        p1 = switch_positions[:-1]
        p2 = switch_positions[1:]
        length_min = p2[:, 0] - p1[:, 1] + 1
        length_max = p2[:, 1] - p1[:, 0] - 1
        length_max[is_marginal] = -1

        items += [
            ('start_lpos', p1[:, 0]),
            ('start_rpos', p1[:, 1]),
            ('stop_lpos', p2[:, 0]),
            ('stop_rpos', p2[:, 1]),
            ('length_min', length_min),
            ('length_max', length_max),
        ]

    import pandas
    return pandas.DataFrame.from_dict(OrderedDict(items))
python
{ "resource": "" }
q1072
write_vcf
train
def write_vcf(path, callset, rename=None, number=None, description=None,
              fill=None, write_header=True):
    """Preliminary support for writing a VCF file. Currently does not support
    sample data. Needs further work."""

    names, callset = normalize_callset(callset)
    with open(path, 'w') as vcf_file:
        if write_header:
            write_vcf_header(vcf_file, names, callset=callset, rename=rename,
                             number=number, description=description)
        write_vcf_data(vcf_file, names, callset=callset, rename=rename,
                       fill=fill)
python
{ "resource": "" }
q1073
asarray_ndim
train
def asarray_ndim(a, *ndims, **kwargs):
    """Ensure numpy array.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`.

    Returns
    -------
    a : numpy.ndarray

    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if a is None and allow_none:
        return None
    a = np.array(a, **kwargs)
    if a.ndim not in ndims:
        if len(ndims) > 1:
            expect_str = 'one of %s' % str(ndims)
        else:
            # noinspection PyUnresolvedReferences
            expect_str = '%s' % ndims[0]
        raise TypeError('bad number of dimensions: expected %s; found %s' %
                        (expect_str, a.ndim))
    return a
python
{ "resource": "" }
q1074
hdf5_cache
train
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False,
               hashed_key=False, **h5dcreate_kwargs):
    """HDF5 cache decorator.

    Parameters
    ----------
    filepath : string, optional
        Path to HDF5 file. If None a temporary file name will be used.
    parent : string, optional
        Path to group within HDF5 file to use as parent. If None the root
        group will be used.
    group : string, optional
        Path to group within HDF5 file, relative to parent, to use as
        container for cached data. If None the name of the wrapped function
        will be used.
    names : sequence of strings, optional
        Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
        etc.
    typed : bool, optional
        If True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
    hashed_key : bool, optional
        If False (default) the key will not be hashed, which makes for
        readable cache group names. If True the key will be hashed, however
        note that on Python >= 3.3 the hash value will not be the same
        between sessions unless the environment variable PYTHONHASHSEED has
        been set to the same value.

    Returns
    -------
    decorator : function

    Examples
    --------
    Without any arguments, will cache using a temporary HDF5 file::

        >>> import allel
        >>> @allel.util.hdf5_cache()
        ... def foo(n):
        ...     print('executing foo')
        ...     return np.arange(n)
        ...
        >>> foo(3)
        executing foo
        array([0, 1, 2])
        >>> foo(3)
        array([0, 1, 2])
        >>> foo.cache_filepath  # doctest: +SKIP
        '/tmp/tmp_jwtwgjz'

    Supports multiple return values, including scalars, e.g.::

        >>> @allel.util.hdf5_cache()
        ... def bar(n):
        ...     print('executing bar')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> bar(3)
        executing bar
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> bar(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    Names can also be specified for the datasets, e.g.::

        >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
        ... def baz(n):
        ...     print('executing baz')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> baz(3)
        executing baz
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> baz(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)

    """
    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os.remove, filepath)

    # initialise defaults for dataset creation
    h5dcreate_kwargs.setdefault('chunks', True)

    def decorator(user_function):

        # setup the name for the cache container group
        if group is None:
            container = user_function.__name__
        else:
            container = group

        def wrapper(*args, **kwargs):

            # load from cache or not
            no_cache = kwargs.pop('no_cache', False)

            # compute a key from the function arguments
            key = _make_key(args, kwargs, typed)
            if hashed_key:
                key = str(hash(key))
            else:
                key = str(key).replace('/', '__slash__')

            return _hdf5_cache_act(filepath, parent, container, key, names,
                                   no_cache, user_function, args, kwargs,
                                   h5dcreate_kwargs)

        wrapper.cache_filepath = filepath
        return update_wrapper(wrapper, user_function)

    return decorator
python
{ "resource": "" }
q1075
pca
train
def pca(gn, n_components=10, copy=True, scaler='patterson', ploidy=2):
    """Perform principal components analysis of genotype data, via singular
    value decomposition.

    Parameters
    ----------
    gn : array_like, float, shape (n_variants, n_samples)
        Genotypes at biallelic variants, coded as the number of alternate
        alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    n_components : int, optional
        Number of components to keep.
    copy : bool, optional
        If False, data passed to fit are overwritten.
    scaler : {'patterson', 'standard', None}
        Scaling method; 'patterson' applies the method of Patterson et al
        2006; 'standard' scales to unit variance; None centers the data only.
    ploidy : int, optional
        Sample ploidy, only relevant if 'patterson' scaler is used.

    Returns
    -------
    coords : ndarray, float, shape (n_samples, n_components)
        Transformed coordinates for the samples.
    model : GenotypePCA
        Model instance containing the variance ratio explained and the
        stored components (a.k.a., loadings). Can be used to project further
        data into the same principal components space via the transform()
        method.

    Notes
    -----
    Genotype data should be filtered prior to using this function to remove
    variants in linkage disequilibrium.

    See Also
    --------
    randomized_pca, allel.stats.ld.locate_unlinked

    """
    # set up the model
    model = GenotypePCA(n_components, copy=copy, scaler=scaler, ploidy=ploidy)

    # fit the model and project the input data onto the new dimensions
    coords = model.fit_transform(gn)

    return coords, model
python
{ "resource": "" }
q1076
randomized_pca
train
def randomized_pca(gn, n_components=10, copy=True, iterated_power=3,
                   random_state=None, scaler='patterson', ploidy=2):
    """Perform principal components analysis of genotype data, via an
    approximate truncated singular value decomposition using randomization
    to speed up the computation.

    Parameters
    ----------
    gn : array_like, float, shape (n_variants, n_samples)
        Genotypes at biallelic variants, coded as the number of alternate
        alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    n_components : int, optional
        Number of components to keep.
    copy : bool, optional
        If False, data passed to fit are overwritten.
    iterated_power : int, optional
        Number of iterations for the power method.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton.
    scaler : {'patterson', 'standard', None}
        Scaling method; 'patterson' applies the method of Patterson et al
        2006; 'standard' scales to unit variance; None centers the data only.
    ploidy : int, optional
        Sample ploidy, only relevant if 'patterson' scaler is used.

    Returns
    -------
    coords : ndarray, float, shape (n_samples, n_components)
        Transformed coordinates for the samples.
    model : GenotypeRandomizedPCA
        Model instance containing the variance ratio explained and the
        stored components (a.k.a., loadings). Can be used to project further
        data into the same principal components space via the transform()
        method.

    Notes
    -----
    Genotype data should be filtered prior to using this function to remove
    variants in linkage disequilibrium.

    Based on the :class:`sklearn.decomposition.RandomizedPCA` implementation.

    See Also
    --------
    pca, allel.stats.ld.locate_unlinked

    """
    # set up the model
    model = GenotypeRandomizedPCA(n_components, copy=copy,
                                  iterated_power=iterated_power,
                                  random_state=random_state, scaler=scaler,
                                  ploidy=ploidy)

    # fit the model and project the input data onto the new dimensions
    coords = model.fit_transform(gn)

    return coords, model
python
{ "resource": "" }
q1077
get_chunks
train
def get_chunks(data, chunks=None):
    """Try to guess a reasonable chunk shape to use for block-wise
    algorithms operating over `data`."""

    if chunks is None:

        if hasattr(data, 'chunklen') and hasattr(data, 'shape'):
            # bcolz carray, chunk first dimension only
            return (data.chunklen,) + data.shape[1:]

        elif hasattr(data, 'chunks') and hasattr(data, 'shape') and \
                len(data.chunks) == len(data.shape):
            # h5py dataset or zarr array
            return data.chunks

        else:
            # fall back to something simple, ~4Mb chunks of first dimension
            row = np.asarray(data[0])
            chunklen = max(1, (2**22) // row.nbytes)
            if row.shape:
                chunks = (chunklen,) + row.shape
            else:
                chunks = (chunklen,)
            return chunks

    else:

        return chunks
python
{ "resource": "" }
q1078
iter_gff3
train
def iter_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', tabix='tabix'):
    """Iterate over records in a GFF3 file.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string
        Tabix command.

    Returns
    -------
    Iterator

    """
    # prepare fill values for attributes
    if attributes is not None:
        attributes = list(attributes)
        if isinstance(attributes_fill, (list, tuple)):
            if len(attributes) != len(attributes_fill):
                raise ValueError('number of fills does not match attributes')
        else:
            attributes_fill = [attributes_fill] * len(attributes)

    # open input stream
    if region is not None:
        cmd = [tabix, path, region]
        buffer = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    elif path.endswith('.gz') or path.endswith('.bgz'):
        buffer = gzip.open(path, mode='rb')
    else:
        buffer = open(path, mode='rb')

    try:
        for line in buffer:
            # N.B., use startswith() rather than indexing the line, because
            # indexing a bytes object yields an int on Python 3, which would
            # never compare equal to b'>' or b'#'
            if line.startswith(b'>'):
                # assume begin embedded FASTA
                return
            if line.startswith(b'#'):
                # skip comment lines
                continue
            vals = line.split(b'\t')
            if len(vals) == 9:
                # unpack for processing
                fseqid, fsource, ftype, fstart, fend, fscore, fstrand, \
                    fphase, fattrs = vals
                # convert numerics
                fstart = int(fstart)
                fend = int(fend)
                if fscore == b'.':
                    fscore = score_fill
                else:
                    fscore = float(fscore)
                if fphase == b'.':
                    fphase = phase_fill
                else:
                    fphase = int(fphase)
                if not PY2:
                    fseqid = str(fseqid, 'ascii')
                    fsource = str(fsource, 'ascii')
                    ftype = str(ftype, 'ascii')
                    fstrand = str(fstrand, 'ascii')
                    fattrs = str(fattrs, 'ascii')
                rec = (fseqid, fsource, ftype, fstart, fend, fscore, fstrand,
                       fphase)
                if attributes is not None:
                    dattrs = gff3_parse_attributes(fattrs)
                    vattrs = tuple(
                        dattrs.get(k, f)
                        for k, f in zip(attributes, attributes_fill)
                    )
                    rec += vattrs
                yield rec

    finally:
        buffer.close()
python
{ "resource": "" }
q1079
gff3_to_recarray
train
def gff3_to_recarray(path, attributes=None, region=None, score_fill=-1,
                     phase_fill=-1, attributes_fill='.', tabix='tabix',
                     dtype=None):
    """Load data from a GFF3 into a NumPy recarray.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string, optional
        Tabix command.
    dtype : dtype, optional
        Override dtype.

    Returns
    -------
    np.recarray

    """
    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fill,
                          attributes_fill=attributes_fill, tabix=tabix))

    if not recs:
        return None

    # determine dtype
    if dtype is None:
        dtype = [('seqid', object),
                 ('source', object),
                 ('type', object),
                 ('start', int),
                 ('end', int),
                 ('score', float),
                 ('strand', object),
                 ('phase', int)]
        if attributes:
            for n in attributes:
                dtype.append((n, object))

    a = np.rec.fromrecords(recs, dtype=dtype)
    return a
python
{ "resource": "" }
q1080
gff3_to_dataframe
train
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1,
                      phase_fill=-1, attributes_fill='.', tabix='tabix',
                      **kwargs):
    """Load data from a GFF3 into a pandas DataFrame.

    Parameters
    ----------
    path : string
        Path to input file.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    tabix : string, optional
        Tabix command.

    Returns
    -------
    pandas.DataFrame

    """
    import pandas

    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fill,
                          attributes_fill=attributes_fill, tabix=tabix))

    # load into pandas
    columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',
               'phase']
    if attributes:
        columns += list(attributes)
    df = pandas.DataFrame.from_records(recs, columns=columns, **kwargs)
    return df
python
{ "resource": "" }
q1081
voight_painting
train
def voight_painting(h):
    """Paint haplotypes, assigning a unique integer to each shared haplotype
    prefix.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.

    Returns
    -------
    painting : ndarray, int, shape (n_variants, n_haplotypes)
        Painting array.
    indices : ndarray, int, shape (n_haplotypes,)
        Haplotype indices after sorting by prefix.

    """
    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.max() > 1:
        raise NotImplementedError('only biallelic variants are supported')
    if h.min() < 0:
        raise NotImplementedError('missing calls are not supported')

    # sort by prefix
    indices = h.prefix_argsort()
    h = np.take(h, indices, axis=1)

    # paint
    painting = paint_shared_prefixes(memoryview_safe(np.asarray(h)))

    return painting, indices
python
{ "resource": "" }
q1082
plot_voight_painting
train
def plot_voight_painting(painting, palette='colorblind', flank='right',
                         ax=None, height_factor=0.01):
    """Plot a painting of shared haplotype prefixes.

    Parameters
    ----------
    painting : array_like, int, shape (n_variants, n_haplotypes)
        Painting array.
    palette : string, optional
        A Seaborn palette name.
    flank : {'right', 'left'}, optional
        If left, painting will be reversed along first axis.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    height_factor : float, optional
        If no axes provided, determine height of figure by multiplying
        height of painting array by this number.

    Returns
    -------
    ax : axes

    """
    import seaborn as sns
    from matplotlib.colors import ListedColormap
    import matplotlib.pyplot as plt

    if flank == 'left':
        painting = painting[::-1]

    n_colors = painting.max()
    palette = sns.color_palette(palette, n_colors)
    # use white for singleton haplotypes
    cmap = ListedColormap(['white'] + palette)

    # setup axes
    if ax is None:
        w = plt.rcParams['figure.figsize'][0]
        h = height_factor*painting.shape[1]
        fig, ax = plt.subplots(figsize=(w, h))
        sns.despine(ax=ax, bottom=True, left=True)

    ax.pcolormesh(painting.T, cmap=cmap)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(0, painting.shape[0])
    ax.set_ylim(0, painting.shape[1])

    return ax
python
{ "resource": "" }
q1083
fig_voight_painting
train
def fig_voight_painting(h, index=None, palette='colorblind',
                        height_factor=0.01, fig=None):
    """Make a figure of shared haplotype prefixes for both left and right
    flanks, centred on some variant of choice.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    index : int, optional
        Index of the variant within the haplotype array to centre on. If
        not provided, the middle variant will be used.
    palette : string, optional
        A Seaborn palette name.
    height_factor : float, optional
        If no axes provided, determine height of figure by multiplying
        height of painting array by this number.
    fig : figure
        The figure on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    fig : figure

    Notes
    -----
    N.B., the ordering of haplotypes on the left and right flanks will be
    different. This means that haplotypes on the right flank **will not**
    correspond to haplotypes on the left flank at the same vertical
    position.

    """
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec
    import seaborn as sns

    # check inputs
    h = asarray_ndim(h, 2)
    if index is None:
        # use midpoint
        index = h.shape[0] // 2

    # divide data into two flanks
    hl = h[:index+1][::-1]
    hr = h[index:]

    # paint both flanks
    pl, il = voight_painting(hl)
    pr, ir = voight_painting(hr)

    # compute ehh decay for both flanks
    el = ehh_decay(hl, truncate=False)
    er = ehh_decay(hr, truncate=False)

    # setup figure
    # fixed height for EHH decay subplot
    h_ehh = plt.rcParams['figure.figsize'][1] // 3
    # add height for paintings
    h_painting = height_factor*h.shape[1]
    if fig is None:
        w = plt.rcParams['figure.figsize'][0]
        h = h_ehh + h_painting
        fig = plt.figure(figsize=(w, h))

    # setup gridspec
    gs = GridSpec(2, 2,
                  width_ratios=[hl.shape[0], hr.shape[0]],
                  height_ratios=[h_painting, h_ehh])

    # plot paintings
    ax = fig.add_subplot(gs[0, 0])
    sns.despine(ax=ax, left=True, bottom=True)
    plot_voight_painting(pl, palette=palette, flank='left', ax=ax)
    ax = fig.add_subplot(gs[0, 1])
    sns.despine(ax=ax, left=True, bottom=True)
    plot_voight_painting(pr, palette=palette, flank='right', ax=ax)

    # plot ehh
    ax = fig.add_subplot(gs[1, 0])
    sns.despine(ax=ax, offset=3)
    x = np.arange(el.shape[0])
    y = el
    ax.fill_between(x, 0, y)
    ax.set_ylim(0, 1)
    ax.set_yticks([0, 1])
    ax.set_ylabel('EHH')
    ax.invert_xaxis()
    ax = fig.add_subplot(gs[1, 1])
    sns.despine(ax=ax, left=True, right=False, offset=3)
    ax.yaxis.tick_right()
    ax.set_ylim(0, 1)
    ax.set_yticks([0, 1])
    x = np.arange(er.shape[0])
    y = er
    ax.fill_between(x, 0, y)

    # tidy up
    fig.tight_layout()

    return fig
python
{ "resource": "" }
q1084
compute_ihh_gaps
train
def compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible):
    """Compute spacing between variants for integrating haplotype
    homozygosity.

    Parameters
    ----------
    pos : array_like, int, shape (n_variants,)
        Variant positions (physical distance).
    map_pos : array_like, float, shape (n_variants,)
        Variant positions (genetic map distance).
    gap_scale : int, optional
        Rescale distance between variants if gap is larger than this value.
    max_gap : int, optional
        Do not report scores if EHH spans a gap larger than this number of
        base pairs.
    is_accessible : array_like, bool, optional
        Genome accessibility array. If provided, distance between variants
        will be computed as the number of accessible bases between them.

    Returns
    -------
    gaps : ndarray, float, shape (n_variants - 1,)

    """
    # check inputs
    if map_pos is None:
        # integrate over physical distance
        map_pos = pos
    else:
        map_pos = asarray_ndim(map_pos, 1)
        check_dim0_aligned(pos, map_pos)

    # compute physical gaps
    physical_gaps = np.diff(pos)

    # compute genetic gaps
    gaps = np.diff(map_pos).astype('f8')

    if is_accessible is not None:

        # compute accessible gaps
        is_accessible = asarray_ndim(is_accessible, 1)
        assert is_accessible.shape[0] > pos[-1], \
            'accessibility array too short'
        accessible_gaps = np.zeros_like(physical_gaps)
        for i in range(1, len(pos)):
            # N.B., expect pos is 1-based
            n_access = np.count_nonzero(is_accessible[pos[i-1]-1:pos[i]-1])
            accessible_gaps[i-1] = n_access

        # adjust using accessibility
        scaling = accessible_gaps / physical_gaps
        gaps = gaps * scaling

    elif gap_scale is not None and gap_scale > 0:

        scaling = np.ones(gaps.shape, dtype='f8')
        loc_scale = physical_gaps > gap_scale
        scaling[loc_scale] = gap_scale / physical_gaps[loc_scale]
        gaps = gaps * scaling

    if max_gap is not None and max_gap > 0:

        # deal with very large gaps
        gaps[physical_gaps > max_gap] = -1

    return gaps
python
{ "resource": "" }
q1085
xpnsl
train
def xpnsl(h1, h2, use_threads=True):
    """Cross-population version of the NSL statistic.

    Parameters
    ----------
    h1 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the first population.
    h2 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the second population.
    use_threads : bool, optional
        If True use multiple threads to compute.

    Returns
    -------
    score : ndarray, float, shape (n_variants,)
        Unstandardized XPNSL scores.

    """
    # check inputs
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    check_dim0_aligned(h1, h2)
    h1 = memoryview_safe(h1)
    h2 = memoryview_safe(h2)

    if use_threads and multiprocessing.cpu_count() > 1:
        # use multiple threads

        # setup threadpool
        pool = ThreadPool(min(4, multiprocessing.cpu_count()))

        # scan forward
        res1_fwd = pool.apply_async(nsl_scan, args=(h1,))
        res2_fwd = pool.apply_async(nsl_scan, args=(h2,))

        # scan backward
        res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],))
        res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],))

        # wait for all scans to finish
        pool.close()
        pool.join()

        # obtain results
        nsl1_fwd = res1_fwd.get()
        nsl2_fwd = res2_fwd.get()
        nsl1_rev = res1_rev.get()
        nsl2_rev = res2_rev.get()

        # cleanup
        pool.terminate()

    else:
        # compute without threads

        # scan forward
        nsl1_fwd = nsl_scan(h1)
        nsl2_fwd = nsl_scan(h2)

        # scan backward
        nsl1_rev = nsl_scan(h1[::-1])
        nsl2_rev = nsl_scan(h2[::-1])

    # handle reverse scans
    nsl1_rev = nsl1_rev[::-1]
    nsl2_rev = nsl2_rev[::-1]

    # compute unstandardized score
    nsl1 = nsl1_fwd + nsl1_rev
    nsl2 = nsl2_fwd + nsl2_rev
    score = np.log(nsl1 / nsl2)

    return score
python
{ "resource": "" }
q1086
haplotype_diversity
train
def haplotype_diversity(h):
    """Estimate haplotype diversity.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.

    Returns
    -------
    hd : float
        Haplotype diversity.

    """
    # check inputs
    h = HaplotypeArray(h, copy=False)

    # number of haplotypes
    n = h.n_haplotypes

    # compute haplotype frequencies
    f = h.distinct_frequencies()

    # estimate haplotype diversity
    hd = (1 - np.sum(f**2)) * n / (n - 1)

    return hd
python
{ "resource": "" }
q1087
moving_haplotype_diversity
train
def moving_haplotype_diversity(h, size, start=0, stop=None, step=None):
    """Estimate haplotype diversity in moving windows.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    hd : ndarray, float, shape (n_windows,)
        Haplotype diversity.

    """
    hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size,
                          start=start, stop=stop, step=step)
    return hd
python
{ "resource": "" }
q1088
plot_haplotype_frequencies
train
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w',
                               ax=None):
    """Plot haplotype frequencies.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    palette : string, optional
        A Seaborn palette name.
    singleton_color : string, optional
        Color to paint singleton haplotypes.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes

    """
    import matplotlib.pyplot as plt
    import seaborn as sns

    # check inputs
    h = HaplotypeArray(h, copy=False)

    # setup figure
    if ax is None:
        width = plt.rcParams['figure.figsize'][0]
        height = width / 10
        fig, ax = plt.subplots(figsize=(width, height))
        sns.despine(ax=ax, left=True)

    # count distinct haplotypes
    hc = h.distinct_counts()

    # setup palette
    n_colors = np.count_nonzero(hc > 1)
    palette = sns.color_palette(palette, n_colors)

    # paint frequencies
    x1 = 0
    for i, c in enumerate(hc):
        x2 = x1 + c
        if c > 1:
            color = palette[i]
        else:
            color = singleton_color
        ax.axvspan(x1, x2, color=color)
        x1 = x2

    # tidy up
    ax.set_xlim(0, h.shape[1])
    ax.set_yticks([])

    return ax
python
{ "resource": "" }
q1089
moving_hfs_rank
train
def moving_hfs_rank(h, size, start=0, stop=None):
    """Helper function for plotting haplotype frequencies in moving windows.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.

    Returns
    -------
    hr : ndarray, int, shape (n_windows, n_haplotypes)
        Haplotype rank array.

    """
    # determine windows
    windows = np.asarray(list(index_windows(h, size=size, start=start,
                                            stop=stop, step=None)))

    # setup output
    hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4')

    # iterate over windows
    for i, (window_start, window_stop) in enumerate(windows):

        # extract haplotypes for the current window
        hw = h[window_start:window_stop]

        # count haplotypes
        hc = hw.distinct_counts()

        # ensure sorted descending
        hc.sort()
        hc = hc[::-1]

        # compute ranks for non-singleton haplotypes
        cp = 0
        for j, c in enumerate(hc):
            if c > 1:
                hr[i, cp:cp+c] = j+1
            cp += c

    return hr
python
{ "resource": "" }
q1090
plot_moving_haplotype_frequencies
train
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None, n=None,
                                      palette='Paired', singleton_color='w',
                                      ax=None):
    """Plot haplotype frequencies in moving windows over the genome.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    n : int, optional
        Color only the `n` most frequent haplotypes (by default, all
        non-singleton haplotypes are colored).
    palette : string, optional
        A Seaborn palette name.
    singleton_color : string, optional
        Color to paint singleton haplotypes.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes

    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import seaborn as sns

    # setup figure
    if ax is None:
        fig, ax = plt.subplots()

    # compute haplotype frequencies
    # N.B., here we use a haplotype rank data structure to enable the use of
    # pcolormesh() which is a lot faster than any other type of plotting
    # function
    hr = moving_hfs_rank(h, size=size, start=start, stop=stop)

    # truncate to n most common haplotypes
    if n:
        hr[hr > n] = 0

    # compute window start and stop positions
    windows = moving_statistic(pos, statistic=lambda v: (v[0], v[-1]),
                               size=size, start=start, stop=stop)

    # create color map
    colors = [singleton_color] + sns.color_palette(palette, n_colors=hr.max())
    cmap = mpl.colors.ListedColormap(colors)

    # draw colors
    x = np.append(windows[:, 0], windows[-1, -1])
    y = np.arange(h.shape[1]+1)
    ax.pcolormesh(x, y, hr.T, cmap=cmap)

    # tidy up
    ax.set_xlim(windows[0, 0], windows[-1, -1])
    ax.set_ylim(0, h.shape[1])
    ax.set_ylabel('haplotype count')
    ax.set_xlabel('position (bp)')

    return ax
python
{ "resource": "" }
q1091
moving_delta_tajima_d
train
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None):
    """Compute the difference in Tajima's D between two populations in
    moving windows.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.

    Returns
    -------
    delta_d : ndarray, float, shape (n_windows,)
        Standardized delta Tajima's D.

    See Also
    --------
    allel.stats.diversity.moving_tajima_d

    """
    d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
    d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
    delta = d1 - d2
    delta_z = (delta - np.mean(delta)) / np.std(delta)
    return delta_z
python
{ "resource": "" }
q1092
make_similar_sized_bins
train
def make_similar_sized_bins(x, n):
    """Utility function to create a set of bins over the range of values in
    `x` such that each bin contains roughly the same number of values.

    Parameters
    ----------
    x : array_like
        The values to be binned.
    n : int
        The number of bins to create.

    Returns
    -------
    bins : ndarray
        An array of bin edges.

    Notes
    -----
    The actual number of bins returned may be less than `n` if `x` contains
    integer values and any single value is represented more than len(x)//n
    times.

    """
    # copy and sort the array
    y = np.array(x).flatten()
    y.sort()

    # setup bins
    bins = [y[0]]

    # determine step size
    step = len(y) // n

    # add bin edges
    for i in range(step, len(y), step):

        # get value at this index
        v = y[i]

        # only add bin edge if larger than previous
        if v > bins[-1]:
            bins.append(v)

    # fix last bin edge
    bins[-1] = y[-1]

    return np.array(bins)
python
{ "resource": "" }
q1093
standardize
train
def standardize(score):
    """Centre and scale to unit variance."""
    score = asarray_ndim(score, 1)
    return (score - np.nanmean(score)) / np.nanstd(score)
python
{ "resource": "" }
q1094
standardize_by_allele_count
train
def standardize_by_allele_count(score, aac, bins=None, n_bins=None,
                                diagnostics=True):
    """Standardize `score` within allele frequency bins.

    Parameters
    ----------
    score : array_like, float
        The score to be standardized, e.g., IHS or NSL.
    aac : array_like, int
        An array of alternate allele counts.
    bins : array_like, int, optional
        Allele count bins, overrides `n_bins`.
    n_bins : int, optional
        Number of allele count bins to use.
    diagnostics : bool, optional
        If True, plot some diagnostic information about the standardization.

    Returns
    -------
    score_standardized : ndarray, float
        Standardized scores.
    bins : ndarray, int
        Allele count bins used for standardization.

    """
    from scipy.stats import binned_statistic

    # check inputs
    score = asarray_ndim(score, 1)
    aac = asarray_ndim(aac, 1)
    check_dim0_aligned(score, aac)

    # remove nans
    nonan = ~np.isnan(score)
    score_nonan = score[nonan]
    aac_nonan = aac[nonan]

    if bins is None:
        # make our own similar sized bins

        # how many bins to make?
        if n_bins is None:
            # something vaguely reasonable
            n_bins = np.max(aac) // 2

        # make bins
        bins = make_similar_sized_bins(aac_nonan, n_bins)

    else:
        # user-provided bins
        bins = asarray_ndim(bins, 1)

    mean_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                        statistic=np.mean,
                                        bins=bins)
    std_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                       statistic=np.std,
                                       bins=bins)

    if diagnostics:
        import matplotlib.pyplot as plt
        x = (bins[:-1] + bins[1:]) / 2
        plt.figure()
        plt.fill_between(x,
                         mean_score - std_score,
                         mean_score + std_score,
                         alpha=.5,
                         label='std')
        plt.plot(x, mean_score, marker='o', label='mean')
        plt.grid(axis='y')
        plt.xlabel('Alternate allele count')
        plt.ylabel('Unstandardized score')
        plt.title('Standardization diagnostics')
        plt.legend()

    # apply standardization
    score_standardized = np.empty_like(score)
    for i in range(len(bins) - 1):
        x1 = bins[i]
        x2 = bins[i + 1]
        if i == 0:
            # first bin
            loc = (aac < x2)
        elif i == len(bins) - 2:
            # last bin
            loc = (aac >= x1)
        else:
            # middle bins
            loc = (aac >= x1) & (aac < x2)
        m = mean_score[i]
        s = std_score[i]
        score_standardized[loc] = (score[loc] - m) / s

    return score_standardized, bins
python
{ "resource": "" }
q1095
moving_statistic
train
def moving_statistic(values, statistic, size, start=0, stop=None, step=None,
                     **kwargs):
    """Calculate a statistic in a moving window over `values`.

    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        The statistic to compute within each window.
    size : int
        The window size (number of values).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    kwargs
        Additional keyword arguments are passed through to the `statistic`
        function.

    Returns
    -------
    out : ndarray, shape (n_windows,)

    Examples
    --------
    >>> import allel
    >>> values = [2, 5, 8, 16]
    >>> allel.moving_statistic(values, np.sum, size=2)
    array([ 7, 24])
    >>> allel.moving_statistic(values, np.sum, size=2, step=1)
    array([ 7, 13, 24])

    """
    windows = index_windows(values, size, start, stop, step)

    # setup output
    out = np.array([statistic(values[i:j], **kwargs) for i, j in windows])

    return out
python
{ "resource": "" }
q1096
window_locations
train
def window_locations(pos, windows):
    """Locate indices in `pos` corresponding to the start and stop positions
    of `windows`.

    """
    start_locs = np.searchsorted(pos, windows[:, 0])
    stop_locs = np.searchsorted(pos, windows[:, 1], side='right')
    locs = np.column_stack((start_locs, stop_locs))
    return locs
python
{ "resource": "" }
q1097
per_base
train
def per_base(x, windows, is_accessible=None, fill=np.nan):
    """Calculate the per-base value of a windowed statistic.

    Parameters
    ----------
    x : array_like, shape (n_windows,)
        The statistic to average per-base.
    windows : array_like, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions using 1-based coordinates.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        Use this value where there are no accessible bases in a window.

    Returns
    -------
    y : ndarray, float, shape (n_windows,)
        The input array divided by the number of (accessible) bases in each
        window.
    n_bases : ndarray, int, shape (n_windows,)
        The number of (accessible) bases in each window

    """
    # calculate window sizes
    if is_accessible is None:
        # N.B., window stops are included
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                            for i, j in windows])

    # deal with multidimensional x
    if x.ndim == 1:
        pass
    elif x.ndim == 2:
        n_bases = n_bases[:, None]
    else:
        raise NotImplementedError('only arrays of 1 or 2 dimensions '
                                  'supported')

    # calculate density per-base
    with ignore_invalid():
        y = np.where(n_bases > 0, x / n_bases, fill)

    # restore to 1-dimensional
    if n_bases.ndim > 1:
        n_bases = n_bases.reshape(-1)

    return y, n_bases
python
{ "resource": "" }
q1098
equally_accessible_windows
train
def equally_accessible_windows(is_accessible, size, start=0, stop=None,
                               step=None):
    """Create windows each containing the same number of accessible bases.

    Parameters
    ----------
    is_accessible : array_like, bool, shape (n_bases,)
        Array defining accessible status of all bases on a contig/chromosome.
    size : int
        Window size (number of accessible bases).
    start : int, optional
        The genome position at which to start.
    stop : int, optional
        The genome position at which to stop.
    step : int, optional
        The number of accessible sites between start positions of windows.
        If not given, defaults to the window size, i.e., non-overlapping
        windows. Use half the window size to get half-overlapping windows.

    Returns
    -------
    windows : ndarray, int, shape (n_windows, 2)
        Window start/stop positions (1-based).

    """
    pos_accessible, = np.nonzero(is_accessible)
    pos_accessible += 1  # convert to 1-based coordinates

    # N.B., need some care in handling start and stop positions, these are
    # genomic positions at which to start and stop the windows
    if start:
        pos_accessible = pos_accessible[pos_accessible >= start]
    if stop:
        pos_accessible = pos_accessible[pos_accessible <= stop]

    # now construct moving windows
    windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]],
                               size=size, step=step)

    return windows
python
{ "resource": "" }
q1099
attachment_form
train
def attachment_form(context, obj):
    """
    Renders an "upload attachment" form.

    The user must have the ``attachments.add_attachment`` permission to add
    attachments.
    """
    if context['user'].has_perm('attachments.add_attachment'):
        return {
            'form': AttachmentForm(),
            'form_url': add_url_for_obj(obj),
            'next': context.request.build_absolute_uri(),
        }
    else:
        return {'form': None}
python
{ "resource": "" }