repository_name: stringlengths 5–67
func_path_in_repository: stringlengths 4–234
func_name: stringlengths 0–314
whole_func_string: stringlengths 52–3.87M
language: stringclasses, 6 values
func_code_string: stringlengths 52–3.87M
func_documentation_string: stringlengths 1–47.2k
func_code_url: stringlengths 85–339
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_csv
def vcf_to_csv(input, output, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None, **kwargs): r"""Read data from a VCF file and write out to a comma-separated values (CSV) file. Parameters ---------- input : string {input} output : string {output} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} kwargs : keyword arguments All remaining keyword arguments are passed through to pandas.DataFrame.to_csv(). E.g., to write a tab-delimited file, provide `sep='\t'`. """ # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_csv]') kwargs['index'] = False for i, (chunk, _, _, _) in enumerate(it): df = _chunk_to_dataframe(fields, chunk) if i == 0: kwargs['header'] = True kwargs['mode'] = 'w' else: kwargs['header'] = False kwargs['mode'] = 'a' df.to_csv(output, **kwargs)
python
def vcf_to_csv(input, output, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None, **kwargs): r"""Read data from a VCF file and write out to a comma-separated values (CSV) file. Parameters ---------- input : string {input} output : string {output} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} kwargs : keyword arguments All remaining keyword arguments are passed through to pandas.DataFrame.to_csv(). E.g., to write a tab-delimited file, provide `sep='\t'`. """ # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_csv]') kwargs['index'] = False for i, (chunk, _, _, _) in enumerate(it): df = _chunk_to_dataframe(fields, chunk) if i == 0: kwargs['header'] = True kwargs['mode'] = 'w' else: kwargs['header'] = False kwargs['mode'] = 'a' df.to_csv(output, **kwargs)
r"""Read data from a VCF file and write out to a comma-separated values (CSV) file. Parameters ---------- input : string {input} output : string {output} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} kwargs : keyword arguments All remaining keyword arguments are passed through to pandas.DataFrame.to_csv(). E.g., to write a tab-delimited file, provide `sep='\t'`.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L1902-L1979
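A minimal usage sketch for the vcf_to_csv function documented above (not part of the dataset record): the file names example.vcf and example.tsv and the field selection are hypothetical, and the extra sep='\t' keyword is forwarded to pandas.DataFrame.to_csv() as the docstring describes.

import allel

# Export a few variant-level fields to a tab-delimited file; keyword
# arguments not consumed by vcf_to_csv are passed through to
# pandas.DataFrame.to_csv(), so sep='\t' switches the delimiter.
allel.vcf_to_csv('example.vcf', 'example.tsv',
                 fields=['CHROM', 'POS', 'REF', 'ALT'],
                 sep='\t')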
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_recarray
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array """ # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup chunk iterator # N.B., set samples to empty list so we don't get any calldata fields fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_recarray]') # read all chunks into a list chunks = [d[0] for d in it] # setup output output = None if chunks: # concatenate chunks output = np.concatenate([_chunk_to_recarray(fields, chunk) for chunk in chunks]) return output
python
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array """ # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup chunk iterator # N.B., set samples to empty list so we don't get any calldata fields fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_recarray]') # read all chunks into a list chunks = [d[0] for d in it] # setup output output = None if chunks: # concatenate chunks output = np.concatenate([_chunk_to_recarray(fields, chunk) for chunk in chunks]) return output
Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L2020-L2098
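A usage sketch for vcf_to_recarray, assuming a hypothetical example.vcf; only variant-level fields are returned because the function internally sets samples to an empty list, as noted in the source comments above.

import allel

# Read selected variant-level fields into a single NumPy recarray.
ra = allel.vcf_to_recarray('example.vcf', fields=['CHROM', 'POS', 'QUAL'])
print(ra.dtype.names)   # field names of the resulting record array
print(len(ra))          # number of variants read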
cggh/scikit-allel
allel/io/fasta.py
write_fasta
def write_fasta(path, sequences, names, mode='w', width=80): """Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width. """ # check inputs if isinstance(sequences, np.ndarray): # single sequence sequences = [sequences] names = [names] if len(sequences) != len(names): raise ValueError('must provide the same number of sequences and names') for sequence in sequences: if sequence.dtype != np.dtype('S1'): raise ValueError('expected S1 dtype, found %r' % sequence.dtype) # force binary mode mode = 'ab' if 'a' in mode else 'wb' # write to file with open(path, mode=mode) as fasta: for name, sequence in zip(names, sequences): # force bytes if isinstance(name, text_type): name = name.encode('ascii') header = b'>' + name + b'\n' fasta.write(header) for i in range(0, sequence.size, width): line = sequence[i:i+width].tostring() + b'\n' fasta.write(line)
python
def write_fasta(path, sequences, names, mode='w', width=80): """Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width. """ # check inputs if isinstance(sequences, np.ndarray): # single sequence sequences = [sequences] names = [names] if len(sequences) != len(names): raise ValueError('must provide the same number of sequences and names') for sequence in sequences: if sequence.dtype != np.dtype('S1'): raise ValueError('expected S1 dtype, found %r' % sequence.dtype) # force binary mode mode = 'ab' if 'a' in mode else 'wb' # write to file with open(path, mode=mode) as fasta: for name, sequence in zip(names, sequences): # force bytes if isinstance(name, text_type): name = name.encode('ascii') header = b'>' + name + b'\n' fasta.write(header) for i in range(0, sequence.size, width): line = sequence[i:i+width].tostring() + b'\n' fasta.write(line)
Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/fasta.py#L11-L54
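A usage sketch for write_fasta with two short hypothetical sequences; the sequences must be arrays of dtype 'S1', and width controls line wrapping in the output file.

import numpy as np
from allel.io.fasta import write_fasta

# Build S1 (single-byte) arrays from byte strings.
seq1 = np.frombuffer(b'ACGTACGTACGTACGT', dtype='S1')
seq2 = np.frombuffer(b'TTTTCCCCGGGGAAAA', dtype='S1')

# Write both sequences, wrapping lines at 10 characters.
write_fasta('example.fasta', [seq1, seq2], ['chr1', 'chr2'], width=10)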
cggh/scikit-allel
allel/stats/hw.py
heterozygosity_observed
def heterozygosity_observed(g, fill=np.nan): """Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of observed heterozygosity, accounting for variants # where all calls are missing with ignore_invalid(): ho = np.where(n_called > 0, n_het / n_called, fill) return ho
python
def heterozygosity_observed(g, fill=np.nan): """Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of observed heterozygosity, accounting for variants # where all calls are missing with ignore_invalid(): ho = np.where(n_called > 0, n_het / n_called, fill) return ho
Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L12-L55
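A small complementary sketch (hypothetical data, not from the dataset record) showing the fill parameter of heterozygosity_observed: where every call at a variant is missing, the fill value is returned instead of a rate.

import allel

g = allel.GenotypeArray([[[-1, -1], [-1, -1]],   # all calls missing
                         [[0, 1], [0, 0]]])      # one het out of two called
print(allel.heterozygosity_observed(g, fill=0))  # [0.  0.5]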
cggh/scikit-allel
allel/stats/hw.py
heterozygosity_expected
def heterozygosity_expected(af, ploidy, fill=np.nan): """Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ]) """ # check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out
python
def heterozygosity_expected(af, ploidy, fill=np.nan): """Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ]) """ # check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out
Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L58-L103
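A complementary sketch making the formula explicit: expected heterozygosity is 1 - sum(p_i ** ploidy) over the allele frequencies p_i at each variant. The toy frequency array below is hypothetical.

import numpy as np
import allel

af = np.array([[1.0, 0.0],    # monomorphic: He = 1 - (1 + 0) = 0
               [0.5, 0.5]])   # balanced biallelic: He = 1 - (0.25 + 0.25) = 0.5
print(allel.heterozygosity_expected(af, ploidy=2))  # [0.  0.5]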
cggh/scikit-allel
allel/stats/hw.py
inbreeding_coefficient
def inbreeding_coefficient(g, fill=np.nan): """Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0) # calculate inbreeding coefficient, accounting for variants with no # expected heterozygosity with ignore_invalid(): f = np.where(he > 0, 1 - (ho / he), fill) return f
python
def inbreeding_coefficient(g, fill=np.nan): """Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0) # calculate inbreeding coefficient, accounting for variants with no # expected heterozygosity with ignore_invalid(): f = np.where(he > 0, 1 - (ho / he), fill) return f
Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L106-L157
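A complementary sketch checking the relation F = 1 - Ho/He by hand against inbreeding_coefficient, using a single hypothetical variant.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]]])

ho = allel.heterozygosity_observed(g)                     # 1/3
af = g.count_alleles().to_frequencies()                   # [0.5, 0.5]
he = allel.heterozygosity_expected(af, ploidy=2, fill=0)  # 0.5
print(1 - ho / he)                      # [0.33333333]
print(allel.inbreeding_coefficient(g))  # same value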
cggh/scikit-allel
allel/stats/mendel.py
mendel_errors
def mendel_errors(parent_genotypes, progeny_genotypes): """Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) """ # setup parent_genotypes = GenotypeArray(parent_genotypes) progeny_genotypes = GenotypeArray(progeny_genotypes) check_ploidy(parent_genotypes.ploidy, 2) check_ploidy(progeny_genotypes.ploidy, 2) # transform into per-call allele counts max_allele = max(parent_genotypes.max(), progeny_genotypes.max()) parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') # detect nonparental and hemiparental inheritance by comparing allele # counts between parents and progeny max_progeny_gc = parent_gc.clip(max=1).sum(axis=1) max_progeny_gc = max_progeny_gc[:, np.newaxis, :] me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2) # detect uniparental inheritance by finding cases where no alleles are # shared between parents, then comparing progeny allele counts to each # parent p1_gc = parent_gc[:, 0, np.newaxis, :] p2_gc = parent_gc[:, 1, np.newaxis, :] # find variants where parents don't share any alleles is_shared_allele = (p1_gc > 0) & (p2_gc > 0) no_shared_alleles = ~np.any(is_shared_allele, axis=2) # find calls where progeny genotype is identical to one or the other parent me[no_shared_alleles & (np.all(progeny_gc == p1_gc, axis=2) | np.all(progeny_gc == p2_gc, axis=2))] = 1 # retrofit where either or both parent has a missing call me[np.any(parent_genotypes.is_missing(), axis=1)] = 0 return me
python
def mendel_errors(parent_genotypes, progeny_genotypes): """Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) """ # setup parent_genotypes = GenotypeArray(parent_genotypes) progeny_genotypes = GenotypeArray(progeny_genotypes) check_ploidy(parent_genotypes.ploidy, 2) check_ploidy(progeny_genotypes.ploidy, 2) # transform into per-call allele counts max_allele = max(parent_genotypes.max(), progeny_genotypes.max()) parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') # detect nonparental and hemiparental inheritance by comparing allele # counts between parents and progeny max_progeny_gc = parent_gc.clip(max=1).sum(axis=1) max_progeny_gc = max_progeny_gc[:, np.newaxis, :] me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2) # detect uniparental inheritance by finding cases where no alleles are # shared between parents, then comparing progeny allele counts to each # parent p1_gc = parent_gc[:, 0, np.newaxis, :] p2_gc = parent_gc[:, 1, np.newaxis, :] # find variants where parents don't share any alleles is_shared_allele = (p1_gc > 0) & (p2_gc > 0) no_shared_alleles = ~np.any(is_shared_allele, axis=2) # find calls where progeny genotype is identical to one or the other parent me[no_shared_alleles & (np.all(progeny_gc == p1_gc, axis=2) | np.all(progeny_gc == p2_gc, axis=2))] = 1 # retrofit where either or both parent has a missing call me[np.any(parent_genotypes.is_missing(), axis=1)] = 0 return me
Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L15-L218
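A short complementary sketch (hypothetical data) showing how the per-call error counts returned by mendel_errors are typically summarised per variant and per progeny.

import numpy as np
import allel

genotypes = np.array([
    # parents 0/0 x 0/1: progeny 0/0 is consistent, progeny 1/1 is a hemi-parental error
    [[0, 0], [0, 1], [0, 0], [1, 1]],
    # parents 0/0 x 1/1: both progeny 0/1 calls are consistent
    [[0, 0], [1, 1], [0, 1], [0, 1]],
])
me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
print(me.sum(axis=1))  # errors per variant: [1 0]
print(me.sum(axis=0))  # errors per progeny: [0 1]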
cggh/scikit-allel
allel/stats/mendel.py
paint_transmission
def paint_transmission(parent_haplotypes, progeny_haplotypes): """Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8) """ # check inputs parent_haplotypes = HaplotypeArray(parent_haplotypes) progeny_haplotypes = HaplotypeArray(progeny_haplotypes) if parent_haplotypes.n_haplotypes != 2: raise ValueError('exactly two parental haplotypes should be provided') # convenience variables parent1 = parent_haplotypes[:, 0, np.newaxis] parent2 = parent_haplotypes[:, 1, np.newaxis] progeny_is_missing = progeny_haplotypes < 0 parent_is_missing = np.any(parent_haplotypes < 0, axis=1) # need this for broadcasting, but also need to retain original for later parent_is_missing_bc = parent_is_missing[:, np.newaxis] parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :]) parent_is_hom_ref = parent_diplotype.is_hom_ref() parent_is_het = parent_diplotype.is_het() parent_is_hom_alt = parent_diplotype.is_hom_alt() # identify allele calls where inheritance can be determined is_callable = ~progeny_is_missing & ~parent_is_missing_bc is_callable_seg = is_callable & parent_is_het # main inheritance states inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1) inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2) nonseg_ref = (is_callable & parent_is_hom_ref & (progeny_haplotypes == parent1)) nonseg_alt = (is_callable & parent_is_hom_alt & (progeny_haplotypes == parent1)) nonparental = ( is_callable & (progeny_haplotypes != parent1) & (progeny_haplotypes != parent2) ) # record inheritance states # N.B., order in which these are set matters painting = np.zeros(progeny_haplotypes.shape, dtype='u1') painting[inherit_parent1] = INHERIT_PARENT1 painting[inherit_parent2] = INHERIT_PARENT2 painting[nonseg_ref] = INHERIT_NONSEG_REF painting[nonseg_alt] = INHERIT_NONSEG_ALT painting[nonparental] = INHERIT_NONPARENTAL painting[parent_is_missing] = INHERIT_PARENT_MISSING painting[progeny_is_missing] = INHERIT_MISSING return painting
python
def paint_transmission(parent_haplotypes, progeny_haplotypes): """Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8) """ # check inputs parent_haplotypes = HaplotypeArray(parent_haplotypes) progeny_haplotypes = HaplotypeArray(progeny_haplotypes) if parent_haplotypes.n_haplotypes != 2: raise ValueError('exactly two parental haplotypes should be provided') # convenience variables parent1 = parent_haplotypes[:, 0, np.newaxis] parent2 = parent_haplotypes[:, 1, np.newaxis] progeny_is_missing = progeny_haplotypes < 0 parent_is_missing = np.any(parent_haplotypes < 0, axis=1) # need this for broadcasting, but also need to retain original for later parent_is_missing_bc = parent_is_missing[:, np.newaxis] parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :]) parent_is_hom_ref = parent_diplotype.is_hom_ref() parent_is_het = parent_diplotype.is_het() parent_is_hom_alt = parent_diplotype.is_hom_alt() # identify allele calls where inheritance can be determined is_callable = ~progeny_is_missing & ~parent_is_missing_bc is_callable_seg = is_callable & parent_is_het # main inheritance states inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1) inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2) nonseg_ref = (is_callable & parent_is_hom_ref & (progeny_haplotypes == parent1)) nonseg_alt = (is_callable & parent_is_hom_alt & (progeny_haplotypes == parent1)) nonparental = ( is_callable & (progeny_haplotypes != parent1) & (progeny_haplotypes != parent2) ) # record inheritance states # N.B., order in which these are set matters painting = np.zeros(progeny_haplotypes.shape, dtype='u1') painting[inherit_parent1] = INHERIT_PARENT1 painting[inherit_parent2] = INHERIT_PARENT2 painting[nonseg_ref] = INHERIT_NONSEG_REF painting[nonseg_alt] = INHERIT_NONSEG_ALT painting[nonparental] = INHERIT_NONPARENTAL painting[parent_is_missing] = INHERIT_PARENT_MISSING painting[progeny_is_missing] = INHERIT_MISSING return painting
Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L232-L323
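A complementary sketch (hypothetical haplotypes) showing how the integer codes returned by paint_transmission can be tallied, for example to see how often each parental haplotype was transmitted.

import numpy as np
import allel

haplotypes = allel.HaplotypeArray([
    [0, 1, 0, 1, 1],
    [1, 0, 0, 1, 0],
], dtype='i1')

painting = allel.paint_transmission(haplotypes[:, :2], haplotypes[:, 2:])
print(painting)                                    # [[1 2 2]
                                                   #  [2 1 2]]
print(np.bincount(painting.ravel(), minlength=8))  # counts per inheritance code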
cggh/scikit-allel
allel/stats/mendel.py
phase_progeny_by_transmission
def phase_progeny_by_transmission(g): """Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]]) """ # setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # outputs return g
python
def phase_progeny_by_transmission(g): """Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]]) """ # setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # outputs return g
Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L326-L405
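A complementary sketch (hypothetical trio) checking which progeny calls could be phased via the is_phased attribute set by phase_progeny_by_transmission.

import allel

g = allel.GenotypeArray([
    [[0, 0], [1, 1], [0, 1]],   # aa x bb: progeny phase is determined
    [[0, 1], [0, 1], [0, 1]],   # ab x ab -> ab: phase is ambiguous
], dtype='i1')

g = allel.phase_progeny_by_transmission(g)
print(g.is_phased[:, 2:])   # [[ True]
                            #  [False]]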
cggh/scikit-allel
allel/stats/mendel.py
phase_parents_by_transmission
def phase_parents_by_transmission(g, window_size): """Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible. """ # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g
python
def phase_parents_by_transmission(g, window_size): """Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible. """ # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g
Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L408-L443
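A minimal sketch of the intended two-step workflow, under the assumption that both functions are importable from allel.stats.mendel as in the path above; the trio genotypes are hypothetical, and progeny must be phased before the parents can be.

from allel.stats.mendel import (phase_progeny_by_transmission,
                                phase_parents_by_transmission)
import allel

g = allel.GenotypeArray([
    [[0, 1], [0, 0], [0, 0]],
    [[0, 1], [0, 0], [1, 0]],
    [[0, 1], [1, 1], [1, 1]],
], dtype='i1')

g = phase_progeny_by_transmission(g)                  # sets g.is_phased for the progeny
g = phase_parents_by_transmission(g, window_size=10)  # then phase the parents
print(g.is_phased)                                    # parents phased where possible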
cggh/scikit-allel
allel/stats/mendel.py
phase_by_transmission
def phase_by_transmission(g, window_size, copy=True): """Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible. """ # setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # phase the parents _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L446-L485
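For readers skimming these records, a minimal usage sketch (not part of the source repository): it assumes the function is exported at the top level as allel.phase_by_transmission, and the tiny trio genotypes below are invented purely for illustration.

import allel

# parents in the first two columns, progeny in the remaining columns (invented data)
g = allel.GenotypeArray([[[0, 0], [0, 1], [0, 0], [0, 1]],
                         [[0, 0], [1, 1], [0, 1], [0, 1]],
                         [[1, 1], [0, 0], [0, 1], [0, 1]]], dtype='i1')

# phase progeny by transmission, then phase the parents from the phased progeny
gp = allel.phase_by_transmission(g, window_size=100)
print(gp.is_phased)  # True where a call could be phased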
cggh/scikit-allel
allel/chunked/util.py
get_blen_array
def get_blen_array(data, blen=None):
    """Try to guess a reasonable block length to use for block-wise iteration
    over `data`."""

    if blen is None:

        if hasattr(data, 'chunklen'):
            # bcolz carray
            return data.chunklen

        elif hasattr(data, 'chunks') and \
                hasattr(data, 'shape') and \
                hasattr(data.chunks, '__len__') and \
                hasattr(data.shape, '__len__') and \
                len(data.chunks) == len(data.shape):
            # something like h5py dataset
            return data.chunks[0]

        else:
            # fall back to something simple, ~1Mb chunks
            row = np.asarray(data[0])
            return max(1, (2**20) // row.nbytes)

    else:
        return blen
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/util.py#L96-L120
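A short sketch of how this block-length heuristic behaves (my own example; it assumes get_blen_array can be imported from allel.chunked.util, as the file path above suggests).

import numpy as np
from allel.chunked.util import get_blen_array

a = np.zeros((1000, 10), dtype='i4')
print(get_blen_array(a))           # no chunk metadata, so roughly 1Mb worth of rows
print(get_blen_array(a, blen=50))  # an explicit blen is returned unchanged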
cggh/scikit-allel
allel/chunked/storage_hdf5.py
h5fmem
def h5fmem(**kwargs):
    """Create an in-memory HDF5 file."""

    # need a file name even tho nothing is ever written
    fn = tempfile.mktemp()

    # file creation args
    kwargs['mode'] = 'w'
    kwargs['driver'] = 'core'
    kwargs['backing_store'] = False

    # open HDF5 file
    h5f = h5py.File(fn, **kwargs)

    return h5f
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/storage_hdf5.py#L17-L31
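As a quick illustration (assuming h5fmem is importable from allel.chunked.storage_hdf5, per the path above), the returned object behaves like any h5py.File, but the 'core' driver with backing_store=False keeps everything in memory:

import numpy as np
from allel.chunked.storage_hdf5 import h5fmem

h5f = h5fmem()
h5f.create_dataset('counts', data=np.arange(10))
print(h5f['counts'][:])  # data lives only in memory; nothing is written to disk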
cggh/scikit-allel
allel/chunked/storage_hdf5.py
h5ftmp
def h5ftmp(**kwargs):
    """Create an HDF5 file backed by a temporary file."""

    # create temporary file name
    suffix = kwargs.pop('suffix', '.h5')
    prefix = kwargs.pop('prefix', 'scikit_allel_')
    tempdir = kwargs.pop('dir', None)
    fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir)
    atexit.register(os.remove, fn)

    # file creation args
    kwargs['mode'] = 'w'

    # open HDF5 file
    h5f = h5py.File(fn, **kwargs)

    return h5f
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/storage_hdf5.py#L34-L50
cggh/scikit-allel
allel/chunked/core.py
store
def store(data, arr, start=0, stop=None, offset=0, blen=None):
    """Copy `data` block-wise into `arr`."""

    # setup
    blen = _util.get_blen_array(data, blen)
    if stop is None:
        stop = len(data)
    else:
        stop = min(stop, len(data))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    for bi in range(start, stop, blen):
        bj = min(bi+blen, stop)
        bl = bj - bi
        arr[offset:offset+bl] = data[bi:bj]
        offset += bl
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L16-L34
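A minimal sketch of block-wise copying into a pre-allocated destination (invented data; assumes store is importable from allel.chunked.core, per the path above):

import numpy as np
from allel.chunked.core import store

src = np.arange(10)
dst = np.empty(10, dtype=src.dtype)
store(src, dst, blen=3)          # copy in blocks of three elements
print(np.array_equal(src, dst))  # True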
cggh/scikit-allel
allel/chunked/core.py
copy
def copy(data, start=0, stop=None, blen=None, storage=None, create='array', **kwargs):
    """Copy `data` block-wise into a new array."""

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    if stop is None:
        stop = len(data)
    else:
        stop = min(stop, len(data))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    out = None
    for i in range(start, stop, blen):
        j = min(i+blen, stop)
        block = data[i:j]
        if out is None:
            out = getattr(storage, create)(block, expectedlen=length, **kwargs)
        else:
            out.append(block)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L37-L62
cggh/scikit-allel
allel/chunked/core.py
copy_table
def copy_table(tbl, start=0, stop=None, blen=None, storage=None, create='table',
               **kwargs):
    """Copy `tbl` block-wise into a new table."""

    # setup
    names, columns = _util.check_table_like(tbl)
    storage = _util.get_storage(storage)
    blen = _util.get_blen_table(tbl, blen)
    if stop is None:
        stop = len(columns[0])
    else:
        stop = min(stop, len(columns[0]))
    length = stop - start
    if length < 0:
        raise ValueError('invalid stop/start')

    # copy block-wise
    out = None
    for i in range(start, stop, blen):
        j = min(i+blen, stop)
        res = [c[i:j] for c in columns]
        if out is None:
            out = getattr(storage, create)(res, names=names, expectedlen=length,
                                           **kwargs)
        else:
            out.append(res)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L65-L92
cggh/scikit-allel
allel/chunked/core.py
map_blocks
def map_blocks(data, f, blen=None, storage=None, create='array', **kwargs):
    """Apply function `f` block-wise over `data`."""

    # setup
    storage = _util.get_storage(storage)
    if isinstance(data, tuple):
        blen = max(_util.get_blen_array(d, blen) for d in data)
    else:
        blen = _util.get_blen_array(data, blen)
    if isinstance(data, tuple):
        _util.check_equal_length(*data)
        length = len(data[0])
    else:
        length = len(data)

    # block-wise iteration
    out = None
    for i in range(0, length, blen):
        j = min(i+blen, length)

        # obtain blocks
        if isinstance(data, tuple):
            blocks = [d[i:j] for d in data]
        else:
            blocks = [data[i:j]]

        # map
        res = f(*blocks)

        # store
        if out is None:
            out = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L95-L130
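A small sketch of block-wise mapping (invented data; assumes map_blocks is importable from allel.chunked.core and that the default chunked storage backend is available):

import numpy as np
from allel.chunked.core import map_blocks

data = np.arange(12).reshape(6, 2)
out = map_blocks(data, lambda block: block * 2, blen=2)
print(np.asarray(out))  # same shape as the input, every value doubled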
cggh/scikit-allel
allel/chunked/core.py
reduce_axis
def reduce_axis(data, reducer, block_reducer, mapper=None, axis=None, blen=None,
                storage=None, create='array', **kwargs):
    """Apply an operation to `data` that reduces over one or more axes."""

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    # normalise axis arg
    if isinstance(axis, int):
        axis = (axis,)

    # deal with 'out' kwarg if supplied, can arise if a chunked array is
    # passed as an argument to numpy.sum(), see also
    # https://github.com/cggh/scikit-allel/issues/66
    kwarg_out = kwargs.pop('out', None)
    if kwarg_out is not None:
        raise ValueError('keyword argument "out" is not supported')

    if axis is None or 0 in axis:
        # two-step reduction
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            res = reducer(block, axis=axis)
            if out is None:
                out = res
            else:
                out = block_reducer(out, res)
        if np.isscalar(out):
            return out
        elif len(out.shape) == 0:
            return out[()]
        else:
            return getattr(storage, create)(out, **kwargs)

    else:
        # first dimension is preserved, no need to reduce blocks
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = data[i:j]
            if mapper:
                block = mapper(block)
            r = reducer(block, axis=axis)
            if out is None:
                out = getattr(storage, create)(r, expectedlen=length, **kwargs)
            else:
                out.append(r)
        return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L133-L185
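The reducers that follow (amax, amin, asum, count_nonzero) are thin wrappers around this function; calling it directly looks roughly like this (invented data; assumes reduce_axis is importable from allel.chunked.core):

import numpy as np
from allel.chunked.core import reduce_axis

data = np.arange(12).reshape(6, 2)
total = reduce_axis(data, reducer=np.sum, block_reducer=np.add, blen=2)
print(total)  # 66, accumulated block by block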
cggh/scikit-allel
allel/chunked/core.py
amax
def amax(data, axis=None, mapper=None, blen=None, storage=None, create='array',
         **kwargs):
    """Compute the maximum value."""
    return reduce_axis(data, axis=axis, reducer=np.amax, block_reducer=np.maximum,
                       mapper=mapper, blen=blen, storage=storage, create=create,
                       **kwargs)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L188-L193
cggh/scikit-allel
allel/chunked/core.py
amin
def amin(data, axis=None, mapper=None, blen=None, storage=None, create='array',
         **kwargs):
    """Compute the minimum value."""
    return reduce_axis(data, axis=axis, reducer=np.amin, block_reducer=np.minimum,
                       mapper=mapper, blen=blen, storage=storage, create=create,
                       **kwargs)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L196-L201
cggh/scikit-allel
allel/chunked/core.py
asum
def asum(data, axis=None, mapper=None, blen=None, storage=None, create='array',
         **kwargs):
    """Compute the sum."""
    return reduce_axis(data, axis=axis, reducer=np.sum, block_reducer=np.add,
                       mapper=mapper, blen=blen, storage=storage, create=create,
                       **kwargs)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L205-L210
cggh/scikit-allel
allel/chunked/core.py
count_nonzero
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array',
                  **kwargs):
    """Count the number of non-zero elements."""
    return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add,
                       mapper=mapper, blen=blen, storage=storage, create=create,
                       **kwargs)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L213-L218
cggh/scikit-allel
allel/chunked/core.py
compress
def compress(condition, data, axis=0, out=None, blen=None, storage=None,
             create='array', **kwargs):
    """Return selected slices of an array along given axis."""

    # setup
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    nnz = count_nonzero(condition)

    if axis == 0:
        _util.check_equal_length(data, condition)

        # block iteration
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            bcond = np.asarray(condition[i:j])
            # don't access any data unless we have to
            if np.any(bcond):
                block = np.asarray(data[i:j])
                res = np.compress(bcond, block, axis=0)
                if out is None:
                    out = getattr(storage, create)(res, expectedlen=nnz, **kwargs)
                else:
                    out.append(res)
        return out

    elif axis == 1:

        # block iteration
        out = None
        condition = np.asanyarray(condition)
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = np.asarray(data[i:j])
            res = np.compress(condition, block, axis=1)
            if out is None:
                out = getattr(storage, create)(res, expectedlen=length, **kwargs)
            else:
                out.append(res)
        return out

    else:
        raise NotImplementedError('axis not supported: %s' % axis)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L221-L270
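A brief sketch of block-wise boolean selection along the first axis (invented data; assumes compress is importable from allel.chunked.core and the default chunked storage backend is available):

import numpy as np
from allel.chunked.core import compress

data = np.arange(12).reshape(6, 2)
keep = np.array([True, False, True, False, True, False])
out = compress(keep, data, axis=0, blen=2)
print(np.asarray(out))  # rows 0, 2 and 4 only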
cggh/scikit-allel
allel/chunked/core.py
take
def take(data, indices, axis=0, out=None, mode='raise', blen=None, storage=None,
         create='array', **kwargs):
    """Take elements from an array along an axis."""

    # setup
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    length = len(data)

    if axis == 0:

        # check that indices are strictly increasing
        indices = np.asanyarray(indices)
        if np.any(indices[1:] <= indices[:-1]):
            raise NotImplementedError(
                'indices must be strictly increasing'
            )

        # implement via compress()
        condition = np.zeros((length,), dtype=bool)
        condition[indices] = True
        return compress(condition, data, axis=0, blen=blen, storage=storage,
                        create=create, **kwargs)

    elif axis == 1:

        # setup
        storage = _util.get_storage(storage)
        blen = _util.get_blen_array(data, blen)

        # block iteration
        out = None
        for i in range(0, length, blen):
            j = min(i+blen, length)
            block = data[i:j]
            res = np.take(block, indices, axis=1, mode=mode)
            if out is None:
                out = getattr(storage, create)(res, expectedlen=length, **kwargs)
            else:
                out.append(res)
        return out

    else:
        raise NotImplementedError('axis not supported: %s' % axis)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L273-L318
cggh/scikit-allel
allel/chunked/core.py
compress_table
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None,
                   create='table', **kwargs):
    """Return selected rows of a table."""

    # setup
    if axis is not None and axis != 0:
        raise NotImplementedError('only axis 0 is supported')
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    blen = _util.get_blen_table(tbl, blen)
    _util.check_equal_length(columns[0], condition)
    length = len(columns[0])
    nnz = count_nonzero(condition)

    # block iteration
    out = None
    for i in range(0, length, blen):
        j = min(i+blen, length)
        bcond = condition[i:j]
        # don't access any data unless we have to
        if np.any(bcond):
            bcolumns = [c[i:j] for c in columns]
            res = [np.compress(bcond, c, axis=0) for c in bcolumns]
            if out is None:
                out = getattr(storage, create)(res, names=names, expectedlen=nnz,
                                               **kwargs)
            else:
                out.append(res)
    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L321-L352
cggh/scikit-allel
allel/chunked/core.py
take_table
def take_table(tbl, indices, axis=None, out=None, mode='raise', blen=None,
               storage=None, create='table', **kwargs):
    """Return selected rows of a table."""

    # setup
    if axis is not None and axis != 0:
        raise NotImplementedError('only axis 0 is supported')
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    if mode is not None and mode != 'raise':
        raise NotImplementedError('only mode=raise is supported')
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])

    # check that indices are strictly increasing
    indices = np.asanyarray(indices)
    if np.any(indices[1:] <= indices[:-1]):
        raise NotImplementedError(
            'indices must be strictly increasing'
        )

    # implement via compress()
    condition = np.zeros((length,), dtype=bool)
    condition[indices] = True
    return compress_table(condition, tbl, blen=blen, storage=storage,
                          create=create, **kwargs)
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L355-L381
cggh/scikit-allel
allel/chunked/core.py
subset
def subset(data, sel0=None, sel1=None, blen=None, storage=None, create='array',
           **kwargs):
    """Return selected rows and columns of an array."""

    # TODO refactor sel0 and sel1 normalization with ndarray.subset

    # setup
    storage = _util.get_storage(storage)
    blen = _util.get_blen_array(data, blen)
    length = len(data)
    if sel0 is not None:
        sel0 = np.asanyarray(sel0)
    if sel1 is not None:
        sel1 = np.asanyarray(sel1)

    # ensure boolean array for dim 0
    if sel0 is not None and sel0.dtype.kind != 'b':
        # assume indices, convert to boolean condition
        tmp = np.zeros(length, dtype=bool)
        tmp[sel0] = True
        sel0 = tmp

    # ensure indices for dim 1
    if sel1 is not None and sel1.dtype.kind == 'b':
        # assume boolean condition, convert to indices
        sel1, = np.nonzero(sel1)

    # shortcuts
    if sel0 is None and sel1 is None:
        return copy(data, blen=blen, storage=storage, create=create, **kwargs)
    elif sel1 is None:
        return compress(sel0, data, axis=0, blen=blen, storage=storage,
                        create=create, **kwargs)
    elif sel0 is None:
        return take(data, sel1, axis=1, blen=blen, storage=storage,
                    create=create, **kwargs)

    # build output
    sel0_nnz = count_nonzero(sel0)
    out = None
    for i in range(0, length, blen):
        j = min(i+blen, length)
        bsel0 = sel0[i:j]
        # don't access data unless we have to
        if np.any(bsel0):
            block = data[i:j]
            res = _numpy_subset(block, bsel0, sel1)
            if out is None:
                out = getattr(storage, create)(res, expectedlen=sel0_nnz, **kwargs)
            else:
                out.append(res)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L384-L437
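A short sketch combining row and column selection in one pass (invented data; assumes subset is importable from allel.chunked.core and the default chunked storage backend is available):

import numpy as np
from allel.chunked.core import subset

data = np.arange(20).reshape(5, 4)
rows = [0, 2, 4]                   # indices for dim 0, converted to a boolean mask internally
cols = [True, False, True, False]  # boolean condition for dim 1, converted to indices
out = subset(data, rows, cols, blen=2)
print(np.asarray(out))  # shape (3, 2)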
cggh/scikit-allel
allel/chunked/core.py
concatenate_table
def concatenate_table(tup, blen=None, storage=None, create='table', **kwargs):
    """Stack tables in sequence vertically (row-wise)."""

    # setup
    storage = _util.get_storage(storage)
    if not isinstance(tup, (tuple, list)):
        raise ValueError('expected tuple or list, found %r' % tup)
    if len(tup) < 2:
        raise ValueError('expected two or more tables to stack')

    # build output
    expectedlen = sum(len(t) for t in tup)
    out = None
    tnames = None
    for tdata in tup:
        tblen = _util.get_blen_table(tdata, blen)
        tnames, tcolumns = _util.check_table_like(tdata, names=tnames)
        tlen = len(tcolumns[0])
        for i in range(0, tlen, tblen):
            j = min(i+tblen, tlen)
            bcolumns = [c[i:j] for c in tcolumns]
            if out is None:
                out = getattr(storage, create)(bcolumns, names=tnames,
                                               expectedlen=expectedlen, **kwargs)
            else:
                out.append(bcolumns)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L440-L467
cggh/scikit-allel
allel/chunked/core.py
concatenate
def concatenate(tup, axis=0, blen=None, storage=None, create='array', **kwargs):
    """Concatenate arrays."""

    # setup
    storage = _util.get_storage(storage)
    if not isinstance(tup, (tuple, list)):
        raise ValueError('expected tuple or list, found %r' % tup)
    if len(tup) < 2:
        raise ValueError('expected two or more arrays')

    if axis == 0:

        # build output
        expectedlen = sum(len(a) for a in tup)
        out = None
        for a in tup:
            ablen = _util.get_blen_array(a, blen)
            for i in range(0, len(a), ablen):
                j = min(i+ablen, len(a))
                block = a[i:j]
                if out is None:
                    out = getattr(storage, create)(block, expectedlen=expectedlen,
                                                   **kwargs)
                else:
                    out.append(block)

    else:

        def f(*blocks):
            return np.concatenate(blocks, axis=axis)

        out = map_blocks(tup, f, blen=blen, storage=storage, create=create, **kwargs)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L470-L502
cggh/scikit-allel
allel/chunked/core.py
binary_op
def binary_op(data, op, other, blen=None, storage=None, create='array', **kwargs):
    """Compute a binary operation block-wise over `data`."""

    # normalise scalars
    if hasattr(other, 'shape') and len(other.shape) == 0:
        other = other[()]

    if np.isscalar(other):

        def f(block):
            return op(block, other)

        return map_blocks(data, f, blen=blen, storage=storage, create=create,
                          **kwargs)

    elif len(data) == len(other):

        def f(a, b):
            return op(a, b)

        return map_blocks((data, other), f, blen=blen, storage=storage,
                          create=create, **kwargs)

    else:
        raise NotImplementedError('argument type not supported')
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L505-L525
cggh/scikit-allel
allel/chunked/core.py
eval_table
def eval_table(tbl, expression, vm='python', blen=None, storage=None,
               create='array', vm_kwargs=None, **kwargs):
    """Evaluate `expression` against columns of a table."""

    # setup
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    length = len(columns[0])
    if vm_kwargs is None:
        vm_kwargs = dict()

    # setup vm
    if vm == 'numexpr':
        import numexpr
        evaluate = numexpr.evaluate
    elif vm == 'python':
        # noinspection PyUnusedLocal
        def evaluate(expr, local_dict=None, **kw):
            # takes no keyword arguments
            return eval(expr, dict(), local_dict)
    else:
        raise ValueError('expected vm either "numexpr" or "python"')

    # compile expression and get required columns
    variables = _get_expression_variables(expression, vm)
    required_columns = {v: columns[names.index(v)] for v in variables}

    # determine block size for evaluation
    blen = _util.get_blen_table(required_columns, blen=blen)

    # build output
    out = None
    for i in range(0, length, blen):
        j = min(i+blen, length)
        blocals = {v: c[i:j] for v, c in required_columns.items()}
        res = evaluate(expression, local_dict=blocals, **vm_kwargs)
        if out is None:
            out = getattr(storage, create)(res, expectedlen=length, **kwargs)
        else:
            out.append(res)

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/core.py#L545-L586
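A minimal sketch of evaluating an expression against table columns (invented columns; assumes eval_table is importable from allel.chunked.core and that a plain dict of equal-length arrays is accepted as a table):

import numpy as np
from allel.chunked.core import eval_table

tbl = {'DP': np.array([10, 30, 5, 40]),
       'QD': np.array([2.0, 9.5, 1.0, 20.0])}
out = eval_table(tbl, '(DP > 8) & (QD > 5)', vm='python', blen=2)
print(np.asarray(out))  # [False  True False  True]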
cggh/scikit-allel
allel/model/util.py
create_allele_mapping
def create_allele_mapping(ref, alt, alleles, dtype='i1'):
    """Create an array mapping variant alleles into a different allele index
    system.

    Parameters
    ----------
    ref : array_like, S1, shape (n_variants,)
        Reference alleles.
    alt : array_like, S1, shape (n_variants, n_alt_alleles)
        Alternate alleles.
    alleles : array_like, S1, shape (n_variants, n_alleles)
        Alleles defining the new allele indexing.
    dtype : dtype, optional
        Output dtype.

    Returns
    -------
    mapping : ndarray, int8, shape (n_variants, n_alt_alleles + 1)

    Examples
    --------
    Example with biallelic variants::

        >>> import allel
        >>> ref = [b'A', b'C', b'T', b'G']
        >>> alt = [b'T', b'G', b'C', b'A']
        >>> alleles = [[b'A', b'T'],  # no transformation
        ...            [b'G', b'C'],  # swap
        ...            [b'T', b'A'],  # 1 missing
        ...            [b'A', b'C']]  # 1 missing
        >>> mapping = allel.create_allele_mapping(ref, alt, alleles)
        >>> mapping
        array([[ 0,  1],
               [ 1,  0],
               [ 0, -1],
               [-1,  0]], dtype=int8)

    Example with multiallelic variants::

        >>> ref = [b'A', b'C', b'T']
        >>> alt = [[b'T', b'G'],
        ...        [b'A', b'T'],
        ...        [b'G', b'.']]
        >>> alleles = [[b'A', b'T'],
        ...            [b'C', b'T'],
        ...            [b'G', b'A']]
        >>> mapping = create_allele_mapping(ref, alt, alleles)
        >>> mapping
        array([[ 0,  1, -1],
               [ 0, -1,  1],
               [-1,  0, -1]], dtype=int8)

    See Also
    --------
    GenotypeArray.map_alleles, HaplotypeArray.map_alleles,
    AlleleCountsArray.map_alleles

    """
    ref = asarray_ndim(ref, 1)
    alt = asarray_ndim(alt, 1, 2)
    alleles = asarray_ndim(alleles, 1, 2)
    check_dim0_aligned(ref, alt, alleles)

    # reshape for convenience
    ref = ref[:, None]
    if alt.ndim == 1:
        alt = alt[:, None]
    if alleles.ndim == 1:
        alleles = alleles[:, None]
    source_alleles = np.append(ref, alt, axis=1)

    # setup output array
    out = np.empty(source_alleles.shape, dtype=dtype)
    out.fill(-1)

    # find matches
    for ai in range(source_alleles.shape[1]):
        match = source_alleles[:, ai, None] == alleles
        match_i, match_j = match.nonzero()
        out[match_i, ai] = match_j

    return out
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/util.py#L17-L98
cggh/scikit-allel
allel/model/util.py
locate_fixed_differences
def locate_fixed_differences(ac1, ac2):
    """Locate variants with no shared alleles between two populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants,)

    See Also
    --------
    allel.stats.diversity.windowed_df

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
    ...                          [[0, 1], [0, 1], [0, 1], [0, 1]],
    ...                          [[0, 1], [0, 1], [1, 1], [1, 1]],
    ...                          [[0, 0], [0, 0], [1, 1], [2, 2]],
    ...                          [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
    >>> ac1 = g.count_alleles(subpop=[0, 1])
    >>> ac2 = g.count_alleles(subpop=[2, 3])
    >>> loc_df = allel.locate_fixed_differences(ac1, ac2)
    >>> loc_df
    array([ True, False, False,  True,  True])

    """

    # check inputs
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    check_dim0_aligned(ac1, ac2)
    ac1, ac2 = ensure_dim1_aligned(ac1, ac2)

    # stack allele counts for convenience
    pac = np.dstack([ac1, ac2])

    # count numbers of alleles called in each population
    pan = np.sum(pac, axis=1)

    # count the numbers of populations with each allele
    npa = np.sum(pac > 0, axis=2)

    # locate variants with allele calls in both populations
    non_missing = np.all(pan > 0, axis=1)

    # locate variants where all alleles are only found in a single population
    no_shared_alleles = np.all(npa <= 1, axis=1)

    return non_missing & no_shared_alleles
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/util.py#L101-L157
cggh/scikit-allel
allel/model/util.py
locate_private_alleles
def locate_private_alleles(*acs):
    """Locate alleles that are found only in a single population.

    Parameters
    ----------
    *acs : array_like, int, shape (n_variants, n_alleles)
        Allele counts arrays from each population.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants, n_alleles)
        Boolean array where elements are True if allele is private to a
        single population.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
    ...                          [[0, 1], [0, 1], [0, 1], [0, 1]],
    ...                          [[0, 1], [0, 1], [1, 1], [1, 1]],
    ...                          [[0, 0], [0, 0], [1, 1], [2, 2]],
    ...                          [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
    >>> ac1 = g.count_alleles(subpop=[0, 1])
    >>> ac2 = g.count_alleles(subpop=[2])
    >>> ac3 = g.count_alleles(subpop=[3])
    >>> loc_private_alleles = allel.locate_private_alleles(ac1, ac2, ac3)
    >>> loc_private_alleles
    array([[ True, False, False],
           [False, False, False],
           [ True, False, False],
           [ True,  True,  True],
           [ True,  True, False]])
    >>> loc_private_variants = np.any(loc_private_alleles, axis=1)
    >>> loc_private_variants
    array([ True, False,  True,  True,  True])

    """

    # check inputs
    acs = [asarray_ndim(ac, 2) for ac in acs]
    check_dim0_aligned(*acs)
    acs = ensure_dim1_aligned(*acs)

    # stack allele counts for convenience
    pac = np.dstack(acs)

    # count the numbers of populations with each allele
    npa = np.sum(pac > 0, axis=2)

    # locate alleles found only in a single population
    loc_pa = npa == 1

    return loc_pa
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/util.py#L160-L213
cggh/scikit-allel
allel/stats/fst.py
weir_cockerham_fst
def weir_cockerham_fst(g, subpops, max_allele=None, blen=None):
    """Compute the variance components from the analyses of variance of
    allele frequencies according to Weir and Cockerham (1984).

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    max_allele : int, optional
        The highest allele index to consider.
    blen : int, optional
        Block length to use for chunked computation.

    Returns
    -------
    a : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between populations.
    b : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between individuals within populations.
    c : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between gametes within individuals.

    Examples
    --------
    Calculate variance components from some genotype data::

        >>> import allel
        >>> g = [[[0, 0], [0, 0], [1, 1], [1, 1]],
        ...      [[0, 1], [0, 1], [0, 1], [0, 1]],
        ...      [[0, 0], [0, 0], [0, 0], [0, 0]],
        ...      [[0, 1], [1, 2], [1, 1], [2, 2]],
        ...      [[0, 0], [1, 1], [0, 1], [-1, -1]]]
        >>> subpops = [[0, 1], [2, 3]]
        >>> a, b, c = allel.weir_cockerham_fst(g, subpops)
        >>> a
        array([[ 0.5  ,  0.5  ,  0.   ],
               [ 0.   ,  0.   ,  0.   ],
               [ 0.   ,  0.   ,  0.   ],
               [ 0.   , -0.125, -0.125],
               [-0.375, -0.375,  0.   ]])
        >>> b
        array([[ 0.        ,  0.        ,  0.        ],
               [-0.25      , -0.25      ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.125     ,  0.25      ],
               [ 0.41666667,  0.41666667,  0.        ]])
        >>> c
        array([[0.        , 0.        , 0.        ],
               [0.5       , 0.5       , 0.        ],
               [0.        , 0.        , 0.        ],
               [0.125     , 0.25      , 0.125     ],
               [0.16666667, 0.16666667, 0.        ]])

    Estimate the parameter theta (a.k.a., Fst) for each variant
    and each allele individually::

        >>> fst = a / (a + b + c)
        >>> fst
        array([[ 1. ,  1. ,  nan],
               [ 0. ,  0. ,  nan],
               [ nan,  nan,  nan],
               [ 0. , -0.5, -0.5],
               [-1.8, -1.8,  nan]])

    Estimate Fst for each variant individually (averaging over alleles)::

        >>> fst = (np.sum(a, axis=1) /
        ...        (np.sum(a, axis=1) + np.sum(b, axis=1) + np.sum(c, axis=1)))
        >>> fst
        array([ 1. ,  0. ,  nan, -0.4, -1.8])

    Estimate Fst averaging over all variants and alleles::

        >>> fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c))
        >>> fst
        -4.36809058868914e-17

    Note that estimated Fst values may be negative.

    """

    # check inputs
    if not hasattr(g, 'shape') or not hasattr(g, 'ndim'):
        g = GenotypeArray(g, copy=False)
    if g.ndim != 3:
        raise ValueError('g must have three dimensions')
    if g.shape[2] != 2:
        raise NotImplementedError('only diploid genotypes are supported')

    # determine highest allele index
    if max_allele is None:
        max_allele = g.max()

    # compute in chunks to avoid loading big arrays into memory
    blen = get_blen_array(g, blen)
    n_variants = g.shape[0]
    shape = (n_variants, max_allele + 1)
    a = np.zeros(shape, dtype='f8')
    b = np.zeros(shape, dtype='f8')
    c = np.zeros(shape, dtype='f8')
    for i in range(0, n_variants, blen):
        j = min(n_variants, i+blen)
        gb = g[i:j]
        ab, bb, cb = _weir_cockerham_fst(gb, subpops, max_allele)
        a[i:j] = ab
        b[i:j] = bb
        c[i:j] = cb

    return a, b, c
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L25-L135
cggh/scikit-allel
allel/stats/fst.py
hudson_fst
def hudson_fst(ac1, ac2, fill=np.nan): """Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- num : ndarray, float, shape (n_variants,) Divergence between the two populations minus average of diversity within each population. den : ndarray, float, shape (n_variants,) Divergence between the two populations. Examples -------- Calculate numerator and denominator for Fst estimation:: >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]]) >>> subpops = [[0, 1], [2, 3]] >>> ac1 = g.count_alleles(subpop=subpops[0]) >>> ac2 = g.count_alleles(subpop=subpops[1]) >>> num, den = allel.hudson_fst(ac1, ac2) >>> num array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333]) >>> den array([1. , 0.5 , 0. , 0.625, 0.5 ]) Estimate Fst for each variant individually:: >>> fst = num / den >>> fst array([ 1. , -0.33333333, nan, -0.2 , -0.66666667]) Estimate Fst averaging over variants:: >>> fst = np.sum(num) / np.sum(den) >>> fst 0.1428571428571429 """ # flake8: noqa # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # calculate these once only an1 = np.sum(ac1, axis=1) an2 = np.sum(ac2, axis=1) # calculate average diversity (a.k.a. heterozygosity) within each # population within = (mean_pairwise_difference(ac1, an1, fill=fill) + mean_pairwise_difference(ac2, an2, fill=fill)) / 2 # calculate divergence (a.k.a. heterozygosity) between each population between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill) # define numerator and denominator for Fst calculations num = between - within den = between return num, den
python
def hudson_fst(ac1, ac2, fill=np.nan): """Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- num : ndarray, float, shape (n_variants,) Divergence between the two populations minus average of diversity within each population. den : ndarray, float, shape (n_variants,) Divergence between the two populations. Examples -------- Calculate numerator and denominator for Fst estimation:: >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]]) >>> subpops = [[0, 1], [2, 3]] >>> ac1 = g.count_alleles(subpop=subpops[0]) >>> ac2 = g.count_alleles(subpop=subpops[1]) >>> num, den = allel.hudson_fst(ac1, ac2) >>> num array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333]) >>> den array([1. , 0.5 , 0. , 0.625, 0.5 ]) Estimate Fst for each variant individually:: >>> fst = num / den >>> fst array([ 1. , -0.33333333, nan, -0.2 , -0.66666667]) Estimate Fst averaging over variants:: >>> fst = np.sum(num) / np.sum(den) >>> fst 0.1428571428571429 """ # flake8: noqa # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # calculate these once only an1 = np.sum(ac1, axis=1) an2 = np.sum(ac2, axis=1) # calculate average diversity (a.k.a. heterozygosity) within each # population within = (mean_pairwise_difference(ac1, an1, fill=fill) + mean_pairwise_difference(ac2, an2, fill=fill)) / 2 # calculate divergence (a.k.a. heterozygosity) between each population between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill) # define numerator and denominator for Fst calculations num = between - within den = between return num, den
Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- num : ndarray, float, shape (n_variants,) Divergence between the two populations minus average of diversity within each population. den : ndarray, float, shape (n_variants,) Divergence between the two populations. Examples -------- Calculate numerator and denominator for Fst estimation:: >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]]) >>> subpops = [[0, 1], [2, 3]] >>> ac1 = g.count_alleles(subpop=subpops[0]) >>> ac2 = g.count_alleles(subpop=subpops[1]) >>> num, den = allel.hudson_fst(ac1, ac2) >>> num array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333]) >>> den array([1. , 0.5 , 0. , 0.625, 0.5 ]) Estimate Fst for each variant individually:: >>> fst = num / den >>> fst array([ 1. , -0.33333333, nan, -0.2 , -0.66666667]) Estimate Fst averaging over variants:: >>> fst = np.sum(num) / np.sum(den) >>> fst 0.1428571428571429
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L250-L327
cggh/scikit-allel
allel/stats/fst.py
patterson_fst
def patterson_fst(aca, acb): """Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- num : ndarray, shape (n_variants,), float Numerator. den : ndarray, shape (n_variants,), float Denominator. Notes ----- See Patterson (2012), Appendix A. TODO check if this is numerically equivalent to Hudson's estimator. """ from allel.stats.admixture import patterson_f2, h_hat num = patterson_f2(aca, acb) den = num + h_hat(aca) + h_hat(acb) return num, den
python
def patterson_fst(aca, acb): """Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- num : ndarray, shape (n_variants,), float Numerator. den : ndarray, shape (n_variants,), float Denominator. Notes ----- See Patterson (2012), Appendix A. TODO check if this is numerically equivalent to Hudson's estimator. """ from allel.stats.admixture import patterson_f2, h_hat num = patterson_f2(aca, acb) den = num + h_hat(aca) + h_hat(acb) return num, den
Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- num : ndarray, shape (n_variants,), float Numerator. den : ndarray, shape (n_variants,), float Denominator. Notes ----- See Patterson (2012), Appendix A. TODO check if this is numerically equivalent to Hudson's estimator.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L330-L360
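A minimal usage sketch for the patterson_fst record above, added for illustration: the genotype data, sample indices and the overall-average step are not from the source, and it is assumed (as the other docstring examples in this file suggest) that the function is exposed at the top-level allel namespace. Note the allele counts are expected to be biallelic, i.e. have exactly two columns.

import numpy as np
import allel

# illustrative biallelic genotypes: 4 variants x 4 diploid samples
g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                         [[0, 1], [0, 1], [0, 1], [0, 1]],
                         [[0, 0], [0, 1], [1, 1], [1, 1]],
                         [[0, 1], [1, 1], [0, 0], [0, 1]]])
aca = g.count_alleles(subpop=[0, 1])   # population A, shape (4, 2)
acb = g.count_alleles(subpop=[2, 3])   # population B, shape (4, 2)
num, den = allel.patterson_fst(aca, acb)
fst_per_variant = num / den                # per-variant estimates
fst_overall = np.sum(num) / np.sum(den)    # ratio-of-sums average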
cggh/scikit-allel
allel/stats/fst.py
windowed_weir_cockerham_fst
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None): """Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variant a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # define the statistic to compute within each window def average_fst(wa, wb, wc): return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc)) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(a, b, c), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
python
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None): """Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variant a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # define the statistic to compute within each window def average_fst(wa, wb, wc): return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc)) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(a, b, c), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L363-L421
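A usage sketch for windowed_weir_cockerham_fst, reusing the small genotype example from the weir_cockerham_fst docstring above; the variant positions, window size and start/stop values are illustrative and not from the source.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                         [[0, 1], [0, 1], [0, 1], [0, 1]],
                         [[0, 0], [0, 0], [0, 0], [0, 0]],
                         [[0, 1], [1, 2], [1, 1], [2, 2]],
                         [[0, 0], [1, 1], [0, 1], [-1, -1]]])
subpops = [[0, 1], [2, 3]]
pos = [5, 12, 18, 22, 27]                     # 1-based, ascending
fst, windows, counts = allel.windowed_weir_cockerham_fst(
    pos, g, subpops, size=10, start=1, stop=30)
# one average Fst per 10 bp window, plus the windows used and the
# number of variants falling in each window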
cggh/scikit-allel
allel/stats/fst.py
windowed_hudson_fst
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variants num, den = hudson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansum(wn) / np.nansum(wd) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(num, den), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
python
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): """Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. """ # compute values per-variants num, den = hudson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansum(wn) / np.nansum(wd) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(num, den), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L424-L479
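A usage sketch for windowed_hudson_fst with synthetic data; the random seed, positions and window size are illustrative. Per-population allele counts are derived with GenotypeArray.count_alleles, as in the hudson_fst docstring above.

import numpy as np
import allel

np.random.seed(42)
# 100 synthetic biallelic variants, 10 diploid samples
g = allel.GenotypeArray(np.random.randint(0, 2, size=(100, 10, 2)))
ac1 = g.count_alleles(subpop=list(range(5)))      # first five samples
ac2 = g.count_alleles(subpop=list(range(5, 10)))  # last five samples
pos = np.arange(1, 101) * 100                     # 1-based positions 100, 200, ..., 10000
fst, windows, counts = allel.windowed_hudson_fst(pos, ac1, ac2, size=2500)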
cggh/scikit-allel
allel/stats/fst.py
moving_weir_cockerham_fst
def moving_weir_cockerham_fst(g, subpops, size, start=0, stop=None, step=None, max_allele=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # compute the numerator and denominator in moving windows num = moving_statistic(a, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den = moving_statistic(a + b + c, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num / den return fst
python
def moving_weir_cockerham_fst(g, subpops, size, start=0, stop=None, step=None, max_allele=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # compute the numerator and denominator in moving windows num = moving_statistic(a, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den = moving_statistic(a + b + c, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num / den return fst
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L540-L582
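A usage sketch for moving_weir_cockerham_fst with synthetic genotypes; the seed, sample split and window size are illustrative.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(100, 10, 2)))
subpops = [list(range(5)), list(range(5, 10))]
fst = allel.moving_weir_cockerham_fst(g, subpops, size=25)
# one Fst value per non-overlapping window of 25 variants (step defaults to size)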
cggh/scikit-allel
allel/stats/fst.py
moving_hudson_fst
def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
python
def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L585-L624
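A usage sketch for moving_hudson_fst, reusing the allele-counts setup from the hudson_fst docstring above; the window size is illustrative.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                         [[0, 1], [0, 1], [0, 1], [0, 1]],
                         [[0, 0], [0, 0], [0, 0], [0, 0]],
                         [[0, 1], [1, 2], [1, 1], [2, 2]],
                         [[0, 0], [1, 1], [0, 1], [-1, -1]]])
ac1 = g.count_alleles(subpop=[0, 1])
ac2 = g.count_alleles(subpop=[2, 3])
fst = allel.moving_hudson_fst(ac1, ac2, size=2)   # windows of 2 variants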
cggh/scikit-allel
allel/stats/fst.py
moving_patterson_fst
def moving_patterson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
python
def moving_patterson_fst(ac1, ac2, size, start=0, stop=None, step=None): """Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L627-L666
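A usage sketch for moving_patterson_fst with synthetic biallelic data; seed and window size are illustrative, and as with patterson_fst the allele counts are assumed to have exactly two columns.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(100, 10, 2)))
ac1 = g.count_alleles(subpop=list(range(5)))
ac2 = g.count_alleles(subpop=list(range(5, 10)))
fst = allel.moving_patterson_fst(ac1, ac2, size=25)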
cggh/scikit-allel
allel/stats/fst.py
average_weir_cockerham_fst
def average_weir_cockerham_fst(g, subpops, blen, max_allele=None): """Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of variants). max_allele : int, optional The highest allele index to consider. Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # calculate overall estimate a_sum = np.nansum(a) b_sum = np.nansum(b) c_sum = np.nansum(c) fst = a_sum / (a_sum + b_sum + c_sum) # compute the numerator and denominator within each block num_bsum = moving_statistic(a, statistic=np.nansum, size=blen) den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
python
def average_weir_cockerham_fst(g, subpops, blen, max_allele=None): """Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of variants). max_allele : int, optional The highest allele index to consider. Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # calculate overall estimate a_sum = np.nansum(a) b_sum = np.nansum(b) c_sum = np.nansum(c) fst = a_sum / (a_sum + b_sum + c_sum) # compute the numerator and denominator within each block num_bsum = moving_statistic(a, statistic=np.nansum, size=blen) den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of variants). max_allele : int, optional The highest allele index to consider. Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L669-L716
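A usage sketch for average_weir_cockerham_fst; the synthetic genotypes and the block length are illustrative and not from the source.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(200, 10, 2)))
subpops = [list(range(5)), list(range(5, 10))]
fst, se, vb, vj = allel.average_weir_cockerham_fst(g, subpops, blen=50)
# fst: overall estimate; se: block-jackknife standard error;
# vb: per-block values; vj: leave-one-block-out values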
cggh/scikit-allel
allel/stats/fst.py
average_hudson_fst
def average_hudson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
python
def average_hudson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L719-L762
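A usage sketch of the block-jackknife wrapper around hudson_fst; the synthetic data and block length are illustrative.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(200, 10, 2)))
ac1 = g.count_alleles(subpop=list(range(5)))
ac2 = g.count_alleles(subpop=list(range(5, 10)))
fst, se, vb, vj = allel.average_hudson_fst(ac1, ac2, blen=50)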
cggh/scikit-allel
allel/stats/fst.py
average_patterson_fst
def average_patterson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
python
def average_patterson_fst(ac1, ac2, blen): """Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. """ # calculate per-variant values num, den = patterson_fst(ac1, ac2) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/fst.py#L765-L808
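A usage sketch for average_patterson_fst; the synthetic biallelic data and block length are illustrative, and as above the allele counts are assumed to have exactly two columns.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(200, 10, 2)))
ac1 = g.count_alleles(subpop=list(range(5)))
ac2 = g.count_alleles(subpop=list(range(5, 10)))
fst, se, vb, vj = allel.average_patterson_fst(ac1, ac2, blen=50)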
cggh/scikit-allel
allel/stats/ld.py
rogers_huff_r
def rogers_huff_r(gn): """Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,) Matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]], ... [[0, 0], [1, 1], [0, 0]], ... [[1, 1], [0, 0], [1, 1]], ... [[0, 0], [0, 1], [-1, -1]]], dtype='i1') >>> gn = g.to_n_alt(fill=-1) >>> gn array([[ 0, 2, 0], [ 0, 2, 0], [ 2, 0, 2], [ 0, 1, -1]], dtype=int8) >>> r = allel.rogers_huff_r(gn) >>> r # doctest: +ELLIPSIS array([ 1. , -1.0000001, 1. , -1.0000001, 1. , -1. ], dtype=float32) >>> r ** 2 # doctest: +ELLIPSIS array([1. , 1.0000002, 1. , 1.0000002, 1. , 1. ], dtype=float32) >>> from scipy.spatial.distance import squareform >>> squareform(r ** 2) array([[0. , 1. , 1.0000002, 1. ], [1. , 0. , 1.0000002, 1. ], [1.0000002, 1.0000002, 0. , 1. ], [1. , 1. , 1. , 0. ]], dtype=float32) """ # check inputs gn = asarray_ndim(gn, 2, dtype='i1') gn = memoryview_safe(gn) # compute correlation coefficients r = gn_pairwise_corrcoef_int8(gn) # convenience for singletons if r.size == 1: r = r[0] return r
python
def rogers_huff_r(gn): """Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,) Matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]], ... [[0, 0], [1, 1], [0, 0]], ... [[1, 1], [0, 0], [1, 1]], ... [[0, 0], [0, 1], [-1, -1]]], dtype='i1') >>> gn = g.to_n_alt(fill=-1) >>> gn array([[ 0, 2, 0], [ 0, 2, 0], [ 2, 0, 2], [ 0, 1, -1]], dtype=int8) >>> r = allel.rogers_huff_r(gn) >>> r # doctest: +ELLIPSIS array([ 1. , -1.0000001, 1. , -1.0000001, 1. , -1. ], dtype=float32) >>> r ** 2 # doctest: +ELLIPSIS array([1. , 1.0000002, 1. , 1.0000002, 1. , 1. ], dtype=float32) >>> from scipy.spatial.distance import squareform >>> squareform(r ** 2) array([[0. , 1. , 1.0000002, 1. ], [1. , 0. , 1.0000002, 1. ], [1.0000002, 1.0000002, 0. , 1. ], [1. , 1. , 1. , 0. ]], dtype=float32) """ # check inputs gn = asarray_ndim(gn, 2, dtype='i1') gn = memoryview_safe(gn) # compute correlation coefficients r = gn_pairwise_corrcoef_int8(gn) # convenience for singletons if r.size == 1: r = r[0] return r
Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,) Matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]], ... [[0, 0], [1, 1], [0, 0]], ... [[1, 1], [0, 0], [1, 1]], ... [[0, 0], [0, 1], [-1, -1]]], dtype='i1') >>> gn = g.to_n_alt(fill=-1) >>> gn array([[ 0, 2, 0], [ 0, 2, 0], [ 2, 0, 2], [ 0, 1, -1]], dtype=int8) >>> r = allel.rogers_huff_r(gn) >>> r # doctest: +ELLIPSIS array([ 1. , -1.0000001, 1. , -1.0000001, 1. , -1. ], dtype=float32) >>> r ** 2 # doctest: +ELLIPSIS array([1. , 1.0000002, 1. , 1.0000002, 1. , 1. ], dtype=float32) >>> from scipy.spatial.distance import squareform >>> squareform(r ** 2) array([[0. , 1. , 1.0000002, 1. ], [1. , 0. , 1.0000002, 1. ], [1.0000002, 1.0000002, 0. , 1. ], [1. , 1. , 1. , 0. ]], dtype=float32)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/ld.py#L16-L72
cggh/scikit-allel
allel/stats/ld.py
rogers_huff_r_between
def rogers_huff_r_between(gna, gnb): """Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (m_variants, n_variants ) Matrix in rectangular form. """ # check inputs gna = asarray_ndim(gna, 2, dtype='i1') gnb = asarray_ndim(gnb, 2, dtype='i1') gna = memoryview_safe(gna) gnb = memoryview_safe(gnb) # compute correlation coefficients r = gn_pairwise2_corrcoef_int8(gna, gnb) # convenience for singletons if r.size == 1: r = r[0, 0] return r
python
def rogers_huff_r_between(gna, gnb): """Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (m_variants, n_variants ) Matrix in rectangular form. """ # check inputs gna = asarray_ndim(gna, 2, dtype='i1') gnb = asarray_ndim(gnb, 2, dtype='i1') gna = memoryview_safe(gna) gnb = memoryview_safe(gnb) # compute correlation coefficients r = gn_pairwise2_corrcoef_int8(gna, gnb) # convenience for singletons if r.size == 1: r = r[0, 0] return r
Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (m_variants, n_variants ) Matrix in rectangular form.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/ld.py#L75-L106
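A usage sketch for rogers_huff_r_between, reusing the genotype data from the rogers_huff_r docstring above; here LD is computed between two groups of variants rather than among all pairs within one set.

import allel

g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]],
                         [[0, 0], [1, 1], [0, 0]],
                         [[1, 1], [0, 0], [1, 1]],
                         [[0, 0], [0, 1], [-1, -1]]], dtype='i1')
gn = g.to_n_alt(fill=-1)
# r between the first two variants (rows) and the last two variants (columns)
r = allel.rogers_huff_r_between(gn[:2], gn[2:])
# r has shape (2, 2), one value per pair across the two groups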
cggh/scikit-allel
allel/stats/ld.py
locate_unlinked
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None): """Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int Window size (number of variants). step : int Number of variants to advance to the next window. threshold : float Maximum value of r**2 to include variants. blen : int, optional Block length to use for chunked computation. Returns ------- loc : ndarray, bool, shape (n_variants) Boolean array where True items locate variants in approximate linkage equilibrium. Notes ----- The value of r**2 between each pair of variants is calculated using the method of Rogers and Huff (2008). """ # check inputs if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'): gn = np.asarray(gn, dtype='i1') if gn.ndim != 2: raise ValueError('gn must have two dimensions') # setup output loc = np.ones(gn.shape[0], dtype='u1') # compute in chunks to avoid loading big arrays into memory blen = get_blen_array(gn, blen) blen = max(blen, 10*size) # avoid too small chunks n_variants = gn.shape[0] for i in range(0, n_variants, blen): # N.B., ensure overlap with next window j = min(n_variants, i+blen+size) gnb = np.asarray(gn[i:j], dtype='i1') gnb = memoryview_safe(gnb) locb = loc[i:j] gn_locate_unlinked_int8(gnb, locb, size, step, threshold) return loc.astype('b1')
python
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None): """Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int Window size (number of variants). step : int Number of variants to advance to the next window. threshold : float Maximum value of r**2 to include variants. blen : int, optional Block length to use for chunked computation. Returns ------- loc : ndarray, bool, shape (n_variants) Boolean array where True items locate variants in approximate linkage equilibrium. Notes ----- The value of r**2 between each pair of variants is calculated using the method of Rogers and Huff (2008). """ # check inputs if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'): gn = np.asarray(gn, dtype='i1') if gn.ndim != 2: raise ValueError('gn must have two dimensions') # setup output loc = np.ones(gn.shape[0], dtype='u1') # compute in chunks to avoid loading big arrays into memory blen = get_blen_array(gn, blen) blen = max(blen, 10*size) # avoid too small chunks n_variants = gn.shape[0] for i in range(0, n_variants, blen): # N.B., ensure overlap with next window j = min(n_variants, i+blen+size) gnb = np.asarray(gn[i:j], dtype='i1') gnb = memoryview_safe(gnb) locb = loc[i:j] gn_locate_unlinked_int8(gnb, locb, size, step, threshold) return loc.astype('b1')
Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int Window size (number of variants). step : int Number of variants to advance to the next window. threshold : float Maximum value of r**2 to include variants. blen : int, optional Block length to use for chunked computation. Returns ------- loc : ndarray, bool, shape (n_variants) Boolean array where True items locate variants in approximate linkage equilibrium. Notes ----- The value of r**2 between each pair of variants is calculated using the method of Rogers and Huff (2008).
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/ld.py#L109-L161
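A usage sketch of LD pruning with locate_unlinked; the synthetic data, window size, step and threshold are illustrative.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(1000, 20, 2)))
gn = g.to_n_alt(fill=-1)
loc = allel.locate_unlinked(gn, size=50, step=10, threshold=0.1)
gn_pruned = gn[loc]      # keep variants in approximate linkage equilibrium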
cggh/scikit-allel
allel/stats/ld.py
windowed_r_squared
def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, percentile=50): """Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. percentile : int or sequence of ints, optional The percentile or percentiles to calculate within each window. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- Linkage disequilibrium (r**2) is calculated using the method of Rogers and Huff (2008). See Also -------- allel.stats.window.windowed_statistic """ # define the statistic function if isinstance(percentile, (list, tuple)): fill = [fill for _ in percentile] def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return [np.percentile(r_squared, p) for p in percentile] else: def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return np.percentile(r_squared, percentile) return windowed_statistic(pos, gn, statistic, size, start=start, stop=stop, step=step, windows=windows, fill=fill)
python
def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, percentile=50): """Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. percentile : int or sequence of ints, optional The percentile or percentiles to calculate within each window. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- Linkage disequilibrium (r**2) is calculated using the method of Rogers and Huff (2008). See Also -------- allel.stats.window.windowed_statistic """ # define the statistic function if isinstance(percentile, (list, tuple)): fill = [fill for _ in percentile] def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return [np.percentile(r_squared, p) for p in percentile] else: def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return np.percentile(r_squared, percentile) return windowed_statistic(pos, gn, statistic, size, start=start, stop=stop, step=step, windows=windows, fill=fill)
Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. percentile : int or sequence of ints, optional The percentile or percentiles to calculate within each window. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- Linkage disequilibrium (r**2) is calculated using the method of Rogers and Huff (2008). See Also -------- allel.stats.window.windowed_statistic
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/ld.py#L164-L230
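A usage sketch summarising r**2 in windows of bases with windowed_r_squared; the synthetic data, positions, window size and percentile are illustrative.

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray(np.random.randint(0, 2, size=(200, 20, 2)))
gn = g.to_n_alt(fill=-1)
pos = np.arange(1, 201) * 50            # 1-based positions 50, 100, ..., 10000
r2_median, windows, counts = allel.windowed_r_squared(
    pos, gn, size=2000, percentile=50)
# median r**2 within each 2000 bp window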
cggh/scikit-allel
allel/stats/ld.py
plot_pairwise_ld
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None): """Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn. """ import matplotlib.pyplot as plt # check inputs m_square = ensure_square(m) # blank out lower triangle and flip up/down m_square = np.tril(m_square)[::-1, :] # set up axes if ax is None: # make a square figure with enough pixels to represent each variant x = m_square.shape[0] / plt.rcParams['figure.dpi'] x = max(x, plt.rcParams['figure.figsize'][0]) fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout(pad=0) # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'Greys') imshow_kwargs.setdefault('vmin', 0) imshow_kwargs.setdefault('vmax', 1) # plot as image im = ax.imshow(m_square, **imshow_kwargs) # tidy up ax.set_xticks([]) ax.set_yticks([]) for s in 'bottom', 'right': ax.spines[s].set_visible(False) if colorbar: plt.gcf().colorbar(im, shrink=.5, pad=0) return ax
python
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None): """Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn. """ import matplotlib.pyplot as plt # check inputs m_square = ensure_square(m) # blank out lower triangle and flip up/down m_square = np.tril(m_square)[::-1, :] # set up axes if ax is None: # make a square figure with enough pixels to represent each variant x = m_square.shape[0] / plt.rcParams['figure.dpi'] x = max(x, plt.rcParams['figure.figsize'][0]) fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout(pad=0) # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'Greys') imshow_kwargs.setdefault('vmin', 0) imshow_kwargs.setdefault('vmax', 1) # plot as image im = ax.imshow(m_square, **imshow_kwargs) # tidy up ax.set_xticks([]) ax.set_yticks([]) for s in 'bottom', 'right': ax.spines[s].set_visible(False) if colorbar: plt.gcf().colorbar(im, shrink=.5, pad=0) return ax
Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/ld.py#L233-L292
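A hypothetical plotting sketch for plot_pairwise_ld. The pairwise values are produced with rogers_huff_r and squared before plotting; the small gn array and the output file name are arbitrary, and the Agg backend is selected only so the script also runs without a display.

import matplotlib
matplotlib.use('Agg')  # optional: non-interactive backend
import numpy as np
import allel

# alternate-allele counts for four variants in four samples
gn = np.array([[0, 1, 2, 0],
               [0, 1, 2, 0],
               [2, 1, 0, 2],
               [0, 0, 1, 2]], dtype='i1')

# pairwise LD in condensed form, then square to obtain r**2
r = allel.rogers_huff_r(gn)
ax = allel.plot_pairwise_ld(r ** 2)
ax.figure.savefig('pairwise_ld.png', dpi=150)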
cggh/scikit-allel
allel/io/util.py
array_to_hdf5
def array_to_hdf5(a, parent, name, **kwargs): """Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5d : h5py dataset """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: kwargs.setdefault('chunks', True) # auto-chunking kwargs.setdefault('dtype', a.dtype) kwargs.setdefault('compression', 'gzip') h5d = parent.require_dataset(name, shape=a.shape, **kwargs) h5d[...] = a return h5d finally: if h5f is not None: h5f.close()
python
def array_to_hdf5(a, parent, name, **kwargs): """Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5d : h5py dataset """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: kwargs.setdefault('chunks', True) # auto-chunking kwargs.setdefault('dtype', a.dtype) kwargs.setdefault('compression', 'gzip') h5d = parent.require_dataset(name, shape=a.shape, **kwargs) h5d[...] = a return h5d finally: if h5f is not None: h5f.close()
Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5d : h5py dataset
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/util.py#L11-L51
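A small sketch of array_to_hdf5, importing it from the module path shown above; the file name 'example.h5' and the dataset name are arbitrary.

import numpy as np
import h5py
from allel.io.util import array_to_hdf5

a = np.arange(100).reshape(20, 5)

# passing a file name as parent opens (and closes) the file automatically;
# gzip compression and auto-chunking are applied by default
array_to_hdf5(a, 'example.h5', 'my_array')

# read back with h5py to confirm the round trip
with h5py.File('example.h5', mode='r') as h5f:
    print(h5f['my_array'][:2])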
cggh/scikit-allel
allel/io/util.py
recarray_from_hdf5_group
def recarray_from_hdf5_group(*args, **kwargs): """Load a recarray from columns stored as separate datasets with an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Parameters ---------- start : int, optional Index to start loading from. stop : int, optional Index to finish loading at. condition : array_like, bool, optional A 1-dimensional boolean array of the same length as the columns of the table to load, indicating a selection of rows to load. """ import h5py h5f = None if len(args) == 1: group = args[0] elif len(args) == 2: file_path, node_path = args h5f = h5py.File(file_path, mode='r') try: group = h5f[node_path] except Exception as e: h5f.close() raise e else: raise ValueError('bad arguments; expected group or (file_path, ' 'node_path), found %s' % repr(args)) try: if not isinstance(group, h5py.Group): raise ValueError('expected group, found %r' % group) # determine dataset names to load available_dataset_names = [n for n in group.keys() if isinstance(group[n], h5py.Dataset)] names = kwargs.pop('names', available_dataset_names) names = [str(n) for n in names] # needed for PY2 for n in names: if n not in set(group.keys()): raise ValueError('name not found: %s' % n) if not isinstance(group[n], h5py.Dataset): raise ValueError('name does not refer to a dataset: %s, %r' % (n, group[n])) # check datasets are aligned datasets = [group[n] for n in names] length = datasets[0].shape[0] for d in datasets[1:]: if d.shape[0] != length: raise ValueError('datasets must be of equal length') # determine start and stop parameters for load start = kwargs.pop('start', 0) stop = kwargs.pop('stop', length) # check condition condition = kwargs.pop('condition', None) # type: np.ndarray condition = asarray_ndim(condition, 1, allow_none=True) if condition is not None and condition.size != length: raise ValueError('length of condition does not match length ' 'of datasets') # setup output data dtype = [(n, d.dtype, d.shape[1:]) for n, d in zip(names, datasets)] ra = np.empty(length, dtype=dtype) for n, d in zip(names, datasets): a = d[start:stop] if condition is not None: a = np.compress(condition[start:stop], a, axis=0) ra[n] = a return ra finally: if h5f is not None: h5f.close()
python
def recarray_from_hdf5_group(*args, **kwargs): """Load a recarray from columns stored as separate datasets with an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Parameters ---------- start : int, optional Index to start loading from. stop : int, optional Index to finish loading at. condition : array_like, bool, optional A 1-dimensional boolean array of the same length as the columns of the table to load, indicating a selection of rows to load. """ import h5py h5f = None if len(args) == 1: group = args[0] elif len(args) == 2: file_path, node_path = args h5f = h5py.File(file_path, mode='r') try: group = h5f[node_path] except Exception as e: h5f.close() raise e else: raise ValueError('bad arguments; expected group or (file_path, ' 'node_path), found %s' % repr(args)) try: if not isinstance(group, h5py.Group): raise ValueError('expected group, found %r' % group) # determine dataset names to load available_dataset_names = [n for n in group.keys() if isinstance(group[n], h5py.Dataset)] names = kwargs.pop('names', available_dataset_names) names = [str(n) for n in names] # needed for PY2 for n in names: if n not in set(group.keys()): raise ValueError('name not found: %s' % n) if not isinstance(group[n], h5py.Dataset): raise ValueError('name does not refer to a dataset: %s, %r' % (n, group[n])) # check datasets are aligned datasets = [group[n] for n in names] length = datasets[0].shape[0] for d in datasets[1:]: if d.shape[0] != length: raise ValueError('datasets must be of equal length') # determine start and stop parameters for load start = kwargs.pop('start', 0) stop = kwargs.pop('stop', length) # check condition condition = kwargs.pop('condition', None) # type: np.ndarray condition = asarray_ndim(condition, 1, allow_none=True) if condition is not None and condition.size != length: raise ValueError('length of condition does not match length ' 'of datasets') # setup output data dtype = [(n, d.dtype, d.shape[1:]) for n, d in zip(names, datasets)] ra = np.empty(length, dtype=dtype) for n, d in zip(names, datasets): a = d[start:stop] if condition is not None: a = np.compress(condition[start:stop], a, axis=0) ra[n] = a return ra finally: if h5f is not None: h5f.close()
Load a recarray from columns stored as separate datasets with an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Parameters ---------- start : int, optional Index to start loading from. stop : int, optional Index to finish loading at. condition : array_like, bool, optional A 1-dimensional boolean array of the same length as the columns of the table to load, indicating a selection of rows to load.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/util.py#L55-L146
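A self-contained sketch of recarray_from_hdf5_group: a toy group with two aligned datasets is first written with plain h5py, then loaded back as a structured array. The file, group and column names are invented for the example.

import numpy as np
import h5py
from allel.io.util import recarray_from_hdf5_group

# build a toy group with two equal-length datasets
with h5py.File('variants.h5', mode='w') as h5f:
    grp = h5f.create_group('variants')
    grp.create_dataset('POS', data=np.array([100, 250, 1200, 1800]))
    grp.create_dataset('QUAL', data=np.array([9.5, 30.0, 12.1, 45.0]))

# load all columns via (file_path, node_path)
ra = recarray_from_hdf5_group('variants.h5', 'variants')
print(ra['POS'])

# load only a named subset of columns
ra_pos = recarray_from_hdf5_group('variants.h5', 'variants', names=['POS'])
print(ra_pos.dtype.names)  # ('POS',)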
cggh/scikit-allel
allel/io/util.py
recarray_to_hdf5_group
def recarray_to_hdf5_group(ra, parent, name, **kwargs): """Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: h5g = parent.require_group(name) for n in ra.dtype.names: array_to_hdf5(ra[n], h5g, n, **kwargs) return h5g finally: if h5f is not None: h5f.close()
python
def recarray_to_hdf5_group(ra, parent, name, **kwargs): """Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group """ import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: h5g = parent.require_group(name) for n in ra.dtype.names: array_to_hdf5(ra[n], h5g, n, **kwargs) return h5g finally: if h5f is not None: h5f.close()
Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/util.py#L149-L188
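A round-trip sketch combining recarray_to_hdf5_group with the loader above; the structured array, file name and group name are arbitrary.

import numpy as np
from allel.io.util import recarray_to_hdf5_group, recarray_from_hdf5_group

ra = np.rec.fromarrays(
    [np.array([100, 250, 1200], dtype='i4'),
     np.array([9.5, 30.0, 12.1], dtype='f4')],
    names=['POS', 'QUAL'])

# one HDF5 dataset is created per column under the group 'variants'
recarray_to_hdf5_group(ra, 'roundtrip.h5', 'variants')

# read it back
out = recarray_from_hdf5_group('roundtrip.h5', 'variants')
print(out['QUAL'])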
cggh/scikit-allel
allel/model/ndarray.py
subset
def subset(data, sel0, sel1): """Apply selections on first and second axes.""" # check inputs data = np.asarray(data) if data.ndim < 2: raise ValueError('data must have 2 or more dimensions') sel0 = asarray_ndim(sel0, 1, allow_none=True) sel1 = asarray_ndim(sel1, 1, allow_none=True) # ensure indices if sel0 is not None and sel0.dtype.kind == 'b': sel0, = np.nonzero(sel0) if sel1 is not None and sel1.dtype.kind == 'b': sel1, = np.nonzero(sel1) # ensure leading dimension indices can be broadcast correctly if sel0 is not None and sel1 is not None: sel0 = sel0[:, np.newaxis] # deal with None arguments if sel0 is None: sel0 = _total_slice if sel1 is None: sel1 = _total_slice return data[sel0, sel1]
python
def subset(data, sel0, sel1): """Apply selections on first and second axes.""" # check inputs data = np.asarray(data) if data.ndim < 2: raise ValueError('data must have 2 or more dimensions') sel0 = asarray_ndim(sel0, 1, allow_none=True) sel1 = asarray_ndim(sel1, 1, allow_none=True) # ensure indices if sel0 is not None and sel0.dtype.kind == 'b': sel0, = np.nonzero(sel0) if sel1 is not None and sel1.dtype.kind == 'b': sel1, = np.nonzero(sel1) # ensure leading dimension indices can be broadcast correctly if sel0 is not None and sel1 is not None: sel0 = sel0[:, np.newaxis] # deal with None arguments if sel0 is None: sel0 = _total_slice if sel1 is None: sel1 = _total_slice return data[sel0, sel1]
Apply selections on first and second axes.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L43-L69
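An illustrative call of the subset helper on a plain 2-D array. In practice the same selection semantics are usually reached through wrapper methods such as GenotypeArray.subset, but importing the function from the module path shown above also works; the data values are made up.

import numpy as np
from allel.model.ndarray import subset

data = np.arange(24).reshape(4, 6)

# rows by integer index, columns by boolean mask
rows = [0, 2]
cols = np.array([True, False, True, False, True, False])

out = subset(data, rows, cols)
print(out.shape)  # (2, 3)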
cggh/scikit-allel
allel/model/ndarray.py
NumpyRecArrayWrapper.eval
def eval(self, expression, vm='python'): """Evaluate an expression against the table columns. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : ndarray """ if vm == 'numexpr': import numexpr as ne return ne.evaluate(expression, local_dict=self) else: if PY2: # locals must be a mapping m = {k: self[k] for k in self.dtype.names} else: m = self return eval(expression, dict(), m)
python
def eval(self, expression, vm='python'): """Evaluate an expression against the table columns. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : ndarray """ if vm == 'numexpr': import numexpr as ne return ne.evaluate(expression, local_dict=self) else: if PY2: # locals must be a mapping m = {k: self[k] for k in self.dtype.names} else: m = self return eval(expression, dict(), m)
Evaluate an expression against the table columns. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : ndarray
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L129-L154
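A sketch of eval via allel.VariantTable, which is assumed here to be a public NumpyRecArrayWrapper subclass exposing this method; the columns and values are invented.

import numpy as np
import allel

ra = np.rec.fromarrays(
    [np.array([100, 250, 1200]),
     np.array([9.5, 30.0, 12.1])],
    names=['POS', 'QUAL'])
vt = allel.VariantTable(ra)

# evaluate a boolean expression against the table columns
flt = vt.eval('(QUAL > 10) & (POS < 1000)')
print(flt)  # [False  True False]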
cggh/scikit-allel
allel/model/ndarray.py
NumpyRecArrayWrapper.query
def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
python
def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L156-L173
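The companion query sketch: the same kind of expression, but used to extract the matching rows rather than just the boolean condition. Again VariantTable and the toy columns are assumptions for illustration.

import numpy as np
import allel

vt = allel.VariantTable(
    np.rec.fromarrays([np.array([100, 250, 1200]),
                       np.array([9.5, 30.0, 12.1])],
                      names=['POS', 'QUAL']))

# keep only rows satisfying the expression
vt_pass = vt.query('QUAL > 10')
print(vt_pass['POS'])  # [ 250 1200]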
cggh/scikit-allel
allel/model/ndarray.py
NumpyRecArrayWrapper.concatenate
def concatenate(self, others): """Concatenate arrays.""" if not isinstance(others, (list, tuple)): others = others, tup = (self.values,) + tuple(o.values for o in others) out = np.concatenate(tup, axis=0) out = type(self)(out) return out
python
def concatenate(self, others): """Concatenate arrays.""" if not isinstance(others, (list, tuple)): others = others, tup = (self.values,) + tuple(o.values for o in others) out = np.concatenate(tup, axis=0) out = type(self)(out) return out
Concatenate arrays.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L192-L199
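A short concatenate sketch, again using VariantTable as an assumed concrete wrapper; a single other object or a list of them may be passed.

import numpy as np
import allel

vt1 = allel.VariantTable(np.rec.fromarrays([np.array([1, 2])], names=['POS']))
vt2 = allel.VariantTable(np.rec.fromarrays([np.array([3, 4])], names=['POS']))

# rows of vt2 are appended after the rows of vt1
vt_all = vt1.concatenate(vt2)
print(vt_all['POS'])  # [1 2 3 4]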
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.fill_masked
def fill_masked(self, value=-1, copy=True): """Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]], dtype='i1') >>> mask = [[True, False], [False, True], [False, False]] >>> g.mask = mask >>> g.fill_masked().values array([[[-1, -1], [ 0, 1]], [[ 0, 1], [-1, -1]], [[ 0, 2], [-1, -1]]], dtype=int8) """ if self.mask is None: raise ValueError('no mask is set') # apply the mask data = np.array(self.values, copy=copy) data[self.mask, ...] = value if copy: out = type(self)(data) # wrap out.is_phased = self.is_phased # don't set mask because it has been filled in else: out = self out.mask = None # reset mask return out
python
def fill_masked(self, value=-1, copy=True): """Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]], dtype='i1') >>> mask = [[True, False], [False, True], [False, False]] >>> g.mask = mask >>> g.fill_masked().values array([[[-1, -1], [ 0, 1]], [[ 0, 1], [-1, -1]], [[ 0, 2], [-1, -1]]], dtype=int8) """ if self.mask is None: raise ValueError('no mask is set') # apply the mask data = np.array(self.values, copy=copy) data[self.mask, ...] = value if copy: out = type(self)(data) # wrap out.is_phased = self.is_phased # don't set mask because it has been filled in else: out = self out.mask = None # reset mask return out
Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]], dtype='i1') >>> mask = [[True, False], [False, True], [False, False]] >>> g.mask = mask >>> g.fill_masked().values array([[[-1, -1], [ 0, 1]], [[ 0, 1], [-1, -1]], [[ 0, 2], [-1, -1]]], dtype=int8)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L332-L380
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_called
def is_called(self): """Find non-missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_called() array([[ True, True], [ True, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_called() array([ True, True, False]) """ out = np.all(self.values >= 0, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
python
def is_called(self): """Find non-missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_called() array([[ True, True], [ True, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_called() array([ True, True, False]) """ out = np.all(self.values >= 0, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Find non-missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_called() array([[ True, True], [ True, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_called() array([ True, True, False])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L382-L417
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_missing
def is_missing(self): """Find missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_missing() array([[False, False], [False, False], [False, True]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_missing() array([False, False, True]) """ out = np.any(self.values < 0, axis=-1) # handle mask if self.mask is not None: out |= self.mask return out
python
def is_missing(self): """Find missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_missing() array([[False, False], [False, False], [False, True]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_missing() array([False, False, True]) """ out = np.any(self.values < 0, axis=-1) # handle mask if self.mask is not None: out |= self.mask return out
Find missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_missing() array([[False, False], [False, False], [False, True]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_missing() array([False, False, True])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L419-L454
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_hom
def is_hom(self, allele=None): """Find genotype calls that are homozygous. Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom() array([[ True, False], [False, True], [ True, False]]) >>> g.is_hom(allele=1) array([[False, False], [False, True], [False, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 2/2 >>> v.is_hom() array([ True, False, True]) """ if allele is None: allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 >= 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) else: out = np.all(self.values == allele, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
python
def is_hom(self, allele=None): """Find genotype calls that are homozygous. Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom() array([[ True, False], [False, True], [ True, False]]) >>> g.is_hom(allele=1) array([[False, False], [False, True], [False, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 2/2 >>> v.is_hom() array([ True, False, True]) """ if allele is None: allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 >= 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) else: out = np.all(self.values == allele, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Find genotype calls that are homozygous. Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom() array([[ True, False], [False, True], [ True, False]]) >>> g.is_hom(allele=1) array([[False, False], [False, True], [False, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 2/2 >>> v.is_hom() array([ True, False, True])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L456-L506
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_hom_alt
def is_hom_alt(self): """Find genotype calls that are homozygous for any alternate (i.e., non-reference) allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom_alt() array([[False, False], [False, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_hom_alt() array([False, True, False]) """ allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 > 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
python
def is_hom_alt(self): """Find genotype calls that are homozygous for any alternate (i.e., non-reference) allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom_alt() array([[False, False], [False, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_hom_alt() array([False, True, False]) """ allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 > 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Find genotype calls that are homozygous for any alternate (i.e., non-reference) allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom_alt() array([[False, False], [False, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_hom_alt() array([False, True, False])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L539-L578
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_het
def is_het(self, allele=None): """Find genotype calls that are heterozygous. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. allele : int, optional Heterozygous allele. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_het() array([[False, True], [ True, False], [ True, False]]) >>> g.is_het(2) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_het() array([False, True, True]) """ allele1 = self.values[..., 0, np.newaxis] # type: np.ndarray other_alleles = self.values[..., 1:] # type: np.ndarray out = np.all(self.values >= 0, axis=-1) & np.any(allele1 != other_alleles, axis=-1) if allele is not None: out &= np.any(self.values == allele, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
python
def is_het(self, allele=None): """Find genotype calls that are heterozygous. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. allele : int, optional Heterozygous allele. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_het() array([[False, True], [ True, False], [ True, False]]) >>> g.is_het(2) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_het() array([False, True, True]) """ allele1 = self.values[..., 0, np.newaxis] # type: np.ndarray other_alleles = self.values[..., 1:] # type: np.ndarray out = np.all(self.values >= 0, axis=-1) & np.any(allele1 != other_alleles, axis=-1) if allele is not None: out &= np.any(self.values == allele, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Find genotype calls that are heterozygous. Parameters ---------- allele : int, optional Heterozygous allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_het() array([[False, True], [ True, False], [ True, False]]) >>> g.is_het(2) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_het() array([False, True, True])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L580-L625
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.is_call
def is_call(self, call): """Locate genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype is `call`. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_call((0, 2)) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_call((0, 2)) array([False, False, True]) """ # guard conditions if not len(call) == self.shape[-1]: raise ValueError('invalid call ploidy: %s', repr(call)) if self.ndim == 2: call = np.asarray(call)[np.newaxis, :] else: call = np.asarray(call)[np.newaxis, np.newaxis, :] out = np.all(self.values == call, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
python
def is_call(self, call): """Locate genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype is `call`. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_call((0, 2)) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_call((0, 2)) array([False, False, True]) """ # guard conditions if not len(call) == self.shape[-1]: raise ValueError('invalid call ploidy: %s', repr(call)) if self.ndim == 2: call = np.asarray(call)[np.newaxis, :] else: call = np.asarray(call)[np.newaxis, np.newaxis, :] out = np.all(self.values == call, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Locate genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype is `call`. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_call((0, 2)) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_call((0, 2)) array([False, False, True])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L627-L674
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_called
def count_called(self, axis=None): """Count called genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_called() return np.sum(b, axis=axis)
python
def count_called(self, axis=None): """Count called genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_called() return np.sum(b, axis=axis)
Count called genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L676-L686
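A counting sketch for count_called, showing the overall count and the per-sample and per-variant axes; the genotype values are made up.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[0, 2], [-1, -1]]], dtype='i1')

print(g.count_called())         # 5 non-missing calls overall
print(g.count_called(axis=0))   # per sample: [3 2]
print(g.count_called(axis=1))   # per variant: [2 2 1]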
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_missing
def count_missing(self, axis=None): """Count missing genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_missing() return np.sum(b, axis=axis)
python
def count_missing(self, axis=None): """Count missing genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_missing() return np.sum(b, axis=axis)
Count missing genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L688-L698
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_hom
def count_hom(self, allele=None, axis=None): """Count homozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom(allele=allele) return np.sum(b, axis=axis)
python
def count_hom(self, allele=None, axis=None): """Count homozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom(allele=allele) return np.sum(b, axis=axis)
Count homozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L700-L712
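A sketch of count_hom with and without a specific allele index; the values are illustrative only.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[1, 1], [1, 1]],
                         [[2, 2], [-1, -1]]], dtype='i1')

print(g.count_hom())                  # 4 homozygous calls of any allele
print(g.count_hom(allele=1))          # 2 calls are 1/1
print(g.count_hom(allele=1, axis=0))  # per sample: [1 1]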
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_hom_ref
def count_hom_ref(self, axis=None): """Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom_ref() return np.sum(b, axis=axis)
python
def count_hom_ref(self, axis=None): """Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom_ref() return np.sum(b, axis=axis)
Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L714-L724
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_hom_alt
def count_hom_alt(self, axis=None): """Count homozygous alternate genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom_alt() return np.sum(b, axis=axis)
python
def count_hom_alt(self, axis=None): """Count homozygous alternate genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_hom_alt() return np.sum(b, axis=axis)
Count homozygous alternate genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L726-L736
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_het
def count_het(self, allele=None, axis=None): """Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_het(allele=allele) return np.sum(b, axis=axis)
python
def count_het(self, allele=None, axis=None): """Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_het(allele=allele) return np.sum(b, axis=axis)
Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L738-L750
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.count_call
def count_call(self, call, axis=None): """Count genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_call(call=call) return np.sum(b, axis=axis)
python
def count_call(self, call, axis=None): """Count genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. axis : int, optional Axis over which to count, or None to perform overall count. """ b = self.is_call(call=call) return np.sum(b, axis=axis)
Count genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. axis : int, optional Axis over which to count, or None to perform overall count.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L752-L764
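A sketch of count_call for an exact genotype call, here the heterozygous call 0/1; the data are invented.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[0, 2], [-1, -1]]], dtype='i1')

print(g.count_call((0, 1)))          # 2 calls are exactly 0/1
print(g.count_call((0, 1), axis=0))  # per sample: [1 1]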
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.to_n_ref
def to_n_ref(self, fill=0, dtype='i1'): """Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, shape (n_variants, n_samples) Array of ref alleles per genotype call. Notes ----- By default this function returns 0 for missing genotype calls **and** for homozygous non-reference genotype calls. Use the `fill` argument to change how missing calls are represented. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_n_ref() array([[2, 1], [1, 0], [0, 0]], dtype=int8) >>> g.to_n_ref(fill=-1) array([[ 2, 1], [ 1, 0], [ 0, -1]], dtype=int8) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_n_ref() array([2, 1, 0], dtype=int8) """ # count number of alternate alleles out = np.empty(self.shape[:-1], dtype=dtype) np.sum(self.values == 0, axis=-1, out=out) # fill missing calls if fill != 0: m = self.is_missing() out[m] = fill # handle mask if self.mask is not None: out[self.mask] = fill return out
python
def to_n_ref(self, fill=0, dtype='i1'): """Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, shape (n_variants, n_samples) Array of ref alleles per genotype call. Notes ----- By default this function returns 0 for missing genotype calls **and** for homozygous non-reference genotype calls. Use the `fill` argument to change how missing calls are represented. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_n_ref() array([[2, 1], [1, 0], [0, 0]], dtype=int8) >>> g.to_n_ref(fill=-1) array([[ 2, 1], [ 1, 0], [ 0, -1]], dtype=int8) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_n_ref() array([2, 1, 0], dtype=int8) """ # count number of alternate alleles out = np.empty(self.shape[:-1], dtype=dtype) np.sum(self.values == 0, axis=-1, out=out) # fill missing calls if fill != 0: m = self.is_missing() out[m] = fill # handle mask if self.mask is not None: out[self.mask] = fill return out
Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, shape (n_variants, n_samples) Array of ref alleles per genotype call. Notes ----- By default this function returns 0 for missing genotype calls **and** for homozygous non-reference genotype calls. Use the `fill` argument to change how missing calls are represented. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_n_ref() array([[2, 1], [1, 0], [0, 0]], dtype=int8) >>> g.to_n_ref(fill=-1) array([[ 2, 1], [ 1, 0], [ 0, -1]], dtype=int8) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_n_ref() array([2, 1, 0], dtype=int8)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L766-L825
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.to_allele_counts
def to_allele_counts(self, max_allele=None, dtype='u1'): """Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2 """ # determine alleles to count if max_allele is None: max_allele = self.max() alleles = list(range(max_allele + 1)) # set up output array outshape = self.shape[:-1] + (len(alleles),) out = np.zeros(outshape, dtype=dtype) for allele in alleles: # count alleles along ploidy dimension allele_match = self.values == allele if self.mask is not None: allele_match &= ~self.mask[..., np.newaxis] np.sum(allele_match, axis=-1, out=out[..., allele]) if self.ndim == 2: out = GenotypeAlleleCountsVector(out) elif self.ndim == 3: out = GenotypeAlleleCountsArray(out) return out
python
def to_allele_counts(self, max_allele=None, dtype='u1'): """Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2 """ # determine alleles to count if max_allele is None: max_allele = self.max() alleles = list(range(max_allele + 1)) # set up output array outshape = self.shape[:-1] + (len(alleles),) out = np.zeros(outshape, dtype=dtype) for allele in alleles: # count alleles along ploidy dimension allele_match = self.values == allele if self.mask is not None: allele_match &= ~self.mask[..., np.newaxis] np.sum(allele_match, axis=-1, out=out[..., allele]) if self.ndim == 2: out = GenotypeAlleleCountsVector(out) elif self.ndim == 3: out = GenotypeAlleleCountsArray(out) return out
Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L892-L950
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.to_gt
def to_gt(self, max_allele=None): """Convert genotype calls to VCF-style string representation. Returns ------- gt : ndarray, string, shape (n_variants, n_samples) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_gt() chararray([[b'0/0', b'0/1'], [b'0/2', b'1/1'], [b'1/2', b'2/1'], [b'2/2', b'./.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0/0 0/2 1/2 2/2 >>> v.to_gt() chararray([b'0/0', b'0/2', b'1/2', b'2/2'], dtype='|S3') >>> g.is_phased = np.ones(g.shape[:-1]) >>> g.to_gt() chararray([[b'0|0', b'0|1'], [b'0|2', b'1|1'], [b'1|2', b'2|1'], [b'2|2', b'.|.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0|0 0|2 1|2 2|2 >>> v.to_gt() chararray([b'0|0', b'0|2', b'1|2', b'2|2'], dtype='|S3') """ # how many characters needed per allele call? if max_allele is None: max_allele = np.max(self) if max_allele <= 0: max_allele = 1 nchar = int(np.floor(np.log10(max_allele))) + 1 # convert to string a = self.astype((np.string_, nchar)).view(np.chararray) # recode missing alleles a[self < 0] = b'.' if self.mask is not None: a[self.mask] = b'.' # determine allele call separator if self.is_phased is None: sep = b'/' else: sep = np.empty(self.shape[:-1], dtype='S1').view(np.chararray) sep[self.is_phased] = b'|' sep[~self.is_phased] = b'/' # join via separator, coping with any ploidy gt = a[..., 0] for i in range(1, self.ploidy): gt = gt + sep + a[..., i] return gt
python
def to_gt(self, max_allele=None): """Convert genotype calls to VCF-style string representation. Returns ------- gt : ndarray, string, shape (n_variants, n_samples) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_gt() chararray([[b'0/0', b'0/1'], [b'0/2', b'1/1'], [b'1/2', b'2/1'], [b'2/2', b'./.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0/0 0/2 1/2 2/2 >>> v.to_gt() chararray([b'0/0', b'0/2', b'1/2', b'2/2'], dtype='|S3') >>> g.is_phased = np.ones(g.shape[:-1]) >>> g.to_gt() chararray([[b'0|0', b'0|1'], [b'0|2', b'1|1'], [b'1|2', b'2|1'], [b'2|2', b'.|.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0|0 0|2 1|2 2|2 >>> v.to_gt() chararray([b'0|0', b'0|2', b'1|2', b'2|2'], dtype='|S3') """ # how many characters needed per allele call? if max_allele is None: max_allele = np.max(self) if max_allele <= 0: max_allele = 1 nchar = int(np.floor(np.log10(max_allele))) + 1 # convert to string a = self.astype((np.string_, nchar)).view(np.chararray) # recode missing alleles a[self < 0] = b'.' if self.mask is not None: a[self.mask] = b'.' # determine allele call separator if self.is_phased is None: sep = b'/' else: sep = np.empty(self.shape[:-1], dtype='S1').view(np.chararray) sep[self.is_phased] = b'|' sep[~self.is_phased] = b'/' # join via separator, coping with any ploidy gt = a[..., 0] for i in range(1, self.ploidy): gt = gt + sep + a[..., i] return gt
Convert genotype calls to VCF-style string representation. Returns ------- gt : ndarray, string, shape (n_variants, n_samples) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_gt() chararray([[b'0/0', b'0/1'], [b'0/2', b'1/1'], [b'1/2', b'2/1'], [b'2/2', b'./.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0/0 0/2 1/2 2/2 >>> v.to_gt() chararray([b'0/0', b'0/2', b'1/2', b'2/2'], dtype='|S3') >>> g.is_phased = np.ones(g.shape[:-1]) >>> g.to_gt() chararray([[b'0|0', b'0|1'], [b'0|2', b'1|1'], [b'1|2', b'2|1'], [b'2|2', b'.|.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0|0 0|2 1|2 2|2 >>> v.to_gt() chararray([b'0|0', b'0|2', b'1|2', b'2|2'], dtype='|S3')
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L952-L1025
cggh/scikit-allel
allel/model/ndarray.py
Genotypes.map_alleles
def map_alleles(self, mapping, copy=True): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- gm : GenotypeArray Examples -------- >>> import allel >>> import numpy as np >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0], ... [0, 2, 1]], dtype='i1') >>> g.map_alleles(mapping) <GenotypeArray shape=(4, 2, 2) dtype=int8> 1/1 1/2 2/1 0/0 1/0 0/1 1/1 ./. >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int8> 0/0 0/2 1/2 2/2 >>> v.map_alleles(mapping) <GenotypeVector shape=(4, 2) dtype=int8> 1/1 2/1 1/0 1/1 Notes ----- If a mask has been set, it is ignored by this function. For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- create_allele_mapping """ h = self.to_haplotypes() hm = h.map_alleles(mapping, copy=copy) if self.ndim == 2: gm = GenotypeVector(hm) else: gm = hm.to_genotypes(ploidy=self.ploidy) return gm
python
def map_alleles(self, mapping, copy=True): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- gm : GenotypeArray Examples -------- >>> import allel >>> import numpy as np >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0], ... [0, 2, 1]], dtype='i1') >>> g.map_alleles(mapping) <GenotypeArray shape=(4, 2, 2) dtype=int8> 1/1 1/2 2/1 0/0 1/0 0/1 1/1 ./. >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int8> 0/0 0/2 1/2 2/2 >>> v.map_alleles(mapping) <GenotypeVector shape=(4, 2) dtype=int8> 1/1 2/1 1/0 1/1 Notes ----- If a mask has been set, it is ignored by this function. For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- create_allele_mapping """ h = self.to_haplotypes() hm = h.map_alleles(mapping, copy=copy) if self.ndim == 2: gm = GenotypeVector(hm) else: gm = hm.to_genotypes(ploidy=self.ploidy) return gm
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- gm : GenotypeArray Examples -------- >>> import allel >>> import numpy as np >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0], ... [0, 2, 1]], dtype='i1') >>> g.map_alleles(mapping) <GenotypeArray shape=(4, 2, 2) dtype=int8> 1/1 1/2 2/1 0/0 1/0 0/1 1/1 ./. >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int8> 0/0 0/2 1/2 2/2 >>> v.map_alleles(mapping) <GenotypeVector shape=(4, 2) dtype=int8> 1/1 2/1 1/0 1/1 Notes ----- If a mask has been set, it is ignored by this function. For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- create_allele_mapping
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1036-L1099
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.to_packed
def to_packed(self, boundscheck=True): """Pack diploid genotypes into a single byte for each genotype, using the left-most 4 bits for the first allele and the right-most 4 bits for the second allele. Allows single byte encoding of diploid genotypes for variants with up to 15 alleles. Parameters ---------- boundscheck : bool, optional If False, do not check that minimum and maximum alleles are compatible with bit-packing. Returns ------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed genotype array. Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> g.to_packed() array([[ 0, 1], [ 2, 17], [ 34, 239]], dtype=uint8) """ check_ploidy(self.ploidy, 2) if boundscheck: amx = self.max() if amx > 14: raise ValueError('max allele for packing is 14, found %s' % amx) amn = self.min() if amn < -1: raise ValueError('min allele for packing is -1, found %s' % amn) # pack data values = memoryview_safe(self.values) packed = genotype_array_pack_diploid(values) return packed
python
def to_packed(self, boundscheck=True): """Pack diploid genotypes into a single byte for each genotype, using the left-most 4 bits for the first allele and the right-most 4 bits for the second allele. Allows single byte encoding of diploid genotypes for variants with up to 15 alleles. Parameters ---------- boundscheck : bool, optional If False, do not check that minimum and maximum alleles are compatible with bit-packing. Returns ------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed genotype array. Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> g.to_packed() array([[ 0, 1], [ 2, 17], [ 34, 239]], dtype=uint8) """ check_ploidy(self.ploidy, 2) if boundscheck: amx = self.max() if amx > 14: raise ValueError('max allele for packing is 14, found %s' % amx) amn = self.min() if amn < -1: raise ValueError('min allele for packing is -1, found %s' % amn) # pack data values = memoryview_safe(self.values) packed = genotype_array_pack_diploid(values) return packed
Pack diploid genotypes into a single byte for each genotype, using the left-most 4 bits for the first allele and the right-most 4 bits for the second allele. Allows single byte encoding of diploid genotypes for variants with up to 15 alleles. Parameters ---------- boundscheck : bool, optional If False, do not check that minimum and maximum alleles are compatible with bit-packing. Returns ------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed genotype array. Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> g.to_packed() array([[ 0, 1], [ 2, 17], [ 34, 239]], dtype=uint8)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1553-L1602
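A minimal round-trip sketch, pairing to_packed with from_packed (documented in the next record); the genotype values are reused from the docstring example:

import numpy as np
import allel

# int8 dtype is required for bit-packing; alleles must be in the range -1..14.
g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]], dtype='i1')

packed = g.to_packed()          # uint8, shape (n_variants, n_samples)
g2 = allel.GenotypeArray.from_packed(packed)

# The round trip recovers the original calls, including missing genotypes.
print(np.array_equal(np.asarray(g), np.asarray(g2)))  # True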
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.from_packed
def from_packed(cls, packed): """Unpack diploid genotypes that have been bit-packed into single bytes. Parameters ---------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed diploid genotype array. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, 2) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> packed = np.array([[0, 1], ... [2, 17], ... [34, 239]], dtype='u1') >>> allel.GenotypeArray.from_packed(packed) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/2 1/1 2/2 ./. """ # check arguments packed = np.asarray(packed) check_ndim(packed, 2) check_dtype(packed, 'u1') packed = memoryview_safe(packed) data = genotype_array_unpack_diploid(packed) return cls(data)
python
def from_packed(cls, packed): """Unpack diploid genotypes that have been bit-packed into single bytes. Parameters ---------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed diploid genotype array. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, 2) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> packed = np.array([[0, 1], ... [2, 17], ... [34, 239]], dtype='u1') >>> allel.GenotypeArray.from_packed(packed) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/2 1/1 2/2 ./. """ # check arguments packed = np.asarray(packed) check_ndim(packed, 2) check_dtype(packed, 'u1') packed = memoryview_safe(packed) data = genotype_array_unpack_diploid(packed) return cls(data)
Unpack diploid genotypes that have been bit-packed into single bytes. Parameters ---------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed diploid genotype array. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, 2) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> packed = np.array([[0, 1], ... [2, 17], ... [34, 239]], dtype='u1') >>> allel.GenotypeArray.from_packed(packed) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/2 1/1 2/2 ./.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1605-L1642
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.to_sparse
def to_sparse(self, format='csr', **kwargs): """Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32) """ h = self.to_haplotypes() m = h.to_sparse(format=format, **kwargs) return m
python
def to_sparse(self, format='csr', **kwargs): """Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32) """ h = self.to_haplotypes() m = h.to_sparse(format=format, **kwargs) return m
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1645-L1688
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.from_sparse
def from_sparse(m, ploidy, order=None, out=None): """Construct a genotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix ploidy : int The sample ploidy. order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> g = allel.GenotypeArray.from_sparse(m, ploidy=2) >>> g <GenotypeArray shape=(4, 2, 2) dtype=int8> 0/0 0/0 0/1 0/1 1/1 0/0 0/0 ./. """ h = HaplotypeArray.from_sparse(m, order=order, out=out) g = h.to_genotypes(ploidy=ploidy) return g
python
def from_sparse(m, ploidy, order=None, out=None): """Construct a genotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix ploidy : int The sample ploidy. order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> g = allel.GenotypeArray.from_sparse(m, ploidy=2) >>> g <GenotypeArray shape=(4, 2, 2) dtype=int8> 0/0 0/0 0/1 0/1 1/1 0/0 0/0 ./. """ h = HaplotypeArray.from_sparse(m, order=order, out=out) g = h.to_genotypes(ploidy=ploidy) return g
Construct a genotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix ploidy : int The sample ploidy. order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> g = allel.GenotypeArray.from_sparse(m, ploidy=2) >>> g <GenotypeArray shape=(4, 2, 2) dtype=int8> 0/0 0/0 0/1 0/1 1/1 0/0 0/0 ./.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1691-L1733
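A minimal round-trip sketch combining to_sparse and from_sparse, with values reused from the docstring examples; scipy is assumed to be installed since these methods import it lazily:

import numpy as np
import allel

g = allel.GenotypeArray([[[0, 0], [0, 0]],
                         [[0, 1], [0, 1]],
                         [[1, 1], [0, 0]],
                         [[0, 0], [-1, -1]]], dtype='i1')

# The sparse matrix stores haplotypes, so ploidy must be supplied on the way back.
m = g.to_sparse(format='csr')
g2 = allel.GenotypeArray.from_sparse(m, ploidy=2)

print(np.array_equal(np.asarray(g), np.asarray(g2)))  # True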
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.haploidify_samples
def haploidify_samples(self): """Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 . """ # N.B., this implementation is obscure and uses more memory than # necessary, TODO review # define the range of possible indices, e.g., diploid => (0, 1) index_range = np.arange(0, self.ploidy, dtype='u1') # create a random index for each genotype call indices = np.random.choice(index_range, size=self.n_calls, replace=True) # reshape genotype data so it's suitable for passing to np.choose # by merging the variants and samples dimensions choices = self.reshape(-1, self.ploidy).T # now use random indices to haploidify data = np.choose(indices, choices) # reshape the haploidified data to restore the variants and samples # dimensions data = data.reshape((self.n_variants, self.n_samples)) # view as haplotype array h = HaplotypeArray(data, copy=False) return h
python
def haploidify_samples(self): """Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 . """ # N.B., this implementation is obscure and uses more memory than # necessary, TODO review # define the range of possible indices, e.g., diploid => (0, 1) index_range = np.arange(0, self.ploidy, dtype='u1') # create a random index for each genotype call indices = np.random.choice(index_range, size=self.n_calls, replace=True) # reshape genotype data so it's suitable for passing to np.choose # by merging the variants and samples dimensions choices = self.reshape(-1, self.ploidy).T # now use random indices to haploidify data = np.choose(indices, choices) # reshape the haploidified data to restore the variants and samples # dimensions data = data.reshape((self.n_variants, self.n_samples)) # view as haplotype array h = HaplotypeArray(data, copy=False) return h
Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 .
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1735-L1797
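A short usage sketch, reusing the docstring's example data; the resulting pseudo-haplotypes can be passed to any HaplotypeArray method, for example allele counting:

import numpy as np
import allel

np.random.seed(42)
g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[1, 2], [2, 1]],
                         [[2, 2], [-1, -1]]])

# One randomly chosen allele per genotype call -> one pseudo-haplotype per sample.
h = g.haploidify_samples()
print(h.shape)            # (4, 2): n_variants x n_samples
print(h.count_alleles())  # allele counts over the pseudo-haplotypes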
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.count_alleles
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.count_alleles() <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 2 1 0 0 2 >>> g.count_alleles(max_allele=1) <AlleleCountsArray shape=(3, 2) dtype=int32> 3 1 1 2 0 0 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None if subpop is None and mask is None: ac = genotype_array_count_alleles(values, max_allele) elif subpop is None: ac = genotype_array_count_alleles_masked(values, mask, max_allele) elif mask is None: ac = genotype_array_count_alleles_subpop(values, max_allele, subpop) else: ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
python
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.count_alleles() <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 2 1 0 0 2 >>> g.count_alleles(max_allele=1) <AlleleCountsArray shape=(3, 2) dtype=int32> 3 1 1 2 0 0 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None if subpop is None and mask is None: ac = genotype_array_count_alleles(values, max_allele) elif subpop is None: ac = genotype_array_count_alleles_masked(values, mask, max_allele) elif mask is None: ac = genotype_array_count_alleles_subpop(values, max_allele, subpop) else: ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.count_alleles() <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 2 1 0 0 2 >>> g.count_alleles(max_allele=1) <AlleleCountsArray shape=(3, 2) dtype=int32> 3 1 1 2 0 0
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1799-L1853
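A brief sketch of the subpop argument; the genotypes and sample indices here are illustrative:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 2], [1, 1], [2, 2]]])

# Count over all samples, then over a subpopulation given by sample indices.
ac_all = g.count_alleles()
ac_sub = g.count_alleles(subpop=[0, 1])
print(ac_all.shape, ac_sub.shape)  # both (2, 3)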
cggh/scikit-allel
allel/model/ndarray.py
GenotypeArray.count_alleles_subpops
def count_alleles_subpops(self, subpops, max_allele=None): """Count alleles for multiple subpopulations simultaneously. Parameters ---------- subpops : dict (string -> sequence of ints) Mapping of subpopulation names to sample indices. max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. Returns ------- out : dict (string -> AlleleCountsArray) A mapping of subpopulation names to allele counts arrays. """ if max_allele is None: max_allele = self.max() out = {name: self.count_alleles(max_allele=max_allele, subpop=subpop) for name, subpop in subpops.items()} return out
python
def count_alleles_subpops(self, subpops, max_allele=None): """Count alleles for multiple subpopulations simultaneously. Parameters ---------- subpops : dict (string -> sequence of ints) Mapping of subpopulation names to sample indices. max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. Returns ------- out : dict (string -> AlleleCountsArray) A mapping of subpopulation names to allele counts arrays. """ if max_allele is None: max_allele = self.max() out = {name: self.count_alleles(max_allele=max_allele, subpop=subpop) for name, subpop in subpops.items()} return out
Count alleles for multiple subpopulations simultaneously. Parameters ---------- subpops : dict (string -> sequence of ints) Mapping of subpopulation names to sample indices. max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. Returns ------- out : dict (string -> AlleleCountsArray) A mapping of subpopulation names to allele counts arrays.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1855-L1879
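A sketch of counting for several subpopulations at once; the subpopulation names and indices are illustrative. Passing an explicit max_allele keeps the per-population arrays aligned with each other:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 2], [1, 1], [2, 2]]])

subpops = {'popA': [0, 1], 'popB': [2]}
acs = g.count_alleles_subpops(subpops, max_allele=2)
for name, ac in sorted(acs.items()):
    print(name, ac.shape)  # each is an AlleleCountsArray with 3 columns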
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.compress
def compress(self, condition, axis=0, out=None): """Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the condition array. axis : int, optional Axis along which to take slices. If None, work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- out : HaplotypeArray A copy of the array without the slices along axis for which `condition` is false. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.compress([True, False, True], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.compress([True, False, True, False], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 . """ return compress_haplotype_array(self, condition, axis=axis, cls=type(self), compress=np.compress, out=out)
python
def compress(self, condition, axis=0, out=None): """Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the condition array. axis : int, optional Axis along which to take slices. If None, work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- out : HaplotypeArray A copy of the array without the slices along axis for which `condition` is false. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.compress([True, False, True], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.compress([True, False, True, False], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 . """ return compress_haplotype_array(self, condition, axis=axis, cls=type(self), compress=np.compress, out=out)
Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the condition array. axis : int, optional Axis along which to take slices. If None, work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- out : HaplotypeArray A copy of the array without the slices along axis for which `condition` is false. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.compress([True, False, True], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.compress([True, False, True, False], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 .
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1995-L2034
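An illustrative use of compress with a condition derived from the data itself (haplotype values reused from the docstring example):

import numpy as np
import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# Keep only variants with no missing haplotype calls.
complete = np.all(np.asarray(h) >= 0, axis=1)
print(h.compress(complete, axis=0))  # drops the last variant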
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.take
def take(self, indices, axis=0, out=None, mode='raise'): """Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- indices : array_like The indices of the values to extract. axis : int, optional The axis over which to select values. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.take([0, 2], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.take([0, 2], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 . """ return take_haplotype_array(self, indices, axis=axis, cls=type(self), take=np.take, out=out, mode=mode)
python
def take(self, indices, axis=0, out=None, mode='raise'): """Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- indices : array_like The indices of the values to extract. axis : int, optional The axis over which to select values. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.take([0, 2], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.take([0, 2], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 . """ return take_haplotype_array(self, indices, axis=axis, cls=type(self), take=np.take, out=out, mode=mode)
Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- indices : array_like The indices of the values to extract. axis : int, optional The axis over which to select values. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.take([0, 2], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.take([0, 2], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 .
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2036-L2086
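A complementary sketch: where compress takes a boolean condition, take selects by index (data reused from the docstring example):

import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# Select the first and last haplotype columns by index.
print(h.take([0, 3], axis=1))  # HaplotypeArray with shape (3, 2)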
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.subset
def subset(self, sel0=None, sel1=None): """Make a sub-selection of variants and haplotypes. Parameters ---------- sel0 : array_like Boolean array or array of indices selecting variants. sel1 : array_like Boolean array or array of indices selecting haplotypes. Returns ------- out : HaplotypeArray See Also -------- HaplotypeArray.take, HaplotypeArray.compress """ return subset_haplotype_array(self, sel0, sel1, cls=type(self), subset=subset)
python
def subset(self, sel0=None, sel1=None): """Make a sub-selection of variants and haplotypes. Parameters ---------- sel0 : array_like Boolean array or array of indices selecting variants. sel1 : array_like Boolean array or array of indices selecting haplotypes. Returns ------- out : HaplotypeArray See Also -------- HaplotypeArray.take, HaplotypeArray.compress """ return subset_haplotype_array(self, sel0, sel1, cls=type(self), subset=subset)
Make a sub-selection of variants and haplotypes. Parameters ---------- sel0 : array_like Boolean array or array of indices selecting variants. sel1 : array_like Boolean array or array of indices selecting haplotypes. Returns ------- out : HaplotypeArray See Also -------- HaplotypeArray.take, HaplotypeArray.compress
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2088-L2107
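A minimal sketch of subset selecting on both axes in one call; the selections are illustrative:

import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# Variants 0 and 2, haplotypes 0 and 1, in a single operation.
print(h.subset(sel0=[0, 2], sel1=[0, 1]))  # shape (2, 2)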
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.concatenate
def concatenate(self, others, axis=0): """Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . . """ return concatenate_haplotype_array(self, others, axis=axis, cls=type(self), concatenate=np.concatenate)
python
def concatenate(self, others, axis=0): """Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . . """ return concatenate_haplotype_array(self, others, axis=axis, cls=type(self), concatenate=np.concatenate)
Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . .
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2109-L2147
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.to_genotypes
def to_genotypes(self, ploidy, copy=False): """Reshape a haplotype array to view it as genotypes by restoring the ploidy dimension. Parameters ---------- ploidy : int The sample ploidy. copy : bool, optional If True, make a copy of data. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array (sharing the same underlying buffer unless `copy` is True). Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.to_genotypes(ploidy=2) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/1 1/1 0/2 ./. """ # check ploidy is compatible if (self.shape[1] % ploidy) > 0: raise ValueError('incompatible ploidy') # reshape newshape = (self.shape[0], -1, ploidy) data = self.reshape(newshape) # wrap g = GenotypeArray(data, copy=copy) return g
python
def to_genotypes(self, ploidy, copy=False): """Reshape a haplotype array to view it as genotypes by restoring the ploidy dimension. Parameters ---------- ploidy : int The sample ploidy. copy : bool, optional If True, make a copy of data. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array (sharing the same underlying buffer unless `copy` is True). Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.to_genotypes(ploidy=2) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/1 1/1 0/2 ./. """ # check ploidy is compatible if (self.shape[1] % ploidy) > 0: raise ValueError('incompatible ploidy') # reshape newshape = (self.shape[0], -1, ploidy) data = self.reshape(newshape) # wrap g = GenotypeArray(data, copy=copy) return g
Reshape a haplotype array to view it as genotypes by restoring the ploidy dimension. Parameters ---------- ploidy : int The sample ploidy. copy : bool, optional If True, make a copy of data. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array (sharing the same underlying buffer unless `copy` is True). Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.to_genotypes(ploidy=2) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/1 1/1 0/2 ./.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2202-L2246
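A round-trip sketch pairing to_genotypes with GenotypeArray.to_haplotypes (the inverse view used internally by GenotypeArray.to_sparse above); by default no data are copied:

import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# Four haplotype columns viewed as two diploid samples.
g = h.to_genotypes(ploidy=2)
print(g.shape)                   # (3, 2, 2)
print(g.to_haplotypes().shape)   # (3, 4)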
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.to_sparse
def to_sparse(self, format='csr', **kwargs): """Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 1, 0, 1], ... [1, 1, 0, 0], ... [0, 0, -1, -1]], dtype='i1') >>> m = h.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32) """ import scipy.sparse # check arguments f = { 'bsr': scipy.sparse.bsr_matrix, 'coo': scipy.sparse.coo_matrix, 'csc': scipy.sparse.csc_matrix, 'csr': scipy.sparse.csr_matrix, 'dia': scipy.sparse.dia_matrix, 'dok': scipy.sparse.dok_matrix, 'lil': scipy.sparse.lil_matrix } if format not in f: raise ValueError('invalid format: %r' % format) # create sparse matrix m = f[format](self, **kwargs) return m
python
def to_sparse(self, format='csr', **kwargs): """Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 1, 0, 1], ... [1, 1, 0, 0], ... [0, 0, -1, -1]], dtype='i1') >>> m = h.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32) """ import scipy.sparse # check arguments f = { 'bsr': scipy.sparse.bsr_matrix, 'coo': scipy.sparse.coo_matrix, 'csc': scipy.sparse.csc_matrix, 'csr': scipy.sparse.csr_matrix, 'dia': scipy.sparse.dia_matrix, 'dok': scipy.sparse.dok_matrix, 'lil': scipy.sparse.lil_matrix } if format not in f: raise ValueError('invalid format: %r' % format) # create sparse matrix m = f[format](self, **kwargs) return m
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 1, 0, 1], ... [1, 1, 0, 0], ... [0, 0, -1, -1]], dtype='i1') >>> m = h.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2249-L2303
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.from_sparse
def from_sparse(m, order=None, out=None): """Construct a haplotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- h : HaplotypeArray, shape (n_variants, n_haplotypes) Haplotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> h = allel.HaplotypeArray.from_sparse(m) >>> h <HaplotypeArray shape=(4, 4) dtype=int8> 0 0 0 0 0 1 0 1 1 1 0 0 0 0 . . """ import scipy.sparse # check arguments if not scipy.sparse.isspmatrix(m): raise ValueError('not a sparse matrix: %r' % m) # convert to dense array data = m.toarray(order=order, out=out) # wrap h = HaplotypeArray(data) return h
python
def from_sparse(m, order=None, out=None): """Construct a haplotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- h : HaplotypeArray, shape (n_variants, n_haplotypes) Haplotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> h = allel.HaplotypeArray.from_sparse(m) >>> h <HaplotypeArray shape=(4, 4) dtype=int8> 0 0 0 0 0 1 0 1 1 1 0 0 0 0 . . """ import scipy.sparse # check arguments if not scipy.sparse.isspmatrix(m): raise ValueError('not a sparse matrix: %r' % m) # convert to dense array data = m.toarray(order=order, out=out) # wrap h = HaplotypeArray(data) return h
Construct a haplotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- h : HaplotypeArray, shape (n_variants, n_haplotypes) Haplotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> h = allel.HaplotypeArray.from_sparse(m) >>> h <HaplotypeArray shape=(4, 4) dtype=int8> 0 0 0 0 0 1 0 1 1 1 0 0 0 0 . .
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2306-L2356
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.count_alleles
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) if subpop is None: ac = haplotype_array_count_alleles(values, max_allele) else: ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
python
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) if subpop is None: ac = haplotype_array_count_alleles(values, max_allele) else: ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2358-L2404
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.map_alleles
def map_alleles(self, mapping, copy=True): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- hm : HaplotypeArray Examples -------- >>> import allel >>> import numpy as np >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0]], dtype='i1') >>> h.map_alleles(mapping) <HaplotypeArray shape=(3, 4) dtype=int8> 1 1 1 2 2 0 0 0 2 0 . . Notes ----- For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- allel.model.util.create_allele_mapping """ # check inputs mapping = asarray_ndim(mapping, 2) check_dim0_aligned(self, mapping) # use optimisation mapping = np.asarray(mapping, dtype=self.dtype) mapping = memoryview_safe(mapping) values = memoryview_safe(self.values) data = haplotype_array_map_alleles(values, mapping, copy=copy) return HaplotypeArray(data, copy=False)
python
def map_alleles(self, mapping, copy=True): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- hm : HaplotypeArray Examples -------- >>> import allel >>> import numpy as np >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0]], dtype='i1') >>> h.map_alleles(mapping) <HaplotypeArray shape=(3, 4) dtype=int8> 1 1 1 2 2 0 0 0 2 0 . . Notes ----- For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- allel.model.util.create_allele_mapping """ # check inputs mapping = asarray_ndim(mapping, 2) check_dim0_aligned(self, mapping) # use optimisation mapping = np.asarray(mapping, dtype=self.dtype) mapping = memoryview_safe(mapping) values = memoryview_safe(self.values) data = haplotype_array_map_alleles(values, mapping, copy=copy) return HaplotypeArray(data, copy=False)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- hm : HaplotypeArray Examples -------- >>> import allel >>> import numpy as np >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0]], dtype='i1') >>> h.map_alleles(mapping) <HaplotypeArray shape=(3, 4) dtype=int8> 1 1 1 2 2 0 0 0 2 0 . . Notes ----- For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- allel.model.util.create_allele_mapping
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2432-L2486
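A sketch of the in-place path (copy=False), which the docstring notes is only available for int8 data; the mapping is reused from the docstring example:

import numpy as np
import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# Row i gives the new index for each old allele at variant i.
mapping = np.array([[1, 2, 0],
                    [2, 0, 1],
                    [2, 1, 0]], dtype='i1')

# copy=False rewrites h's buffer directly; missing calls (-1) are left untouched.
hm = h.map_alleles(mapping, copy=False)
print(hm)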
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.distinct
def distinct(self): """Return sets of indices for each distinct haplotype.""" # setup collection d = collections.defaultdict(set) # iterate over haplotypes for i in range(self.shape[1]): # hash the haplotype k = hash(self.values[:, i].tobytes()) # collect d[k].add(i) # extract sets, sorted by most common return sorted(d.values(), key=len, reverse=True)
python
def distinct(self): """Return sets of indices for each distinct haplotype.""" # setup collection d = collections.defaultdict(set) # iterate over haplotypes for i in range(self.shape[1]): # hash the haplotype k = hash(self.values[:, i].tobytes()) # collect d[k].add(i) # extract sets, sorted by most common return sorted(d.values(), key=len, reverse=True)
Return sets of indices for each distinct haplotype.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2492-L2508
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.distinct_counts
def distinct_counts(self): """Return counts for each distinct haplotype.""" # hash the haplotypes k = [hash(self.values[:, i].tobytes()) for i in range(self.shape[1])] # count and sort # noinspection PyArgumentList counts = sorted(collections.Counter(k).values(), reverse=True) return np.asarray(counts)
python
def distinct_counts(self): """Return counts for each distinct haplotype.""" # hash the haplotypes k = [hash(self.values[:, i].tobytes()) for i in range(self.shape[1])] # count and sort # noinspection PyArgumentList counts = sorted(collections.Counter(k).values(), reverse=True) return np.asarray(counts)
Return counts for each distinct haplotype.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2510-L2520
cggh/scikit-allel
allel/model/ndarray.py
HaplotypeArray.distinct_frequencies
def distinct_frequencies(self): """Return frequencies for each distinct haplotype.""" c = self.distinct_counts() n = self.shape[1] return c / n
python
def distinct_frequencies(self): """Return frequencies for each distinct haplotype.""" c = self.distinct_counts() n = self.shape[1] return c / n
Return frequencies for each distinct haplotype.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2522-L2527
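The three distinct* methods are easiest to see together; the haplotypes below are illustrative, chosen so that the first two columns are identical:

import allel

h = allel.HaplotypeArray([[0, 0, 1, 1],
                          [0, 0, 1, 1],
                          [0, 0, 0, 1]], dtype='i1')

print(h.distinct())              # index sets, most common haplotype first
print(h.distinct_counts())       # array([2, 1, 1])
print(h.distinct_frequencies())  # counts divided by n_haplotypes (here 4)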
cggh/scikit-allel
allel/model/ndarray.py
AlleleCountsArray.to_frequencies
def to_frequencies(self, fill=np.nan): """Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.to_frequencies() array([[0.75, 0.25, 0. ], [0.25, 0.5 , 0.25], [0. , 0. , 1. ]]) """ an = np.sum(self, axis=1)[:, None] with ignore_invalid(): af = np.where(an > 0, self / an, fill) return af
python
def to_frequencies(self, fill=np.nan): """Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.to_frequencies() array([[0.75, 0.25, 0. ], [0.25, 0.5 , 0.25], [0. , 0. , 1. ]]) """ an = np.sum(self, axis=1)[:, None] with ignore_invalid(): af = np.where(an > 0, self / an, fill) return af
Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.to_frequencies() array([[0.75, 0.25, 0. ], [0.25, 0.5 , 0.25], [0. , 0. , 1. ]])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2658-L2689
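A small sketch deriving a combined non-reference allele frequency from to_frequencies, with genotypes reused from the docstring example:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]])
ac = g.count_alleles()

af = ac.to_frequencies()
# Frequency of all non-reference alleles combined, per variant.
print(1 - af[:, 0])  # [0.25, 0.75, 1.0]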
cggh/scikit-allel
allel/model/ndarray.py
AlleleCountsArray.max_allele
def max_allele(self): """Return the highest allele index for each variant. Returns ------- n : ndarray, int, shape (n_variants,) Allele index array. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.max_allele() array([1, 2, 2], dtype=int8) """ out = np.empty(self.shape[0], dtype='i1') out.fill(-1) for i in range(self.shape[1]): d = self.values[:, i] > 0 out[d] = i return out
python
def max_allele(self): """Return the highest allele index for each variant. Returns ------- n : ndarray, int, shape (n_variants,) Allele index array. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.max_allele() array([1, 2, 2], dtype=int8) """ out = np.empty(self.shape[0], dtype='i1') out.fill(-1) for i in range(self.shape[1]): d = self.values[:, i] > 0 out[d] = i return out
Return the highest allele index for each variant. Returns ------- n : ndarray, int, shape (n_variants,) Allele index array. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.max_allele() array([1, 2, 2], dtype=int8)
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2714-L2740
cggh/scikit-allel
allel/model/ndarray.py
AlleleCountsArray.is_non_segregating
def is_non_segregating(self, allele=None): """Find non-segregating variants (where at most one allele is observed). Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.is_non_segregating() array([ True, False, False, True]) >>> ac.is_non_segregating(allele=2) array([False, False, False, True]) """ if allele is None: return self.allelism() <= 1 else: return (self.allelism() == 1) & (self.values[:, allele] > 0)
python
def is_non_segregating(self, allele=None): """Find non-segregating variants (where at most one allele is observed). Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.is_non_segregating() array([ True, False, False, True]) >>> ac.is_non_segregating(allele=2) array([False, False, False, True]) """ if allele is None: return self.allelism() <= 1 else: return (self.allelism() == 1) & (self.values[:, allele] > 0)
Find non-segregating variants (where at most one allele is observed). Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.is_non_segregating() array([ True, False, False, True]) >>> ac.is_non_segregating(allele=2) array([False, False, False, True])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2819-L2853
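An illustrative filtering step: the boolean result can be negated and passed to GenotypeArray.compress to keep only segregating variants (genotypes reused from the docstring example):

import allel

g = allel.GenotypeArray([[[0, 0], [0, 0]],
                         [[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]])
ac = g.count_alleles()

# Keep variants where more than one allele is observed.
keep = ~ac.is_non_segregating()
print(g.compress(keep, axis=0).shape)  # (2, 2, 2)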
cggh/scikit-allel
allel/model/ndarray.py
AlleleCountsArray.is_biallelic_01
def is_biallelic_01(self, min_mac=None): """Find variants biallelic for the reference (0) and first alternate (1) allele. Parameters ---------- min_mac : int, optional Minimum minor allele count. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. """ loc = self.is_biallelic() & (self.max_allele() == 1) if min_mac is not None: # noinspection PyAugmentAssignment loc = loc & (self.values[:, :2].min(axis=1) >= min_mac) return loc
python
def is_biallelic_01(self, min_mac=None): """Find variants biallelic for the reference (0) and first alternate (1) allele. Parameters ---------- min_mac : int, optional Minimum minor allele count. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. """ loc = self.is_biallelic() & (self.max_allele() == 1) if min_mac is not None: # noinspection PyAugmentAssignment loc = loc & (self.values[:, :2].min(axis=1) >= min_mac) return loc
Find variants biallelic for the reference (0) and first alternate (1) allele. Parameters ---------- min_mac : int, optional Minimum minor allele count. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2931-L2951
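A sketch of min_mac in action with illustrative genotypes; sites failing either the biallelic-0/1 test or the minor allele count threshold come back False:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]])
ac = g.count_alleles()

# Sites segregating only for alleles 0 and 1, with at least one copy of the minor allele.
print(ac.is_biallelic_01(min_mac=1))  # [ True  True False False]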
cggh/scikit-allel
allel/model/ndarray.py
AlleleCountsArray.map_alleles
def map_alleles(self, mapping, max_allele=None): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. max_allele : int, optional Highest allele index expected in the output. If not provided will be determined from maximum value in `mapping`. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac <AlleleCountsArray shape=(4, 3) dtype=int32> 4 0 0 3 1 0 1 2 1 0 0 2 >>> mapping = [[1, 0, 2], ... [1, 0, 2], ... [2, 1, 0], ... [1, 2, 0]] >>> ac.map_alleles(mapping) <AlleleCountsArray shape=(4, 3) dtype=int32> 0 4 0 1 3 0 1 2 1 2 0 0 See Also -------- create_allele_mapping """ # ensure correct dimensionality and matching dtype mapping = asarray_ndim(mapping, 2, dtype=self.dtype) check_dim0_aligned(self, mapping) check_dim1_aligned(self, mapping) # use optimisation out = allele_counts_array_map_alleles(self.values, mapping, max_allele) # wrap and return return type(self)(out)
python
def map_alleles(self, mapping, max_allele=None): """Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. max_allele : int, optional Highest allele index expected in the output. If not provided will be determined from maximum value in `mapping`. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac <AlleleCountsArray shape=(4, 3) dtype=int32> 4 0 0 3 1 0 1 2 1 0 0 2 >>> mapping = [[1, 0, 2], ... [1, 0, 2], ... [2, 1, 0], ... [1, 2, 0]] >>> ac.map_alleles(mapping) <AlleleCountsArray shape=(4, 3) dtype=int32> 0 4 0 1 3 0 1 2 1 2 0 0 See Also -------- create_allele_mapping """ # ensure correct dimensionality and matching dtype mapping = asarray_ndim(mapping, 2, dtype=self.dtype) check_dim0_aligned(self, mapping) check_dim1_aligned(self, mapping) # use optimisation out = allele_counts_array_map_alleles(self.values, mapping, max_allele) # wrap and return return type(self)(out)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. max_allele : int, optional Highest allele index expected in the output. If not provided will be determined from maximum value in `mapping`. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac <AlleleCountsArray shape=(4, 3) dtype=int32> 4 0 0 3 1 0 1 2 1 0 0 2 >>> mapping = [[1, 0, 2], ... [1, 0, 2], ... [2, 1, 0], ... [1, 2, 0]] >>> ac.map_alleles(mapping) <AlleleCountsArray shape=(4, 3) dtype=int32> 0 4 0 1 3 0 1 2 1 2 0 0 See Also -------- create_allele_mapping
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2971-L3027
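A consistency sketch using the data and mapping from the docstring example above: remapping the counts directly should agree with remapping the genotypes and recounting:

import numpy as np
import allel

g = allel.GenotypeArray([[[0, 0], [0, 0]],
                         [[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]], dtype='i1')
ac = g.count_alleles()

mapping = np.array([[1, 0, 2],
                    [1, 0, 2],
                    [2, 1, 0],
                    [1, 2, 0]], dtype='i1')

ac1 = ac.map_alleles(mapping)
ac2 = g.map_alleles(mapping).count_alleles(max_allele=2)
print(np.array_equal(np.asarray(ac1), np.asarray(ac2)))  # True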