repo_name | path | copies | size | content | license
---|---|---|---|---|---
meduz/scikit-learn | sklearn/tests/test_metaestimators.py | 52 | 4990 |
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
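# Illustrative sketch, not part of the original test module: the `hides`
# decorator above relies on a property getter that raises AttributeError,
# which makes `hasattr` report False, so duck-typed delegation in the
# metaestimators (e.g. via `if_delegate_has_method`) drops the method too.
# A minimal, self-contained demonstration of that mechanism, guarded so it
# never runs during test collection:
if __name__ == '__main__':
    class HiddenPredict(object):
        @property
        def predict(self):
            raise AttributeError('predict is hidden')

    # hasattr() swallows the AttributeError raised by the property getter.
    assert not hasattr(HiddenPredict(), 'predict')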
| bsd-3-clause |
merenlab/anvio | anvio/trnaseq.py | 1 | 365430 |
# -*- coding: utf-8
# pylint: disable=line-too-long
"""Library for tRNA-seq dataset operations
`bin/anvi-trnaseq` and `bin/anvi-convert-trnaseq-database` are the default clients using this
module. `anvi-trnaseq` instantiates a `TRNASeqDataset` object. `anvi-convert-trnaseq-database`
instantiates a `DatabaseConverter` object. The clients call the objects' `process` methods to start
the analytic workflows.
Each sequence library in an experiment is processed separately as a `TRNASeqDataset`, storing an
information-rich anvi'o tRNA-seq database. `DatabaseConverter` finds reference seed sequences from a
set of tRNA-seq databases, storing seeds in an anvi'o contigs database and coverage patterns for
each dataset in anvi'o profile and auxiliary databases. Contigs and profile databases interface with
a range of other tools in the anvi'o platform.
GLOSSARY of essential terms
===========================
Feature: Canonical feature or structural element of tRNA, e.g., anticodon stem
Read: Synonymous with merged paired-end tRNA-seq read oriented 5'->3'
Feature profile (profile): 5'->3' features identified de novo from the 3' end in a merged tRNA-seq
read
Profiled sequence: Sequence with an assigned feature profile, which may or may not span the whole
length of the sequence from the 3' end, but which at minimum includes the T arm
Full profile (tRNA profile): Profile that spans (nearly) the full length of the sequence, with a
small number of unprofiled nucleotides allowed at the 5' end when that number is less than the
minimum length of a missing next 5' tRNA feature
Truncated profile: Profile that does not span (nearly) the full length of the sequence (e.g., a
sequence is a chimera of two 3' tRNA fragments and the profile covers the 3' fragment but not
the unexpected 5' fragment)
Potential modification-induced substitution (sub): Detected as 3-4 different nucleotides at a tRNA
position, potentially the effect of semi-random nucleotide addition at the site of a modified
nucleotide during reverse transcription
Potential modification-induced indel (indel): Detected by alignment of tRNA sequences with and
without potential modification-induced substitutions, indels result from reverse transcriptase
skipping or adding extra nucleotides due to interaction with a modification (substitutions are
generally more common than deletions, which in turn are more common than insertions)
Unique sequence (U): Set of dereplicated merged paired-end tRNA-seq reads
Nontemplated nucleotide: Reverse transcription artifact typically added to the 5' end of a read
Trimmed sequence (T): Set of unique sequences that are identical after trimming sequence extensions
5' of the acceptor stem and 3' of the discriminator nucleotide, e.g., nontemplated 5'
nucleotides and 3'-CCA acceptor
Normalized sequence (N): The longest of a set of trimmed sequences, with shorter sequences being
tRNA fragment subsequences
Nonspecific sequence: In contrast to a specific sequence, a trimmed sequence (or its component
unique sequences and reads) that occurs in multiple normalized sequences (cannot be resolved to
a single normalized sequence) due to it being a tRNA fragment
Mapped fragment: Sequence without a feature profile that maps to a normalized sequence and may
include extra nucleotides beyond the trimmed 5' end of the normalized sequence but not
nucleotides in the trimmed 3' terminus of the normalized sequence
Modified sequence (M): Set of normalized sequences differing by potential modification-induced
substitutions and optionally indels
ABBREVIATIONS
=============
M: Modified seq
N: Normalized seq
-----------------
Nf: N with full profile
Nc: N with truncated profile
Nb: N with subs but not indels
Nqf: Nf with no subs (only important as queries in finding indels)
Nq: Nqf or Nc queried against Nb targets to find indels
Ni: N with indels and optionally subs
T: Trimmed seq
--------------
Tp: T with profile
Tf: T with full profile
Tc: T with truncated profile
Tm: Mapped T
Ti: T that is part of Ni (Ti does not necessarily contain indels, though Ni does)
Tip: Ti derived from Tp
Tim: Ti derived from Tm
U: Unique seq
-------------
Un: U not found to have tRNA profile
Up: U with profile
Uc: U with truncated profile
Uf: U with full profile
Us: U with full profile transferred from another Uf
Um: Mapped U
Ui: U that is part of Ti and Ni (Ui does not necessarily contain indels, though Ni does)
Uif: Ui derived from Uf
Uim: Ui derived from Um
"""
import gc
import os
import sys
import math
import time
import random
import shutil
import argparse
import numpy as np
import pandas as pd
import pickle as pkl
import multiprocessing as mp
from hashlib import sha1
from itertools import chain
from functools import partial
from bisect import bisect_left
from collections import defaultdict, deque, OrderedDict
import anvio
import anvio.dbops as dbops
import anvio.utils as utils
import anvio.tables as tables
import anvio.fastalib as fastalib
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.clustering as clustering
import anvio.filesnpaths as filesnpaths
import anvio.tables.miscdata as miscdata
import anvio.trnaidentifier as trnaidentifier
import anvio.auxiliarydataops as auxiliarydataops
from anvio.errors import ConfigError
from anvio.sequence import Dereplicator
from anvio.drivers.vmatch import Vmatch
from anvio.agglomeration import Agglomerator
from anvio.tables.views import TablesForViews
from anvio.tables.miscdata import TableForLayerOrders
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2021, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Samuel Miller"
__email__ = "[email protected]"
pp = terminal.pretty_print
MAXSIZE = sys.maxsize
ALL_NTS = tuple(constants.nucleotides)
UNAMBIG_NTS = ('A', 'C', 'G', 'T')
NT_INT_DICT = {nt: i for i, nt in enumerate(UNAMBIG_NTS, start=1)}
INT_NT_DICT = {i: nt for i, nt in enumerate(UNAMBIG_NTS, start=1)}
# NUM_NT_BINS is used in counting the number of distinct nucleotides (1, 2, 3, or 4) at positions in
# internally ungapped alignments: there is one bin (value 0) for end gaps in the alignment.
NUM_NT_BINS = len(UNAMBIG_NTS) + 1
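# Illustrative note: encoding A, C, G, T as 1-4 leaves 0 free to represent an
# end gap, so each column of an internally ungapped alignment falls into one of
# NUM_NT_BINS = 5 bins, e.g., NT_INT_DICT['G'] == 3 and INT_NT_DICT[3] == 'G'.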
ANTICODON_AA_DICT = constants.anticodon_to_AA
# The user can specify in `anvi-trnaseq` what defines a long (biological vs. non-templated) 5'
# extension. The variable is set by `TRNASeqDataset.__init__`.
MIN_LENGTH_LONG_5PRIME_EXTENSION = 4
# The user can profile tRNA that does not end in any 3' terminus, such as CCA. Normally, such
# sequences are not profiled but may be mapped.
PROFILE_ABSENT_3PRIME_TERMINUS = False
class UniqueSequence(object):
"""A dereplicated tRNA-seq read."""
# Instances of this class are called `seq_U`.
__slots__ = ('string', 'name', 'read_count')
def __init__(self, string, name, read_count):
self.string = string
self.name = name
self.read_count = read_count
class UniqueProfileSequence(UniqueSequence):
"""A tRNA feature profile, either full or truncated, was assigned to the sequence."""
# Instances of this class are called `seq_Up`.
__slots__ = (
'feature_starts',
'feature_stops',
'has_complete_feature_set',
'has_His_G',
'alpha_start',
'alpha_stop',
'beta_start',
'beta_stop',
'anticodon_string',
'anticodon_aa',
'contains_anticodon',
'length_3prime_terminus',
'num_conserved',
'num_unconserved',
'num_paired',
'num_unpaired',
'unconserved_info',
'unpaired_info',
'profiled_seq_length',
'name_T'
)
def __init__(self, string, name, read_count, profile):
super().__init__(string, name, read_count)
self.feature_starts = tuple([f.start_pos if hasattr(f, 'start_pos') else f.start_positions for f in profile.features])
self.feature_stops = tuple([f.stop_pos if hasattr(f, 'stop_pos') else f.stop_positions for f in profile.features])
self.alpha_start = profile.alpha_start
self.alpha_stop = profile.alpha_stop
self.beta_start = profile.beta_start
self.beta_stop = profile.beta_stop
self.anticodon_string = anticodon = profile.anticodon_seq
self.anticodon_aa = profile.anticodon_aa if profile.anticodon_aa else None
self.contains_anticodon = True if anticodon else False
self.length_3prime_terminus = len(profile.threeprime_terminus_seq)
self.num_conserved = profile.num_conserved
self.num_unconserved = profile.num_unconserved
self.num_paired = profile.num_paired
self.num_unpaired = profile.num_unpaired
self.unconserved_info = tuple(profile.unconserved_info) if profile.unconserved_info else tuple()
self.unpaired_info = tuple(profile.unpaired_info) if profile.unpaired_info else tuple()
self.profiled_seq_length = len(profile.profiled_seq)
self.name_T = None
class UniqueFullProfileSequence(UniqueProfileSequence):
"""A full tRNA feature profile was assigned to the sequence."""
# Instances of this class are called `seq_Uf`.
__slots__ = (
'has_complete_feature_set',
'has_His_G',
'num_extrap_5prime_nts',
'xtra_5prime_length'
)
def __init__(self, string, name, read_count, profile):
super().__init__(string, name, read_count, profile)
self.has_complete_feature_set = profile.has_complete_feature_set
self.has_His_G = True if profile.features[0].name == 'tRNA-His position 0' else False
self.num_extrap_5prime_nts = profile.num_in_extrapolated_fiveprime_feature
self.xtra_5prime_length = 0 if profile.num_extra_fiveprime is None else profile.num_extra_fiveprime
class UniqueTruncatedProfileSequence(UniqueProfileSequence):
"""A truncated tRNA feature profile was assigned to the sequence."""
# Instances of this class are called `seq_Uc`.
__slots__ = ('trunc_profile_index', )
def __init__(self, string, name, read_count, profile):
super().__init__(string, name, read_count, profile)
self.trunc_profile_index = profile.trunc_profile_index
class UniqueTransferredProfileSequence(UniqueFullProfileSequence):
"""This object is generated as part of the determination of Nf from Tf. This type of seq is
produced in the special circumstance that the profile of a shorter Tf is transferred to a longer
Tf, because the longer Tf was originally found to have a complete profile, but a shorter 3'
subseq also had a complete profile; so, parsimoniously, the profile of the shorter Tf was
transferred to the longer, and the additional 5' nts of the longer Tf reclassified as extra nts
beyond the 5' end of a mature tRNA sequence."""
# Instances of this class are called `seq_Us`.
__slots__ = ('defunct_Uf', )
def __init__(self, defunct_Uf, replacement_dict):
UniqueSequence.__init__(self, defunct_Uf.string, defunct_Uf.name, defunct_Uf.read_count)
string_U = defunct_Uf.string
length_U = len(string_U)
stop_T_in_U = length_U - string_U[::-1].index(replacement_dict['string_T'][::-1])
feature_starts = []
for feature_start_from_T_3prime in replacement_dict['feature_starts_from_T_3prime']:
if isinstance(feature_start_from_T_3prime, int):
feature_starts.append(stop_T_in_U + feature_start_from_T_3prime)
else:
feature_starts.append(tuple([stop_T_in_U + strand_start_from_T_3prime for strand_start_from_T_3prime in feature_start_from_T_3prime]))
self.feature_starts = tuple(feature_starts)
feature_stops = []
for feature_stop_from_T_3prime in replacement_dict['feature_stops_from_T_3prime']:
if isinstance(feature_stop_from_T_3prime, int):
feature_stops.append(stop_T_in_U + feature_stop_from_T_3prime)
else:
feature_stops.append(tuple([stop_T_in_U + strand_stop_from_T_3prime for strand_stop_from_T_3prime in feature_stop_from_T_3prime]))
self.feature_stops = tuple(feature_stops)
self.has_complete_feature_set = True
self.num_extrap_5prime_nts = 0
self.has_His_G = replacement_dict['has_His_G']
self.alpha_start = None if replacement_dict['alpha_start_from_T_3prime'] is None else stop_T_in_U + replacement_dict['alpha_start_from_T_3prime']
self.alpha_stop = None if replacement_dict['alpha_stop_from_T_3prime'] is None else stop_T_in_U + replacement_dict['alpha_stop_from_T_3prime']
self.beta_start = None if replacement_dict['beta_start_from_T_3prime'] is None else stop_T_in_U + replacement_dict['beta_start_from_T_3prime']
self.beta_stop = None if replacement_dict['beta_stop_from_T_3prime'] is None else stop_T_in_U + replacement_dict['beta_stop_from_T_3prime']
self.anticodon_string = replacement_dict['anticodon_string']
self.anticodon_aa = replacement_dict['anticodon_aa']
self.contains_anticodon = replacement_dict['contains_anticodon']
self.length_3prime_terminus = length_U - stop_T_in_U
self.num_conserved = replacement_dict['num_conserved']
self.num_unconserved = replacement_dict['num_unconserved']
self.num_paired = replacement_dict['num_paired']
self.num_unpaired = replacement_dict['num_unpaired']
unconserved_info = []
for unconserved_tuple in replacement_dict['unconserved_info_from_T_3prime']:
unconserved_info.append((stop_T_in_U + unconserved_tuple[0],
unconserved_tuple[1],
unconserved_tuple[2]))
self.unconserved_info = tuple(unconserved_info)
unpaired_info = []
for unpaired_tuple in replacement_dict['unpaired_info_from_T_3prime']:
unpaired_info.append((stop_T_in_U + unpaired_tuple[0],
stop_T_in_U + unpaired_tuple[1],
unpaired_tuple[2],
unpaired_tuple[3]))
self.unpaired_info = tuple(unpaired_info)
self.profiled_seq_length = replacement_dict['profiled_seq_without_terminus_length'] + self.length_3prime_terminus
self.xtra_5prime_length = length_U - self.profiled_seq_length
self.name_T = None
# Store the defunct profile information for posterity.
self.defunct_Uf = defunct_Uf
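# Illustrative sketch (hypothetical helper, not used by the workflow): the
# coordinate transfer above amounts to locating the 3'-aligned T within U and
# shifting offsets recorded relative to the 3' end of T (non-positive numbers
# in `replacement_dict`).
def _sketch_transfer_coordinate(string_U, string_T, offset_from_T_3prime):
    """E.g., _sketch_transfer_coordinate('AAGGGCTTCCA', 'GGGCTTCC', -8) == 2,
    the start of T within U. Purely illustrative."""
    # Index just past the 3' end of T within U, found via the reversed strings.
    stop_T_in_U = len(string_U) - string_U[::-1].index(string_T[::-1])
    return stop_T_in_U + offset_from_T_3prime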
class UniqueMappedSequence(UniqueSequence):
"""This object is generated in the identification of tRNA fragments by mapping."""
# Instances of this class are called `seq_Um`.
__slots__ = ('xtra_5prime_length', 'name_T')
def __init__(self, string, name, read_count, xtra_5prime_length=0):
super().__init__(string, name, read_count)
self.xtra_5prime_length = xtra_5prime_length
self.name_T = None
class UniqueIndelSequence(UniqueSequence):
"""This object is generated in the identification of tRNA sequences with indels. Nq are found to
have indels, which can contradict feature profiles and the lengths of 5' extensions and 3'
termini in existing U. No profile is assigned to this object."""
# Instances of this class are called `seq_Ui`.
__slots__ = ('orig_U', 'xtra_5prime_length', 'length_3prime_terminus', 'name_T')
def __init__(self, seq_U, length_3prime_terminus=0, xtra_5prime_length=0):
super().__init__(seq_U.string, seq_U.name, seq_U.read_count)
self.orig_U = seq_U
self.length_3prime_terminus = length_3prime_terminus
self.xtra_5prime_length = xtra_5prime_length
self.name_T = None
class TrimmedSequence(object):
"""A tRNA sequence with bases trimmed 5' of the acceptor stem (or 5'-G in the case of tRNA-His)
and 3' of the discriminator.
The purpose of trimming is to collapse non-biological variability prevalent at the ends of
reads.
EXAMPLE 1:
E. coli tRNA-Ala-GGC-1-1
GGGGCTATAGCTCAGCTGGGAGAGCGCTTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCACCA
This collapses to the following T, removing the 3' terminus (the acceptor happens to be genomic
rather than post-transcriptionally added in this example, but it doesn't matter):
GGGGCTATAGCTCAGCTGGGAGAGCGCTTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCA
Examples of possible Up that collapse to T:
AGGGGCTATAGCTCAGCTGGGAGAGCGCTTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCACCA
GGGGCTATAGCTCAGCTGGGAGAGCGCTTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCAC
EXAMPLE 2:
3' fragment of the same tRNA, ending in 3'-CC rather than canonical 3'-CCA
TTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCACC
This collapses to the following T, removing 3'-CC:
TTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCA
"""
# Instances of this class are called T.
__slots__ = ('string', 'read_count', 'names_U', 'names_N')
def __init__(self, string, seqs_U):
self.string = string
self.names_U = tuple([seq_U.name for seq_U in seqs_U])
for seq_U in seqs_U:
if seq_U.name_T is not None:
raise ConfigError(f"The unique sequence with the name {seq_U.name} "
f"was already assigned to a trimmed sequence with the name {seq_U.name_T} "
"and so cannot be assigned to a new trimmed sequence.")
self.read_count = sum([seq_U.read_count for seq_U in seqs_U])
self.names_N = []
def get_representative_unique_sequence(seqs_U):
"""The representative U in Tf and Ti is chosen as follows:
1. Most abundant full-length tRNA without extra 5' nts, ignoring the 3' terminus nts, OR
2. Most abundant full-length tRNA with extra 5' nts, OR
3. Most abundant 3' tRNA fragment
Sort U such that the first is the most abundant+longest and the last is the least
abundant+shortest."""
seqs_U.sort(key=lambda seq_U: (-seq_U.xtra_5prime_length, -seq_U.read_count, seq_U.name))
if seqs_U[0].xtra_5prime_length > 0:
if seqs_U[-1].xtra_5prime_length == 0:
# If the first U has extra 5' nts and the last has none, then the last U and others
# without extra 5' nts must be full-length tRNAs (ignoring the 3' terminus). Therefore,
# select the most abundant of these full-length tRNAs without extra 5' nts as the
# representative seq.
represent_seq_U = sorted(seqs_U, key=lambda seq_U: (-seq_U.xtra_5prime_length, seq_U.read_count, seq_U.name))[-1]
else:
represent_seq_U = seqs_U[0]
else:
# In this case, ALL U are EITHER full-length tRNA OR a 3' tRNA fragment.
represent_seq_U = seqs_U[0]
return represent_seq_U
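# Illustrative sketch (hypothetical stand-ins, not part of the anvi'o API): the
# selection rules above only consult `xtra_5prime_length`, `read_count`, and
# `name`, so they can be exercised with lightweight objects.
def _sketch_representative_selection():
    from collections import namedtuple
    FakeU = namedtuple('FakeU', ['xtra_5prime_length', 'read_count', 'name'])
    seqs_U = [FakeU(0, 10, 'a'), FakeU(3, 50, 'b'), FakeU(0, 40, 'c')]
    # Returns 'c': the most abundant full-length U without extra 5' nts wins
    # over the more abundant 'b', which carries extra 5' nts.
    return get_representative_unique_sequence(seqs_U).name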
def get_read_threeprime_terminus_count_dict(seqs_U):
"""Count the number of reads with each 3' terminus in U."""
read_3prime_terminus_count_dict = defaultdict(int)
for seq_U in seqs_U:
if seq_U.length_3prime_terminus:
read_3prime_terminus_count_dict[seq_U.string[-seq_U.length_3prime_terminus: ]] += seq_U.read_count
else:
read_3prime_terminus_count_dict[''] += seq_U.read_count
return read_3prime_terminus_count_dict
def get_extra_fiveprime_info(seqs_U):
"""Get information on the extra 5' nucleotides in unique sequences."""
uniq_with_xtra_5prime_count = 0
read_with_xtra_5prime_count = 0
# Find the number of reads containing each unique 5' extension.
long_5prime_extension_dict = {}
for seq_U in seqs_U:
if seq_U.xtra_5prime_length:
uniq_with_xtra_5prime_count += 1
read_with_xtra_5prime_count += seq_U.read_count
if seq_U.xtra_5prime_length >= MIN_LENGTH_LONG_5PRIME_EXTENSION:
long_5prime_extension_dict[seq_U.string[: seq_U.xtra_5prime_length]] = seq_U.read_count
return uniq_with_xtra_5prime_count, read_with_xtra_5prime_count, long_5prime_extension_dict
class TrimmedFullProfileSequence(TrimmedSequence):
"""This object is formed from sequences with a full tRNA feature profile."""
# Instances of this class are called `Tf` and can derive from `Uf` and optionally `Us` objects.
__slots__ = (
'name',
'categories_U',
'feature_starts',
'feature_stops',
'contains_anticodon',
'read_3prime_terminus_count_dict',
'has_complete_feature_set',
'has_His_G',
'num_extrap_5prime_nts',
'uniq_with_xtra_5prime_count',
'read_with_xtra_5prime_count',
'long_5prime_extension_dict'
)
def __init__(self, string, seqs_U):
# U are sorted in place when finding the representative unique sequence. Call the parent
# class constructor after this to ensure recorded U names are in the correct order, with the
# representative name coming first.
represent_U = get_representative_unique_sequence(seqs_U)
super().__init__(string, seqs_U)
self.name = name = represent_U.name
categories_U = []
for seq_U in seqs_U:
if isinstance(seq_U, UniqueFullProfileSequence):
categories_U.append('Uf')
elif isinstance(seq_U, UniqueTransferredProfileSequence):
categories_U.append('Us')
else:
raise Exception(f"A unique sequence with name `{seq_U.name}` of class `{type(seq_U)}` was encountered.")
seq_U.name_T = name
self.categories_U = tuple(categories_U)
# Assume that the feature profile indices of the representative U are the same as the other
# U. The 3' terminus is the last feature in the profile and not part of T.
self.feature_starts = represent_U.feature_starts[: -1] if represent_U.feature_starts else None
self.feature_stops = represent_U.feature_stops[: -1] if represent_U.feature_stops else None
self.contains_anticodon = represent_U.contains_anticodon
self.has_complete_feature_set = represent_U.has_complete_feature_set
self.has_His_G = represent_U.has_His_G
self.num_extrap_5prime_nts = represent_U.num_extrap_5prime_nts
self.read_3prime_terminus_count_dict = get_read_threeprime_terminus_count_dict(seqs_U)
(self.uniq_with_xtra_5prime_count,
self.read_with_xtra_5prime_count,
self.long_5prime_extension_dict) = get_extra_fiveprime_info(seqs_U)
class TrimmedTruncatedProfileSequence(TrimmedSequence):
"""This object is formed from sequences with a truncated tRNA feature profile."""
# Instances of this class are called `Tc` and derive from `Uc`. All instances are initially
# categorized as `nontrna`, which can later change to `trna`. The category encompasses all
# component `Uc`.
__slots__ = (
'name',
'category',
'feature_starts',
'feature_stops',
'contains_anticodon',
'read_3prime_terminus_count_dict',
'trunc_profile_index'
)
def __init__(self, string, seqs_U):
# Make the most abundant U the representative sequence.
seqs_U.sort(key=lambda seq_U: (-seq_U.read_count, seq_U.name))
super().__init__(string, seqs_U)
represent_U = seqs_U[0]
self.name = name = represent_U.name
self.category = 'nontrna'
for seq_U in seqs_U:
seq_U.name_T = name
# Assume that the feature profile indices of the representative U are the same as the other
# U. The 3' terminus is the last feature in the profile and not part of T.
self.feature_starts = represent_U.feature_starts[: -1] if represent_U.feature_starts else None
self.feature_stops = represent_U.feature_stops[: -1] if represent_U.feature_stops else None
self.contains_anticodon = represent_U.contains_anticodon
self.read_3prime_terminus_count_dict = get_read_threeprime_terminus_count_dict(seqs_U)
self.trunc_profile_index = represent_U.trunc_profile_index
class TrimmedMappedSequence(TrimmedSequence):
"""This object is formed from a single Um in the process of mapping unique unprofiled seqs to N.
It is not like the other T objects. Its purpose is to be one of the T objects added to an N;
however, unlike Tp, no 5' nts are trimmed from the U string in creating the T string (and
mapped seqs do not have extra 3' nts). The reason for this is that the 5' extension may
represent all but a small number of nts in the seq, so it is best not to dereplicate mapped seqs
identical in the non-5' section by lumping them together as the same T."""
# Instances of this class are called `Tm` and derive from a single `Um`.
__slots__ = (
'name',
'uniq_with_xtra_5prime_count',
'read_with_xtra_5prime_count',
'long_5prime_extension_dict'
)
def __init__(self, seq_U):
super().__init__(seq_U.string, [seq_U])
self.name = seq_U.name
seq_U.name_T = self.name
xtra_5prime_length = seq_U.xtra_5prime_length
self.uniq_with_xtra_5prime_count = 1 if xtra_5prime_length else 0
self.read_with_xtra_5prime_count = seq_U.read_count if xtra_5prime_length else 0
self.long_5prime_extension_dict = {}
if xtra_5prime_length >= MIN_LENGTH_LONG_5PRIME_EXTENSION:
self.long_5prime_extension_dict[seq_U.string[: xtra_5prime_length]] = seq_U.read_count
class TrimmedIndelSequence(TrimmedSequence):
"""This object is formed in the identification of tRNA sequences with indels."""
# Instances of this class are called `Ti` and derive from `Ui`.
__slots__ = (
'name',
'read_3prime_terminus_count_dict',
'uniq_with_xtra_5prime_count',
'read_with_xtra_5prime_count',
'long_5prime_extension_dict'
)
def __init__(self, string, seqs_U):
represent_U = get_representative_unique_sequence(seqs_U)
super().__init__(string, seqs_U)
self.name = name = represent_U.name
for seq_U in seqs_U:
seq_U.name_T = name
self.read_3prime_terminus_count_dict = get_read_threeprime_terminus_count_dict(seqs_U)
(self.uniq_with_xtra_5prime_count,
self.read_with_xtra_5prime_count,
self.long_5prime_extension_dict) = get_extra_fiveprime_info(seqs_U)
class NormalizedSequence(object):
"""A tRNA sequence that can contain shorter tRNA fragment subsequences.
N are derived from T. Tp are first prefix-dereplicated from the 3' end by the method,
`TRNASeqDataset.dereplicate_threeprime`. The longest Tp in a cluster of dereplicated Tp becomes
the representative seq of N. `TRNASeqDataset.map_fragments` subsequently maps unprofiled reads
to the set of N. Mapped tRNA fragments are added as T, and the `init` method is called to
finalize attributes of each N.
EXAMPLE:
Consider the full-length and fragmentary E. coli tRNA-Ala-GGC-1-1 used in T examples.
GGGGCTATAGCTCAGCTGGGAGAGCGCTTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCA
TTGCATGGCATGCAAGAGGTCAGCGGTTCGATCCCGCTTAGCTCCA
The seqs collapse into a single N when dereplicated from the 3' end. N is represented by the
longer seq, which should be the first in the list of T added to N.
"""
# Instances of this class are called `N`.
__slots__ = (
'name',
'names_T',
'string',
'starts_T_in_N',
'stops_T_in_N',
'spec_read_count',
'nonspec_read_count',
'spec_covs',
'nonspec_covs',
'mean_spec_cov',
'mean_nonspec_cov',
'names_M'
)
def __init__(self, seqs_T, starts_T_in_N=None, stops_T_in_N=None, skip_init=False):
represent_T = seqs_T[0]
self.name = name = represent_T.name
for seq_T in seqs_T:
seq_T.names_N.append(name)
self.names_T = [seq_T.name for seq_T in seqs_T]
self.string = represent_T.string
if starts_T_in_N and stops_T_in_N:
self.starts_T_in_N = starts_T_in_N
self.stops_T_in_N = stops_T_in_N
elif not starts_T_in_N and not stops_T_in_N:
# This is what occurs in `anvi-trnaseq` for the instantiation of
# `NormalizedFullProfileSequence` and `NormalizedTruncatedProfileSequence` objects. All
# Tp provided as input are aligned from the 3' end. Tm that can be aligned to other
# places in N are added later.
length_N = len(self.string)
self.starts_T_in_N = [length_N - len(seq_T.string) for seq_T in seqs_T]
self.stops_T_in_N = [length_N] * len(seqs_T)
else:
self.starts_T_in_N = None
self.stops_T_in_N = None
# It is useful to know which M, if any, contain this N. Though Ni can theoretically be
# assigned to multiple M, this is not allowed for simplicity's sake.
self.names_M = []
if skip_init:
self.spec_read_count = None
self.nonspec_read_count = None
self.spec_covs = None
self.nonspec_covs = None
self.mean_spec_cov = None
self.mean_nonspec_cov = None
else:
self.init(seqs_T)
def init(self, seqs_T):
"""Set attributes for a "finalized" set of input T, potentially including those that were
added after object instantiation, such as Tm."""
self.names_T = tuple(self.names_T)
# Specific reads are those that are only contained in this N.
spec_read_count = 0
nonspec_read_count = 0
spec_covs = np.zeros(len(self.string), dtype=int)
nonspec_covs = np.zeros(len(self.string), dtype=int)
for seq_T, start_T_in_N, stop_T_in_N in zip(seqs_T, self.starts_T_in_N, self.stops_T_in_N):
if len(seq_T.names_N) == 1:
spec_read_count += seq_T.read_count
spec_covs[start_T_in_N: stop_T_in_N] += seq_T.read_count
else:
nonspec_read_count += seq_T.read_count
nonspec_covs[start_T_in_N: stop_T_in_N] += seq_T.read_count
self.spec_read_count = spec_read_count
self.nonspec_read_count = nonspec_read_count
self.spec_covs = spec_covs
self.nonspec_covs = nonspec_covs
self.mean_spec_cov = spec_covs.mean()
self.mean_nonspec_cov = nonspec_covs.mean()
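# Illustrative sketch (hypothetical helper): the specific/nonspecific split
# above depends only on whether a T belongs to exactly one N
# (len(seq_T.names_N) == 1). A minimal recomputation of that split for one N,
# given (start, stop, read_count, num_parent_N) tuples for its T:
def _sketch_specific_vs_nonspecific_covs(length_N, fragments):
    spec_covs = np.zeros(length_N, dtype=int)
    nonspec_covs = np.zeros(length_N, dtype=int)
    for start, stop, read_count, num_parent_N in fragments:
        target_covs = spec_covs if num_parent_N == 1 else nonspec_covs
        target_covs[start: stop] += read_count
    return spec_covs, nonspec_covs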
class NormalizedFullProfileSequence(NormalizedSequence):
"""This object is instantiated with `TrimmedFullProfileSequence` objects in the workflow.
`TrimmedTruncatedProfileSequence` and `TrimmedMappedSequence` objects are potentially added
after instantiation."""
# Instances of this class are called `Nf`.
__slots__ = (
'categories_T',
'feature_starts',
'feature_stops',
'contains_anticodon',
'has_complete_feature_set',
'has_His_G',
'spec_read_xtra_5prime_count',
'nonspec_read_xtra_5prime_count',
'spec_map_seq_count',
'nonspec_map_seq_count',
'spec_map_read_count',
'nonspec_map_read_count',
'absent_3prime_terminus_seq_count',
'absent_3prime_terminus_read_count',
'spec_long_5prime_extension_dict',
'nonspec_long_5prime_extension_dict',
'spec_read_3prime_terminus_count_dict',
'nonspec_read_3prime_terminus_count_dict'
)
def __init__(self, seqs_T, starts_T_in_N=None, stops_T_in_N=None):
super().__init__(seqs_T, starts_T_in_N=None, stops_T_in_N=None, skip_init=True)
categories_T = []
for seq_T in seqs_T:
if isinstance(seq_T, TrimmedFullProfileSequence):
categories_T.append('Tf')
elif isinstance(seq_T, TrimmedTruncatedProfileSequence):
categories_T.append('Tc_trna')
else:
raise Exception(f"A trimmed seq ({seq_T.name}) of the unexpected class `{type(seq_T)}` was used to instantiate `Nf`.")
self.categories_T = categories_T
represent_T = seqs_T[0]
self.feature_starts = represent_T.feature_starts
self.feature_stops = represent_T.feature_stops
self.contains_anticodon = represent_T.contains_anticodon
self.has_complete_feature_set = represent_T.has_complete_feature_set
self.has_His_G = represent_T.has_His_G
self.spec_read_xtra_5prime_count = None
self.nonspec_read_xtra_5prime_count = None
self.spec_map_read_count = None
self.nonspec_map_read_count = None
self.spec_long_5prime_extension_dict = None
self.nonspec_long_5prime_extension_dict = None
self.spec_read_3prime_terminus_count_dict = None
self.nonspec_read_3prime_terminus_count_dict = None
def init(self, seqs_T):
"""Set attributes for a "finalized" set of input trimmed sequences."""
super().init(seqs_T)
self.categories_T = tuple(self.categories_T)
spec_map_seq_count = 0
nonspec_map_seq_count = 0
spec_map_read_count = 0
nonspec_map_read_count = 0
absent_3prime_terminus_seq_count = 0
absent_3prime_terminus_read_count = 0
spec_read_xtra_5prime_count = 0
nonspec_read_xtra_5prime_count = 0
spec_long_5prime_extension_dict = defaultdict(int)
nonspec_long_5prime_extension_dict = defaultdict(int)
spec_read_3prime_terminus_count_dict = defaultdict(int)
nonspec_read_3prime_terminus_count_dict = defaultdict(int)
length_N = len(self.string)
for seq_T, start_T_in_N, stop_T_in_N in zip(seqs_T, self.starts_T_in_N, self.stops_T_in_N):
if len(seq_T.names_N) == 1:
if isinstance(seq_T, TrimmedMappedSequence):
spec_map_seq_count += 1
spec_map_read_count += seq_T.read_count
if not PROFILE_ABSENT_3PRIME_TERMINUS:
if start_T_in_N == 0 and stop_T_in_N == length_N:
absent_3prime_terminus_seq_count += 1
absent_3prime_terminus_read_count += seq_T.read_count
spec_read_xtra_5prime_count += seq_T.read_with_xtra_5prime_count
for string_5prime_extension, read_count in seq_T.long_5prime_extension_dict.items():
spec_long_5prime_extension_dict[string_5prime_extension] += read_count
else:
for string_3prime_terminus, read_count in seq_T.read_3prime_terminus_count_dict.items():
spec_read_3prime_terminus_count_dict[string_3prime_terminus] += read_count
if not isinstance(seq_T, TrimmedTruncatedProfileSequence):
spec_read_xtra_5prime_count += seq_T.read_with_xtra_5prime_count
for string_5prime_extension, read_count in seq_T.long_5prime_extension_dict.items():
spec_long_5prime_extension_dict[string_5prime_extension] += read_count
else:
if isinstance(seq_T, TrimmedMappedSequence):
nonspec_map_seq_count += 1
nonspec_map_read_count += seq_T.read_count
nonspec_read_xtra_5prime_count += seq_T.read_with_xtra_5prime_count
else:
for string_3prime_terminus, read_count in seq_T.read_3prime_terminus_count_dict.items():
nonspec_read_3prime_terminus_count_dict[string_3prime_terminus] += read_count
if not isinstance(seq_T, TrimmedTruncatedProfileSequence):
nonspec_read_xtra_5prime_count += seq_T.read_with_xtra_5prime_count
for string_5prime_extension, read_count in seq_T.long_5prime_extension_dict.items():
nonspec_long_5prime_extension_dict[string_5prime_extension] += read_count
self.spec_map_seq_count = spec_map_seq_count
self.nonspec_map_seq_count = nonspec_map_seq_count
self.spec_map_read_count = spec_map_read_count
self.nonspec_map_read_count = nonspec_map_read_count
self.absent_3prime_terminus_seq_count = absent_3prime_terminus_seq_count
self.absent_3prime_terminus_read_count = absent_3prime_terminus_read_count
self.spec_read_xtra_5prime_count = spec_read_xtra_5prime_count
self.nonspec_read_xtra_5prime_count = nonspec_read_xtra_5prime_count
self.spec_long_5prime_extension_dict = spec_long_5prime_extension_dict
self.nonspec_long_5prime_extension_dict = nonspec_long_5prime_extension_dict
self.spec_read_3prime_terminus_count_dict = spec_read_3prime_terminus_count_dict
self.nonspec_read_3prime_terminus_count_dict = nonspec_read_3prime_terminus_count_dict
class NormalizedTruncatedProfileSequence(NormalizedSequence):
"""This object is formed exclusively from `TrimmedTruncatedProfileSequence` objects."""
# Instances of this class are called `Nc`.
__slots__ = (
'feature_starts',
'feature_stops',
'contains_anticodon',
'trunc_profile_index',
'spec_read_3prime_terminus_count_dict',
'nonspec_read_3prime_terminus_count_dict'
)
def __init__(self, seqs_T, starts_T_in_N=None, stops_T_in_N=None):
super().__init__(seqs_T, starts_T_in_N=None, stops_T_in_N=None, skip_init=False)
represent_T = seqs_T[0]
self.feature_starts = represent_T.feature_starts
self.feature_stops = represent_T.feature_stops
self.contains_anticodon = represent_T.contains_anticodon
self.trunc_profile_index = represent_T.trunc_profile_index
spec_read_3prime_terminus_count_dict = defaultdict(int)
nonspec_read_3prime_terminus_count_dict = defaultdict(int)
for seq_T in seqs_T:
if len(seq_T.names_N) == 1:
for string_3prime_terminus, read_count in seq_T.read_3prime_terminus_count_dict.items():
spec_read_3prime_terminus_count_dict[string_3prime_terminus] += read_count
else:
for string_3prime_terminus, read_count in seq_T.read_3prime_terminus_count_dict.items():
nonspec_read_3prime_terminus_count_dict[string_3prime_terminus] += read_count
self.spec_read_3prime_terminus_count_dict = spec_read_3prime_terminus_count_dict
self.nonspec_read_3prime_terminus_count_dict = nonspec_read_3prime_terminus_count_dict
class NormalizedIndelSequence(NormalizedSequence):
"""This object is formed exclusively from `TrimmedIndelSequence` objects."""
# Instances of this class are called `Ni`.
__slots__ = (
'insert_starts_Ni',
'insert_starts_M',
'insert_lengths',
'del_starts_Ni',
'del_starts_M',
'del_lengths',
'contains_anticodon',
'spec_insert_covs',
'nonspec_insert_covs',
'spec_del_covs',
'nonspec_del_covs',
'spec_read_xtra_5prime_count',
'nonspec_read_xtra_5prime_count',
'spec_map_read_count',
'nonspec_map_read_count',
'spec_long_5prime_extension_dict',
'nonspec_long_5prime_extension_dict',
'spec_read_3prime_terminus_count_dict',
'nonspec_read_3prime_terminus_count_dict'
)
def __init__(self,
string,
seqs_T,
starts_T_in_N,
name_M,
insert_starts_Ni,
insert_starts_M,
insert_lengths,
del_starts_Ni,
del_starts_M,
del_lengths,
contains_anticodon):
self.name = name = seqs_T[0].name
for seq_Ti in seqs_T:
seq_Ti.names_N.append(name)
self.names_T = [seq_T.name for seq_T in seqs_T]
self.string = string
self.starts_T_in_N = starts_T_in_N
self.names_M = [name_M]
self.insert_starts_Ni = insert_starts_Ni
self.insert_starts_M = insert_starts_M
self.insert_lengths = insert_lengths
self.del_starts_Ni = del_starts_Ni
self.del_starts_M = del_starts_M
self.del_lengths = del_lengths
self.contains_anticodon = contains_anticodon
def init(self, seqs_T, dict_Ui):
"""Set attributes for a "finalized" set of input Ti."""
self.names_T = tuple(self.names_T)
self.starts_T_in_N = tuple(self.starts_T_in_N)
spec_read_count = 0
nonspec_read_count = 0
length_Ni = len(self.string)
spec_covs = np.zeros(length_Ni, dtype=int)
nonspec_covs = np.zeros(length_Ni, dtype=int)
insert_starts_Ni = self.insert_starts_Ni
insert_stops_Ni = [insert_start + insert_length for insert_start, insert_length in zip(insert_starts_Ni, self.insert_lengths)]
spec_insert_covs = np.zeros(len(insert_starts_Ni), dtype=int)
nonspec_insert_covs = np.zeros(len(insert_starts_Ni), dtype=int)
del_starts_Ni = self.del_starts_Ni
spec_del_covs = np.zeros(len(del_starts_Ni), dtype=int)
nonspec_del_covs = np.zeros(len(del_starts_Ni), dtype=int)
spec_read_xtra_5prime_count = 0
nonspec_read_xtra_5prime_count = 0
spec_map_read_count = 0
nonspec_map_read_count = 0
spec_long_5prime_extension_dict = defaultdict(int)
nonspec_long_5prime_extension_dict = defaultdict(int)
spec_read_3prime_terminus_count_dict = defaultdict(int)
nonspec_read_3prime_terminus_count_dict = defaultdict(int)
for seq_Ti, start_Ti_in_Ni in zip(seqs_T, self.starts_T_in_N):
read_count = seq_Ti.read_count
orig_U = dict_Ui[seq_Ti.names_U[0]].orig_U
if len(seq_Ti.names_N) == 1:
spec_read_count += read_count
if seq_Ti.read_with_xtra_5prime_count:
spec_read_xtra_5prime_count += seq_Ti.read_with_xtra_5prime_count
if isinstance(orig_U, UniqueMappedSequence):
spec_map_read_count += 1
covs = spec_covs
insert_covs = spec_insert_covs
del_covs = spec_del_covs
long_5prime_extension_dict = spec_long_5prime_extension_dict
read_3prime_terminus_count_dict = spec_read_3prime_terminus_count_dict
else:
nonspec_read_count += read_count
if seq_Ti.read_with_xtra_5prime_count:
nonspec_read_xtra_5prime_count += seq_Ti.read_with_xtra_5prime_count
if isinstance(orig_U, UniqueMappedSequence):
nonspec_map_read_count += 1
covs = nonspec_covs
insert_covs = nonspec_insert_covs
del_covs = nonspec_del_covs
long_5prime_extension_dict = nonspec_long_5prime_extension_dict
read_3prime_terminus_count_dict = nonspec_read_3prime_terminus_count_dict
stop_Ti_in_Ni = start_Ti_in_Ni + len(seq_Ti.string)
covs[start_Ti_in_Ni: stop_Ti_in_Ni] += read_count
insert_index = 0
for insert_start, insert_stop in zip(insert_starts_Ni, insert_stops_Ni):
if start_Ti_in_Ni <= insert_start and stop_Ti_in_Ni >= insert_stop:
insert_covs[insert_index] += read_count
insert_index += 1
for del_index, del_start in enumerate(del_starts_Ni):
if start_Ti_in_Ni <= del_start and stop_Ti_in_Ni > del_start + 1:
del_covs[del_index] += read_count
for string_5prime, extension_read_count in seq_Ti.long_5prime_extension_dict.items():
long_5prime_extension_dict[string_5prime] += extension_read_count
for string_3prime, terminus_read_count in seq_Ti.read_3prime_terminus_count_dict.items():
read_3prime_terminus_count_dict[string_3prime] += terminus_read_count
self.spec_read_count = spec_read_count
self.nonspec_read_count = nonspec_read_count
self.spec_covs = spec_covs
self.nonspec_covs = nonspec_covs
self.mean_spec_cov = spec_covs.mean()
self.mean_nonspec_cov = nonspec_covs.mean()
self.spec_insert_covs = spec_insert_covs
self.nonspec_insert_covs = nonspec_insert_covs
self.spec_del_covs = spec_del_covs
self.nonspec_del_covs = nonspec_del_covs
self.spec_read_xtra_5prime_count = spec_read_xtra_5prime_count
self.nonspec_read_xtra_5prime_count = nonspec_read_xtra_5prime_count
self.spec_map_read_count = spec_map_read_count
self.nonspec_map_read_count = nonspec_map_read_count
self.spec_long_5prime_extension_dict = spec_long_5prime_extension_dict
self.nonspec_long_5prime_extension_dict = nonspec_long_5prime_extension_dict
self.spec_read_3prime_terminus_count_dict = spec_read_3prime_terminus_count_dict
self.nonspec_read_3prime_terminus_count_dict = nonspec_read_3prime_terminus_count_dict
class ModifiedSequence(object):
"""This object represents a tRNA sequence with sites of predicted potential mod-induced subs
and, optionally, indels.
The `anvi-trnaseq` workflow aggregates similar Nf. The aggregations are decomposed into clusters
of Nf distinguished by potential mod-induced subs (3-4 different nts at ≥1 aligned positions). M
is instantiated with the list of Nf, with the first Nf being longest or tied for longest.
Corresponding lists of Nf sub positions in M are required. The workflow later finds Ni and adds
them to M. Lists of indel positions in M and indel lengths are needed. The `init` method of M is
called to calculate coverages and other information from nts, subs, and indels, if present.
The workflow requires that Ni be assigned to 1 M for simplicity's sake. If the same Ni can arise
from indels in multiple M, then Ni is disregarded.
M EXAMPLE:
Consider E. coli tRNA-Ala-GGC-1-1, with detected mods at positions 17 and 46. As seen in the N
example, the first sequence is the N with unmutated nucleotides. The next set of N include
possible mod-induced subs. The next set of N are Ni with insertions, and the last set are Ni
with deletions.
| |
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGAG G TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC A GGGAGAGCGCTTGCATGGCATGCAAGAG G TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC A GGGAGAGCGCTTGCATGGCATGCAAGAG A TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC A GGGAGAGCGCTTGCATGGCATGCAAGAG C TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC C GGGAGAGCGCTTGCATGGCATGCAAGAG G TCAGCGGTTCGATCCCGCTTAGCTCCA
CTCAGC G GGGAGAGCGCTTGCATGGCATGCAAGAG G TCAGCGGTTCGATCCCGCTTAGCTCCA
CATGGCATGCAAGAG T TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGAGAG TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGAGGG TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC A GGGAGAGCGCTTGCATGGCATGCAAGAGGAATCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGAG - TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGA- - TCAGCGGTTCGATCCCGCTTAGCTCCA
GGGGCTATAGCTCAGC T GGGAGAGCGCTTGCATGGCATGCAAGA- G TCAGCGGTTCGATCCCGCTTAGCTCCA
"""
__slots__ = (
'names_Nb',
'sub_positions',
'name',
'length',
'names_Ni',
'starts_Ni_in_M',
'insert_starts',
'insert_strings',
'del_starts',
'del_lengths',
'spec_read_count',
'nonspec_read_count',
'spec_map_read_count',
'nonspec_map_read_count',
'spec_read_xtra_5prime_count',
'nonspec_read_xtra_5prime_count',
'spec_long_5prime_extension_dict',
'nonspec_long_5prime_extension_dict',
'spec_read_3prime_terminus_count_dict',
'nonspec_read_3prime_terminus_count_dict',
'spec_covs',
'nonspec_covs',
'mean_spec_cov',
'mean_nonspec_cov',
'spec_sub_covs',
'nonspec_sub_covs',
'spec_insert_covs',
'nonspec_insert_covs',
'spec_del_covs',
'nonspec_del_covs',
'consensus_string'
)
def __init__(self, seqs_Nb, sub_positions):
self.names_Nb = [seq_Nb.name for seq_Nb in seqs_Nb]
self.sub_positions = sub_positions
self.name = name = self.names_Nb[0]
for seq_Nb in seqs_Nb:
seq_Nb.names_M.append(name)
self.length = len(seqs_Nb[0].string)
self.names_Ni = tuple()
self.starts_Ni_in_M = tuple()
self.insert_starts = None
self.insert_strings = None
self.del_starts = None
self.del_lengths = None
self.spec_read_count = None
self.nonspec_read_count = None
self.spec_map_read_count = None
self.nonspec_map_read_count = None
self.spec_read_xtra_5prime_count = None
self.nonspec_read_xtra_5prime_count = None
self.spec_long_5prime_extension_dict = None
self.nonspec_long_5prime_extension_dict = None
self.spec_read_3prime_terminus_count_dict = None
self.nonspec_read_3prime_terminus_count_dict = None
self.spec_covs = None
self.nonspec_covs = None
self.mean_spec_cov = None
self.mean_nonspec_cov = None
self.spec_sub_covs = None
self.nonspec_sub_covs = None
self.spec_insert_covs = None
self.nonspec_insert_covs = None
self.spec_del_covs = None
self.nonspec_del_covs = None
self.consensus_string = None
def init(self, seqs_Nb, seqs_Ni):
"""Set attributes for a "finalized" set of input Ns and Ni objects."""
self.names_Nb = tuple(self.names_Nb)
spec_read_count = 0
nonspec_read_count = 0
spec_map_read_count = 0
nonspec_map_read_count = 0
spec_read_xtra_5prime_count = 0
nonspec_read_xtra_5prime_count = 0
spec_long_5prime_extension_dict = defaultdict(int)
nonspec_long_5prime_extension_dict = defaultdict(int)
spec_read_3prime_terminus_count_dict = defaultdict(int)
nonspec_read_3prime_terminus_count_dict = defaultdict(int)
length_M = self.length
spec_covs_M = np.zeros(length_M, dtype=int)
nonspec_covs_M = np.zeros(length_M, dtype=int)
# Find the read counts of different types of seqs composing M.
for seq_N in seqs_Nb + seqs_Ni:
spec_read_count += seq_N.spec_read_count
nonspec_read_count += seq_N.nonspec_read_count
spec_map_read_count += seq_N.spec_map_read_count
nonspec_map_read_count += seq_N.nonspec_map_read_count
spec_read_xtra_5prime_count += seq_N.spec_read_xtra_5prime_count
nonspec_read_xtra_5prime_count += seq_N.nonspec_read_xtra_5prime_count
for string_5prime, read_count in seq_N.spec_long_5prime_extension_dict.items():
spec_long_5prime_extension_dict[string_5prime] += read_count
for string_5prime, read_count in seq_N.nonspec_long_5prime_extension_dict.items():
nonspec_long_5prime_extension_dict[string_5prime] += read_count
for string_3prime, read_count in seq_N.spec_read_3prime_terminus_count_dict.items():
spec_read_3prime_terminus_count_dict[string_3prime] += read_count
for string_3prime, read_count in seq_N.nonspec_read_3prime_terminus_count_dict.items():
nonspec_read_3prime_terminus_count_dict[string_3prime] += read_count
# Find the covs of subs in Nb.
sub_positions = self.sub_positions
spec_sub_covs = np.zeros((len(sub_positions), len(UNAMBIG_NTS)), dtype=int)
nonspec_sub_covs = np.zeros((len(sub_positions), len(UNAMBIG_NTS)), dtype=int)
reverse_sub_positions = sub_positions[::-1]
for seq_Nb in seqs_Nb:
# N are aligned with the 3' end of M.
start_Nb_in_M = length_M - len(seq_Nb.string)
spec_covs_M[start_Nb_in_M: ] += seq_Nb.spec_covs
nonspec_covs_M[start_Nb_in_M: ] += seq_Nb.nonspec_covs
# Find the covs of subs in Nb. Loop through subs from the 3' end of Nb and M.
string_Nb = seq_Nb.string
spec_covs_Nb = seq_Nb.spec_covs
nonspec_covs_Nb = seq_Nb.nonspec_covs
for sub_index, sub_pos_M in enumerate(reverse_sub_positions, 1):
if sub_pos_M < start_Nb_in_M:
# Remaining subs are 5' of Nb.
break
sub_pos_Nb = sub_pos_M - start_Nb_in_M
nt_col = NT_INT_DICT[string_Nb[sub_pos_Nb]] - 1
spec_sub_covs[-sub_index, nt_col] += spec_covs_Nb[sub_pos_Nb]
nonspec_sub_covs[-sub_index, nt_col] += nonspec_covs_Nb[sub_pos_Nb]
# Find the covs of M positions, subs, and indels.
insert_covs_dict = {}
del_covs_dict = {}
for seq_Ni, start_Ni_in_M in zip(seqs_Ni, self.starts_Ni_in_M):
# Find insertion covs in Ni.
for insert_Ni_start, insert_M_start, insert_length, spec_insert_cov, nonspec_insert_cov in zip(seq_Ni.insert_starts_Ni,
seq_Ni.insert_starts_M,
seq_Ni.insert_lengths,
seq_Ni.spec_insert_covs,
seq_Ni.nonspec_insert_covs):
try:
insert_covs = insert_covs_dict[(insert_M_start, seq_Ni.string[insert_Ni_start: insert_Ni_start + insert_length])]
insert_covs[0] += spec_insert_cov
insert_covs[1] += nonspec_insert_cov
except KeyError:
insert_covs_dict[(insert_M_start, seq_Ni.string[insert_Ni_start: insert_Ni_start + insert_length])] = np.array((spec_insert_cov, nonspec_insert_cov))
# Find deletion covs in Ni.
for del_M_start, del_length, spec_del_cov, nonspec_del_cov in zip(seq_Ni.del_starts_M,
seq_Ni.del_lengths,
seq_Ni.spec_del_covs,
seq_Ni.nonspec_del_covs):
try:
del_covs = del_covs_dict[(del_M_start, del_length)]
del_covs[0] += spec_del_cov
del_covs[1] += nonspec_del_cov
except KeyError:
del_covs_dict[(del_M_start, del_length)] = np.array((spec_del_cov, nonspec_del_cov))
# Find nt and sub covs in Ni. Loop through each nt of Ni to account for the positions of
# indels, a more complex process than that used for Nb.
string_Ni = seq_Ni.string
spec_covs_Ni = seq_Ni.spec_covs
nonspec_covs_Ni = seq_Ni.nonspec_covs
# Make an iterator of the positions of inserted nts in Ni.
iter_Ni_insert_positions = iter(chain.from_iterable(
[range(insert_start, insert_start + insert_length)
for insert_start, insert_length in zip(seq_Ni.insert_starts_Ni, seq_Ni.insert_lengths)]))
try:
next_Ni_insert_pos = next(iter_Ni_insert_positions)
except StopIteration:
next_Ni_insert_pos = -1
# Make iterators of the positions and lengths of deletions in Ni.
iter_Ni_del_positions = iter([del_Ni_start + 1 for del_Ni_start in seq_Ni.del_starts_Ni])
iter_Ni_del_lengths = iter(seq_Ni.del_lengths)
try:
next_Ni_del_pos = next(iter_Ni_del_positions)
next_Ni_del_length = next(iter_Ni_del_lengths)
except StopIteration:
next_Ni_del_pos = -1
next_Ni_del_length = -1
nt_pos_M = start_Ni_in_M
sub_index = bisect_left(sub_positions, start_Ni_in_M)
iter_M_sub_positions = iter(sub_positions[sub_index: ])
try:
next_M_sub_pos = next(iter_M_sub_positions)
except StopIteration:
next_M_sub_pos = MAXSIZE
for nt_pos_Ni, nt in enumerate(string_Ni):
if nt_pos_Ni == next_Ni_insert_pos:
try:
next_Ni_insert_pos = next(iter_Ni_insert_positions)
except StopIteration:
next_Ni_insert_pos = -1
# Since insertions are not nts in M, do not increment the nt position in M.
continue
if nt_pos_Ni == next_Ni_del_pos:
# Increment the nt position in M by the size of the del.
nt_pos_M += next_Ni_del_length
try:
next_Ni_del_pos = next(iter_Ni_del_positions)
next_Ni_del_length = next(iter_Ni_del_lengths)
except StopIteration:
next_Ni_del_pos = -1
next_Ni_del_length = -1
while nt_pos_M > next_M_sub_pos:
# The del contained a sub. Find the position of the next sub after the del.
try:
next_M_sub_pos = next(iter_M_sub_positions)
sub_index += 1
except StopIteration:
break
nt_pos_M += 1
continue
# To make it to this point, the position is not an indel.
if nt_pos_M == next_M_sub_pos:
spec_cov_Ni = spec_covs_Ni[nt_pos_Ni]
nonspec_cov_Ni = nonspec_covs_Ni[nt_pos_Ni]
spec_covs_M[nt_pos_M] += spec_cov_Ni
nonspec_covs_M[nt_pos_M] += nonspec_cov_Ni
nt_col = NT_INT_DICT[string_Ni[nt_pos_Ni]] - 1
spec_sub_covs[sub_index, nt_col] += spec_cov_Ni
nonspec_sub_covs[sub_index, nt_col] += nonspec_cov_Ni
try:
next_M_sub_pos = next(iter_M_sub_positions)
sub_index += 1
except StopIteration:
next_M_sub_pos = -1
else:
spec_covs_M[nt_pos_M] += seq_Ni.spec_covs[nt_pos_Ni]
nonspec_covs_M[nt_pos_M] += seq_Ni.nonspec_covs[nt_pos_Ni]
nt_pos_M += 1
# Record the positions, lengths, and coverages of insertions in M.
insert_starts = []
insert_strings = []
spec_insert_covs = []
nonspec_insert_covs = []
for insert_config, insert_covs in sorted(insert_covs_dict.items()):
insert_starts.append(insert_config[0])
insert_strings.append(insert_config[1])
spec_insert_covs.append(insert_covs[0])
nonspec_insert_covs.append(insert_covs[1])
# Record the positions, lengths, and coverages of deletions in M.
del_starts = []
del_lengths = []
spec_del_covs = []
nonspec_del_covs = []
for del_config, del_covs in sorted(del_covs_dict.items()):
del_starts.append(del_config[0])
del_lengths.append(del_config[1])
spec_del_covs.append(del_covs[0])
nonspec_del_covs.append(del_covs[1])
# Set a consensus seq using the nts with the highest specific cov at each sub position.
consensus_string = seqs_Nb[0].string
for sub_pos, sub_nt_covs in zip(sub_positions, spec_sub_covs):
nt_int = sub_nt_covs.argmax() + 1
consensus_string = consensus_string[: sub_pos] + INT_NT_DICT[nt_int] + consensus_string[sub_pos + 1: ]
self.insert_starts = tuple(insert_starts)
self.insert_strings = tuple(insert_strings)
self.del_starts = tuple(del_starts)
self.del_lengths = tuple(del_lengths)
self.spec_read_count = spec_read_count
self.nonspec_read_count = nonspec_read_count
self.spec_map_read_count = spec_map_read_count
self.nonspec_map_read_count = nonspec_map_read_count
self.spec_read_xtra_5prime_count = spec_read_xtra_5prime_count
self.nonspec_read_xtra_5prime_count = nonspec_read_xtra_5prime_count
self.spec_long_5prime_extension_dict = spec_long_5prime_extension_dict
self.nonspec_long_5prime_extension_dict = nonspec_long_5prime_extension_dict
self.spec_read_3prime_terminus_count_dict = spec_read_3prime_terminus_count_dict
self.nonspec_read_3prime_terminus_count_dict = nonspec_read_3prime_terminus_count_dict
self.spec_covs = tuple(spec_covs_M)
self.nonspec_covs = tuple(nonspec_covs_M)
self.mean_spec_cov = spec_covs_M.mean()
self.mean_nonspec_cov = nonspec_covs_M.mean()
self.spec_sub_covs = spec_sub_covs
self.nonspec_sub_covs = nonspec_sub_covs
self.spec_insert_covs = tuple(spec_insert_covs)
self.nonspec_insert_covs = tuple(nonspec_insert_covs)
self.spec_del_covs = tuple(spec_del_covs)
self.nonspec_del_covs = tuple(nonspec_del_covs)
self.consensus_string = consensus_string
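# Illustrative sketch (hypothetical helper, not used by the workflow): the
# "3-4 different nts at >=1 aligned positions" criterion in the ModifiedSequence
# docstring amounts to counting distinct unambiguous nts per column of
# equal-length, 3'-aligned N strings.
def _sketch_candidate_sub_positions(aligned_strings, min_distinct_nts=3):
    candidate_positions = []
    for pos, column in enumerate(zip(*aligned_strings)):
        if len(set(column) & set(UNAMBIG_NTS)) >= min_distinct_nts:
            candidate_positions.append(pos)
    return candidate_positions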
class TRNASeqDataset(object):
"""Processes reads from a tRNA-seq library. `bin/anvi-trnaseq` is the client."""
TRNA_FEATURE_NAMES = constants.TRNA_FEATURE_NAMES
RELATIVE_ANTICODON_LOOP_INDEX = TRNA_FEATURE_NAMES.index('anticodon_loop') - len(TRNA_FEATURE_NAMES) + 1
# Column headers for supplementary tables written to text files
UNIQ_NONTRNA_HEADER = [
"name",
"read_count",
"truncated_profile_index",
"sequence"
]
TRIMMED_ENDS_HEADER = [
"name",
"unique_name",
"fiveprime_sequence",
"threeprime_sequence",
"read_count"
]
def __init__(self, args=None, run=terminal.Run(), progress=terminal.Progress()):
self.args = args
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
# Argument group 1A: MANDATORY
self.input_fasta_path = A('trnaseq_fasta')
self.sample_id = A('sample_name')
self.out_dir = os.path.abspath(A('output_dir')) if A('output_dir') else None
self.checkpoint_dir = os.path.join(self.out_dir, "CHECKPOINT")
get_checkpoint_subdir = partial(os.path.join, self.checkpoint_dir)
self.checkpoint_subdir_dict = {checkpoint: get_checkpoint_subdir(checkpoint.upper()) for checkpoint in constants.TRNASEQ_CHECKPOINTS}
# Argument group 1B: EXTRAS
self.treatment = A('treatment')
self.overwrite_out_dest = A('overwrite_output_destinations')
self.descrip_path = os.path.abspath(A('description')) if A('description') else None
# Argument group 1C: ADVANCED
self.write_checkpoints = A('write_checkpoints')
self.load_checkpoint = A('load_checkpoint')
self.feature_param_path = os.path.abspath(A('feature_param_file')) if A('feature_param_file') else None
self.param_3prime_termini = A('threeprime_termini')
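# The module-level value of MIN_LENGTH_LONG_5PRIME_EXTENSION is replaced here by the user-provided minimum length of a "long" 5' extension.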
global MIN_LENGTH_LONG_5PRIME_EXTENSION
MIN_LENGTH_LONG_5PRIME_EXTENSION = A('min_length_long_fiveprime')
self.min_trna_frag_size = A('min_trna_fragment_size')
agglom_max_mismatch_freq = A('agglomeration_max_mismatch_freq')
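# Round the mismatch frequency to two decimal places (round(x * 100) / 100); the max indel frequency below is rounded the same way.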
self.agglom_max_mismatch_freq = round(agglom_max_mismatch_freq * 100) / 100
self.skip_indel_profiling = A('skip_INDEL_profiling')
max_indel_freq = A('max_indel_freq')
self.max_indel_freq = round(max_indel_freq * 100) / 100
self.left_indel_buffer = A('left_indel_buffer')
self.right_indel_buffer = A('right_indel_buffer')
# Argument group 1D: PERFORMANCE
self.num_threads = A('num_threads')
self.skip_fasta_check = A('skip_fasta_check')
self.profiling_chunk_size = A('profiling_chunk_size')
self.alignment_target_chunk_size = A('alignment_target_chunk_size')
if not self.input_fasta_path:
raise ConfigError("Please specify the path to a FASTA file of tRNA-seq reads using `--fasta-file` or `-f`.")
if not self.sample_id:
raise ConfigError("Please provide a sample name using `--sample-name` or `-S`.")
if not self.out_dir:
raise ConfigError("Please provide an output directory using `--output-dir` or `-o`.")
self.descrip = None
get_out_file_path = partial(os.path.join, self.out_dir)
self.trnaseq_db_path = get_out_file_path(self.sample_id + "-TRNASEQ.db")
self.analysis_summary_path = get_out_file_path(self.sample_id + "-ANALYSIS_SUMMARY.txt")
# Supplementary text file paths produced by DEBUG flag
self.path_Un_supplement = get_out_file_path(self.sample_id + "-UNIQUED_NONTRNA.txt")
self.path_Tf_ends = get_out_file_path(self.sample_id + "-TRIMMED_ENDS.txt")
self.consol_seqs_with_inconsis_profiles_path = get_out_file_path(self.sample_id + "-CONSOLIDATED_SEQS_WITH_INCONSISTENT_PROFILES.txt")
self.count_consol_Tf = None
# The identification of sequences as tRNA occurs through different means. By the time of the
# first "profile" checkpoint, only Uf are recognized as tRNA. Further processing before the
# second "normalize" checkpoint can show some Ut to be tRNA; mapping can reveal some Un to
# be tRNA by the third "mapping" checkpoint. The changing classification of sequences over
# the workflow means that the contents of the dictionaries and intermediate files storing U,
# T, and N also change. It is important to note that object names do not correspond to these
# classifications. For example, all Ut will be in `uniq_trunc_dict` before the "profile"
# checkpoint, but some Ut can move to `uniq_trna_dict` after being confirmed as tRNA by the
# "normalize" checkpoint. The underlying sequences encapsulated in Ut objects stay in these
# objects despite being recognized as tRNA.
# Not every dict of seq objects changes between checkpoints, yet all existing dicts are
# written at every checkpoint. The alternative would be to reduce the number of intermediate
# files stored at later checkpoints by relying on any unchanged intermediate files from
# earlier checkpoints.
self.intermed_file_path_dict = {}
get_intermed_file_path = partial(os.path.join, self.checkpoint_subdir_dict['profile'])
self.intermed_file_path_dict['profile'] = {
'dict_Uf': get_intermed_file_path("Uf.pkl"),
'dict_Uc_nontrna': get_intermed_file_path("Uc_nontRNA.pkl"),
'dict_Un': get_intermed_file_path("Un.pkl")
}
get_intermed_file_path = partial(os.path.join, self.checkpoint_subdir_dict['normalize'])
self.intermed_file_path_dict['normalize'] = {
'dict_Uf': get_intermed_file_path("Uf.pkl"),
'dict_Us': get_intermed_file_path("Us.pkl"),
'dict_Uc_trna': get_intermed_file_path("Uc_tRNA.pkl"),
'dict_Uc_nontrna': get_intermed_file_path("Uc_nontRNA.pkl"),
'dict_Un': get_intermed_file_path("Un.pkl"),
'dict_Tf': get_intermed_file_path("Tf.pkl"),
'dict_Tc_trna': get_intermed_file_path("Tc_tRNA.pkl"),
'dict_Tc_nontrna': get_intermed_file_path("Tc_nontrna.pkl"),
'dict_Nf': get_intermed_file_path("Nf.pkl"),
'dict_Nc': get_intermed_file_path("Nc.pkl")
}
get_intermed_file_path = partial(os.path.join, self.checkpoint_subdir_dict['map_fragments'])
self.intermed_file_path_dict['map_fragments'] = {
'dict_Uf': get_intermed_file_path("Uf.pkl"),
'dict_Us': get_intermed_file_path("Us.pkl"),
'dict_Uc_trna': get_intermed_file_path("Uc_tRNA.pkl"),
'dict_Um': get_intermed_file_path("Um.pkl"),
'dict_Uc_nontrna': get_intermed_file_path("Uc_nontRNA.pkl"),
'dict_Un': get_intermed_file_path("Un.pkl"),
'dict_Tf': get_intermed_file_path("Tf.pkl"),
'dict_Tc_trna': get_intermed_file_path("Tc_tRNA.pkl"),
'dict_Tc_nontrna': get_intermed_file_path("Tc_nontrna.pkl"),
'dict_Tm': get_intermed_file_path("Tm.pkl"),
'dict_Nf': get_intermed_file_path("Nf.pkl"),
'dict_Nc': get_intermed_file_path("Nc.pkl")
}
get_intermed_file_path = partial(os.path.join, self.checkpoint_subdir_dict['substitutions'])
self.intermed_file_path_dict['substitutions'] = {
'dict_Uf': get_intermed_file_path("Uf.pkl"),
'dict_Us': get_intermed_file_path("Us.pkl"),
'dict_Uc_trna': get_intermed_file_path("Uc_tRNA.pkl"),
'dict_Um': get_intermed_file_path("Um.pkl"),
'dict_Uc_nontrna': get_intermed_file_path("Uc_nontRNA.pkl"),
'dict_Un': get_intermed_file_path("Un.pkl"),
'dict_Tf': get_intermed_file_path("Tf.pkl"),
'dict_Tc_trna': get_intermed_file_path("Tc_tRNA.pkl"),
'dict_Tc_nontrna': get_intermed_file_path("Tc_nontrna.pkl"),
'dict_Tm': get_intermed_file_path("Tm.pkl"),
'dict_Nf': get_intermed_file_path("Nf.pkl"),
'dict_Nc': get_intermed_file_path("Nc.pkl"),
'dict_M': get_intermed_file_path("M.pkl")
}
get_intermed_file_path = partial(os.path.join, self.checkpoint_subdir_dict['indels'])
self.intermed_file_path_dict['indels'] = {
'dict_Uf': get_intermed_file_path("Uf.pkl"),
'dict_Us': get_intermed_file_path("Us.pkl"),
'dict_Uc_trna': get_intermed_file_path("Uc_tRNA.pkl"),
'dict_Um': get_intermed_file_path("Um.pkl"),
'dict_Ui': get_intermed_file_path("Ui.pkl"),
'dict_Uc_nontrna': get_intermed_file_path("Uc_nontRNA.pkl"),
'dict_Un': get_intermed_file_path("Un.pkl"),
'dict_Tf': get_intermed_file_path("Tf.pkl"),
'dict_Tc_trna': get_intermed_file_path("Tc_tRNA.pkl"),
'dict_Tc_nontrna': get_intermed_file_path("Tc_nontrna.pkl"),
'dict_Tm': get_intermed_file_path("Tm.pkl"),
'dict_Ti': get_intermed_file_path("Ti.pkl"),
'dict_Nf': get_intermed_file_path("Nf.pkl"),
'dict_Nc': get_intermed_file_path("Nc.pkl"),
'dict_Ni': get_intermed_file_path("Ni.pkl"),
'dict_M': get_intermed_file_path("M.pkl")
}
self.dict_Uf = {}
self.dict_Uc_trna = {}
self.dict_Uc_nontrna = {}
self.dict_Un = {}
self.dict_Us = {}
self.dict_Um = {}
self.dict_Ui = {}
self.dict_Tf = {}
self.dict_Tc_trna = {}
self.dict_Tc_nontrna = {}
self.dict_Tm = {}
self.dict_Ti = {}
self.dict_Nf = {}
self.dict_Nc = {}
self.dict_Ni = {}
self.dict_M = {}
def process(self):
"""The entry method of TRNASeqDataset, called by `anvi-trnaseq`."""
total_time_start = time.time()
self.sanity_check()
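# A single underscore among the allowed 3' termini means that sequences ending at the discriminator, without any 3' terminus, should also be profiled.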
if '_' in self.param_3prime_termini:
global PROFILE_ABSENT_3PRIME_TERMINUS
PROFILE_ABSENT_3PRIME_TERMINUS = True
load_checkpoint = self.load_checkpoint
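# Each block below either performs the steps leading up to the named checkpoint or, when starting from that checkpoint, loads the intermediate files written there.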
if not load_checkpoint:
# Do the steps before the "profile" checkpoint.
self.create_trnaseq_database()
if self.feature_param_path:
# The user provided an optional tRNA feature parameterization file.
trnaidentifier.TRNAFeatureParameterizer().set_params_from_file(self.feature_param_path)
# Add the user parameterizations as meta-values in the "self" table of the tRNA-seq
# database.
self.report_profiling_parameters()
self.report_fragment_mapping_parameters()
self.report_substitution_analysis_parameters()
self.report_indel_analysis_parameters()
# Profile each (unique) read for tRNA features.
self.profile_trna()
self.report_profile_stats()
if self.write_checkpoints:
self.write_checkpoint_files('profile')
elif load_checkpoint == 'profile':
self.load_checkpoint_files('profile')
self.report_fragment_mapping_parameters()
self.report_substitution_analysis_parameters()
self.report_indel_analysis_parameters()
if (load_checkpoint == 'profile'
or not load_checkpoint):
# Do the steps between the "profile" and "normalize" checkpoints.
# Trim 5' and 3' ends of Uf, forming Tf.
self.trim_trna_ends()
# Trim 3' ends of Uc, forming Tc.
self.trim_truncated_profile_ends()
self.report_trim_stats()
# Consolidate 3' fragments of longer Tf, forming Nf.
self.threeprime_dereplicate_profiled_trna()
# Recover Tc as tRNA by comparing to Nf.
self.threeprime_dereplicate_truncated_sequences()
self.report_3prime_derep_stats()
if self.write_checkpoints:
self.write_checkpoint_files('normalize')
elif load_checkpoint == 'normalize':
self.load_checkpoint_files('normalize')
self.report_fragment_mapping_parameters()
self.report_substitution_analysis_parameters()
self.report_indel_analysis_parameters()
if (load_checkpoint == 'normalize'
or load_checkpoint == 'profile'
or not load_checkpoint):
# Do the steps between the "normalize" and "map_fragments" checkpoints.
if not PROFILE_ABSENT_3PRIME_TERMINUS:
# Recover 3' tRNA sequences lacking a 3' terminus.
self.threeprime_dereplicate_sequences_without_terminus()
# Map fragments derived from the interior and 5' end of tRNA.
self.map_fragments()
# Finalize Nf now that all T found through various means have been added to them.
self.progress.new("Finalizing normalized tRNA sequences")
self.progress.update("...")
for seq_Nf in self.dict_Nf.values():
seq_Nf.init([getattr(self, 'dict_' + category_T)[name_T]
for category_T, name_T in zip(seq_Nf.categories_T, seq_Nf.names_T)])
self.progress.end()
self.report_map_stats()
self.report_N_cov_stats()
if self.write_checkpoints:
self.write_checkpoint_files('map_fragments')
elif load_checkpoint == 'map_fragments':
self.load_checkpoint_files('map_fragments')
self.report_substitution_analysis_parameters()
self.report_indel_analysis_parameters()
if (load_checkpoint == 'map_fragments'
or load_checkpoint == 'normalize'
or load_checkpoint == 'profile'
or not load_checkpoint):
# Do the steps between the "map_fragments" and "substitutions" checkpoints.
# Find modified nucleotides, grouping normalized sequences into modified sequences.
self.find_substitutions()
self.report_sub_stats()
if self.write_checkpoints:
self.write_checkpoint_files('substitutions')
elif load_checkpoint == 'substitutions':
self.load_checkpoint_files('substitutions')
self.report_indel_analysis_parameters()
if (load_checkpoint == 'substitutions'
or load_checkpoint == 'map_fragments'
or load_checkpoint == 'normalize'
or load_checkpoint == 'profile'
or not load_checkpoint):
# Do the steps between the "substitutions" and "indels" checkpoint.
if self.skip_indel_profiling:
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
set_meta_value('count_Nqf_with_indels', 0)
set_meta_value('count_Nc_with_indels', 0)
trnaseq_db.disconnect()
else:
self.find_indels()
# "Finalize" M now that all Nf and Ni have been added to them.
self.progress.new("Finalizing modified tRNA sequences")
self.progress.update("...")
dict_Nf = self.dict_Nf
dict_Ni = self.dict_Ni
for seq_M in self.dict_M.values():
seq_M.init([dict_Nf[name_Nb] for name_Nb in seq_M.names_Nb],
[dict_Ni[name_Ni] for name_Ni in seq_M.names_Ni])
self.progress.end()
self.report_M_stats()
if self.write_checkpoints:
self.write_checkpoint_files('indels')
elif load_checkpoint == 'indels':
self.load_checkpoint_files('indels')
self.report_stats()
self.write_feature_table()
self.write_unconserved_table()
self.write_unpaired_table()
self.write_sequences_table()
self.write_trimmed_table()
self.write_normalized_table()
self.write_modified_table()
# Write supplementary text files.
self.write_nontrna_supplement()
self.write_Tf_ends_supplement()
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Total time elapsed (min)",
time.time() - total_time_start,
is_time_value=True))
# Write an empty line to separate this run from any subsequent run starting from a
# checkpoint writing to the same summary file.
f.write("\n")
def sanity_check(self):
"""Check `anvi-trnaseq` user inputs."""
if os.path.exists(self.out_dir):
self.existing_output_directory_sanity_check()
if self.load_checkpoint:
self.load_checkpoint_sanity_check()
elif not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
filesnpaths.is_output_dir_writable(self.out_dir)
if self.descrip_path:
filesnpaths.is_file_plain_text(self.descrip_path)
self.descrip = open(self.descrip_path).read()
if self.num_threads < 1:
raise ConfigError("Surely you must be joking, Mr. Feynman! "
"`--num-threads` wants a value greater than 0. "
f"Last we checked, {self.num_threads} is not greater than 0.")
self.param_3prime_termini = self.threeprime_termini_sanity_check()
trnaidentifier.TRNAFeatureParameterizer.set_threeprime_termini(self.param_3prime_termini)
# The following variable is only used as part of a heuristic in `find_indels`.
self.max_length_3prime_terminus = max([len(t) for t in self.param_3prime_termini])
self.run.info("Input FASTA file", self.input_fasta_path, nl_after=1)
if not self.skip_fasta_check and not self.load_checkpoint:
self.progress.new("Checking input FASTA defline format")
self.progress.update("...")
utils.check_fasta_id_formatting(self.input_fasta_path)
self.progress.end()
self.run.info_single("FASTA deflines were found to be anvi'o-compliant", mc='green', nl_after=1)
def existing_output_directory_sanity_check(self):
"""Conditions must be fulfilled for the `anvi-trnaseq` output directory to already exist."""
if len(os.listdir(self.out_dir)) == 0:
# There is nothing in the output directory.
pass
elif self.overwrite_out_dest:
if self.load_checkpoint:
raise ConfigError("You cannot use `--load-checkpoint` in conjunction with `--overwrite-output-destinations`. "
"Starting at a checkpoint requires loading intermediate files written to the output "
"directory in a previous `anvi-trnaseq` run, but this directory would be removed with "
"`--overwrite-output-destinations`.")
shutil.rmtree(self.out_dir)
else:
if not self.load_checkpoint:
raise ConfigError(f"The directory that was specified by --output-dir or -o, {self.out_dir}, already exists. "
"Use the flag --overwrite-output-destinations to overwrite this directory.")
def load_checkpoint_sanity_check(self):
"""Needed intermediate files must exist to load from a checkpoint."""
checkpoint_dir = self.checkpoint_dir
load_checkpoint = self.load_checkpoint
intermed_file_path_dict = self.intermed_file_path_dict
checkpoint_subdir = os.path.join(checkpoint_dir, load_checkpoint.upper())
if not os.path.exists(checkpoint_subdir):
raise ConfigError("Intermediate files needed for running `anvi-trnaseq` with `--load-checkpoint` "
f"should be located in {checkpoint_subdir}, but this directory path does not exist. "
"You should probably run `anvi-trnaseq` from the beginning without `--load-checkpoint`. "
"To generate necessary intermediate files for future use of `--load-checkpoint`, use the flag `--write-checkpoints`.")
missing_intermed_files = []
for intermed_file_path in intermed_file_path_dict[load_checkpoint].values():
if not os.path.exists(intermed_file_path):
missing_intermed_files.append(intermed_file_path)
if missing_intermed_files:
raise ConfigError(f"Intermediate files needed for running `anvi-trnaseq` "
f"with `--load-checkpoint {load_checkpoint}` are missing: {', '.join(missing_intermed_files)}. "
"You should probably run `anvi-trnaseq` from the beginning without `--load-checkpoint`. "
"To generate necessary intermediate files for future use of `--load-checkpoint`, use the flag `--write-checkpoints`.")
def threeprime_termini_sanity_check(self):
"""Check validity of provided tRNA 3' termini, returning a list of terminus strings."""
valid_3prime_termini = []
invalid_3prime_termini = []
for terminus_3prime in self.param_3prime_termini.split(','):
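# An underscore stands for the discriminator nucleotide with no 3' extension; it is stored internally as an empty string.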
if terminus_3prime == '_':
valid_3prime_termini.append('')
continue
for nt in terminus_3prime:
if nt not in ALL_NTS:
invalid_3prime_termini.append(terminus_3prime)
break
valid_3prime_termini.append(terminus_3prime)
if invalid_3prime_termini:
raise ConfigError(f"3' termini can consist of A, C, G, T, and N (any nucleotide) "
"or the discriminator nucleotide with no extension, symbolized by a single underscore, \"_\". "
f"The following invalid 3' sequence parameterizations were provided: {', '.join(invalid_3prime_termini)}")
return valid_3prime_termini
def create_trnaseq_database(self):
"""Create an empty tRNA-seq database."""
meta_values = {'sample_id': self.sample_id,
'treatment': self.treatment,
'description': self.descrip if self.descrip else '_No description is provided_',
'INDELs_profiled': not self.skip_indel_profiling}
dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True).create(meta_values)
self.run.info("New tRNA-seq db", self.trnaseq_db_path, nl_after=1)
def report_profiling_parameters(self):
"""Add profiling parameters to the database."""
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
parameterizer = trnaidentifier.TRNAFeatureParameterizer()
for param_tuple in parameterizer.list_accessible_param_tuples():
set_meta_value(param_tuple[0], param_tuple[1])
set_meta_value('min_length_long_5prime_extension', MIN_LENGTH_LONG_5PRIME_EXTENSION)
trnaseq_db.disconnect()
get_summary_line = self.get_summary_line
with open(self.analysis_summary_path, 'a') as f:
for param_name, param_value in parameterizer.list_accessible_param_tuples(pretty=True):
if 'Conserved nucleotides' in param_name:
f.write(get_summary_line(param_name.replace('Conserved nucleotides', "Conserved nts"), param_value))
continue
elif 'Number allowed unconserved' in param_name:
f.write(get_summary_line(param_name.replace('Number allowed unconserved', "Allowed number of unconserved nts"), param_value))
continue
elif 'Number allowed unpaired' in param_name:
f.write(get_summary_line(param_name.replace('Number allowed unpaired', "Allowed number of unpaired bps"), param_value))
continue
f.write(get_summary_line(param_name, param_value))
f.write(get_summary_line("Allowed 3' termini", ",".join(self.param_3prime_termini)))
f.write(get_summary_line("Min length of \"long\" 5' extension", MIN_LENGTH_LONG_5PRIME_EXTENSION))
def report_fragment_mapping_parameters(self):
"""Add fragment mapping parameters to the database."""
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
trnaseq_db.db.set_meta_value('min_map_trna_fragment_size', self.min_trna_frag_size)
trnaseq_db.disconnect()
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Min length of mapped tRNA fragment", self.min_trna_frag_size))
def report_substitution_analysis_parameters(self):
"""Add modification-induced substitution analysis parameters to the database."""
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
trnaseq_db.db.set_meta_value('agglomeration_max_mismatch_freq', self.agglom_max_mismatch_freq)
trnaseq_db.disconnect()
get_summary_line = self.get_summary_line
with open(self.analysis_summary_path, 'a') as f:
f.write(get_summary_line("Agglomeration max mismatch frequency", self.agglom_max_mismatch_freq))
def report_indel_analysis_parameters(self):
"""Add modification-induced indel analysis parameters to the database."""
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
set_meta_value('max_indel_freq', self.max_indel_freq)
set_meta_value('left_indel_buffer', self.left_indel_buffer)
set_meta_value('right_indel_buffer', self.right_indel_buffer)
trnaseq_db.disconnect()
get_summary_line = self.get_summary_line
with open(self.analysis_summary_path, 'a') as f:
f.write(get_summary_line("INDELs profiled", not self.skip_indel_profiling))
f.write(get_summary_line("Max indel frequency", self.max_indel_freq))
f.write(get_summary_line("Left indel buffer", self.left_indel_buffer))
f.write(get_summary_line("Right indel buffer", self.right_indel_buffer))
def get_summary_line(self, label, value, is_time_value=False, padding=68):
"""Return a string formatted to be written to the summary statistics file."""
# Convert an elapsed time given in seconds to minutes for reporting.
if is_time_value:
value = "%.2f" % round(value / 60, 2)
return '%s%s\t%s\n' % (label, ' ' + '.' * (padding - len(label)), value)
def profile_trna(self):
"""Profile tRNA features in reads, finding Uf, Uc, and Un."""
uniq_read_infos = self.unique_reads()
start_time = time.time()
progress = self.progress
pid = "Profiling tRNA features in unique reads"
progress.new(pid)
progress.update("...")
# Count the number of reads and unique reads that have been added to the multiprocessing
# input queue.
total_read_count = 0
total_uniq_count = len(uniq_read_infos)
manager = mp.Manager()
input_queue = manager.Queue()
output_queue = manager.Queue()
profiler = trnaidentifier.Profiler()
processes = [mp.Process(target=profile_worker, args=(input_queue, output_queue, profiler))
for _ in range(self.num_threads)]
for p in processes:
p.start()
# Count the number of unique sequences that have been profiled and fetched from the
# multiprocessing output queue.
fetched_profile_count = 0
input_count = 0
interval_start = 0
profiling_chunk_size = self.profiling_chunk_size
interval_stop = profiling_chunk_size if profiling_chunk_size < total_uniq_count else total_uniq_count
dict_Uf = self.dict_Uf
dict_Uc_nontrna = self.dict_Uc_nontrna
dict_Un = self.dict_Un
pp_total_uniq_count = pp(len(uniq_read_infos))
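# Feed unique reads to the worker processes one chunk at a time, fetching the profiles of each chunk before queuing the next, so the queues hold at most one chunk of work.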
while fetched_profile_count < total_uniq_count:
progress.update_pid(pid)
progress.update(f"{pp(input_count + 1)}-{pp(interval_stop)}/{pp_total_uniq_count}")
while input_count < interval_stop:
for uniq_read_info in uniq_read_infos[interval_start: interval_stop]:
input_queue.put(uniq_read_info)
total_read_count += uniq_read_info[2]
input_count += 1
while fetched_profile_count < interval_stop:
profile, read_count = output_queue.get()
fetched_profile_count += 1
name = profile.name
if profile.is_predicted_trna:
dict_Uf[name] = UniqueFullProfileSequence(profile.input_seq, name, read_count, profile)
else:
if profile.trunc_profile_index:
dict_Uc_nontrna[name] = UniqueTruncatedProfileSequence(profile.input_seq, name, read_count, profile)
else:
dict_Un[name] = UniqueSequence(profile.input_seq, name, read_count)
interval_start = interval_stop
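# Advance the window by a full chunk, or by however many unique reads remain if fewer than a chunk are left.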
interval_stop += profiling_chunk_size if interval_stop + profiling_chunk_size < total_uniq_count else total_uniq_count - interval_stop
for p in processes:
p.terminate()
p.join()
# Profiled seqs were added to the output queue as they were processed, so sort by name.
self.dict_Uf = {name: seq for name, seq in sorted(dict_Uf.items())}
self.dict_Uc_nontrna = {name: seq for name, seq in sorted(dict_Uc_nontrna.items())}
self.dict_Un = {name: seq for name, seq in sorted(dict_Un.items())}
get_summary_line = self.get_summary_line
with open(self.analysis_summary_path, 'a') as f:
f.write(get_summary_line("Time elapsed profiling tRNA (min)", time.time() - start_time, is_time_value=True))
f.write(get_summary_line("Reads processed", total_read_count))
f.write(get_summary_line("Unique seqs processed", total_uniq_count))
progress.end()
self.run.info("Reads processed", total_read_count, mc='green')
self.run.info("Unique seqs processed", total_uniq_count, mc='green')
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
set_meta_value('input_reads', total_read_count)
set_meta_value('input_U', total_uniq_count)
trnaseq_db.disconnect()
def unique_reads(self):
"""Dereplicate input reads."""
self.progress.new("Loading reads")
self.progress.update("...")
fasta = fastalib.SequenceSource(self.input_fasta_path)
names = []
seqs = []
read_count = 0
while next(fasta):
names.append(fasta.id)
seqs.append(fasta.seq)
read_count += 1
fasta.close()
self.read_count = read_count
self.progress.end()
self.progress.new("Dereplicating reads")
self.progress.update("...")
uniq_read_infos = []
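# Each entry is a tuple of (representative seq string, representative read name, read count); these tuples are later placed on the profiling input queue.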
for cluster in Dereplicator(names, seqs).full_length_dereplicate():
uniq_read_infos.append((cluster.member_seqs[0], cluster.member_names[0], len(cluster.member_names)))
self.progress.end()
return uniq_read_infos
def report_profile_stats(self):
"""Report to terminal stats on Uf, Uc, and Un immediately after profiling."""
seq_count_Uf = len(self.dict_Uf)
read_count_Uf = 0
seq_anticodon_count_Uf = 0
read_anticodon_count_Uf = 0
seq_complete_count_Uf = 0
read_complete_count_Uf = 0
max_reads_Uf = 0
mean_read_3prime_length_Uf = 0
seq_short_5prime_count_Uf = 0
read_short_5prime_count_Uf = 0
seq_long_5prime_count_Uf = 0
read_long_5prime_count_Uf = 0
mean_read_5prime_length_Uf = 0
mean_seq_profiled_freq_Uf = 0
mean_read_profiled_freq_Uf = 0
mean_seq_unconserved_freq_Uf = 0
mean_read_unconserved_freq_Uf = 0
mean_seq_unpaired_freq_Uf = 0
mean_read_unpaired_freq_Uf = 0
mean_seq_extrap_freq_Uf = 0
mean_read_extrap_freq_Uf = 0
for seq_Uf in self.dict_Uf.values():
read_count = seq_Uf.read_count
read_count_Uf += read_count
if seq_Uf.anticodon_string:
seq_anticodon_count_Uf += 1
read_anticodon_count_Uf += read_count
if seq_Uf.has_complete_feature_set:
seq_complete_count_Uf += 1
read_complete_count_Uf += read_count
if read_count > max_reads_Uf:
max_reads_Uf = read_count
mean_read_3prime_length_Uf += read_count * seq_Uf.length_3prime_terminus
if seq_Uf.xtra_5prime_length:
if seq_Uf.xtra_5prime_length < MIN_LENGTH_LONG_5PRIME_EXTENSION:
seq_short_5prime_count_Uf += 1
read_short_5prime_count_Uf += read_count
else:
seq_long_5prime_count_Uf += 1
read_long_5prime_count_Uf += read_count
mean_read_5prime_length_Uf += read_count * seq_Uf.xtra_5prime_length
profiled_freq = seq_Uf.profiled_seq_length / len(seq_Uf.string)
mean_seq_profiled_freq_Uf += profiled_freq
mean_read_profiled_freq_Uf += read_count * profiled_freq
unconserved_freq = seq_Uf.num_unconserved / seq_Uf.profiled_seq_length
mean_seq_unconserved_freq_Uf += unconserved_freq
mean_read_unconserved_freq_Uf += read_count * unconserved_freq
unpaired_freq = seq_Uf.num_unpaired / (seq_Uf.num_paired + seq_Uf.num_unpaired)
mean_seq_unpaired_freq_Uf += unpaired_freq
mean_read_unpaired_freq_Uf += read_count * unpaired_freq
mean_seq_extrap_freq_Uf += seq_Uf.num_extrap_5prime_nts
mean_read_extrap_freq_Uf += read_count * seq_Uf.num_extrap_5prime_nts
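# The mean_* variables accumulated totals above; convert them to means here.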
mean_reads_Uf = read_count_Uf / seq_count_Uf
mean_read_3prime_length_Uf /= read_count_Uf
mean_read_5prime_length_Uf /= read_long_5prime_count_Uf
mean_seq_profiled_freq_Uf /= seq_count_Uf
mean_read_profiled_freq_Uf /= read_count_Uf
mean_seq_unconserved_freq_Uf /= seq_count_Uf
mean_read_unconserved_freq_Uf /= read_count_Uf
mean_seq_unpaired_freq_Uf /= seq_count_Uf
mean_read_unpaired_freq_Uf /= read_count_Uf
mean_seq_extrap_freq_Uf /= seq_count_Uf
mean_read_extrap_freq_Uf /= read_count_Uf
seq_count_Uc = len(self.dict_Uc_nontrna)
read_count_Uc = 0
seq_anticodon_count_Uc = 0
read_anticodon_count_Uc = 0
max_reads_Uc = 0
mean_read_3prime_length_Uc = 0
mean_seq_profiled_freq_Uc = 0
mean_read_profiled_freq_Uc = 0
mean_seq_unconserved_freq_Uc = 0
mean_read_unconserved_freq_Uc = 0
mean_seq_unpaired_freq_Uc = 0
mean_read_unpaired_freq_Uc = 0
for seq_Uc in self.dict_Uc_nontrna.values():
read_count = seq_Uc.read_count
read_count_Uc += read_count
if seq_Uc.anticodon_string:
seq_anticodon_count_Uc += 1
read_anticodon_count_Uc += read_count
if read_count > max_reads_Uc:
max_reads_Uc = read_count
mean_read_3prime_length_Uc += read_count * seq_Uc.length_3prime_terminus
profiled_freq = seq_Uc.profiled_seq_length / len(seq_Uc.string)
mean_seq_profiled_freq_Uc += profiled_freq
mean_read_profiled_freq_Uc += read_count * profiled_freq
unconserved_freq = seq_Uc.num_unconserved / seq_Uc.profiled_seq_length
mean_seq_unconserved_freq_Uc += unconserved_freq
mean_read_unconserved_freq_Uc += read_count * unconserved_freq
unpaired_freq = seq_Uc.num_unpaired / (seq_Uc.num_paired + seq_Uc.num_unpaired)
mean_seq_unpaired_freq_Uc += unpaired_freq
mean_read_unpaired_freq_Uc += read_count * unpaired_freq
mean_reads_Uc = read_count_Uc / seq_count_Uc
mean_read_3prime_length_Uc /= read_count_Uc
mean_seq_profiled_freq_Uc /= seq_count_Uc
mean_read_profiled_freq_Uc /= read_count_Uc
mean_seq_unconserved_freq_Uc /= seq_count_Uc
mean_read_unconserved_freq_Uc /= read_count_Uc
mean_seq_unpaired_freq_Uc /= seq_count_Uc
mean_read_unpaired_freq_Uc /= read_count_Uc
seq_count_Un = len(self.dict_Un)
read_count_Un = 0
max_reads_Un = 0
for seq_Un in self.dict_Un.values():
read_count = seq_Un.read_count
read_count_Un += read_count
if read_count > max_reads_Un:
max_reads_Un = read_count
mean_reads_Un = read_count_Un / seq_count_Un
run = self.run
run.info_single("Results of profiling (subject to change -- see summary output file for final results)", nl_before=2, nl_after=1)
run.info_single("Unique seq counts")
run.info("tRNA profile", seq_count_Uf)
run.info("Truncated tRNA profile", seq_count_Uc)
run.info("No tRNA profile", seq_count_Un, nl_after=1)
run.info_single("Read counts")
run.info("tRNA profile", read_count_Uf)
run.info("Truncated tRNA profile", read_count_Uc)
run.info("No tRNA profile", read_count_Un, nl_after=1)
run.info_single("Unique seqs with tRNA profile")
run.info("Count with anticodon", seq_anticodon_count_Uf)
run.info("Count with complete feature set", seq_complete_count_Uf)
run.info("Mean reads per seq", round(mean_reads_Uf, 1))
run.info("Max reads per seq", max_reads_Uf)
run.info(f"Count with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION - 1} extra 5' nts", seq_short_5prime_count_Uf)
run.info(f"Count with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", seq_long_5prime_count_Uf)
run.info("Mean profiled nt freq", round(mean_seq_profiled_freq_Uf, 3))
run.info("Mean freq of unconserved in profiled nts", round(mean_seq_unconserved_freq_Uf, 4))
run.info("Mean freq of unpaired in stem nts", round(mean_seq_unpaired_freq_Uf, 4))
run.info("Mean extrapolated 5' nt freq", round(mean_seq_extrap_freq_Uf, 3), nl_after=1)
run.info_single("Unique seqs with truncated tRNA profile")
run.info("Count with anticodon", seq_anticodon_count_Uc)
run.info("Mean reads per seq", round(mean_reads_Uc, 1))
run.info("Max reads per seq", max_reads_Uc)
run.info("Mean profiled nt freq", round(mean_seq_profiled_freq_Uc, 3))
run.info("Mean freq of unconserved in profiled nts", round(mean_seq_unconserved_freq_Uc, 4))
run.info("Mean freq of unpaired in stem nts", round(mean_seq_unpaired_freq_Uc, 4), nl_after=1)
run.info_single("Unique seqs with no tRNA profile")
run.info("Mean reads per seq", round(mean_reads_Un, 1))
run.info("Max reads per seq", max_reads_Un, nl_after=1)
run.info_single("Reads with tRNA profile")
run.info("Count with anticodon", read_anticodon_count_Uf)
run.info("Count with complete feature set", read_complete_count_Uf)
run.info("Mean length 3' terminus", round(mean_read_3prime_length_Uf, 1))
run.info(f"Count with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION - 1} extra 5' nts", read_short_5prime_count_Uf)
run.info(f"Count with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", read_long_5prime_count_Uf)
run.info(f"Mean length ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} nt extension", round(mean_read_5prime_length_Uf, 1))
run.info("Mean profiled nt freq", round(mean_read_profiled_freq_Uf, 3))
run.info("Mean freq of unconserved in profiled nts", round(mean_read_unconserved_freq_Uf, 4))
run.info("Mean freq of unpaired in stem nts", round(mean_read_unpaired_freq_Uf, 4))
run.info("Mean extrapolated 5' nt freq", round(mean_read_extrap_freq_Uf, 3), nl_after=1)
run.info_single("Reads with truncated tRNA profile")
run.info("Spans anticodon", read_anticodon_count_Uc)
run.info("Mean length 3' terminus", round(mean_read_3prime_length_Uc, 1))
run.info("Mean profiled nt freq", round(mean_read_profiled_freq_Uc, 3))
run.info("Mean freq of unconserved in profiled nts", round(mean_read_unconserved_freq_Uc, 4))
run.info("Mean freq of unpaired in stem nts", round(mean_read_unpaired_freq_Uc, 4), nl_after=2)
def trim_trna_ends(self):
"""Trim any nts 5' of the acceptor stem and 3' of the discriminator from Uf, forming Tf."""
start_time = time.time()
self.progress.new("Trimming the 3' and 5' ends of profiled tRNA")
self.progress.update("...")
seqs_Tf = self.get_trimmed_seqs([seq_Uf for seq_Uf in self.dict_Uf.values()], TrimmedFullProfileSequence)
dict_Tf = self.dict_Tf
for seq_Tf in sorted(seqs_Tf, key=lambda seq_Tf: seq_Tf.name):
dict_Tf[seq_Tf.name] = seq_Tf
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed trimming profiled tRNA (min)", time.time() - start_time, is_time_value=True))
self.progress.end()
def get_trimmed_seqs(self, seqs_U, class_T):
"""Find Tf or Tc from Uf or Uc, respectively."""
names = [seq_U.name for seq_U in seqs_U]
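# Tf strings drop both the extra 5' nts and the 3' terminus of each Uf, whereas Tc strings drop only the 3' terminus, presumably because truncated profiles carry no extra 5' nts.
# Identical trimmed strings are then grouped, and one T object encapsulates the U of each group.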
if class_T == TrimmedFullProfileSequence:
strings_T = [seq_U.string[seq_U.xtra_5prime_length: len(seq_U.string) - seq_U.length_3prime_terminus] for seq_U in seqs_U]
elif class_T == TrimmedTruncatedProfileSequence:
strings_T = [seq_U.string[: len(seq_U.string) - seq_U.length_3prime_terminus] for seq_U in seqs_U]
clusters = Dereplicator(names, strings_T, extras=seqs_U).full_length_dereplicate()
seqs_T = [class_T(cluster.member_seqs[0], cluster.member_extras) for cluster in clusters]
return seqs_T
def trim_truncated_profile_ends(self):
"""Trim any nts 3' of the discriminator from Uc, forming Tc."""
start_time = time.time()
self.progress.new("Trimming the 3' ends of seqs with truncated tRNA profiles")
self.progress.update("...")
seqs_Tc = self.get_trimmed_seqs([seq_Uc for seq_Uc in self.dict_Uc_nontrna.values()], TrimmedTruncatedProfileSequence)
dict_Tc_nontrna = self.dict_Tc_nontrna
for seq_Tc in sorted(seqs_Tc, key=lambda seq_Tc: seq_Tc.name):
dict_Tc_nontrna[seq_Tc.name] = seq_Tc
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed trimming seqs with truncated feature profile (min)", time.time() - start_time, is_time_value=True))
self.progress.end()
def report_trim_stats(self):
"""Report to terminal stats to the terminal on Tf and Tc immediately after trimming steps."""
count_Tf = len(self.dict_Tf)
anticodon_count_Tf = 0
complete_count_Tf = 0
mean_uniq_seqs_Tf = len(self.dict_Uf) / count_Tf
single_count_Tf = 0
mean_reads_Tf = 0
max_reads_Tf = 0
long_5prime_count_Tf = 0
for seq_Tf in self.dict_Tf.values():
if seq_Tf.contains_anticodon:
anticodon_count_Tf += 1
if seq_Tf.has_complete_feature_set:
complete_count_Tf += 1
if len(seq_Tf.names_U) == 1:
single_count_Tf += 1
read_count = seq_Tf.read_count
mean_reads_Tf += read_count
if read_count > max_reads_Tf:
max_reads_Tf = read_count
if seq_Tf.long_5prime_extension_dict:
long_5prime_count_Tf += 1
mean_reads_Tf /= count_Tf
count_Tc = len(self.dict_Tc_nontrna)
anticodon_count_Tc = 0
mean_uniq_seqs_Tc = len(self.dict_Uc_nontrna) / count_Tc
single_uniq_seq_count_Tc = 0
mean_reads_Tc = 0
max_reads_Tc = 0
for seq_Tc in self.dict_Tc_nontrna.values():
if seq_Tc.contains_anticodon:
anticodon_count_Tc += 1
if len(seq_Tc.names_U) == 1:
single_uniq_seq_count_Tc += 1
read_count = seq_Tc.read_count
mean_reads_Tc += read_count
if read_count > max_reads_Tc:
max_reads_Tc = read_count
mean_reads_Tc /= count_Tc
run = self.run
run.info_single("Results of trimming (subject to change -- see summary output file for final results)", nl_before=2, nl_after=1)
run.info_single("Trimmed seq counts")
run.info("tRNA profile", count_Tf)
run.info("Truncated tRNA profile", count_Tc, nl_after=1)
run.info_single("Trimmed seqs with tRNA profile")
run.info("Count with anticodon", anticodon_count_Tf)
run.info("Count with complete feature set", complete_count_Tf)
run.info("Mean unique seqs per seq", round(mean_uniq_seqs_Tf, 1))
run.info("Count with single unique seq", single_count_Tf)
run.info("Mean reads per seq", round(mean_reads_Tf, 1))
run.info("Max reads per seq", max_reads_Tf)
run.info(f"Count with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", long_5prime_count_Tf, nl_after=1)
run.info_single("Trimmed seqs with truncated tRNA profile")
run.info("Count with anticodon", anticodon_count_Tc)
run.info("Mean unique seqs per seq", round(mean_uniq_seqs_Tc, 1))
run.info("Count with single unique seq", single_uniq_seq_count_Tc)
run.info("Mean reads per seq", round(mean_reads_Tc, 1))
run.info("Max reads per seq", max_reads_Tc, nl_after=2)
def threeprime_dereplicate_profiled_trna(self):
"""Dereplicate Tf from the 3' end of longer Tf.
EXAMPLE:
Nf (Tf 1): TCCGTGATAGTTTAATGGTCAGAATGGGCGCTTGTCGCGTGCCAGATCGGGGTTCAATTCCCCGTCGCGGAG
Tf 2 : AATGGGCGCTTGTCGCGTGCCAGATCGGGGTTCAATTCCCCGTCGCGGAG
Tf 3 : GCGTGCCAGATCGGGGTTCAATTCCCCGTCGCGGAG
"""
start_time = time.time()
pid = "Dereplicating trimmed tRNA seqs from the 3' end"
self.progress.new(pid)
self.progress.update("...")
# Prefix dereplicate Tf from the 3' end.
names = []
reverse_strings = []
seqs_Tf = []
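# Reversing the strings turns shared 3' ends into shared prefixes for the dereplicator.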
for name, seq_Tf in self.dict_Tf.items():
names.append(name)
reverse_strings.append(seq_Tf.string[::-1])
seqs_Tf.append(seq_Tf)
clusters = Dereplicator(names, reverse_strings, extras=seqs_Tf).prefix_dereplicate()
# Profiling may have found multiple Tf that would here be 3'-dereplicated as having
# complete, but different, feature profiles. Consider the shortest "completely profiled" Tf
# in the cluster to have the correct profile. Reclassify discrepant 5' nts from Uf in longer
# Tf as extra 5' nts. Transfer the profile information from the representative Uf of the
# shortest Tf to Uf of longer Tf, replacing these longer Uf with Us objects. Produce a new
# Tf object from these Uf and Us.
# Similarly, the longest Tf in the cluster may have an erroneous "incomplete profile." If
# there is a shorter Tf in the cluster with a complete profile, then any longer Tf with an
# incomplete profile can be reevaluated in the same manner.
# This step, which likely affects a tiny number of Tf, helps reduce the number of extra,
# wrong nts at the 5' end of downstream seed sequences. In debug mode, consolidated Tf are
# written to a file.
# Nota bene: A complete profile may be extrapolated at the 5' end of the 5' strand of the
# acceptor stem. By default, only 1 nt may be extrapolated in the stem when all other nts
# form base pairs. So it is hard to see how an extrapolated complete profile is more likely
# to be inaccurate than the removed profiles of longer sequences in the cluster.
# Do not check for feature-by-feature agreement among clustered profiles here, as 3' tRNA
# fragments occasionally have some incorrect feature positions due to a paucity of sequence
# information available in fragment profiling. It is conceivable that the seed sequence of
# the cluster is assigned a wrong complete or incomplete profile while all of the shorter
# sequences in the cluster are assigned correct incomplete profiles -- presumably a rare
# inaccuracy that goes unchecked.
self.progress.update_pid(pid)
self.progress.update("Inspecting normalized seq clusters")
dict_Nf = self.dict_Nf
dict_Uf = self.dict_Uf
# It is possible for trimmed sequences from multiple clusters to consolidate.
self.count_consol_Tf = 0
dict_consol_Tf = {}
# This dict is for an edge case explained below.
dict_Tf_His = {}
if anvio.DEBUG:
consol_seqs_with_inconsis_profiles_file = open(self.consol_seqs_with_inconsis_profiles_path, 'w')
consol_seqs_with_inconsis_profiles_file.write("Index\tTrimmed (0) or Unique (1)\tSequence\n")
inconsis_profile_cluster_count = 0
for cluster in clusters:
# Skip initialization of Nf objects, as additional Tf members are later added to the
# objects after dereplicating Tc and mapping Tm.
seqs_Tf = cluster.member_extras
if len(seqs_Tf) == 1:
dict_Nf[seqs_Tf[0].name] = NormalizedFullProfileSequence(seqs_Tf)
continue
# Check that there are no shorter Tf in the cluster with a "complete profile".
complete_profile_indices = []
for index_Tf, seq_Tf in enumerate(seqs_Tf):
if seq_Tf.has_complete_feature_set:
complete_profile_indices.append(index_Tf)
if not complete_profile_indices:
dict_Nf[seqs_Tf[0].name] = NormalizedFullProfileSequence(seqs_Tf)
continue
if complete_profile_indices == [0]:
dict_Nf[seqs_Tf[0].name] = NormalizedFullProfileSequence(seqs_Tf)
continue
# Reaching this point means that there are multiple Tf with "complete profiles" in the
# cluster.
# If the two shortest Tf with complete feature profiles differ by the
# post-transcriptionally added 5'-G of tRNA-His, then they should both be maintained as
# separate Tf.
if seqs_Tf[complete_profile_indices[-2]].has_His_G:
if seqs_Tf[complete_profile_indices[-1]].string == seqs_Tf[complete_profile_indices[-2]].string[1: ]:
if len(complete_profile_indices) == 2:
assert complete_profile_indices[-2] == 0
dict_Tf_His[seqs_Tf[1].name] = seqs_Tf
continue
# Perhaps more than two Tf in the cluster have "complete" profiles, though this
# has not been checked. In this case, consolidate the Tf, retaining the profile
# of the shortest.
if anvio.DEBUG:
# Report consolidated Tf with different complete feature profiles.
inconsis_profile_cluster_count += 1
for seq_Tf in seqs_Tf[: complete_profile_indices[-1] + 1]:
consol_seqs_with_inconsis_profiles_file.write(str(inconsis_profile_cluster_count) + "\t")
consol_seqs_with_inconsis_profiles_file.write("0\t")
consol_seqs_with_inconsis_profiles_file.write(seq_Tf.string + "\n")
for name_Uf in seq_Tf.names_U:
consol_seqs_with_inconsis_profiles_file.write(str(inconsis_profile_cluster_count) + "\t")
consol_seqs_with_inconsis_profiles_file.write("1\t")
consol_seqs_with_inconsis_profiles_file.write(dict_Uf[name_Uf].string + "\n")
short_seq_Tf = seqs_Tf[complete_profile_indices[-1]]
if short_seq_Tf.name in dict_consol_Tf:
# Tf from multiple clusters consolidate with the same shorter Tf with a complete
# profile.
# Some of the longer Tf with rejected "complete" profiles occur in multiple
# clusters. Clearly, the longest seed Tf, which is also being consolidated, must
# differ between the clusters. Tf in the clusters that are shorter than the Tf with
# the selected complete profile must be the same in each cluster.
long_seqs_Tf = dict_consol_Tf[short_seq_Tf.name]['long_seqs_Tf']
encountered_long_Tf_names = [seq_Tf.name for seq_Tf in long_seqs_Tf]
for long_seq_Tf in seqs_Tf[: complete_profile_indices[-1]]:
if long_seq_Tf.name not in encountered_long_Tf_names:
long_seqs_Tf.append(long_seq_Tf)
else:
# This is the first time the shortest Tf with a complete profile has been processed
# from a cluster.
dict_consol_Tf[short_seq_Tf.name] = {'short_seq_Tf': short_seq_Tf,
'long_seqs_Tf': seqs_Tf[: complete_profile_indices[-1]],
'Nf_members': seqs_Tf[complete_profile_indices[-1] + 1: ]}
if anvio.DEBUG:
consol_seqs_with_inconsis_profiles_file.close()
# Consider the following edge case. One cluster had two Tf with complete profiles, Tf1 and
# Tf2, so the two were consolidated, forming Nf1.
# Tf1: GGTGGGAGAATTCCCGAGTGGCCAAGGGGGGCAGACTGTGTATCTGTTGCGTTTCGCTTCGATGGTTCGAATCCATCTTCTCCCA
# Tf2 == Nf1: GGGAGAATTCCCGAGTGGCCAAGGGGGGCAGACTGTGTATCTGTTGCGTTTCGCTTCGATGGTTCGAATCCATCTTCTCCCA
# Another cluster had two Tf, Tf3 and the same Tf2, only differing by a supposed tRNA-His
# 5'-G.
# Tf3: GGGGAGAATTCCCGAGTGGCCAAGGGGGGCAGACTGTGTATCTGTTGCGTTTCGCTTCGATGGTTCGAATCCATCTTCTCCCA
# Tf2: GGGAGAATTCCCGAGTGGCCAAGGGGGGCAGACTGTGTATCTGTTGCGTTTCGCTTCGATGGTTCGAATCCATCTTCTCCCA
# Rather than creating another Nf, Nf2, seeded by Tf3, consolidate Tf from the two clusters.
# This avoids producing two Nf, one of which is a 3' subsequence of the other.
for name, seqs_Tf in dict_Tf_His.items():
if name in dict_consol_Tf:
dict_consol_Tf[name]['long_seqs_Tf'].append(seqs_Tf[0])
else:
dict_Nf[seqs_Tf[0].name] = NormalizedFullProfileSequence(seqs_Tf)
for subdict_consol_Tf in dict_consol_Tf.values():
self.count_consol_Tf += 1
consol_seq_Tf = self.consolidate_trimmed_sequences(subdict_consol_Tf['short_seq_Tf'], subdict_consol_Tf['long_seqs_Tf'])
dict_Nf[consol_seq_Tf.name] = NormalizedFullProfileSequence([consol_seq_Tf] + subdict_consol_Tf['Nf_members'])
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed 3'-dereplicating trimmed profiled seqs", time.time() - start_time, is_time_value=True))
self.progress.end()
def consolidate_trimmed_sequences(self, short_seq_Tf, long_seqs_Tf):
"""Consolidate longer Tf with a shorter Tf by pooling all of their Uf -- changing their
profile information -- and generating a new Tf object."""
short_seq_Tf_string = short_seq_Tf.string
dict_Uf = self.dict_Uf
short_seq_Uf = dict_Uf[short_seq_Tf.names_U[0]]
# To transfer profile information from the representative short Uf to longer Uf, determine
# where features are relative to the 3' end of the short Uf, which is found in the other Uf.
replacement_info_dict = {'string_T': short_seq_Tf.string}
feature_index_adjustment = -short_seq_Uf.xtra_5prime_length - len(short_seq_Tf_string)
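# Convert feature indices from U coordinates to negative offsets from the 3' end of the trimmed seq, e.g., the first position of T maps to -len(T), so they can be applied to U of any length.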
feature_starts_from_T_3prime = []
for feature_start_index in short_seq_Uf.feature_start_indices:
if isinstance(feature_start_index, int):
feature_starts_from_T_3prime.append(feature_start_index + feature_index_adjustment)
else:
feature_starts_from_T_3prime.append(tuple([strand_start_index + feature_index_adjustment for strand_start_index in feature_start_index]))
replacement_info_dict['feature_starts_from_T_3prime'] = feature_starts_from_T_3prime
feature_stops_from_T_3prime = []
for feature_stop_index in short_seq_Uf.feature_stop_indices:
if isinstance(feature_stop_index, int):
feature_stops_from_T_3prime.append(feature_stop_index + feature_index_adjustment)
else:
feature_stops_from_T_3prime.append(tuple([strand_stop_index + feature_index_adjustment for strand_stop_index in feature_stop_index]))
replacement_info_dict['feature_stops_from_T_3prime'] = feature_stops_from_T_3prime
replacement_info_dict['has_His_G'] = short_seq_Uf.has_His_G
replacement_info_dict['alpha_start_from_T_3prime'] = None if short_seq_Uf.alpha_start is None else short_seq_Uf.alpha_start + feature_index_adjustment
replacement_info_dict['alpha_stop_from_T_3prime'] = None if short_seq_Uf.alpha_stop is None else short_seq_Uf.alpha_stop + feature_index_adjustment
replacement_info_dict['beta_start_from_T_3prime'] = None if short_seq_Uf.beta_start is None else short_seq_Uf.beta_start + feature_index_adjustment
replacement_info_dict['beta_stop_from_T_3prime'] = None if short_seq_Uf.beta_stop is None else short_seq_Uf.beta_stop + feature_index_adjustment
replacement_info_dict['anticodon_string'] = short_seq_Uf.anticodon_string
replacement_info_dict['anticodon_aa'] = short_seq_Uf.anticodon_aa
replacement_info_dict['contains_anticodon'] = short_seq_Uf.contains_anticodon
replacement_info_dict['num_conserved'] = short_seq_Uf.num_conserved
replacement_info_dict['num_unconserved'] = short_seq_Uf.num_unconserved
replacement_info_dict['num_paired'] = short_seq_Uf.num_paired
replacement_info_dict['num_unpaired'] = short_seq_Uf.num_unpaired
unconserved_info_from_T_3prime = []
for unconserved_tuple in short_seq_Uf.unconserved_info:
unconserved_info_from_T_3prime.append((unconserved_tuple[0] + feature_index_adjustment,
unconserved_tuple[1],
unconserved_tuple[2]))
replacement_info_dict['unconserved_info_from_T_3prime'] = unconserved_info_from_T_3prime
unpaired_info_from_T_3prime = []
for unpaired_tuple in short_seq_Uf.unpaired_info:
unpaired_info_from_T_3prime.append((unpaired_tuple[0] + feature_index_adjustment,
unpaired_tuple[1] + feature_index_adjustment,
unpaired_tuple[2],
unpaired_tuple[3]))
replacement_info_dict['unpaired_info_from_T_3prime'] = unpaired_info_from_T_3prime
replacement_info_dict['profiled_seq_without_terminus_length'] = short_seq_Uf.profiled_seq_length - short_seq_Uf.length_3prime_terminus
dict_Tf = self.dict_Tf
dict_Tf.pop(short_seq_Tf.name)
seqs_U = [dict_Uf[name_Uf] for name_Uf in short_seq_Tf.names_U]
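# Dissociate the U from their former T objects; they are regrouped below into a single consolidated Tf.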
for seq_Uf in seqs_U:
seq_Uf.name_T = None
dict_Us = self.dict_Us
for long_seq_Tf in long_seqs_Tf:
dict_Tf.pop(long_seq_Tf.name)
for seq_Uf in [dict_Uf[name_Uf] for name_Uf in long_seq_Tf.names_U]:
seq_Uf.name_T = None
dict_Uf.pop(seq_Uf.name)
seq_Us = UniqueTransferredProfileSequence(seq_Uf, replacement_info_dict)
seqs_U.append(seq_Us)
dict_Us[seq_Us.name] = seq_Us
consol_seqs_Tf = self.get_trimmed_seqs(seqs_U, TrimmedFullProfileSequence)
if len(consol_seqs_Tf) > 1:
raise ConfigError(f"Consolidation should have produced only 1 trimmed profiled tRNA sequence, not {len(consol_seqs_Tf)}.")
consol_seq_Tf = consol_seqs_Tf[0]
dict_Tf[consol_seq_Tf.name] = consol_seq_Tf
return consol_seq_Tf
def threeprime_dereplicate_truncated_sequences(self):
"""Recover Tc that are found to be 3' subseqs of Nf and thus legitimate 3' tRNA fragments.
These Tc are folded into Nf, while unrecovered Tc are themselves 3'-dereplicated, forming
another pool of Nc."""
start_time = time.time()
self.progress.new("Dereplicating trimmed seqs with a truncated feature profile")
self.progress.update("...")
# Prefix dereplicate both Tc and Nf from the 3' end.
names_Tc = []
reverse_Tc_strings = []
seqs_Tc = []
for name, seq_Tc in self.dict_Tc_nontrna.items():
names_Tc.append(name)
reverse_Tc_strings.append(seq_Tc.string[::-1])
seqs_Tc.append(seq_Tc)
names_Nf = []
reverse_Nf_strings = []
seqs_Nf = []
for name, seq_Nf in self.dict_Nf.items():
names_Nf.append(name)
reverse_Nf_strings.append(seq_Nf.string[::-1])
seqs_Nf.append(seq_Nf)
clusters = Dereplicator(names_Tc + names_Nf,
reverse_Tc_strings + reverse_Nf_strings,
extras=seqs_Tc + seqs_Nf).prefix_dereplicate()
# Associate each Tc with any Nf that contain it as a 3' subseq. Since a Tc can be a 3'
# subseq of multiple Nf, do not reconstruct a feature profile for the Tc. (Similarly, the
# seed Tf in an Nf need not have the same profile as other Tf in the Nf.) Uc in the Tc are
# therefore not included in the features table of the tRNA-seq database.
# Clusters cannot contain > 1 Nf, as these have already been 3'-dereplicated (by
# definition). Nf can seed clusters and also be members of clusters seeded by a Tc. In the
# latter case, only Tc that are shorter than the Nf in the cluster (3' subseqs of the Nf)
# are incorporated as members of the Nf.
# There are three types of cluster: 1. clusters consisting of a single Nf (ignore), 2.
# clusters containing an Nf as seed or member with shorter Tc members, and 3. clusters
# seeded by Tc. If a Tc is found in group 2 (part of one or more longer Nf) then ignore it
# in group 3 (do not include it in Nc formed from group 3 clusters). The alternatives do not
# make sense -- including the Tc in Nc but not Nf; or withholding the Tc from both Nc and
# Nf, perhaps as a new category of sequence.
# This dict relates Tc to Nf containing them.
dict_Tc_Nf = defaultdict(list)
# This dict relates Tc to other Tc found to be subseqs of the former.
dict_Tc_Tc = {}
for cluster in clusters:
if len(cluster.member_names) == 1:
if isinstance(cluster.member_extras[0], NormalizedSequence):
continue
seq_Nf = None
seed_seq_Tc = cluster.member_extras[0] if isinstance(cluster.member_extras[0], TrimmedTruncatedProfileSequence) else None
if seed_seq_Tc:
members_seed_seq_Tc = []
dict_Tc_Tc[seed_seq_Tc.name] = (seed_seq_Tc, members_seed_seq_Tc)
for seq in cluster.member_extras:
# Members of each cluster are pre-sorted in descending order of seq length. There
# cannot be an Nf and Tc of the same length.
if seq_Nf:
if isinstance(seq, TrimmedTruncatedProfileSequence):
dict_Tc_Nf[seq.name].append(seq_Nf)
continue
else:
raise ConfigError("It appears that a cluster in the 3' dereplication "
"of trimmed sequences with truncated profiles and normalized sequences with full profiles "
"contains >1 normalized sequence, when it should only contain 0 or 1.")
if isinstance(seq, NormalizedSequence):
seq_Nf = seq
continue
# The cluster is seeded by a Tc.
members_seed_seq_Tc.append(seq)
# Add Tc to matching Nf.
dict_Tc_nontrna = self.dict_Tc_nontrna
dict_Tc_trna = self.dict_Tc_trna
dict_Uc_nontrna = self.dict_Uc_nontrna
dict_Uc_trna = self.dict_Uc_trna
# It is important to determine whether Tc contain an anticodon to enable the later
# measurement of isoacceptor abundances. A truncated profile may stop 3' of the anticodon,
# but the anticodon may still be in the sequence. The presence of the anticodon in Tc is
# therefore inferred from the longest Tf in the matching Nf.
dict_Nf_anticodon = {} # This dict saves time finding the position of the anticodon relative to the 3' terminus of Nf
RELATIVE_ANTICODON_LOOP_INDEX = self.RELATIVE_ANTICODON_LOOP_INDEX
dict_Tf = self.dict_Tf
dict_Uf = self.dict_Uf
for name_Tc, seqs_Nf in dict_Tc_Nf.items():
seq_Tc = dict_Tc_nontrna.pop(name_Tc)
# Tc has been confirmed as tRNA, so transfer the object between dicts.
seq_Tc.category = 'trna'
dict_Tc_trna[seq_Tc.name] = seq_Tc
length_Tc = len(seq_Tc.string)
seqs_Uc = []
for name_Uc in seq_Tc.names_U:
# Uc has been confirmed as tRNA.
seq_Uc = dict_Uc_nontrna.pop(name_Uc)
dict_Uc_trna[name_Uc] = seq_Uc
seqs_Uc.append(seq_Uc)
for seq_Nf in seqs_Nf:
seq_Nf.names_T.append(seq_Tc.name)
seq_Nf.categories_T.append('Tc_trna')
length_Nf = len(seq_Nf.string)
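# Tc is a 3' subseq of Nf, so it occupies positions len(Nf) - len(Tc) through len(Nf) in Nf.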
seq_Nf.starts_T_in_N.append(length_Nf - length_Tc)
seq_Nf.stops_T_in_N.append(length_Nf)
seq_Tc.names_N.append(seq_Nf.name)
if not seq_Tc.contains_anticodon:
# Determine from the first Nf in which Tc is found whether Tc contains the
# anticodon.
seq_Tf = dict_Tf[seq_Nf.names_T[0]]
try:
# The position of the anticodon in Nf has already been found.
anticodon_start_relative_to_3prime_terminus = dict_Nf_anticodon[seq_Nf.name]
except KeyError:
try:
anticodon_loop_start = seq_Tf.feature_start_indices[RELATIVE_ANTICODON_LOOP_INDEX]
except IndexError:
# The anticodon loop was not reached in the profile.
anticodon_loop_start = -1
if anticodon_loop_start > -1:
# The anticodon loop was profiled.
anticodon_start = anticodon_loop_start + 2
# The position of the anticodon relative to the 3' terminus is a negative number.
anticodon_start_relative_to_3prime_terminus = anticodon_start - dict_Uf[seq_Tf.names_U[0]].feature_start_indices[-1]
else:
# The anticodon loop was not profiled, indicated by a positive number.
anticodon_start_relative_to_3prime_terminus = 1
dict_Nf_anticodon[seq_Nf.name] = anticodon_start_relative_to_3prime_terminus
if anticodon_start_relative_to_3prime_terminus == 1:
continue
if length_Tc + anticodon_start_relative_to_3prime_terminus >= 0:
seq_Tc.contains_anticodon = True
for seq_Uc in seqs_Uc:
seq_Uc.contains_anticodon = True
# Tc that don't match Nf are grouped into Nc.
dict_Nc = self.dict_Nc
for seed_Tc_name, entry in dict_Tc_Tc.items():
seed_seq_Tc, member_seqs_Tc = entry
# If a Tc is a 3' subseq of an Nf, then it and all shorter Tc should be excluded from
# the new Nc.
for index_Tc, seq_Tc in enumerate(member_seqs_Tc):
if seq_Tc.name in dict_Tc_Nf:
member_seqs_Tc = member_seqs_Tc[: index_Tc]
break
dict_Nc[seed_Tc_name] = NormalizedTruncatedProfileSequence(member_seqs_Tc)
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed recovering tRNA with truncated feature profile (min)", time.time() - start_time, is_time_value=True))
self.progress.end()
def report_3prime_derep_stats(self):
"""Report to terminal stats regarding 3'-dereplication immediately after these steps."""
count_Nf = len(self.dict_Nf)
anticodon_count_Nf = 0
complete_count_Nf = 0
mean_spec_T_Nf = 0
mean_nonspec_T_Nf = 0
mean_spec_U_Nf = 0
mean_nonspec_U_Nf = 0
spec_reads_Nf = 0
nonspec_reads_Nf = 0
max_spec_reads_Nf = 0
max_nonspec_reads_Nf = 0
max_total_reads_Nf = 0
dict_Uc_trna = self.dict_Uc_trna
for seq_Nf in self.dict_Nf.values():
if seq_Nf.contains_anticodon:
anticodon_count_Nf += 1
if seq_Nf.has_complete_feature_set:
complete_count_Nf += 1
spec_reads = 0
nonspec_reads = 0
for category_T, name_T in zip(seq_Nf.categories_T, seq_Nf.names_T):
seq_T = getattr(self, 'dict_' + category_T)[name_T]
if category_T == 'Tf':
read_counts = []
for name_U, category_U in zip(seq_T.names_U, seq_T.categories_U):
read_counts.append(getattr(self, 'dict_' + category_U)[name_U].read_count)
elif category_T == 'Tc_trna':
read_counts = [dict_Uc_trna[name_U].read_count for name_U in seq_T.names_U]
else:
raise Exception(f"The trimmed seq ({seq_T.name}) in the normalized seq ({seq_Nf.name}) has the unexpected class, `{type(seq_T)}`.")
if len(seq_T.names_N) == 1:
mean_spec_T_Nf += 1
for read_count in read_counts:
mean_spec_U_Nf += 1
spec_reads += read_count
spec_reads_Nf += read_count
else:
mean_nonspec_T_Nf += 1
for read_count in read_counts:
mean_nonspec_U_Nf += 1
nonspec_reads += read_count
nonspec_reads_Nf += read_count
if spec_reads > max_spec_reads_Nf:
max_spec_reads_Nf = spec_reads
if nonspec_reads > max_nonspec_reads_Nf:
max_nonspec_reads_Nf = nonspec_reads
if spec_reads + nonspec_reads > max_total_reads_Nf:
max_total_reads_Nf = spec_reads + nonspec_reads
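# Convert the accumulated counts into per-Nf means; the read totals are also converted to means, as reported below.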
mean_spec_T_Nf /= count_Nf
mean_nonspec_T_Nf /= count_Nf
mean_spec_U_Nf /= count_Nf
mean_nonspec_U_Nf /= count_Nf
spec_reads_Nf /= count_Nf
nonspec_reads_Nf /= count_Nf
count_Tc_trna_Nf = len(self.dict_Tc_trna)
count_Uc_trna_Nf = len(self.dict_Uc_trna)
read_count_Uc_trna_Nf = 0
for seq_Uc_trna in self.dict_Uc_trna.values():
read_count_Uc_trna_Nf += seq_Uc_trna.read_count
mean_Tc_trna_Nf = count_Tc_trna_Nf / count_Nf
mean_Uc_trna_Nf = count_Uc_trna_Nf / count_Nf
mean_Uc_reads_Nf = read_count_Uc_trna_Nf / count_Nf
count_Nc = len(self.dict_Nc)
anticodon_count_Nc = 0
mean_spec_T_Nc = 0
mean_nonspec_T_Nc = 0
mean_spec_U_Nc = 0
mean_nonspec_U_Nc = 0
mean_spec_reads_Nc = 0
mean_nonspec_reads_Nc = 0
dict_Tc_nontrna = self.dict_Tc_nontrna
dict_Uc_nontrna = self.dict_Uc_nontrna
for seq_Nc in self.dict_Nc.values():
if seq_Nc.contains_anticodon:
anticodon_count_Nc += 1
for name_T in seq_Nc.names_T:
seq_T = dict_Tc_nontrna[name_T]
if len(seq_T.names_N) == 1:
mean_spec_T_Nc += 1
for name_U in seq_T.names_U:
mean_spec_U_Nc += 1
mean_spec_reads_Nc += dict_Uc_nontrna[name_U].read_count
else:
mean_nonspec_T_Nc += 1
for name_U in seq_T.names_U:
mean_nonspec_U_Nc += 1
mean_nonspec_reads_Nc += dict_Uc_nontrna[name_U].read_count
mean_spec_T_Nc /= count_Nc
mean_nonspec_T_Nc /= count_Nc
mean_spec_U_Nc /= count_Nc
mean_nonspec_U_Nc /= count_Nc
mean_spec_reads_Nc /= count_Nc
mean_nonspec_reads_Nc /= count_Nc
run = self.run
run.info_single("Results of 3' dereplication (subject to change -- see summary output file for final results)", nl_after=1)
run.info_single("Normalized seq counts")
run.info("tRNA profile", count_Nf)
run.info("Truncated tRNA profile", count_Nc, nl_after=1)
run.info_single("Normalized seqs with tRNA profile")
run.info("Containing anticodon", anticodon_count_Nf)
run.info("Containing complete feature set", complete_count_Nf)
run.info("Mean specific trimmed seqs per seq", round(mean_spec_T_Nf, 1))
run.info("Mean nonspecific trimmed seqs per seq", round(mean_nonspec_T_Nf, 1))
run.info("Mean specific unique seqs per seq", round(mean_spec_U_Nf, 1))
run.info("Mean nonspecific unique seqs per seq", round(mean_nonspec_U_Nf, 1))
run.info("Mean specific reads per seq", round(spec_reads_Nf, 1))
run.info("Mean nonspecific reads per seq", round(nonspec_reads_Nf, 1))
run.info("Max specific reads per seq", max_spec_reads_Nf)
run.info("Max nonspecific reads per seq", max_nonspec_reads_Nf)
run.info("Max total reads per seq", max_total_reads_Nf)
run.info("Recovered trimmed seqs with truncated profile", count_Tc_trna_Nf)
run.info("Recovered unique seqs with truncated profile", count_Uc_trna_Nf)
run.info("Recovered reads with truncated profile", read_count_Uc_trna_Nf)
run.info("Mean recovered trunc trimmed seqs per seq", round(mean_Tc_trna_Nf, 2))
run.info("Mean recovered trunc unique seqs per seq", round(mean_Uc_trna_Nf, 2))
run.info("Mean recovered trunc reads per seq", round(mean_Uc_reads_Nf, 2))
run.info("Consolidated trimmed tRNA seqs", self.count_consol_Tf, nl_after=1)
run.info_single("Normalized seqs with truncated tRNA profile")
run.info("Containing anticodon", anticodon_count_Nc)
run.info("Mean specific trimmed seqs per seq", round(mean_spec_T_Nc, 1))
run.info("Mean nonspecific trimmed seqs per seq", round(mean_nonspec_T_Nc, 1))
run.info("Mean specific unique seqs per seq", round(mean_spec_U_Nc, 1))
run.info("Mean nonspecific unique seqs per seq", round(mean_nonspec_U_Nc, 1))
run.info("Mean specific reads per seq", round(mean_spec_reads_Nc, 1))
run.info("Mean nonspecific reads per seq", round(mean_nonspec_reads_Nc, 1), nl_after=2)
def write_checkpoint_files(self, checkpoint_name):
self.progress.new(f"Writing intermediate files for the \"{checkpoint_name}\" checkpoint")
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
checkpoint_subdir_path = self.checkpoint_subdir_dict[checkpoint_name]
if not os.path.exists(checkpoint_subdir_path):
os.mkdir(checkpoint_subdir_path)
for intermed_file_key, intermed_file_path in self.intermed_file_path_dict[checkpoint_name].items():
self.progress.update(f"{os.path.basename(intermed_file_path)}")
# The key, e.g., "dict_Uf", corresponds to the attribute to be saved to file.
with open(intermed_file_path, 'wb') as intermed_file:
pkl.dump(getattr(self, intermed_file_key), intermed_file, protocol=pkl.HIGHEST_PROTOCOL)
self.progress.end()
self.run.info_single(f"Wrote \"{checkpoint_name}\" checkpoint intermediate files to {checkpoint_subdir_path}", cut_after=200)
def load_checkpoint_files(self, checkpoint_name):
progress = self.progress
progress.new(f"Loading intermediate files at the \"{checkpoint_name}\" checkpoint")
for intermed_file_key, intermed_file_path in self.intermed_file_path_dict[checkpoint_name].items():
progress.update(f"{os.path.basename(intermed_file_path)}")
with open(intermed_file_path, 'rb') as f:
setattr(self, intermed_file_key, pkl.load(f))
with open(self.analysis_summary_path, 'a') as f:
f.write(f"\nAnalysis restarted from the \"{checkpoint_name}\" checkpoint\n")
progress.end()
self.run.info_single(f"Loaded \"{checkpoint_name}\" checkpoint intermediate files from {self.checkpoint_subdir_dict[checkpoint_name]}")
def threeprime_dereplicate_sequences_without_terminus(self):
"""Find tRNA sequences missing a 3' terminus. By default, U required a 3' terminus to have
been profiled as tRNA. Un are searched against Nf. Un is recovered as tRNA when it is a 3'
subseq of Nf or is longer than Nf with a complete profile and thus is shown to have a 5'
extension. Recovered Un each generate a Um object."""
self.progress.new("Dereplicating tRNA seqs ending in discriminator nt")
self.progress.update("...")
# 3'-dereplicate Nf and Un.
names = []
reverse_strings = []
extras = []
# Nf are added first to the dereplicator so that they appear first in the clusters. This
# allows Un that are identical to the Nf (due to prior trimming of the 3' terminus from the
# Nf) to always be recovered.
for name_Nf, seq_Nf in self.dict_Nf.items():
names.append(name_Nf)
reverse_strings.append(seq_Nf.string[::-1])
extras.append(seq_Nf)
for name_Un, seq_Un in self.dict_Un.items():
if len(seq_Un.string) >= self.min_trna_frag_size:
names.append(name_Un)
reverse_strings.append(seq_Un.string[::-1])
extras.append(seq_Un)
clusters = Dereplicator(names, reverse_strings, extras=extras).prefix_dereplicate()
# Search clusters for Nf and qualifying Un. Some Un may be 3' subseqs of more than one Nf. An
# Un cannot be longer than more than one Nf, as Nf have already been 3'-dereplicated. The same
# Nf can be found in multiple clusters as a subseq of different longer Un.
dict_Un_Nf = defaultdict(list)
dict_Un = self.dict_Un
dict_Um = self.dict_Um
dict_Tm = self.dict_Tm
for cluster in clusters:
if len(cluster.member_seqs) == 1:
continue
# Check that there is an Nf in the cluster -- there cannot be more than one.
cluster_Nf_index = None
seq_Nf = None
length_Nf = None
complete_feature_set_in_Nf = None
for member_index, seq in enumerate(cluster.member_extras):
if isinstance(seq, NormalizedFullProfileSequence):
cluster_Nf_index = member_index
seq_Nf = seq
length_Nf = len(seq_Nf.string)
if member_index > 0:
complete_feature_set_in_Nf = seq_Nf.has_complete_feature_set
break
else:
continue
# To reach this point, an Nf must have been found in the cluster. Now process any longer
# Un.
for seq_Un in cluster.member_extras[: cluster_Nf_index]:
# If the Nf has a complete feature profile (is a full-length tRNA), then the
# overhanging 5' bases in the recovered Un can be trimmed as "extra" 5' bases.
# Otherwise, it is possible the overhanging bases are part of an artifact joined to
# the 5' end of a tRNA fragment, so conservatively ignore the Un.
if not complete_feature_set_in_Nf:
break
name_Un = seq_Un.name
try:
dict_Un.pop(name_Un)
except KeyError:
# The Un has already been recovered in another cluster, necessarily as a
# supersequence of the same Nf.
continue
seq_Um = UniqueMappedSequence(seq_Un.string, name_Un, seq_Un.read_count, xtra_5prime_length=len(seq_Un.string) - length_Nf)
dict_Um[name_Un] = seq_Um
seq_Tm = TrimmedMappedSequence(seq_Um)
seq_Nf.names_T.append(name_Un)
seq_Nf.categories_T.append('Tm')
seq_Tm.names_N.append(seq_Nf.name)
seq_Nf.starts_T_in_N.append(0)
seq_Nf.stops_T_in_N.append(length_Nf)
dict_Tm[name_Un] = seq_Tm
# Find all Un in the cluster ≤ Nf length.
for seq_Un in cluster.member_extras[cluster_Nf_index + 1: ]:
dict_Un_Nf[seq_Un.name].append(seq_Nf)
for name_Un, seqs_Nf in dict_Un_Nf.items():
seq_Un = dict_Un.pop(name_Un)
seq_Um = UniqueMappedSequence(seq_Un.string, name_Un, seq_Un.read_count)
dict_Um[name_Un] = seq_Um
seq_Tm = TrimmedMappedSequence(seq_Um)
length_Um = len(seq_Um.string)
# The same Nf can be found in multiple clusters, so it can be represented multiple times
# in `seqs_Nf`.
for seq_Nf in set(seqs_Nf):
seq_Nf.names_T.append(name_Un)
seq_Nf.categories_T.append('Tm')
seq_Tm.names_N.append(seq_Nf.name)
length_Nf = len(seq_Nf.string)
seq_Nf.starts_T_in_N.append(length_Nf - length_Um)
seq_Nf.stops_T_in_N.append(length_Nf)
dict_Tm[name_Un] = seq_Tm
self.progress.end()
def map_fragments(self):
"""Map unprofiled tRNA fragments to longer profiled tRNA sequences. Fragments only missing a
3' terminus were already found with `threeprime_dereplicate_sequences_without_terminus` or
by profiling if '' was an accepted 3' terminus.
EXAMPLE:
Nf: (GT)TCCGTGATAGTTTAATGGTCAGAATGGGCGCTTGTCGCGTGCCAGATCGGGGTTCAATTCCCCGTCGCGGAG
Um1 (extra 5' bases) : T TCCGTGATAGTTTAATGGTCAGAATGG
Um2 (interior) : TAGTTTAATGGTCAGAATGGGCGCTTGTCGCGTGCCAGATCGGGG
"""
start_time = time.time()
progress = self.progress
pid = "Set up search of unprofiled reads to profiled tRNA"
progress.new(pid)
self.progress.update("Getting queries from unprofiled reads")
temp_dir_path = filesnpaths.get_temp_directory_path()
query_fasta_path = os.path.join(temp_dir_path, 'query.fa')
dict_Un = self.dict_Un
min_trna_frag_size = self.min_trna_frag_size
query_count = 0
with open(query_fasta_path, 'w') as query_fasta:
for seq_Un in [seq_Un for seq_Un in dict_Un.values() if len(seq_Un.string) >= min_trna_frag_size]:
# Include Un length in the defline for the purposes of parsing vmatch output.
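# For example (hypothetical name), an Un 'c0a1b2' of length 24 yields the defline '>c0a1b2-24'.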
query_fasta.write(f">{seq_Un.name}-{len(seq_Un.string)}\n{seq_Un.string}\n")
query_count += 1
progress.update_pid(pid)
progress.update("Getting targets from profiled tRNAs")
# Un are mapped to Nf with extra 5' bases added when present in underlying U. Multiple
# targets for each Nf are therefore produced for different 5' extensions.
target_fasta_path = os.path.join(temp_dir_path, 'target.fa')
dict_Tf = self.dict_Tf
dict_Uf = self.dict_Uf
with open(target_fasta_path, 'w') as target_fasta:
for seq_Nf in self.dict_Nf.values():
string_Nf = seq_Nf.string
# The longest Tf (the first in the list) is by design the only one of the Tf forming
# Nf that may have extra 5' bases.
longest_Tf = dict_Tf[seq_Nf.names_T[0]]
if longest_Tf.uniq_with_xtra_5prime_count > 0:
set_5prime_string = set()
for name_Uf in longest_Tf.names_U:
seq_Uf = dict_Uf[name_Uf]
if seq_Uf.xtra_5prime_length > 0:
set_5prime_string.add(seq_Uf.string[: seq_Uf.xtra_5prime_length])
# Avoid creating superfluous target seqs that are subseqs of other target seqs
# due to a 5' extension of an Nf being a subseq of a longer 5' extension of the
# same Nf.
strings_5prime = sorted(set_5prime_string, key=lambda string_5prime: -len(string_5prime))
string_5prime_additions = [strings_5prime[0]]
for string_5prime in strings_5prime[1: ]:
length_5prime = len(string_5prime)
for string_5prime_addition in string_5prime_additions:
if string_5prime == string_5prime_addition[-length_5prime: ]:
break
else:
string_5prime_additions.append(string_5prime)
for index_5prime, string_5prime in enumerate(string_5prime_additions):
# Use an index to distinguish otherwise equivalent targets with different 5'
# extensions of the same length.
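# For example (hypothetical values), an Nf named 'f3e9' whose second recorded extension is the
# 4 nt string 'ACGT' yields the defline '>f3e9-4-1' and the target seq 'ACGT' prepended to the
# Nf string.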
target_fasta.write(f">{seq_Nf.name}-{len(string_5prime)}-{index_5prime}\n{string_5prime}{string_Nf}\n")
else:
target_fasta.write(f">{seq_Nf.name}-0-0\n{string_Nf}\n") # no extra 5' bases
progress.end()
# Use a 10x bigger query chunk size than the Vmatch default, as the rather conservative
# default is tailored to searches with mismatches/indels. It takes longer to process each
# chunk in these searches, and these searches may generate more alignments per chunk.
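# Sketch of the chunking arithmetic (hypothetical numbers): with an adjusted default chunk size
# of 10,000, 3,000 queries on 4 threads would use chunks of 3,000 // 4 + 1 = 751 queries, while
# 50,000 queries would use chunks of 10,000 // 4 = 2,500.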
query_chunk_size_default = 10 * Vmatch.QUERY_CHUNK_SIZE_DEFAULT
match_df = Vmatch(argparse.Namespace(match_mode='exact_query_substring',
fasta_db_file=target_fasta_path,
fasta_query_file=query_fasta_path,
num_threads=self.num_threads,
query_chunk_size=query_count // self.num_threads + 1 if query_count < query_chunk_size_default else query_chunk_size_default // self.num_threads,
temp_dir=temp_dir_path)).search_queries()
pid = "Filtering matches"
progress.new(pid)
progress.update("...")
self.restructure_fragment_match_table(match_df)
# Process each Un match. Each Un can match more than one Nf.
match_gb = match_df.groupby('query_name')
del match_df
gc.collect()
fragment_filter_progress_interval = 25000
total_matched_queries = len(match_gb)
pp_total_matched_queries = pp(total_matched_queries)
num_filtered_queries = -1
dict_Um = self.dict_Um
dict_Tm = self.dict_Tm
dict_Nf = self.dict_Nf
for name_Un, query_match_df in match_gb:
num_filtered_queries += 1
if num_filtered_queries % fragment_filter_progress_interval == 0:
pp_progress_interval_end = pp(total_matched_queries if num_filtered_queries + fragment_filter_progress_interval > total_matched_queries else num_filtered_queries + fragment_filter_progress_interval)
progress.update_pid(pid)
progress.update(f"Queries {pp(num_filtered_queries + 1)}-{pp_progress_interval_end}/{pp_total_matched_queries}")
# Each Un with a validated match will yield a Um and Tm.
seq_Um = None
seq_Tm = None
for name_Nf, length_target_5prime, query_start, length_Un in zip(query_match_df['target_name'],
query_match_df['length_5prime'],
query_match_df['query_start_in_target'],
query_match_df['query_length']):
query_stop = query_start + length_Un
stop_Tm_in_Nf = query_stop - length_target_5prime
if stop_Tm_in_Nf <= 0:
# Ignore queries that align entirely to extra 5' bases. Un mapping exclusively
# to the 5' extension that are long enough to fulfill the minimum length
# requirement may be mapping to an artifactual chimeric sequence.
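# For example (hypothetical values), a query of length 6 starting at position 2 of a target with
# a 10 nt 5' extension stops at 8, giving stop_Tm_in_Nf = 8 - 10 = -2 <= 0, so it is ignored.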
continue
seq_Nf = dict_Nf[name_Nf]
if not seq_Um:
# Enter this block the first time the Un query validly matches an Nf.
seq_Un = dict_Un.pop(name_Un)
# Assume that 5' extensions are the same for the query regardless of the reference.
# This could be false when
# 1. tRNA profiling erroneously identified the end of the acceptor stem
# or 2. the query mapped to different places at the end of the acceptor stem in different tRNAs.
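# For example (hypothetical values), with an 8 nt target 5' extension, a query starting at
# position 3 has a 5 nt 5' extension and start_Tm_in_Nf = 0, whereas a query starting at
# position 12 has no 5' extension and start_Tm_in_Nf = 12 - 8 = 4.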
if length_target_5prime - query_start > 0:
length_query_5prime = length_target_5prime - query_start
start_Tm_in_Nf = 0
else:
length_query_5prime = 0
start_Tm_in_Nf = query_start - length_target_5prime
seq_Um = UniqueMappedSequence(seq_Un.string, name_Un, seq_Un.read_count, xtra_5prime_length=length_query_5prime)
dict_Um[name_Un] = seq_Um
seq_Tm = TrimmedMappedSequence(seq_Um)
seq_Nf.names_T.append(name_Un)
seq_Nf.categories_T.append('Tm')
seq_Tm.names_N.append(seq_Nf.name)
seq_Nf.starts_T_in_N.append(start_Tm_in_Nf)
seq_Nf.stops_T_in_N.append(stop_Tm_in_Nf)
dict_Tm[name_Un] = seq_Tm
else:
for prev_name_T, prev_category_T in zip(seq_Nf.names_T[::-1], seq_Nf.categories_T[::-1]):
# Ensure that Tm maps to Nf only once. Multiple targets can be created from
# the same Nf for different 5' extensions. Tm are added after Tf and Tc to
# the list of T in Nf.
if seq_Tm.name == prev_name_T:
break
if prev_category_T != 'Tm':
seq_Nf.names_T.append(seq_Tm.name)
seq_Nf.categories_T.append('Tm')
seq_Tm.names_N.append(seq_Nf.name)
if length_target_5prime - query_start > 0:
start_Tm_in_Nf = 0
else:
start_Tm_in_Nf = query_start - length_target_5prime
seq_Nf.starts_T_in_N.append(start_Tm_in_Nf)
seq_Nf.stops_T_in_N.append(stop_Tm_in_Nf)
break
progress.end()
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed mapping tRNA fragments (min)", time.time() - start_time, is_time_value=True))
def restructure_fragment_match_table(self, match_df):
"""Helper method for `map_fragments`."""
names_U = []
for query_name in match_df['query_name']:
# Sequence names in anvi'o cannot contain a hyphen.
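# For example (hypothetical name), the query defline 'c0a1b2-24' written in `map_fragments`
# splits into the seq name 'c0a1b2' and the length '24'.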
name_U, length_U = query_name.split('-')
names_U.append(name_U)
match_df.loc[:, 'query_name'] = names_U
names_N = []
lengths_5prime = []
for target_name in match_df['target_name']:
name_N, length_5prime, index_5prime = target_name.split('-')
names_N.append(name_N)
lengths_5prime.append(int(length_5prime))
match_df.loc[:, 'target_name'] = names_N
match_df['length_5prime'] = lengths_5prime
def report_map_stats(self):
"""Report to terminal stats on fragment mapping immediately after these steps."""
count_spec_Nf = 0
count_nonspec_Nf = 0
count_any_Nf = 0
mean_spec_Tm_Nf = 0
mean_nonspec_Tm_Nf = 0
spec_reads_Nf = 0
nonspec_reads_Nf = 0
absent_3prime_terminus_seqs_Tm = 0
absent_3prime_terminus_reads_Tm = 0
for seq_Nf in self.dict_Nf.values():
if seq_Nf.spec_map_seq_count:
count_spec_Nf += 1
mean_spec_Tm_Nf += seq_Nf.spec_map_seq_count
spec_reads_Nf += seq_Nf.spec_map_read_count
if not PROFILE_ABSENT_3PRIME_TERMINUS:
absent_3prime_terminus_seqs_Tm += seq_Nf.absent_3prime_terminus_seq_count
absent_3prime_terminus_reads_Tm += seq_Nf.absent_3prime_terminus_read_count
if seq_Nf.nonspec_map_seq_count:
count_nonspec_Nf += 1
mean_nonspec_Tm_Nf += seq_Nf.nonspec_map_seq_count
nonspec_reads_Nf += seq_Nf.nonspec_map_read_count
if seq_Nf.spec_map_seq_count or seq_Nf.nonspec_map_seq_count:
count_any_Nf += 1
count_Nf = len(self.dict_Nf)
mean_spec_Tm_Nf /= count_Nf
mean_nonspec_Tm_Nf /= count_Nf
spec_reads_Nf /= count_Nf
nonspec_reads_Nf /= count_Nf
count_spec_Tm = 0
count_nonspec_Tm = 0
reads_spec_Tm = 0
reads_nonspec_Tm = 0
spec_short_5prime_seq_Tm = 0
nonspec_short_5prime_seq_Tm = 0
spec_long_5prime_seq_Tm = 0
nonspec_long_5prime_seq_Tm = 0
spec_short_5prime_read_Tm = 0
nonspec_short_5prime_read_Tm = 0
spec_long_5prime_read_Tm = 0
nonspec_long_5prime_read_Tm = 0
spec_short_5prime_Nf_names = []
spec_long_5prime_Nf_names = []
nonspec_short_5prime_Nf_names = []
nonspec_long_5prime_Nf_names = []
for seq_Tm in self.dict_Tm.values():
if len(seq_Tm.names_N) == 1:
count_spec_Tm += 1
reads_spec_Tm += seq_Tm.read_count
if seq_Tm.read_with_xtra_5prime_count:
if seq_Tm.long_5prime_extension_dict:
spec_long_5prime_seq_Tm += 1
spec_long_5prime_read_Tm += seq_Tm.read_count
spec_long_5prime_Nf_names.append(seq_Tm.names_N[0])
else:
spec_short_5prime_seq_Tm += 1
spec_short_5prime_read_Tm += seq_Tm.read_count
spec_short_5prime_Nf_names.append(seq_Tm.names_N[0])
else:
count_nonspec_Tm += 1
reads_nonspec_Tm += seq_Tm.read_count
if seq_Tm.read_with_xtra_5prime_count:
if seq_Tm.long_5prime_extension_dict:
nonspec_long_5prime_seq_Tm += 1
nonspec_long_5prime_read_Tm += seq_Tm.read_count
nonspec_long_5prime_Nf_names.extend(seq_Tm.names_N)
else:
nonspec_short_5prime_seq_Tm += 1
nonspec_short_5prime_read_Tm += seq_Tm.read_count
nonspec_short_5prime_Nf_names.extend(seq_Tm.names_N)
spec_short_5prime_Nf = len(set(spec_short_5prime_Nf_names))
spec_long_5prime_Nf = len(set(spec_long_5prime_Nf_names))
nonspec_short_5prime_Nf = len(set(nonspec_short_5prime_Nf_names))
nonspec_long_5prime_Nf = len(set(nonspec_long_5prime_Nf_names))
run = self.run
run.info_single("Results of fragment mapping (subject to change -- see summary output file for final results)", nl_before=2, nl_after=1)
run.info_single("Normalized seqs with tRNA profile")
run.info("With specific mapping", count_spec_Nf)
run.info("With nonspecific mapping", count_nonspec_Nf)
run.info("With any mapping", count_any_Nf)
run.info("Mean specific mapped seqs per seq", round(mean_spec_Tm_Nf, 2))
run.info("Mean nonspecific mapped seqs per seq", round(mean_nonspec_Tm_Nf, 2))
run.info("Mean specific mapped reads per seq", round(spec_reads_Nf, 2))
run.info("Mean nonspecific mapped reads per seq", round(nonspec_reads_Nf, 2))
run.info(f"With specific mapping to 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_short_5prime_Nf)
run.info(f"With specific mapping to ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_long_5prime_Nf)
run.info(f"With nonspecific mapping to 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_short_5prime_Nf)
run.info(f"With nonspecific mapping to ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_long_5prime_Nf, nl_after=1)
run.info_single("Mapped seq counts")
run.info("Specific seqs", count_spec_Tm)
run.info("Nonspecific seqs", count_nonspec_Tm)
run.info("Specific reads", reads_spec_Tm)
run.info("Nonspecific reads", reads_nonspec_Tm)
run.info(f"Specific seqs with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_short_5prime_seq_Tm)
run.info(f"Specific seqs with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_long_5prime_seq_Tm)
run.info(f"Specific reads with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_short_5prime_read_Tm)
run.info(f"Specific reads with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_long_5prime_read_Tm)
run.info(f"Nonspecific seqs with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_short_5prime_seq_Tm)
run.info(f"Nonspecific seqs with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_long_5prime_seq_Tm)
run.info(f"Nonspecific reads with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_short_5prime_read_Tm)
run.info(f"Nonspecific reads with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_long_5prime_read_Tm)
if not PROFILE_ABSENT_3PRIME_TERMINUS:
run.info("Seqs only missing a 3' terminus", absent_3prime_terminus_seqs_Tm)
run.info("Reads only missing a 3' terminus", absent_3prime_terminus_reads_Tm)
run.info_single("Consider including an absent 3' terminus (by using '_') "
"in the `anvi-trnaseq` parameterization of allowed 3' termini "
"if the number of mapped seqs identical to a normalized seq but missing a 3' terminus seems high.",
mc='red')
def report_N_cov_stats(self):
"""Report to terminal stats on N coverages immediately after N initialization."""
spec_read_Nf = 0
nonspec_read_Nf = 0
mean_spec_cov_Nf = 0
mean_nonspec_cov_Nf = 0
total_length_Nf = 0
max_spec_cov_Nf = 0
max_nonspec_cov_Nf = 0
max_total_cov_Nf = 0
for seq_Nf in self.dict_Nf.values():
spec_read_Nf += seq_Nf.spec_read_count
nonspec_read_Nf += seq_Nf.nonspec_read_count
length_Nf = len(seq_Nf.string)
mean_spec_cov = seq_Nf.mean_spec_cov
mean_nonspec_cov = seq_Nf.mean_nonspec_cov
mean_spec_cov_Nf += mean_spec_cov * length_Nf
mean_nonspec_cov_Nf += mean_nonspec_cov * length_Nf
total_length_Nf += length_Nf
if mean_spec_cov > max_spec_cov_Nf:
max_spec_cov_Nf = mean_spec_cov
if mean_nonspec_cov > max_nonspec_cov_Nf:
max_nonspec_cov_Nf = mean_nonspec_cov
if mean_spec_cov + mean_nonspec_cov > max_total_cov_Nf:
max_total_cov_Nf = mean_spec_cov + mean_nonspec_cov
mean_spec_cov_Nf /= total_length_Nf
mean_nonspec_cov_Nf /= total_length_Nf
spec_read_Nc = 0
nonspec_read_Nc = 0
mean_spec_cov_Nc = 0
mean_nonspec_cov_Nc = 0
total_length_Nc = 0
max_spec_cov_Nc = 0
max_nonspec_cov_Nc = 0
max_total_cov_Nc = 0
for seq_Nc in self.dict_Nc.values():
spec_read_Nc += seq_Nc.spec_read_count
nonspec_read_Nc += seq_Nc.nonspec_read_count
length_Nc = len(seq_Nc.string)
mean_spec_cov = seq_Nc.mean_spec_cov
mean_nonspec_cov = seq_Nc.mean_nonspec_cov
mean_spec_cov_Nc += mean_spec_cov * length_Nc
mean_nonspec_cov_Nc += mean_nonspec_cov * length_Nc
total_length_Nc += length_Nc
if mean_spec_cov > max_spec_cov_Nc:
max_spec_cov_Nc = mean_spec_cov
if mean_nonspec_cov > max_nonspec_cov_Nc:
max_nonspec_cov_Nc = mean_nonspec_cov
if mean_spec_cov + mean_nonspec_cov > max_total_cov_Nc:
max_total_cov_Nc = mean_spec_cov + mean_nonspec_cov
mean_spec_cov_Nc /= total_length_Nc
mean_nonspec_cov_Nc /= total_length_Nc
run = self.run
run.info_single("Results of normalization (subject to change -- see summary output file for final results)", nl_before=2, nl_after=1)
run.info_single("Normalized seqs with tRNA profile")
run.info("Specific reads", spec_read_Nf)
run.info("Nonspecific reads", nonspec_read_Nf)
run.info("Mean specific coverage", round(mean_spec_cov_Nf, 2))
run.info("Mean nonspecific coverage", round(mean_nonspec_cov_Nf, 2))
run.info("Max specific coverage", round(max_spec_cov_Nf, 2))
run.info("Max nonspecific coverage", round(max_nonspec_cov_Nf, 2))
run.info("Max total coverage", round(max_total_cov_Nf, 2), nl_after=1)
run.info_single("Normalized seqs with truncated tRNA profile")
run.info("Specific reads", spec_read_Nc)
run.info("Nonspecific reads", nonspec_read_Nc)
run.info("Mean specific coverage", round(mean_spec_cov_Nc, 2))
run.info("Mean nonspecific coverage", round(mean_nonspec_cov_Nc, 2))
run.info("Max specific coverage", round(max_spec_cov_Nc, 2))
run.info("Max nonspecific coverage", round(max_nonspec_cov_Nc, 2))
run.info("Max total coverage", round(max_total_cov_Nc, 2), nl_after=2)
def find_substitutions(self):
"""Find sites of potential modification-induced substitutions."""
start_time = time.time()
progress = self.progress
pid = "Finding modification-induced substitutions"
progress.new(pid)
progress.update("...")
# Cluster Nf. Clusters agglomerate Nf that differ from at least one other Nf in the cluster
# by no more than 3 nts in 100 (by default) in a gapless end-to-end alignment with no
# clipping.
dict_Nf = self.dict_Nf
names_Nf = []
strings_Nf = []
dict_Nf_feature_completeness = {}
for name_Nf, seq_Nf in dict_Nf.items():
names_Nf.append(name_Nf)
strings_Nf.append(seq_Nf.string)
dict_Nf_feature_completeness[name_Nf] = seq_Nf.has_complete_feature_set
progress.end()
agglomerator = Agglomerator(names_Nf, strings_Nf, num_threads=self.num_threads)
# Provide a priority function for seeding clusters that favors, in order:
# 1. Nf with a complete set of tRNA features,
# 2. longer Nf,
# 3. Nf with more alignments in the all-against-all search,
# 4. alphanumeric order of the Nf name.
agglomerator.agglomerate(max_mismatch_freq=self.agglom_max_mismatch_freq,
priority_function=lambda aligned_ref: (-dict_Nf_feature_completeness[aligned_ref.name],
-len(aligned_ref.seq_string),
-len(aligned_ref.alignments),
aligned_ref.name))
agglom_aligned_ref_dict = agglomerator.agglom_aligned_ref_dict
pid = "Decomposing clusters"
progress.new(pid)
progress.update("...")
excluded_Nf_names = [] # Used to exclude Nf from being considered as aligned queries in clusters (see below)
names_Nf = [] # Used to prevent the same M from being created twice
dict_M = self.dict_M
num_processed_refs = -1
total_ref_count = len(agglom_aligned_ref_dict)
decomposition_progress_interval = 1000
pp_total_ref_count = pp(total_ref_count)
for ref_name, aligned_ref in agglom_aligned_ref_dict.items():
num_processed_refs += 1
if num_processed_refs % decomposition_progress_interval == 0:
pp_progress_interval_end = pp(total_ref_count if num_processed_refs + decomposition_progress_interval > total_ref_count else num_processed_refs + decomposition_progress_interval)
progress.update_pid(pid)
progress.update(f"{pp(num_processed_refs + 1)}-{pp_progress_interval_end}/{pp_total_ref_count}")
# A mod requires at least 3 different nts to be detected, and each Nf differs by at
# least 1 nt (mismatch or gap), so for a cluster to form an M, it must contain at least
# 3 Nf.
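# The reference Nf itself counts as one member, so at least 2 aligned queries are needed for the
# cluster to contain 3 Nf.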
if len(aligned_ref.alignments) < 2:
continue
aligned_ref_length = len(aligned_ref.seq_string)
valid_aligned_queries = []
for alignment in aligned_ref.alignments:
# Nf should only align at the 3' end. Alignments to the interior of Nf can
# theoretically occur when the reference is a tRNA-tRNA chimera.
if aligned_ref_length != alignment.target_start + alignment.alignment_length:
continue
query_name = alignment.aligned_query.name
# The Nf query may have formed a M already. If the Nf had a complete feature
# profile, or if it was the same length as such a sequence, then it should not be
# able to form a longer M that would have 5' nts beyond the end of a complete
# feature profile.
if query_name in excluded_Nf_names:
continue
valid_aligned_queries.append(dict_Nf[query_name])
# Confirm that 2 or more queries passed the filters, so at least 3 Nf are still in the
# cluster.
if len(valid_aligned_queries) < 2:
continue
valid_aligned_queries.sort(key=lambda seq_Nf: (-len(seq_Nf.string), -seq_Nf.has_complete_feature_set, seq_Nf.name))
seq_array = np.zeros((len(valid_aligned_queries) + 1, aligned_ref_length), dtype=int)
# Rather than using the ASCII representation of each character (which would save some time in
# converting the sequence string to a numpy array), constrain the integer representation to the
# smallest possible range of integers to speed up the bincount method used to determine the
# number of unique nts at an alignment position.
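# Sketch of the binning trick used below: a nt with integer code c at alignment position p maps
# to bin p * NUM_NT_BINS + c, so a single bincount tallies the nts at every position at once;
# dropping bin 0 of each block discards the zero padding left where shorter seqs lack 5' nts.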
seq_array[0, :] += [NT_INT_DICT[nt] for nt in aligned_ref.seq_string]
for query_index, aligned_query in enumerate(valid_aligned_queries, start=1):
seq_array[query_index, aligned_ref_length - len(aligned_query.string): ] += [NT_INT_DICT[nt] for nt in aligned_query.string]
seqs_Nf = np.array([dict_Nf[ref_name]] + valid_aligned_queries)
# Find positions in the alignment with nt variability.
alignment_pos_uniq_nt_counts = (
np.bincount(
(seq_array + np.arange(aligned_ref_length, dtype=int) * NUM_NT_BINS).ravel(),
minlength=aligned_ref_length * NUM_NT_BINS
).reshape(-1, NUM_NT_BINS)[:, 1:] != 0
).sum(axis=1)
alignment_positions_3_4_nts = (alignment_pos_uniq_nt_counts > 2).nonzero()[0]
# Modification sites must have ≥ 3 different nts.
if not alignment_positions_3_4_nts.size:
continue
alignment_positions_2_nts = (alignment_pos_uniq_nt_counts == 2).nonzero()[0]
clusters = deque(((seq_array, seqs_Nf, alignment_positions_3_4_nts), ))
for alignment_pos in alignment_positions_2_nts:
next_clusters = deque() # Make a new object with each iteration rather than clearing the same one
while clusters:
seq_array, seqs_Nf, alignment_positions_3_4_nts = clusters.pop()
# A modification requires ≥ 3 different nts to be detected, and each Nf differs
# by ≥ 1 nt, so for a cluster to form an M it must contain ≥ 3 Nf.
if seqs_Nf.size < 3:
continue
aligned_nts = seq_array[:, alignment_pos]
nt_counts = np.bincount(aligned_nts, minlength=NUM_NT_BINS)[1: ]
if (nt_counts != 0).sum() < 2:
# There are now < 2 nts at the alignment position in the cluster under
# consideration. 2 different nts are needed to distinguish SNVs.
next_clusters.appendleft((seq_array, seqs_Nf, alignment_positions_3_4_nts))
continue
# Add a new cluster for each SNV to the stack of clusters to process if: 1. the
# new cluster contains ≥ 3 Nf and 2. the longest Nf (with a complete feature
# profile, if applicable) in the new cluster has not yet formed an M.
# Agglomerative clustering ensures that the Nf agglomerated with the longest Nf
# will be the same regardless of the original unsplit cluster.
represented_nts = nt_counts.nonzero()[0] + 1
for nt in represented_nts:
split_cluster_seq_indices = (aligned_nts == nt).nonzero()[0]
if split_cluster_seq_indices.size > 2:
split_cluster_Nf_seqs = seqs_Nf[split_cluster_seq_indices]
if split_cluster_Nf_seqs[0].name in names_Nf:
continue
next_clusters.appendleft((seq_array[split_cluster_seq_indices, :],
split_cluster_Nf_seqs,
alignment_positions_3_4_nts.copy()))
if next_clusters:
clusters = next_clusters
else:
break
if not clusters:
continue
# Check alignment positions previously found to have 3-4 nts. Further split clusters
# when positions now have 2 nts.
next_clusters = deque()
while clusters:
seq_array, seqs_Nf, alignment_positions_3_4_nts = clusters.pop()
candidates_to_remove = []
for i, alignment_pos in enumerate(alignment_positions_3_4_nts):
aligned_nts = seq_array[:, alignment_pos]
nt_counts = np.bincount(aligned_nts, minlength=NUM_NT_BINS)[1: ]
# At least 3 different nts are needed at a position to predict a mod.
represented_nts = nt_counts.nonzero()[0] + 1
if represented_nts.size < 2:
candidates_to_remove.append(i)
elif represented_nts.size == 2:
candidates_to_remove.append(i)
for nt in represented_nts:
split_cluster_seq_indices = (aligned_nts == nt).nonzero()[0]
# At least 3 Nf are needed, and the split cluster cannot have already
# formed an M.
if split_cluster_seq_indices.size > 2:
split_cluster_Nf_seqs = seqs_Nf[split_cluster_seq_indices]
if split_cluster_Nf_seqs[0].name in names_Nf:
continue
clusters.appendleft((seq_array[split_cluster_seq_indices, :],
split_cluster_Nf_seqs,
np.delete(alignment_positions_3_4_nts, candidates_to_remove)))
# Reevaluate previous alignment positions in the split clusters.
break
else:
# The cluster was not split, so its membership is unchanged. Remove any positions that no
# longer have 3-4 different nts; the remaining positions do not need to be checked again.
if candidates_to_remove:
next_clusters.appendleft((seqs_Nf, np.delete(alignment_positions_3_4_nts, candidates_to_remove)))
else:
next_clusters.appendleft((seqs_Nf, alignment_positions_3_4_nts))
if not next_clusters:
continue
clusters = next_clusters
while clusters:
seqs_Nf, mod_positions = clusters.pop() # Nf should have retained their order
seqs_Nf = list(seqs_Nf) # Turn the array into a list
represent_Nf_seq = seqs_Nf[0]
length_represent_Nf = len(represent_Nf_seq.string)
represent_Nf_start_in_array = aligned_ref_length - length_represent_Nf
mod_positions -= represent_Nf_start_in_array
seq_M = ModifiedSequence(seqs_Nf, tuple(mod_positions))
if represent_Nf_seq.has_complete_feature_set:
for seq_Nf in seqs_Nf:
if len(seq_Nf.string) < length_represent_Nf:
break
excluded_Nf_names.append(seq_Nf.name)
# Record the representative Nf so the same M is not created again from a split cluster.
names_Nf.append(represent_Nf_seq.name)
dict_M[seq_M.name] = seq_M
with open(self.analysis_summary_path, 'a') as f:
f.write(self.get_summary_line("Time elapsed finding modification-induced substitutions (min)", time.time() - start_time, is_time_value=True))
progress.end()
def report_sub_stats(self):
"""Report to terminal stats on potential modification-induced substitutions."""
count_M = len(self.dict_M)
dict_Nf = self.dict_Nf
total_sub_count = 0
total_length_M = 0
for seq_M in self.dict_M.values():
length_M = len(dict_Nf[seq_M.name].string)
total_sub_count += len(seq_M.sub_positions)
total_length_M += length_M
mean_sub_per_seq = total_sub_count / count_M
mean_sub_per_nt = total_sub_count / total_length_M
run = self.run
run.info_single("Results of substitution search", nl_before=2)
run.info("Modified seqs", count_M)
run.info("Mean (*potential*) subs per modified seq", round(mean_sub_per_seq, 1))
run.info("Mean subs per nt in modified seq", round(mean_sub_per_nt, 3), nl_after=2)
def find_indels(self):
"""Find mod-induced indels among Nq normalized seqs not known to be modified, notated *Nq*.
These seqs form *Ni* objects, which are incorporated into corresponding mod seq objects,
notated *M*.
Nq are aligned to N with potential mod-induced subs comprising M, notated *Nb*. Vmatch
alignments are conducted with Nq as queries and Nb as targets and vice versa, with Nb as
queries and Nq as targets. Query seqs must be found fully in the target seq. M may be
shorter than Nq due to unknown, untrimmed 5' and 3' nts in Nq.
Two pools of Nq are searched for indels:
1. normalized seqs with a full feature profile and not assigned to M, notated *Nqf*, and
2. normalized "non-tRNA" seqs with truncated tRNA profiles, notated *Nc*.
Why these two pools?
1. Why are Nqf considered at all, when they have been successfully profiled, and therefore,
presumably, do not have indels interrupting the profile? Indels can be erroneously
accommodated by flexibility in feature lengths. For example, an indel associated with a
mod in the D loop can cause the variable-length α or β sections of the D loop to be
assigned one fewer or one more nt than is correct in order to optimize the profile.
2. Why not all "non-tRNAs," why just those with a truncated feature profile (Nc)? Indels can
cause truncation of the profile. "Non-tRNAs" without even a truncated profile (seqs that
were not profiled past the min length threshold of the T arm) also have fewer
opportunities for mod-induced mutations.
Nq seqs rather than constituent T or U are searched for the sake of speed and simplicity.
Ideally, Ni would be further processed, finding which of their constituent T and U actually
contain the indels. However, *nonspecific* T and U in Nq are, by definition, in other Nq,
theoretically permitting the ambiguity that a T would be marked as having an indel in one
but not another Nq. This would not necessarily be an error, as identical underlying reads
could theoretically originate from different cDNA seqs, with some containing an indel, and
others, representing a different tRNA, not containing it."""
pid = "Finding seqs with mod-induced indels"
progress = self.progress
run = self.run
progress.new(pid)
# Write FASTA files of queries and targets to a temp dir used in running Vmatch. Do not
# allow the Vmatch driver to automatically remove the dir, as the FASTA file of Nq is used
# in multiple searches. The file of parsed output generated by the Vmatch driver must be
# removed between searches, as it would otherwise be appended by the next search.
temp_dir_path = filesnpaths.get_temp_directory_path()
parsed_output_path = os.path.join(temp_dir_path, 'parsed_output.tsv')
fasta_path_Nb = os.path.join(temp_dir_path, 'Nb.fa')
fasta_path_Nqf = os.path.join(temp_dir_path, 'Nqf.fa')
# Write a FASTA file of Nb.
progress.update("Writing FASTA of norm tRNA seqs with mod-induced subs")
count_Nb, max_length_M = self.write_fasta_Nb(fasta_path_Nb)
# Write a FASTA file of Nqf.
progress.update("Writing FASTA of norm tRNA seqs without mod-induced subs")
count_Nqf, max_length_Nqf = self.write_fasta_Nqf(fasta_path_Nqf)
# Search Nqf against Nb.
match_df = Vmatch(argparse.Namespace(match_mode='query_substring_with_indels',
fasta_db_file=fasta_path_Nb,
fasta_query_file=fasta_path_Nqf,
num_threads=self.num_threads,
query_chunk_size=count_Nqf // self.num_threads + 1 if count_Nqf < Vmatch.QUERY_CHUNK_SIZE_DEFAULT else 0,
max_edit_dist=math.ceil(max_length_Nqf * self.max_indel_freq),
min_ident=int(100 - 100 * self.max_indel_freq),
align_output_length=10, # This value is chosen to speed up alignment parsing in Vmatch output.
temp_dir=temp_dir_path,
keep_temp_dir=True,
edit_left_buffer=self.left_indel_buffer,
edit_right_buffer=self.right_indel_buffer)).search_queries()
self.organize_vmatch_driver_output(match_df, False)
os.remove(parsed_output_path)
results_dict = {}
# The following method updates `results_dict`.
count_Nqf_with_indels = self.process_Nq_with_indels(match_df, self.dict_Nf, results_dict, False)
progress.end()
run.info_single("Completed indel search stage 1/4: norm tRNA seqs within mod tRNA seqs", nl_before=2)
# Search Nb against Nqf.
progress.new(pid)
if count_Nqf_with_indels:
# Indels were found in some Nqf, so rewrite the FASTA file of Nqf to exclude these.
progress.update("Writing FASTA of norm tRNA seqs without known mod-induced mutations")
count_Nqf, max_length_Nqf = self.write_fasta_Nqf(fasta_path_Nqf)
match_df = Vmatch(argparse.Namespace(match_mode='query_substring_with_indels',
fasta_db_file=fasta_path_Nqf,
fasta_query_file=fasta_path_Nb,
num_threads=self.num_threads,
query_chunk_size=count_Nb // self.num_threads + 1 if count_Nb < Vmatch.QUERY_CHUNK_SIZE_DEFAULT else 0,
max_edit_dist=math.ceil(max_length_M * self.max_indel_freq),
min_ident=int(100 - 100 * self.max_indel_freq),
align_output_length=10,
temp_dir=temp_dir_path,
keep_temp_dir=True,
edit_left_buffer=self.left_indel_buffer,
edit_right_buffer=self.right_indel_buffer)).search_queries()
self.organize_vmatch_driver_output(match_df, True)
os.remove(parsed_output_path)
count_Nqf_with_indels += self.process_Nq_with_indels(match_df, self.dict_Nf, results_dict, True)
progress.end()
run.info_single("Completed indel search stage 2/4: mod tRNA seqs within norm tRNA seqs")
# Write a FASTA file of Nc.
progress.new(pid)
progress.update("Writing FASTA of norm trunc seqs")
fasta_path_Nc = os.path.join(temp_dir_path, 'Nc.fa')
count_Nc, max_length_Nc = self.write_fasta_Nc(fasta_path_Nc)
# Search Nc against Nb.
match_df = Vmatch(argparse.Namespace(match_mode='query_substring_with_indels',
fasta_db_file=fasta_path_Nb,
fasta_query_file=fasta_path_Nc,
num_threads=self.num_threads,
query_chunk_size=count_Nc // self.num_threads + 1 if count_Nc < Vmatch.QUERY_CHUNK_SIZE_DEFAULT else 0,
max_edit_dist=math.ceil(max_length_Nc * self.max_indel_freq),
min_ident=int(100 - 100 * self.max_indel_freq),
align_output_length=10,
temp_dir=temp_dir_path,
keep_temp_dir=True,
edit_left_buffer=self.left_indel_buffer,
edit_right_buffer=self.right_indel_buffer)).search_queries()
self.organize_vmatch_driver_output(match_df, False)
os.remove(parsed_output_path)
count_Nc_with_indels = self.process_Nq_with_indels(match_df, self.dict_Nc, results_dict, False)
progress.end()
run.info_single("Completed indel search stage 3/4: trunc tRNA seqs within mod tRNA seqs")
# Search Nb against Nc.
progress.new(pid)
if count_Nc_with_indels:
# Indels were found in some Nc, so rewrite the FASTA file of Nc to exclude these.
progress.update("Writing FASTA of norm trunc seqs without known mod-induced mutations")
count_Nc, max_length_Nc = self.write_fasta_Nc(fasta_path_Nc)
match_df = Vmatch(argparse.Namespace(match_mode='query_substring_with_indels',
fasta_db_file=fasta_path_Nc,
fasta_query_file=fasta_path_Nb,
num_threads=self.num_threads,
query_chunk_size=count_Nb // self.num_threads + 1 if count_Nb < Vmatch.QUERY_CHUNK_SIZE_DEFAULT else 0,
max_edit_dist=math.ceil(max_length_M * self.max_indel_freq),
min_ident=int(100 - 100 * self.max_indel_freq),
align_output_length=10,
temp_dir=temp_dir_path,
keep_temp_dir=True,
edit_left_buffer=self.left_indel_buffer,
edit_right_buffer=self.right_indel_buffer)).search_queries()
self.organize_vmatch_driver_output(match_df, True)
os.remove(parsed_output_path)
count_Nc_with_indels += self.process_Nq_with_indels(match_df, self.dict_Nc, results_dict, True)
progress.end()
run.info_single("Completed indel search stage 4/4: mod tRNA seqs within trunc tRNA seqs")
# Consolidate Nq differing by 5' and 3' extensions into a new Ni object.
progress.new(pid)
progress.update("Finalizing norm seqs with indels")
self.add_Ni_to_M(results_dict)
progress.end()
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
set_meta_value('count_Nqf_with_indels', count_Nqf_with_indels)
set_meta_value('count_Nc_with_indels', count_Nc_with_indels)
trnaseq_db.disconnect()
def write_fasta_Nb(self, fasta_path):
"""This helper method for `find_indels` writes a FASTA file of Nb constituting M."""
count_Nb = 0
max_length_M = 0
dict_Nf = self.dict_Nf
with open(fasta_path, 'w') as fasta:
for name_M, seq_M in self.dict_M.items():
length_M = seq_M.length
for num_Nb, name_Nb in enumerate(seq_M.names_Nb):
string_Nb = dict_Nf[name_Nb].string
length_Nb = len(string_Nb)
fasta.write(f">{name_M}_{num_Nb}_{length_M - length_Nb}\n{string_Nb}\n")
count_Nb += 1
if length_Nb > max_length_M:
max_length_M = length_Nb
return count_Nb, max_length_M
def write_fasta_Nqf(self, fasta_path):
"""This helper method for `find_indels` writes a FASTA file of Nqf."""
count_Nqf = 0
max_length_Nqf = 0
with open(fasta_path, 'w') as fasta:
for name_Nf, seq_Nf in self.dict_Nf.items():
if not seq_Nf.names_M:
string_Nf = seq_Nf.string
length_Nf = len(string_Nf)
fasta.write(f">{name_Nf}_{length_Nf}\n{string_Nf}\n")
count_Nqf += 1
if length_Nf > max_length_Nqf:
max_length_Nqf = length_Nf
return count_Nqf, max_length_Nqf
def write_fasta_Nc(self, fasta_path):
"""This helper method for `find_indels` writes a FASTA file of Nc."""
max_length_Nc = 0
with open(fasta_path, 'w') as fasta:
for name_Nc, seq_Nc in self.dict_Nc.items():
string_Nc = seq_Nc.string
length_Nc = len(string_Nc)
fasta.write(f">{name_Nc}_{length_Nc}\n{string_Nc}\n")
if length_Nc > max_length_Nc:
max_length_Nc = length_Nc
return len(self.dict_Nc), max_length_Nc
def organize_vmatch_driver_output(self, match_df, queries_are_Nb):
"""This helper method for `find_indels` organizes a table of alignment data for further
analysis."""
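# Nb deflines written by `write_fasta_Nb` have the form '<M name>_<Nb index>_<Nb start in M>',
# and Nq deflines written by `write_fasta_Nqf`/`write_fasta_Nc` have the form '<N name>_<N length>'
# (hypothetical examples: 'a1b2_2_4' and 'c3d4_75').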
if queries_are_Nb:
col_name_Nb = 'query_name'
col_name_N = 'target_name'
else:
col_name_Nb = 'target_name'
col_name_N = 'query_name'
names_M = []
starts_Nb_in_M = []
for defline in match_df[col_name_Nb]:
split_name = defline.split('_')
names_M.append('_'.join(split_name[: -2]))
starts_Nb_in_M.append(int(split_name[-1]))
match_df['M_name'] = names_M
match_df['Nb_start_in_M'] = starts_Nb_in_M
match_df.drop(col_name_Nb, axis=1, inplace=True)
names_N = []
lengths_N = []
for defline in match_df[col_name_N]:
split_name = defline.split('_')
names_N.append('_'.join(split_name[: -1]))
lengths_N.append(int(defline.split('_')[-1]))
match_df['Nq_name'] = names_N
match_df['Nq_length'] = lengths_N
match_df.drop(col_name_N, axis=1, inplace=True)
if queries_are_Nb:
match_df.rename({'query_start_in_target': 'Nb_start_in_Nq',
'del_lengths': 'insert_lengths',
'target_align_del_starts': 'Nq_align_insert_starts',
'query_align_del_starts': 'Nb_align_insert_starts',
'insert_lengths': 'del_lengths',
'target_align_insert_starts': 'Nq_align_del_starts',
'query_align_insert_starts': 'Nb_align_del_starts'}, axis=1, inplace=True)
else:
match_df.rename({'query_start_in_target': 'Nq_start_in_Nb',
'query_align_insert_starts': 'Nq_align_insert_starts',
'target_align_insert_starts': 'Nb_align_insert_starts',
'query_align_del_starts': 'Nq_align_del_starts',
'target_align_del_starts': 'Nb_align_del_starts'}, axis=1, inplace=True)
def process_Nq_with_indels(self, match_df, dict_N, results_dict, queries_are_Nb):
"""This helper method for `find_indels` finds indels in Nq from parsed Vmatch output. This
method is generalized to handle Nq (Nqf or Nc) as queries and Nb as targets (Nq length ≤ Nb
length in alignments) and vice versa (Nb as queries, Nq as targets, Nb length ≤ Nq length in
alignments).
A mod-induced insertion in Nq is a gap in Nb (or M), whereas a del in Nq corresponds to nts
in Nb (or M). The position of an insertion in Nq or a del in M is marked as the first nt of
the insertion or del. The position of an insertion in M or a del in Nq is marked by the
position of the adjacent 5' nt."""
count_of_Nq_with_indels = 0
dict_M = self.dict_M
max_length_3prime_terminus = self.max_length_3prime_terminus
for name_Nq, match_df_Nq in match_df.groupby('Nq_name'):
names_M = match_df_Nq['M_name']
if len(set(names_M)) > 1:
# Ignore Nq with indels that can arise from multiple M for simplicity's sake. This
# can exclude real molecules with indels. For instance, a mod occurring next to a
# SNV that distinguishes two M could generate a del that removes the SNV in both M,
# resulting in a single Nq derived from two M.
continue
# Ignore Nq that align to different places in M.
if queries_are_Nb:
# When searching Nb against Nq, the start position of Nq in M is ≤ 0.
starts_Nq_in_M = match_df_Nq['Nb_start_in_M'] - match_df_Nq['Nb_start_in_Nq']
if len(set(starts_Nq_in_M)) > 1:
continue
else:
# When searching Nq against Nb, the start position of Nq in M is ≥ 0.
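# For example (hypothetical values), an Nq starting at position 2 of an Nb that itself starts at
# position 5 of M starts at 2 + 5 = 7 in M.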
starts_Nq_in_M = match_df_Nq['Nq_start_in_Nb'] + match_df_Nq['Nb_start_in_M']
if len(set(starts_Nq_in_M)) > 1:
continue
start_Nq_in_M = starts_Nq_in_M.iat[0]
insert_length_configs = []
sum_insert_lengths = []
for insert_length_config in match_df_Nq['insert_lengths']:
if insert_length_config:
parsed_insert_length_config = tuple(map(int, insert_length_config.split(',')))
insert_length_configs.append(parsed_insert_length_config)
sum_insert_lengths.append(sum(parsed_insert_length_config))
else:
insert_length_configs.append(tuple())
sum_insert_lengths.append(0)
sum_insert_lengths = np.array(sum_insert_lengths)
del_length_configs = []
sum_del_lengths = []
for del_length_config in match_df_Nq['del_lengths']:
if del_length_config:
parsed_del_length_config = tuple(map(int, del_length_config.split(',')))
del_length_configs.append(parsed_del_length_config)
sum_del_lengths.append(sum(parsed_del_length_config))
else:
del_length_configs.append(tuple())
sum_del_lengths.append(0)
sum_del_lengths = np.array(sum_del_lengths)
# If Nq length ≤ M length, Nq must align to the 3' end of M. If Nq length > M length, Nq
# must have the same number of extra 3' nts in all alignments.
stops_Nq_in_M = starts_Nq_in_M + match_df_Nq['Nq_length'] - sum_insert_lengths + sum_del_lengths
if len(set(stops_Nq_in_M)) > 1:
continue
stop_Nq_in_M = stops_Nq_in_M.iat[0]
name_M = names_M.iat[0]
try:
results_M_dict = results_dict[name_M]
seq_M = results_M_dict['M_seq']
except KeyError:
results_M_dict = None
seq_M = dict_M[name_M]
if queries_are_Nb:
# Ensure that the 3' overhang of Nq in M does not exceed the maximum 3' terminus
# length allowed in profiling. This heuristic mainly addresses an inconsistency
# caused by chimeric Nc targets with a 5' part that is tRNA. The Nb query may align
# to the 5' part of the target, resulting in numerous extra 3' nts in Nc. Were these
# sequences allowed, this would be the only case in the workflow where the 5' part
# of a chimera is counted as tRNA.
if stop_Nq_in_M - seq_M.length > max_length_3prime_terminus:
continue
else:
if stop_Nq_in_M != seq_M.length:
continue
# Nq is now known to have indels. Their locations are to be determined.
count_of_Nq_with_indels += 1
seq_Nq = dict_N.pop(name_Nq)
if not results_M_dict:
results_dict[name_M] = results_M_dict = {'M_seq': seq_M}
results_M_dict['Nq_seqs'] = []
results_M_dict['Nq_starts_in_M'] = []
results_M_dict['Nq_stops_in_M'] = []
results_M_dict['Nq_insert_starts'] = []
results_M_dict['M_insert_starts'] = []
results_M_dict['insert_lengths'] = []
results_M_dict['Nq_del_starts'] = []
results_M_dict['M_del_starts'] = []
results_M_dict['del_lengths'] = []
results_M_dict['Nq_seqs'].append(seq_Nq)
results_M_dict['Nq_starts_in_M'].append(start_Nq_in_M)
results_M_dict['Nq_stops_in_M'].append(stop_Nq_in_M)
indel_configs = []
if queries_are_Nb:
align_starts_in_Nq = match_df_Nq['Nb_start_in_Nq']
align_starts_in_Nb = [0] * len(match_df_Nq)
else:
align_starts_in_Nq = [0] * len(match_df_Nq)
align_starts_in_Nb = match_df_Nq['Nq_start_in_Nb']
for (align_start_in_Nq,
align_start_in_Nb,
start_Nb_in_M,
align_insert_starts_Nq,
align_insert_starts_Nb,
insert_lengths,
align_del_starts_Nq,
align_del_starts_Nb,
del_lengths) in zip(align_starts_in_Nq,
align_starts_in_Nb,
match_df_Nq['Nb_start_in_M'],
match_df_Nq['Nq_align_insert_starts'],
match_df_Nq['Nb_align_insert_starts'],
insert_length_configs,
match_df_Nq['Nq_align_del_starts'],
match_df_Nq['Nb_align_del_starts'],
del_length_configs):
indel_config = []
if align_insert_starts_Nq:
indel_config.append(tuple(
[align_insert_start_Nq + align_start_in_Nq for align_insert_start_Nq
in map(int, align_insert_starts_Nq.split(','))]))
indel_config.append(tuple(
[align_insert_start_Nb + align_start_in_Nb + start_Nb_in_M for align_insert_start_Nb
in map(int, align_insert_starts_Nb.split(','))]))
indel_config.append(insert_lengths)
else:
indel_config.append(tuple())
indel_config.append(tuple())
indel_config.append(tuple())
if align_del_starts_Nq:
indel_config.append(tuple(
[align_del_start_Nq + align_start_in_Nq for align_del_start_Nq
in map(int, align_del_starts_Nq.split(','))]))
indel_config.append(tuple(
[align_del_start_Nb + align_start_in_Nb for align_del_start_Nb
in map(int, align_del_starts_Nb.split(','))]))
indel_config.append(del_lengths)
else:
indel_config.append(tuple())
indel_config.append(tuple())
indel_config.append(tuple())
indel_configs.append(tuple(indel_config))
indel_configs = list(set(tuple(indel_configs)))
if len(indel_configs) > 1:
# Determine the configuration of indels in Nq from the different possibilities
# suggested by the alignments.
indel_config = self.select_indel_config(indel_configs, seq_M.sub_positions)
else:
# Only one configuration of indels in N was found.
indel_config = indel_configs.pop()
results_M_dict['Nq_insert_starts'].append(indel_config[0])
results_M_dict['M_insert_starts'].append(indel_config[1])
results_M_dict['insert_lengths'].append(indel_config[2])
results_M_dict['Nq_del_starts'].append(indel_config[3])
results_M_dict['M_del_starts'].append(indel_config[4])
results_M_dict['del_lengths'].append(indel_config[5])
return count_of_Nq_with_indels
def select_indel_config(self, indel_configs, sub_positions):
"""This helper method for `process_Nq_with_indels` is called when multiple possible indel
configurations are found for a given N aligned with M. The configuration that best fits the
substitution configuration of M is selected. Fit is determined by the distance of indels to
the closest substitution sites and the positions of indels relative to those substitutions
-- when choosing between two indel candidates the same distance from a substitution, choose
the more 5' indel."""
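# Illustrative sketch (hypothetical positions): with sub positions (10, 20) and an insertion at
# position 13 (midpoint 13.5), bisect_left returns 1; the 5' distance (3.5) is smaller than the
# 3' distance (6.5), so 3.5 is added to the running sum and the indel is not counted as lying 5'
# of its closest sub.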
sum_sub_distances = []
counts_5prime_indels = []
for indel_config in indel_configs:
# Count the number of indels that occur to the 5' rather than the 3' side of the closest sub.
count_5prime_indels = 0
# Analyze insertions.
sum_sub_dist = 0
for insert_start_pos, insert_length in zip(indel_config[1], indel_config[2]):
insert_midpoint_pos = insert_start_pos + 0.5
sub_index = bisect_left(sub_positions, insert_midpoint_pos)
if sub_index == 0:
sum_sub_dist += sub_positions[sub_index] - insert_midpoint_pos
count_5prime_indels += 1
elif sub_index == len(sub_positions):
sum_sub_dist += insert_midpoint_pos - sub_positions[sub_index - 1]
else:
sub_pos_5prime = sub_positions[sub_index - 1]
sub_pos_3prime = sub_positions[sub_index]
sub_dist_5prime = insert_midpoint_pos - sub_pos_5prime
sub_dist_3prime = sub_pos_3prime - insert_midpoint_pos
if sub_dist_5prime >= sub_dist_3prime:
sum_sub_dist += sub_dist_3prime
count_5prime_indels += 1
else:
sum_sub_dist += sub_dist_5prime
# Analyze deletions.
for del_start_pos, del_length in zip(indel_config[4], indel_config[5]):
del_midpoint_pos = del_start_pos + (del_length - 1) / 2
sub_index = bisect_left(sub_positions, del_midpoint_pos)
if sub_index == 0:
sum_sub_dist += sub_positions[sub_index] - del_midpoint_pos
count_5prime_indels += 1
elif sub_index == len(sub_positions):
sum_sub_dist += del_midpoint_pos - sub_positions[sub_index - 1]
else:
sub_pos_5prime = sub_positions[sub_index - 1]
sub_pos_3prime = sub_positions[sub_index]
sub_dist_5prime = del_midpoint_pos - sub_pos_5prime
sub_dist_3prime = sub_pos_3prime - del_midpoint_pos
if sub_dist_5prime >= sub_dist_3prime:
sum_sub_dist += sub_dist_3prime
count_5prime_indels += 1
else:
sum_sub_dist += sub_dist_5prime
sum_sub_distances.append(sum_sub_dist)
counts_5prime_indels.append(count_5prime_indels)
min_sum_sub_dist = min(sum_sub_distances)
max_5prime_indel_count = -1
selected_config_index = -1
config_index = 0
for sub_sum_dist, count_5prime_indels in zip(sum_sub_distances, counts_5prime_indels):
if min_sum_sub_dist == sub_sum_dist:
if count_5prime_indels > max_5prime_indel_count:
# If multiple indel configurations somehow have both the same distance of indels
# from subs AND the same count of indels to the 5' side of the closest
# subs, then the first configuration is selected.
selected_config_index = config_index
max_5prime_indel_count = count_5prime_indels
config_index += 1
return indel_configs[selected_config_index]
def add_Ni_to_M(self, results_dict):
"""This helper method for `find_indels` generates Ni from Nq and adds them to M."""
dict_Ni_string = {}
for name_M, results_M_dict in results_dict.items():
names_Ni, starts_Ni_in_M = self.make_Ni(results_M_dict, dict_Ni_string)
seq_M = results_M_dict['M_seq']
seq_M.names_Ni = tuple(names_Ni)
seq_M.starts_Ni_in_M = tuple(starts_Ni_in_M)
def make_Ni(self, results_M_dict, dict_Ni_string):
"""Given an M with mapped Nq containing indels, make Ni from Nq.
Nq may be Nqf or Nc. Each Nqf must contain ≥ 1 Tf, each of which is formed from ≥ 1 Uf.
Each Nc must contain ≥ 1 Tc, each of which is formed from 1 Uc. Some Nq may contain Tm,
each of which can only contain 1 Um. Uf/Uc are adjusted for extra 5'/3' nts, forming new Uip
objects. Uip are pooled and trimmed, forming new Tip. 3' dereplication of Tip strings
produces clusters of Tip in each new Ni. Ni strings are *already known* from mapping Nq to
M (Nb). Ni objects can then be instantiated from their component Tip objects. Tim objects,
derived from Tm adjusted for extra 5'/3' nts, are then added to Ni. Tim membership in Ni
was known from the relation of Nq to Ni: Tm are part of Nq, and each Tm yields one Tim."""
# Indels have not been added to M, which only has subs.
seq_M = results_M_dict['M_seq']
# From the search against M, Nq may have been found to have extra 5'/3' nts. The following
# lists store information on Ni created from Nq.
strings_Ni = []
xtra_5primes_Nq = []
xtra_3primes_Nq = []
starts_Ni_in_M = []
length_M = seq_M.length
# Relate Tip strings to lists of Uip objects.
dict_Uip_in_Tip = defaultdict(list)
# Relate Ni strings to Tim objects.
dict_Uim_Tim_in_Ni = defaultdict(list)
# Relate Ti strings to their start positions in Ni.
dict_Ti_start_in_Ni = {}
dict_Ti_strings_from_T_name = defaultdict(list)
excluded_Ti_strings = []
for seq_Nq, start_Nq_in_M, stop_Nq_in_M in zip(results_M_dict['Nq_seqs'],
results_M_dict['Nq_starts_in_M'],
results_M_dict['Nq_stops_in_M']):
string_Nq = seq_Nq.string
length_Nq = len(string_Nq)
# If Nq has 5' nts overhanging M, the start position of Nq in M is negative.
xtra_5prime_Nq = -start_Nq_in_M if start_Nq_in_M < 0 else 0
# If Nq has 3' nts overhanging M, indicating an adjustment is needed in the 3'
# terminus, the stop position of Nq in M lies beyond the length of M.
xtra_3prime_Nq = stop_Nq_in_M - length_M if stop_Nq_in_M > length_M else 0
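# Trim any nts overhanging the 5' and 3' ends of M from Nq to produce the Ni string.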
string_Ni = string_Nq[xtra_5prime_Nq: length_Nq - xtra_3prime_Nq]
strings_Ni.append(string_Ni)
xtra_5primes_Nq.append(xtra_5prime_Nq)
xtra_3primes_Nq.append(xtra_3prime_Nq)
starts_Ni_in_M.append(start_Nq_in_M + xtra_5prime_Nq)
self.process_T_in_Nq(seq_Nq,
string_Ni,
xtra_5prime_Nq,
xtra_3prime_Nq,
dict_Ti_start_in_Ni,
dict_Uip_in_Tip,
dict_Uim_Tim_in_Ni,
dict_Ti_strings_from_T_name,
excluded_Ti_strings)
# 3' dereplicate Tip to form Ni clusters and then Ni objects.
seqs_Tip = []
dict_Ti = self.dict_Ti
dict_Ui = self.dict_Ui
for string_Tip, seqs_Uip in dict_Uip_in_Tip.items():
if string_Tip in excluded_Ti_strings:
# Different Tip strings were found to originate from the same T.
pass
else:
seq_Tip = TrimmedIndelSequence(string_Tip, seqs_Uip)
seqs_Tip.append(seq_Tip)
dict_Ti[seq_Tip.name] = seq_Tip
for seq_Uip in seqs_Uip:
dict_Ui[seq_Uip.name] = seq_Uip
names_Tip = [seq_Tip.name for seq_Tip in seqs_Tip]
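# Prefix dereplication of the reversed Tip strings is equivalent to 3' dereplication of
# the original strings; each resulting cluster yields one Ni.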
reverse_Tip_strings = [seq_Tip.string[::-1] for seq_Tip in seqs_Tip]
extras_Tip = [(seq_Tip, dict_Ti_start_in_Ni[seq_Tip.string]) for seq_Tip in seqs_Tip]
clusters = Dereplicator(names_Tip, reverse_Tip_strings, extras=extras_Tip).prefix_dereplicate()
names_Ni = []
dict_Ni = self.dict_Ni
for cluster in clusters:
seqs_Tip = []
starts_Tip_in_Ni = []
for extra in cluster.member_extras:
seqs_Tip.append(extra[0])
starts_Tip_in_Ni.append(extra[1])
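# The first member of the cluster is the seed Tip, the longest in the cluster; its string
# is taken as the Ni string and used to look up the per-Nq information recorded above.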
string_Ni = seqs_Tip[0].string
index_Ni = strings_Ni.index(string_Ni)
xtra_5prime_Nq = xtra_5primes_Nq[index_Ni]
# Find the positions of insertions in Ni, adjusting for extra 5' nts.
insert_starts_Ni = tuple([insert_start_Nq - xtra_5prime_Nq for insert_start_Nq in results_M_dict['Nq_insert_starts'][index_Ni]])
# Find the positions of deletions in Ni, adjusting for extra 5' nts.
del_starts_Ni = tuple([del_start_Nq - xtra_5prime_Nq for del_start_Nq in results_M_dict['Nq_del_starts'][index_Ni]])
contains_anticodon = self.check_normalized_indel_sequence_for_anticodon(seq_M, starts_Ni_in_M[index_Ni])
seq_Ni = NormalizedIndelSequence(string_Ni,
seqs_Tip,
starts_Tip_in_Ni,
seq_M.name,
insert_starts_Ni,
tuple(results_M_dict['M_insert_starts'][index_Ni]),
tuple(results_M_dict['insert_lengths'][index_Ni]),
del_starts_Ni,
tuple(results_M_dict['M_del_starts'][index_Ni]),
tuple(results_M_dict['del_lengths'][index_Ni]),
contains_anticodon)
# Add Tim to Ni.
seqs_Tim = []
try:
for seq_Uim, seq_Tim, start_Tim_in_Ni in dict_Uim_Tim_in_Ni[string_Ni]:
if seq_Tim.string not in excluded_Ti_strings:
name_Tim = seq_Tim.name
seq_Ni.names_T.append(name_Tim)
seq_Ni.starts_T_in_N.append(start_Tim_in_Ni)
dict_Ui[name_Tim] = seq_Uim
dict_Ti[name_Tim] = seq_Tim
except KeyError:
# Ni does not contain any Tim.
pass
seq_Ni.init(seqs_Tip + seqs_Tim, self.dict_Ui)
dict_Ni[seq_Ni.name] = seq_Ni
names_Ni.append(seq_Ni.name)
return names_Ni, starts_Ni_in_M
def process_T_in_Nq(self,
seq_Nq,
string_Ni,
xtra_5prime_Nq,
xtra_3prime_Nq,
dict_Ti_start_in_Ni,
dict_Uip_in_Tip,
dict_Uim_Tim_in_Ni,
dict_Ti_strings_from_T_name,
excluded_Ti_strings):
"""Process T in an Nq mapped to M with indels. Generate Ui and Tim objects. Gather
information to generate Tip objects, which can come from multiple Ni."""
length_Nq = len(seq_Nq.string)
dict_Um = self.dict_Um
try:
# Nq is Nf.
categories_T = seq_Nq.categories_T
except AttributeError:
# Nq is Nc.
categories_T = ['Tc_nontrna'] * len(seq_Nq.names_T)
for name_T, category_T, start_T_in_Nq in zip(seq_Nq.names_T, categories_T, seq_Nq.starts_T_in_N):
seq_T = getattr(self, 'dict_' + category_T)[name_T]
string_T = seq_T.string
# T has 5' nts overhanging M when T is within `xtra_5prime_Nq` nts of the start of Nq.
# These overhanging nts are trimmed in Ti.
xtra_5prime_T = xtra_5prime_Nq - start_T_in_Nq if start_T_in_Nq < xtra_5prime_Nq else 0
type_T = type(seq_T)
# Extra 3' nts are handled differently in Tm and Tp.
if type_T is TrimmedMappedSequence:
# Find the distance from the end of Tm to the end of N.
delta_3prime = length_Nq - start_T_in_Nq - len(string_T)
# If Tm has 3' nts overhanging M, the stop position of Tm in M lies beyond the
# length of M.
xtra_3prime_T = xtra_3prime_Nq - delta_3prime if delta_3prime < xtra_3prime_Nq else 0
else:
# Tp ends at the same point as Nq, so the number of extra 3' nts is the same in
# each.
xtra_3prime_T = xtra_3prime_Nq
string_Ti = string_T[xtra_5prime_T: len(string_T) - xtra_3prime_T]
# It is required that the same T generate the same Ti in alignments to different M.
# Conflicting Ti strings from the same T are recorded and their Ti objects later purged.
strings_Ti_from_T_name = dict_Ti_strings_from_T_name[seq_T.name]
if len(strings_Ti_from_T_name) == 0:
strings_Ti_from_T_name.append(string_Ti)
elif len(strings_Ti_from_T_name) == 1:
if string_Ti != strings_Ti_from_T_name[0]:
excluded_Ti_strings.append(strings_Ti_from_T_name[0])
excluded_Ti_strings.append(string_Ti)
strings_Ti_from_T_name.append(string_Ti)
else:
if string_Ti not in strings_Ti_from_T_name:
excluded_Ti_strings.append(string_Ti)
strings_Ti_from_T_name.append(string_Ti)
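# A T that starts within the trimmed 5' overhang of Nq maps to position 0 of Ni;
# otherwise its start in Ni is its start in Nq minus the length of the overhang.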
start_Ti_in_Ni = 0 if start_T_in_Nq < xtra_5prime_Nq else start_T_in_Nq - xtra_5prime_Nq
dict_Ti_start_in_Ni[string_Ti] = start_Ti_in_Ni
# Make Ui objects.
for index_U, name_U in enumerate(seq_T.names_U):
if type_T == TrimmedMappedSequence:
seq_Ui = UniqueIndelSequence(dict_Um[name_U], xtra_3prime_T, xtra_5prime_T)
# There is only 1 Um per Tm, so the loop can be terminated after the first
# iteration.
break
# Only Up are considered after this point in the loop. Due to 5'/3' adjustments, Up
# from different Tp and Np can end up in the same Tip. Therefore, initialize Tip
# after processing all Nq.
if type_T == TrimmedTruncatedProfileSequence:
seq_U = getattr(self, 'dict_Uc_' + seq_T.category)[name_U]
# Uc are flush with Tc at the 5' end, so they undergo the same 5' adjustment
# when converted into Ui and Ti.
length_5prime_U = xtra_5prime_T
else:
seq_U = getattr(self, 'dict_' + seq_T.categories_U[index_U])[name_U]
# Adjust the nts beyond the 5' tRNA terminus as needed in Up.
length_5prime_U = xtra_5prime_T + seq_U.xtra_5prime_length
length_3prime_U = xtra_3prime_T + seq_U.length_3prime_terminus
seq_Uip = UniqueIndelSequence(seq_U, length_3prime_U, length_5prime_U)
dict_Uip_in_Tip[string_Ti].append(seq_Uip)
else:
# This point is reached for Tp.
continue
# This point is reached for Tm. Since this is a mapped sequence, Tim is a mirror of Uim
# and the string is not trimmed.
seq_Tim = TrimmedIndelSequence(seq_Ui.string, [seq_Ui])
dict_Uim_Tim_in_Ni[string_Ni].append((seq_Ui, seq_Tim, start_Ti_in_Ni))
def check_normalized_indel_sequence_for_anticodon(self, seq_M, start_Ni_in_M):
# Determine whether Ni contains the anticodon.
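# The anticodon loop position is taken from the first Tf of the Nf sharing M's name.
# Ni is assumed to extend to the 3' end of M (normalized seqs are aligned at the 3' end),
# so only the 5' bound of Ni needs checking.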
seq_Tf = self.dict_Tf[self.dict_Nf[seq_M.name].names_T[0]]
try:
anticodon_loop_start = seq_Tf.feature_starts[self.RELATIVE_ANTICODON_LOOP_INDEX]
except IndexError:
# The anticodon loop was not reached in the profile.
return False
return anticodon_loop_start >= start_Ni_in_M
def report_M_stats(self):
"""Report to terminal stats on M. Stats on subs were already reported."""
spec_read_count_M = 0
nonspec_read_count_M = 0
spec_short_5prime_read_count_M = 0
spec_long_5prime_read_count_M = 0
nonspec_short_5prime_read_count_M = 0
nonspec_long_5prime_read_count_M = 0
mean_spec_cov_M = 0
mean_nonspec_cov_M = 0
total_length_M = 0
max_spec_cov_M = 0
max_nonspec_cov_M = 0
max_total_cov_M = 0
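# Accumulate read counts, length-weighted mean coverages, and maximum coverages across all M.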
for seq_M in self.dict_M.values():
spec_read_count_M += seq_M.spec_read_count
nonspec_read_count_M += seq_M.nonspec_read_count
if seq_M.spec_read_xtra_5prime_count:
if seq_M.spec_long_5prime_extension_dict:
spec_long_5prime_read_count = sum(seq_M.spec_long_5prime_extension_dict.values())
spec_long_5prime_read_count_M += spec_long_5prime_read_count
spec_short_5prime_read_count_M += seq_M.spec_read_xtra_5prime_count - spec_long_5prime_read_count
else:
spec_short_5prime_read_count_M += seq_M.spec_read_xtra_5prime_count
if seq_M.nonspec_read_xtra_5prime_count:
if seq_M.nonspec_long_5prime_extension_dict:
nonspec_long_5prime_read_count = sum(seq_M.nonspec_long_5prime_extension_dict.values())
nonspec_long_5prime_read_count_M += nonspec_long_5prime_read_count
nonspec_short_5prime_read_count_M += seq_M.nonspec_read_xtra_5prime_count - nonspec_long_5prime_read_count
else:
nonspec_short_5prime_read_count_M += seq_M.nonspec_read_xtra_5prime_count
length_M = len(seq_M.consensus_string)
mean_spec_cov = seq_M.mean_spec_cov
mean_nonspec_cov = seq_M.mean_nonspec_cov
mean_spec_cov_M += mean_spec_cov * length_M
mean_nonspec_cov_M += mean_nonspec_cov * length_M
total_length_M += length_M
if mean_spec_cov > max_spec_cov_M:
max_spec_cov_M = mean_spec_cov
if mean_nonspec_cov > max_nonspec_cov_M:
max_nonspec_cov_M = mean_nonspec_cov
if mean_spec_cov + mean_nonspec_cov > max_total_cov_M:
max_total_cov_M = mean_spec_cov + mean_nonspec_cov
mean_spec_cov_M /= total_length_M
mean_nonspec_cov_M /= total_length_M
if not self.skip_indel_profiling:
count_indel_M = 0
count_insert_M = 0
count_del_M = 0
for seq_M in self.dict_M.values():
if seq_M.insert_starts:
count_insert_M += 1
if seq_M.del_starts:
count_del_M += 1
if seq_M.insert_starts or seq_M.del_starts:
count_indel_M += 1
run = self.run
run.info_single("Final results of modification analysis", nl_before=2, nl_after=1)
run.info_single("Modified seqs")
run.info("Specific reads", spec_read_count_M)
run.info("Nonspecific reads", nonspec_read_count_M)
run.info(f"Specific reads with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_short_5prime_read_count_M)
run.info(f"Specific reads with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", spec_long_5prime_read_count_M)
run.info(f"Nonspecific reads with 1-{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_short_5prime_read_count_M)
run.info(f"Nonspecific reads with ≥{MIN_LENGTH_LONG_5PRIME_EXTENSION} extra 5' nts", nonspec_long_5prime_read_count_M)
run.info("Mean specific coverage", round(mean_spec_cov_M, 2))
run.info("Mean nonspecific coverage", round(mean_nonspec_cov_M, 2))
run.info("Max specific coverage", round(max_spec_cov_M, 2))
run.info("Max nonspecific coverage", round(max_nonspec_cov_M, 2))
run.info("Max total coverage", round(max_total_cov_M, 2), nl_after=0 if self.skip_indel_profiling else 1)
if not self.skip_indel_profiling:
run.info_single("Results of indel search")
run.info("Modified seqs with indels", count_indel_M)
run.info("Modified seqs with insertions", count_insert_M)
run.info("Modified seqs with deletions", count_del_M)
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
get_meta_value = trnaseq_db.db.get_meta_value
count_Nqf_with_indels = get_meta_value('count_Nqf_with_indels')
count_Nc_with_indels = get_meta_value('count_Nc_with_indels')
trnaseq_db.disconnect()
run.info("Normalized tRNA seqs found to have indels", count_Nqf_with_indels)
run.info("Normalized trunc seqs found to have indels", count_Nc_with_indels, nl_after=2)
def report_stats(self):
"""Add final stats to the db and write them to a summary file."""
# Define Ntrna to encompass Nf and Ni.
anticodon_profiled_trna_reads = 0
complete_profiled_trna_reads = 0
spec_reads_Ntrna = 0
nonspec_reads_Ntrna = 0
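# Reads are counted as specific when their T belongs to exactly one N and as nonspecific otherwise.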
for seq_Tf in self.dict_Tf.values():
if seq_Tf.contains_anticodon:
anticodon_profiled_trna_reads += seq_Tf.read_count
if seq_Tf.has_complete_feature_set:
complete_profiled_trna_reads += seq_Tf.read_count
if len(seq_Tf.names_N) == 1:
spec_reads_Ntrna += seq_Tf.read_count
else:
nonspec_reads_Ntrna += seq_Tf.read_count
for seq_Tc_trna in self.dict_Tc_trna.values():
if seq_Tc_trna.contains_anticodon:
anticodon_profiled_trna_reads += seq_Tc_trna.read_count
if len(seq_Tc_trna.names_N) == 1:
spec_reads_Ntrna += seq_Tc_trna.read_count
else:
nonspec_reads_Ntrna += seq_Tc_trna.read_count
profiled_trna_reads = spec_reads_Ntrna + nonspec_reads_Ntrna
for seq_Tm in self.dict_Tm.values():
if len(seq_Tm.names_N) == 1:
spec_reads_Ntrna += seq_Tm.read_count
else:
nonspec_reads_Ntrna += seq_Tm.read_count
for seq_Ti in self.dict_Ti.values():
if len(seq_Ti.names_N) == 1:
spec_reads_Ntrna += seq_Ti.read_count
else:
nonspec_reads_Ntrna += seq_Ti.read_count
trna_reads = spec_reads_Ntrna + nonspec_reads_Ntrna
# Define Nfu to be Nf not in M.
count_M = len(self.dict_M)
count_Nfu = 0
for seq_Nf in self.dict_Nf.values():
if not seq_Nf.names_M:
count_Nfu += 1
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
set_meta_value = trnaseq_db.db.set_meta_value
set_meta_value('trna_reads', trna_reads)
set_meta_value('profiled_trna_reads', profiled_trna_reads)
set_meta_value('anticodon_profiled_trna_reads', anticodon_profiled_trna_reads)
set_meta_value('complete_profiled_trna_reads', complete_profiled_trna_reads)
set_meta_value('spec_reads_Ntrna', spec_reads_Ntrna)
set_meta_value('nonspec_reads_Ntrna', nonspec_reads_Ntrna)
set_meta_value('count_M', count_M)
set_meta_value('count_Nfu', count_Nfu)
get_summary_line = self.get_summary_line
get_meta_value = trnaseq_db.db.get_meta_value
with open(self.analysis_summary_path, 'a') as f:
f.write(get_summary_line("Input reads", get_meta_value('input_reads')))
f.write(get_summary_line("Input uniq seqs", get_meta_value('input_U')))
f.write(get_summary_line("tRNA reads", trna_reads))
f.write(get_summary_line("Profiled tRNA reads", profiled_trna_reads))
f.write(get_summary_line("Profiled tRNA reads with anticodon", anticodon_profiled_trna_reads))
f.write(get_summary_line("Profiled reads with complete features", complete_profiled_trna_reads))
f.write(get_summary_line("Reads specific to normalized tRNA seqs", spec_reads_Ntrna))
f.write(get_summary_line("Reads nonspecific to normalized tRNA seqs", nonspec_reads_Ntrna))
f.write(get_summary_line("Potentially modified seqs", count_M))
f.write(get_summary_line("Normalized tRNA seqs without detected modifications", count_Nfu))
trnaseq_db.disconnect()
self.run.info("Summary", self.analysis_summary_path, nl_before=2)
def write_feature_table(self):
self.progress.new("Writing tRNA-seq db table of profiled tRNA features")
self.progress.update("...")
rows = []
TRNA_FEATURE_NAMES = self.TRNA_FEATURE_NAMES
for dict_name, dict_U in zip(('dict_Uf', 'dict_Us', 'dict_Uc_trna'),
(self.dict_Uf, self.dict_Us, self.dict_Uc_trna)):
for seq_U in dict_U.values():
if dict_name == 'dict_Uc_trna':
has_complete_feature_set = False
num_extrap_5prime_nts = 0
xtra_5prime_length = 0
else:
has_complete_feature_set = seq_U.has_complete_feature_set
num_extrap_5prime_nts = seq_U.num_extrap_5prime_nts
xtra_5prime_length = seq_U.xtra_5prime_length
length_U = len(seq_U.string)
row = (
(seq_U.name,
has_complete_feature_set,
seq_U.anticodon_string,
seq_U.anticodon_aa,
length_U,
length_U - seq_U.profiled_seq_length,
seq_U.num_conserved,
seq_U.num_unconserved,
seq_U.num_paired,
seq_U.num_unpaired,
num_extrap_5prime_nts,
xtra_5prime_length,
seq_U.length_3prime_terminus)
# When tRNA features are not found at the 5' end of the read, the start and stop
# positions of these features also are not found.
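# One None is recorded per missing feature, and the tuple is doubled to fill both the
# start and stop columns of each missing feature.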
+ tuple([None for _ in range((len(TRNA_FEATURE_NAMES) - len(seq_U.feature_starts)))]) * 2
+ tuple(chain(*zip(
[str(start) if isinstance(start, int)
else ','.join(map(str, start))
for start in seq_U.feature_starts],
# Convert pythonic stop position to real stop position of feature.
[str(stop - 1) if isinstance(stop, int)
else ','.join(str(strand_stop - 1) for strand_stop in stop)
for stop in seq_U.feature_stops])))
# The α and β sections of the D loop are "subfeatures," not "features," so add them
# to the row after the features.
+ (seq_U.alpha_start,
seq_U.alpha_stop - 1 if seq_U.alpha_stop else None,
seq_U.beta_start,
seq_U.beta_stop - 1 if seq_U.beta_stop else None)
)
rows.append(row)
if rows:
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
trnaseq_db.db._exec_many(
f'''INSERT INTO feature VALUES ({','.join('?' * len(tables.trnaseq_feature_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_unconserved_table(self):
self.progress.new("Writing tRNA-seq db table of unconserved nts in fully profiled tRNA")
self.progress.update("...")
rows = []
for seq_Uf in self.dict_Uf.values():
for unconserved_tuple in seq_Uf.unconserved_info:
rows.append((seq_Uf.name, ) + unconserved_tuple)
if rows:
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
trnaseq_db.db._exec_many(
f'''INSERT INTO unconserved VALUES ({','.join('?' * len(tables.trnaseq_unconserved_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_unpaired_table(self):
self.progress.new("Writing tRNA-seq db table of unpaired nts in fully profiled tRNA")
self.progress.update("...")
rows = []
for seq_Uf in self.dict_Uf.values():
for unpaired_tuple in seq_Uf.unpaired_info:
rows.append((seq_Uf.name, ) + unpaired_tuple)
if rows:
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
trnaseq_db.db._exec_many(
f'''INSERT INTO unpaired VALUES ({','.join('?' * len(tables.trnaseq_unpaired_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_sequences_table(self):
self.progress.new("Writing tRNA-seq db table of unique tRNA seqs")
self.progress.update("...")
rows = []
for info_U, dict_U in zip(('full_profile', 'transferred_profile', 'truncated_profile', 'mapped', 'indel_aligned'),
(self.dict_Uf, self.dict_Us, self.dict_Uc_trna, self.dict_Um, self.dict_Ui)):
for seq_U in dict_U.values():
rows.append((seq_U.name, seq_U.read_count, info_U, seq_U.string))
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
# Overwrite the existing table if starting from a checkpoint.
if self.load_checkpoint:
trnaseq_db.db.drop_table('sequences')
trnaseq_db.db.create_table('sequences',
tables.trnaseq_sequences_table_structure,
tables.trnaseq_sequences_table_types)
if rows:
trnaseq_db.db._exec_many(
f'''INSERT INTO sequences VALUES ({','.join('?' * len(tables.trnaseq_sequences_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_trimmed_table(self):
self.progress.new("Writing tRNA-seq db table of trimmed tRNA seqs")
self.progress.update("...")
rows = []
for info_T, dict_T in zip(('full_profile', 'truncated_profile', 'mapped', 'indel_aligned'),
(self.dict_Tf, self.dict_Tc_trna, self.dict_Tm, self.dict_Ti)):
for seq_T in dict_T.values():
termini_3prime = ''
read_counts_3prime_termini = ''
if info_T != 'mapped':
for string_3prime_terminus, read_count in seq_T.read_3prime_terminus_count_dict.items():
termini_3prime += string_3prime_terminus + ','
read_counts_3prime_termini += str(read_count) + ','
if info_T != 'truncated_profile':
uniq_with_xtra_5prime_count = seq_T.uniq_with_xtra_5prime_count
read_with_xtra_5prime_count = seq_T.read_with_xtra_5prime_count
else:
uniq_with_xtra_5prime_count = 0
read_with_xtra_5prime_count = 0
rows.append(
(seq_T.name,
len(seq_T.names_U),
seq_T.read_count,
info_T,
seq_T.string,
len(seq_T.names_N),
uniq_with_xtra_5prime_count,
read_with_xtra_5prime_count,
termini_3prime,
read_counts_3prime_termini)
)
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
# Overwrite the existing table if starting from a checkpoint.
if self.load_checkpoint:
trnaseq_db.db.drop_table('trimmed')
trnaseq_db.db.create_table('trimmed',
tables.trnaseq_trimmed_table_structure,
tables.trnaseq_trimmed_table_types)
if rows:
trnaseq_db.db._exec_many(
f'''INSERT INTO trimmed VALUES ({','.join('?' * len(tables.trnaseq_trimmed_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_normalized_table(self):
self.progress.new("Writing tRNA-seq db table of fragment-dereplicated (\"normalized\") tRNA seqs")
self.progress.update("...")
rows = []
for info_N, dict_N in zip(('full_profile', 'indel_aligned'),
(self.dict_Nf, self.dict_Ni)):
for seq_N in dict_N.values():
spec_long_5prime_extensions = ''
spec_long_5prime_read_counts = ''
for string_5prime, read_count in sorted(seq_N.spec_long_5prime_extension_dict.items(),
key=lambda item: -len(item[0])):
spec_long_5prime_extensions += string_5prime + ','
spec_long_5prime_read_counts += str(read_count) + ','
nonspec_long_5prime_extensions = ''
nonspec_long_5prime_read_counts = ''
for string_5prime, read_count in sorted(seq_N.nonspec_long_5prime_extension_dict.items(),
key=lambda item: -len(item[0])):
nonspec_long_5prime_extensions += string_5prime + ','
nonspec_long_5prime_read_counts += str(read_count) + ','
spec_3prime_termini = ''
spec_3prime_terminus_read_counts = ''
for string_3prime_terminus, read_count in seq_N.spec_read_3prime_terminus_count_dict.items():
spec_3prime_termini += string_3prime_terminus + ','
spec_3prime_terminus_read_counts += str(read_count) + ','
nonspec_3prime_termini = ''
nonspec_3prime_terminus_read_counts = ''
for string_3prime_terminus, read_count in seq_N.nonspec_read_3prime_terminus_count_dict.items():
nonspec_3prime_termini += string_3prime_terminus + ','
nonspec_3prime_terminus_read_counts += str(read_count) + ','
rows.append(
(seq_N.name,
len(seq_N.names_T),
info_N,
seq_N.mean_spec_cov,
seq_N.mean_nonspec_cov,
','.join(map(str, seq_N.spec_covs)) + ',',
','.join(map(str, seq_N.nonspec_covs)) + ',',
seq_N.spec_read_count,
seq_N.nonspec_read_count,
seq_N.spec_read_xtra_5prime_count,
seq_N.nonspec_read_xtra_5prime_count,
seq_N.spec_map_read_count,
seq_N.nonspec_map_read_count,
spec_long_5prime_extensions,
spec_long_5prime_read_counts,
nonspec_long_5prime_extensions,
nonspec_long_5prime_read_counts,
spec_3prime_termini,
spec_3prime_terminus_read_counts,
nonspec_3prime_termini,
nonspec_3prime_terminus_read_counts)
)
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
# Overwrite the existing table if starting from a checkpoint.
if self.load_checkpoint:
trnaseq_db.db.drop_table('normalized')
trnaseq_db.db.create_table('normalized',
tables.trnaseq_normalized_table_structure,
tables.trnaseq_normalized_table_types)
if rows:
trnaseq_db.db._exec_many(
f'''INSERT INTO normalized VALUES ({','.join('?' * len(tables.trnaseq_normalized_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_modified_table(self):
self.progress.new("Writing tRNA-seq db table of potentially modified tRNA seqs")
self.progress.update("...")
rows = []
for seq_M in self.dict_M.values():
spec_long_5prime_extensions = ''
spec_long_5prime_read_counts = ''
for string_5prime, read_count in sorted(seq_M.spec_long_5prime_extension_dict.items(),
key=lambda item: -len(item[0])):
spec_long_5prime_extensions += string_5prime + ','
spec_long_5prime_read_counts += str(read_count) + ','
nonspec_long_5prime_extensions = ''
nonspec_long_5prime_read_counts = ''
for string_5prime, read_count in sorted(seq_M.nonspec_long_5prime_extension_dict.items(),
key=lambda item: -len(item[0])):
nonspec_long_5prime_extensions += string_5prime + ','
nonspec_long_5prime_read_counts += str(read_count) + ','
spec_3prime_termini = ''
spec_3prime_terminus_read_counts = ''
for string_3prime_terminus, read_count in seq_M.spec_read_3prime_terminus_count_dict.items():
spec_3prime_termini += string_3prime_terminus + ','
spec_3prime_terminus_read_counts += str(read_count) + ','
nonspec_3prime_termini = ''
nonspec_3prime_terminus_read_counts = ''
for string_3prime_terminus, read_count in seq_M.nonspec_read_3prime_terminus_count_dict.items():
nonspec_3prime_termini += string_3prime_terminus + ','
nonspec_3prime_terminus_read_counts += str(read_count) + ','
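# Coverages, substitution data, and indel data are serialized as trailing-comma-delimited
# strings; DatabaseConverter.load_trnaseq_db_seq_info parses them back by splitting on ','
# and dropping the final empty element.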
rows.append(
(seq_M.name,
seq_M.mean_spec_cov,
seq_M.mean_nonspec_cov,
','.join(map(str, seq_M.spec_covs)) + ',',
','.join(map(str, seq_M.nonspec_covs)) + ',',
','.join([str(sub_pos) for sub_pos in seq_M.sub_positions]) + ',')
+ tuple([','.join(map(str, seq_M.spec_sub_covs[:, nt_index - 1])) + ',' for nt_index in INT_NT_DICT])
+ tuple([','.join(map(str, seq_M.nonspec_sub_covs[:, nt_index - 1])) + ',' for nt_index in INT_NT_DICT])
+ (','.join([str(insert_start) for insert_start in seq_M.insert_starts]) + ',',
','.join([insert_string for insert_string in seq_M.insert_strings]) + ',',
','.join([str(spec_insert_cov) for spec_insert_cov in seq_M.spec_insert_covs]) + ',',
','.join([str(nonspec_insert_cov) for nonspec_insert_cov in seq_M.nonspec_insert_covs]) + ',',
','.join([str(del_start) for del_start in seq_M.del_starts]) + ',',
','.join([str(del_length) for del_length in seq_M.del_lengths]) + ',',
','.join([str(spec_del_cov) for spec_del_cov in seq_M.spec_del_covs]) + ',',
','.join([str(nonspec_del_cov) for nonspec_del_cov in seq_M.nonspec_del_covs]) + ',',
seq_M.consensus_string,
len(seq_M.names_Nb),
','.join(seq_M.names_Nb),
len(seq_M.names_Ni),
','.join(seq_M.names_Ni),
seq_M.spec_read_count,
seq_M.nonspec_read_count,
seq_M.spec_read_xtra_5prime_count,
seq_M.nonspec_read_xtra_5prime_count,
seq_M.spec_map_read_count,
seq_M.nonspec_map_read_count,
spec_long_5prime_extensions,
spec_long_5prime_read_counts,
nonspec_long_5prime_extensions,
nonspec_long_5prime_read_counts,
spec_3prime_termini,
spec_3prime_terminus_read_counts,
nonspec_3prime_termini,
nonspec_3prime_terminus_read_counts)
)
trnaseq_db = dbops.TRNASeqDatabase(self.trnaseq_db_path, quiet=True)
# Overwrite the existing table if starting from a checkpoint.
if self.load_checkpoint:
trnaseq_db.db.drop_table('modified')
trnaseq_db.db.create_table('modified',
tables.trnaseq_modified_table_structure,
tables.trnaseq_modified_table_types)
if rows:
trnaseq_db.db._exec_many(
f'''INSERT INTO modified VALUES ({','.join('?' * len(tables.trnaseq_modified_table_structure))})''',
rows)
trnaseq_db.disconnect()
self.progress.end()
def write_nontrna_supplement(self):
"""Write a supplementary file on Un and Uc_nontrna."""
self.progress.new("Writing file of unique seqs not identified as tRNA")
self.progress.update("...")
with open(self.path_Un_supplement, 'w') as file_Un_supplement:
file_Un_supplement.write("\t".join(self.UNIQ_NONTRNA_HEADER) + "\n")
for seq_Un in self.dict_Un.values():
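# Un lack a truncated profile index, so the corresponding column is left empty.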
file_Un_supplement.write(seq_Un.name + "\t"
+ str(seq_Un.read_count) + "\t"
+ "\t"
+ seq_Un.string + "\n")
for seq_Uc_nontrna in self.dict_Uc_nontrna.values():
file_Un_supplement.write(seq_Uc_nontrna.name + "\t"
+ str(seq_Uc_nontrna.read_count) + "\t"
+ str(seq_Uc_nontrna.trunc_profile_index) + "\t"
+ seq_Uc_nontrna.string + "\n")
self.progress.end()
self.run.info("Unique non-tRNA supplement", self.path_Un_supplement)
def write_Tf_ends_supplement(self):
"""Write a supplementary file showing the spectrum of 5'/3' extensions of Tf."""
self.progress.new("Writing file showing 5'/3' ends of trimmed, fully profiled tRNA seqs")
self.progress.update("...")
with open(self.path_Tf_ends, 'w') as file_Tf_ends:
file_Tf_ends.write("\t".join(self.TRIMMED_ENDS_HEADER) + "\n")
for seq_Tf in sorted(self.dict_Tf.values(), key=lambda seq_Tf: -seq_Tf.read_count):
name_Tf = seq_Tf.name
seqs_U = [getattr(self, 'dict_' + category_U)[name_U] for category_U, name_U in zip(seq_Tf.categories_U, seq_Tf.names_U)]
for seq_U in sorted(seqs_U, key=lambda seq_U: (-seq_U.xtra_5prime_length, -seq_U.length_3prime_terminus)):
file_Tf_ends.write(name_Tf + "\t"
+ seq_U.name + "\t"
+ seq_U.string[: seq_U.xtra_5prime_length] + "\t"
+ seq_U.string[len(seq_U.string) - seq_U.length_3prime_terminus: ] + "\t"
+ str(seq_U.read_count) + "\n")
self.progress.end()
self.run.info("Trimmed tRNA supplement", self.path_Tf_ends)
def profile_worker(input_queue, output_queue, profiler):
"""This client for `trnaidentifier.Profiler.profile` is located outside the `TRNASeqDataset`
class to allow multiprocessing."""
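# Each work unit is a (seq_string, representative name, read count) tuple; the resulting
# profile and the read count are passed back through the output queue. The worker loops
# until its process is terminated by the parent.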
while True:
seq_string, represent_name, read_count = input_queue.get()
output_queue.put((profiler.profile(seq_string, name=represent_name), read_count))
class NormalizedSeqSummary(object):
"""Relevant data from normalized sequences stored in anvi'o tRNA-seq databases are reloaded into
these objects."""
__slots__ = (
'name',
'sample_id',
'seq_string',
'threshold_feature_start',
'anticodon_seq_string',
'feature_threshold_start',
'mean_specific_cov',
'specific_covs',
'nonspecific_covs',
'specific_nt_covs_dict',
'nonspecific_nt_covs_dict',
'mod_seq_summary'
)
def __init__(self):
self.name = None
self.sample_id = None
self.seq_string = None
self.threshold_feature_start = None
self.anticodon_seq_string = None
self.feature_threshold_start = None
self.mean_specific_cov = None
self.specific_covs = None
self.nonspecific_covs = None
self.specific_nt_covs_dict = None
self.nonspecific_nt_covs_dict = None
self.mod_seq_summary = None
class ModifiedSeqSummary(object):
"""Relevant data from modified sequences stored in anvi'o tRNA-seq databases are reloaded into
these objects."""
__slots__ = (
'name',
'sample_id',
'consensus_seq_string',
'sub_positions',
'specific_nt_covs_dict',
'nonspecific_nt_covs_dict',
'specific_covs',
'nonspecific_covs',
'insert_starts',
'insert_strings',
'spec_insert_covs',
'nonspec_insert_covs',
'del_starts',
'del_lengths',
'spec_del_covs',
'nonspec_del_covs',
'norm_seq_summaries'
)
def __init__(self):
self.name = None
self.sample_id = None
self.consensus_seq_string = None
self.sub_positions = None
self.specific_nt_covs_dict = None
self.nonspecific_nt_covs_dict = None
self.specific_covs = None
self.nonspecific_covs = None
self.insert_starts = None
self.insert_strings = None
self.spec_insert_covs = None
self.nonspec_insert_covs = None
self.del_starts = None
self.del_lengths = None
self.spec_del_covs = None
self.nonspec_del_covs = None
self.norm_seq_summaries = None
class SeedSeq(object):
__slots__ = (
'name',
'seq_string',
'meets_feature_threshold',
'unmod_norm_seq_summaries',
'mod_seq_summaries',
'anticodon_seq_string',
'total_specific_covs',
'total_nonspecific_covs',
'total_mean_specific_cov',
'total_mean_nonspecific_cov',
'sample_specific_covs_dict',
'sample_nonspecific_covs_dict',
'sample_summed_covs_dict',
'sample_specific_nt_covs_dict',
'sample_nonspecific_nt_covs_dict',
'sample_summed_nt_covs_dict',
'sample_mean_specific_cov_dict',
'sample_mean_nonspecific_cov_dict',
'sample_mean_summed_cov_dict',
'sample_std_specific_cov_dict',
'sample_std_nonspecific_cov_dict',
'sample_std_summed_cov_dict',
'sample_specific_abundances_dict',
'sample_nonspecific_abundances_dict',
'sample_summed_abundances_dict',
'sample_specific_relative_abundances_dict',
'sample_nonspecific_relative_abundances_dict',
'sample_summed_relative_abundances_dict',
'sample_specific_detection_dict',
'sample_nonspecific_detection_dict',
'sample_summed_detection_dict',
'sample_mean_Q2Q3_specific_cov_dict',
'sample_mean_Q2Q3_nonspecific_cov_dict',
'sample_mean_Q2Q3_summed_cov_dict',
'sample_normalized_mean_Q2Q3_specific_cov_dict',
'sample_normalized_mean_Q2Q3_nonspecific_cov_dict',
'sample_normalized_mean_Q2Q3_summed_cov_dict',
'sample_specific_max_normalized_ratio_dict',
'sample_nonspecific_max_normalized_ratio_dict',
'sample_summed_max_normalized_ratio_dict',
'gc_fraction',
'sample_sub_positions_dict',
'total_mod_positions',
'sample_mod_positions_dict',
'sample_variability_dict',
'sample_insert_dict',
'sample_del_dict'
)
def __init__(self):
self.name = None
self.seq_string = None
self.meets_feature_threshold = None
self.unmod_norm_seq_summaries = None
self.mod_seq_summaries = None
self.anticodon_seq_string = None
self.total_specific_covs = None
self.total_nonspecific_covs = None
self.total_mean_specific_cov = None
self.total_mean_nonspecific_cov = None
self.sample_specific_covs_dict = None
self.sample_nonspecific_covs_dict = None
self.sample_summed_covs_dict = None
self.sample_specific_nt_covs_dict = None
self.sample_nonspecific_nt_covs_dict = None
self.sample_summed_nt_covs_dict = None
self.sample_mean_specific_cov_dict = None
self.sample_mean_nonspecific_cov_dict = None
self.sample_mean_summed_cov_dict = None
self.sample_std_specific_cov_dict = None
self.sample_std_nonspecific_cov_dict = None
self.sample_std_summed_cov_dict = None
self.sample_specific_abundances_dict = None
self.sample_nonspecific_abundances_dict = None
self.sample_summed_abundances_dict = None
self.sample_specific_relative_abundances_dict = None
self.sample_nonspecific_relative_abundances_dict = None
self.sample_summed_relative_abundances_dict = None
self.sample_specific_detection_dict = None
self.sample_nonspecific_detection_dict = None
self.sample_summed_detection_dict = None
self.sample_mean_Q2Q3_specific_cov_dict = None
self.sample_mean_Q2Q3_nonspecific_cov_dict = None
self.sample_mean_Q2Q3_summed_cov_dict = None
self.sample_normalized_mean_Q2Q3_specific_cov_dict = None
self.sample_normalized_mean_Q2Q3_nonspecific_cov_dict = None
self.sample_normalized_mean_Q2Q3_summed_cov_dict = None
self.sample_specific_max_normalized_ratio_dict = None
self.sample_nonspecific_max_normalized_ratio_dict = None
self.sample_summed_max_normalized_ratio_dict = None
self.gc_fraction = None
self.sample_sub_positions_dict = None
self.total_mod_positions = None
self.sample_mod_positions_dict = None
self.sample_variability_dict = None
self.sample_insert_dict = None
self.sample_del_dict = None
class DatabaseConverter(object):
"""Converts tRNA-seq database(s) into contigs, auxiliary, and profile databases.
"Contigs" in this context are tRNA seed sequences representing tRNA identified in the collection
of samples.
"""
# The columns needed from tables of a tRNA-seq database.
FEATURE_TABLE_COLS_OF_INTEREST = [
'name',
'anticodon_sequence'
]
TRIMMED_TABLE_COLS_OF_INTEREST = [
'name',
'sequence'
]
NORM_TABLE_COLS_OF_INTEREST = [
'name',
'id_info',
'mean_specific_coverage',
'specific_coverages',
'nonspecific_coverages'
]
MOD_TABLE_COLS_OF_INTEREST = [
'name',
'specific_coverages',
'nonspecific_coverages',
'names_of_normalized_seqs_without_indels',
'names_of_normalized_seqs_with_indels',
'substitution_positions',
'substitution_A_specific_coverage',
'substitution_C_specific_coverage',
'substitution_G_specific_coverage',
'substitution_T_specific_coverage',
'substitution_A_nonspecific_coverage',
'substitution_C_nonspecific_coverage',
'substitution_G_nonspecific_coverage',
'substitution_T_nonspecific_coverage',
'insertion_starts',
'insertion_seqs',
'insertion_specific_coverages',
'insertion_nonspecific_coverages',
'deletion_starts',
'deletion_lengths',
'deletion_specific_coverages',
'deletion_nonspecific_coverages',
'consensus_sequence'
]
def __init__(self, args=None, run=terminal.Run(), progress=terminal.Progress()):
self.args = args
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
# Argument group A: MANDATORY
self.trnaseq_db_paths = A('input')
self.out_dir = A('output_dir')
self.project_name = A('project_name')
# Argument group B: EXTRAS
self.num_threads = A('num_threads')
self.seed_seq_limit = A('max_reported_trna_seeds')
self.overwrite_out_dest = A('overwrite_output_destinations')
self.descrip_path = os.path.abspath(A('description')) if A('description') else None
# Argument group C: ADVANCED
self.feature_threshold = A('feature_threshold')
self.preferred_treatment = A('preferred_treatment')
self.nonspecific_output = A('nonspecific_output')
self.min_variation = A('min_variation')
self.min_third_fourth_nt = A('min_third_fourth_nt')
self.min_indel_fraction = A('min_indel_fraction')
self.distance = A('distance') or constants.distance_metric_default
self.linkage = A('linkage') or constants.linkage_method_default
if not self.project_name:
raise ConfigError("Please specify a name for the collection of input tRNA-seq dbs "
"using --project-name or -n.")
if not self.out_dir:
raise ConfigError("Please provide an output directory using --output-dir or -o.")
self.contigs_db_path = None
self.contigs_db_hash = None
self.specific_out_dir = None
self.specific_profile_db_path = None
self.specific_auxiliary_db_path = None
self.nonspecific_out_dir = None
self.nonspecific_profile_db_path = None
self.nonspecific_auxiliary_db_path = None
self.summed_out_dir = None
self.summed_profile_db_path = None
self.summed_auxiliary_db_path = None
self.combined_out_dir = None
self.combined_profile_db_path = None
self.combined_auxiliary_db_path = None
self.descrip = None
self.preferred_trnaseq_db_sample_ids = None
self.preferred_trnaseq_db_nums = None
self.trnaseq_dbs_info_dict = OrderedDict()
self.num_trnaseq_dbs = None
self.trnaseq_db_sample_ids = None
self.unmod_norm_seq_summaries_dict = OrderedDict()
self.mod_seq_summaries_dict = OrderedDict()
self.seed_seqs = None
self.total_seed_length = None
self.sample_total_specific_cov_dict = None
self.sample_total_nonspecific_cov_dict = None
self.sample_total_summed_cov_dict = None
self.sample_overall_mean_specific_cov_dict = None
self.sample_mean_nonspecific_cov_dict = None
self.sample_mean_summed_cov_dict = None
self.sample_normalization_multiplier_dict = None
self.overall_mean_specific_cov = None
self.overall_mean_nonspecific_cov = None
self.variable_nts_table_entries = None
self.specific_indels_table_entries = None
self.nonspecific_indels_table_entries = None
self.summed_indels_table_entries = None
def process(self):
"""Orchestrate the steps needed to create contigs, profile and auxiliary databases."""
self.sanity_check()
filesnpaths.gen_output_directory(self.out_dir, delete_if_exists=self.overwrite_out_dest)
self.load_trnaseq_dbs()
self.form_seeds()
filesnpaths.gen_output_directory(self.specific_out_dir)
self.gen_contigs_db()
self.gen_auxiliary_db('specific')
self.set_sample_total_covs()
self.set_sample_overall_mean_covs()
self.set_sample_mean_covs()
self.set_sample_std_covs()
self.set_sample_abundances()
self.set_sample_normalization_multipliers()
self.set_sample_normalized_mean_Q2Q3_coverages()
self.set_sample_detections()
self.set_sample_relative_abundances()
self.set_sample_max_normalized_ratios()
self.set_variable_nts_table_entries()
self.set_indels_table_entries()
self.gen_profile_db('specific')
if self.nonspecific_out_dir:
filesnpaths.gen_output_directory(self.nonspecific_out_dir, delete_if_exists=self.overwrite_out_dest)
self.gen_auxiliary_db('nonspecific')
self.gen_profile_db('nonspecific')
if self.combined_out_dir:
filesnpaths.gen_output_directory(self.combined_out_dir, delete_if_exists=self.overwrite_out_dest)
self.gen_auxiliary_db('combined')
self.gen_profile_db('combined')
if self.summed_out_dir:
filesnpaths.gen_output_directory(self.summed_out_dir, delete_if_exists=self.overwrite_out_dest)
self.gen_auxiliary_db('summed')
self.gen_profile_db('summed')
def sanity_check(self):
"""Check user inputs before proceeding."""
for trnaseq_db_path in self.trnaseq_db_paths:
utils.is_trnaseq_db(trnaseq_db_path)
self.populate_trnaseq_dbs_info_dict()
self.trnaseq_db_sample_ids = [inner_dict['sample_id'] for inner_dict in self.trnaseq_dbs_info_dict.values()]
if len(self.trnaseq_dbs_info_dict) != len(set(self.trnaseq_db_sample_ids)):
raise ConfigError("Sample IDs in each input tRNA-seq db must be unique. This is not "
"the case with your input. Here are the sample names so you can see "
"which ones occur more than once: '%s'" % (", ".join(self.trnaseq_db_sample_ids)))
self.num_trnaseq_dbs = len(self.trnaseq_db_sample_ids)
self.check_trnaseq_db_versions()
self.out_dir = filesnpaths.check_output_directory(self.out_dir,
ok_if_exists=self.overwrite_out_dest)
self.out_dir = os.path.abspath(self.out_dir)
self.contigs_db_path = os.path.join(self.out_dir, 'CONTIGS.db')
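# Tag the new contigs db with a random 8-hex-digit hash.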
self.contigs_db_hash = 'hash' + str('%08x' % random.randrange(16**8))
self.specific_out_dir = filesnpaths.check_output_directory(os.path.join(self.out_dir, 'SPECIFIC_COVERAGE'),
ok_if_exists=self.overwrite_out_dest)
self.specific_profile_db_path = os.path.join(self.specific_out_dir, 'PROFILE.db')
self.specific_auxiliary_db_path = os.path.join(self.specific_out_dir, 'AUXILIARY-DATA.db')
if not 1 <= self.num_threads <= mp.cpu_count():
raise ConfigError("The number of threads to use must be a positive integer less than or equal to %d. "
"Try again!" % mp.cpu_count())
self.set_treatment_preference()
self.set_nonspecific_db_info()
utils.check_sample_id(self.project_name)
if self.descrip_path:
filesnpaths.is_file_plain_text(self.descrip_path)
self.descrip_path = os.path.abspath(self.descrip_path)
self.descrip = open(self.descrip_path).read()
if self.seed_seq_limit == -1:
self.seed_seq_limit = MAXSIZE
elif self.seed_seq_limit < 1:
raise ConfigError(f"{self.seed_seq_limit} is an invalid value for `--max-reported-seed-seqs`. "
"To remove the limit on tRNA seeds reported to the contigs db, "
"provide a value of -1. Otherwise provide an integer greater than 0.")
self.run.info("Input tRNA-seq dbs", ", ".join(self.trnaseq_db_paths))
if self.preferred_treatment:
self.run.info("Databases preferred for seed formation",
", ".join([trnaseq_db_path for trnaseq_db_num, trnaseq_db_path
in enumerate(self.trnaseq_db_paths)
if trnaseq_db_num in self.preferred_trnaseq_db_nums]))
self.run.info("Output directory", self.out_dir)
def populate_trnaseq_dbs_info_dict(self):
"""Get the meta-data from the input tRNA-seq databases."""
for trnaseq_db_path in self.trnaseq_db_paths:
trnaseq_db = dbops.TRNASeqDatabase(trnaseq_db_path)
self.trnaseq_dbs_info_dict[trnaseq_db_path] = trnaseq_db.meta
def check_trnaseq_db_versions(self):
if len(set([inner_dict['version'] for inner_dict in self.trnaseq_dbs_info_dict.values()])) > 1:
trnaseq_db_version_report = "\n".join([trnaseq_db_path + " : " + inner_dict['version']
for trnaseq_db_path, inner_dict in self.trnaseq_dbs_info_dict.items()])
if anvio.FORCE:
self.run.warning("Not all input tRNA-seq dbs have the same version number, "
"but since you have used the `--force` flag, `anvi-convert-trnaseq-database` "
"will proceed though this is dangerous and may lead to errors. "
f"Here is the version number of each database:\n{trnaseq_db_version_report}")
else:
raise ConfigError("Not all input tRNA-seq dbs have the same version number. "
f"Here is the version number of each db:\n{trnaseq_db_version_report}")
def set_treatment_preference(self):
if not self.preferred_treatment:
return
input_treatments = [inner_dict['treatment'] for inner_dict in self.trnaseq_dbs_info_dict.values()]
self.preferred_trnaseq_db_sample_ids = []
self.preferred_trnaseq_db_nums = []
if self.preferred_treatment not in input_treatments:
raise ConfigError("You provided a preferred treatment type, %s, "
"but it was not found in any of the input dbs, "
"which were found to have the following treatments: %s."
% (self.preferred_treatment, ', '.join(input_treatments)))
for trnaseq_db_num, treatment in enumerate(input_treatments):
if self.preferred_treatment == treatment:
self.preferred_trnaseq_db_sample_ids.append(self.trnaseq_db_sample_ids[trnaseq_db_num])
self.preferred_trnaseq_db_nums.append(trnaseq_db_num)
def set_nonspecific_db_info(self):
self.nonspecific_db_types = self.nonspecific_output.split(',')
for nonspecific_db_type in self.nonspecific_db_types:
if nonspecific_db_type not in ['nonspecific_db', 'combined_db', 'summed_db']:
raise ConfigError("The nonspecific profile db types provided by `--nonspecific-output` are not recognized. "
"The db types must be comma separated without spaces, "
f"e.g., 'nonspecific_db,combined_db,summed_db'. Your argument was: {self.nonspecific_output}'")
if 'nonspecific_db' in self.nonspecific_db_types:
self.nonspecific_out_dir = filesnpaths.check_output_directory(os.path.join(self.out_dir, 'NONSPECIFIC_COVERAGE'),
ok_if_exists=self.overwrite_out_dest)
self.nonspecific_profile_db_path = os.path.join(self.nonspecific_out_dir, 'PROFILE.db')
self.nonspecific_auxiliary_db_path = os.path.join(self.nonspecific_out_dir, 'AUXILIARY-DATA.db')
if 'combined_db' in self.nonspecific_db_types:
self.combined_out_dir = filesnpaths.check_output_directory(os.path.join(self.out_dir, 'COMBINED_COVERAGE'),
ok_if_exists=self.overwrite_out_dest)
self.combined_profile_db_path = os.path.join(self.combined_out_dir, 'PROFILE.db')
self.combined_auxiliary_db_path = os.path.join(self.combined_out_dir, 'AUXILIARY-DATA.db')
if 'summed_db' in self.nonspecific_db_types:
self.summed_out_dir = filesnpaths.check_output_directory(os.path.join(self.out_dir, 'SUMMED_COVERAGE'),
ok_if_exists=self.overwrite_out_dest)
self.summed_profile_db_path = os.path.join(self.summed_out_dir, 'PROFILE.db')
self.summed_auxiliary_db_path = os.path.join(self.summed_out_dir, 'AUXILIARY-DATA.db')
def load_trnaseq_dbs(self):
loaded_db_count = 0
num_trnaseq_db_paths = len(self.trnaseq_db_paths)
self.progress.new("Loading seq info from tRNA-seq dbs")
self.progress.update(f"{loaded_db_count}/{num_trnaseq_db_paths} dbs loaded")
manager = mp.Manager()
input_queue = manager.Queue()
output_queue = manager.Queue()
processes = [mp.Process(target=trnaseq_db_loader, args=(input_queue, output_queue, self))
for _ in range(self.num_threads)]
for p in processes:
p.start()
for trnaseq_db_path in self.trnaseq_db_paths:
input_queue.put(trnaseq_db_path)
while loaded_db_count < len(self.trnaseq_db_paths):
trnaseq_db_path, unmod_norm_seq_summaries, mod_seq_summaries = output_queue.get()
self.unmod_norm_seq_summaries_dict[trnaseq_db_path] = unmod_norm_seq_summaries
self.mod_seq_summaries_dict[trnaseq_db_path] = mod_seq_summaries
loaded_db_count += 1
self.progress.update(f"{loaded_db_count}/{num_trnaseq_db_paths} dbs loaded")
for p in processes:
p.terminate()
p.join()
self.progress.end()
def load_trnaseq_db_seq_info(self, trnaseq_db_path):
"""Load necessary tRNA sequence data from the input tRNA-seq database.
Unmodified normalized sequences and "modified" sequences, comprising clustered normalized
sequences, are stored in distinct data structures.
"""
trnaseq_db_num = list(self.trnaseq_dbs_info_dict.keys()).index(trnaseq_db_path)
sample_id = self.trnaseq_db_sample_ids[trnaseq_db_num]
trnaseq_db = dbops.TRNASeqDatabase(trnaseq_db_path)
norm_seq_summary_dict = {} # Used to link normalized to modified sequence summaries
# Store normalized sequence strings and feature information.
seq_string_and_feature_df = pd.DataFrame(
trnaseq_db.db.get_some_columns_from_table('feature', ', '.join(self.FEATURE_TABLE_COLS_OF_INTEREST + [self.feature_threshold + '_start'])),
columns=self.FEATURE_TABLE_COLS_OF_INTEREST + [self.feature_threshold + '_start']
).set_index('name')
if 'stem' in self.feature_threshold:
# The starts of both strands of the stem are recorded, so pick the start of the 5' strand.
seq_string_and_feature_df[self.feature_threshold + '_start'] = [int(entry.split(',')[0]) if isinstance(entry, str) else entry for entry
in seq_string_and_feature_df[self.feature_threshold + '_start'].fillna(-1).tolist()]
else:
seq_string_and_feature_df[self.feature_threshold + '_start'] = seq_string_and_feature_df[self.feature_threshold + '_start'].fillna(-1)
seq_string_and_feature_df['anticodon_sequence'] = seq_string_and_feature_df['anticodon_sequence'].fillna('')
seq_string_and_feature_df = pd.merge(
pd.DataFrame(
trnaseq_db.db.get_some_columns_from_table('trimmed', ', '.join(self.TRIMMED_TABLE_COLS_OF_INTEREST)),
columns=self.TRIMMED_TABLE_COLS_OF_INTEREST
).set_index('name'),
seq_string_and_feature_df,
left_index=True,
right_index=True)
for norm_seq_info in trnaseq_db.db.get_some_columns_from_table('normalized', ', '.join(self.NORM_TABLE_COLS_OF_INTEREST)):
(name,
id_info,
mean_specific_cov,
specific_covs_string,
nonspecific_covs_string) = norm_seq_info
if id_info == 'indel_aligned':
# Ignore normalized sequences with indels. The coverage of indels themselves
# is recorded in the parent modified sequence, but the contribution of these
# sequences to nucleotide coverage is ignored. Inclusion of these sequences would
# produce numerous complications (e.g., they don't have feature profiles).
continue
norm_seq_summary = NormalizedSeqSummary()
norm_seq_summary.name = name
norm_seq_summary.sample_id = sample_id
norm_seq_summary.mean_specific_cov = mean_specific_cov
# There is always a trailing comma in the coverage strings.
norm_seq_summary.specific_covs = np.fromiter(map(int, specific_covs_string.split(',')[: -1]), int)
norm_seq_summary.nonspecific_covs = np.fromiter(map(int, nonspecific_covs_string.split(',')[: -1]), int)
(norm_seq_summary.seq_string,
norm_seq_summary.anticodon_seq_string,
norm_seq_summary.feature_threshold_start) = seq_string_and_feature_df.loc[norm_seq_summary.name, ['sequence', 'anticodon_sequence', self.feature_threshold + '_start']]
norm_seq_summary_dict[norm_seq_summary.name] = norm_seq_summary
mod_seq_summaries = []
for mod_seq_info in trnaseq_db.db.get_some_columns_from_table('modified', ', '.join(self.MOD_TABLE_COLS_OF_INTEREST)):
(name,
specific_covs,
nonspecific_covs,
names_of_norm_seqs_without_indels,
names_of_norm_seqs_with_indels,
sub_positions,
sub_A_specific_covs,
sub_C_specific_covs,
sub_G_specific_covs,
sub_T_specific_covs,
sub_A_nonspecific_covs,
sub_C_nonspecific_covs,
sub_G_nonspecific_covs,
sub_T_nonspecific_covs,
insert_starts,
insert_strings,
spec_insert_covs,
nonspec_insert_covs,
del_starts,
del_lengths,
spec_del_covs,
nonspec_del_covs,
consensus_seq_string) = mod_seq_info
mod_seq_summary = ModifiedSeqSummary()
mod_seq_summary.name = name
mod_seq_summary.sample_id = sample_id
# There is always a trailing comma in the substitution and deletion coverage and
# position strings.
mod_seq_summary.sub_positions = np.fromiter(map(int, sub_positions.split(',')[: -1]), int)
mod_seq_summary.consensus_seq_string = consensus_seq_string
# Make nucleotide variability arrays covering every position in the sequence. Start with
# arrays of overall specific/nonspecific coverage with nonzero values for the
# nucleotides found in the consensus sequence, and then correct the variable positions.
seq_length = len(consensus_seq_string)
specific_nt_covs_dict = {nt: np.zeros(seq_length, int) for nt in UNAMBIG_NTS}
nonspecific_nt_covs_dict = {nt: np.zeros(seq_length, int) for nt in UNAMBIG_NTS}
pos = 0
for nt, specific_cov, nonspecific_cov in zip(consensus_seq_string,
specific_covs.split(',')[: -1],
nonspecific_covs.split(',')[: -1]):
specific_nt_covs_dict[nt][pos] = specific_cov
nonspecific_nt_covs_dict[nt][pos] = nonspecific_cov
pos += 1
for (sub_pos,
specific_A_cov,
specific_C_cov,
specific_G_cov,
specific_T_cov,
nonspecific_A_cov,
nonspecific_C_cov,
nonspecific_G_cov,
nonspecific_T_cov) in zip(map(int, sub_positions.split(',')[: -1]),
map(int, sub_A_specific_covs.split(',')[: -1]),
map(int, sub_C_specific_covs.split(',')[: -1]),
map(int, sub_G_specific_covs.split(',')[: -1]),
map(int, sub_T_specific_covs.split(',')[: -1]),
map(int, sub_A_nonspecific_covs.split(',')[: -1]),
map(int, sub_C_nonspecific_covs.split(',')[: -1]),
map(int, sub_G_nonspecific_covs.split(',')[: -1]),
map(int, sub_T_nonspecific_covs.split(',')[: -1])):
specific_nt_covs_dict['A'][sub_pos] = specific_A_cov
specific_nt_covs_dict['C'][sub_pos] = specific_C_cov
specific_nt_covs_dict['G'][sub_pos] = specific_G_cov
specific_nt_covs_dict['T'][sub_pos] = specific_T_cov
nonspecific_nt_covs_dict['A'][sub_pos] = nonspecific_A_cov
nonspecific_nt_covs_dict['C'][sub_pos] = nonspecific_C_cov
nonspecific_nt_covs_dict['G'][sub_pos] = nonspecific_G_cov
nonspecific_nt_covs_dict['T'][sub_pos] = nonspecific_T_cov
mod_seq_summary.specific_nt_covs_dict = specific_nt_covs_dict
mod_seq_summary.nonspecific_nt_covs_dict = nonspecific_nt_covs_dict
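# An M without indels stores a lone comma in each indel column.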
if insert_starts == ',':
mod_seq_summary.insert_starts = []
mod_seq_summary.insert_strings = []
mod_seq_summary.spec_insert_covs = []
mod_seq_summary.nonspec_insert_covs = []
else:
mod_seq_summary.insert_starts = list(map(int, insert_starts.split(',')[: -1]))
mod_seq_summary.insert_strings = list(insert_strings.split(',')[: -1])
mod_seq_summary.spec_insert_covs = list(map(int, spec_insert_covs.split(',')[: -1]))
mod_seq_summary.nonspec_insert_covs = list(map(int, nonspec_insert_covs.split(',')[: -1]))
assert len(mod_seq_summary.insert_starts) == len(mod_seq_summary.insert_strings) == len(mod_seq_summary.spec_insert_covs) == len(mod_seq_summary.nonspec_insert_covs)
if del_starts == ',':
mod_seq_summary.del_starts = []
mod_seq_summary.del_lengths = []
mod_seq_summary.spec_del_covs = []
mod_seq_summary.nonspec_del_covs = []
else:
mod_seq_summary.del_starts = list(map(int, del_starts.split(',')[: -1]))
mod_seq_summary.del_lengths = list(map(int, del_lengths.split(',')[: -1]))
mod_seq_summary.spec_del_covs = list(map(int, spec_del_covs.split(',')[: -1]))
mod_seq_summary.nonspec_del_covs = list(map(int, nonspec_del_covs.split(',')[: -1]))
assert len(mod_seq_summary.del_starts) == len(mod_seq_summary.del_lengths) == len(mod_seq_summary.spec_del_covs) == len(mod_seq_summary.nonspec_del_covs)
mod_seq_summary.norm_seq_summaries = []
for norm_seq_name in names_of_norm_seqs_without_indels.split(','):
norm_seq_summary = norm_seq_summary_dict[norm_seq_name]
# Cross-reference the modified sequence summary and constituent modified normalized
# sequence summary objects.
norm_seq_summary.mod_seq_summary = mod_seq_summary
mod_seq_summary.norm_seq_summaries.append(norm_seq_summary)
# Ensure that all of the constituent modified normalized sequences have coverage
# arrays flush with the modified sequence.
if len(norm_seq_summary.seq_string) < len(mod_seq_summary.consensus_seq_string):
fiveprime_extension = np.zeros(len(mod_seq_summary.consensus_seq_string) - len(norm_seq_summary.seq_string), int)
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
mod_seq_summaries.append(mod_seq_summary)
unmod_norm_seq_summaries = [norm_seq for norm_seq in norm_seq_summary_dict.values()
if not norm_seq.mod_seq_summary]
return unmod_norm_seq_summaries, mod_seq_summaries
def extend_norm_seq_fiveprime_end(self, norm_seq_summary, fiveprime_extension):
"""Seed sequences can be longer than the normalized sequences from the individual samples,
requiring addition of empty positions in the normalized sequence coverage arrays at the 5'
end, as normalized (and modified) sequences are aligned from the 3' end."""
norm_seq_summary.specific_covs = np.concatenate([fiveprime_extension, norm_seq_summary.specific_covs])
norm_seq_summary.nonspecific_covs = np.concatenate([fiveprime_extension, norm_seq_summary.nonspecific_covs])
def form_seeds(self):
"""Form tRNA seed sequences through comparison of the input samples.
Seed sequences are formed through comparison of the samples' normalized sequences (both
unmodified normalized sequences and normalized sequences underlying modified sequences).
Seed sequences need not be found in every sample.
Modification-induced mutations complicate seed formation. (anvi-trnaseq detects
substitutions -- the main type of modification-induced mutation -- as well as insertions
and deletions.) Modified sequences are derived from clusters of normalized sequences;
unmodified normalized sequences and the normalized sequences underlying modified sequences
are compared between samples to find seeds. If a normalized sequence is shared identically
(not as a subsequence) between samples, then the normalized sequences and any modified
sequences they are part of are combined into a single seed sequence.
Exact matching of normalized sequences is a heuristic: an alternative would be to check
whether one normalized sequence is a 3' subsequence of another, or to compare the
underlying trimmed sequences that make up the normalized sequences. This heuristic should
not distort sample merging for the more abundant tRNA species in particular, as these are
most likely to be represented by reads spanning the full length of the tRNA, producing the
same normalized sequences."""
self.progress.new("Forming seed seqs from input samples")
norm_seq_string_seed_seq_dict = {}
for trnaseq_db_num, trnaseq_db_path in enumerate(self.trnaseq_db_paths):
sample_id = self.trnaseq_db_sample_ids[trnaseq_db_num]
self.progress.update(f"Adding {sample_id}")
unmod_norm_seq_summaries = self.unmod_norm_seq_summaries_dict[trnaseq_db_path]
mod_seq_summaries = self.mod_seq_summaries_dict[trnaseq_db_path]
for norm_seq_summary in unmod_norm_seq_summaries:
# Process normalized sequences without any detected potential modifications.
norm_seq_string = norm_seq_summary.seq_string
try:
# The normalized sequence has already been found in another dataset.
seed_seq = norm_seq_string_seed_seq_dict[norm_seq_string]
except KeyError:
# Create a new seed sequence based on the normalized sequence.
seed_seq = SeedSeq()
seed_seq.name = norm_seq_summary.name + '_' + sample_id
seed_seq.seq_string = norm_seq_string
seed_seq.meets_feature_threshold = True if norm_seq_summary.feature_threshold_start >= 0 else False
seed_seq.unmod_norm_seq_summaries = [norm_seq_summary]
seed_seq.mod_seq_summaries = []
norm_seq_string_seed_seq_dict[norm_seq_string] = seed_seq
continue
if len(norm_seq_string) < len(seed_seq.seq_string):
# The normalized sequence string is shorter than the seed sequence string,
# implying that the seed string is a longer modified sequence in another
# dataset. Extend the normalized sequence coverage arrays by the needed amount at
# the 5' end. (Note: It is impossible here for the normalized sequence string to
# be longer than the seed sequence string.)
fiveprime_extension = np.zeros(len(seed_seq.seq_string) - len(norm_seq_string), int)
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
seed_seq.unmod_norm_seq_summaries.append(norm_seq_summary)
for mod_seq_summary in mod_seq_summaries:
# Find seed sequences from other datasets containing any of the normalized sequences
# forming the modified sequence under consideration. If more than one seed sequence
# is identified, they are merged.
seed_seq_dict = {}
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
try:
# The normalized sequence is represented in another dataset.
seed_seq = norm_seq_string_seed_seq_dict[norm_seq_summary.seq_string]
except KeyError:
continue
seed_seq_dict[seed_seq.name] = seed_seq
if not seed_seq_dict:
# Create a new seed sequence based on the modified sequence.
seed_seq = SeedSeq()
seed_seq.name = mod_seq_summary.name + '_' + sample_id
seed_seq.seq_string = mod_seq_summary.consensus_seq_string
seed_seq.unmod_norm_seq_summaries = []
seed_seq.mod_seq_summaries = []
seed_seq.mod_seq_summaries.append(mod_seq_summary)
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
if norm_seq_summary.feature_threshold_start >= 0:
seed_seq.meets_feature_threshold = True
break
else:
seed_seq.meets_feature_threshold = False
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
norm_seq_string_seed_seq_dict[norm_seq_summary.seq_string] = seed_seq
continue
if len(seed_seq_dict) == 1:
# The modified sequence shares one or more normalized sequences with one seed sequence.
seed_seq_name, seed_seq = seed_seq_dict.popitem()
if len(mod_seq_summary.consensus_seq_string) < len(seed_seq.seq_string):
# The modified sequence is shorter than the seed sequence, so its coverage
# arrays must be extended with zeros at the 5' end.
fiveprime_extension = np.zeros(len(seed_seq.seq_string) - len(mod_seq_summary.consensus_seq_string), int)
self.extend_mod_seq_fiveprime_end(mod_seq_summary, fiveprime_extension)
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
elif len(mod_seq_summary.consensus_seq_string) > len(seed_seq.seq_string):
# The modified sequence is longer than the seed sequence, so the coverage
# arrays of the sequences forming the seed sequence must be extended with
# zeros at the 5' end.
fiveprime_extension = np.zeros(len(mod_seq_summary.consensus_seq_string) - len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
for other_mod_seq_summary in seed_seq.mod_seq_summaries:
self.extend_mod_seq_fiveprime_end(other_mod_seq_summary, fiveprime_extension)
seed_seq.name = mod_seq_summary.name + '_' + sample_id
seed_seq.seq_string = mod_seq_summary.consensus_seq_string
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
if norm_seq_summary.feature_threshold_start >= 0:
seed_seq.meets_feature_threshold = True
break
else:
seed_seq.meets_feature_threshold = False
seed_seq.mod_seq_summaries.append(mod_seq_summary)
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
norm_seq_string_seed_seq_dict[norm_seq_summary.seq_string] = seed_seq
continue
# To reach this point, the modified sequence must map to more than one seed
# sequence.
sorted_seed_seqs = sorted([seed_seq for seed_seq in seed_seq_dict.values()],
key=lambda seed_seq: -len(seed_seq.seq_string))
max_seed_seq_length = len(sorted_seed_seqs[0].seq_string)
mod_seq_length = len(mod_seq_summary.consensus_seq_string)
if mod_seq_length < max_seed_seq_length:
# Extend coverage arrays of the modified sequence.
fiveprime_extension = np.zeros(max_seed_seq_length - mod_seq_length, int)
self.extend_mod_seq_fiveprime_end(mod_seq_summary, fiveprime_extension)
# Extend coverage arrays of shorter seed sequences now grouped with a longer
# seed sequence.
for seed_seq in seed_seq_dict.values():
if len(seed_seq.seq_string) < max_seed_seq_length:
fiveprime_extension = np.zeros(max_seed_seq_length - len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
for other_mod_seq_summary in seed_seq.mod_seq_summaries:
self.extend_mod_seq_fiveprime_end(other_mod_seq_summary, fiveprime_extension)
new_seed_seq = SeedSeq()
longest_seed_seq = sorted_seed_seqs[0]
new_seed_seq.name = longest_seed_seq.name
new_seed_seq.seq_string = longest_seed_seq.seq_string
new_seed_seq.meets_feature_threshold = longest_seed_seq.meets_feature_threshold
elif mod_seq_length > max_seed_seq_length:
# Extend coverage arrays of seed sequences.
for seed_seq in seed_seq_dict.values():
fiveprime_extension = np.zeros(mod_seq_length - len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
for other_mod_seq_summary in seed_seq.mod_seq_summaries:
self.extend_mod_seq_fiveprime_end(other_mod_seq_summary, fiveprime_extension)
new_seed_seq = SeedSeq()
new_seed_seq.name = mod_seq_summary.name + '_' + sample_id
new_seed_seq.seq_string = mod_seq_summary.consensus_seq_string
for seed_seq in seed_seq_dict.values():
if seed_seq.meets_feature_threshold:
new_seed_seq.meets_feature_threshold = True
break
else:
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
if norm_seq_summary.feature_threshold_start >= 0:
new_seed_seq.meets_feature_threshold = True
break
else:
new_seed_seq.meets_feature_threshold = False
else:
# The modified sequence is the same length as the longest seed sequence. Extend
# coverage arrays of shorter seed sequences now grouped with a longer seed
# sequence.
for seed_seq in seed_seq_dict.values():
if len(seed_seq.seq_string) < max_seed_seq_length:
fiveprime_extension = np.zeros(max_seed_seq_length - len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
self.extend_norm_seq_fiveprime_end(norm_seq_summary, fiveprime_extension)
for other_mod_seq_summary in seed_seq.mod_seq_summaries:
self.extend_mod_seq_fiveprime_end(other_mod_seq_summary, fiveprime_extension)
new_seed_seq = SeedSeq()
new_seed_seq.name = mod_seq_summary.name + '_' + sample_id
new_seed_seq.seq_string = mod_seq_summary.consensus_seq_string
if sorted_seed_seqs[0].meets_feature_threshold:
new_seed_seq.meets_feature_threshold = True
else:
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
if norm_seq_summary.feature_threshold_start >= 0:
new_seed_seq.meets_feature_threshold = True
break
else:
new_seed_seq.meets_feature_threshold = False
# Now that all of the coverage arrays are reconciled in length, the modified
# sequence query and constituent sequences of the matching seeds can be added to the
# new seed.
new_seed_seq.unmod_norm_seq_summaries = []
new_seed_seq.mod_seq_summaries = []
for seed_seq in seed_seq_dict.values():
new_seed_seq.unmod_norm_seq_summaries += seed_seq.unmod_norm_seq_summaries
new_seed_seq.mod_seq_summaries += seed_seq.mod_seq_summaries
new_seed_seq.mod_seq_summaries.append(mod_seq_summary)
for norm_seq_summary in new_seed_seq.unmod_norm_seq_summaries:
norm_seq_string_seed_seq_dict[norm_seq_summary.seq_string] = new_seed_seq
for mod_seq_summary in new_seed_seq.mod_seq_summaries:
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
norm_seq_string_seed_seq_dict[norm_seq_summary.seq_string] = new_seed_seq
self.progress.update("Finalizing seeds")
# The seed references in the dict need to be dereplicated.
seed_seqs = list({seed_seq.name: seed_seq for seed_seq in norm_seq_string_seed_seq_dict.values()}.values())
# Disregard seed sequences that do not reach the 5' feature threshold.
seed_seqs = [seed_seq for seed_seq in seed_seqs if seed_seq.meets_feature_threshold]
self.set_anticodon(seed_seqs)
seed_seqs = [seed_seq for seed_seq in seed_seqs if seed_seq.anticodon_seq_string]
# Find specific coverages of modified sequences from specific coverages of nucleotides.
self.sum_specific_nt_covs(seed_seqs)
# Select the top seeds by specific coverage.
self.set_total_specific_covs(seed_seqs)
seed_seqs = sorted(seed_seqs, key=lambda seed_seq: -seed_seq.total_mean_specific_cov)[: self.seed_seq_limit]
self.set_nt_covs(seed_seqs)
self.sum_nonspecific_nt_covs(seed_seqs)
self.set_total_nonspecific_covs(seed_seqs)
self.set_consensus_seq_string(seed_seqs)
self.set_gc_fraction(seed_seqs)
self.seed_seqs = seed_seqs
self.total_seed_length = sum([len(seed_seq.seq_string) for seed_seq in seed_seqs])
self.set_sample_covs()
self.set_mods()
self.set_consensus_mod_nts()
self.set_sample_indels()
self.progress.end()
def extend_mod_seq_fiveprime_end(self, mod_seq_summary, fiveprime_extension):
"""Seed sequences can be longer than the normalized sequences from the individual samples,
requiring addition of empty positions in the normalized sequence coverage arrays at the 5'
end, as normalized (and modified) sequences are aligned from the 3' end."""
# The positions of substitutions are recorded in the seed sequence index.
length_5prime_extension = fiveprime_extension.size
mod_seq_summary.sub_positions += length_5prime_extension
for nt in UNAMBIG_NTS:
mod_seq_summary.specific_nt_covs_dict[nt] = np.concatenate([fiveprime_extension, mod_seq_summary.specific_nt_covs_dict[nt]])
mod_seq_summary.nonspecific_nt_covs_dict[nt] = np.concatenate([fiveprime_extension, mod_seq_summary.nonspecific_nt_covs_dict[nt]])
mod_seq_summary.insert_starts = [start + length_5prime_extension for start in mod_seq_summary.insert_starts]
mod_seq_summary.del_starts = [start + length_5prime_extension for start in mod_seq_summary.del_starts]
def set_anticodon(self, seed_seqs):
"""Assign the anticodon by comparing the mean specific coverage of the normalized sequences
comprising the seed (a simpler, approximate substitute for extracting the anticodon coverage
from each normalized sequence). This method is called by `DatabaseConverter.process` after
sequences from input tRNA-seq databases have been assigned to seeds."""
for seed_seq in seed_seqs:
if not seed_seq.mod_seq_summaries:
# The seed is comprised entirely of identical unmodified normalized sequences.
seed_seq.anticodon_seq_string = seed_seq.unmod_norm_seq_summaries[0].anticodon_seq_string
continue
anticodon_cov_dict = defaultdict(int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
# Mean specific coverage does not include any 5' padding of zero coverage from the
# formation of the seed sequence.
anticodon_cov_dict[norm_seq_summary.anticodon_seq_string] += norm_seq_summary.mean_specific_cov
for mod_seq_summary in seed_seq.mod_seq_summaries:
for norm_seq_summary in mod_seq_summary.norm_seq_summaries:
anticodon_cov_dict[norm_seq_summary.anticodon_seq_string] += norm_seq_summary.mean_specific_cov
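            # Choose the anticodon with the greatest summed mean specific coverage.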
seed_seq.anticodon_seq_string = sorted(anticodon_cov_dict.items(), key=lambda t: -t[1])[0][0]
def sum_specific_nt_covs(self, seed_seqs):
"""Coverages of the 4 nucleotides are maintained in the tRNA-seq database for modified
sequences, since modified sequences have nucleotide variability, so this method is used to
calculate overall specific coverages."""
for seed_seq in seed_seqs:
for mod_seq_summary in seed_seq.mod_seq_summaries:
mod_seq_summary.specific_covs = np.array([covs for covs in mod_seq_summary.specific_nt_covs_dict.values()]).sum(axis=0)
def set_total_specific_covs(self, seed_seqs):
"""Sum specific coverages from each sequence comprising the seed sequence, and also
calculate the mean thereof."""
for seed_seq in seed_seqs:
seed_seq.total_specific_covs = np.zeros(len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
seed_seq.total_specific_covs += norm_seq_summary.specific_covs
for mod_seq_summary in seed_seq.mod_seq_summaries:
seed_seq.total_specific_covs += mod_seq_summary.specific_covs
seed_seq.total_mean_specific_cov = seed_seq.total_specific_covs.mean()
def set_nt_covs(self, seed_seqs):
"""Make separate coverage arrays for A, C, G and T, which are needed in seed sequence formation."""
for seed_seq in seed_seqs:
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
norm_seq_summary.specific_nt_covs_dict = {nt: np.zeros(norm_seq_summary.specific_covs.size, int) for nt in UNAMBIG_NTS}
norm_seq_summary.nonspecific_nt_covs_dict = {nt: np.zeros(norm_seq_summary.nonspecific_covs.size, int) for nt in UNAMBIG_NTS}
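                # The coverage arrays may have been zero-padded at the 5' end during seed
                # formation, so the sequence string aligns with the 3'-terminal positions of the
                # arrays, beginning at `start_pos`.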
start_pos = norm_seq_summary.specific_covs.size - len(norm_seq_summary.seq_string)
pos = start_pos
for nt, specific_cov, nonspecific_cov in zip(norm_seq_summary.seq_string,
norm_seq_summary.specific_covs[start_pos: ],
norm_seq_summary.nonspecific_covs[start_pos: ]):
norm_seq_summary.specific_nt_covs_dict[nt][pos] = specific_cov
norm_seq_summary.nonspecific_nt_covs_dict[nt][pos] = nonspecific_cov
pos += 1
def sum_nonspecific_nt_covs(self, seed_seqs):
"""Coverages of the 4 nucleotides are maintained in the tRNA-seq database for modified
sequences, since modified sequences have nucleotide variability, so this method is used to
calculate overall nonspecific coverages."""
for seed_seq in seed_seqs:
for mod_seq_summary in seed_seq.mod_seq_summaries:
mod_seq_summary.nonspecific_covs = np.array([covs for covs in mod_seq_summary.nonspecific_nt_covs_dict.values()]).sum(axis=0)
def set_total_nonspecific_covs(self, seed_seqs):
"""Sum nonspecific coverages from each sequence forming the seed sequence, and also
calculate the mean thereof."""
for seed_seq in seed_seqs:
seed_seq.total_nonspecific_covs = np.zeros(len(seed_seq.seq_string), int)
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
seed_seq.total_nonspecific_covs += norm_seq_summary.nonspecific_covs
for mod_seq_summary in seed_seq.mod_seq_summaries:
nonspecific_nt_covs = np.array([covs for covs in mod_seq_summary.nonspecific_nt_covs_dict.values()])
seed_seq.total_nonspecific_covs += nonspecific_nt_covs.sum(axis=0)
seed_seq.total_mean_nonspecific_cov = seed_seq.total_nonspecific_covs.mean()
def set_consensus_seq_string(self, seed_seqs):
"""The consensus sequence for the seed consists of the nucleotides with the maximum specific
coverage summed across constituent sequences. When certain tRNA-seq treatments are preferred
(e.g., demethylase), nucleotides with predicted modifications are called on the basis of
sequences from preferred samples."""
for seed_seq in seed_seqs:
if not seed_seq.mod_seq_summaries:
# The seed is composed entirely of identical unmodified normalized sequences.
                continue
total_nt_cov_dict = {nt: np.zeros(len(seed_seq.seq_string), int) for nt in UNAMBIG_NTS}
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
for nt in UNAMBIG_NTS:
total_nt_cov_dict[nt] += norm_seq_summary.specific_nt_covs_dict[nt]
for mod_seq_summary in seed_seq.mod_seq_summaries:
for nt in UNAMBIG_NTS:
total_nt_cov_dict[nt] += mod_seq_summary.specific_nt_covs_dict[nt]
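            # At each position, take the nucleotide with the maximum total specific coverage.
            # `np.argmax` returns the row index into the (4, seed length) coverage array, which
            # maps back to a nucleotide character via `INT_NT_DICT`.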
seed_seq.seq_string = ''.join(
[INT_NT_DICT[i + 1] for i in
np.argmax(np.array([total_nt_cov_dict[nt] for nt in UNAMBIG_NTS]), axis=0)]
)
def set_gc_fraction(self, seed_seqs):
for seed_seq in seed_seqs:
seed_seq.gc_fraction = sum([1 for nt in seed_seq.seq_string if nt == 'C' or nt == 'G']) / len(seed_seq.seq_string)
def set_sample_covs(self):
"""Determine sample-specific coverages of seeds. Specific, nonspecific and summed coverages
are found for A, C, G and T, as well as overall and for Q2-Q3."""
for seed_seq in self.seed_seqs:
sample_specific_covs_dict = {}
sample_nonspecific_covs_dict = {}
sample_summed_covs_dict = {}
sample_specific_nt_covs_dict = {}
sample_nonspecific_nt_covs_dict = {}
sample_summed_nt_covs_dict = {}
seed_seq_length = len(seed_seq.seq_string)
for sample_id in self.trnaseq_db_sample_ids:
sample_specific_covs_dict[sample_id] = np.zeros(seed_seq_length, int)
sample_nonspecific_covs_dict[sample_id] = np.zeros(seed_seq_length, int)
sample_specific_nt_covs_dict[sample_id] = [np.zeros(seed_seq_length, int) for _ in UNAMBIG_NTS]
sample_nonspecific_nt_covs_dict[sample_id] = [np.zeros(seed_seq_length, int) for _ in UNAMBIG_NTS]
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
sample_id = norm_seq_summary.sample_id
sample_specific_covs_dict[sample_id] += norm_seq_summary.specific_covs
sample_nonspecific_covs_dict[sample_id] += norm_seq_summary.nonspecific_covs
for i, nt in enumerate(UNAMBIG_NTS):
sample_specific_nt_covs_dict[sample_id][i] += norm_seq_summary.specific_nt_covs_dict[nt]
sample_nonspecific_nt_covs_dict[sample_id][i] += norm_seq_summary.nonspecific_nt_covs_dict[nt]
for mod_seq_summary in seed_seq.mod_seq_summaries:
sample_id = mod_seq_summary.sample_id
sample_specific_covs_dict[sample_id] += mod_seq_summary.specific_covs
sample_nonspecific_covs_dict[sample_id] += mod_seq_summary.nonspecific_covs
for i, nt in enumerate(UNAMBIG_NTS):
sample_specific_nt_covs_dict[sample_id][i] += mod_seq_summary.specific_nt_covs_dict[nt]
sample_nonspecific_nt_covs_dict[sample_id][i] += mod_seq_summary.nonspecific_nt_covs_dict[nt]
for sample_id in self.trnaseq_db_sample_ids:
sample_summed_covs_dict[sample_id] = sample_specific_covs_dict[sample_id] + sample_nonspecific_covs_dict[sample_id]
sample_specific_nt_covs = sample_specific_nt_covs_dict[sample_id]
sample_nonspecific_nt_covs = sample_nonspecific_nt_covs_dict[sample_id]
sample_summed_nt_covs_dict[sample_id] = [specific_covs + nonspecific_covs for specific_covs, nonspecific_covs
in zip(sample_specific_nt_covs, sample_nonspecific_nt_covs)]
seed_seq.sample_specific_covs_dict = sample_specific_covs_dict
seed_seq.sample_nonspecific_covs_dict = sample_nonspecific_covs_dict
seed_seq.sample_summed_covs_dict = sample_summed_covs_dict
seed_seq.sample_specific_nt_covs_dict = sample_specific_nt_covs_dict
seed_seq.sample_nonspecific_nt_covs_dict = sample_nonspecific_nt_covs_dict
seed_seq.sample_summed_nt_covs_dict = sample_summed_nt_covs_dict
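            # "Q2-Q3" coverage: sort the positional coverages and average the middle two
            # quartiles (the central 50% of positions), damping the effect of extreme values.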
q = int(seed_seq_length * 0.25)
seed_seq.sample_mean_Q2Q3_specific_cov_dict = {}
seed_seq.sample_mean_Q2Q3_nonspecific_cov_dict = {}
seed_seq.sample_mean_Q2Q3_summed_cov_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
seed_seq.sample_mean_Q2Q3_specific_cov_dict[sample_id] = np.mean(
sorted(sample_specific_covs_dict[sample_id])[q: -q])
seed_seq.sample_mean_Q2Q3_nonspecific_cov_dict[sample_id] = np.mean(
sorted(sample_nonspecific_covs_dict[sample_id])[q: -q])
seed_seq.sample_mean_Q2Q3_summed_cov_dict[sample_id] = np.mean(
sorted(sample_summed_covs_dict[sample_id])[q: -q])
def set_mods(self):
"""Predict modified positions in the tRNA seed.
A modification requires a certain level of third- and/or fourth-most abundant nucleotides at
the position in one or more samples. A modification in any particular sample additionally
requires a certain level of second- through fourth-most abundant nucleotides at the
position.
        There is currently an idiosyncrasy in how modifications are set that results in the
retention, but potential masking, of SNVs. If the position of a potential modification does
not meet the coverage threshold for third- and fourth-most abundant nucleotides, the seed is
not split into separate seeds around those SNVs, as occurs in anvi-trnaseq. Instead, the
SNVs are simply not reported. This is a downside to imposing the aforementioned coverage
threshold."""
        # Division by zero issues a numpy warning, but the resulting nan values are immediately
        # converted to zero, so the warning is uninformative. Suppress it here and restore the
        # default behavior at the end of the method.
np.seterr(invalid='ignore')
min_variation = self.min_variation
min_third_fourth_nt = self.min_third_fourth_nt
for seed_seq in self.seed_seqs:
sample_mod_positions_dict = {}
sample_variability_dict = {}
sample_specific_nt_covs_dict = seed_seq.sample_specific_nt_covs_dict
seed_seq_length = len(seed_seq.seq_string)
sample_variations = []
third_fourth_variations = np.zeros(seed_seq_length)
for sample_id, specific_nt_covs in sample_specific_nt_covs_dict.items():
specific_nt_covs_array = np.array(specific_nt_covs)
specific_nt_covs_array.sort(axis=0)
first_covs = specific_nt_covs_array[-1, :]
second_covs = specific_nt_covs_array[-2, :]
summed_covs = specific_nt_covs_array.sum(axis=0)
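                # `1 - first / summed` is the fraction of specific coverage from nucleotides other
                # than the most abundant one; `1 - (first + second) / summed` is the fraction from
                # the third- and fourth-most abundant nucleotides.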
sample_variations.append(np.nan_to_num(1 - first_covs / summed_covs))
third_fourth_variations += np.nan_to_num(1 - (first_covs + second_covs) / summed_covs) >= min_third_fourth_nt
sample_variations = np.array(sample_variations)
third_fourth_variations = (third_fourth_variations > 0)
total_mod_positions = np.nonzero((sample_variations >= min_variation).any(axis=0) & third_fourth_variations)[0]
mod_sample_variations = sample_variations[:, total_mod_positions]
for sample_num, sample_id in enumerate(sample_specific_nt_covs_dict.keys()):
sample_mod_positions = total_mod_positions[np.nonzero(mod_sample_variations[sample_num, :] >= min_variation)[0]]
sample_mod_positions_dict[sample_id] = sample_mod_positions.tolist()
sample_variability_dict[sample_id] = sample_mod_positions.size * 1000 / seed_seq_length
seed_seq.total_mod_positions = total_mod_positions.tolist()
seed_seq.sample_mod_positions_dict = sample_mod_positions_dict
seed_seq.sample_variability_dict = sample_variability_dict
np.seterr(invalid='warn')
def set_consensus_mod_nts(self):
"""Change predicted modified nucleotides in the seed consensus sequences to the nucleotides
supported by the "preferred" treated samples, e.g., demethylase splits, with the goal of
increasing the accuracy of the underlying base call."""
if not self.preferred_treatment:
return
for seed_seq in self.seed_seqs:
seq_string = seed_seq.seq_string
preferred_nt_cov_dict = {nt: np.zeros(len(seq_string), int) for nt in UNAMBIG_NTS}
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
if norm_seq_summary.sample_id in self.preferred_trnaseq_db_sample_ids:
for nt in UNAMBIG_NTS:
preferred_nt_cov_dict[nt] += norm_seq_summary.specific_nt_covs_dict[nt]
for mod_seq_summary in seed_seq.mod_seq_summaries:
if mod_seq_summary.sample_id in self.preferred_trnaseq_db_sample_ids:
for nt in UNAMBIG_NTS:
preferred_nt_cov_dict[nt] += mod_seq_summary.specific_nt_covs_dict[nt]
preferred_nt_cov_array = np.array([preferred_nt_cov_dict[nt] for nt in UNAMBIG_NTS])
for mod_pos in seed_seq.total_mod_positions:
mod_covs = preferred_nt_cov_array[:, mod_pos]
if mod_covs.sum() == 0:
# The preferred treatments do not have specific coverage of the modified site.
continue
seq_string = seq_string[: mod_pos] + INT_NT_DICT[np.argmax(mod_covs) + 1] + seq_string[mod_pos + 1: ]
seed_seq.seq_string = seq_string
def set_sample_indels(self):
for seed_seq in self.seed_seqs:
sample_insert_dict = {}
sample_del_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_insert_dict[sample_id] = []
sample_del_dict[sample_id] = []
for mod_seq_summary in seed_seq.mod_seq_summaries:
sample_id = mod_seq_summary.sample_id
sample_insert_info = sample_insert_dict[sample_id]
for insert_start, insert_string, spec_insert_cov, nonspec_insert_cov in zip(mod_seq_summary.insert_starts,
mod_seq_summary.insert_strings,
mod_seq_summary.spec_insert_covs,
mod_seq_summary.nonspec_insert_covs):
sample_insert_info.append((insert_start, insert_string, spec_insert_cov, nonspec_insert_cov))
sample_del_info = sample_del_dict[sample_id]
for del_start, del_length, spec_del_cov, nonspec_del_cov in zip(mod_seq_summary.del_starts,
mod_seq_summary.del_lengths,
mod_seq_summary.spec_del_covs,
mod_seq_summary.nonspec_del_covs):
sample_del_info.append((del_start, del_length, spec_del_cov, nonspec_del_cov))
seed_seq.sample_insert_dict = sample_insert_dict
seed_seq.sample_del_dict = sample_del_dict
def gen_contigs_db(self):
"""Generate a contigs database of tRNA seeds. The create method of dbops.ContigsDatabase is
not used because it tries to call genes, count kmers, and do other things that are
irrelevant to tRNA-seq reads. There are no tRNA splits, but to satisfy the structure of the
database, call every contig a split, and maintain tables on both."""
self.progress.new("Generating a contigs db of tRNA seeds")
self.progress.update("...")
contigs_db = dbops.ContigsDatabase(self.contigs_db_path)
contigs_db.touch()
set_meta_value = contigs_db.db.set_meta_value
insert_many = contigs_db.db.insert_many
# Meta-values are set like in `dbops.ContigsDatabase.create`.
set_meta_value('db_type', 'contigs')
set_meta_value('db_variant', 'trnaseq')
set_meta_value('project_name', self.project_name)
set_meta_value('description', self.descrip if self.descrip else '_No description is provided_')
set_meta_value('contigs_db_hash', self.contigs_db_hash)
set_meta_value('split_length', 10000) # sys.maxsize
set_meta_value('num_contigs', len(self.seed_seqs))
set_meta_value('num_splits', len(self.seed_seqs))
set_meta_value('total_length', self.total_seed_length)
set_meta_value('kmer_size', 0)
set_meta_value('gene_level_taxonomy_source', None)
set_meta_value('gene_function_sources', 'Transfer_RNAs')
set_meta_value('genes_are_called', True)
set_meta_value('external_gene_calls', True)
set_meta_value('external_gene_amino_acid_seqs', False)
set_meta_value('skip_predict_frame', True)
set_meta_value('splits_consider_gene_calls', False)
set_meta_value('scg_taxonomy_was_run', False)
set_meta_value('scg_taxonomy_database_version', None)
set_meta_value('trna_taxonomy_was_run', False)
set_meta_value('trna_taxonomy_database_version', None)
set_meta_value('creation_date', time.time())
insert_many('contig_sequences', [(seed_seq.name, seed_seq.seq_string) for seed_seq in self.seed_seqs])
insert_many('contigs_basic_info', self.get_contigs_basic_info_table_entries())
insert_many('splits_basic_info', self.get_splits_basic_info_table_entries())
insert_many('hmm_hits', self.get_hmm_hits_table_entries())
insert_many('hmm_hits_in_splits', self.get_hmm_hits_in_splits_table_entries())
# tRNA predictions are treated like HMM or tRNAScan-SE hits. The blank columns of the HMM
# hits info table are 'ref', 'search_type', 'domain' and 'genes'.
contigs_db.db.insert('hmm_hits_info', ('Transfer_RNAs', '', 'Transfer_RNAs', None, ''))
insert_many('genes_in_contigs', self.get_genes_in_contigs_table_entries())
insert_many('gene_amino_acid_sequences', [(i, '') for i in range(len(self.seed_seqs))])
insert_many('genes_in_splits', self.get_genes_in_splits_table_entries())
insert_many('gene_functions', self.get_gene_functions_table_entries())
contigs_db.disconnect()
self.progress.end()
def get_contigs_basic_info_table_entries(self):
entries = []
for seed_seq in self.seed_seqs:
seq_string = seed_seq.seq_string
entries.append(
(seed_seq.name,
len(seq_string),
seed_seq.gc_fraction,
1)
)
return entries
def get_splits_basic_info_table_entries(self):
entries = []
for seed_seq in self.seed_seqs:
entries.append(
(seed_seq.name + '_split_00001',
0, # Order of split in parent contig
0, # Start in contig
len(seed_seq.seq_string), # Stop in contig
len(seed_seq.seq_string), # Split length
seed_seq.gc_fraction, # GC content of split
seed_seq.gc_fraction, # GC content of parent contig
seed_seq.name)
)
return entries
def get_hmm_hits_table_entries(self):
"""tRNA seeds are analogous to tRNA gene predictions from a metagenomic contigs database."""
entries = []
for i, seed_seq in enumerate(self.seed_seqs):
entries.append(
(i, # Entry ID
'Transfer_RNAs', # Source, à la tRNA gene prediction via tRNAScan-SE
sha1(seed_seq.seq_string.encode('utf-8')).hexdigest(), # "Gene unique identifier"
i, # "Gene callers ID"
ANTICODON_AA_DICT[seed_seq.anticodon_seq_string] + '_' + seed_seq.anticodon_seq_string, # "Gene name", à la tRNA gene prediction via tRNAScan-SE
'-', # "Gene HMM ID"
0.0) # "HMM E-value"
)
return entries
def get_hmm_hits_in_splits_table_entries(self):
entries = []
for i, seed_seq in enumerate(self.seed_seqs):
entries.append(
(i, # Entry ID
seed_seq.name + '_split_00001', # Split name
100, # Percentage of "HMM hit" in split
'Transfer_RNAs')
)
return entries
def get_genes_in_contigs_table_entries(self):
entries = []
for i, seed_seq in enumerate(self.seed_seqs):
entries.append(
(i, # Gene callers ID
seed_seq.name, # Contig name
0, # Gene start in contig
len(seed_seq.seq_string), # Gene stop in contig
'f', # Direction of gene call on contig
0, # Is partial gene call: for now, say all seeds are "full tRNAs"
2, # Call type: 1 = coding, 2 = noncoding, 3 = unknown
'anvi-trnaseq', # Gene caller
tables.trnaseq_db_version) # Version of caller
)
return entries
def get_genes_in_splits_table_entries(self):
entries = []
for i, seed_seq in enumerate(self.seed_seqs):
entries.append(
(seed_seq.name + '_split_00001',
i,
0,
len(seed_seq.seq_string),
100)
)
return entries
def get_gene_functions_table_entries(self):
entries = []
for i, seed_seq in enumerate(self.seed_seqs):
entries.append(
(i,
'Transfer_RNAs',
'%s_%s_%d' % (ANTICODON_AA_DICT[seed_seq.anticodon_seq_string], seed_seq.anticodon_seq_string, i),
'tRNA transcript',
0.0)
)
return entries
def gen_auxiliary_db(self, db_cov_type):
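        """Generate an auxiliary database of per-split, per-sample coverage arrays. `db_cov_type`
        selects which coverages are stored: 'specific', 'nonspecific', 'combined' (specific and
        nonspecific stored as separate sample layers), or 'summed'."""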
if db_cov_type == 'specific':
auxiliary_db = auxiliarydataops.AuxiliaryDataForSplitCoverages(
self.specific_auxiliary_db_path, self.contigs_db_hash, create_new=True)
for seed_seq in self.seed_seqs:
split_name = seed_seq.name + '_split_00001'
                for sample_id in self.trnaseq_db_sample_ids:
auxiliary_db.append(split_name,
sample_id,
seed_seq.sample_specific_covs_dict[sample_id].tolist())
elif db_cov_type == 'nonspecific':
auxiliary_db = auxiliarydataops.AuxiliaryDataForSplitCoverages(
self.nonspecific_auxiliary_db_path, self.contigs_db_hash, create_new=True)
for seed_seq in self.seed_seqs:
split_name = seed_seq.name + '_split_00001'
for sample_id in self.trnaseq_db_sample_ids:
auxiliary_db.append(split_name,
sample_id,
seed_seq.sample_nonspecific_covs_dict[sample_id].tolist())
elif db_cov_type == 'combined':
auxiliary_db = auxiliarydataops.AuxiliaryDataForSplitCoverages(
self.combined_auxiliary_db_path, self.contigs_db_hash, create_new=True)
for seed_seq in self.seed_seqs:
split_name = seed_seq.name + '_split_00001'
for sample_id in self.trnaseq_db_sample_ids:
auxiliary_db.append(split_name,
sample_id + '_specific',
seed_seq.sample_specific_covs_dict[sample_id].tolist())
auxiliary_db.append(split_name,
sample_id + '_nonspecific',
seed_seq.sample_nonspecific_covs_dict[sample_id].tolist())
elif db_cov_type == 'summed':
auxiliary_db = auxiliarydataops.AuxiliaryDataForSplitCoverages(
self.summed_auxiliary_db_path, self.contigs_db_hash, create_new=True)
for seed_seq in self.seed_seqs:
split_name = seed_seq.name + '_split_00001'
for sample_id in self.trnaseq_db_sample_ids:
auxiliary_db.append(split_name,
sample_id,
(seed_seq.sample_specific_covs_dict[sample_id] + seed_seq.sample_nonspecific_covs_dict[sample_id]).tolist())
else:
raise ConfigError(f"The type of profile database provided, {db_cov_type}, is not among "
"those that are recognized: 'specific', 'nonspecific', 'combined', and 'summed'.")
auxiliary_db.store()
auxiliary_db.close()
def set_sample_total_covs(self):
"""For each input sample, find the total specific, nonspecific and summed coverage of the
seeds across all positions (single integers)."""
sample_total_specific_cov_dict = {sample_id: 0 for sample_id in self.trnaseq_db_sample_ids}
sample_total_nonspecific_cov_dict = {sample_id: 0 for sample_id in self.trnaseq_db_sample_ids}
for seed_seq in self.seed_seqs:
for norm_seq_summary in seed_seq.unmod_norm_seq_summaries:
sample_id = norm_seq_summary.sample_id
specific_cov = norm_seq_summary.specific_covs.sum()
nonspecific_cov = norm_seq_summary.nonspecific_covs.sum()
sample_total_specific_cov_dict[sample_id] += specific_cov
sample_total_nonspecific_cov_dict[sample_id] += nonspecific_cov
for mod_seq_summary in seed_seq.mod_seq_summaries:
sample_id = mod_seq_summary.sample_id
specific_cov = mod_seq_summary.specific_covs.sum()
nonspecific_cov = mod_seq_summary.nonspecific_covs.sum()
sample_total_specific_cov_dict[sample_id] += specific_cov
sample_total_nonspecific_cov_dict[sample_id] += nonspecific_cov
sample_total_summed_cov_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_total_summed_cov_dict[sample_id] = sample_total_specific_cov_dict[sample_id] + sample_total_nonspecific_cov_dict[sample_id]
self.sample_total_specific_cov_dict = sample_total_specific_cov_dict
self.sample_total_nonspecific_cov_dict = sample_total_nonspecific_cov_dict
self.sample_total_summed_cov_dict = sample_total_summed_cov_dict
def set_sample_overall_mean_covs(self):
"""For each input sample, find the mean specific, nonspecific and summed coverage of all
seeds across all positions (single numbers)."""
sample_overall_mean_specific_cov_dict = {}
sample_overall_mean_nonspecific_cov_dict = {}
sample_overall_mean_summed_cov_dict = {}
for sample_id, total_specific_cov in self.sample_total_specific_cov_dict.items():
sample_overall_mean_specific_cov_dict[sample_id] = total_specific_cov / self.total_seed_length
for sample_id, total_nonspecific_cov in self.sample_total_nonspecific_cov_dict.items():
sample_overall_mean_nonspecific_cov_dict[sample_id] = total_nonspecific_cov / self.total_seed_length
for sample_id, total_summed_cov in self.sample_total_summed_cov_dict.items():
sample_overall_mean_summed_cov_dict[sample_id] = total_summed_cov / self.total_seed_length
self.sample_overall_mean_specific_cov_dict = sample_overall_mean_specific_cov_dict
self.sample_overall_mean_nonspecific_cov_dict = sample_overall_mean_nonspecific_cov_dict
self.sample_overall_mean_summed_cov_dict = sample_overall_mean_summed_cov_dict
def set_sample_mean_covs(self):
"""Mean coverage of each seed in a sample."""
for seed_seq in self.seed_seqs:
sample_mean_specific_cov_dict = {}
sample_mean_nonspecific_cov_dict = {}
sample_mean_summed_cov_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_mean_specific_cov_dict[sample_id] = seed_seq.sample_specific_covs_dict[sample_id].mean()
sample_mean_nonspecific_cov_dict[sample_id] = seed_seq.sample_nonspecific_covs_dict[sample_id].mean()
sample_mean_summed_cov_dict[sample_id] = seed_seq.sample_summed_covs_dict[sample_id].mean()
seed_seq.sample_mean_specific_cov_dict = sample_mean_specific_cov_dict
seed_seq.sample_mean_nonspecific_cov_dict = sample_mean_nonspecific_cov_dict
seed_seq.sample_mean_summed_cov_dict = sample_mean_summed_cov_dict
def set_sample_std_covs(self):
"""Standard deviation of the coverage of each seed in a sample."""
for seed_seq in self.seed_seqs:
sample_std_specific_cov_dict = {}
sample_std_nonspecific_cov_dict = {}
sample_std_summed_cov_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_std_specific_cov_dict[sample_id] = seed_seq.sample_specific_covs_dict[sample_id].std()
sample_std_nonspecific_cov_dict[sample_id] = seed_seq.sample_nonspecific_covs_dict[sample_id].std()
sample_std_summed_cov_dict[sample_id] = seed_seq.sample_summed_covs_dict[sample_id].std()
seed_seq.sample_std_specific_cov_dict = sample_std_specific_cov_dict
seed_seq.sample_std_nonspecific_cov_dict = sample_std_nonspecific_cov_dict
seed_seq.sample_std_summed_cov_dict = sample_std_summed_cov_dict
def set_sample_abundances(self):
"""For each sample, and for specific and nonspecific coverages, abundance is defined as the
mean coverage of the seed divided by the mean total coverage of the sample across all
seeds."""
for seed_seq in self.seed_seqs:
sample_specific_abundances_dict = {}
sample_nonspecific_abundances_dict = {}
sample_summed_abundances_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_specific_abundances_dict[sample_id] = seed_seq.sample_mean_specific_cov_dict[sample_id] / self.sample_overall_mean_specific_cov_dict[sample_id]
sample_nonspecific_abundances_dict[sample_id] = seed_seq.sample_mean_nonspecific_cov_dict[sample_id] / self.sample_overall_mean_nonspecific_cov_dict[sample_id]
sample_summed_abundances_dict[sample_id] = seed_seq.sample_mean_summed_cov_dict[sample_id] / self.sample_overall_mean_summed_cov_dict[sample_id]
seed_seq.sample_specific_abundances_dict = sample_specific_abundances_dict
seed_seq.sample_nonspecific_abundances_dict = sample_nonspecific_abundances_dict
seed_seq.sample_summed_abundances_dict = sample_summed_abundances_dict
def set_sample_normalization_multipliers(self):
"""Set a normalization constant for each sample to scale their coverages, allowing the
        relative abundance of seeds in a sample to be compared between samples. Normalization is based
on the total specific coverage of each sample -- one can imagine other ways of doing this,
including use of summed specific and nonspecific coverage, but this would require
deconvoluting the multiple representation of nonspecific reads."""
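        # The sample with the smallest total specific coverage receives a multiplier of 1; every
        # other sample is scaled down proportionally.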
sample_normalization_multiplier_dict = {}
min_total_specific_cov = min([v for v in self.sample_total_specific_cov_dict.values()])
for sample_id, total_specific_cov in self.sample_total_specific_cov_dict.items():
sample_normalization_multiplier_dict[sample_id] = min_total_specific_cov / total_specific_cov
self.sample_normalization_multiplier_dict = sample_normalization_multiplier_dict
def set_sample_normalized_mean_Q2Q3_coverages(self):
"""Scale mean coverages for comparison across samples."""
for seed_seq in self.seed_seqs:
sample_normalized_mean_Q2Q3_specific_cov_dict = {}
sample_normalized_mean_Q2Q3_nonspecific_cov_dict = {}
sample_normalized_mean_Q2Q3_summed_cov_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_normalized_mean_Q2Q3_specific_cov_dict[sample_id] = seed_seq.sample_mean_Q2Q3_specific_cov_dict[sample_id] * self.sample_normalization_multiplier_dict[sample_id]
sample_normalized_mean_Q2Q3_nonspecific_cov_dict[sample_id] = seed_seq.sample_mean_Q2Q3_nonspecific_cov_dict[sample_id] * self.sample_normalization_multiplier_dict[sample_id]
sample_normalized_mean_Q2Q3_summed_cov_dict[sample_id] = seed_seq.sample_mean_Q2Q3_summed_cov_dict[sample_id] * self.sample_normalization_multiplier_dict[sample_id]
seed_seq.sample_normalized_mean_Q2Q3_specific_cov_dict = sample_normalized_mean_Q2Q3_specific_cov_dict
seed_seq.sample_normalized_mean_Q2Q3_nonspecific_cov_dict = sample_normalized_mean_Q2Q3_nonspecific_cov_dict
seed_seq.sample_normalized_mean_Q2Q3_summed_cov_dict = sample_normalized_mean_Q2Q3_summed_cov_dict
def set_sample_detections(self):
"""Find the proportion of each seed sequence covered by reads in a sample."""
for seed_seq in self.seed_seqs:
seed_seq_length = len(seed_seq.seq_string)
sample_specific_detection_dict = {}
sample_nonspecific_detection_dict = {}
sample_summed_detection_dict = {}
for sample_id in self.trnaseq_db_sample_ids:
sample_specific_detection_dict[sample_id] = seed_seq.sample_specific_covs_dict[sample_id].nonzero()[0].size / seed_seq_length
sample_nonspecific_detection_dict[sample_id] = seed_seq.sample_nonspecific_covs_dict[sample_id].nonzero()[0].size / seed_seq_length
sample_summed_detection_dict[sample_id] = seed_seq.sample_summed_covs_dict[sample_id].nonzero()[0].size / seed_seq_length
seed_seq.sample_specific_detection_dict = sample_specific_detection_dict
seed_seq.sample_nonspecific_detection_dict = sample_nonspecific_detection_dict
seed_seq.sample_summed_detection_dict = sample_summed_detection_dict
def set_sample_relative_abundances(self):
"""Relative abundance represents the coverage of the seed in one sample relative to the
total coverage of the seed across samples -- relative abundances sum to one across
samples."""
np.seterr(invalid='ignore')
for seed_seq in self.seed_seqs:
sample_specific_relative_abundances_dict = {}
sample_nonspecific_relative_abundances_dict = {}
sample_summed_relative_abundances_dict = {}
pansample_normalized_mean_Q2Q3_specific_cov = sum(seed_seq.sample_normalized_mean_Q2Q3_specific_cov_dict.values())
pansample_normalized_mean_Q2Q3_nonspecific_cov = sum(seed_seq.sample_normalized_mean_Q2Q3_nonspecific_cov_dict.values())
pansample_normalized_mean_Q2Q3_summed_cov = sum(seed_seq.sample_normalized_mean_Q2Q3_summed_cov_dict.values())
for sample_id in self.trnaseq_db_sample_ids:
sample_specific_relative_abundances_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_specific_cov_dict[sample_id] / pansample_normalized_mean_Q2Q3_specific_cov
sample_nonspecific_relative_abundances_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_nonspecific_cov_dict[sample_id] / pansample_normalized_mean_Q2Q3_nonspecific_cov
sample_summed_relative_abundances_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_summed_cov_dict[sample_id] / pansample_normalized_mean_Q2Q3_summed_cov
seed_seq.sample_specific_relative_abundances_dict = sample_specific_relative_abundances_dict
seed_seq.sample_nonspecific_relative_abundances_dict = sample_nonspecific_relative_abundances_dict
seed_seq.sample_summed_relative_abundances_dict = sample_summed_relative_abundances_dict
np.seterr(invalid='warn')
def set_sample_max_normalized_ratios(self):
"""The max normalized coverage ratio represents the coverage of the seed in one sample
relative to the maximum coverage amongst the samples -- one sample will always have a value
equal to one."""
for seed_seq in self.seed_seqs:
sample_specific_max_normalized_ratio_dict = {}
sample_nonspecific_max_normalized_ratio_dict = {}
sample_summed_max_normalized_ratio_dict = {}
max_normalized_mean_Q2Q3_specific_cov = max(seed_seq.sample_normalized_mean_Q2Q3_specific_cov_dict.values())
max_normalized_mean_Q2Q3_nonspecific_cov = max(seed_seq.sample_normalized_mean_Q2Q3_nonspecific_cov_dict.values())
max_normalized_mean_Q2Q3_summed_cov = max(seed_seq.sample_normalized_mean_Q2Q3_summed_cov_dict.values())
for sample_id in self.trnaseq_db_sample_ids:
sample_specific_max_normalized_ratio_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_specific_cov_dict[sample_id] / max_normalized_mean_Q2Q3_specific_cov if max_normalized_mean_Q2Q3_specific_cov else 0
sample_nonspecific_max_normalized_ratio_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_nonspecific_cov_dict[sample_id] / max_normalized_mean_Q2Q3_nonspecific_cov if max_normalized_mean_Q2Q3_nonspecific_cov else 0
sample_summed_max_normalized_ratio_dict[sample_id] = seed_seq.sample_normalized_mean_Q2Q3_summed_cov_dict[sample_id] / max_normalized_mean_Q2Q3_summed_cov if max_normalized_mean_Q2Q3_summed_cov else 0
seed_seq.sample_specific_max_normalized_ratio_dict = sample_specific_max_normalized_ratio_dict
seed_seq.sample_nonspecific_max_normalized_ratio_dict = sample_nonspecific_max_normalized_ratio_dict
seed_seq.sample_summed_max_normalized_ratio_dict = sample_summed_max_normalized_ratio_dict
def set_variable_nts_table_entries(self):
"""Variable nucleotides in the profile databases are nucleotides with predicted
modifications, not single nucleotide variants. Modifications are determined from specific
coverage, but are displayed in nonspecific and summed profile databases as well."""
entries = []
for sample_id in self.trnaseq_db_sample_ids:
for i, seed_seq in enumerate(self.seed_seqs):
specific_covs = seed_seq.sample_specific_covs_dict[sample_id]
specific_nt_cov_arrays = seed_seq.sample_specific_nt_covs_dict[sample_id]
for pos in seed_seq.sample_mod_positions_dict[sample_id]:
total_cov = specific_covs[pos]
specific_nt_covs = [arr[pos] for arr in specific_nt_cov_arrays]
max_nt_cov = max(specific_nt_covs)
sorted_nt_covs = sorted(zip(UNAMBIG_NTS, specific_nt_covs), key=lambda x: -x[1])
ref_nt = sorted_nt_covs[0][0]
secondary_nt = sorted_nt_covs[1][0]
entries.append((sample_id,
seed_seq.name + '_split_00001',
pos, # Position in split
pos, # Position in contig
i, # Corresponding gene call
1, # In noncoding gene call
0, # In coding gene call
0, # Base position in codon (0 for noncoding gene call)
-1, # Codon order in gene (-1 for noncoding gene call)
total_cov,
0, # Coverage outlier in split (0 or 1)
0, # Coverage outlier in contig (0 or 1)
1 - max_nt_cov / total_cov, # Departure from reference
ref_nt + secondary_nt, # Competing nts (top 2)
ref_nt,
specific_nt_covs[0], # A coverage
specific_nt_covs[1], # C coverage
specific_nt_covs[2], # G coverage
specific_nt_covs[3], # T coverage
0))
self.variable_nts_table_entries = entries
def set_indels_table_entries(self):
"""Indels are determined separately from specific and nonspecific coverages."""
specific_entries = []
nonspecific_entries = []
summed_entries = []
min_indel_fraction = self.min_indel_fraction
for sample_id in self.trnaseq_db_sample_ids:
for seed_seq_index, seed_seq in enumerate(self.seed_seqs):
insert_info = seed_seq.sample_insert_dict[sample_id]
del_info = seed_seq.sample_del_dict[sample_id]
spec_covs = seed_seq.sample_specific_covs_dict[sample_id]
nonspec_covs = seed_seq.sample_nonspecific_covs_dict[sample_id]
for insert_start, insert_string, insert_spec_cov, insert_nonspec_cov in insert_info:
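                    # The reference coverage for an insertion is taken as the mean of the coverages
                    # at `insert_start` and the following position, which flank the insertion.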
spec_cov = (spec_covs[insert_start] + spec_covs[insert_start + 1]) / 2
                    # Assign a frequency of 1 when there is specific coverage of the insertion but
                    # no specific coverage of the flanking reference positions.
insert_freq = 1 if spec_cov == 0 else insert_spec_cov / spec_cov
if insert_freq >= min_indel_fraction:
specific_entries.append((sample_id,
seed_seq.name + '_split_00001',
insert_start, # Position in split
insert_start, # Position in contig
seed_seq_index, # Corresponding gene call
1, # In noncoding gene call
0, # In coding gene call
0, # Base position in codon (0 for noncoding gene call)
-1, # Codon order in gene (-1 for noncoding gene call)
0, # Coverage outlier in split (0 or 1)
0, # Coverage outlier in contig (0 or 1)
seed_seq.seq_string[insert_start], # Reference nt
'INS', # Type of indel
insert_string, # Indel sequence ('' for deletion)
len(insert_string), # Indel length
                                                 insert_spec_cov, # Insertion count (coverage)
spec_cov)) # Reference sequence coverage
nonspec_cov = (nonspec_covs[insert_start] + nonspec_covs[insert_start + 1]) / 2
insert_freq = 1 if nonspec_cov == 0 else insert_nonspec_cov / nonspec_cov
if insert_freq >= min_indel_fraction:
nonspecific_entries.append((sample_id,
seed_seq.name + '_split_00001',
insert_start,
insert_start,
seed_seq_index,
1,
0,
0,
-1,
0,
0,
seed_seq.seq_string[insert_start],
'INS',
insert_string,
len(insert_string),
insert_nonspec_cov,
nonspec_cov))
sum_cov = spec_cov + nonspec_cov
insert_sum_cov = insert_spec_cov + insert_nonspec_cov
insert_freq = 1 if sum_cov == 0 else insert_sum_cov / sum_cov
if insert_freq >= min_indel_fraction:
summed_entries.append((sample_id,
seed_seq.name + '_split_00001',
insert_start,
insert_start,
seed_seq_index,
1,
0,
0,
-1,
0,
0,
seed_seq.seq_string[insert_start],
'INS',
insert_string,
len(insert_string),
insert_sum_cov,
sum_cov))
for del_start, del_length, del_spec_cov, del_nonspec_cov in del_info:
spec_cov = spec_covs[del_start: del_start + del_length].mean()
del_freq = 1 if spec_cov == 0 else del_spec_cov / spec_cov
if del_freq >= min_indel_fraction:
specific_entries.append((sample_id,
seed_seq.name + '_split_00001',
del_start, # Position in split
del_start, # Position in contig
seed_seq_index, # Corresponding gene call
1, # In noncoding gene call
0, # In coding gene call
0, # Base position in codon (0 for noncoding gene call)
-1, # Codon order in gene (-1 for noncoding gene call)
0, # Coverage outlier in split (0 or 1)
0, # Coverage outlier in contig (0 or 1)
seed_seq.seq_string[del_start], # Reference nt
'DEL', # Type of indel
'', # Indel sequence ('' for deletion)
del_length, # Indel length
del_spec_cov, # Deletion count (coverage)
spec_cov)) # Reference sequence coverage
nonspec_cov = nonspec_covs[del_start: del_start + del_length].mean()
del_freq = 1 if nonspec_cov == 0 else del_nonspec_cov / nonspec_cov
if del_freq >= min_indel_fraction:
nonspecific_entries.append((sample_id,
seed_seq.name + '_split_00001',
del_start,
del_start,
seed_seq_index,
1,
0,
0,
-1,
0,
0,
seed_seq.seq_string[del_start],
'DEL',
'',
del_length,
del_nonspec_cov,
nonspec_cov))
sum_cov = spec_cov + nonspec_cov
del_sum_cov = del_spec_cov + del_nonspec_cov
del_freq = 1 if sum_cov == 0 else del_sum_cov / sum_cov
if del_freq >= min_indel_fraction:
summed_entries.append((sample_id,
seed_seq.name + '_split_00001',
del_start,
del_start,
seed_seq_index,
1,
0,
0,
-1,
0,
0,
seed_seq.seq_string[del_start],
'DEL',
'',
del_length,
del_sum_cov,
sum_cov))
self.specific_indels_table_entries = specific_entries
self.nonspecific_indels_table_entries = nonspecific_entries
self.summed_indels_table_entries = summed_entries
def gen_profile_db(self, db_cov_type):
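        """Generate a profile database of tRNA seeds for the given coverage type ('specific',
        'nonspecific', 'combined', or 'summed'), storing per-sample coverage, detection,
        abundance, and variability views along with indel entries."""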
self.progress.new(f"Generating {db_cov_type} profile db")
self.progress.update("...")
if db_cov_type == 'specific':
profile_db_path = self.specific_profile_db_path
elif db_cov_type == 'nonspecific':
profile_db_path = self.nonspecific_profile_db_path
elif db_cov_type == 'combined':
profile_db_path = self.combined_profile_db_path
elif db_cov_type == 'summed':
profile_db_path = self.summed_profile_db_path
else:
raise ConfigError(f"The tRNA-seq coverage type, {db_cov_type}, is not recognized. "
"The only valid options are specific, nonspecific, summed, and combined.")
profile_db = dbops.ProfileDatabase(profile_db_path)
profile_db.touch()
# Profile database meta-values are set in a parallel fashion to `merger.MultipleRuns.merge`.
profile_db.db.set_meta_value('creation_date', time.time())
profile_db.db.set_meta_value('db_type', 'profile')
contigs_db = dbops.ContigsDatabase(self.contigs_db_path, quiet=True)
profile_db.db.set_meta_value('contigs_db_hash', contigs_db.meta['contigs_db_hash'])
profile_db.db.set_meta_value('sample_id', contigs_db.meta['project_name'])
contigs_db.disconnect()
if db_cov_type == 'combined':
profile_db.db.set_meta_value('samples', ', '.join([sample_id + '_' + cov_type
for sample_id in self.trnaseq_db_sample_ids
for cov_type in ('specific', 'nonspecific')]))
else:
profile_db.db.set_meta_value('samples', ', '.join([sample_id for sample_id in self.trnaseq_db_sample_ids]))
# The total number of reads mapped is not calculated, as that would require deconvoluting
# the number of reads that mapped nonspecifically. Also, the total number of mapped reads is
        # less informative here than in metagenomics, since tRNA-seq reads vary greatly in length.
# profile_db.db.set_meta_value('total_reads_mapped', -1)
profile_db.db.set_meta_value('merged', True)
profile_db.db.set_meta_value('blank', False)
profile_db.db.set_meta_value('default_view', 'mean_coverage')
profile_db.db.set_meta_value('min_contig_length', 1)
profile_db.db.set_meta_value('max_contig_length', MAXSIZE)
profile_db.db.set_meta_value('SNVs_profiled', False)
profile_db.db.set_meta_value('SCVs_profiled', False)
profile_db.db.set_meta_value('INDELs_profiled', False)
profile_db.db.set_meta_value('num_contigs', len(self.seed_seqs))
profile_db.db.set_meta_value('num_splits', len(self.seed_seqs))
profile_db.db.set_meta_value('total_length', self.total_seed_length)
profile_db.db.set_meta_value('min_coverage_for_variability', 1)
profile_db.db.set_meta_value('min_indel_fraction', 0)
profile_db.db.set_meta_value('report_variability_full', False)
profile_db.db.set_meta_value('description', self.descrip if self.descrip else '_No description is provided_')
# profile_db.db.set_meta_value('min_percent_identity', -1)
# Whereas variability in metagenomics refers to SNVs, here it refers to modifications.
# Modifications are only identified from specific coverage.
if db_cov_type == 'specific':
profile_db.db._exec_many('''INSERT INTO %s VALUES (%s)'''
% ('variable_nucleotides', ','.join('?' * len(tables.variable_nts_table_structure))),
self.variable_nts_table_entries)
profile_db.db.commit()
if db_cov_type == 'specific' or db_cov_type == 'nonspecific' or db_cov_type == 'summed':
tables_to_create = [('sample_mean_' + db_cov_type + '_cov_dict', 'mean_coverage'),
('sample_std_' + db_cov_type + '_cov_dict', 'std_coverage'),
('sample_' + db_cov_type + '_abundances_dict', 'abundance'),
('sample_' + db_cov_type + '_detection_dict', 'detection'),
('sample_mean_Q2Q3_' + db_cov_type + '_cov_dict', 'mean_coverage_Q2Q3')]
for attr, table_basename in tables_to_create:
data_dict = self.get_specific_nonspecific_or_summed_data_dict(attr)
self.create_specific_nonspecific_or_summed_contigs_and_splits_tables(profile_db_path, table_basename, data_dict)
            # Variability is a measure of the frequency of modification-induced substitutions in
            # seeds. Substitutions are only calculated from specific coverage -- nonspecific coverage is ignored.
variability_data_dict = self.get_specific_nonspecific_or_summed_data_dict('sample_variability_dict')
self.create_specific_nonspecific_or_summed_contigs_and_splits_tables(profile_db_path, 'variability', variability_data_dict)
profile_db.db._exec_many('''INSERT INTO %s VALUES (%s)'''
% ('indels', ','.join('?' * len(tables.indels_table_structure))),
getattr(self, db_cov_type + '_indels_table_entries'))
elif db_cov_type == 'combined':
tables_to_create = [('sample_mean_specific_cov_dict', 'sample_mean_nonspecific_cov_dict', 'mean_coverage'),
('sample_std_specific_cov_dict', 'sample_std_nonspecific_cov_dict', 'std_coverage'),
('sample_specific_abundances_dict', 'sample_nonspecific_abundances_dict', 'abundance'),
('sample_specific_detection_dict', 'sample_nonspecific_detection_dict', 'detection'),
('sample_mean_Q2Q3_specific_cov_dict', 'sample_mean_Q2Q3_nonspecific_cov_dict', 'mean_coverage_Q2Q3')]
for specific_attr, nonspecific_attr, table_basename in tables_to_create:
data_dict = self.get_combined_data_dict(specific_attr, nonspecific_attr)
self.create_combined_contigs_and_splits_tables(profile_db_path, table_basename, data_dict)
            # Variability is a measure of the frequency of modification-induced substitutions in
            # seeds. Substitutions are only calculated from specific coverage -- nonspecific coverage is ignored.
variability_data_dict = self.get_combined_data_dict('sample_variability_dict', 'sample_variability_dict')
self.create_combined_contigs_and_splits_tables(profile_db_path, 'variability', variability_data_dict)
combined_indels_table_entries = []
for entry in self.specific_indels_table_entries:
combined_indels_table_entries.append((entry[0] + '_specific', ) + entry[1: ])
for entry in self.nonspecific_indels_table_entries:
combined_indels_table_entries.append((entry[0] + '_nonspecific', ) + entry[1: ])
profile_db.db._exec_many('''INSERT INTO %s VALUES (%s)'''
% ('indels', ','.join('?' * len(tables.indels_table_structure))),
combined_indels_table_entries)
profile_db.db.commit()
self.progress.end()
# Add layers for anticodon and corresponding amino acid.
items_additional_data_table = miscdata.MiscDataTableFactory(argparse.Namespace(profile_db=profile_db_path, target_data_table='items'))
data_dict = {}
for seed_seq in self.seed_seqs:
data_dict[seed_seq.name + '_split_00001'] = {'anticodon': seed_seq.anticodon_seq_string,
'amino_acid': ANTICODON_AA_DICT[seed_seq.anticodon_seq_string]}
items_additional_data_table.add(data_dict, ['anticodon', 'amino_acid'])
# Cluster tRNA seeds to form the central dendrogram in anvi-interactive.
dbops.do_hierarchical_clustering_of_items(profile_db_path,
constants.clustering_configs['trnaseq'],
[seed_seq.name + '_split_00001' for seed_seq in self.seed_seqs],
{'CONTIGS.db': self.contigs_db_path, 'PROFILE.db': profile_db_path},
input_directory=os.path.dirname(profile_db_path),
default_clustering_config=constants.trnaseq_default,
distance=self.distance,
linkage=self.linkage,
run=self.run,
progress=self.progress)
profile_db.db.set_meta_value('items_ordered', True)
profile_db.db.disconnect()
# Cluster samples by "view" data to find possible sample layer orderings.
profile_db_super = dbops.ProfileSuperclass(argparse.Namespace(profile_db=profile_db_path))
profile_db_super.load_views(omit_parent_column=True)
layer_orders_data_dict = {}
failed_attempts = []
for essential_field in constants.essential_data_fields_for_anvio_profiles:
try:
data_value = clustering.get_newick_tree_data_for_dict(profile_db_super.views[essential_field]['dict'],
distance=self.distance,
linkage=self.linkage,
transpose=True)
layer_orders_data_dict[essential_field] = {'data_value': data_value, 'data_type': 'newick'}
except:
failed_attempts.append(essential_field)
if not len(layer_orders_data_dict):
self.run.warning("This may or may not be important: anvi'o attempted to generate orders for your "
"samples based on the view data, however, it failed :/")
return
if len(failed_attempts):
self.run.warning("While anvi'o was trying to generate clusterings of samples based on view data "
f"available in the {db_cov_type} profile, clustering of some of the essential data "
"failed. It is likely not a very big deal, but you shall be the judge of it. "
"Anvi'o now proceeds to store layers order information for those view items "
"the clustering in fact worked. Here is the list of stuff that failed: '%s'"\
% (', '.join(failed_attempts)))
# Add the layer orders quietly.
TableForLayerOrders(argparse.Namespace(profile_db=profile_db_path), r=terminal.Run(verbose=False)).add(layer_orders_data_dict)
def get_specific_nonspecific_or_summed_data_dict(self, seed_seq_attr):
"""Get data from seed sequences to generate a table in a specific, nonspecific, or summed
profile database."""
data_dict = {}
for seed_seq in self.seed_seqs:
seed_seq_name = seed_seq.name
seed_seq_split_name = seed_seq_name + '_split_00001'
data_dict[seed_seq_split_name] = {}
for sample_id in self.trnaseq_db_sample_ids:
data_dict[seed_seq_split_name][sample_id] = getattr(seed_seq, seed_seq_attr)[sample_id]
return data_dict
def get_combined_data_dict(self, specific_seed_seq_attr, nonspecific_seed_seq_attr):
"""Get data from seed sequences to generate a table in a combined profile database."""
data_dict = {}
for seed_seq in self.seed_seqs:
seed_seq_name = seed_seq.name
seed_seq_split_name = seed_seq_name + '_split_00001'
            data_dict[seed_seq_split_name] = {}
for sample_id in self.trnaseq_db_sample_ids:
data_dict[seed_seq_split_name][sample_id + '_specific'] = getattr(seed_seq, specific_seed_seq_attr)[sample_id]
data_dict[seed_seq_split_name][sample_id + '_nonspecific'] = getattr(seed_seq, nonspecific_seed_seq_attr)[sample_id]
return data_dict
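    # Shape sketch (editor's illustration, hypothetical names): both helpers above
    # return nested dicts keyed first by split name and then by sample, e.g.
    #   {'seed_x_split_00001': {'sample_1_specific': 12.0,
    #                           'sample_1_nonspecific': 3.0}}
    # for the combined case, or plain sample IDs for the specific/nonspecific/summed case.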
def create_specific_nonspecific_or_summed_contigs_and_splits_tables(self, profile_db_path, table_basename, data_dict):
"""Create a pair of tables in a specific, nonspecific, or summed profile database. Contigs
and splits tables contain the same information since tRNA, unlike a metagenomic contig, is
not long enough to be split."""
TablesForViews(profile_db_path).create_new_view(
view_data=data_dict,
table_name=table_basename + '_contigs',
view_name=None,
from_matrix_form=True)
TablesForViews(profile_db_path).create_new_view(
view_data=data_dict,
table_name=table_basename + '_splits',
view_name=table_basename,
from_matrix_form=True)
def create_combined_contigs_and_splits_tables(self, profile_db_path, table_basename, data_dict):
"""Create a pair of tables in a combined profile database. Contigs and splits tables contain
the same information since tRNA, unlike a metagenomic contig, is not long enough to be
split."""
TablesForViews(profile_db_path).create_new_view(
view_data=data_dict,
table_name=table_basename + '_contigs',
view_name=None,
from_matrix_form=True)
TablesForViews(profile_db_path).create_new_view(
view_data=data_dict,
table_name=table_basename + '_splits',
view_name=table_basename,
from_matrix_form=True)
def trnaseq_db_loader(input_queue, output_queue, db_converter):
"""This client for `DatabaseConverter.load_trnaseq_db_seq_info` is located outside the
`DatabaseConverter` class to allow multiprocessing."""
while True:
trnaseq_db_path = input_queue.get()
unmod_norm_seq_summaries, mod_seq_summaries = db_converter.load_trnaseq_db_seq_info(trnaseq_db_path)
output_queue.put((trnaseq_db_path, unmod_norm_seq_summaries, mod_seq_summaries))
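# Editor's sketch (not part of anvi'o): one way the queue-based worker above could
# be driven with the multiprocessing module. `db_converter` is assumed to be a
# picklable DatabaseConverter instance and `trnaseq_db_paths` a list of database
# paths; both names are placeholders.
def _example_run_trnaseq_db_loaders(db_converter, trnaseq_db_paths, num_processes=2):
    import multiprocessing as mp
    input_queue, output_queue = mp.Queue(), mp.Queue()
    # Start the workers; each one blocks on input_queue.get().
    workers = [mp.Process(target=trnaseq_db_loader,
                          args=(input_queue, output_queue, db_converter))
               for _ in range(num_processes)]
    for worker in workers:
        worker.start()
    for path in trnaseq_db_paths:
        input_queue.put(path)
    # Collect one result tuple per input database.
    results = [output_queue.get() for _ in trnaseq_db_paths]
    # The workers loop forever, so stop them explicitly once all results are in.
    for worker in workers:
        worker.terminate()
    return results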
|
gpl-3.0
|
3324fr/spinalcordtoolbox
|
dev/sct_nurbs.py
|
1
|
16659
|
#!/usr/bin/env python
## @package sct_nurbs
#
# - Python class to approximate or interpolate a 3D curve with a B-Spline curve from either a set of data points or a set of control points
#
#
# Description about how the function works:
#
# If a set of data points is given, it generates a B-spline that either approximates the curve in the least-squares sense, or interpolates the curve.
# It also computes the derivative of the 3D curve.
# getCourbe3D() returns the 3D fitted curve. The fitted z coordinate corresponds to the initial z, and the x and y are averaged for a given z.
# getCourbe3D_deriv() returns the derivative of the 3D fitted curve, also averaged along the z-axis.
#
# USAGE
# ---------------------------------------------------------------------------------------
# from sct_nurbs import *
# nurbs=NURBS(degree,precision,data)
#
# MANDATORY ARGUMENTS
# ---------------------------------------------------------------------------------------
# degree the degree of the fitting B-spline curve
# precision number of points before averaging data
# data 3D list [x,y,z] of the data requiring fitting
#
# OPTIONAL ARGUMENTS
# ---------------------------------------------------------------------------------------
#
#
#
# EXAMPLES
# ---------------------------------------------------------------------------------------
# from sct_nurbs import *
# nurbs = NURBS(3,1000,[[x_centerline[n],y_centerline[n],z_centerline[n]] for n in range(len(x_centerline))])
# P = nurbs.getCourbe3D()
# x_centerline_fit = P[0]
# y_centerline_fit = P[1]
# z_centerline_fit = P[2]
# D = nurbs.getCourbe3D_deriv()
# x_centerline_fit_der = D[0]
# y_centerline_fit_der = D[1]
# z_centerline_fit_der = D[2]
#
# DEPENDENCIES
# ---------------------------------------------------------------------------------------
# EXTERNAL PYTHON PACKAGES
# - scipy: <http://www.scipy.org>
# - numpy: <http://www.numpy.org>
#
# EXTERNAL SOFTWARE
#
# none
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 NeuroPoly, Polytechnique Montreal <www.neuropoly.info>
# Authors: Benjamin De Leener, Julien Touati
# Modified: 2014-07-01
#
# License: see the LICENSE.TXT
#=======================================================================================================================
import sys
import math
# check that the required Python libraries are installed
try:
from numpy import *
except ImportError:
print '--- numpy not installed! ---'
sys.exit(2)
try:
from scipy.interpolate import interp1d
except ImportError:
print '--- scipy not installed! ---'
sys.exit(2)
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
class NURBS():
def __init__(self, degre=3, precision=1000, liste=None, nurbs_ctl_points = 0, sens=False):
"""
Ce constructeur initialise une NURBS et la construit.
Si la variable sens est True : On construit la courbe en fonction des points de controle
Si la variable sens est False : On reconstruit les points de controle en fonction de la courbe
"""
self.degre = degre+1
self.sens = sens
self.pointsControle = []
self.pointsControleRelatif = []
self.courbe3D = []
self.courbe3D_deriv = []
        self.nbControle = 10 ### corresponds to the number of control points computed.
self.precision = precision
        if sens: #### if the control points are given #####
if type(liste[0][0]).__name__ == 'list':
self.pointsControle = liste
else:
self.pointsControle.append(liste)
for li in self.pointsControle:
                [[P_x,P_y,P_z],[P_x_d,P_y_d,P_z_d]] = self.construct3D(li,degre,self.precision)
                self.courbe3D.append([[P_x[i],P_y[i],P_z[i]] for i in range(len(P_x))])
                self.courbe3D_deriv.append([[P_x_d[i],P_y_d[i],P_z_d[i]] for i in range(len(P_x_d))])
        else:
            # The input is given as a list of data points
P_x = [x[0] for x in liste]
P_y = [x[1] for x in liste]
P_z = [x[2] for x in liste]
if nurbs_ctl_points == 0:
                self.nbControle = len(P_z)/5 ## order 3 -> len(P_z)/10, 4 -> len/7, 5 -> len/5 gives a good approximation without "over-interpolating" the curve
                # increase nbControle if "short data"
else:
print 'In NURBS we get nurbs_ctl_points = ',nurbs_ctl_points
self.nbControle = nurbs_ctl_points
self.pointsControle = self.reconstructGlobalApproximation(P_x,P_y,P_z,self.degre,self.nbControle)
self.courbe3D, self.courbe3D_deriv= self.construct3D(self.pointsControle,self.degre,self.precision)
def getControle(self):
return self.pointsControle
def setControle(self,pointsControle):
self.pointsControle = pointsControle
def getCourbe3D(self):
        #print 'bonheur'
return self.courbe3D
def getCourbe3D_deriv(self):
return self.courbe3D_deriv
    # Multiply two polynomials
def multipolynome(self,polyA,polyB):
result = [];
for r in polyB:
temp = polyA*r[0]
result.append([temp, r[-1]])
return result
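    # Editor's note: N(i, k, x) below builds the B-spline basis functions with the
    # Cox-de Boor recursion, here with k the order (degree + 1):
    #   N_{i,1}(t) = 1 on [x_i, x_{i+1})
    #   N_{i,k}(t) = (t - x_i)/(x_{i+k-1} - x_i) * N_{i,k-1}(t)
    #              + (x_{i+k} - t)/(x_{i+k} - x_{i+1}) * N_{i+1,k-1}(t)
    # Each basis function is stored as a list of (numpy poly1d, knot-interval index)
    # pairs so that evaluateN() can pick the piece that is active at a given t.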
def N(self,i,k,x):
global Nik_temp
if k==1:
tab = [[poly1d(1),i+1]]
else:
tab = []
den_g = x[i+k-1]-x[i]
den_d = x[i+k]-x[i+1]
if den_g != 0:
if Nik_temp[i][k-1] == -1:
Nik_temp[i][k-1] = self.N(i,k-1,x)
tab_b = self.multipolynome(poly1d([1/den_g,-x[i]/den_g]),Nik_temp[i][k-1])
tab.extend(tab_b)
if den_d != 0:
if Nik_temp[i+1][k-1] == -1:
Nik_temp[i+1][k-1] = self.N(i+1,k-1,x)
tab_d = self.multipolynome(poly1d([-1/den_d,x[i+k]/den_d]),Nik_temp[i+1][k-1])
tab.extend(tab_d)
return tab
def Np(self,i,k,x):
global Nik_temp_deriv, Nik_temp
if k==1:
tab = [[poly1d(0),i+1]]
else:
tab = []
den_g = x[i+k-1]-x[i]
den_d = x[i+k]-x[i+1]
if den_g != 0:
if Nik_temp_deriv[i][-1] == -1:
Nik_temp_deriv[i][-1] = self.N(i,k-1,x)
tab_b = self.multipolynome(poly1d([k/den_g]),Nik_temp_deriv[i][-1])
tab.extend(tab_b)
if den_d != 0:
if Nik_temp_deriv[i+1][-1] == -1 :
Nik_temp_deriv[i+1][-1] = self.N(i+1,k-1,x)
tab_d = self.multipolynome(poly1d([-k/den_d]),Nik_temp_deriv[i+1][-1])
tab.extend(tab_d)
return tab
def evaluateN(self,Ni,t,x):
result = 0;
for Ni_temp in Ni:
if x[Ni_temp[-1]-1] <= t <= x[Ni_temp[-1]]:
result += Ni_temp[0](t)
return result
def calculX3D(self,P,k):
n = len(P)-1
c = []
sumC = 0
for i in xrange(n):
dist = math.sqrt((P[i+1][0]-P[i][0])**2 + (P[i+1][1]-P[i][1])**2 + (P[i+1][2]-P[i][2])**2)
c.append(dist)
sumC += dist
x = [0]*k
sumCI = 0
for i in xrange(n-k+1):
sumCI += c[i+1]
value = (n-k+2)/sumC*((i+1)*c[i+1]/(n-k+2) + sumCI)
x.append(value)
x.extend([n-k+2]*k)
return x
    def construct3D(self,P,k,prec): # P: control points
global Nik_temp, Nik_temp_deriv
        n = len(P) # number of control points
        # Compute the knot values x_i
x = self.calculX3D(P,k)
        # Compute the N(i,k) basis function coefficients
Nik_temp = [[-1 for j in xrange(k)] for i in xrange(n)]
for i in xrange(n):
Nik_temp[i][-1] = self.N(i,k,x)
Nik = []
for i in xrange(n):
Nik.append(Nik_temp[i][-1])
        # Compute the derivatives N'(i,k)
Nik_temp_deriv = [[-1] for i in xrange(n)]
for i in xrange(n):
Nik_temp_deriv[i][-1]=self.Np(i,k,x)
Nikp=[]
for i in xrange(n):
Nikp.append(Nik_temp_deriv[i][-1])
        # Compute the curve
param = linspace(x[0],x[-1],prec)
        P_x,P_y,P_z = [],[],[] # fitted coordinates
        P_x_d,P_y_d,P_z_d=[],[],[] # derivatives
for i in xrange(len(param)):
sum_num_x,sum_num_y,sum_num_z,sum_den = 0,0,0,0
sum_num_x_der,sum_num_y_der,sum_num_z_der,sum_den_der = 0,0,0,0
            for l in xrange(n-k+1): # only the non-zero basis functions are used
if x[l+k-1]<=param[i]<x[l+k]:
debut = l
fin = debut+k-1
for j,point in enumerate(P[debut:fin+1]):
j = j+debut
N_temp = self.evaluateN(Nik[j],param[i],x)
N_temp_deriv = self.evaluateN(Nikp[j],param[i],x)
sum_num_x += N_temp*point[0]
sum_num_y += N_temp*point[1]
sum_num_z += N_temp*point[2]
sum_den += N_temp
sum_num_x_der += N_temp_deriv*point[0]
sum_num_y_der += N_temp_deriv*point[1]
sum_num_z_der += N_temp_deriv*point[2]
sum_den_der += N_temp_deriv
P_x.append(sum_num_x/sum_den) # sum_den = 1 !
P_y.append(sum_num_y/sum_den)
P_z.append(sum_num_z/sum_den)
P_x_d.append(sum_num_x_der)
P_y_d.append(sum_num_y_der)
P_z_d.append(sum_num_z_der)
        # We want the fitted coordinates to have the same z as the original ones, so z is rounded to integers and x and y are averaged for each z.
P_x=array(P_x)
P_y=array(P_y)
P_x_d=array(P_x_d)
P_y_d=array(P_y_d)
P_z_d=array(P_z_d)
P_z=array([int(round(P_z[i])) for i in range(0,len(P_z))])
#not perfect but works (if "enough" points), in order to deal with missing z slices
for i in range (min(P_z),max(P_z)+1,1):
if (i in P_z) is False :
#print ' Missing z slice '
#print i
P_z = insert(P_z,where(P_z==i-1)[-1][-1]+1,i)
P_x = insert(P_x,where(P_z==i-1)[-1][-1]+1,(P_x[where(P_z==i-1)[-1][-1]+1-1]+P_x[where(P_z==i-1)[-1][-1]+1+1])/2)
P_y = insert(P_y,where(P_z==i-1)[-1][-1]+1,(P_y[where(P_z==i-1)[-1][-1]+1-1]+P_y[where(P_z==i-1)[-1][-1]+1+1])/2)
P_x_d = insert(P_x_d,where(P_z==i-1)[-1][-1]+1,(P_x_d[where(P_z==i-1)[-1][-1]+1-1]+P_x_d[where(P_z==i-1)[-1][-1]+1+1])/2)
P_y_d = insert(P_y_d,where(P_z==i-1)[-1][-1]+1,(P_y_d[where(P_z==i-1)[-1][-1]+1-1]+P_y_d[where(P_z==i-1)[-1][-1]+1+1])/2)
P_z_d = insert(P_z_d,where(P_z==i-1)[-1][-1]+1,(P_z_d[where(P_z==i-1)[-1][-1]+1-1]+P_z_d[where(P_z==i-1)[-1][-1]+1+1])/2)
coord_mean = array([[mean(P_x[P_z==i]),mean(P_y[P_z==i]),i] for i in range(min(P_z),max(P_z)+1,1)])
P_x=coord_mean[:,:][:,0]
P_y=coord_mean[:,:][:,1]
coord_mean_d = array([[mean(P_x_d[P_z==i]),mean(P_y_d[P_z==i]),mean(P_z_d[P_z==i])] for i in range(min(P_z),max(P_z)+1,1)])
P_z=coord_mean[:,:][:,2]
P_x_d=coord_mean_d[:,:][:,0]
P_y_d=coord_mean_d[:,:][:,1]
P_z_d=coord_mean_d[:,:][:,2]
#print P_x_d,P_y_d,P_z_d
# p=len(P_x)/3
# n=1
# #plotting a tangent
# p1 = [P_x[p],P_y[p],P_z[p]]
# p2 = [P_x[p]+n*P_x_d[p],P_y[p]+n*P_y_d[p],P_z[p]+n*P_z_d[p]]
# #### 3D plot
# fig1 = plt.figure()
# ax = Axes3D(fig1)
# #ax.plot(x_centerline,y_centerline,z_centerline,zdir='z')
# ax.plot(P_x,P_y,P_z,zdir='z')
# ax.plot([p1[0],p2[0]],[p1[1],p2[1]],[p1[2],p2[2]],zdir='z')
# #ax.plot(x_centerline_fit_der,y_centerline_fit_der,z_centerline_fit_der,zdir='z')
# plt.show()
        #print 'Construction done'
return [P_x,P_y,P_z], [P_x_d,P_y_d,P_z_d]
def Tk(self,k,Q,Nik,ubar,u):
return Q[k] - self.evaluateN(Nik[-1],ubar,u)*Q[-1] - self.evaluateN(Nik[0],ubar,u)*Q[0]
def reconstructGlobalApproximation(self,P_x,P_y,P_z,p,n):
        # p = degree of the NURBS
        # n = desired number of control points
global Nik_temp
m = len(P_x)
        # Compute the chord lengths
di = 0
for k in xrange(m-1):
di += math.sqrt((P_x[k+1]-P_x[k])**2 + (P_y[k+1]-P_y[k])**2 + (P_z[k+1]-P_z[k])**2)
u = [0]*p
ubar = [0]
for k in xrange(m-1):
ubar.append(ubar[-1]+math.sqrt((P_x[k+1]-P_x[k])**2 + (P_y[k+1]-P_y[k])**2 + (P_z[k+1]-P_z[k])**2)/di)
d = (m+1)/(n-p+1)
for j in xrange(n-p):
i = int((j+1)*d)
alpha = (j+1)*d-i
u.append((1-alpha)*ubar[i-1]+alpha*ubar[i])
u.extend([1]*p)
Nik_temp = [[-1 for j in xrange(p)] for i in xrange(n)]
for i in xrange(n):
Nik_temp[i][-1] = self.N(i,p,u)
Nik = []
for i in xrange(n):
Nik.append(Nik_temp[i][-1])
R = []
for k in xrange(m-1):
Rtemp = []
den = 0
for Ni in Nik:
den += self.evaluateN(Ni,ubar[k],u)
for i in xrange(n-1):
Rtemp.append(self.evaluateN(Nik[i],ubar[k],u)/den)
R.append(Rtemp)
R = matrix(R)
        # compute the denominators for each ubar
denU = []
for k in xrange(m-1):
temp = 0
for Ni in Nik:
temp += self.evaluateN(Ni,ubar[k],u)
denU.append(temp)
Tx = []
for i in xrange(n-1):
somme = 0
for k in xrange(m-1):
somme += self.evaluateN(Nik[i],ubar[k],u)*self.Tk(k,P_x,Nik,ubar[k],u)/denU[k]
Tx.append(somme)
Tx = matrix(Tx)
Ty = []
for i in xrange(n-1):
somme = 0
for k in xrange(m-1):
somme += self.evaluateN(Nik[i],ubar[k],u)*self.Tk(k,P_y,Nik,ubar[k],u)/denU[k]
Ty.append(somme)
Ty = matrix(Ty)
Tz = []
for i in xrange(n-1):
somme = 0
for k in xrange(m-1):
somme += self.evaluateN(Nik[i],ubar[k],u)*self.Tk(k,P_z,Nik,ubar[k],u)/denU[k]
Tz.append(somme)
Tz = matrix(Tz)
P_xb = (R.T*R).I*Tx.T
P_yb = (R.T*R).I*Ty.T
P_zb = (R.T*R).I*Tz.T
P = [[P_xb[i,0],P_yb[i,0],P_zb[i,0]] for i in range(len(P_xb))]
        # Force the first and last control points onto the original endpoints
P[0][0],P[0][1],P[0][2] = P_x[0],P_y[0],P_z[0]
P[-1][0],P[-1][1],P[-1][2] = P_x[-1],P_y[-1],P_z[-1]
        #print 'Reconstruction done'
return P
def reconstructGlobalInterpolation(self,P_x,P_y,P_z,p): ### now in 3D
global Nik_temp
n = 13
l = len(P_x)
        newPx = P_x[::int(round(l/(n-1)))]
        newPy = P_y[::int(round(l/(n-1)))]
        newPz = P_z[::int(round(l/(n-1)))]
newPx.append(P_x[-1])
newPy.append(P_y[-1])
newPz.append(P_z[-1])
n = len(newPx)
        # Compute the knot vector
di = 0
for k in xrange(n-1):
di += math.sqrt((newPx[k+1]-newPx[k])**2 + (newPy[k+1]-newPy[k])**2 +(newPz[k+1]-newPz[k])**2)
u = [0]*p
ubar = [0]
for k in xrange(n-1):
ubar.append(ubar[-1]+math.sqrt((newPx[k+1]-newPx[k])**2 + (newPy[k+1]-newPy[k])**2 + (newPz[k+1]-newPz[k])**2)/di)
for j in xrange(n-p):
sumU = 0
for i in xrange(p):
sumU = sumU + ubar[j+i]
u.append(sumU/p)
u.extend([1]*p)
        # Build the basis functions
Nik_temp = [[-1 for j in xrange(p)] for i in xrange(n)]
for i in xrange(n):
Nik_temp[i][-1] = self.N(i,p,u)
Nik = []
for i in xrange(n):
Nik.append(Nik_temp[i][-1])
        # Build the matrices
M = []
for i in xrange(n):
ligneM = []
for j in xrange(n):
ligneM.append(self.evaluateN(Nik[j],ubar[i],u))
M.append(ligneM)
M = matrix(M)
        # Matrix of interpolated points
Qx = matrix(newPx).T
Qy = matrix(newPy).T
Qz = matrix(newPz).T
        # Compute the control points
P_xb = M.I*Qx
P_yb = M.I*Qy
P_zb = M.I*Qz
return [[P_xb[i,0],P_yb[i,0],P_zb[i,0]] for i in range(len(P_xb))]
|
mit
|
jakevdp/altair
|
altair/utils/tests/test_utils.py
|
1
|
3876
|
import pytest
import warnings
import json
import numpy as np
import pandas as pd
import six
from .. import infer_vegalite_type, sanitize_dataframe
def test_infer_vegalite_type():
def _check(arr, typ):
assert infer_vegalite_type(arr) == typ
_check(np.arange(5, dtype=float), 'quantitative')
_check(np.arange(5, dtype=int), 'quantitative')
_check(np.zeros(5, dtype=bool), 'nominal')
_check(pd.date_range('2012', '2013'), 'temporal')
_check(pd.timedelta_range(365, periods=12), 'temporal')
nulled = pd.Series(np.random.randint(10, size=10))
nulled[0] = None
_check(nulled, 'quantitative')
_check(['a', 'b', 'c'], 'nominal')
if hasattr(pytest, 'warns'): # added in pytest 2.8
with pytest.warns(UserWarning):
_check([], 'nominal')
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
_check([], 'nominal')
def test_sanitize_dataframe():
# create a dataframe with various types
df = pd.DataFrame({'s': list('abcde'),
'f': np.arange(5, dtype=float),
'i': np.arange(5, dtype=int),
'b': np.array([True, False, True, True, False]),
'd': pd.date_range('2012-01-01', periods=5, freq='H'),
'c': pd.Series(list('ababc'), dtype='category'),
'c2': pd.Series([1, 'A', 2.5, 'B', None],
dtype='category'),
'o': pd.Series([np.array(i) for i in range(5)]),
'p': pd.date_range('2012-01-01', periods=5, freq='H').tz_localize('UTC')})
# add some nulls
df.iloc[0, df.columns.get_loc('s')] = None
df.iloc[0, df.columns.get_loc('f')] = np.nan
df.iloc[0, df.columns.get_loc('d')] = pd.NaT
df.iloc[0, df.columns.get_loc('o')] = np.array(np.nan)
# JSON serialize. This will fail on non-sanitized dataframes
print(df[['s', 'c2']])
df_clean = sanitize_dataframe(df)
print(df_clean[['s', 'c2']])
print(df_clean[['s', 'c2']].to_dict())
s = json.dumps(df_clean.to_dict(orient='records'))
print(s)
# Re-construct pandas dataframe
df2 = pd.read_json(s)
# Re-order the columns to match df
df2 = df2[df.columns]
# Re-apply original types
for col in df:
if str(df[col].dtype).startswith('datetime'):
# astype(datetime) introduces time-zone issues:
# to_datetime() does not.
utc = isinstance(df[col].dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)
df2[col] = pd.to_datetime(df2[col], utc = utc)
else:
df2[col] = df2[col].astype(df[col].dtype)
# pandas doesn't properly recognize np.array(np.nan), so change it here
df.iloc[0, df.columns.get_loc('o')] = np.nan
assert df.equals(df2)
def test_sanitize_dataframe_colnames():
df = pd.DataFrame(np.arange(12).reshape(4, 3))
# Test that RangeIndex is converted to strings
df = sanitize_dataframe(df)
assert [isinstance(col, six.string_types) for col in df.columns]
# Test that non-string columns result in an error
df.columns = [4, 'foo', 'bar']
with pytest.raises(ValueError) as err:
sanitize_dataframe(df)
assert str(err.value).startswith('Dataframe contains invalid column name: 4.')
def test_sanitize_dataframe_timedelta():
df = pd.DataFrame({'r': pd.timedelta_range(start='1 day', periods=4)})
with pytest.raises(ValueError) as err:
sanitize_dataframe(df)
assert str(err.value).startswith('Field "r" has type "timedelta')
def test_sanitize_dataframe_infs():
df = pd.DataFrame({'x': [0, 1, 2, np.inf, -np.inf, np.nan]})
df_clean = sanitize_dataframe(df)
assert list(df_clean.dtypes) == [object]
assert list(df_clean['x']) == [0, 1, 2, None, None, None]
|
bsd-3-clause
|
kaichogami/scikit-learn
|
sklearn/utils/tests/test_seq_dataset.py
|
47
|
2486
|
# Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
|
bsd-3-clause
|
prattmic/gnucash_analysis
|
gnucash_pandas.py
|
1
|
4484
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import gnucash
import numpy as np
import pandas as pd
import time
account_types = {
gnucash.ACCT_TYPE_ASSET: "Asset",
gnucash.ACCT_TYPE_BANK: "Bank",
gnucash.ACCT_TYPE_CASH: "Cash",
gnucash.ACCT_TYPE_CHECKING: "Checking",
gnucash.ACCT_TYPE_CREDIT: "Credit",
gnucash.ACCT_TYPE_EQUITY: "Equity",
gnucash.ACCT_TYPE_EXPENSE: "Expense",
gnucash.ACCT_TYPE_INCOME: "Income",
gnucash.ACCT_TYPE_LIABILITY: "Liability",
gnucash.ACCT_TYPE_MUTUAL: "Mutual",
gnucash.ACCT_TYPE_PAYABLE: "Payable",
gnucash.ACCT_TYPE_RECEIVABLE: "Receivable",
gnucash.ACCT_TYPE_ROOT: "Root",
gnucash.ACCT_TYPE_STOCK: "Stock",
gnucash.ACCT_TYPE_TRADING: "Trading",
}
def all_accounts(root):
"""Get all gnucash Accounts,
Args:
root: Base Account to start from
Returns:
List of all accounts.
"""
accounts = []
for account in root.get_children():
accounts.append(account)
subaccounts = all_accounts(account)
for subaccount in subaccounts:
accounts.append(subaccount)
return accounts
def splits_dataframe(gnc_file):
"""Get GnuCash splits as Pandas DataFrame.
Args:
gnc_file: GnuCash source file.
Returns:
DataFrame with split data from each account.
"""
session = gnucash.Session(gnc_file)
try:
book = session.get_book()
root = book.get_root_account()
accounts = all_accounts(root)
splits = []
for account in accounts:
name = account.name
typ = account_types[account.GetType()]
for split in account.GetSplitList():
transaction = split.GetParent()
date = transaction.GetDate()
description = transaction.GetDescription()
amount = split.GetAmount().to_double()
splits.append((name, typ, date, description, amount))
return pd.DataFrame(splits, columns=['account', 'type', 'date',
'description', 'amount'])
finally:
session.end()
session.destroy()
def daily(df, account_type):
"""DataFrame of daily totals in each account.
Args:
df: DataFrame in format returned by splits_dataframe
account_type: Only include accounts of this type
Returns:
DataFrame indexed by date, with a column for each account. Each value
is the transaction total for that account that day.
"""
df = df[df['type'] == account_type]
df = pd.pivot_table(df, index=pd.DatetimeIndex(df['date']),
columns='account', values='amount', aggfunc=np.sum)
df = df.resample('D').sum()
return df
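# Editor's sketch (hypothetical usage, not part of the original script): how
# splits_dataframe() and daily() combine into per-day expense totals.
# 'example.gnucash' is a placeholder file name.
def _example_daily_expenses(gnc_file='example.gnucash'):
    splits = splits_dataframe(gnc_file)
    # One column per expense account, one row per day.
    return daily(splits, 'Expense')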
def balances_dataframe(gnc_file, start, end):
"""Get GnuCash daily account balances as Pandas DataFrame.
All balances are in USD.
Args:
gnc_file: GnuCash source file.
start: datetime.date first day to include.
end: datetime.date last day to include.
Returns:
DataFrame with USD balance from each account.
"""
dates = []
curr = start
while curr < end:
dates.append(curr)
curr += datetime.timedelta(days=1)
session = gnucash.Session(gnc_file)
try:
book = session.get_book()
usd = book.get_table().lookup("CURRENCY", "USD")
root = book.get_root_account()
accounts = all_accounts(root)
balances = []
for account in accounts:
# TODO(prattmic): Full name in splits_dataframe
name = account.get_full_name()
typ = account_types[account.GetType()]
for date in dates:
t = time.mktime(date.timetuple())
# Third argument is whether to include child accounts.
balance = account.GetBalanceAsOfDateInCurrency(t, usd, False).to_double()
balances.append((name, typ, date, balance))
return pd.DataFrame(balances, columns=['account', 'type', 'date',
'balance'])
finally:
session.end()
session.destroy()
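# Editor's sketch (not part of the original script): a balances counterpart to
# daily(), assuming the column layout returned by balances_dataframe() above.
def _example_daily_balances(df, account_type):
    """Pivot balances so each account becomes a column indexed by date (illustrative)."""
    df = df[df['type'] == account_type]
    return pd.pivot_table(df, index=pd.DatetimeIndex(df['date']),
                          columns='account', values='balance', aggfunc=np.sum)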
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GNUCash expense moving average')
parser.add_argument('file', type=str, help='GNUCash file')
args = parser.parse_args()
print(splits_dataframe(args.file))
|
mit
|
BasuruK/sGlass
|
Description_Generator/train.py
|
1
|
3403
|
from keras.callbacks import ModelCheckpoint, History
from keras.callbacks import ReduceLROnPlateau
import matplotlib.pyplot as plt
from Description_Generator.descriptionGeneratorModel import DscGenModel
from Description_Generator.dataSetHandler import DataHandler
from Description_Generator.preprocessedDataHandler import PreprocessDataHandler
from Description_Generator.models import DscGenModel
from Description_Generator.descriptionGenerator import Generator
num_epochs = 200
batch_size = 64
root_path = 'Data/'
selected_CNN = 'vgg19'
captions_Dataset = root_path + 'Captions/IAPR_2012_captions.txt'
data_handler = DataHandler(root_path=root_path, caption_file=captions_Dataset,
maximum_caption_length=50,
allowed_word_frequency=2,
extract_image_features=True,
image_dataset=root_path + 'Images/iaprtc12/',
cnn_name=selected_CNN)
data_handler.process_data()
preProcessDataHandler = PreprocessDataHandler(root_path=root_path, batch_size=batch_size, cnn_name=selected_CNN)
num_training_samples = preProcessDataHandler.training_data.shape[0]
num_testing_samples = preProcessDataHandler.testing_data.shape[0]
print('Number of training samples:', num_training_samples)
print('Number of testing samples:', num_testing_samples)
model = DscGenModel(max_token_length=preProcessDataHandler.maximum_token_length,
vocabulary_size=preProcessDataHandler.vocabulary_size,
rnn='lstm',
num_image_features=preProcessDataHandler.image_features,
hidden_size=512,
embedding_size=512)
model.compile(loss='categorical_crossentropy',
optimizer = 'rmsprop',
metrics=['accuracy'])
print(model.summary())
history = History()
model_names = (root_path + 'PreTrainedModels/' + selected_CNN +
'/descGenrator.{epoch:02d}-{val_loss:.2f}.hdf5')
model_checkpoint = ModelCheckpoint(model_names,
monitor='val_loss',
verbose=1,
save_best_only=False,
save_weights_only=False)
reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss', factor=0.001,
patience=4, verbose=1)
callbacks = [model_checkpoint, reduce_learning_rate, history]
model.fit_generator(generator=preProcessDataHandler.flow(mode='train'),
steps_per_epoch=int(num_training_samples / batch_size),
epochs=num_epochs,
verbose=1,
callbacks=callbacks,
validation_data=preProcessDataHandler.flow(mode='testing'),
validation_steps=int(num_testing_samples / batch_size))
def plot_graphs_on_data(history):
# Plot Accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
    plt.xlabel('Epochs')
plt.legend(['Train Data', 'Test Data'], loc = 'upper left')
plt.show()
#Plot Loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
    plt.xlabel('Epochs')
plt.legend(['Train Data', 'Test Data'], loc = 'upper left')
plt.show()
|
gpl-3.0
|
mitchell-joblin/codeface
|
experiments/adist.py
|
5
|
19990
|
#! /usr/bin/env python
# This file is part of Codeface. Codeface is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright 2010, 2011 by Wolfgang Mauerer <[email protected]>
# Copyright 2012, 2013, Siemens AG, Wolfgang Mauerer <[email protected]>
# All Rights Reserved.
from codeface.VCS import gitVCS
from codeface.commit_analysis import createCumulativeSeries, createSeries, \
writeToFile, getSeriesDuration
import rpy2.robjects as robjects
import matplotlib
matplotlib.use("pdf")
import matplotlib.pyplot as plt
import os.path
from pylab import *
from subprocess import *
import codeface.kerninfo
import shelve
import re
def _abort(msg):
print(msg + "\n")
sys.exit(-1)
def status(msg):
# Comment to disable status messages. Highly professional!
print(msg)
def runR(cmd):
return robjects.r(cmd)
def loadRpkg(pkg):
robjects.r.library(pkg)
def RtoPython(array):
"""This is required since matplotlib can't handle R arrays natively."""
return [array[i] for i in range(0,len(array))]
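# Example (editor's illustration): RtoPython(runR('coredata(raw)[,1]')) returns a
# plain Python list that matplotlib can plot directly, which is how the analysis
# functions below consume R time series.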
# TODO: We should use constants to denote the backend.
def _setupRGraphics(filename = None, backend = None):
"""Must be called before plotting commands are used in R.
filename --- Where to store the plot.
    backend --- File format. Currently, PDF and PNG are supported.
"""
if backend == None:
return;
if filename:
if backend.lower() == "pdf":
runR('pdf(file="{0}.pdf")'.format(filename))
elif backend.lower() == "png":
runR('png(file="{0}.png")'.format(filename))
else:
_abort("Internal error: Backend {0} unsupported for R".
format(backend))
def _closeRGraphics(filename=None, backend=None):
"""Counterpart of _setupRGraphics() after plotting is finished."""
if filename:
runR('dev.off()')
def _setupPythonGraphics(filename=None, backend=None):
"""Must be called before plotting commands are used in Python.
filename --- Where to store the plot.
backend --- File format. Currently, only PDF is supported.
"""
# NOTE: matplotlib does not seem to be able to dynamically switch
# the output format (duh...), so this function remains empty -- we
# have to do all the setup at startup
def _closePythonGraphics(filename=None, backend=None):
"""Counterpart of _setupPythonGraphics() after plotting is finished."""
if filename:
plt.savefig(filename)
def _backendToSuffix(backend=None):
if backend == None:
return
if backend.lower() == "pdf":
return ".pdf"
elif backend.lower() == "png":
return ".pdf"
else:
_abort("Unsupported backend {0}.".format(backend))
def _computeCorrelation(rdata, sequence, msg, filename=None, backend=None,
frameLabels = None):
"""
Compute a correlation diagram (internal function, frontends available).
rdata -- Name (string) of the R time series that contains the data.
sequence -- Slice specification of the columns to compare, typically
in the form "1:3" or "c(1,5,7)".
msg -- Headline of the plot.
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('corr = data.frame(coredata({0})[,{1}])'.format(rdata, sequence))
if frameLabels:
for i in range(0, len(frameLabels)):
frameLabels[i] = '"' + frameLabels[i] + '"'
runR('names(corr) <- {0}'.format("c(" + ", ".join(frameLabels) + ")"))
runR('pairs(corr, panel=panel.smooth, ' \
'main="{0}")'.format(msg))
_closeRGraphics(filename, backend)
def computeDiffsizeCommitlengthCorrelation(rdata, difftype=1, filename=None,
backend=None):
"""Compute correlation between commit size and commit description.
rdata -- Name (string) of the R time series that contains the data.
difftype -- Which diff type to use as basis (defaults to 1).
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file."""
# Column 5 contains the commit message length, 6 contains
# the number of signed-offs, 7 the number of all signatures
# (6 plus CC etc.)
sequence = "c({0},5,6)".format(difftype)
msg = ("Correlation: Diff method {0}, "
"Msg length, # Signed-offs".format(difftype))
_computeCorrelation(rdata, sequence, msg, filename, backend,
frameLabels = ["Diff Size",
"Commit description length",
"# Signed-offs"])
def computeDifftypeCorrelation(rdata, filename=None, backend=None):
"""Check if there is any corelation between commit sizes.
We iterate over all tupled of different diff type methods.
rdata -- Name (string) of the R time series that contains the data.
filename -- Base string used to construct the output file. The tuple
for which the calculation is done is appended as _i_j,
followed by the suffix.
backend -- Which backend to use when plotting into a file."""
for i in range(1,5):
for j in range(i,5):
if i != j:
sequence = "c({0},{1})".format(i,j)
msg = ("Correlation between diff methods "
"{0} and {1}".format(i,j))
if filename:
curr_file = filename
curr_file += "_{0}_{1}".format(i,j)
curr_file += _backendToSuffix(backend)
_computeCorrelation(rdata, sequence, msg, curr_file, backend,
frameLabels=["Diff method {0}".format(i),
"Diff method {0}".format(j)])
def computeRecurrenceDiagram(rdata, m=3, d=1, filename=None, backend=None):
"""
Compute the recurrence diagram for a time series.
rdata -- Name (string) of the R time series that contains the data.
m -- embedding dimension (default to 3)
d -- time delay (defaults to 1)
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('recurr({0}, m={1}, d={2})'.format(rdata, m, d))
_closeRGraphics(filename, backend)
def computeDensity(rdata, bandwidth=10,
filename=None, backend=None):
"""
Compute the spectral density of a time series.
rdata -- Name (string) of the R time series that contains the data.
bandwidth -- Guess what.
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('plot(density({0},bw={1}))'.format(rdata, bandwidth))
_closeRGraphics(filename, backend)
def computeSpectrum(rdata, filename=None, backend=None):
"""
Fit an AR model to the timeseries and compute the spectral density.
rdata -- Name (string) of the R time series that contains the data.
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('spec.ar({0})'.format(rdata))
_closeRGraphics(filename, backend)
def computeLag(rdata, m=16, filename=None, backend=None):
"""Plot a grid of scatterplots of x(t-h) versus x(t) for h = 1,...,m."
The autocorrelation value is given in the upper right border
of the graph (in blue) and a lowess fit is added in red.
rdata -- Name (string) of the R time series that contains the data.
m -- Maximal lag.
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('lag.plot1({0},{1},smooth=TRUE)'.format(rdata, m))
_closeRGraphics(filename, backend)
def computeECDF(rdata, filename=None, backend=None):
"""Compute the empirical cumulative distribution function.
rdata -- Name (string) of the R time series that contains the data.
filename -- Output file. If None, then plot to the screen.
backend -- Which backend to use when plotting into a file.
"""
_setupRGraphics(filename, backend)
runR('plot(ecdf({0}))'.format(rdata))
_closeRGraphics(filename, backend)
def initialiseR():
loadRpkg("zoo")
loadRpkg("xts")
loadRpkg("tseriesChaos")
runR('source("utils.r")')
runR('source("stoffer.r")')
def doAnalysis(vcs, basedir, revrange=None):
# TODO: This needs to include the subrange analysis
# TODO: Use a temporary dir for data storage (unless the R
# data exchange problem is solved)
print("Creating raw series")
res = createSeries(vcs, "__main__", revrange)
writeToFile(res, "/home/wolfgang/raw.dat")
duration = getSeriesDuration(res)
# Emergency stop: If the cycle is less than 200 commits long,
# there are no meaningful results to be expected.
if len(res) < 200:
print("!!! Not enough commits in list, skipping analysis")
return
print("Creating cumulative series")
res = createCumulativeSeries(vcs, "__main__", revrange)
writeToFile(res, "/home/wolfgang/cum.dat")
# TODO: How is it possible to exchange the data directly between python
# and R? Writing to a file and then re-reading the stuff is a bit stupid
# (if all else fails, we could at least use a named pipe)
runR('raw = as.xts(read.zoo(file="/home/wolfgang/raw.dat", '\
'FUN=tstamp_to_date))')
raw = RtoPython(runR('raw'))
# We use the average number of commits per quarter day as basis for the
# moving average
secs_per_hour = 60*60
smooth_commits = len(raw)/(duration/(6*secs_per_hour))
print("Length: {0}, duration: {1}".format(len(raw), duration))
# ... but also ensure that we do not get excessively large or
# small values
if smooth_commits < 20:
smooth_commits = 20
elif smooth_commits > 350:
smooth_commits = 350
print("Using {0} as smoothing factor".format(smooth_commits))
if (len(raw) < smooth_commits):
print("Pathological case: Excessively short series with {} commits "
"detected, giving up.".format(len(raw)))
return
runR('reg = to.regts(raw[,1], {0})'.format(smooth_commits))
runR('cum = as.xts(read.zoo(file="/home/wolfgang/cum.dat", '\
'FUN=tstamp_to_date))')
reg = RtoPython(runR('reg'))
cum = RtoPython(runR('cum'))
# HARDCODED assumptions about the position of the data fields
# TODO: These should get symbolic R labels. How is this possible?
diff_sizes = RtoPython(runR('coredata(raw)[,1]'))
descr_sizes = RtoPython(runR('coredata(raw)[,5]'))
deltat = int(runR('deltat(reg)')[0])
tstart = int(runR('start(reg)')[0])
tend = int(runR('end(reg)')[0])
timelist_reg = RtoPython(runR('unclass(index(reg))'))
# Create a simplified time range starting at zero
timelist_reg_simplified = range(0, tend-tstart+1, deltat)
timelist_cum = RtoPython(runR('unclass(index(cum))'))
# Plot the cumulative and the averaged series
# TODO: Use different y axes for the components because they
# scale vastly different
# TODO: We need to re-initialise the plot object somehow since
# in the second run, the histogram of the previous run is
# plotted here.
status("Computing Time Series Graphs")
fig = plt.figure()
ax = fig.add_subplot(111)
_setupPythonGraphics(os.path.join(basedir, "timegraph"), "PDF")
plot(timelist_reg, RtoPython(runR('reg')))
xlabel("Time (TODO: Label with tags)")
plt.show()
_closePythonGraphics(os.path.join(basedir, "timegraph"), "PDF")
_setupPythonGraphics(os.path.join(basedir, "timegraph_cum"), "PDF")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(timelist_cum, RtoPython(runR('coredata(cum)[,1]')))
xlabel("Time (TODO: Label with tags)")
plt.show()
_closePythonGraphics(os.path.join(basedir, "timegraph_cum"), "PDF")
# Compare the histograms of commit size and description length
# distributions
# TODO: The plots overlap so that information gets lost. This is
# obviously bad.
status("Computing Histograms")
_setupPythonGraphics(os.path.join(basedir, "histograms"), "PDF")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
ax.hist(descr_sizes,100,range=(0,100),normed=True)
ax.hist(diff_sizes,100,range=(0,100),normed=True,alpha=0.5)
ax.set_xlabel("Commit/Diff size")
ax.set_ylabel("Probability")
ax.grid(True)
ax.hold(False)
plt.show()
_closePythonGraphics(os.path.join(basedir, "histograms"), "PDF")
# Let's look at some correlations: Between different diff approaches,
# and the correlation between diff size and
status("Computing Correlations")
computeDiffsizeCommitlengthCorrelation("raw",
filename=os.path.join(basedir,
"diff_commit_corr"),
backend="PDF")
computeDifftypeCorrelation("raw",
filename=os.path.join(basedir, "difftype_corr"),
backend="PDF")
# Determine the density. TODO: Find the best bandwidth.
status("Computing Density")
computeDensity("reg", bandwidth=10,
filename=os.path.join(basedir, "density"),
backend="PDF")
# We could also use reg, but coredata gives more regular labels
status("Computing Spectrum")
computeSpectrum("coredata(reg)",
filename=os.path.join(basedir, "spectrum"),
backend="PDF")
status("Computing ECDF")
computeECDF("reg", filename=os.path.join(basedir, "ecdf"),
backend="PDF")
# Generate the recurrence diagram for a series
# NOTE: When the number of considered data points exceeds a
# certain threshold, we don't do the plot because it's
# computationally too expensive
if len(reg) < 5000:
# We use PNG for plotting here because the PDF gets huge.
# (we could also just pass reg, but extracting the coredata gives
# "nicer" labels")
status("Computing Recurrence Diagram")
computeRecurrenceDiagram("coredata(reg)[,1]",
filename=os.path.join(basedir, "recurrence"),
backend="PNG")
else:
status("Skipping recurrence diagram: Too many data points")
# NOTE: The histogram of the smoothed distribution looks different than
# the raw histogram - the smoothing shifts the distribution towards larger
# commit numbers
# To compare distA and distA_smoothed, use
# hist(distA,100,range=(0,100), cumulative=True, normed=True)
# hist(distA_smoothed,100,range=(0,100), cumulative=True, normed=True)
# This could be interpreted that the dominance of the small commits is
# not as drastic as many describe it to be.
# TODO: Check for which statistical distributions the power law
# results emerges. I have the feeling that it is true for all
# distributions.
def doSubrangeAnalysis(vcs, basepath, revrange=None, subsys="__main__"):
"""Perform a subrange analysis of the given revision range.
Split the given range into a number of parts covering
(approximately) identical time intervals, and perform the
standard analysis steps on them.
vcs -- Revision control basis object.
parts -- Number of sub-parts into which the revision range is
to be split.
revrange -- Revision range.
subsys -- Specific subsystem to consider (the whole project
is analysed if subsys==None)."""
# Get the list of tags (TODO: Move this to the VCS object.
# I just don't want to recreate the DB right now)
cmd = 'git --git-dir={0} tag'.format(vcs.repo).split()
print("About to call {0}".format(cmd))
try:
p2 = Popen(cmd, stdout=PIPE)
clist = p2.communicate()[0].splitlines()
except OSError:
_abort("Internal error: Could not spawn git")
regexp = "^" + revrange[1] + "-rc"
sublist = [revrange[0]]
for entry in clist:
if re.match(regexp, entry):
sublist.append(entry)
sublist.append(revrange[1])
print("Sublist: {0}".format(", ".join(sublist)))
for i in range(1, len(sublist)):
path = os.path.join(basepath, "cycle{0}".format(i-1))
if not(os.path.exists(path)):
os.mkdir(path)
print("Analysing development sub-cycle {0} ({1}..{2})".
format(i-1, sublist[i-1], sublist[i]))
doAnalysis(vcs, path, revrange=[sublist[i-1], sublist[i]])
#######################################################################
############################ Dispatcher #############################
#######################################################################
def doRevisionAnalysis(vcs, revs, basepath, subrangeAnalysis=False):
for i in range(1, len(revs)):
path = os.path.join(basepath, revs[i])
if not(os.path.exists(path)):
os.mkdir(path)
print("Analysing {0} development cycle".format(revs[i]))
doAnalysis(vcs, path, revrange=[revs[i-1], revs[i]])
if subrangeAnalysis:
doSubrangeAnalysis(vcs, path, revrange=[revs[i-1], revs[i]])
# Let it rip!
########################################################
###git = shelve.open("/home/wolfgang/linux-small")["git"]
#git = shelve.open("/home/wolfgang/linux-14-33")["git"]
#print("Revision range of shelved object: {0}..{1}".
# format(git.rev_start, git.rev_end))
#initialiseR()
#
#revs = ["v2.6.{0}".format(i) for i in range(14,33) ]
#doRevisionAnalysis(git, revs, "/tmp/graphs", subrangeAnalysis=True)
############################################################
########################################################
#git = shelve.open("/home/wolfgang/git-full")["git"]
#print("Revision range of shelved object: {0}..{1}".
# format(git.rev_start, git.rev_end))
#initialiseR()
#
#revs = ["v1.{0}.0".format(i) for i in range(0,7) ]
#doRevisionAnalysis(git, revs, "/tmp/gitgraphs", subrangeAnalysis=True)
#########################################################
#######################################################
git = shelve.open("/home/wolfgang/perl-full")["git"]
print("Revision range of shelved object: {0}..{1}".
format(git.rev_start, git.rev_end))
initialiseR()
revs = ["perl-5.8.{0}".format(i) for i in range(1,10) ]
doRevisionAnalysis(git, revs, "/tmp/perlgraphs", subrangeAnalysis=False)
#######################################################
############################################################
#revs = ["v2.6.{0}".format(i) for i in range(24,26) ]
#doRevisionAnalysis(git, revs, "/tmp/graphs", subrangeAnalysis=True)
#doRevisionAnalysis(git, ["v2.6.24", "v2.6.25"], "/tmp/graphs")
#doSubrangeAnalysis(git, "/tmp/graphs/v2.6.25",
# revrange=["v2.6.24", "v2.6.25"])
|
gpl-2.0
|
ricardog/raster-project
|
projections/scripts/tifftomp4.py
|
1
|
3734
|
#!/usr/bin/env python
import matplotlib
matplotlib.use("Agg")
import matplotlib.colors as colors
import argparse
import itertools
import json
import math
import numpy as np
import os
import rasterio
import tempfile
import shutil
import subprocess
import sys
from ..progressbar import ProgressBar
from ..mp4_utils import to_mp4
from .. import tiff_utils
def parse_fname2(fname):
return os.path.splitext(os.path.basename(fname))[0].rsplit('-', 2)
def get_stats(files):
import gdal
low = []
high = []
bands = []
x_size = None
y_size = None
for f in files:
ds = gdal.Open(f)
if ds is None:
print("Error: failed to open '%s" % f)
sys.exit(1)
if x_size is None:
x_size = ds.RasterXSize
y_size = ds.RasterYSize
else:
if (x_size != ds.RasterXSize or y_size != ds.RasterYSize):
print("raster have mismatched sizes (%d = %d; %d = %d)" % (x_size,
ds.RasterXSize,
y_size,
ds.RasterYSize))
sys.exit(1)
l, h = tiff_utils.get_min_max(ds)
low.append(l)
high.append(h)
bands.append(ds.RasterCount)
print("min: %.2f / max: %.2f [%d x %d] : %d" % (min(low),
max(high),
x_size,
y_size,
sum(bands)))
return(min(low), max(high), x_size, y_size, bands)
def convert(title, fps, palette, band, oname, files):
#stats = get_stats(files)
#bands = stats[4]
#nframes = sum(bands)
nframes = len(files)
cnorm = colors.Normalize(vmin=0.7, vmax=1.07)
with rasterio.open(files[0]) as src:
data = src.read(band, masked=True)
for idx, img, text in to_mp4(title, oname, nframes,
data, 'year', fps, cnorm=cnorm):
scenario, metric, year = parse_fname2(files[idx])
ds = rasterio.open(files[idx])
data = ds.read(band, masked=True)
img.set_array(data)
text.set_text(year)
def parse_args():
parser = argparse.ArgumentParser(description='Convert a series of raster ' +
'maps into a video sequence.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fps', type=int, default=10,
help='frames per second for output video. ' +
'you can control the speed of playback by increasing ' +
'or decreasing the FPS.')
parser.add_argument('-o', '--out', type=str, required=True,
help='name of output file.')
parser.add_argument('-t', '--title', default='',
help='title for the video')
parser.add_argument('-p', '--palette', default='green',
choices=['blue', 'green', 'orange', 'red'],
help='name of color palette to use')
parser.add_argument('-b', '--band', type=int,
help='which band in input raster files to use')
parser.add_argument('-f', '--files', type=str, nargs='+',
required=True, help='GeoTIFF raster files to process')
parser.add_argument('-s', '--stats', action='store_true',
help='Print max/min stats for all files and exit')
return parser.parse_args()
def main():
args = parse_args()
if args.stats:
stats = get_stats(args.files)
return
convert(args.title, args.fps, args.palette, args.band, args.out, args.files)
if __name__ == '__main__':
main()
|
apache-2.0
|
pkainz/hyperas
|
examples/use_intermediate_functions.py
|
2
|
3226
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
import matplotlib.pyplot as plt
def visualization_mnist(x_data,n=10):
plt.figure(figsize=(20, 4))
for i in range(n):
# display digit
ax = plt.subplot(1, n, i+1)
plt.imshow(x_data[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
visualization_mnist(X_test)
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
'''
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
'''
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
nb_epoch=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
X_train, Y_train, X_test, Y_test = data()
functions=[visualization_mnist]
best_run, best_model = optim.minimize(model=model,
data=data,
functions=functions,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
|
mit
|
gotomypc/scikit-learn
|
sklearn/semi_supervised/label_propagation.py
|
128
|
15312
|
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
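# Usage sketch (editor's addition): the kernel and clamping options described in
# the module docstring, e.g. a sparse KNN graph with soft clamping. X and y are
# placeholders; unlabeled entries of y are marked with -1.
#
#     model = LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)
#     model.fit(X, y)
#     predicted = model.transduction_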
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
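# The loop in ``fit`` repeatedly applies the update
#     label_distributions = clamp_weights * (graph_matrix . label_distributions) + y_static
# until convergence.  The function below is a minimal, illustrative sketch of a
# single update step on a hypothetical 3-sample, 2-class problem; the numbers
# are arbitrary and the helper is not used by the estimator itself.
def _example_propagation_step():
    import numpy as np
    graph_matrix = np.array([[0.0, 0.5, 0.5],
                             [0.5, 0.0, 0.5],
                             [0.5, 0.5, 0.0]])
    label_distributions = np.array([[1.0, 0.0],    # labeled as class 0
                                    [0.0, 1.0],    # labeled as class 1
                                    [0.5, 0.5]])   # unlabeled sample
    clamp_weights = np.ones((3, 1))                # alpha = 1: labels are not clamped
    y_static = np.zeros_like(label_distributions)  # alpha = 1 makes the static term zero
    # one propagation / clamping update, as performed in the while loop above
    return clamp_weights * graph_matrix.dot(label_distributions) + y_static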
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
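# A hedged usage sketch against the public scikit-learn API, mirroring the
# doctests above: roughly a third of the iris labels are hidden (marked -1)
# and then recovered transductively.  The helper is illustrative only and is
# not called anywhere in this module.
def _example_label_spreading_usage():
    import numpy as np
    from sklearn import datasets
    from sklearn.semi_supervised import LabelSpreading
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.3] = -1          # hide ~30% of the labels
    model = LabelSpreading(kernel='rbf', gamma=20, alpha=0.2)
    model.fit(iris.data, labels)
    # transduction_ holds the label assigned to every sample, including the
    # ones that were hidden above
    return model.transduction_, model.predict_proba(iris.data[:5])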
|
bsd-3-clause
|
kekraft/golden_eye
|
src/pong_vision/src/image_gui.py
|
1
|
3415
|
#!/usr/bin/env python
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
import copy
import random
import Tkinter
import Image, ImageTk  # Requires python-imaging-tk (sudo apt-get install python-imaging-tk)
#import sift
class System_GUI():
def __init__(self, img):
root = Tkinter.Tk()
im = Image.fromarray(img)
imgtk = ImageTk.PhotoImage(image=im)
# insert image into panel
panel = Tkinter.Label(root, image = imgtk)
panel.pack(side = "top", fill = "both", expand = "yes")
# Game state text; needs a method for updating this to offense/defense
self.game_state_text = Tkinter.Text(root, height=1, width = 8)
self.game_state_text.config(bg='gray77')
self.game_state_text.config(fg='black')
self.game_state_text.pack()
self.game_state_text.insert(Tkinter.END, " SETUP")
self.game_state_text.config(state=Tkinter.DISABLED)
# motor spin boxes
motor_a_text = Tkinter.Text(root, height=1, width=8)
motor_a_text.pack()
motor_a_text.insert(Tkinter.END, "Motor A:")
motor_a_text.config(state=Tkinter.DISABLED)
motor_a_text.configure(bg='gray77')
self.motor_a_velocity_box = Tkinter.Spinbox(root, from_=0, to=10, increment=.1)
self.motor_a_velocity_box.pack()
motor_b_text = Tkinter.Text(root, height=1, width=8)
motor_b_text.pack()
motor_b_text.insert(Tkinter.END, "Motor B:")
motor_b_text.config(state=Tkinter.DISABLED)
motor_b_text.configure(bg='gray77')
self.motor_b_velocity_box = Tkinter.Spinbox(root, from_=0, to=10, increment=.1)
self.motor_b_velocity_box.pack()
motor_c_text = Tkinter.Text(root, height=1, width=8)
motor_c_text.pack()
motor_c_text.insert(Tkinter.END, "Motor C:")
motor_c_text.config(state=Tkinter.DISABLED)
motor_c_text.configure(bg='gray77')
self.motor_c_velocity_box = Tkinter.Spinbox(root, from_=0, to=10, increment=.1)
self.motor_c_velocity_box.pack()
# What mode are we in, automatic or manual
self.mode_val = Tkinter.IntVar()
self.manual_button = Tkinter.Radiobutton(root, text="Manual", variable=self.mode_val, value=1, command=self.mode_change)
self.manual_button.pack()
self.automatic_button = Tkinter.Radiobutton(root, text="Automatic", variable=self.mode_val, value=2, command=self.mode_change)
self.automatic_button.pack()
# insert buttons into panel
# 2 buttons: Fire and Quit
self.fire_button = Tkinter.Button(root, text="Fire")
self.quit_button = Tkinter.Button(root, text="Quit")
self.fire_button.pack()
self.quit_button.pack()
self.fire_button.bind('<Button-1>', self.fire)
self.quit_button.bind('<Button-1>', self.quit)
# start the GUI
root.mainloop()
def fire(self, arg):
print 'Fire'
motor_a_speed = float(self.motor_a_velocity_box.get())
print 'Motor a Speed', motor_a_speed
def quit(self, arg):
print 'Quit'
def mode_change(self):
print 'Selection changed to: ', self.mode_val.get()
if __name__ == '__main__':
# run pipeline with a given image
img = cv2.imread("../images/saved.jpg", 1)
cv2.namedWindow("Display Window", cv2.WINDOW_AUTOSIZE)
#cv2.imshow("Display Window", img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# convert image to be friendly with tkinter
# rearrange color channel
b,g,r = cv2.split(img)
img = cv2.merge((r,g,b))
gui = System_GUI(img)
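# Note: a hedged alternative to the channel split/merge above is OpenCV's
# colour-space conversion; the helper below is a sketch only and is not used
# by this script.
def _bgr_to_rgb(bgr_image):
    """Return an RGB copy of a BGR image as loaded by cv2.imread."""
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)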
|
bsd-2-clause
|
Bulochkin/tensorflow_pack
|
tensorflow/python/estimator/canned/dnn_test.py
|
20
|
16058
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
  """Re-reads parsed feature tensors through a FIFO queue, keyed by name."""
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
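# A hedged sketch (not part of the test suite) of the train / evaluate flow
# exercised above, written against the public tf.estimator API of TensorFlow
# 1.x instead of the internal canned modules.  The feature name 'x', the
# hidden-unit sizes and the step count are arbitrary illustration choices;
# the function is defined but never invoked here.
def _example_dnn_regressor_flow():
  import numpy as np
  import tensorflow as tf
  data = np.linspace(0., 2., 10, dtype=np.float32)
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': data}, y=data, batch_size=10, num_epochs=None, shuffle=True)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': data}, y=data, batch_size=10, shuffle=False)
  est = tf.estimator.DNNRegressor(
      hidden_units=[2, 2],
      feature_columns=[tf.feature_column.numeric_column('x')])
  est.train(train_input_fn, steps=10)   # learn y = x, as in the tests above
  return est.evaluate(eval_input_fn)    # returns a dict including 'loss'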
|
apache-2.0
|
NelisVerhoef/scikit-learn
|
sklearn/datasets/lfw.py
|
141
|
19372
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
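# A small worked sketch (illustrative only, not used by the loaders) of how the
# default ``slice_`` and ``resize`` values used by fetch_lfw_people determine
# the 62 x 47 output shape documented below.
def _example_default_output_shape():
    h_slice, w_slice = slice(70, 195), slice(78, 172)           # default slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)   # 125
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)   # 94
    resize = 0.5                                                # default resize
    return int(resize * h), int(resize * w)                     # (62, 47)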
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
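# A hedged usage sketch of the public loader; the min_faces_per_person and
# resize values are arbitrary illustration choices.  Note that the first call
# downloads roughly 200MB of data (see check_fetch_lfw above).  The helper is
# not used by this module.
def _example_fetch_lfw_people():
    from sklearn.datasets import fetch_lfw_people
    people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    # images: (n_samples, height, width); data: the same images, ravelled
    return people.images.shape, people.data.shape, people.target_names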
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure how to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
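# A hedged usage sketch of the pairs loader, illustrative only; with the
# default slice_ and resize values the 'train' subset yields 2200 pairs of
# 62 x 47 images.  The helper is not used by this module.
def _example_fetch_lfw_pairs():
    from sklearn.datasets import fetch_lfw_pairs
    pairs_train = fetch_lfw_pairs(subset='train')
    # pairs: (2200, 2, 62, 47); target: 1 for "same person", 0 otherwise
    return pairs_train.pairs.shape, pairs_train.target[:10]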
|
bsd-3-clause
|
ssaeger/scikit-learn
|
examples/covariance/plot_covariance_estimation.py
|
99
|
5074
|
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
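# For reference, ShrunkCovariance regularizes the empirical covariance as a
# convex combination with a scaled identity matrix.  The lines below are a
# minimal sketch of that formula (the shrinkage value 0.1 is an arbitrary
# illustration choice and is not used in the rest of the example).
example_shrinkage = 0.1
example_emp_cov = empirical_covariance(X_train)
example_mu = np.trace(example_emp_cov) / n_features
example_shrunk_cov = ((1. - example_shrinkage) * example_emp_cov
                      + example_shrinkage * example_mu * np.eye(n_features))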
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
|
bsd-3-clause
|
michaelpacer/scikit-image
|
doc/examples/applications/plot_rank_filters.py
|
14
|
18001
|
"""
============
Rank filters
============
Rank filters are non-linear filters using the local gray-level ordering to
compute the filtered value. This ensemble of filters shares a common base: the
local gray-level histogram is computed on the neighborhood of a pixel (defined
by a 2-D structuring element). If the filtered value is taken as the middle
value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
e.g. image smoothing, sharpening
* image pre-processing
e.g. noise reduction, contrast enhancement
* feature extraction
e.g. border detection, isolated point detection
* post-processing
e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the `camera` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from skimage import data
noisy_image = img_as_ubyte(data.camera())
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of grey values')
"""
.. image:: PLOT2RST.current_figure
Noise removal
=============
Some noise is added to the image, 1% of pixels are randomly set to 255, 1% are
randomly set to 0. The **median** filter is applied to remove the noise.
"""
from skimage.filters.rank import median
from skimage.morphology import disk
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Noisy image')
ax1.axis('off')
ax2.imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Median $r=1$')
ax2.axis('off')
ax3.imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax3.set_title('Median $r=5$')
ax3.axis('off')
ax4.imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax4.set_title('Median $r=20$')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The added noise is efficiently removed; as the image defects are small (1
pixel wide), a small filter radius is sufficient. As the radius increases,
objects of bigger size are filtered as well, such as the camera tripod. The
median filter is often used for noise removal because borders are preserved and
e.g. salt and pepper noise typically does not distort the gray-level.
Image smoothing
================
The example hereunder shows how a local **mean** filter smooths the camera man
image.
"""
from skimage.filters.rank import mean
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
loc_mean = mean(noisy_image, disk(10))
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Local mean $r=10$')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
One may be interested in smoothing an image while preserving important borders
(median filters already achieved this), here we use the **bilateral** filter
that restricts the local neighborhood to pixels having a gray-level similar to
the central one.
.. note::
A different implementation is available for color images in
`skimage.filters.denoise_bilateral`.
"""
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(bilat, cmap=plt.cm.gray)
ax2.set_title('Bilateral mean')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
One can see that the large continuous part of the image (e.g. sky) is smoothed
whereas other details are preserved.
Contrast enhancement
====================
We compare here how the global histogram equalization is applied locally.
The equalized image [2]_ has a roughly linear cumulative distribution function
for each pixel neighborhood. The local version [3]_ of the histogram
equalization emphasizes every local gray-level variation.
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
.. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, ax = plt.subplots(3, 2, figsize=(10, 10))
ax1, ax2, ax3, ax4, ax5, ax6 = ax.ravel()
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of gray values')
ax3.imshow(glob, interpolation='nearest', cmap=plt.cm.gray)
ax3.axis('off')
ax4.plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax4.set_title('Histogram of gray values')
ax5.imshow(loc, interpolation='nearest', cmap=plt.cm.gray)
ax5.axis('off')
ax6.plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax6.set_title('Histogram of gray values')
"""
.. image:: PLOT2RST.current_figure
Another way to maximize the number of gray-levels used for an image is to apply
a local auto-leveling, i.e. the gray-value of a pixel is proportionally
remapped between local minimum and local maximum.
The following example shows how local auto-level enhances the camera man
picture.
"""
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(auto, cmap=plt.cm.gray)
ax2.set_title('Local autolevel')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
This filter is very sensitive to local outliers, see the little white spot in
the left part of the sky. This is due to a local maximum which is very high
compared to the rest of the neighborhood. One can moderate this using the
percentile version of the auto-level filter, which uses given percentiles (one
lower, one upper) in place of the local minimum and maximum. The example
below illustrates how the percentile parameters influence the local auto-level
result.
"""
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(np.hstack((image, loc_autolevel)), cmap=plt.cm.gray)
ax0.set_title('Original / auto-level')
ax1.imshow(
np.hstack((loc_perc_autolevel0, loc_perc_autolevel1)), vmin=0, vmax=255)
ax1.set_title('Percentile auto-level 0%,1%')
ax2.imshow(
np.hstack((loc_perc_autolevel2, loc_perc_autolevel3)), vmin=0, vmax=255)
ax2.set_title('Percentile auto-level 5% and 10%')
for ax in axes:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The morphological contrast enhancement filter replaces the central pixel by the
local maximum if the original pixel value is closest to the local maximum,
otherwise by the local minimum.
"""
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(enh, cmap=plt.cm.gray)
ax2.set_title('Local morphological contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The percentile version of the local morphological contrast enhancement uses
percentile *p0* and *p1* instead of the local minimum and maximum.
"""
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(penh, cmap=plt.cm.gray)
ax2.set_title('Local percentile morphological\n contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image threshold
===============
The Otsu threshold [4]_ method can be applied locally using the local gray-
level distribution. In the example below, for each pixel, an "optimal"
threshold is determined by maximizing the variance between two classes of
pixels of the local neighborhood defined by a structuring element.
The example compares the local threshold with the global threshold
`skimage.filters.threshold_otsu`.
.. note::
Local thresholding is much slower than global thresholding. A function for
global Otsu thresholding can be found in `skimage.filters.threshold_otsu`.
.. [4] http://en.wikipedia.org/wiki/Otsu's_method
"""
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(p8, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Original')
ax1.axis('off')
fig.colorbar(ax2.imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
ax3.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax3.set_title('Original >= local Otsu')
ax3.axis('off')
ax4.imshow(glob_otsu, cmap=plt.cm.gray)
ax4.set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The following example shows how local Otsu thresholding handles a global level
shift applied to a synthetic image.
"""
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(m)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(m >= t, interpolation='nearest')
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image morphology
================
Local maximum and local minimum are the base operators for gray-level
morphology.
.. note::
    `skimage.filters.rank.maximum` / `skimage.morphology.dilation` and
    `skimage.filters.rank.minimum` / `skimage.morphology.erosion` are
    equivalent pairs of filters (see below for a performance comparison).
Here is an example of the classical morphological gray-level filters: opening,
closing and morphological gradient.
"""
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(closing, cmap=plt.cm.gray)
ax2.set_title('Gray-level closing')
ax2.axis('off')
ax3.imshow(opening, cmap=plt.cm.gray)
ax3.set_title('Gray-level opening')
ax3.axis('off')
ax4.imshow(grad, cmap=plt.cm.gray)
ax4.set_title('Morphological gradient')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Feature extraction
===================
Local histograms can be exploited to compute local entropy, which is related to
the local image complexity. Entropy is computed using a base-2 logarithm, i.e.
the filter returns the minimum number of bits needed to encode the local
gray-level distribution.
`skimage.filters.rank.entropy` returns the local entropy computed on a given
structuring element. The following example applies this filter to an 8-bit
image.
.. note::
    To make better use of the available bit depth, the function returns
    10x entropy for 8-bit images and 1000x entropy for 16-bit images.
"""
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.jet), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Implementation
==============
The central part of the `skimage.filters.rank` filters is built on a sliding
window that updates the local gray-level histogram. This approach limits the
algorithmic complexity to O(n), where n is the number of image pixels. The
complexity also remains limited with respect to the structuring element size.
In the following we compare the performance of different implementations
available in `skimage`.
"""
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
"""
Comparison between
* `filters.rank.maximum`
* `morphology.dilation`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilation'])
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilation'])
"""
.. image:: PLOT2RST.current_figure
Comparison between:
* `filters.rank.median`
* `scipy.ndimage.percentile_filter`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile_filter'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
"""
.. image:: PLOT2RST.current_figure
Comparison of the outcome of the two methods:
"""
fig, ax = plt.subplots()
ax.imshow(np.hstack((rc, rndi)))
ax.set_title('filters.rank.median vs. scipy.ndimage.percentile_filter')
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile_filter'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
|
bsd-3-clause
|
kmather73/pymc3
|
pymc3/examples/custom_dists.py
|
14
|
3262
|
# This model was presented by Jake Vanderplas in his blog post about
# comparing different MCMC packages
# http://jakevdp.github.io/blog/2014/06/14/frequentism-and-bayesianism-4-bayesian-in-python/
#
# While at the core it's just a linear regression, it's a nice
# illustration of using Jeffrey priors and custom density
# distributions in PyMC3.
#
# Adapted to PyMC3 by Thomas Wiecki
import matplotlib.pyplot as plt
import numpy as np
import pymc3
import theano.tensor as T
np.random.seed(42)
theta_true = (25, 0.5)
xdata = 100 * np.random.random(20)
ydata = theta_true[0] + theta_true[1] * xdata
# add scatter to points
xdata = np.random.normal(xdata, 10)
ydata = np.random.normal(ydata, 10)
data = {'x': xdata, 'y': ydata}
with pymc3.Model() as model:
alpha = pymc3.Uniform('intercept', -100, 100)
# Create custom densities
beta = pymc3.DensityDist('slope', lambda value: -1.5 * T.log(1 + value**2), testval=0)
sigma = pymc3.DensityDist('sigma', lambda value: -T.log(T.abs_(value)), testval=1)
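    # (Clarifying note, not in the original script: the log-densities above encode
    #  the priors from the referenced blog post -- an uninformative prior on the
    #  slope, p(slope) proportional to (1 + slope**2)**(-3/2), and the Jeffreys
    #  prior p(sigma) proportional to 1/sigma for the unknown scale.)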
# Create likelihood
like = pymc3.Normal('y_est', mu=alpha + beta * xdata, sd=sigma, observed=ydata)
start = pymc3.find_MAP()
step = pymc3.NUTS(scaling=start) # Instantiate sampler
trace = pymc3.sample(10000, step, start=start)
#################################################
# Create some convenience routines for plotting
# All functions below written by Jake Vanderplas
def compute_sigma_level(trace1, trace2, nbins=20):
"""From a set of traces, bin by number of standard deviations"""
L, xbins, ybins = np.histogram2d(trace1, trace2, nbins)
L[L == 0] = 1E-16
logL = np.log(L)
shape = L.shape
L = L.ravel()
# obtain the indices to sort and unsort the flattened array
i_sort = np.argsort(L)[::-1]
i_unsort = np.argsort(i_sort)
L_cumsum = L[i_sort].cumsum()
L_cumsum /= L_cumsum[-1]
xbins = 0.5 * (xbins[1:] + xbins[:-1])
ybins = 0.5 * (ybins[1:] + ybins[:-1])
return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
def plot_MCMC_trace(ax, xdata, ydata, trace, scatter=False, **kwargs):
"""Plot traces and contours"""
xbins, ybins, sigma = compute_sigma_level(trace[0], trace[1])
ax.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955], **kwargs)
if scatter:
ax.plot(trace[0], trace[1], ',k', alpha=0.1)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\beta$')
def plot_MCMC_model(ax, xdata, ydata, trace):
"""Plot the linear model and 2sigma contours"""
ax.plot(xdata, ydata, 'ok')
alpha, beta = trace[:2]
xfit = np.linspace(-20, 120, 10)
yfit = alpha[:, None] + beta[:, None] * xfit
mu = yfit.mean(0)
sig = 2 * yfit.std(0)
ax.plot(xfit, mu, '-k')
ax.fill_between(xfit, mu - sig, mu + sig, color='lightgray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_MCMC_results(xdata, ydata, trace, colors='k'):
"""Plot both the trace and the model together"""
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plot_MCMC_trace(ax[0], xdata, ydata, trace, True, colors=colors)
plot_MCMC_model(ax[1], xdata, ydata, trace)
pymc3_trace = [trace['intercept'],
trace['slope'],
trace['sigma']]
plot_MCMC_results(xdata, ydata, pymc3_trace)
plt.show()
|
apache-2.0
|
danieldmm/minerva
|
models/pytorch2.py
|
1
|
29389
|
# -*- coding: utf-8 -*-
#
# task as translation
# now with epochs, features and embeddings
from __future__ import unicode_literals, print_function, division
import os
import random
import numpy as np
np.warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch using", device)
import torchtext.vocab as vocab
CUSTOM_SEED = 42
np.random.seed(CUSTOM_SEED)
from tqdm import tqdm
from collections import Counter
from models.base_model import BaseModel
from models.keyword_features import FeaturesReader, filterOutFeatures, normaliseFeatures, getRootDir
# cell_type=nn.LSTM
cell_type = nn.GRU
MAX_LENGTH = 100
SOS_token = 0
EOS_token = 1
UNK_token = 3
EOS_marker = "#EOS"
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
h = math.floor(m / 60)
m -= h * 60
return '%02d:%02d:%02d' % (h, m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
if percent == 0:
return "? (?)"
es = s / percent
rs = es - s
return '%s ( %s)' % (asMinutes(s), asMinutes(rs))
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS", 3: "UNK"}
self.n_words = len(self.index2word) # Count SOS / EOS / UNK
def addSentence(self, sentence):
assert isinstance(sentence, list)
for word in sentence:
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class ExtraFeaturesEncoderRNN(nn.Module):
def __init__(self, vocab_size, hidden_size, lang, num_extra_features):
super(ExtraFeaturesEncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.lang = lang
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.gru = cell_type(hidden_size + num_extra_features, hidden_size)
self.loadWordVectors()
def loadWordVectors(self):
local = "/Users/masterman/NLP/PhD/vectors/glove"
if os.path.isdir(local):
vector_dir = local
else:
vector_dir = "/tmp/tmp-1135029/glove"
self.glove = vocab.GloVe(name='6B', dim=300, cache=vector_dir)
print('Loaded {} words'.format(len(self.glove.itos)))
for word, emb_index in self.lang.word2index.items():
# if the word is in the loaded glove vectors
if word.lower() in self.glove.stoi:
# get the index into the glove vectors
glove_index = self.glove.stoi[word.lower()]
# get the glove vector itself and convert to pytorch structure
# glove_vec = torch.FloatTensor(self.glove.vectors[glove_index], device=device)
# # this only matters if using cuda :)
# if device.startswith("cuda"):
# glove_vec = glove_vec.cuda()
# finally, if net is our network, and emb is the embedding layer:
self.embedding.weight.data[emb_index, :].set_(self.glove.vectors[glove_index])
self.glove = None
def forward(self, input, hidden):
embedded = self.embedding(input[0]).view(1, 1, -1)
output = torch.cat([embedded, input[1].view(1, 1, -1)], dim=2)
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = cell_type(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = cell_type(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
res = []
for word in sentence:
if word in lang.word2index:
res.append(lang.word2index[word])
else:
res.append(UNK_token)
return res
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def getInputTensor(context, input_lang, dict_vectorizer):
text_in = getTextTokens(context)
context = filterOutFeatures(context) # FIXME
features = [dict_vectorizer.transform(t) for t in context["tokens"]]
feat_len = len(features[0])
features_tensors = [torch.tensor(feat, dtype=torch.float, device=device).view(-1, feat_len) for feat in features]
indexes = tensorFromSentence(input_lang, text_in)
input_list = [p for p in zip(indexes, features_tensors)]
return input_list
def getOutputTensor(context, output_lang):
text_out = getTokensToExtract(context)
target_tensor = tensorFromSentence(output_lang, text_out)
return target_tensor
def getTensorsWithFeatures(context, input_lang, output_lang, dict_vectorizer):
return getInputTensor(context, input_lang, dict_vectorizer), getOutputTensor(context, output_lang)
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
teacher_forcing_ratio = 0
def train(input_list, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = min(len(input_list), max_length)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_list[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
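# ---------------------------------------------------------------------------
# The helper below is not part of the original module: it is a minimal sketch
# of the loop just described, built on the ``train`` and ``timeSince`` functions
# defined above.  The module's real training loop lives in
# ``TorchModel.trainModel`` further below; the ``pairs`` argument here is a
# hypothetical list of (input_list, target_tensor) tuples.
def run_training_outline(pairs, encoder, decoder, print_every=100, lr=0.01):
    start = time.time()                                   # start a timer
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=lr)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=lr)
    criterion = nn.NLLLoss()                              # optimizers and criterion
    plot_losses, running_loss = [], 0                     # losses kept for plotting
    for iteration, (input_list, target_tensor) in enumerate(pairs, 1):
        running_loss += train(input_list, target_tensor, encoder, decoder,
                              encoder_optimizer, decoder_optimizer, criterion)
        if iteration % print_every == 0:                  # occasionally report progress
            print('%s (%d %d%%) %.4f' % (timeSince(start, iteration / len(pairs)),
                                         iteration, iteration * 100 / len(pairs),
                                         running_loss / print_every))
            plot_losses.append(running_loss / print_every)
            running_loss = 0
    return plot_losses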
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points, filename):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.savefig(filename, dpi=600)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder,
decoder,
input_list,
output_lang,
max_length=MAX_LENGTH):
"""
Generate the output for a single context
:param encoder:
:param decoder:
:param context:
:param input_lang:
:param output_lang:
:param dict_vectorizer:
:param max_length:
:return:
"""
with torch.no_grad():
# input_list = getInputTensor(context, input_lang, dict_vectorizer)
# input_length = input_tensor.size()[0]
input_length = min(len(input_list), max_length)
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_list[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device) # SOS
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append(EOS_marker)
break
else:
decoded_words.append(output_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes it easier to run multiple experiments) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
# ======================================
def getTextTokens(context):
tokens = [t["text"].lower() for t in context["tokens"]]
return tokens
def getContextsTextTokens(contexts):
return [getTextTokens(context) for context in contexts]
def getTokensToExtract(context):
return [t[0] for t in context["best_kws"]]
def getTargetTranslations(contexts):
translations = []
for context in contexts:
tokens = [t[0] for t in context["best_kws"]]
translations.append(tokens)
return translations
def measurePR(truth, predictions):
if len(truth) == 0:
        return 0, 0, 0, 0, 0  # keep the same arity as the normal return below
tp = fp = fn = 0
for word in predictions:
if word in truth:
tp += 1
else:
fp += 1
for word in truth:
if word not in predictions:
fn += 1
try:
precision = tp / (tp + fp)
except ZeroDivisionError:
precision = 0
try:
recall = tp / (tp + fn)
except ZeroDivisionError:
recall = 0
return precision, recall, tp, (tp + fp), (tp + fn)
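# Illustrative example (not part of the original code): measurePR({'a', 'b'}, ['a', 'c'])
# returns (0.5, 0.5, 1, 2, 2) -- one true positive, precision 1/2 and recall 1/2.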
class TorchModel(BaseModel):
def __init__(self, exp_dir, params={},
train_data_filename="feature_data.json.gz",
test_data_filename="feature_data_test.json.gz"):
super(TorchModel, self).__init__(exp_dir, params, train_data_filename, test_data_filename)
self.epochs = params.get("num_epochs", 10)
self.optimizer_class = params.get("optimizer", "SGD")
self.print_every = params.get("print_every", 100)
self.plot_every = params.get("plot_every", 100)
self.learning_rate = params.get("learning_rate", 0.01)
self.hidden_size = params.get("hidden_size", 512)
self.dropout_p = params.get("dropout_p", 0.1)
def augmentSingleContext(self, context):
pass
def processFeatures(self):
self.context_tokens = getContextsTextTokens(self.contexts)
# for context in tqdm(self.contexts, desc="Adding context features"):
# self.augmentSingleContext(context)
normaliseFeatures(self.contexts)
def postProcessLoadedData(self):
self.MAX_CONTEXT_LEN = max([len(x["tokens"]) for x in self.contexts]) + 2
train_val_cutoff = int(.80 * len(self.contexts))
self.training_contexts = self.contexts[:train_val_cutoff]
self.validation_contexts = self.contexts[train_val_cutoff:]
self.X_train = getContextsTextTokens(self.training_contexts)
self.X_val = getContextsTextTokens(self.validation_contexts)
# self.X_train, self.y_train = getTrainTestData(self.training_contexts)
# self.X_val, self.y_val = getTrainTestData(self.validation_contexts)
# self.X_train = matrixFromContextFeatures(self.X_train, self.dict_vectorizer, self.MAX_CONTEXT_LEN)
# self.X_val = matrixFromContextFeatures(self.X_val, self.dict_vectorizer, self.MAX_CONTEXT_LEN)
self.y_train = getTargetTranslations(self.training_contexts)
self.y_val = getTargetTranslations(self.validation_contexts)
self.lang = Lang("input")
# self.output_lang = Lang("output")
for words in self.X_train + self.X_val:
for word in words:
self.lang.addWord(word)
for words in self.y_train + self.y_val:
for word in words:
self.lang.addWord(word)
self.pairs = [p for p in zip(self.X_train, self.y_train)]
def defineModel(self):
print("Creating model...")
hidden_size = 300
self.encoder = ExtraFeaturesEncoderRNN(self.lang.n_words,
hidden_size,
self.lang,
self.num_extra_features).to(device)
self.decoder = AttnDecoderRNN(hidden_size,
self.lang.n_words,
dropout_p=self.dropout_p,
max_length=self.MAX_CONTEXT_LEN).to(device)
def trainModel(self):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
optimizer = getattr(optim, self.optimizer_class)
encoder_optimizer = optimizer(self.encoder.parameters(), lr=self.learning_rate)
decoder_optimizer = optimizer(self.decoder.parameters(), lr=self.learning_rate)
training_pairs = []
for context in tqdm(self.training_contexts, desc="Vectorizing data"):
training_pairs.append(getTensorsWithFeatures(context,
self.lang,
self.lang,
self.dict_vectorizer))
criterion = nn.NLLLoss()
print("Training...")
for epoch in range(1, self.epochs + 1):
interrupted = False
for iteration, training_pair in enumerate(training_pairs):
try:
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor,
target_tensor,
self.encoder,
self.decoder,
encoder_optimizer,
decoder_optimizer,
criterion,
max_length=self.MAX_CONTEXT_LEN)
print_loss_total += loss
plot_loss_total += loss
if iteration % self.print_every == 0:
print_loss_avg = print_loss_total / self.print_every
print_loss_total = 0
print('Epoch %d: %s (%d %d%%) %.4f' % (epoch,
timeSince(start, iteration / float(self.epochs)),
iteration,
                                                           iteration * 100 / len(training_pairs),
print_loss_avg))
if iteration % self.plot_every == 0:
plot_loss_avg = plot_loss_total / self.plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
except KeyboardInterrupt:
print("Training interrupted")
interrupted = True
break
if interrupted:
break
random.shuffle(training_pairs)
showPlot(plot_losses, os.path.join(self.exp_dir, "pytorch_training.png"))
def testModel(self):
print("Testing...")
self.reader = FeaturesReader(self.test_data_filename)
self.testing_contexts = [c for c in self.reader]
self.X_test = [getInputTensor(context, self.lang, self.dict_vectorizer) for context in
self.testing_contexts]
self.y_test = getTargetTranslations(self.testing_contexts)
all_recall = []
all_precision = []
all_tp = []
all_p_sum = []
all_r_sum = []
for index, input_tensor in enumerate(tqdm(self.X_test)):
truth = {t for t in self.y_test[index]}
predictions, attentions = evaluate(self.encoder,
self.decoder,
input_tensor,
self.lang,
max_length=self.MAX_CONTEXT_LEN)
predictions = [p for p in predictions if p != EOS_marker]
predictions = Counter(predictions)
precision, recall, tp, p_sum, r_sum = measurePR(truth, predictions)
all_recall.append(recall)
all_precision.append(precision)
            all_tp.append(tp)
all_p_sum.append(p_sum)
all_r_sum.append(r_sum)
numsamples = float(len(all_recall))
tp = sum(all_tp)
p_sum = sum(all_p_sum)
r_sum = sum(all_r_sum)
overall_recall = sum(all_recall) / numsamples
overall_precision = sum(all_precision) / numsamples
print("Precision %d/%d %0.2f Recall %d/%d %0.2f" % (tp, p_sum, overall_precision,
tp, r_sum, overall_recall))
def plotPerformance(self):
""" Plot model loss and accuracy through epochs. """
pass
def saveModel(self):
model_name = self.__class__.__name__
# def evaluateRandomly(self, n=10):
# for i in range(n):
# pair = random.choice(self.pairs)
# print('>', pair[0])
# print('=', pair[1])
# output_words, attentions = evaluate(self.encoder, self.decoder, pair[0])
# output_sentence = ' '.join(output_words)
# print('<', output_sentence)
# print('')
def stochasticAugmentation(self):
pass
# lens = []
# to_choose = []
# for index, pair in enumerate(self.pairs):
# cur_len = len(pair[1])
# lens.append((index, cur_len))
# for _ in range(cur_len):
# to_choose.append(index)
#
# lens = sorted(lens, key=lambda x: x[1], reverse=True)
# First we fill the list with unique examples, starting with the longest extracted query first
# pairs_list = [self.pairs[p[0]] for p in lens[:self.epochs]]
# remaining = max(0, self.epochs - len(lens))
# If we need more training data, we stochastically pick more training examples by length as above
# random.shuffle(to_choose)
# pairs_list.extend([self.pairs[random.choice(to_choose)] for _ in range(remaining)])
def main(num_epochs=10, reset=False):
params = {
"num_epochs": num_epochs,
"print_every": 100,
# "learning_rate": 0.003,
"learning_rate": 0.01,
# "optimizer": "Adam",
"optimizer": "SGD",
}
exp_dir = os.path.join(getRootDir("aac"), "experiments", "aac_generate_kw_trace")
model = TorchModel(exp_dir, params=params)
model.run()
if __name__ == '__main__':
import plac
plac.call(main)
|
gpl-3.0
|
huzq/scikit-learn
|
sklearn/linear_model/_theil_sen.py
|
2
|
14782
|
# -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: Florian Wilhelm <[email protected]>
#
# License: BSD 3 clause
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from joblib import Parallel, delayed, effective_n_jobs
from ._base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
from ..exceptions import ConvergenceWarning
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
x_old : array, shape = [n_features]
Current start vector.
Returns
-------
x_new : array, shape = [n_features]
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
diff = X - x_old
diff_norm = np.sqrt(np.sum(diff ** 2, axis=1))
mask = diff_norm >= _EPSILON
# x_old equals one of our samples
is_x_old_in_X = int(mask.sum() < X.shape[0])
diff = diff[mask]
diff_norm = diff_norm[mask][:, np.newaxis]
quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
if quotient_norm > _EPSILON: # to avoid division by zero
new_direction = (np.sum(X[mask, :] / diff_norm, axis=0)
/ np.sum(1 / diff_norm, axis=0))
else:
new_direction = 1.
quotient_norm = 1.
return (max(0., 1. - is_x_old_in_X / quotient_norm) * new_direction
+ min(1., is_x_old_in_X / quotient_norm) * x_old)
def _spatial_median(X, max_iter=300, tol=1.e-3):
"""Spatial median (L1 median).
The spatial median is member of a class of so-called M-estimators which
are defined by an optimization problem. Given a number of p points in an
n-dimensional space, the point x minimizing the sum of all distances to the
p other points is called spatial median.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
max_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if spatial_median has converged. Default is 1.e-3.
Returns
-------
spatial_median : array, shape = [n_features]
Spatial median.
n_iter : int
Number of iterations needed.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
if X.shape[1] == 1:
return 1, np.median(X.ravel())
tol **= 2 # We are computing the tol on the squared norm
spatial_median_old = np.mean(X, axis=0)
for n_iter in range(max_iter):
spatial_median = _modified_weiszfeld_step(X, spatial_median_old)
if np.sum((spatial_median_old - spatial_median) ** 2) < tol:
break
else:
spatial_median_old = spatial_median
else:
warnings.warn("Maximum number of iterations {max_iter} reached in "
"spatial median for TheilSen regressor."
"".format(max_iter=max_iter), ConvergenceWarning)
return n_iter, spatial_median
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) +
n_subsamples - 1) / n_samples
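# Illustrative check (not part of the original source): _breakdown_point(100, 2)
# evaluates to roughly 0.29, i.e. about 29% of arbitrarily bad samples can be
# tolerated before the estimate breaks down.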
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Design matrix, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector, where n_samples is the number of samples.
indices : array, shape = [n_subpopulation, n_subsamples]
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : array, shape = [n_subpopulation, n_features + intercept]
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation,
y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(RegressorMixin, LinearModel):
"""Theil-Sen Estimator: robust multivariate regression model.
The algorithm calculates least square solutions on subsets with size
n_subsamples of the samples in X. Any value of n_subsamples between the
number of features and samples leads to an estimator with a compromise
between robustness and efficiency. Since the number of least square
solutions is "n_samples choose n_subsamples", it can be extremely large
and can therefore be limited with max_subpopulation. If this limit is
reached, the subsets are chosen randomly. In a final step, the spatial
median (or L1 median) is calculated of all least square solutions.
Read more in the :ref:`User Guide <theil_sen_regression>`.
Parameters
----------
fit_intercept : boolean, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations.
copy_X : boolean, default=True
If True, X will be copied; else, it may be overwritten.
max_subpopulation : int, default=1e4
Instead of computing with a set of cardinality 'n choose k', where n is
the number of samples and k is the number of subsamples (at least
number of features), consider only a stochastic subpopulation of a
given maximal size if 'n choose k' is larger than max_subpopulation.
For other than small problem sizes this parameter will determine
memory usage and runtime if n_subsamples is not changed.
n_subsamples : int, default=None
Number of samples to calculate the parameters. This is at least the
number of features (plus 1 if fit_intercept=True) and the number of
samples as a maximum. A lower number leads to a higher breakdown
point and a low efficiency while a high number leads to a low
breakdown point and a high efficiency. If None, take the
minimum number of subsamples leading to maximal robustness.
If n_subsamples is set to n_samples, Theil-Sen is identical to least
squares.
max_iter : int, default=300
Maximum number of iterations for the calculation of spatial median.
tol : float, default=1.e-3
Tolerance when calculating spatial median.
random_state : int, RandomState instance, default=None
A random number generator instance to define the state of the random
permutations generator. Pass an int for reproducible output across
multiple function calls.
See :term:`Glossary <random_state>`
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : boolean, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (median of distribution).
intercept_ : float
Estimated intercept of regression model.
breakdown_ : float
Approximated breakdown point.
n_iter_ : int
Number of iterations needed for the spatial median.
n_subpopulation_ : int
Number of combinations taken into account from 'n choose k', where n is
the number of samples and k is the number of subsamples.
Examples
--------
>>> from sklearn.linear_model import TheilSenRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = TheilSenRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9884...
>>> reg.predict(X[:1,])
array([-31.5871...])
References
----------
- Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
http://home.olemiss.edu/~xdang/papers/MTSE.pdf
"""
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, copy_X=True,
max_subpopulation=1e4, n_subsamples=None, max_iter=300,
tol=1.e-3, random_state=None, n_jobs=None, verbose=False):
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.max_subpopulation = int(max_subpopulation)
self.n_subsamples = n_subsamples
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.verbose = verbose
def _check_subparams(self, n_samples, n_features):
n_subsamples = self.n_subsamples
if self.fit_intercept:
n_dim = n_features + 1
else:
n_dim = n_features
if n_subsamples is not None:
if n_subsamples > n_samples:
raise ValueError("Invalid parameter since n_subsamples > "
"n_samples ({0} > {1}).".format(n_subsamples,
n_samples))
if n_samples >= n_features:
if n_dim > n_subsamples:
plus_1 = "+1" if self.fit_intercept else ""
raise ValueError("Invalid parameter since n_features{0} "
"> n_subsamples ({1} > {2})."
"".format(plus_1, n_dim, n_samples))
else: # if n_samples < n_features
if n_subsamples != n_samples:
raise ValueError("Invalid parameter since n_subsamples != "
"n_samples ({0} != {1}) while n_samples "
"< n_features.".format(n_subsamples,
n_samples))
else:
n_subsamples = min(n_dim, n_samples)
if self.max_subpopulation <= 0:
raise ValueError("Subpopulation must be strictly positive "
"({0} <= 0).".format(self.max_subpopulation))
all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
n_subpopulation = int(min(self.max_subpopulation, all_combinations))
return n_subsamples, n_subpopulation
def fit(self, X, y):
"""Fit linear model.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
random_state = check_random_state(self.random_state)
X, y = self._validate_data(X, y, y_numeric=True)
n_samples, n_features = X.shape
n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
n_features)
self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
if self.verbose:
print("Breakdown point: {0}".format(self.breakdown_))
print("Number of samples: {0}".format(n_samples))
tol_outliers = int(self.breakdown_ * n_samples)
print("Tolerable outliers: {0}".format(tol_outliers))
print("Number of subpopulations: {0}".format(
self.n_subpopulation_))
# Determine indices of subpopulation
if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
indices = list(combinations(range(n_samples), n_subsamples))
else:
indices = [random_state.choice(n_samples, size=n_subsamples,
replace=False)
for _ in range(self.n_subpopulation_)]
n_jobs = effective_n_jobs(self.n_jobs)
index_list = np.array_split(indices, n_jobs)
weights = Parallel(n_jobs=n_jobs,
verbose=self.verbose)(
delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
for job in range(n_jobs))
weights = np.vstack(weights)
self.n_iter_, coefs = _spatial_median(weights,
max_iter=self.max_iter,
tol=self.tol)
if self.fit_intercept:
self.intercept_ = coefs[0]
self.coef_ = coefs[1:]
else:
self.intercept_ = 0.
self.coef_ = coefs
return self
|
bsd-3-clause
|
openego/data_processing
|
preprocessing/eGo_preprocessing.py
|
1
|
3138
|
"""
eGo PreProcessing (eGoPP)
This script opens an oedb database connection and executes different parts of eGo.
Reads python and SQL scripts and gives logging info during execution.
Also see corresponding BPML diagram.
This file is part of project "open_eGo DataProcessing" (https://github.com/openego/data_processing/).
It's copyrighted by the contributors recorded in the version control history:
openego/data_processing/preprocessing/eGo_PreProcessing.py
SPDX-License-Identifier: AGPL-3.0-or-later
"""
__copyright__ = "Reiner Lemoine Institut"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ "https://www.gnu.org/licenses/agpl-3.0.en.html"
__author__ = "gplssm, Ludee"
import pandas as pd
import logging
import time
import os
import codecs
from tools import io
def preprocessing():
# Configure logging
logger = logging.getLogger('EEEE')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(message)s',
datefmt='%Y-%m-%d %I:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
# get current time and inform about start
total_time = time.time()
logger.info('ego preprocessing started...')
# list of sql- and python-snippets that process the data in correct order
snippet_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'preprocessing'))
script_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'python_scripts'))
snippets = [
'ego_pre_voltage_level.sql',
'ego_pre_slp_parameters.sql'
]
# get database connection
conn = io.oedb_session(section='oedb')
# iterate over list of sql- and python-snippets and execute them
for snippet in snippets:
# timing and logging
snippet_time = time.time()
logger.info("Execute '{}' ...".format(snippet))
if os.path.splitext(snippet)[1] == '.sql':
snippet_str = open(os.path.join(snippet_dir, snippet)).read()
# execute desired sql snippet
conn.execution_options(autocommit=True).execute(snippet_str)
elif os.path.splitext(snippet)[1] == '.py':
filename = os.path.join(script_dir, snippet)
script_str = open(filename, "rb").read()
# execute desired sql snippet
exec(compile(script_str, filename, 'exec'))
else:
raise NameError('{} is neither a python nor a sql script (at least it '
'has not the right extension). Please add an extension '
'to the script name (.py or .sql)'.format(snippet))
# inform the user
logger.info('...successfully done in {:.2f} seconds.'.format(
time.time() - snippet_time))
# close database connection
conn.close()
logger.info('eGo PreProcessing script successfully executed in {:.2f} seconds'.format(
time.time() - total_time))
if __name__ == '__main__':
preprocessing()
|
agpl-3.0
|
mohamed-ezz/CargoClassifier
|
extraction/learner.py
|
1
|
15127
|
import numpy as np
import cv2,cv
import os,sys
import idputils
import config as cfg
from sklearn import cross_validation
from sklearn import metrics
from sklearn.metrics import confusion_matrix
import pickle
from skimage.feature import hog
from sklearn.metrics import classification_report
class Learner:
def __init__(self, feature='hog', dirs = None, test_portion = 0.2, image_size = (64,64), datapicklefiles=None):
self.dirs = dirs
self.test_portion = test_portion
self.image_size = image_size
self.feature = feature # 'hue' or 'hog' or 'gray'
if feature not in ['hue', 'hog', 'gray']:
raise ValueError("Invalid value for feature: %s. It should be one of: hog, hue, gray" % feature)
self.clf = None
self.data = 'None' #write as string, bcoz otherwise numpy array will be compared elementwise with value None (in a future numpy version).
if datapicklefiles: #if data is ready in a pickle file
self.read_data_matrix(datapicklefiles)
def load_classifier(self, clfpicklepath):
self.clf = pickle.load(open(clfpicklepath,'r'))
def pickle_classifier(self, outputpath):
if self.clf:
pickle.dump(self.clf,open(outputpath,'w'))
else:
print 'pickle_classifier found no saved classifier to save. learner.clf=None'
def pickle_data_matrix(self, outputpath):
if self.data == 'None':
raise Exception('data matrix is not yet read. Cannot save it to a pickle file.')
directory = os.path.dirname(outputpath)
if not os.path.exists(directory):
os.makedirs(directory)
pickle.dump(self.data, open(outputpath,'w'))
def read_data_matrix(self, datapicklefiles=None):
"""Read data from pickled file if given in datapicklefiles, or from images in self.dirs (each dir = a class)"""
if datapicklefiles != None:
data = pickle.load(open(datapicklefiles[0],'r'))
remainingfiles = datapicklefiles[1:]
for datafile in remainingfiles:
data2 = pickle.load(open(datafile,'r'))
data = np.concatenate((data, data2),1) #concatenate columns
self.data = data
print 'Matrix loaded from %s' % datapicklefiles
if self.data != 'None':
return self.data
X, y = 'None', 'None'
for lbl, d in enumerate(self.dirs):
if not os.path.isdir(d): continue
if X=='None' and y=='None': #first timer
X, y = self._read_dir_to_matrix(d, lbl)
else:
Xi, yi = self._read_dir_to_matrix(d, lbl)
X = np.concatenate((X, Xi))
y = np.concatenate((y, yi))
y = np.reshape(y,(y.shape[0],1)) #convert shape (4000,) to (4000, 1)
self.data = np.concatenate((X,y),1)
print 'Data ready in memory. Matrix size:%s' % (str(self.data.shape))
if self.feature=='hog':
self.data = self.data.astype('float32') #for some reason, without this line, the matrix ends up being float64.
elif self.feature in ['hue', 'gray']:
self.data = self.data.astype('uint8')
# Divide into train+validation and test datasets
nrows = int(0.7 * self.data.shape[0])
self.test_data = self.data[nrows:,:]
self.data = self.data[:nrows,:]
return self.data
def _read_dir_to_matrix(self, directory, label):
"""Returns 2-tuple (data,y) corresponding to images in a single directory, all having the same label
Where data is the X feature matrix. y is the corresponding class label vector (y is just a vector of same values = label)"""
print 'getting size'
if self.feature == 'hog':
n_features = 1764
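            # (Note added for clarity, not in the original code: 1764 follows from the
            #  HOG settings used below -- a 64x64 image gives 8x8 cells of 8x8 pixels,
            #  7x7 overlapping 2x2-cell blocks, and 9 orientations: 7*7*2*2*9 = 1764.)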
else:
n_features = self.image_size[0] * self.image_size[1]
n_rows = len(idputils.list_images(directory))
print 'reading data to memory'
if self.feature == 'hog':
data = np.zeros((n_rows, n_features), np.float32)
else:
data = np.zeros((n_rows, n_features), np.uint8)
idx = 0
imagenames = idputils.list_images(directory)
for colorpath, depthpath, prefix in imagenames:
colorimage = cv2.imread(colorpath)
#preprocessing + feature selection
vector = self._image_to_featurevector(colorimage)
data[idx,:] = vector
idx += 1
print 'X.shape=',data.shape
y = np.ones((n_rows)) * label #class label (initialize with -1)
return data,y
def _image_to_featurevector(self, colorimage):
if self.feature == 'hue': return self._image_to_featurevector_HUE(colorimage)
if self.feature == 'hog': return self._image_to_featurevector_HOG(colorimage)
if self.feature == 'gray': return self._image_to_featurevector_GRAY(colorimage)
def _image_to_featurevector_HUE(self, colorimage):
"""Takes colorimage (numpy ndarray) and does :
preprocessing ,feature extraction ,feature selection...etc
Returns a vector (numpy 1D array)"""
#Threshold depth
# colorimage[depthimage < cfg.DEPTH_LO] = 0
# colorimage[depthimage > cfg.DEPTH_HI] = 0
#Get hueimage only
hueimage = cv2.cvtColor(colorimage,cv.CV_BGR2HSV)[:,:,0]
#Median filter to remove noise
#hueimage = cv2.medianBlur(hueimage, 21) #found to worsen performance
#Resize
hueimage = cv2.resize(hueimage, self.image_size)
#Switch to vector
vector = hueimage.reshape((hueimage.size))
return vector
def _image_to_featurevector_HOG(self, colorimage):
"""Takes colorimage (numpy ndarray) and does :
preprocessing ,feature extraction ,feature selection...etc
Returns a vector (numpy 1D array)"""
grayimage = cv2.cvtColor(colorimage,cv.CV_BGR2GRAY)
#Resize
grayimage = cv2.resize(grayimage, (64,64))
#Switch to vector
vector = hog(grayimage,orientations=9,pixels_per_cell=(8,8),cells_per_block=(2,2),visualise=False)
#print 'hog vector shape',vector.shape
return vector
def _image_to_featurevector_GRAY(self, colorimage):
"""Takes colorimage (numpy ndarray) and does :
preprocessing ,feature extraction ,feature selection...etc
Returns a vector (numpy 1D array)"""
grayimage = cv2.cvtColor(colorimage,cv.CV_BGR2GRAY)
#Resize
grayimage = cv2.resize(grayimage, self.image_size)
#Switch to vector
vector = grayimage.reshape(grayimage.size)
return vector
def balanced_accuracy(self, y, y_pred):
"""Calculates the classification accuracy for unbalanced classes.
The error is the average of errors for each indiviual class. Accuracy = 1 - error"""
if y.shape[0] != y_pred.shape[0]:
raise ValueError('weighted_error: given arrays have different lengths : %i and %i' % (y.shape[0],y_pred.shape[0]))
errors = []
classes = np.unique(y)
for lbl in classes:
classpredictions = y_pred[y==lbl]
classsize = classpredictions.size
misclassified = (classpredictions!=lbl).sum()
errors.append(misclassified*1.0/classsize)
return 1 - np.average(errors)
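        # Illustrative example (not part of the original code): for y = [0, 0, 0, 0, 1]
        # and y_pred = [0, 0, 0, 0, 0] the per-class errors are 0/4 and 1/1, so the
        # balanced accuracy is 1 - (0 + 1) / 2 = 0.5, whereas the raw accuracy is 0.8.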
def sample_balanced(self, Xy, classsize = None):
"""Returns a sub/over sampled balanced dataset.
For ex. if classsize=500, then 500 instances of each class will be taken from self.data"""
np.random.seed(583)
y=Xy[:,-1]
print y.shape
classcount = classsize or min(np.bincount(y))
classes = np.unique(y)
Xy_sampled = np.zeros((0,Xy.shape[1]))
for lbl in classes:
Xyclass = Xy[y==lbl]
sample_idx = np.random.random_integers(0, Xyclass.shape[0]-1, classcount)
Xy_sampled = np.concatenate((Xy_sampled, Xyclass[sample_idx,:]))
return Xy_sampled
def test(self, X_test=None, y_test=None, frompickle=None):
if frompickle:
data = pickle.load(open(frompickle,'r'))
X_test, y_test = data[:,:-1], data[:,-1]
y_predicttest = self.clf.predict(X_test)
test_accuracy = self.balanced_accuracy(y_test, y_predicttest)
print classification_report(y_test, y_predicttest, target_names=['Barrel','Blue','Brown','Non-object'])
print confusion_matrix(y_test, y_predicttest)
print 'Test accuracy: ', test_accuracy
return test_accuracy
def train_test(self, clf, title = 'Untitled', test_size = 0.3, presample_class_size = None):
data = self.data
if presample_class_size: #subsample with balanced class proportions
data = self.sample_balanced(self.data, presample_class_size)
X,y = data[:,:-1], data[:,-1].astype(np.uint8) #separate last column to a vector y
skfold = cross_validation.StratifiedShuffleSplit(y, n_iter=1, test_size = test_size, random_state = 583)
for train_index, test_index in skfold:
X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]
print '======================'
print 'Xtrain shape ',X_train.shape,' --- Xtest', X_test.shape
clf.fit(X_train,y_train)
self.clf = clf
y_predicttrain = clf.predict(X_train)
y_predicttest = clf.predict(X_test)
train_accuracy = self.balanced_accuracy(y_train, y_predicttrain)
test_accuracy = self.balanced_accuracy(y_test, y_predicttest)
print clf
print y_test.dtype,y_test
print y_predicttest.dtype,y_predicttest
print classification_report(y_test, y_predicttest, target_names=['Barrel','Blue','Brown','Non-object'])
print confusion_matrix(y_test, y_predicttest)
print 'Test:', test_accuracy, ' Train:', train_accuracy
print '======================'
return {'train_accuracy':train_accuracy,
'test_accuracy':test_accuracy,
'n_samples_train':X_train.shape[0],
'n_samples_test':X_test.shape[0]}
# def train_test(self, clf, title = 'Untitled'):
#
# X,y = self.data[:,:-1], self.data[:,-1] #separate last column to a vector y
# #Learn and predict
# SEED = 583
# k = 1
# skfold = cross_validation.StratifiedShuffleSplit(y, n_iter=k, test_size = 0.3, random_state = SEED)
# avg_test_accuracy = 0
# avg_train_accuracy = 0
# for i, d in enumerate(skfold):
# train_index, test_index = d
# X_train,X_test = X[train_index], X[test_index]
# y_train,y_test = y[train_index], y[test_index]
#
# clf.fit(X_train,y_train)
# y_predicttrain = clf.predict(X_train)
# accuracy = metrics.accuracy_score(y_train, y_predicttrain)#f1_score(y_train, y_predicttrain)
# avg_train_accuracy += accuracy
# #print '\t\t\t\tTrain accuracy: ', accuracy
# y_predicttest = clf.predict(X_test)
#
# self.clf = clf
#
# accuracy = metrics.metrics.accuracy_score(y_test, y_predicttest)#f1_score(y_test, y_predicttest)
# #idputils.plot_prec_recall([y_test], [y_predicttest[:,0]], ['svm'], 'TITLE', 'Pre_Rec_%i'%i)
# print confusion_matrix(y_test, y_predicttest)
# #print 'Test accuracy: ', accuracy
# avg_test_accuracy += accuracy
# sys.stdout.write('.');sys.stdout.flush()
# #print 'Parameters:',clf.coef_
#
# sys.stdout.write('\n')
# avg_test_accuracy /= k
# avg_train_accuracy /= k
# print title, 'Classifier'
# print 'Test:',avg_test_accuracy,' Train:',avg_train_accuracy
# return y_predicttest
def predictimg(self, img):
"""Use self.clf to predict given img. img could be a path or a numpy array image"""
if type(img) == str:
img = cv2.imread(img)
vector = self._image_to_featurevector(img)
y = self.clf.predict(vector)
print y
return y[0]
def predictdir(self, directory):
lst = idputils.list_images(directory)
y = []
for colorname,_,_ in lst:
y.append(self.predictimg(colorname))
return np.array(y)
if __name__ == '__main__':
"""
python learner.py -matrix /labeled/SETS/06
"""
import argparse
parser = argparse.ArgumentParser()
#python learner.py -matrix /labeled/
parser.add_argument('-matrix', metavar = 'path/file.pickle',nargs='+', dest='readMatrixPaths', help = 'Path to pickle file(s) with a numpy data matrix with last column having labels. If more than one file is given, they should have same # of instances, the features from both matrices will be concatenated and used together.')
parser.add_argument('-trainsize', dest='trainsize', default=0.1, type=float, help='Proportion of data to use for training; the rest is used for testing. Float between 0 and 1.')
parser.add_argument('-d',dest='dirs', nargs='+', help='One or more directories with the images, each dir should contain one class to be classified.')
parser.add_argument('-saveonly', metavar = 'path/file.pickle', dest='saveMatrixPath', help='Only read the given image data, save it to a matrix pickle file, and stop.')
parser.add_argument('-feature', dest ='feature', default=None, help='The feature type to extract. Either hog, hue or gray')
parser.add_argument('-savemodel', dest='modeloutputfile', default=None, help= 'Path to file to save the classifier model to. If this option is not used, the model will not be saved.')
args = parser.parse_args()
if args.readMatrixPaths and (args.dirs or args.saveMatrixPath or args.feature):
parser.error("Cannot use -matrix with any of -feature, -d, -saveonly")
if not args.feature:
args.feature='hog'
############################################################################################################################
if args.readMatrixPaths:
learner = Learner(args.feature, datapicklefiles= args.readMatrixPaths)
else:
learner = Learner(args.feature, dirs = args.dirs)
learner.read_data_matrix()
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import AdaBoostClassifier
#from nolearn.dbn import DBN
import nolearn
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.cross_validation import train_test_split
from collections import namedtuple
ModelTune = namedtuple('ModelTune', 'model params')
SEED=583
classifiers = [
#ModelTune(DecisionTreeClassifier(), {'max_depth':[9], 'class_weight':['auto'], 'random_state':[SEED]})
#ModelTune(LinearSVC(), {'C':[0.0000001, 0.01, 0.0001], 'loss':['hinge'], 'class_weight':['auto']}),
#ModelTune(SVC(), {'C':[0.000001, 0.01, 0.0001], 'kernel':['rbf'],'degree':[2], 'gamma':[5], 'class_weight':['auto'], 'tol':[0.01]}),
ModelTune(LogisticRegression(), {'C':[10],'intercept_scaling':[100000],'class_weight':['auto'],'random_state':[SEED]}),
#ModelTune(RandomForestClassifier(), {'n_jobs':[6],'n_estimators':[20],'class_weight':['auto'], 'min_samples_split':[160],'random_state':[SEED]}),
#ModelTune(NearestCentroid(), {}),
#ModelTune(AdaBoostClassifier(), {'base_estimator':[SVC(kernel='linear', C=0.001, class_weight='auto')],'random_state':[SEED],'algorithm':['SAMME']}),
#ModelTune(DBN(), {'layer_sizes':[[-1, 20, -1]], 'output_act_funct':[nolearn.dbn.activationFunctions.Sigmoid()]}),
]
if args.saveMatrixPath:
learner.pickle_data_matrix(args.saveMatrixPath)
else:
results = []
for classifier in classifiers:
#clf = RandomizedSearchCV(classifier.model, classifier.params,n_iter=1, n_jobs=7, cv=2, verbose=5, pre_dispatch='n_jobs')
#print clf
for k in classifier.params: classifier.params[k] = classifier.params[k][0]
clf=classifier.model.__class__(**classifier.params)
res = learner.train_test(clf,test_size=1-args.trainsize, presample_class_size=None)
results.append((clf, res))
if args.modeloutputfile:
learner.pickle_classifier(args.modeloutputfile)
#For 64x64 images
# Test accuracy: 0.912198824923
#y_predicttest=learner.train_test(LinearSVC(C = 0.001))
# LinearSVC(loss='l1', C = 0.000005)
# Test accuracy : 0.91957374675
# LogisticRegression('l2',C = 0.0001)
|
gpl-3.0
|
EtienneCmb/tensorpac
|
examples/misc/plot_align_tf_pha_peak.py
|
1
|
3411
|
"""
============================================================
Align time-frequency representations according to phase peak
============================================================
This example illustrates how to realign time-frequency representations
according to a phase. In particular, a time-point of reference is first
defined (`cue`). Then, the closest peak phase is found around this cue and the
phase is shifted so that the peak of the phase is aligned with the cue.
Finally, the same shift is then applied to the time-frequency representation.
For an extended description, see :class:`tensorpac.utils.PeakLockedTF`
This realignment can be a great tool to visualize the emergence of a
phase-amplitude coupling according to a specific phase.
"""
import numpy as np
from tensorpac.signals import pac_signals_wavelet
from tensorpac.utils import PeakLockedTF
import matplotlib.pyplot as plt
###############################################################################
# Simulate artificial coupling
###############################################################################
# first, we generate several trials that contain a coupling between a 4hz
# phase and a 100hz amplitude. By default, the returned dataset is organized as
# (n_epochs, n_times) where n_times is the number of time points and n_epochs
# is the number of trials
f_pha = 4. # frequency for phase
f_amp = 100. # frequency for amplitude
sf = 1024. # sampling frequency
n_epochs = 40 # number of epochs
n_times = 2000 # number of time-points
x, _ = pac_signals_wavelet(sf=sf, f_pha=4, f_amp=100, noise=1.,
n_epochs=n_epochs, n_times=n_times)
times = np.linspace(-1, 1, n_times)
###############################################################################
# Define the peak-locking object and realign TF representations
###############################################################################
# then, we define an instance of :class:`tensorpac.utils.PeakLockedTF`. This
# is assessed by using a reference time-point (here we use a cue at 0 seconds),
# a single phase interval and several amplitudes
cue = 0. # time-point of reference (in seconds)
f_pha = [3, 5] # single frequency phase interval
f_amp = (60, 140, 3, 1) # amplitude frequencies
p_obj = PeakLockedTF(x, sf, cue, times=times, f_pha=f_pha, f_amp=f_amp)
###############################################################################
# Plotting the realignment
###############################################################################
# finally, we use the integrated plotting function to visualize the result of
# the realignment. The returned plot contains a bottom image which is the
# mean of the shifted time-frequency power and a bottom line plot which
# contains the single-trial shifted phases in gray as well as the mean of those
# shifted phases in blue. You can see from the bottom plot that we retrieve the
# 4hz <-> 100hz artificial coupling
plt.figure(figsize=(8, 9))
title = 'Peak-Locked TF representation according to delta phase'
p_obj.plot(vmin=0, cmap='viridis', title=title)
# note that it is also possible to perform a z-score normalization to
# compensate the natural 1 / f effect in the power of real data. In that case
# the power is centered around 0
# p_obj.plot(zscore=True, vmin=-1, vmax=1, cmap='Spectral_r')
plt.tight_layout()
p_obj.show()
|
bsd-3-clause
|
wangshiphys/HamiltonianPy
|
HamiltonianPy/quantumoperator/particlesystem.py
|
1
|
37709
|
"""
This module provides classes that describe creation/annihilation operator as
well as term composed of creation and/or annihilation operators.
"""
__all__ = [
"AoC",
"NumberOperator",
"ParticleTerm",
]
import matplotlib.pyplot as plt
from HamiltonianPy.quantumoperator.constant import ANNIHILATION, CREATION, \
NUMERIC_TYPES_GENERAL
from HamiltonianPy.quantumoperator.matrixrepr import matrix_function
from HamiltonianPy.quantumoperator.quantumstate import StateID
class AoC:
"""
A unified description of the creation and annihilation operator.
Attributes
----------
otype : int
The type of this operator. It can be either 0 or 1, corresponding to
annihilation and creation respectively.
state : StateID
The single-particle state on which this operator is defined.
coordinate : tuple
The coordinates of the localized single-particle state in tuple form.
site : 1D np.ndarray
The coordinates of the localized single-particle state in np.ndarray
form.
spin : int
The spin index of the single-particle state.
orbit : int
The orbit index of the single-particle state.
Examples
--------
>>> from HamiltonianPy.quantumoperator import AoC
>>> c = AoC(otype=1, site=[0, 0], spin=0)
>>> a = AoC(otype=0, site=(0.3, 0.75), spin=1)
>>> c
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
>>> a
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
>>> c.tolatex()
'$c_{(0,0),\\\\downarrow}^{\\\\dagger}$'
>>> a.tolatex()
'$c_{(0.3,0.75),\\\\uparrow}$'
>>> c < a
True
>>> c.dagger()
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
>>> a.dagger()
AoC(otype=CREATION, site=(0.3, 0.75), spin=1, orbit=0)
>>> print(2 * c * a)
The coefficient of this term: 2
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
>>> print(0.5 * c)
The coefficient of this term: 0.5
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
>>> print(a * (1+2j))
The coefficient of this term: (1+2j)
The component operators:
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
"""
def __init__(self, otype, site, spin=0, orbit=0):
"""
Customize the newly created instance.
Parameters
----------
otype : int
The type of this operator.
It can be either 0 or 1, corresponding to annihilation and
creation respectively. It is recommended to use the constants
`CREATION` and `ANNIHILATION` defined in the `constant` module.
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
The `site` parameter should be 1D array with length 1,2 or 3.
spin : int, optional
The spin index of the single-particle state.
Default: 0.
orbit : int, optional
The orbit index of the single-particle state.
Default: 0.
"""
assert otype in (ANNIHILATION, CREATION)
state = StateID(site=site, spin=spin, orbit=orbit)
self._otype = otype
self._state = state
# The tuple form of this instance
# It is a tuple: (otype, (site, spin, orbit)) and site itself is a
# tuple with length 1, 2, or 3.
self._tuple_form = (otype, state._tuple_form)
@property
def otype(self):
"""
The `otype` attribute.
"""
return self._otype
@property
def state(self):
"""
The `state` attribute.
"""
return self._state
@property
def coordinate(self):
"""
The `coordinate` attribute.
"""
return self._state.coordinate
@property
def site(self):
"""
The `site` attribute.
"""
return self._state.site
@property
def spin(self):
"""
The `spin` attribute.
"""
return self._state.spin
@property
def orbit(self):
"""
The `orbit` attribute.
"""
return self._state.orbit
def getIndex(self, indices_table):
"""
Return the index of this operator.
Parameters
----------
indices_table : IndexTable
A table that associates instances of AoC with integer indices.
Returns
-------
index : int
The index of this instance in the given table.
See also
--------
getStateIndex
"""
return indices_table(self)
def getStateIndex(self, indices_table):
"""
Return the index of the single-particle state on which this operator is
defined.
Notes:
This method is different from the `getIndex` method.
This method returns the index of the `state` attribute of the
operator, while the `getIndex` method returns the index of the
operator itself.
Parameters
----------
indices_table : IndexTable
A table that associates instances of StateID with integer indices.
Returns
-------
index : int
The index of the `state` attribute in the given table.
"""
return indices_table(self._state)
def __repr__(self):
"""
Official string representation of the instance.
"""
otype = "CREATION" if self._otype == CREATION else "ANNIHILATION"
info = "AoC(otype={0}, site={1!r}, spin={2}, orbit={3})"
return info.format(otype, self.coordinate, self.spin, self.orbit)
__str__ = __repr__
def tolatex(self, **kwargs):
"""
Return the LaTeX form of this instance.
Parameters
----------
kwargs :
All keyword arguments are passed to the `tolatex` method of the
`state` attribute.
See also: `StateID.tolatex`.
Returns
-------
latex : str
The LaTeX form of this instance.
"""
subscript = self._state.tolatex(**kwargs).replace("$", "")
if self._otype == CREATION:
latex_form = r"$c_{{{0}}}^{{\dagger}}$".format(subscript)
else:
latex_form = r"$c_{{{0}}}$".format(subscript)
return latex_form
def show(self, **kwargs):
"""
Show the instance in handwriting form.
Parameters
----------
kwargs :
All keyword arguments are passed to the `tolatex` method of the
`state` attribute.
See also: `StateID.tolatex`.
"""
fig, ax = plt.subplots()
ax.text(
0.5, 0.5, self.tolatex(**kwargs), fontsize="xx-large",
ha="center", va="center", transform=ax.transAxes
)
ax.set_axis_off()
plt.show()
def __hash__(self):
"""
Calculate the hash code of the instance.
"""
return hash(self._tuple_form)
def __lt__(self, other):
"""
Implement the `<` operator between self and other.
The comparison logic is as follows:
A creation operator always compares less than an annihilation operator;
The smaller the single-particle state, the smaller the creation
operator; The larger the single-particle state, the smaller the
annihilation operator.
See also
--------
StateID.__lt__
StateID.__gt__
"""
if isinstance(other, self.__class__):
otype0 = self._otype
otype1 = other._otype
state0 = self._state
state1 = other._state
if otype0 == CREATION and otype1 == CREATION:
return state0 < state1
elif otype0 == CREATION and otype1 == ANNIHILATION:
return True
elif otype0 == ANNIHILATION and otype1 == CREATION:
return False
else:
return state0 > state1
else:
return NotImplemented
def __gt__(self, other):
"""
Implement the `>` operator between self and other.
See also
--------
__lt__
"""
if isinstance(other, self.__class__):
otype0 = self._otype
otype1 = other._otype
state0 = self._state
state1 = other._state
if otype0 == CREATION and otype1 == CREATION:
return state0 > state1
elif otype0 == CREATION and otype1 == ANNIHILATION:
return False
elif otype0 == ANNIHILATION and otype1 == CREATION:
return True
else:
return state0 < state1
else:
return NotImplemented
def __eq__(self, other):
"""
Implement the `==` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form == other._tuple_form
else:
return NotImplemented
def __ne__(self, other):
"""
Implement the `!=` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form != other._tuple_form
else:
return NotImplemented
def __le__(self, other):
"""
Implement the `<=` operator between self and other.
See also
--------
__lt__, __eq__
"""
if isinstance(other, self.__class__):
return self.__lt__(other) or self.__eq__(other)
else:
return NotImplemented
def __ge__(self, other):
"""
Implement the `>=` operator between self and other.
See also
--------
__lt__, __gt__, __eq__
"""
if isinstance(other, self.__class__):
return self.__gt__(other) or self.__eq__(other)
else:
return NotImplemented
def __mul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the left operand and `other` is the right operand;
Return an instance of ParticleTerm.
"""
if isinstance(other, self.__class__):
return ParticleTerm((self, other), coeff=1.0)
elif isinstance(other, NUMERIC_TYPES_GENERAL):
return ParticleTerm((self,), coeff=other)
else:
return NotImplemented
def __rmul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the right operand and `other` is the left operand;
Return an instance of ParticleTerm.
"""
if isinstance(other, NUMERIC_TYPES_GENERAL):
return ParticleTerm((self,), coeff=other)
else:
return NotImplemented
def dagger(self):
"""
Return the Hermitian conjugate of this operator.
"""
otype = ANNIHILATION if self._otype == CREATION else CREATION
return self.derive(otype=otype)
def conjugate_of(self, other):
"""
Determine whether `self` is the Hermitian conjugate of `other`.
"""
if isinstance(other, self.__class__):
return self._otype != other._otype and self._state == other._state
else:
raise TypeError(
"The `other` parameter is not instance of this class!"
)
def same_state(self, other):
"""
Determine whether `self` and `other` are defined on the same
single-particle state.
"""
if isinstance(other, self.__class__):
return self._state == other._state
else:
raise TypeError(
"The `other` parameter is not instance of this class!"
)
def derive(self, *, otype=None, site=None, spin=None, orbit=None):
"""
Derive a new instance from `self` and the given parameters.
This method creates a new instance with the same attributes as `self`,
except for those given to this method.
All the parameters should be specified as keyword arguments.
Returns
-------
res : A new instance of AoC.
"""
if otype is None:
otype = self.otype
if site is None:
site = self.coordinate
if spin is None:
spin = self.spin
if orbit is None:
orbit = self.orbit
return self.__class__(otype=otype, site=site, spin=spin, orbit=orbit)
def matrix_repr(
self, state_indices_table, right_bases, *,
left_bases=None, to_csr=True
):
"""
Return the matrix representation of this operator in the Hilbert space.
Parameters
----------
state_indices_table : IndexTable
A table that associates instances of StateID with integer indices.
right_bases : 1D np.ndarray
The bases of the Hilbert space before the operation.
The data-type of the array's elements is np.uint64.
left_bases : 1D np.ndarray, optional, keyword-only
The bases of the Hilbert space after the operation.
If given, the data-type of the array's elements is np.uint64.
If not given or None, left_bases is the same as right_bases.
Default: None.
to_csr : bool, optional, keyword-only
Whether to construct a csr_matrix as the result.
Default: True.
Returns
-------
res : csr_matrix or tuple
The matrix representation of the operator in the Hilbert space.
If `to_csr` is set to True, the result is a csr_matrix;
If set to False, the result is a tuple: (entries, (rows, cols)),
where `entries` holds the non-zero matrix elements, and `rows` and
`cols` are the row and column indices of the non-zero elements.
"""
term = [(state_indices_table(self._state), self._otype)]
return matrix_function(
term, right_bases, left_bases=left_bases, to_csr=to_csr
)
class NumberOperator:
"""
A unified description of the particle-number operator.
Attributes
----------
state : StateID
The single-particle state on which this operator is defined.
coordinate : tuple
The coordinates of the localized single-particle state in tuple form.
site : 1D np.ndarray
The coordinates of the localized single-particle state in np.ndarray
form.
spin : int
The spin index of the single-particle state.
orbit : int
The orbit index of the single-particle state.
Examples
--------
>>> from HamiltonianPy.quantumoperator import NumberOperator
>>> N0 = NumberOperator(site=[0, 0], spin=0)
>>> N1 = NumberOperator(site=(0.3, 0.75), spin=1)
>>> N0
NumberOperator(site=(0, 0), spin=0, orbit=0)
>>> N1
NumberOperator(site=(0.3, 0.75), spin=1, orbit=0)
>>> N0.tolatex()
'$n_{(0,0),\\\\downarrow}$'
>>> N1.tolatex()
'$n_{(0.3,0.75),\\\\uparrow}$'
>>> N0 < N1
True
>>> N0.dagger() is N0
True
>>> print(N0.toterm())
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
>>> print(N0 * N1 * 1.5)
The coefficient of this term: 1.5
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=CREATION, site=(0.3, 0.75), spin=1, orbit=0)
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
"""
def __init__(self, site, spin=0, orbit=0):
"""
Customize the newly created instance.
Parameters
----------
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
The `site` parameter should be 1D array with length 1, 2 or 3.
spin : int, optional
The spin index of the single-particle state.
Default: 0.
orbit : int, optional
The orbit index of the single-particle state.
Default: 0.
"""
state = StateID(site=site, spin=spin, orbit=orbit)
self._state = state
# The tuple form of this instance
# It is a tuple: ("N", (site, spin, orbit)) and site itself is a
# tuple with length 1, 2 or 3.
self._tuple_form = ("N", state._tuple_form)
@property
def state(self):
"""
The `state` attribute.
"""
return self._state
@property
def coordinate(self):
"""
The `coordinate` attribute.
"""
return self._state.coordinate
@property
def site(self):
"""
The `site` attribute.
"""
return self._state.site
@property
def spin(self):
"""
The `spin` attribute.
"""
return self._state.spin
@property
def orbit(self):
"""
The `orbit` attribute.
"""
return self._state.orbit
def getIndex(self, indices_table):
"""
Return the index of this operator.
Parameters
----------
indices_table : IndexTable
A table that associates instances of NumberOperator with integer
indices.
Returns
-------
index : int
The index of this instance in the given table.
See also
--------
getStateIndex
"""
return indices_table(self)
def getStateIndex(self, indices_table):
"""
Return the index of the single-particle state on which this operator
is defined.
Notes:
This method is different from the `getIndex` method.
This method returns the index of the `state` attribute of the
operator, while the `getIndex` method returns the index of the
operator itself.
Parameters
----------
indices_table : IndexTable
A table that associates instances of StateID with integer indices.
Returns
-------
index : int
The index of the `state` attribute in the given table.
"""
return indices_table(self._state)
def __repr__(self):
"""
Official string representation of the instance.
"""
info = "NumberOperator(site={0!r}, spin={1}, orbit={2})"
return info.format(self.coordinate, self.spin, self.orbit)
__str__ = __repr__
def tolatex(self, **kwargs):
"""
Return the LaTeX form of this instance.
Parameters
----------
kwargs :
All keyword arguments are passed to the `tolatex` method of the
`state` attribute.
See also: `StateID.tolatex`.
Returns
-------
latex : str
The LaTeX form of this instance.
"""
subscript = self._state.tolatex(**kwargs).replace("$", "")
return r"$n_{{{0}}}$".format(subscript)
def show(self, **kwargs):
"""
Show the instance in handwriting form.
Parameters
----------
kwargs :
All keyword arguments are passed to the `tolatex` method of the
`state` attribute.
See also: `StateID.tolatex`.
"""
fig, ax = plt.subplots()
ax.text(
0.5, 0.5, self.tolatex(**kwargs), fontsize="xx-large",
ha="center", va="center", transform=ax.transAxes
)
ax.set_axis_off()
plt.show()
def __hash__(self):
"""
Calculate the hash code of the instance.
"""
return hash(self._tuple_form)
def __lt__(self, other):
"""
Implement the `<` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form < other._tuple_form
else:
return NotImplemented
def __gt__(self, other):
"""
Implement the `>` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form > other._tuple_form
else:
return NotImplemented
def __eq__(self, other):
"""
Implement the `==` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form == other._tuple_form
else:
return NotImplemented
def __ne__(self, other):
"""
Implement the `!=` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form != other._tuple_form
else:
return NotImplemented
def __le__(self, other):
"""
Implement the `<=` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form <= other._tuple_form
else:
return NotImplemented
def __ge__(self, other):
"""
Implement the `>=` operator between self and other.
"""
if isinstance(other, self.__class__):
return self._tuple_form >= other._tuple_form
else:
return NotImplemented
def toterm(self):
"""
Convert this operator to ParticleTerm instance.
Returns
-------
term : ParticleTerm
The term corresponding to this operator.
"""
spin = self.spin
orbit = self.orbit
site = self.coordinate
c = AoC(CREATION, site=site, spin=spin, orbit=orbit)
a = AoC(ANNIHILATION, site=site, spin=spin, orbit=orbit)
return ParticleTerm((c, a), coeff=1.0)
def __mul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the left operand and `other` is the right operand;
Return an instance of ParticleTerm.
"""
return self.toterm() * other
def __rmul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the right operand and `other` is the left operand;
Return an instance of ParticleTerm.
"""
return other * self.toterm()
def dagger(self):
"""
Return the Hermitian conjugate of this operator.
"""
return self
def derive(self, *, site=None, spin=None, orbit=None):
"""
Derive a new instance from `self` and the given parameters.
This method creates a new instance with the same attributes as `self`,
except for those given to this method.
All the parameters should be specified as keyword arguments.
Returns
-------
res : A new instance of NumberOperator.
"""
if site is None:
site = self.coordinate
if spin is None:
spin = self.spin
if orbit is None:
orbit = self.orbit
return self.__class__(site=site, spin=spin, orbit=orbit)
def matrix_repr(
self, state_indices_table, bases, *, to_csr=True
):
"""
Return the matrix representation of this operator in the Hilbert space.
Parameters
----------
state_indices_table : IndexTable
A table that associates instances of StateID with integer indices.
bases : 1D np.ndarray
The bases of the Hilbert space.
The data-type of the array's elements is np.uint64.
to_csr : bool, optional, keyword-only
Whether to construct a csr_matrix as the result.
Default: True.
Returns
-------
res : csr_matrix or tuple
The matrix representation of the operator in the Hilbert space.
If `to_csr` is set to True, the result is a csr_matrix;
If set to False, the result is a tuple: (entries, (rows, cols)),
where `entries` holds the non-zero matrix elements, and `rows` and
`cols` are the row and column indices of the non-zero elements.
"""
index = state_indices_table(self._state)
term = [(index, CREATION), (index, ANNIHILATION)]
return matrix_function(term, bases, to_csr=to_csr, special_tag="number")
class SwapFermionError(Exception):
"""
Raised when swapping creation and annihilation operators defined on the same
single-particle state.
"""
def __init__(self, aoc0, aoc1):
self.aoc0 = aoc0
self.aoc1 = aoc1
def __str__(self):
return "\n".join(
[
"Swapping the following two operators would generate extra "
"identity operator which can not be processed properly:",
" {0!r}".format(self.aoc0),
" {0!r}".format(self.aoc1),
]
)
class ParticleTerm:
"""
A unified description of any term composed of creation and/or
annihilation operators.
Attributes
----------
coeff : float, int or complex
The coefficient of this term.
components : tuple
The component creation and/or annihilation operators of this term.
classification : {"general", "hopping", "number" or "Coulomb"}
The classification of the term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import AoC, ParticleTerm
>>> c = AoC(otype=1, site=[0, 0], spin=0)
>>> a = AoC(otype=0, site=(0.3, 0.75), spin=1)
>>> term = ParticleTerm((c, a), coeff=1.2)
>>> print(term)
The coefficient of this term: 1.2
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
>>> print(2.0 * term)
The coefficient of this term: 2.4
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0.3, 0.75), spin=1, orbit=0)
"""
def __init__(self, aocs, coeff=1.0, *, classification="general"):
"""
Customize the newly created instance.
Parameters
----------
aocs : tuple or list
A collection of creation and/or annihilation operators that
compose this term.
coeff : float, int or complex, optional
The coefficient of this term.
Default: 1.0.
classification : str, optional, keyword-only
A tag that identifies the classification of the instance.
Supported values: "general", "hopping", "number" and "Coulomb".
If you are not sure about this parameter, just use the default
value.
"general" means that the instance is just a term composed of
creation and/or annihilation operators;
"hopping" means that the instance is a hopping term:
'$c_i^{\\dagger} c_j$' and `i != j` (Note: the `i == j` case does
not belong to this category);
"number" means that the instance is a particle-number(chemical
potential) term: 'c_i^{\\dagger} c_i';
"Coulomb" means that the instance is a Coulomb interaction term:
'n_i n_j'.
The "hopping", "number" and "Coulomb" categories can also be
classified as "general".
Currently, this class does not check whether the given `aocs` is
compatible with the `classification` parameter. The user is
responsible for the compatibility of these two parameters. If
these two parameters are incompatible, the corresponding
instance would behave incorrectly, so use this parameter with
caution.
"""
assert isinstance(coeff, NUMERIC_TYPES_GENERAL), "Invalid coefficient"
assert classification in ("general", "hopping", "number", "Coulomb")
self._aocs = tuple(aocs)
self._coeff = coeff
self._classification = classification
@property
def coeff(self):
"""
The coefficient of this term.
"""
return self._coeff
@coeff.setter
def coeff(self, coeff):
assert isinstance(coeff, NUMERIC_TYPES_GENERAL), "Invalid coefficient"
self._coeff = coeff
@property
def components(self):
"""
The component creation and/or annihilation operators of this term.
"""
return self._aocs
@property
def classification(self):
"""
The `classification` attribute.
"""
return self._classification
def __str__(self):
"""
Return a string that describes the content of this instance.
"""
return "\n".join(
[
"The coefficient of this term: {0}".format(self._coeff),
"The component operators:",
*[" {0}".format(aoc) for aoc in self._aocs],
]
)
def check_compatibility(self):
"""
Check whether this term is compatible with the `classification`
attribute.
Returns
-------
res : bool
"""
aoc_num = len(self._aocs)
otypes = [aoc.otype for aoc in self._aocs]
states = [aoc.state for aoc in self._aocs]
if self._classification == "hopping":
return (
aoc_num == 2 and
otypes[0] == CREATION and otypes[1] == ANNIHILATION and
states[0] != states[1]
)
elif self._classification == "number":
return (
aoc_num == 2 and
otypes[0] == CREATION and otypes[1] == ANNIHILATION and
states[0] == states[1]
)
elif self._classification == "Coulomb":
return (
aoc_num == 4 and
otypes[0] == CREATION and otypes[1] == ANNIHILATION and
otypes[2] == CREATION and otypes[3] == ANNIHILATION and
states[0] == states[1] and states[2] == states[3]
)
else:
return True
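# Illustrative example (not exercised by this module itself):
# check_compatibility only inspects the operator pattern, e.g.
#   hop = ParticleTerm(
#       (AoC(CREATION, site=(0, 0)), AoC(ANNIHILATION, site=(1, 0))),
#       classification="hopping",
#   )
#   hop.check_compatibility()  # True: c_i^dagger c_j with i != j
#   bad = ParticleTerm(
#       (AoC(CREATION, site=(0, 0)), AoC(ANNIHILATION, site=(0, 0))),
#       classification="hopping",
#   )
#   bad.check_compatibility()  # False: this pattern is "number", not "hopping"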
def tolatex(self, indices_table=None, **kwargs):
"""
Return the LaTeX form of this term.
Parameters
----------
indices_table : IndexTable or None, optional
A table that associates instances of SiteID with integer indices.
The `indices_table` is passed to the `tolatex` method of
`StateID` as the `site_index` argument.
If not given or None, the `site` is shown as it is.
Default : None.
kwargs :
All other keyword arguments are passed to the `tolatex` method of
`StateID`.
See also: `StateID.tolatex`.
Returns
-------
latex : str
The LaTeX form of this term.
"""
latex_aocs = [
aoc.tolatex(site_index=indices_table, **kwargs).replace("$", "")
for aoc in self._aocs
]
return "".join(["$", str(self._coeff), *latex_aocs, "$"])
def show(self, indices_table=None, **kwargs):
"""
Show the term in handwriting form.
Parameters
----------
indices_table : IndexTable or None, optional
A table that associates instances of SiteID with integer indices.
The `indices_table` is passed to the `tolatex` method of
`StateID` as the `site_index` argument.
If not given or None, the `site` is shown as it is.
Default : None.
kwargs :
All other keyword arguments are passed to the `tolatex` method of
`StateID`.
See also: `StateID.tolatex`.
"""
fig, ax = plt.subplots()
ax.text(
0.5, 0.5, self.tolatex(indices_table, **kwargs),
fontsize="xx-large", ha="center",
va="center", transform=ax.transAxes
)
ax.set_axis_off()
plt.show()
def __mul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the left operand and `other` is the right operand;
Return a new instance of this class.
"""
if isinstance(other, self.__class__):
aocs = self._aocs + other._aocs
coeff = self._coeff * other._coeff
elif isinstance(other, AoC):
aocs = self._aocs + (other, )
coeff = self._coeff
elif isinstance(other, NUMERIC_TYPES_GENERAL):
aocs = self._aocs
coeff = self._coeff * other
else:
return NotImplemented
return self.__class__(aocs=aocs, coeff=coeff)
def __rmul__(self, other):
"""
Implement the binary arithmetic operation: `*`.
`self` is the right operand and `other` is the left operand;
This method return a new instance of this class.
"""
if isinstance(other, AoC):
aocs = (other, ) + self._aocs
coeff = self._coeff
elif isinstance(other, NUMERIC_TYPES_GENERAL):
aocs = self._aocs
coeff = other * self._coeff
else:
return NotImplemented
return self.__class__(aocs=aocs, coeff=coeff)
@staticmethod
def normalize(aocs):
"""
Reorder the given `aocs` into normal form.
For a composite operator consisting of creation and/or annihilation
operators, the normal form means that all the creation operators appear
to the left of all the annihilation operators. Also, the creation and
annihilation operators are sorted in ascending and descending order
respectively according to the single-particle state associated with
the operator.
See the document of `__lt__` method of AoC for the comparison logic.
Parameters
----------
aocs : list or tuple
A collection of creation and/or annihilation operators.
Returns
-------
aocs : list
The normal form of the operator.
swap_count : int
The number of swaps needed to obtain the normal form.
Raises
------
SwapFermionError :
Raised when swapping creation and annihilation operators
defined on the same single-particle state.
"""
aocs = list(aocs)
length = len(aocs)
swap_count = 0
for remaining_length in range(length, 1, -1):
for i in range(0, remaining_length - 1):
aoc0, aoc1 = aocs[i:i+2]
id0 = aoc0.state
id1 = aoc1.state
if aoc0 > aoc1:
if id0 != id1:
aocs[i] = aoc1
aocs[i + 1] = aoc0
swap_count += 1
else:
raise SwapFermionError(aoc0, aoc1)
return aocs, swap_count
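# Illustrative example (hypothetical operators): normalize bubble-sorts the
# operators using AoC.__lt__, so creation operators end up on the left, e.g.
#   a = AoC(ANNIHILATION, site=(0, 0))
#   c = AoC(CREATION, site=(1, 0))
#   ParticleTerm.normalize([a, c])  # -> ([c, a], 1), one swap performed
# If the two operators act on the same single-particle state, swapping them
# would generate an extra identity term, so SwapFermionError is raised instead.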
def dagger(self):
"""
Return the Hermitian conjugate of this term.
"""
aocs = [aoc.dagger() for aoc in self._aocs[::-1]]
return self.__class__(aocs=aocs, coeff=self._coeff.conjugate())
def matrix_repr(
self, state_indices_table, right_bases, *,
left_bases=None, coeff=None, to_csr=True
):
"""
Return the matrix representation of this term.
Parameters
----------
state_indices_table : IndexTable
A table that associates instances of StateID with integer indices.
right_bases : 1D np.ndarray
The bases of the Hilbert space before the operation.
The data-type of the array's elements is np.uint64.
left_bases : 1D np.ndarray, optional, keyword-only
The bases of the Hilbert space after the operation.
If given, the data-type of the array's elements is np.uint64.
If not given or None, left_bases is the same as right_bases.
Default: None.
coeff : int, float or complex, optional, keyword-only
A new coefficient for this term.
If not given or None, use the original coefficient.
Default: None.
to_csr : bool, optional, keyword-only
Whether to construct a csr_matrix as the result.
Default: True.
Returns
-------
res : csr_matrix or tuple
The matrix representation of the operator in the Hilbert space.
If `to_csr` is set to True, the result is a csr_matrix;
If set to False, the result is a tuple: (entries, (rows, cols)),
where `entries` holds the non-zero matrix elements, and `rows` and
`cols` are the row and column indices of the non-zero elements.
"""
if coeff is not None:
self.coeff = coeff
term = [
(aoc.getStateIndex(state_indices_table), aoc.otype)
for aoc in self._aocs
]
return matrix_function(
term, right_bases,
left_bases=left_bases, coeff=self._coeff,
to_csr=to_csr, special_tag=self._classification
)
|
gpl-3.0
|
nilmtk/nilmtk
|
nilmtk/dataset_converters/refit/convert_refit.py
|
1
|
4996
|
'''
REFIT dataset converter for the clean version available at the URLs below:
"REFIT: Electrical Load Measurements (Cleaned)"
https://pureportal.strath.ac.uk/en/datasets/refit-electrical-load-measurements-cleaned
https://pureportal.strath.ac.uk/files/52873459/Processed_Data_CSV.7z
https://pureportal.strath.ac.uk/files/62090184/CLEAN_REFIT_081116.7z
The original version of the dataset includes duplicated timestamps.
Check the dataset website for more information.
For citation of the dataset, use:
http://dx.doi.org/10.1038/sdata.2016.122
https://doi.org/10.15129/9ab14b0e-19ac-4279-938f-27f643078cec
'''
import pandas as pd
import numpy as np
from copy import deepcopy
from os.path import join, isdir, isfile, exists
from os import listdir
import fnmatch
import re
from sys import stdout
from nilmtk.utils import get_datastore
from nilmtk.datastore import Key
from nilmtk.timeframe import TimeFrame
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import get_module_directory, check_directory_exists
from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore
def convert_refit(input_path, output_filename, format='HDF'):
"""
Parameters
----------
input_path : str
The root path of the CSV files, e.g. House1.csv
output_filename : str
The destination filename (including path and suffix).
format : str
format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
"""
# Open DataStore
store = get_datastore(output_filename, format, mode='w')
# Convert raw data to DataStore
_convert(input_path, store, 'Europe/London')
# Add metadata
save_yaml_to_datastore(join(get_module_directory(),
'dataset_converters',
'refit',
'metadata'),
store)
store.close()
print("Done converting REFIT to HDF5!")
def _convert(input_path, store, tz, sort_index=True):
"""
Parameters
----------
input_path : str
The root path of the REFIT dataset.
store : DataStore
The NILMTK DataStore object.
tz : str
Timezone e.g. 'US/Eastern'
sort_index : bool
"""
check_directory_exists(input_path)
# Iterate though all houses and channels
# house 14 is missing!
houses = [1,2,3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21]
nilmtk_house_id = 0
prefix = ''
suffix = '_'
version_checked = False
for house_id in houses:
nilmtk_house_id += 1
print("Loading house", house_id, end="... ")
stdout.flush()
csv_filename = join(input_path, prefix + 'House' + suffix + str(house_id) + '.csv')
if not version_checked:
version_checked = True
if exists(csv_filename):
print('Using original filenames (House_XX.csv)')
else:
prefix = 'CLEAN_'
suffix = ''
csv_filename = join(input_path, prefix + 'House' + suffix + str(house_id) + '.csv')
print('Using CLEAN filenames (CLEAN_HouseXX.csv)')
if not exists(csv_filename):
raise RuntimeError('Could not find REFIT files. Please check the provided folder.')
# The clean version already includes a header row, so we
# just skip the text version of the timestamp
usecols = ['Unix','Aggregate','Appliance1','Appliance2','Appliance3','Appliance4','Appliance5','Appliance6','Appliance7','Appliance8','Appliance9']
df = _load_csv(csv_filename, usecols, tz)
if sort_index:
df = df.sort_index() # might not be sorted...
chan_id = 0
for col in df.columns:
chan_id += 1
print(chan_id, end=" ")
stdout.flush()
key = Key(building=nilmtk_house_id, meter=chan_id)
chan_df = pd.DataFrame(df[col])
chan_df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
# Modify the column labels to reflect the power measurements recorded.
chan_df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), chan_df)
print('')
def _load_csv(filename, usecols, tz):
"""
Parameters
----------
filename : str
usecols : list of columns to keep
tz : str e.g. 'US/Eastern'
Returns
-------
dataframe
"""
# Load data
df = pd.read_csv(filename, usecols=usecols)
# Convert the integer index column to timezone-aware datetime
df['Unix'] = pd.to_datetime(df['Unix'], unit='s', utc=True)
df.set_index('Unix', inplace=True)
df = df.tz_convert(tz)
return df
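# Illustrative behaviour of _load_csv on a toy frame (hypothetical values):
#   df = pd.DataFrame({'Unix': [1380000000], 'Aggregate': [350]})
#   df['Unix'] = pd.to_datetime(df['Unix'], unit='s', utc=True)
#   df = df.set_index('Unix').tz_convert('Europe/London')
#   # the index is now a timezone-aware DatetimeIndex in local (London) time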
|
apache-2.0
|
michaelneuder/image_quality_analysis
|
bin/nets/old/SSIM_conv_net.py
|
1
|
7670
|
#!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
import time
import progressbar
import pandas as pd
def convolve_inner_layers(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return tf.nn.tanh(y)
def convolve_output_layer(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
output = convolve_output_layer(conv3, W['weights_out'], b['bias_out'])
return output
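# Shape walk-through for the parameters used in main() below: an input batch
# of shape (N, 96, 96, 2) goes through one 11x11 convolution (2 -> 50
# channels), two 1x1 convolutions (50 -> 25 -> 10) and a final 1x1 output
# convolution (10 -> 1). Every layer uses 'SAME' padding with stride 1, so the
# spatial size stays 96x96 and the prediction has shape (N, 96, 96, 1),
# matching the SSIM target maps.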
def get_variance(training_target):
all_pixels = training_target.flatten()
return all_pixels.var()
def get_epoch(x, y, n):
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
batches[i+1] = [np.asarray(temp_x), np.asarray(temp_y)]
return batches
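# Illustrative example (hypothetical sizes): get_epoch shuffles the sample
# indices and splits them into mini-batches of size n, putting any remainder
# into one extra, smaller batch, e.g. for 10 samples and n=4:
#   batches = get_epoch(x, y, 4)   # x.shape[0] == 10
#   len(batches)                   # -> 3 (two batches of 4, one of 2)
#   x_batch, y_batch = batches[0]  # inputs and targets of the first batch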
def normalize_input(data):
data = np.asarray(data)
mean, std_dev = data.mean(), data.std()
return (data - mean) / std_dev
def main():
# parameters
filter_dim = 11
filter_dim2 = 1
batch_size = 4
image_dim = 96
input_layer = 2
first_layer = 50
second_layer = 25
third_layer = 10
output_layer = 1
initializer_scale = 1.0
learning_rate = .00001
epochs = 200
# seeding for debug purposes --- don't forget to remove
SEED = 12345
np.random.seed(SEED)
tf.set_random_seed(SEED)
print('loading image files ... ')
# train/test images
orig_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_500.txt', header=None, delim_whitespace = True)
recon_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_500.txt', header=None, delim_whitespace = True)
SSIM_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_500.txt', header=None, delim_whitespace = True)
orig_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_140.txt', header=None, delim_whitespace = True)
recon_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_140.txt', header=None, delim_whitespace = True)
SSIM_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_140.txt', header=None, delim_whitespace = True)
# normalization
original_images_train = normalize_input(orig_500.values)
reconstructed_images_train = normalize_input(recon_500.values)
comparison_images_train = SSIM_500.values
original_images_test = normalize_input(orig_140.values)
reconstructed_images_test = normalize_input(recon_140.values)
comparison_images_test = SSIM_140.values
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping the result data to --- (num pics), 96, 96, 1
target_data_train = np.reshape(comparison_images_train, [train_size, image_dim, image_dim, 1])
target_data_test = np.reshape(comparison_images_test, [test_size, image_dim, image_dim, 1])
# zipping data
train_data = np.reshape(np.dstack((original_images_train, reconstructed_images_train)), [train_size,image_dim,image_dim,2])
test_data = np.reshape(np.dstack((original_images_test, reconstructed_images_test)), [test_size,image_dim,image_dim,2])
# initializing variables --- fan in
weights = {
'weights1': tf.Variable(tf.random_normal([filter_dim,filter_dim,input_layer,first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
'weights2': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,first_layer,second_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*first_layer)))),
'weights3': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,second_layer,third_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*second_layer)))),
'weights_out': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,third_layer,output_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*third_layer))))
}
biases = {
'bias1': tf.Variable(tf.random_normal([first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
'bias2': tf.Variable(tf.random_normal([second_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*first_layer)))),
'bias3': tf.Variable(tf.random_normal([third_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*second_layer)))),
'bias_out': tf.Variable(tf.random_normal([output_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*third_layer))))
}
# tf Graph input
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, 2])
y = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
# model
prediction = conv_net(x, weights, biases)
# get variance to normalize error terms during training
variance = get_variance(target_data_train)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# session
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch_count = 0
global_step = 0
start_time = time.time()
print("starting training ... ")
for j in range(epochs):
print('---------------------------------------------------------')
print('beginning epoch {} ...'.format(epoch_count))
epoch = get_epoch(train_data, target_data_train, batch_size)
for i in range(len(epoch)):
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
percent_error = 100*loss/variance
print(" - training global_step {0:4d} error: {1:8.4f} {2:8.2f}%".format(global_step, loss, percent_error))
global_step += 1
epoch_count+=1
print('optimization finished!')
print('\nstarting testing...')
score = sess.run(cost, feed_dict={x: test_data, y: target_data_test})
percent_error = 100*score/variance
pred = sess.run(prediction, feed_dict={x: test_data})
print('---- test score : {:.4f}, {:.4f}% ----'.format(score, percent_error))
if __name__ == '__main__':
main()
|
mit
|
shahankhatch/scikit-learn
|
sklearn/datasets/samples_generator.py
|
103
|
56423
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : 'dense' (default) | 'sparse' | False
If ``'dense'``, return ``Y`` in the dense binary indicator format. If
``'sparse'``, return ``Y`` in the sparse binary indicator format.
If ``False``, return a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
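Examples
--------
A minimal usage sketch; only the (deterministic) shapes are shown because
the sampled values depend on the random draw.
>>> from sklearn.datasets import make_multilabel_classification
>>> X, Y = make_multilabel_classification(n_samples=5, n_features=10,
...                                       n_classes=3, random_state=0)
>>> X.shape
(5, 10)
>>> Y.shape
(5, 3)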
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
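Examples
--------
A minimal usage sketch; the shapes are deterministic, the values are random.
>>> from sklearn.datasets import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=20, random_state=1)
>>> X.shape
(20, 10)
>>> y.shape
(20,)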
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
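Examples
--------
A minimal usage sketch returning the true coefficients as well; only the
(deterministic) shapes are shown.
>>> from sklearn.datasets import make_regression
>>> X, y, coef = make_regression(n_samples=50, n_features=3,
...                              n_informative=2, noise=0.1,
...                              coef=True, random_state=0)
>>> X.shape
(50, 3)
>>> y.shape
(50,)
>>> coef.shape
(3,)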
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
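Examples
--------
A minimal usage sketch; with ``shuffle=False`` the first half of the points
lies on the outer circle (label 0) and the second half on the inner circle
(label 1).
>>> from sklearn.datasets import make_circles
>>> X, y = make_circles(n_samples=100, noise=0.05, factor=0.5,
...                     shuffle=False, random_state=0)
>>> X.shape
(100, 2)
>>> y[:3]
array([0, 0, 0])
>>> y[-3:]
array([1, 1, 1])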
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
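Examples
--------
A minimal usage sketch; the shapes are deterministic, the point coordinates
depend on the noise draw.
>>> from sklearn.datasets import make_moons
>>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
>>> X.shape
(100, 2)
>>> y.shape
(100,)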
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
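Examples
--------
A minimal usage sketch; only 5 of the 10 requested features influence `y`.
>>> from sklearn.datasets import make_friedman1
>>> X, y = make_friedman1(n_samples=50, n_features=10, noise=1.0,
...                       random_state=0)
>>> X.shape
(50, 10)
>>> y.shape
(50,)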
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
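Examples
--------
A minimal usage sketch; `X` always has the four features described above.
>>> from sklearn.datasets import make_friedman2
>>> X, y = make_friedman2(n_samples=50, noise=0.5, random_state=0)
>>> X.shape
(50, 4)
>>> y.shape
(50,)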
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
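Examples
--------
A minimal usage sketch; most of the variance of the returned matrix is
captured by roughly `effective_rank` singular vectors.
>>> from sklearn.datasets import make_low_rank_matrix
>>> X = make_low_rank_matrix(n_samples=50, n_features=25,
...                          effective_rank=5, tail_strength=0.5,
...                          random_state=0)
>>> X.shape
(50, 25)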
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
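Examples
--------
A minimal usage sketch; each column of the code `X` has exactly
`n_nonzero_coefs` non-zero entries and `Y` equals ``np.dot(D, X)``.
>>> from sklearn.datasets import make_sparse_coded_signal
>>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
...                                    n_features=10, n_nonzero_coefs=3,
...                                    random_state=0)
>>> Y.shape, D.shape, X.shape
((10, 5), (10, 8), (8, 5))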
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
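Examples
--------
A minimal usage sketch; the result is a dense symmetric positive-definite
matrix of the requested dimension.
>>> from sklearn.datasets import make_spd_matrix
>>> X = make_spd_matrix(n_dim=3, random_state=0)
>>> X.shape
(3, 3)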
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : array of shape (dim, dim)
The generated matrix. The returned array is dense; the sparsity is
imposed on its Cholesky factor (see Notes).
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
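Examples
--------
A minimal usage sketch; the returned precision matrix is dense, but its
Cholesky factor is sparse (see Notes).
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
>>> prec.shape
(4, 4)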
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
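Examples
--------
A minimal usage sketch; `t` is the univariate position along the roll.
>>> from sklearn.datasets import make_swiss_roll
>>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)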
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
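Examples
--------
A minimal usage sketch; classes are chi-squared quantiles, so the class
counts are exactly balanced when `n_samples` is divisible by `n_classes`.
>>> import numpy as np
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
...                                n_classes=3, random_state=0)
>>> X.shape
(90, 2)
>>> print(np.bincount(y))
[30 30 30]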
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
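Examples
--------
A minimal usage sketch; `rows` and `cols` hold one boolean indicator vector
per bicluster.
>>> from sklearn.datasets import make_biclusters
>>> X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
...                                 random_state=0)
>>> X.shape
(30, 20)
>>> rows.shape, cols.shape
((3, 30), (3, 20))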
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
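Examples
--------
A minimal usage sketch; with (4, 3) row and column clusters there are
4 * 3 = 12 biclusters, one per checkerboard cell.
>>> from sklearn.datasets import make_checkerboard
>>> X, rows, cols = make_checkerboard(shape=(30, 20), n_clusters=(4, 3),
...                                   random_state=0)
>>> X.shape
(30, 20)
>>> rows.shape, cols.shape
((12, 30), (12, 20))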
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
|
bsd-3-clause
|
cajal/pipeline
|
python/pipeline/reso.py
|
1
|
80743
|
""" Schemas for resonant scanners."""
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from . import experiment, injection, notify, shared
from .utils import galvo_corrections, signal, quality, mask_classification, performance
from .exceptions import PipelineException
schema = dj.schema('pipeline_reso', locals(), create_tables=False)
CURRENT_VERSION = 1
@schema
class Version(dj.Manual):
definition = """ # versions for the reso pipeline
-> shared.PipelineVersion
---
description = '' : varchar(256) # any notes on this version
date = CURRENT_TIMESTAMP : timestamp # automatic
"""
@schema
class ScanInfo(dj.Imported):
definition = """ # master table with general data about the scans
-> experiment.Scan
-> Version # reso version
---
nfields : tinyint # number of fields
nchannels : tinyint # number of channels
nframes : int # number of recorded frames
nframes_requested : int # number of requested frames (from header)
px_height : smallint # lines per frame
px_width : smallint # pixels per line
um_height : float # height in microns
um_width : float # width in microns
x : float # (um) center of scan in the motor coordinate system
y : float # (um) center of scan in the motor coordinate system
fps : float # (Hz) frames per second
zoom : decimal(5,2) # zoom factor
bidirectional : boolean # true = bidirectional scanning
usecs_per_line : float # microseconds per scan line
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
valid_depth=false : boolean # whether depth has been manually checked
"""
@property
def key_source(self):
rigs = [{'rig': '2P2'}, {'rig': '2P3'}, {'rig': '2P5'}, {'rig': '3P1'}, {'rig': 'M2P2'}]
reso_scans = experiment.Scan() & (experiment.Session() & rigs)
return reso_scans * (Version() & {'pipe_version': CURRENT_VERSION})
class Field(dj.Part):
definition = """ # field-specific information
-> ScanInfo
-> shared.Field
---
z : float # (um) absolute depth with respect to the surface of the cortex
delay_image : longblob # (ms) delay between the start of the scan and pixels in this field
"""
def make(self, key):
""" Read some scan parameters and compute FOV in microns."""
from decimal import Decimal
# Read the scan
print('Reading header...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get attributes
tuple_ = key.copy() # in case key is reused somewhere else
tuple_['nfields'] = scan.num_fields
tuple_['nchannels'] = scan.num_channels
tuple_['nframes'] = scan.num_frames
tuple_['nframes_requested'] = scan.num_requested_frames
tuple_['px_height'] = scan.image_height
tuple_['px_width'] = scan.image_width
tuple_['x'] = scan.motor_position_at_zero[0]
tuple_['y'] = scan.motor_position_at_zero[1]
tuple_['fps'] = scan.fps
tuple_['zoom'] = Decimal(str(scan.zoom))
tuple_['bidirectional'] = scan.is_bidirectional
tuple_['usecs_per_line'] = scan.seconds_per_line * 1e6
tuple_['fill_fraction'] = scan.temporal_fill_fraction
tuple_['valid_depth'] = True
# Estimate height and width in microns using measured FOVs for similar setups
fov_rel = (experiment.FOV() * experiment.Session() * experiment.Scan() & key
& 'session_date>=fov_ts')
zooms = fov_rel.fetch('mag').astype(np.float32) # zooms measured in same setup
closest_zoom = zooms[np.argmin(np.abs(np.log(zooms / scan.zoom)))]
dims = (fov_rel & 'ABS(mag - {}) < 1e-4'.format(closest_zoom)).fetch1('height', 'width')
um_height, um_width = [float(um) * (closest_zoom / scan.zoom) for um in dims]
tuple_['um_height'] = um_height * scan._y_angle_scale_factor
tuple_['um_width'] = um_width * scan._x_angle_scale_factor
# Insert in ScanInfo
self.insert1(tuple_)
# Compute field depths with respect to surface
surf_z = (experiment.Scan() & key).fetch1('depth') # surface depth in motor coordinates
motor_zero = surf_z - scan.motor_position_at_zero[2]
if scan.is_slow_stack and not scan.is_slow_stack_with_fastZ: # using motor
# Correct for motor and fastZ pointing in different directions
initial_fastZ = scan.initial_secondary_z or 0
rel_field_depths = 2 * initial_fastZ - np.array(scan.field_depths)
else: # using fastZ
rel_field_depths = np.array(scan.field_depths)
field_depths = motor_zero + rel_field_depths
# Insert field information
for field_id, (field_z, field_offsets) in enumerate(zip(field_depths, scan.field_offsets)):
ScanInfo.Field().insert1({**key, 'field': field_id + 1, 'z': field_z,
'delay_image': field_offsets})
# Fill in CorrectionChannel if only one channel
if scan.num_channels == 1:
CorrectionChannel().fill(key)
# Fill SegmentationTask if scan in autosegment
if experiment.AutoProcessing() & key & {'autosegment': True}:
SegmentationTask().fill(key)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in height and width. """
um_height, px_height, um_width, px_width = self.fetch1('um_height', 'px_height',
'um_width', 'px_width')
return np.array([um_height / px_height, um_width / px_width])
@schema
class FieldAnnotation(dj.Manual):
definition = """ # Annotations for specific fields within one scan
-> ScanInfo.Field
-> shared.ExpressionConstruct
-> shared.Channel
---
-> [nullable] injection.InjectionSite
field_notes : varchar(256)
"""
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> ScanInfo
"""
@property
def key_source(self):
return ScanInfo() & {'pipe_version': CURRENT_VERSION}
class MeanIntensity(dj.Part):
definition = """ # mean intensity values across time
-> Quality
-> shared.Field
-> shared.Channel
---
intensities : longblob
"""
class SummaryFrames(dj.Part):
definition = """ # 16-part summary of the scan (mean of 16 blocks)
-> Quality
-> shared.Field
-> shared.Channel
---
summary : longblob # h x w x 16
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile across time
-> Quality
-> shared.Field
-> shared.Channel
---
contrasts : longblob
"""
class QuantalSize(dj.Part):
definition = """ # quantal size in images
-> Quality
-> shared.Field
-> shared.Channel
---
min_intensity : int # min value in movie
max_intensity : int # max value in movie
quantal_size : float # variance slope, corresponds to quantal size
zero_level : int # level corresponding to zero (computed from variance dependence)
quantal_frame : longblob # average frame expressed in quanta
"""
class EpileptiformEvents(dj.Part):
definition = """ # compute frequency of epileptiform events
-> Quality
-> shared.Field
-> shared.Channel
---
frequency : float # (events / sec) frequency of epileptiform events
abn_indices : longblob # indices of epileptiform events (0-based)
peak_indices : longblob # indices of all local maxima peaks (0-based)
prominences : longblob # peak prominence for all peaks
widths : longblob # (secs) width at half prominence for all peaks
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Insert in Quality
self.insert1(key)
for field_id in range(scan.num_fields):
print('Computing quality metrics for field', field_id + 1)
for channel in range(scan.num_channels):
# Map: Compute quality metrics in parallel
results = performance.map_frames(performance.parallel_quality_metrics,
scan, field_id=field_id, channel=channel)
# Reduce
mean_intensities = np.zeros(scan.num_frames)
contrasts = np.zeros(scan.num_frames)
for frames, chunk_mis, chunk_contrasts, _ in results:
mean_intensities[frames] = chunk_mis
contrasts[frames] = chunk_contrasts
sorted_results = sorted(results, key=lambda res: res[0])
mean_groups = np.array_split([r[3] for r in sorted_results], 16) # 16 groups
frames = np.stack([np.mean(g, axis=0) for g in mean_groups if g.any()], axis=-1)
# Compute quantal size
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, :, :, channel, max(middle_frame - 2000, 0): middle_frame + 2000]
mini_scan = mini_scan.astype(np.float32)
results = quality.compute_quantal_size(mini_scan)
min_intensity, max_intensity, _, _, quantal_size, zero_level = results
quantal_frame = (np.mean(mini_scan, axis=-1) - zero_level) / quantal_size
# Compute abnormal event frequency
deviations = (mean_intensities - mean_intensities.mean()) / mean_intensities.mean()
peaks, prominences, widths = quality.find_peaks(deviations)
widths = [w / scan.fps for w in widths] # in seconds
abnormal = peaks[[p > 0.2 and w < 0.4 for p, w in zip(prominences, widths)]]
abnormal_freq = len(abnormal) / (scan.num_frames / scan.fps)
# Insert
field_key = {**key, 'field': field_id + 1, 'channel': channel + 1}
self.MeanIntensity().insert1({**field_key, 'intensities': mean_intensities})
self.Contrast().insert1({**field_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**field_key, 'summary': frames})
self.QuantalSize().insert1({**field_key, 'min_intensity': min_intensity,
'max_intensity': max_intensity,
'quantal_size': quantal_size,
'zero_level': zero_level,
'quantal_frame': quantal_frame})
self.EpileptiformEvents.insert1({**field_key, 'frequency': abnormal_freq,
'abn_indices': abnormal,
'peak_indices': peaks,
'prominences': prominences,
'widths': widths})
self.notify(field_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
percentile_99th = np.percentile(summary_frames, 99.5)
summary_frames = np.clip(summary_frames, None, percentile_99th)
summary_frames = signal.float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Mean intensity', size='small')
axes[0].plot(mean_intensities)
axes[0].set_ylabel('Pixel intensities')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].plot(contrasts)
axes[1].set_xlabel('Frames')
axes[1].set_ylabel('Pixel intensities')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality traces for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Scan
-> shared.Field
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
self.insert1({**field_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> ScanInfo # animal_id, session, scan_idx, version
-> CorrectionChannel # animal_id, session, scan_idx, field
---
raster_template : longblob # average frame from the middle of the movie
raster_phase : float # difference between expected and recorded scan angle
"""
@property
def key_source(self):
return ScanInfo * CorrectionChannel & {'pipe_version': CURRENT_VERSION}
def make(self, key):
from scipy.signal import tukey
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
# Select correction channel
channel = (CorrectionChannel() & key).fetch1('channel') - 1
field_id = key['field'] - 1
# Load some frames from the middle of the scan
middle_frame = int(np.floor(scan.num_frames / 2))
frames = slice(max(middle_frame - 1000, 0), middle_frame + 1000)
mini_scan = scan[field_id, :, :, channel, frames]
# Create results tuple
tuple_ = key.copy()
# Create template (average frame tapered to avoid edge artifacts)
taper = np.sqrt(np.outer(tukey(scan.image_height, 0.4),
tukey(scan.image_width, 0.4)))
anscombed = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # anscombe transform
template = np.mean(anscombed, axis=-1) * taper
tuple_['raster_template'] = template
# Compute raster correction parameters
if scan.is_bidirectional:
tuple_['raster_phase'] = galvo_corrections.compute_raster_phase(template,
scan.temporal_fill_fraction)
else:
tuple_['raster_phase'] = 0
# Insert
self.insert1(tuple_)
def get_correct_raster(self):
""" Returns a function to perform raster correction on the scan. """
raster_phase = self.fetch1('raster_phase')
fill_fraction = (ScanInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
correct_raster = lambda scan: scan.astype(np.float32, copy=False)
else:
correct_raster = lambda scan: galvo_corrections.correct_raster(scan,
raster_phase, fill_fraction)
return correct_raster
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for galvo scans
-> RasterCorrection
---
motion_template : longblob # image used as alignment template
y_shifts : longblob # (pixels) y motion correction shifts
x_shifts : longblob # (pixels) x motion correction shifts
y_std : float # (pixels) standard deviation of y shifts
x_std : float # (pixels) standard deviation of x shifts
outlier_frames : longblob # mask with true for frames with outlier shifts (already corrected)
align_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return RasterCorrection() & {'pipe_version': CURRENT_VERSION}
def make(self, key):
"""Computes the motion shifts per frame needed to correct the scan."""
from scipy import ndimage
# Get some params
px_height, px_width, nframes, nfields, fps = (ScanInfo() & key).fetch1('px_height', 'px_width',
'nframes', 'nfields', 'fps')
channel = (CorrectionChannel() & key).fetch1('channel') - 1
field_id = key['field'] - 1
# Motion correction fails if FPS is too high
if fps < 100:
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Load some frames from middle of scan to compute template
skip_rows = int(round(px_height * 0.10)) # we discard some rows/cols to avoid edge artifacts
skip_cols = int(round(px_width * 0.10))
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, skip_rows: -skip_rows, skip_cols: -skip_cols, channel,
max(middle_frame - 1000, 0): middle_frame + 1000]
mini_scan = mini_scan.astype(np.float32, copy=False)
# Correct mini scan
correct_raster = (RasterCorrection() & key).get_correct_raster()
mini_scan = correct_raster(mini_scan)
# Create template
mini_scan = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # *
template = np.mean(mini_scan, axis=-1)
template = ndimage.gaussian_filter(template, 0.7) # **
# * Anscombe transform to normalize noise, increase contrast and decrease outliers' leverage
# ** Small amount of gaussian smoothing to get rid of high frequency noise
# Map: compute motion shifts in parallel
f = performance.parallel_motion_shifts # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'template': template}
results = performance.map_frames(f, scan, field_id=field_id,
y=slice(skip_rows, -skip_rows),
x=slice(skip_cols, -skip_cols), channel=channel,
kwargs=kwargs)
# Reduce
y_shifts = np.zeros(scan.num_frames)
x_shifts = np.zeros(scan.num_frames)
for frames, chunk_y_shifts, chunk_x_shifts in results:
y_shifts[frames] = chunk_y_shifts
x_shifts[frames] = chunk_x_shifts
# Detect outliers
max_y_shift, max_x_shift = 20 / (ScanInfo() & key).microns_per_pixel
y_shifts, x_shifts, outliers = galvo_corrections.fix_outliers(y_shifts, x_shifts,
max_y_shift,
max_x_shift)
# Center shifts around zero
y_shifts -= np.median(y_shifts)
x_shifts -= np.median(x_shifts)
else:
# Fill with dummy values
            print(f'FPS of {int(fps)} is 100 or higher. Not correcting for motion.')
y_shifts = np.zeros(nframes, dtype=np.float32)
x_shifts = np.zeros(nframes, dtype=np.float32)
            outliers = np.zeros(nframes, dtype=bool)
            template = np.zeros((px_height, px_width), dtype=np.float32)
# Create results tuple
tuple_ = key.copy()
tuple_['field'] = field_id + 1
tuple_['motion_template'] = template
tuple_['y_shifts'] = y_shifts
tuple_['x_shifts'] = x_shifts
tuple_['outlier_frames'] = outliers
tuple_['y_std'] = np.std(y_shifts)
tuple_['x_std'] = np.std(x_shifts)
# Insert
self.insert1(tuple_)
# Notify after all fields have been processed
scan_key = {'animal_id': key['animal_id'], 'session': key['session'],
'scan_idx': key['scan_idx'], 'pipe_version': key['pipe_version']}
if len(MotionCorrection - CorrectionChannel & scan_key) > 0:
self.notify(scan_key, nframes, nfields)
@notify.ignore_exceptions
def notify(self, key, num_frames, num_fields):
fps = (ScanInfo() & key).fetch1('fps')
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(num_fields, 1, figsize=(15, 4 * num_fields), sharey=True)
axes = [axes] if num_fields == 1 else axes # make list if single axis object
for i in range(num_fields):
y_shifts, x_shifts = (self & key & {'field': i + 1}).fetch1('y_shifts',
'x_shifts')
axes[i].set_title('Shifts for field {}'.format(i + 1))
axes[i].plot(seconds, y_shifts, label='y shifts')
axes[i].plot(seconds, x_shifts, label='x shifts')
axes[i].set_ylabel('Pixels')
axes[i].set_xlabel('Seconds')
axes[i].legend()
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{scan_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_video(self, filename='galvo_corrections.mp4', channel=1, start_index=0,
seconds=30, dpi=250):
""" Creates an animation video showing the original vs corrected scan.
:param string filename: Output filename (path + filename)
:param int channel: What channel from the scan to use. Starts at 1
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and total_num_frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[self.fetch1('field') - 1, :, :, channel - 1, start_index: stop_index]
original_scan = scan_.copy()
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = self.get_correct_motion()
corrected_scan = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
axes[0].set_title('Original')
im1 = axes[0].imshow(original_scan[:, :, 0], vmin=original_scan.min(),
vmax=original_scan.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0])
axes[0].axis('off')
axes[1].set_title('Corrected')
im2 = axes[1].imshow(corrected_scan[:, :, 0], vmin=corrected_scan.min(),
vmax=corrected_scan.max()) # just a placeholder
fig.colorbar(im2, ax=axes[1])
axes[1].axis('off')
## Make the animation
def update_img(i):
im1.set_data(original_scan[:, :, i])
im2.set_data(corrected_scan[:, :, i])
video = animation.FuncAnimation(fig, update_img, corrected_scan.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
def get_correct_motion(self):
""" Returns a function to perform motion correction on scans. """
x_shifts, y_shifts = self.fetch1('x_shifts', 'y_shifts')
return lambda scan, indices=slice(None): galvo_corrections.correct_motion(scan,
x_shifts[indices], y_shifts[indices])
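        # Usage sketch (illustrative): apply the stored per-frame shifts to a raster-corrected
        # chunk of this field, e.g.
        #   correct_motion = (MotionCorrection() & key).get_correct_motion()
        #   corrected = correct_motion(raster_corrected_chunk, slice(start_index, stop_index))
        # The optional `indices` argument selects the shifts for the frames actually loaded.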
@schema
class SummaryImages(dj.Computed):
definition = """ # summary images for each field and channel after corrections
-> MotionCorrection
-> shared.Channel
"""
@property
def key_source(self):
return MotionCorrection() & {'pipe_version': CURRENT_VERSION}
class Average(dj.Part):
definition = """ # mean of each pixel across time
-> master
---
average_image : longblob
"""
class Correlation(dj.Part):
definition = """ # average temporal correlation between each pixel and its eight neighbors
-> master
---
correlation_image : longblob
"""
class L6Norm(dj.Part):
definition = """ # l6-norm of each pixel across time
-> master
---
l6norm_image : longblob
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
for channel in range(scan.num_channels):
# Map: Compute some statistics in different chunks of the scan
f = performance.parallel_summary_images # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=channel, kwargs=kwargs)
# Reduce: Compute average images
average_image = np.sum([r[0] for r in results], axis=0) / scan.num_frames
l6norm_image = np.sum([r[1] for r in results], axis=0) ** (1 / 6)
# Reduce: Compute correlation image
sum_x = np.sum([r[2] for r in results], axis=0) # h x w
sum_sqx = np.sum([r[3] for r in results], axis=0) # h x w
sum_xy = np.sum([r[4] for r in results], axis=0) # h x w x 8
denom_factor = np.sqrt(scan.num_frames * sum_sqx - sum_x ** 2)
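            # The loop below evaluates the Pearson correlation with each of the 8 neighbors:
            # (N * sum_xy - sum_x * sum_y) / (denom_factor_x * denom_factor_y), reusing the
            # same expression for the 4 rotations and their diagonal counterparts.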
corrs = np.zeros(sum_xy.shape)
for k in [0, 1, 2, 3]:
rotated_corrs = np.rot90(corrs, k=k)
rotated_sum_x = np.rot90(sum_x, k=k)
rotated_dfactor = np.rot90(denom_factor, k=k)
rotated_sum_xy = np.rot90(sum_xy, k=k)
# Compute correlation
rotated_corrs[1:, :, k] = (scan.num_frames * rotated_sum_xy[1:, :, k] -
rotated_sum_x[1:] * rotated_sum_x[:-1]) / \
(rotated_dfactor[1:] * rotated_dfactor[:-1])
rotated_corrs[1:, 1:, 4 + k] = ((scan.num_frames * rotated_sum_xy[1:, 1:, 4 + k] -
rotated_sum_x[1:, 1:] * rotated_sum_x[:-1, : -1]) /
(rotated_dfactor[1:, 1:] * rotated_dfactor[:-1, :-1]))
# Return back to original orientation
corrs = np.rot90(rotated_corrs, k=4 - k)
correlation_image = np.sum(corrs, axis=-1)
norm_factor = 5 * np.ones(correlation_image.shape) # edges
norm_factor[[0, -1, 0, -1], [0, -1, -1, 0]] = 3 # corners
norm_factor[1:-1, 1:-1] = 8 # center
correlation_image /= norm_factor
# Insert
field_key = {**key, 'channel': channel + 1}
self.insert1(field_key)
SummaryImages.Average().insert1({**field_key, 'average_image': average_image})
SummaryImages.L6Norm().insert1({**field_key, 'l6norm_image': l6norm_image})
SummaryImages.Correlation().insert1({**field_key,
'correlation_image': correlation_image})
self.notify(key, scan.num_channels)
@notify.ignore_exceptions
def notify(self, key, num_channels):
fig, axes = plt.subplots(num_channels, 2, squeeze=False, figsize=(12, 5 * num_channels))
axes[0, 0].set_title('L6-Norm', size='small')
axes[0, 1].set_title('Correlation', size='small')
for ax in axes.ravel():
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for channel in range(num_channels):
axes[channel, 0].set_ylabel('Channel {}'.format(channel + 1), size='large',
rotation='horizontal', ha='right')
corr = (SummaryImages.Correlation() & key & {'channel': channel + 1}).fetch1('correlation_image')
l6norm = (SummaryImages.L6Norm() & key & {'channel': channel + 1}).fetch1('l6norm_image')
axes[channel, 0].imshow(l6norm)
axes[channel, 1].imshow(corr)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'summary images for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target of segmentation and the channel to use
-> experiment.Scan
-> shared.Field
-> shared.Channel
-> shared.SegmentationMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, segmentation_method=6, compartment='soma'):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
tuple_ = {**field_key, 'channel': channel, 'compartment': compartment,
'segmentation_method': segmentation_method}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
def estimate_num_components(self):
""" Estimates the number of components per field using simple rules of thumb.
For somatic scans, estimate number of neurons based on:
(100x100x100)um^3 = 1e6 um^3 -> 100 neurons; (1x1x1)mm^3 = 1e9 um^3 -> 100K neurons
        For axonal scans, five times the somatic estimate; for bouton scans, ten times.
:returns: Number of components
:rtype: int
"""
# Get field dimensions (in microns)
scan = (ScanInfo() & self & {'pipe_version': CURRENT_VERSION})
field_height, field_width = scan.fetch1('um_height', 'um_width')
field_thickness = 10 # assumption
field_volume = field_width * field_height * field_thickness
# Estimate number of components
compartment = self.fetch1('compartment')
if compartment == 'soma':
num_components = field_volume * 0.0001
elif compartment == 'axon':
num_components = field_volume * 0.0005 # five times as many neurons
elif compartment == 'bouton':
num_components = field_volume * 0.001 # 10 times as many neurons
else:
            raise PipelineException("Compartment type '{}' not recognized".format(compartment))
return int(round(num_components))
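        # Worked example (illustrative numbers): a 400 x 400 um somatic field with the assumed
        # 10 um thickness spans 400 * 400 * 10 = 1.6e6 um^3, giving
        # 1.6e6 * 0.0001 = 160 components.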
@schema
class DoNotSegment(dj.Manual):
definition = """ # field/channels that should not be segmented (used for web interface only)
-> experiment.Scan
-> shared.Field
-> shared.Channel
"""
@schema
class Segmentation(dj.Computed):
definition = """ # Different mask segmentations.
-> MotionCorrection # animal_id, session, scan_idx, version, field
-> SegmentationTask # animal_id, session, scan_idx, field, channel, segmentation_method
---
segmentation_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return MotionCorrection() * SegmentationTask() & {'pipe_version': CURRENT_VERSION}
class Mask(dj.Part):
definition = """ # mask produced by segmentation.
-> Segmentation
mask_id : smallint
---
pixels : longblob # indices into the image in column major (Fortran) order
weights : longblob # weights of the mask at the indices above
"""
def get_mask_as_image(self):
""" Return this mask as an image (2-d numpy array)."""
# Get params
pixels, weights = self.fetch('pixels', 'weights')
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
# Reshape mask
mask = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
return np.squeeze(mask)
class Manual(dj.Part):
definition = """ # masks created manually
-> Segmentation
"""
def make(self, key):
print('Warning: Manual segmentation is not implemented in Python.')
# Copy any masks (and MaskClassification) that were there before
# Delete key from Segmentation (this is needed for trace and ScanSet and Activity computation to restart when things are added)
# Show GUI with the current masks
# User modifies it somehow to produce the new set of masks
# Insert info in Segmentation -> Segmentation.Manual -> Segmentation.Mask -> MaskClassification -> MaskClassification.Type
class CNMF(dj.Part):
definition = """ # source extraction using constrained non-negative matrix factorization
-> Segmentation
---
        params : varchar(1024) # parameters sent to CNMF as a JSON object
"""
def make(self, key):
""" Use CNMF to extract masks and traces.
See caiman_interface.extract_masks for explanation of parameters
"""
from .utils import caiman_interface as cmn
import json
import uuid
import os
print('')
print('*' * 85)
print('Processing {}'.format(key))
# Get some parameters
field_id = key['field'] - 1
channel = key['channel'] - 1
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
num_frames = (ScanInfo() & key).fetch1('nframes')
# Read scan
print('Reading scan...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Create memory mapped file (as expected by CaImAn)
print('Creating memory mapped file...')
filename = '/tmp/caiman-{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'.format(
uuid.uuid4(), image_height, image_width, num_frames)
mmap_shape = (image_height * image_width, num_frames)
mmap_scan = np.memmap(filename, mode='w+', shape=mmap_shape, dtype=np.float32)
# Map: Correct scan and save in memmap scan
f = performance.parallel_save_memmap # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction, 'y_shifts': y_shifts,
'x_shifts': x_shifts, 'mmap_scan': mmap_scan}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel, kwargs=kwargs)
# Reduce: Use the minimum values to make memory mapped scan nonnegative
mmap_scan -= np.min(results) # bit inefficient but necessary
# Set CNMF parameters
## Set general parameters
kwargs = {}
kwargs['num_background_components'] = 1
kwargs['merge_threshold'] = 0.7
kwargs['fps'] = (ScanInfo() & key).fetch1('fps')
# Set params specific to method and segmentation target
target = (SegmentationTask() & key).fetch1('compartment')
if key['segmentation_method'] == 2: # nmf
if target == 'axon':
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(2 / (ScanInfo() & key).microns_per_pixel)
else: # soma
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(14 / (ScanInfo() & key).microns_per_pixel)
else: #nmf-new
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
if target == 'axon':
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['num_components_per_patch'] = 5
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(20 / (ScanInfo() & key).microns_per_pixel) # 20 x 20 microns
kwargs['soma_diameter'] = tuple(2 / (ScanInfo() & key).microns_per_pixel)
else: # soma
kwargs['num_components_per_patch'] = 6
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(50 / (ScanInfo() & key).microns_per_pixel)
kwargs['soma_diameter'] = tuple(8 / (ScanInfo() & key).microns_per_pixel)
## Set performance/execution parameters (heuristically), decrease if memory overflows
kwargs['num_processes'] = 8 # Set to None for all cores available
kwargs['num_pixels_per_process'] = 10000
# Extract traces
print('Extracting masks and traces (cnmf)...')
scan_ = mmap_scan.reshape((image_height, image_width, num_frames), order='F')
cnmf_result = cmn.extract_masks(scan_, mmap_scan, **kwargs)
(masks, traces, background_masks, background_traces, raw_traces) = cnmf_result
# Delete memory mapped scan
print('Deleting memory mapped scan...')
os.remove(mmap_scan.filename)
# Insert CNMF results
print('Inserting masks, background components and traces...')
dj.conn()
## Insert in CNMF, Segmentation and Fluorescence
self.insert1({**key, 'params': json.dumps(kwargs)})
Fluorescence().insert1(key, allow_direct_insert=True) # we also insert traces
## Insert background components
Segmentation.CNMFBackground().insert1({**key, 'masks': background_masks,
'activity': background_traces})
## Insert masks and traces (masks in Matlab format)
num_masks = masks.shape[-1]
masks = masks.reshape(-1, num_masks, order='F').T # [num_masks x num_pixels] in F order
raw_traces = raw_traces.astype(np.float32, copy=False)
for mask_id, mask, trace in zip(range(1, num_masks + 1), masks, raw_traces):
mask_pixels = np.where(mask)[0]
mask_weights = mask[mask_pixels]
mask_pixels += 1 # matlab indices start at 1
Segmentation.Mask().insert1({**key, 'mask_id': mask_id, 'pixels': mask_pixels,
'weights': mask_weights})
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace})
Segmentation().notify(key)
def save_video(self, filename='cnmf_results.mp4', start_index=0, seconds=30,
dpi=250, first_n=None):
""" Creates an animation video showing the results of CNMF.
:param string filename: Output filename (path + filename)
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:param int first_n: Draw only the first n components.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and calculate total number of frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
channel = self.fetch1('channel') - 1
field_id = self.fetch1('field') - 1
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[field_id, :, :, channel, start_index: stop_index]
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = (MotionCorrection() & self).get_correct_motion()
scan_ = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Get scan dimensions
image_height, image_width, _ = scan_.shape
num_pixels = image_height * image_width
# Get masks and traces
masks = (Segmentation() & self).get_all_masks()
traces = (Fluorescence() & self).get_all_traces() # always there for CNMF
background_masks, background_traces = (Segmentation.CNMFBackground() &
self).fetch1('masks', 'activity')
# Select first n components
if first_n is not None:
masks = masks[:, :, :first_n]
traces = traces[:first_n, :]
# Drop frames that won't be displayed
traces = traces[:, start_index: stop_index]
background_traces = background_traces[:, start_index: stop_index]
# Create movies
extracted = np.dot(masks.reshape(num_pixels, -1), traces)
extracted = extracted.reshape(image_height, image_width, -1)
background = np.dot(background_masks.reshape(num_pixels, -1), background_traces)
background = background.reshape(image_height, image_width, -1)
residual = scan_ - extracted - background
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
axes[0, 0].set_title('Original (Y)')
im1 = axes[0, 0].imshow(scan_[:, :, 0], vmin=scan_.min(), vmax=scan_.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0, 0])
axes[0, 1].set_title('Extracted (A*C)')
im2 = axes[0, 1].imshow(extracted[:, :, 0], vmin=extracted.min(), vmax=extracted.max())
fig.colorbar(im2, ax=axes[0, 1])
axes[1, 0].set_title('Background (B*F)')
im3 = axes[1, 0].imshow(background[:, :, 0], vmin=background.min(),
vmax=background.max())
fig.colorbar(im3, ax=axes[1, 0])
axes[1, 1].set_title('Residual (Y - A*C - B*F)')
im4 = axes[1, 1].imshow(residual[:, :, 0], vmin=residual.min(), vmax=residual.max())
fig.colorbar(im4, ax=axes[1, 1])
for ax in axes.ravel():
ax.axis('off')
## Make the animation
def update_img(i):
im1.set_data(scan_[:, :, i])
im2.set_data(extracted[:, :, i])
im3.set_data(background[:, :, i])
im4.set_data(residual[:, :, i])
video = animation.FuncAnimation(fig, update_img, scan_.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
class CNMFBackground(dj.Part):
definition = """ # inferred background components
-> Segmentation.CNMF
---
masks : longblob # array (im_height x im_width x num_background_components)
activity : longblob # array (num_background_components x timesteps)
"""
def make(self, key):
# Create masks
if key['segmentation_method'] == 1: # manual
Segmentation.Manual().make(key)
elif key['segmentation_method'] in [2, 6]: # nmf and nmf-patches
self.insert1(key)
Segmentation.CNMF().make(key)
elif key['segmentation_method'] in [3, 4]: # nmf_patches
msg = 'This method has been deprecated, use segmentation_method 6'
raise PipelineException(msg)
else:
msg = 'Unrecognized segmentation method {}'.format(key['segmentation_method'])
raise PipelineException(msg)
@notify.ignore_exceptions
def notify(self, key):
fig = (Segmentation() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'segmentation for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
@staticmethod
def reshape_masks(mask_pixels, mask_weights, image_height, image_width):
""" Reshape masks into an image_height x image_width x num_masks array."""
masks = np.zeros([image_height, image_width, len(mask_pixels)], dtype=np.float32)
# Reshape each mask
for i, (mp, mw) in enumerate(zip(mask_pixels, mask_weights)):
mask_as_vector = np.zeros(image_height * image_width)
mask_as_vector[np.squeeze(mp - 1).astype(int)] = np.squeeze(mw)
masks[:, :, i] = mask_as_vector.reshape(image_height, image_width, order='F')
return masks
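        # Usage sketch (illustrative, names assumed): rebuild the dense masks stored by
        # Segmentation.Mask, e.g.
        #   pixels, weights = (Segmentation.Mask() & key).fetch('pixels', 'weights')
        #   masks = Segmentation.reshape_masks(pixels, weights, px_height, px_width)
        # Stored pixel indices are 1-based and in column-major (Fortran) order, hence the
        # `mp - 1` and `order='F'` above.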
def get_all_masks(self):
"""Returns an image_height x image_width x num_masks matrix with all masks."""
mask_rel = (Segmentation.Mask() & self)
# Get masks
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
mask_pixels, mask_weights = mask_rel.fetch('pixels', 'weights', order_by='mask_id')
# Reshape masks
masks = Segmentation.reshape_masks(mask_pixels, mask_weights, image_height, image_width)
return masks
def plot_masks(self, threshold=0.97, first_n=None):
""" Draw contours of masks over the correlation image (if available).
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:param first_n: Number of masks to plot. None for all.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = self.get_all_masks()
if first_n is not None:
masks = masks[:, :, :first_n]
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
## Plot contour at desired threshold (with random color)
random_color = (np.random.rand(), np.random.rand(), np.random.rand())
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[random_color])
return fig
@schema
class Fluorescence(dj.Computed):
definition = """ # fluorescence traces before spike extraction or filtering
-> Segmentation # animal_id, session, scan_idx, pipe_version, field, channel, segmentation_method
"""
@property
def key_source(self):
return Segmentation() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
definition = """
-> Fluorescence
-> Segmentation.Mask
---
trace : longblob
"""
def make(self, key):
# Load scan
print('Reading scan...')
field_id = key['field'] - 1
channel = key['channel'] - 1
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Map: Extract traces
print('Creating fluorescence traces...')
f = performance.parallel_fluorescence # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts, 'mask_pixels': pixels,
'mask_weights': weights}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel, kwargs=kwargs)
# Reduce: Concatenate
traces = np.zeros((len(mask_ids), scan.num_frames), dtype=np.float32)
for frames, chunk_traces in results:
traces[:, frames] = chunk_traces
# Insert
self.insert1(key)
for mask_id, trace in zip(mask_ids, traces):
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Fluorescence() & key).get_all_traces().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'calcium traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def get_all_traces(self):
""" Returns a num_traces x num_timesteps matrix with all traces."""
traces = (Fluorescence.Trace() & self).fetch('trace', order_by='mask_id')
return np.array([x.squeeze() for x in traces])
@schema
class MaskClassification(dj.Computed):
definition = """ # classification of segmented masks.
-> Segmentation # animal_id, session, scan_idx, pipe_version, field, channel, segmentation_method
-> shared.ClassificationMethod
---
classif_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return (Segmentation() * shared.ClassificationMethod() &
{'pipe_version': CURRENT_VERSION})
class Type(dj.Part):
definition = """
-> MaskClassification
-> Segmentation.Mask
---
-> shared.MaskType
"""
def make(self, key):
# Skip axonal scans
target = (SegmentationTask() & key).fetch1('compartment')
if key['classification_method'] == 2 and target != 'soma':
print('Warning: Skipping {}. Automatic classification works only with somatic '
'scans'.format(key))
return
# Get masks
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Classify masks
if key['classification_method'] == 1: # manual
if not SummaryImages() & key:
msg = 'Need to populate SummaryImages before manual mask classification'
raise PipelineException(msg)
template = (SummaryImages.Correlation() & key).fetch1('correlation_image')
masks = masks.transpose([2, 0, 1]) # num_masks, image_height, image_width
mask_types = mask_classification.classify_manual(masks, template)
elif key['classification_method'] == 2: # cnn-caiman
from .utils import caiman_interface as cmn
soma_diameter = tuple(14 / (ScanInfo() & key).microns_per_pixel)
probs = cmn.classify_masks(masks, soma_diameter)
mask_types = ['soma' if prob > 0.75 else 'artifact' for prob in probs]
else:
msg = 'Unrecognized classification method {}'.format(key['classification_method'])
raise PipelineException(msg)
print('Generated types:', mask_types)
# Insert results
self.insert1(key)
for mask_id, mask_type in zip(mask_ids, mask_types):
MaskClassification.Type().insert1({**key, 'mask_id': mask_id, 'type': mask_type})
self.notify(key, mask_types)
@notify.ignore_exceptions
def notify(self, key, mask_types):
fig = (MaskClassification() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('mask classification for {animal_id}-{session}-{scan_idx} field {field}: '
'{somas} somas and {arts} artifacts').format(**key,
somas=mask_types.count('soma'), arts=mask_types.count('artifact'))
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
def plot_masks(self, threshold=0.99):
""" Draw contours of masks over the correlation image (if available) with different
colors per type
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = (Segmentation() & self).get_all_masks()
mask_types = (MaskClassification.Type() & self).fetch('type')
colormap = {'soma': 'b', 'axon': 'k', 'dendrite': 'c', 'neuropil': 'y',
'artifact': 'r', 'unknown': 'w'}
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
color = colormap[mask_types[i]]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
## Plot contour at desired threshold
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[color])
return fig
@schema
class ScanSet(dj.Computed):
definition = """ # set of all units in the same scan
-> Fluorescence # processing done per field
"""
@property
def key_source(self):
return Fluorescence() & {'pipe_version': CURRENT_VERSION}
class Unit(dj.Part):
definition = """ # single unit in the scan
-> ScanInfo
-> shared.SegmentationMethod
unit_id : int # unique per scan & segmentation method
---
-> ScanSet # for it to act as a part table of ScanSet
-> Fluorescence.Trace
"""
class UnitInfo(dj.Part):
definition = """ # unit type and coordinates in x, y, z
-> ScanSet.Unit
---
um_x : int # x-coordinate of centroid in motor coordinate system
um_y : int # y-coordinate of centroid in motor coordinate system
um_z : smallint # z-coordinate of mask relative to surface of the cortex
px_x : smallint # x-coordinate of centroid in the frame
px_y : smallint # y-coordinate of centroid in the frame
ms_delay = 0 : smallint # (ms) delay from start of frame to recording of this unit
"""
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
def make(self, key):
from pipeline.utils import caiman_interface as cmn
# Get masks
image_height, image_width = (ScanInfo() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Compute units' coordinates
px_center = [image_height / 2, image_width / 2]
um_center = (ScanInfo() & key).fetch1('y', 'x')
um_z = (ScanInfo.Field() & key).fetch1('z')
px_centroids = cmn.get_centroids(masks)
um_centroids = um_center + (px_centroids - px_center) * (ScanInfo() & key).microns_per_pixel
# Compute units' delays
delay_image = (ScanInfo.Field() & key).fetch1('delay_image')
delays = (np.sum(masks * np.expand_dims(delay_image, -1), axis=(0, 1)) /
np.sum(masks, axis=(0, 1)))
delays = np.round(delays * 1e3).astype(np.int16) # in milliseconds
# Get next unit_id for scan
unit_rel = (ScanSet.Unit().proj() & key)
unit_id = np.max(unit_rel.fetch('unit_id')) + 1 if unit_rel else 1
# Insert in ScanSet
self.insert1(key)
# Insert units
        unit_ids = range(unit_id, unit_id + len(mask_ids))
for unit_id, mask_id, (um_y, um_x), (px_y, px_x), delay in zip(unit_ids, mask_ids,
um_centroids, px_centroids, delays):
ScanSet.Unit().insert1({**key, 'unit_id': unit_id, 'mask_id': mask_id})
unit_info = {**key, 'unit_id': unit_id, 'um_x': um_x, 'um_y': um_y,
'um_z': um_z, 'px_x': px_x, 'px_y': px_y, 'ms_delay': delay}
ScanSet.UnitInfo().insert1(unit_info, ignore_extra_fields=True)
def plot_centroids(self, first_n=None):
""" Draw masks centroids over the correlation image. Works on a single field/channel
:param first_n: Number of masks to plot. None for all
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get centroids
centroids = self.get_all_centroids(centroid_type='px')
if first_n is not None:
centroids = centroids[:, :first_n] # select first n components
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
image_height, image_width = (ScanInfo() & self).fetch1('px_height', 'px_width')
background_image = np.zeros([image_height, image_width])
# Plot centroids
image_height, image_width = background_image.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
plt.plot(centroids[:, 0], centroids[:, 1], 'ow', markersize=3)
return fig
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
centroids = self.get_all_centroids()
# Plot
# TODO: Add different colors for different types, correlation image as 2-d planes
# masks from diff channels with diff colors.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(centroids[:, 0], centroids[:, 1], centroids[:, 2])
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
def get_all_centroids(self, centroid_type='um'):
""" Returns the centroids for all units in the scan. Could also be limited by field.
Centroid type is either 'um' or 'px':
'um': Array (num_units x 3) with x, y, z in motor coordinate system (microns).
'px': Array (num_units x 2) with x, y pixel coordinates.
"""
units_rel = ScanSet.UnitInfo() & (ScanSet.Unit() & self)
if centroid_type == 'um':
xs, ys, zs = units_rel.fetch('um_x', 'um_y', 'um_z', order_by='unit_id')
centroids = np.stack([xs, ys, zs], axis=1)
else:
xs, ys = units_rel.fetch('px_x', 'px_y', order_by='unit_id')
centroids = np.stack([xs, ys], axis=1)
return centroids
@schema
class Activity(dj.Computed):
definition = """ # activity inferred from fluorescence traces
-> ScanSet # processing done per field
-> shared.SpikeMethod
---
activity_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return ScanSet() * shared.SpikeMethod() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
        definition = """ # deconvolved calcium activity
-> ScanSet.Unit
-> shared.SpikeMethod
---
-> Activity # for it to act as part table of Activity
trace : longblob
"""
class ARCoefficients(dj.Part):
definition = """ # fitted parameters for the autoregressive process (nmf deconvolution)
-> Activity.Trace
---
g : blob # g1, g2, ... coefficients for the AR process
"""
def make(self, key):
print('Creating activity traces for', key)
# Get fluorescence
fps = (ScanInfo() & key).fetch1('fps')
unit_ids, traces = (ScanSet.Unit() * Fluorescence.Trace() & key).fetch('unit_id', 'trace')
full_traces = [signal.fill_nans(np.squeeze(trace).copy()) for trace in traces]
# Insert in Activity
self.insert1(key)
if key['spike_method'] == 2: # oopsie
import pyfnnd # Install from https://github.com/cajal/PyFNND.git
for unit_id, trace in zip(unit_ids, full_traces):
spike_trace = pyfnnd.deconvolve(trace, dt=1 / fps)[0].astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 3: # stm
import c2s # Install from https://github.com/lucastheis/c2s
for unit_id, trace in zip(unit_ids, full_traces):
start = signal.notnan(trace)
end = signal.notnan(trace, len(trace) - 1, increment=-1)
trace_dict = {'calcium': np.atleast_2d(trace[start:end + 1]), 'fps': fps}
data = c2s.predict(c2s.preprocess([trace_dict], fps=fps), verbosity=0)
spike_trace = np.squeeze(data[0].pop('predictions')).astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 5: # nmf
from pipeline.utils import caiman_interface as cmn
import multiprocessing as mp
with mp.Pool(10) as pool:
results = pool.map(cmn.deconvolve, full_traces)
for unit_id, (spike_trace, ar_coeffs) in zip(unit_ids, results):
spike_trace = spike_trace.astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
Activity.ARCoefficients().insert1({**key, 'unit_id': unit_id, 'g': ar_coeffs},
ignore_extra_fields=True)
else:
msg = 'Unrecognized spike method {}'.format(key['spike_method'])
raise PipelineException(msg)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Activity() & key).get_all_spikes().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'spike traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def plot_impulse_responses(self, num_timepoints=100):
""" Plots the impulse response functions for all traces.
:param int num_timepoints: The number of points after impulse to use for plotting.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
ar_rel = Activity.ARCoefficients() & (Activity.Trace() & self)
if ar_rel: # if an AR model was used
# Get some params
fps = (ScanInfo() & self).fetch1('fps')
ar_coeffs = ar_rel.fetch('g')
# Define the figure
fig = plt.figure()
x_axis = np.arange(num_timepoints) / fps # make it seconds
# Over each trace
for g in ar_coeffs:
AR_order = len(g)
# Calculate impulse response function
irf = np.zeros(num_timepoints)
irf[0] = 1 # initial spike
for i in range(1, num_timepoints):
if i <= AR_order: # start of the array needs special care
irf[i] = np.sum(g[:i] * irf[i - 1:: -1])
else:
irf[i] = np.sum(g * irf[i - 1: i - AR_order - 1: -1])
# Plot
plt.plot(x_axis, irf)
plt.xlabel('Seconds')
return fig
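        # Worked example (illustrative): for an AR(1) trace with g = [0.95], the loop above
        # produces irf = [1, 0.95, 0.95**2, ...], i.e. an exponential decay with a time
        # constant of roughly -1 / (fps * log(0.95)) seconds.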
def get_all_spikes(self):
""" Returns a num_traces x num_timesteps matrix with all spikes."""
spikes = (Activity.Trace() & self).fetch('trace', order_by='unit_id')
return np.array([x.squeeze() for x in spikes])
@schema
class ScanDone(dj.Computed):
definition = """ # scans that are fully processed (updated every time a field is added)
-> ScanInfo
-> shared.SegmentationMethod
-> shared.SpikeMethod
"""
@property
def key_source(self):
return Activity() & {'pipe_version': CURRENT_VERSION}
@property
def target(self):
return ScanDone.Partial() # trigger make_tuples for fields in Activity that aren't in ScanDone.Partial
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
class Partial(dj.Part):
definition = """ # fields that have been processed in the current scan
-> ScanDone
-> Activity
"""
def make(self, key):
scan_key = {k: v for k, v in key.items() if k in self.heading}
# Delete current ScanDone entry
with dj.config(safemode=False):
(ScanDone() & scan_key).delete()
# Reinsert in ScanDone
self.insert1(scan_key)
# Insert all processed fields in Partial
ScanDone.Partial().insert((Activity() & scan_key).proj())
from . import stack
@schema
class StackCoordinates(dj.Computed):
definition = """ # centroids of each unit in motor/stack coordinate system
-> ScanSet # animal_id, session, scan_idx, channel, field, segmentation_method, pipe_version
-> stack.Registration.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method
"""
class UnitInfo(dj.Part):
definition = """ # ScanSet.UnitInfo centroids mapped to stack coordinates
-> master # this will add field and channels back
-> ScanSet.Unit
---
stack_x : float
stack_y : float
stack_z : float
"""
def make(self, key):
from scipy import ndimage
# Get registration grid (px -> stack_coordinate)
stack_key = {**key, 'scan_session': key['session']}
field_res = (ScanInfo & key).microns_per_pixel
grid = (stack.Registration & stack_key).get_grid(type='affine',
desired_res=field_res)
self.insert1(key)
field_units = ScanSet.UnitInfo & (ScanSet.Unit & key)
for unit_key, px_x, px_y in zip(*field_units.fetch('KEY', 'px_x', 'px_y')):
px_coords = np.array([[px_y], [px_x]])
unit_x, unit_y, unit_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
StackCoordinates.UnitInfo.insert1({**key, **unit_key, 'stack_x': unit_x,
'stack_y': unit_y, 'stack_z': unit_z})
@schema
class Func2StructMatching(dj.Computed):
definition = """ # match functional masks to structural masks
-> ScanSet # animal_id, session, scan_idx, pipe_version, field, channel
-> stack.FieldSegmentation.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method, stacksegm_channel, stacksegm_method
---
key_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
"""
class AllMatches(dj.Part):
        definition = """ # store all possible matches (one functional cell could match with more than one structural mask and vice versa)
key_hash : varchar(32) # master key
unit_id : int # functional unit id
sunit_id : int # structural unit id
---
iou : float # intersection-over-union of the 2-d masks
"""
# Used key_hash because key using ScanSet.Unit, FieldSegmentation.StackUnit has
# more than 16 attributes and MySQL complains. I added the foreign key constraints
# manually
class Match(dj.Part):
definition = """ # match of a functional mask to a structural mask (1:1 relation)
-> master
-> ScanSet.Unit
---
-> stack.FieldSegmentation.StackUnit.proj(session='scan_session')
iou : float # Intersection-over-Union of the 2-d masks
distance2d : float # distance between centroid of 2-d masks
distance3d : float # distance between functional centroid and structural centroid
"""
def make(self, key):
from .utils import registration
from scipy import ndimage
# Get caiman masks and resize them
field_dims = (ScanInfo & key).fetch1('um_height', 'um_width')
masks = np.moveaxis((Segmentation & key).get_all_masks(), -1, 0)
masks = np.stack([registration.resize(m, field_dims, desired_res=1) for m in
masks])
scansetunit_keys = (ScanSet.Unit & key).fetch('KEY', order_by='mask_id')
# Binarize masks
binary_masks = np.zeros(masks.shape, dtype=bool)
for i, mask in enumerate(masks):
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0),
mask.shape) # max to min value in mask
            cumsum_mask = np.cumsum(mask[indices] ** 2) / np.sum(mask ** 2)  # consider adding 1e-9 to avoid division by zero
binary_masks[i][indices] = cumsum_mask < 0.9
# Get structural segmentation and registration grid
stack_key = {**key, 'scan_session': key['session']}
segmented_field = (stack.FieldSegmentation & stack_key).fetch1('segm_field')
grid = (stack.Registration & stack_key).get_grid(type='affine', desired_res=1)
sunit_ids = (stack.FieldSegmentation.StackUnit & stack_key).fetch('sunit_id',
order_by='sunit_id')
# Create matrix with IOU values (rows for structural units, columns for functional units)
ious = []
for sunit_id in sunit_ids:
binary_sunit = segmented_field == sunit_id
intersection = np.logical_and(binary_masks, binary_sunit).sum(axis=(1, 2)) # num_masks
union = np.logical_or(binary_masks, binary_sunit).sum(axis=(1, 2)) # num_masks
ious.append(intersection / union)
iou_matrix = np.stack(ious)
# Save all possible matches / iou_matrix > 0
self.insert1({**key, 'key_hash': key_hash(key)})
for mask_idx, func_idx in zip(*np.nonzero(iou_matrix)):
self.AllMatches.insert1({'key_hash': key_hash(key),
'unit_id': scansetunit_keys[func_idx]['unit_id'],
'sunit_id': sunit_ids[mask_idx],
'iou': iou_matrix[mask_idx, func_idx]})
# Iterate over matches (from best to worst), insert
while iou_matrix.max() > 0:
# Get next best
best_mask, best_func = np.unravel_index(np.argmax(iou_matrix),
iou_matrix.shape)
best_iou = iou_matrix[best_mask, best_func]
# Get stack unit coordinates
coords = (stack.FieldSegmentation.StackUnit & stack_key &
{'sunit_id': sunit_ids[best_mask]}).fetch1('sunit_z', 'sunit_y',
'sunit_x', 'mask_z',
'mask_y', 'mask_x')
sunit_z, sunit_y, sunit_x, mask_z, mask_y, mask_x = coords
# Compute distance to 2-d and 3-d mask
px_y, px_x = ndimage.measurements.center_of_mass(binary_masks[best_func])
px_coords = np.array([[px_y], [px_x]])
func_x, func_y, func_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance2d = np.sqrt((func_z - mask_z) ** 2 + (func_y - mask_y) ** 2 +
(func_x - mask_x) ** 2)
distance3d = np.sqrt((func_z - sunit_z) ** 2 + (func_y - sunit_y) ** 2 +
(func_x - sunit_x) ** 2)
self.Match.insert1({**key, **scansetunit_keys[best_func],
'sunit_id': sunit_ids[best_mask], 'iou': best_iou,
'distance2d': distance2d, 'distance3d': distance3d})
# Deactivate match
iou_matrix[best_mask, :] = 0
iou_matrix[:, best_func] = 0
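            # Note (illustrative): zeroing the matched row and column enforces a greedy 1:1
            # assignment; once a functional mask and a structural unit are paired, neither can
            # be matched again even if they overlap other masks with a smaller IoU.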
|
lgpl-3.0
|
elijah513/scikit-learn
|
examples/linear_model/plot_sgd_iris.py
|
286
|
2202
|
"""
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
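# For class c the OVA decision boundary is coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# which the helper below solves for x1 given x0.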
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
|
bsd-3-clause
|
xiaoxiamii/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
    # plot the parallels to the separating hyperplane that pass through the
    # support vectors (the margin lines sit at a vertical offset of
    # sqrt(1 + a^2) * margin = 1 / |w[1]| from the decision boundary)
    margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
    yy_down = yy - np.sqrt(1 + a ** 2) * margin
    yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
miladh/lgn-simulator
|
apps/edog/analysis/freq_tuning.py
|
1
|
2957
|
#!/usr/bin/python
from pylab import*
import os, sys
current_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(os.path.join(current_path,"../../../tools")))
from analysis.data_extractor import*
from analysis.tuning_analysis import*
from analysis.pretty_plotting import*
import seaborn.apionly as sns
sns.set_color_codes()
#Analysis: ###########################################################################
def make_plot(cell_type, resp, attr_a, attr_b, freqs, diameter, save_fig=True):
fig, ax = plt.subplots(1, 1, figsize=(6,6), sharex='col')
set_font()
set_legend()
spines_edge_color(ax)
remove_ticks(ax)
set_grid(ax)
for wr in attr_a[:]:
i = where(attr_a==wr)[0][0]
label = r"$w_{\mathrm{RCR}}=$"+'${0:.2f}$'.format(wr)
ax.plot(freqs, resp[cell_type][i,:], "-", label=label)
ax.set_ylabel("Response", fontsize=20)
ax.set_xlabel("Patch-grating wave vector $k_\mathrm{pg} (1/^\circ)$", fontsize=20)
ax.set_title("$\mathrm{Patch\;size}=$"+'${0:.2f}^\circ$'.format(diameter), fontsize=20)
ax.legend()
# ax.set_xscale('log')
# ax.set_xticks([0.5, 1, 2, 4, 8])
# ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xlim([0, 10])
#########################################################################################
plt.tight_layout()
if save_fig: fig.savefig(os.path.join(output_dir, fig_name+cell_type+"_"+record_label+".pdf"))
if save_fig: fig.savefig(os.path.join(output_dir, fig_name+cell_type+"_"+record_label+".png"))
plt.show()
if __name__ == "__main__":
import sumatra_tracking.io_manager as smt
record_label = sys.argv[1:][-1]
sims_path = sys.argv[1:][-2]
output_dir = smt.get_output_dir(record_label)
#-----------------------------------------------------------------------------------
cell_types = ["relay"]
attr_a_name = "relay.Krc.w"
attr_b_name = "relay.Krc.w"
fig_name= "freq_tuning_fb_large_d"
sims = get_simulations(sims_path)
Ns=sims[0].integrator.Ns
Nt=sims[0].integrator.Nt
s_points = sims[0].integrator.s_points[Ns/2:]
k_points = sims[0].integrator.k_points[Ns/2:]
rc = [Ns/2, Ns/2]
attr_a = extract_unique_simulation_attrs(sims, attr_a_name)
attr_b = extract_unique_simulation_attrs(sims, attr_b_name)
attr_a2, freqs, resp = resp_vs_attrA_vs_attrB(sims, attr_a_name, "stimulus.spatial_freq", rc=rc)
    if dot(attr_a - attr_a2, attr_a - attr_a2) != 0:
        raise ValueError("attr_a and attr_a2 are different: {} vs {}".format(attr_a, attr_a2))
    else:
        print "diff=", attr_a - attr_a2
    d = extract_unique_simulation_attrs(sims, "stimulus.mask_size")
    if len(d) > 1:
        raise IndexError(d)
    d = d[0]
    print d
#-----------------------------------------------------------------------------------
for cell in cell_types:
make_plot(cell, resp, attr_a, attr_b, freqs, d)
|
gpl-3.0
|
ledbetdr/keras
|
examples/kaggle_otto_nn.py
|
70
|
3775
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
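# Hedged sketch of the optimizer swap suggested in the header docstring
# (SGD + momentum + decay instead of Adam). The hyperparameter values are
# illustrative assumptions, not tuned settings; uncomment to try it.
# from keras.optimizers import SGD
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd)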
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
|
mit
|
rahul-c1/scikit-learn
|
sklearn/feature_extraction/text.py
|
12
|
48135
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
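# Worked example (illustrative): both helpers map u'Fran\xe7ais' ("Français")
# to u'Francais'. strip_accents_ascii additionally drops any character with no
# ASCII equivalent at all, whereas strip_accents_unicode keeps it unchanged.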
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
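    # Worked example (illustrative): with ngram_range=(2, 2), a document such
    # as "ab cd" is padded per word to " ab " and " cd ", yielding the n-grams
    # [' a', 'ab', 'b ', ' c', 'cd', 'd '] -- the padding marks word boundaries.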
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
        # Unfortunately, the Python functools package does not provide an
        # efficient `compose` function that would let us chain a dynamic
        # number of functions. However, the cost of a lambda call is a few
        # hundred nanoseconds, which is negligible compared to the cost of
        # tokenizing a string of 1000 chars, for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _check_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it needs very little memory and is scalable to large datasets, as there
      is no need to store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can be
        of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
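if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes the HashingVectorizer defined
    # above, an arbitrary two-document corpus and a deliberately small
    # n_features value):
    _demo_docs = ["the quick brown fox", "jumped over the lazy dog"]
    _demo_X = HashingVectorizer(n_features=2 ** 8).transform(_demo_docs)
    print(_demo_X.shape)  # (2, 256): one row per document, no stored vocabulary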
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can be
        of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because
they occurred in either too many
(`max_df`) or in too few (`min_df`) documents.
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than high or in
        fewer documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._check_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._check_vocabulary()
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
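if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes the CountVectorizer defined
    # above and an arbitrary two-document corpus):
    _demo_corpus = ['the cat sat on the mat', 'the dog sat']
    _demo_cv = CountVectorizer()
    _demo_counts = _demo_cv.fit_transform(_demo_corpus)
    print(_demo_cv.get_feature_names())  # alphabetically sorted vocabulary
    print(_demo_counts.toarray())        # raw term counts, one row per document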
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log1p instead of log makes sure terms with zero idf don't get
# suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
if not hasattr(self, "_idf_diag"):
raise ValueError("idf vector not fitted")
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
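if __name__ == '__main__':
    # Minimal sketch of the weighting described in the TfidfTransformer
    # docstring (illustrative counts): the first term occurs in every document,
    # so with smooth_idf=True its idf weight is exactly 1.0 and its tf-idf
    # value reduces to its (normalized) term frequency.
    _demo_counts = [[3, 0, 1], [2, 0, 0], [3, 0, 0], [4, 0, 0]]
    _demo_tfidf = TfidfTransformer(smooth_idf=True, norm='l2')
    print(_demo_tfidf.fit_transform(_demo_counts).toarray())
    print(_demo_tfidf.idf_)  # first entry is exactly 1.0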
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can be
        of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
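if __name__ == '__main__':
    # Minimal end-to-end sketch (illustrative; assumes the TfidfVectorizer
    # defined above): raw documents in, L2-normalized tf-idf matrix out,
    # equivalent to CountVectorizer followed by TfidfTransformer.
    _demo_docs = ['the cat sat on the mat', 'the dog sat']
    _demo_X = TfidfVectorizer().fit_transform(_demo_docs)
    print(_demo_X.shape)  # (2, number of distinct terms)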
|
bsd-3-clause
|
zrhans/pythonanywhere
|
.virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_gtk3agg.py
|
8
|
3841
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
import sys
import warnings
from . import backend_agg
from . import backend_gtk3
from .backend_cairo import cairo, HAS_CAIRO_CFFI
from matplotlib.figure import Figure
from matplotlib import transforms
if six.PY3 and not HAS_CAIRO_CFFI:
warnings.warn(
"The Gtk3Agg backend is known to not work on Python 3.x with pycairo. "
"Try installing cairocffi.")
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
backend_agg.FigureCanvasAgg):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
self._bbox_queue = []
def _renderer_init(self):
pass
def _render_figure(self, width, height):
backend_agg.FigureCanvasAgg.draw(self)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
if not len(self._bbox_queue):
if self._need_redraw:
self._render_figure(w, h)
bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
else:
return
else:
bbox_queue = self._bbox_queue
if HAS_CAIRO_CFFI:
ctx = cairo.Context._from_pointer(
cairo.ffi.cast('cairo_t **',
id(ctx) + object.__basicsize__)[0],
incref=True)
for bbox in bbox_queue:
area = self.copy_from_bbox(bbox)
buf = np.fromstring(area.to_string_argb(), dtype='uint8')
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
if HAS_CAIRO_CFFI:
image = cairo.ImageSurface.create_for_data(
buf.data, cairo.FORMAT_ARGB32, width, height)
else:
image = cairo.ImageSurface.create_for_data(
buf, cairo.FORMAT_ARGB32, width, height)
ctx.set_source_surface(image, x, y)
ctx.paint()
if len(self._bbox_queue):
self._bbox_queue = []
return False
def blit(self, bbox=None):
# If bbox is None, blit the entire canvas to gtk. Otherwise
# blit only the area defined by the bbox.
if bbox is None:
bbox = self.figure.bbox
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
self._bbox_queue.append(bbox)
self.queue_draw_area(x, y, width, height)
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(backend_agg.FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Agg(figure)
manager = FigureManagerGTK3Agg(canvas, num)
return manager
FigureCanvas = FigureCanvasGTK3Agg
FigureManager = FigureManagerGTK3Agg
show = backend_gtk3.show
|
apache-2.0
|
PHOTOX/fuase
|
ase/ase/utils/sjeos.py
|
4
|
3701
|
# -*- coding: utf-8 -*-
try:
import numpy as np
except ImportError:
# required due to ase/test/eoswoase.py
pass
class EquationOfStateSJEOS:
"""Fit equation of state for bulk systems.
10.1103/PhysRevB.67.026103
The following equation is used::
    A third order inverse polynomial fit:
        E(V) = c0 + c1*t + c2*t**2 + c3*t**3,   where t = V**(-1/3)
More methods available in ase.utils.eosase2
Use::
eos = EquationOfState(volumes, energies)
v0, e0, B = eos.fit()
eos.plot()
"""
def __init__(self, volumes, energies, eos='sjeos'):
assert eos == 'sjeos', eos + ' eos not available. Probably scipy missing.'
self.v = np.array(volumes)
self.e = np.array(energies)
self.eos_string = 'sjeos'
self.v0 = None
def fit(self):
"""Calculate volume, energy, and bulk modulus.
        Returns the optimal volume, the minimum energy, and the bulk
        modulus. Notice that the ASE unit for the bulk modulus is
eV/Angstrom^3 - to get the value in GPa, do this::
v0, e0, B = eos.fit()
print B / kJ * 1.0e24, 'GPa'
"""
fit0 = np.poly1d(np.polyfit(self.v**-(1.0 / 3), self.e, 3))
fit1 = np.polyder(fit0, 1)
fit2 = np.polyder(fit1, 1)
self.v0 = None
for t in np.roots(fit1):
if isinstance(t, float) and t > 0 and fit2(t) > 0:
self.v0 = t**-3
break
if self.v0 is None:
raise ValueError('No minimum!')
self.e0 = fit0(t)
self.B = t**5 * fit2(t) / 9
self.fit0 = fit0
return self.v0, self.e0, self.B
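    # Derivation of the bulk modulus expression used above (illustrative
    # check): with t = V**(-1/3) and E(V) = fit0(t), dt/dV = -t**4 / 3, so
    #   d2E/dV2 = (t**8 * fit0''(t) + 4 * t**7 * fit0'(t)) / 9.
    # At the minimum fit0'(t) = 0, hence
    #   B = V * d2E/dV2 = t**5 * fit0''(t) / 9,
    # which is exactly the expression assigned to self.B in fit().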
def plot(self, filename=None, show=None):
"""Plot fitted energy curve.
Uses Matplotlib to plot the energy curve. Use *show=True* to
show the figure and *filename='abc.png'* or
*filename='abc.eps'* to save the figure to a file."""
#import matplotlib.pyplot as plt
import pylab as plt
if self.v0 is None:
self.fit()
if filename is None and show is None:
show = True
x = 3.95
f = plt.figure(figsize=(x * 2.5**0.5, x))
f.subplots_adjust(left=0.12, right=0.9, top=0.9, bottom=0.15)
plt.plot(self.v, self.e, 'o')
x = np.linspace(min(self.v), max(self.v), 100)
y = self.fit0(x**-(1.0 / 3))
plt.plot(x, y, '-r')
try:
from ase.units import kJ
plt.xlabel(u'volume [Å^3]')
plt.ylabel(u'energy [eV]')
plt.title(u'%s: E: %.3f eV, V: %.3f Å^3, B: %.3f GPa' %
(self.eos_string, self.e0, self.v0, self.B / kJ * 1.e24))
except ImportError:
plt.xlabel(u'volume [L(length)^3]')
plt.ylabel(u'energy [E(energy)]')
plt.title(u'%s: E: %.3f E, V: %.3f L^3, B: %.3e E/L^3' %
(self.eos_string, self.e0, self.v0, self.B))
if show:
plt.show()
if filename is not None:
f.savefig(filename)
return f
if __name__ == '__main__':
try:
import numpy as np
# from ase/test/eos.py
volumes = [29.205536, 30.581492, 32.000000, 33.461708, 34.967264]
energies = [0.0190898, -0.0031172, -0.0096925, -0.0004014, 0.0235753]
sjeos = (31.867118229937798, -0.0096410046694188622, 0.23984474782755572)
eos = EquationOfStateSJEOS(volumes, energies)
v0, e0, B = eos.fit()
assert abs(v0 - sjeos[0]) < 5.e-6
assert abs(B - sjeos[2]) < 5.e-6
except ImportError:
pass
|
gpl-2.0
|
gregcaporaso/scikit-bio
|
skbio/diversity/_driver.py
|
4
|
15329
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import functools
import itertools
import numpy as np
import sklearn.metrics
import pandas as pd
import skbio
from skbio.diversity.alpha._faith_pd import _faith_pd, _setup_faith_pd
from skbio.diversity.beta._unifrac import (
_setup_multiple_unweighted_unifrac, _setup_multiple_weighted_unifrac,
_normalize_weighted_unifrac_by_default)
from skbio.util._decorator import experimental, deprecated
from skbio.stats.distance import DistanceMatrix
from skbio.diversity._util import (_validate_counts_matrix,
_get_phylogenetic_kwargs)
def _get_alpha_diversity_metric_map():
return {
'ace': skbio.diversity.alpha.ace,
'chao1': skbio.diversity.alpha.chao1,
'chao1_ci': skbio.diversity.alpha.chao1_ci,
'berger_parker_d': skbio.diversity.alpha.berger_parker_d,
'brillouin_d': skbio.diversity.alpha.brillouin_d,
'dominance': skbio.diversity.alpha.dominance,
'doubles': skbio.diversity.alpha.doubles,
'enspie': skbio.diversity.alpha.enspie,
'esty_ci': skbio.diversity.alpha.esty_ci,
'faith_pd': skbio.diversity.alpha.faith_pd,
'fisher_alpha': skbio.diversity.alpha.fisher_alpha,
'goods_coverage': skbio.diversity.alpha.goods_coverage,
'heip_e': skbio.diversity.alpha.heip_e,
'kempton_taylor_q': skbio.diversity.alpha.kempton_taylor_q,
'margalef': skbio.diversity.alpha.margalef,
'mcintosh_d': skbio.diversity.alpha.mcintosh_d,
'mcintosh_e': skbio.diversity.alpha.mcintosh_e,
'menhinick': skbio.diversity.alpha.menhinick,
'michaelis_menten_fit': skbio.diversity.alpha.michaelis_menten_fit,
'observed_otus': skbio.diversity.alpha.observed_otus,
'osd': skbio.diversity.alpha.osd,
'pielou_e': skbio.diversity.alpha.pielou_e,
'robbins': skbio.diversity.alpha.robbins,
'shannon': skbio.diversity.alpha.shannon,
'simpson': skbio.diversity.alpha.simpson,
'simpson_e': skbio.diversity.alpha.simpson_e,
'singles': skbio.diversity.alpha.singles,
'strong': skbio.diversity.alpha.strong,
'gini_index': skbio.diversity.alpha.gini_index,
'lladser_pe': skbio.diversity.alpha.lladser_pe,
'lladser_ci': skbio.diversity.alpha.lladser_ci}
@experimental(as_of="0.4.1")
def get_alpha_diversity_metrics():
""" List scikit-bio's alpha diversity metrics
The alpha diversity metrics listed here can be passed as metrics to
``skbio.diversity.alpha_diversity``.
Returns
-------
list of str
Alphabetically sorted list of alpha diversity metrics implemented in
scikit-bio.
See Also
--------
alpha_diversity
get_beta_diversity_metrics
"""
metrics = _get_alpha_diversity_metric_map()
return sorted(metrics.keys())
@experimental(as_of="0.4.1")
def get_beta_diversity_metrics():
""" List scikit-bio's beta diversity metrics
The beta diversity metrics listed here can be passed as metrics to
``skbio.diversity.beta_diversity``.
Returns
-------
list of str
Alphabetically sorted list of beta diversity metrics implemented in
scikit-bio.
See Also
--------
beta_diversity
get_alpha_diversity_metrics
scipy.spatial.distance.pdist
Notes
-----
SciPy implements many additional beta diversity metrics that are not
included in this list. See documentation for
``scipy.spatial.distance.pdist`` for more details.
"""
return sorted(['unweighted_unifrac', 'weighted_unifrac'])
@experimental(as_of="0.4.1")
def alpha_diversity(metric, counts, ids=None, validate=True, **kwargs):
""" Compute alpha diversity for one or more samples
Parameters
----------
metric : str, callable
The alpha diversity metric to apply to the sample(s). Passing metric as
a string is preferable as this often results in an optimized version of
the metric being used.
counts : 1D or 2D array_like of ints or floats
Vector or matrix containing count/abundance data. If a matrix, each row
should contain counts of OTUs in a given sample.
ids : iterable of strs, optional
Identifiers for each sample in ``counts``. By default, samples will be
assigned integer identifiers in the order that they were provided.
validate: bool, optional
If `False`, validation of the input won't be performed. This step can
be slow, so if validation is run elsewhere it can be disabled here.
However, invalid input data can lead to invalid results or error
messages that are hard to interpret, so this step should not be
bypassed if you're not certain that your input data are valid. See
:mod:`skbio.diversity` for the description of what validation entails
so you can determine if you can safely disable validation.
kwargs : kwargs, optional
Metric-specific parameters.
Returns
-------
pd.Series
Values of ``metric`` for all vectors provided in ``counts``. The index
will be ``ids``, if provided.
Raises
------
ValueError, MissingNodeError, DuplicateNodeError
If validation fails. Exact error will depend on what was invalid.
TypeError
If invalid method-specific parameters are provided.
See Also
--------
skbio.diversity
skbio.diversity.alpha
skbio.diversity.get_alpha_diversity_metrics
skbio.diversity.beta_diversity
"""
metric_map = _get_alpha_diversity_metric_map()
if validate:
counts = _validate_counts_matrix(counts, ids=ids)
if metric == 'faith_pd':
otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
counts_by_node, branch_lengths = _setup_faith_pd(
counts, otu_ids, tree, validate, single_sample=False)
counts = counts_by_node
metric = functools.partial(_faith_pd, branch_lengths=branch_lengths)
elif callable(metric):
metric = functools.partial(metric, **kwargs)
elif metric in metric_map:
metric = functools.partial(metric_map[metric], **kwargs)
else:
raise ValueError('Unknown metric provided: %r.' % metric)
# kwargs is provided here so an error is raised on extra kwargs
results = [metric(c, **kwargs) for c in counts]
return pd.Series(results, index=ids)
@deprecated(as_of='0.5.0', until='0.6.0',
reason=('The return type is unstable. Developer caution is '
'advised. The resulting DistanceMatrix object will '
'include zeros when distance has not been calculated, and '
'therefore can be misleading.'))
def partial_beta_diversity(metric, counts, ids, id_pairs, validate=True,
**kwargs):
"""Compute distances only between specified ID pairs
Parameters
----------
metric : str or callable
The pairwise distance function to apply. If ``metric`` is a string, it
must be resolvable by scikit-bio (e.g., UniFrac methods), or must be
callable.
counts : 2D array_like of ints or floats
Matrix containing count/abundance data where each row contains counts
of OTUs in a given sample.
ids : iterable of strs
Identifiers for each sample in ``counts``.
id_pairs : iterable of tuple
An iterable of tuples of IDs to compare (e.g., ``[('a', 'b'), ('a',
        'c'), ...]``). If specified, the set of IDs described must be a subset
of ``ids``.
validate : bool, optional
See ``skbio.diversity.beta_diversity`` for details.
kwargs : kwargs, optional
Metric-specific parameters.
Returns
-------
skbio.DistanceMatrix
Distances between pairs of samples indicated by id_pairs. Pairwise
distances not defined by id_pairs will be 0.0. Use this resulting
DistanceMatrix with caution as 0.0 is a valid distance.
Raises
------
ValueError
If ``ids`` are not specified.
If ``id_pairs`` are not a subset of ``ids``.
If ``metric`` is not a callable or is unresolvable string by
scikit-bio.
If duplicates are observed in ``id_pairs``.
See Also
--------
skbio.diversity.beta_diversity
skbio.diversity.get_beta_diversity_metrics
"""
if validate:
counts = _validate_counts_matrix(counts, ids=ids)
id_pairs = list(id_pairs)
all_ids_in_pairs = set(itertools.chain.from_iterable(id_pairs))
if not all_ids_in_pairs.issubset(ids):
raise ValueError("`id_pairs` are not a subset of `ids`")
hashes = {i for i in id_pairs}.union({i[::-1] for i in id_pairs})
if len(hashes) != len(id_pairs) * 2:
raise ValueError("A duplicate or a self-self pair was observed.")
if metric == 'unweighted_unifrac':
otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
metric, counts_by_node = _setup_multiple_unweighted_unifrac(
counts, otu_ids=otu_ids, tree=tree, validate=validate)
counts = counts_by_node
elif metric == 'weighted_unifrac':
# get the value for normalized. if it was not provided, it will fall
# back to the default value inside of _weighted_unifrac_pdist_f
normalized = kwargs.pop('normalized',
_normalize_weighted_unifrac_by_default)
otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
metric, counts_by_node = _setup_multiple_weighted_unifrac(
counts, otu_ids=otu_ids, tree=tree, normalized=normalized,
validate=validate)
counts = counts_by_node
elif callable(metric):
metric = functools.partial(metric, **kwargs)
# remove all values from kwargs, since they have already been provided
# through the partial
kwargs = {}
else:
raise ValueError("partial_beta_diversity is only compatible with "
"optimized unifrac methods and callable functions.")
dm = np.zeros((len(ids), len(ids)), dtype=float)
id_index = {id_: idx for idx, id_ in enumerate(ids)}
id_pairs_indexed = ((id_index[u], id_index[v]) for u, v in id_pairs)
for u, v in id_pairs_indexed:
dm[u, v] = metric(counts[u], counts[v], **kwargs)
return DistanceMatrix(dm + dm.T, ids)
@experimental(as_of="0.4.0")
def beta_diversity(metric, counts, ids=None, validate=True, pairwise_func=None,
**kwargs):
"""Compute distances between all pairs of samples
Parameters
----------
metric : str, callable
The pairwise distance function to apply. See the scipy ``pdist`` docs
and the scikit-bio functions linked under *See Also* for available
        metrics. Passing metrics as strings is preferable, as this often
results in an optimized version of the metric being used.
counts : 2D array_like of ints or floats or 2D pandas DataFrame
Matrix containing count/abundance data where each row contains counts
of OTUs in a given sample.
ids : iterable of strs, optional
Identifiers for each sample in ``counts``. By default, samples will be
assigned integer identifiers in the order that they were provided
(where the type of the identifiers will be ``str``).
validate : bool, optional
If `False`, validation of the input won't be performed. This step can
be slow, so if validation is run elsewhere it can be disabled here.
However, invalid input data can lead to invalid results or error
messages that are hard to interpret, so this step should not be
bypassed if you're not certain that your input data are valid. See
:mod:`skbio.diversity` for the description of what validation entails
so you can determine if you can safely disable validation.
pairwise_func : callable, optional
The function to use for computing pairwise distances. This function
must take ``counts`` and ``metric`` and return a square, hollow, 2-D
``numpy.ndarray`` of dissimilarities (floats). Examples of functions
that can be provided are ``scipy.spatial.distance.pdist`` and
``sklearn.metrics.pairwise_distances``. By default,
``sklearn.metrics.pairwise_distances`` will be used.
kwargs : kwargs, optional
Metric-specific parameters.
Returns
-------
skbio.DistanceMatrix
Distances between all pairs of samples (i.e., rows). The number of
rows and columns will be equal to the number of rows in ``counts``.
Raises
------
ValueError, MissingNodeError, DuplicateNodeError
If validation fails. Exact error will depend on what was invalid.
TypeError
If invalid method-specific parameters are provided.
See Also
--------
skbio.diversity
skbio.diversity.beta
skbio.diversity.get_beta_diversity_metrics
skbio.diversity.alpha_diversity
scipy.spatial.distance.pdist
sklearn.metrics.pairwise_distances
"""
if validate:
counts = _validate_counts_matrix(counts, ids=ids)
if 0 in counts.shape:
# if the input counts are empty, return an empty DistanceMatrix.
        # this check is not necessary for scipy.spatial.distance.pdist, but
        # sklearn.metrics.pairwise_distances raises an exception on empty
        # input data.
return DistanceMatrix(np.zeros((len(ids), len(ids))), ids)
if metric == 'unweighted_unifrac':
otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
metric, counts_by_node = _setup_multiple_unweighted_unifrac(
counts, otu_ids=otu_ids, tree=tree, validate=validate)
counts = counts_by_node
elif metric == 'weighted_unifrac':
# get the value for normalized. if it was not provided, it will fall
# back to the default value inside of _weighted_unifrac_pdist_f
normalized = kwargs.pop('normalized',
_normalize_weighted_unifrac_by_default)
otu_ids, tree, kwargs = _get_phylogenetic_kwargs(counts, **kwargs)
metric, counts_by_node = _setup_multiple_weighted_unifrac(
counts, otu_ids=otu_ids, tree=tree, normalized=normalized,
validate=validate)
counts = counts_by_node
elif callable(metric):
metric = functools.partial(metric, **kwargs)
# remove all values from kwargs, since they have already been provided
# through the partial
kwargs = {}
else:
# metric is a string that scikit-bio doesn't know about, for
# example one of the SciPy metrics
pass
if pairwise_func is None:
pairwise_func = sklearn.metrics.pairwise_distances
distances = pairwise_func(counts, metric=metric, **kwargs)
return DistanceMatrix(distances, ids)
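# A minimal usage sketch (not part of the scikit-bio API): computing all
# pairwise Bray-Curtis distances with ``beta_diversity``. 'braycurtis' is not
# one of the optimized scikit-bio metrics, so it falls through to the final
# ``else`` above and is handed as a plain string to
# sklearn.metrics.pairwise_distances. The data are invented; wrapped in a
# helper so nothing executes at import time.
def _beta_diversity_sketch():
    counts = [[23, 64, 14, 0, 3, 75],
              [0, 3, 35, 42, 0, 12],
              [33, 5, 5, 0, 40, 22]]
    ids = ['A', 'B', 'C']
    dm = beta_diversity('braycurtis', counts, ids=ids)
    return dm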
|
bsd-3-clause
|
krez13/scikit-learn
|
sklearn/datasets/svmlight_format.py
|
29
|
16073
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
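# A small round-trip sketch (not part of scikit-learn): dump a tiny dense
# matrix to an in-memory buffer with ``dump_svmlight_file`` and read it back
# with ``load_svmlight_file``. The toy data are invented; wrapped in a helper
# so nothing executes at import time.
def _svmlight_roundtrip_sketch():
    X_demo = np.array([[1.0, 0.0, 2.5],
                       [0.0, 3.0, 0.0]])
    y_demo = np.array([0, 1])
    buf = io.BytesIO()
    dump_svmlight_file(X_demo, y_demo, buf, zero_based=True)
    buf.seek(0)
    # zero_based=True skips the one-based heuristic; n_features pins the width
    X_back, y_back = load_svmlight_file(buf, n_features=3, zero_based=True)
    assert np.allclose(X_back.toarray(), X_demo)
    assert np.array_equal(y_back, y_demo)
    return X_back, y_back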
|
bsd-3-clause
|
tobydriscoll/fnc-extras
|
python/FNC10.py
|
1
|
6758
|
from scipy import *
from numpy import *
from matplotlib.pyplot import *
from scipy.linalg import *
from numpy.linalg import *
from scipy.optimize import root_scalar
from scipy.integrate import solve_ivp
from FNC04 import levenberg
def shoot(phi,xspan,lval,lder,rval,rder,init):
"""
shoot(phi,xspan,lval,lder,rval,rder,init)
Use the shooting method to solve a two-point boundary value problem. The ODE is
u'' = `phi`(x,u,u') for x in `xspan`. Specify a function value or derivative at
the left endpoint using `lval` and `lder`, respectively, and similarly for the
right endpoint using `rval` and `rder`. (Use an empty array to denote an
unknown quantity.) The value `init` is an initial guess for whichever value is
missing at the left endpoint.
Return vectors for the nodes, the values of u, and the values of u'.
"""
# Tolerances for IVP solver and rootfinder.
ivp_opt = 1e-6
optim_opt = 1e-5
# Evaluate the difference between computed and target values at x=b.
def objective(s):
nonlocal x, v # change these values in outer scope
# Combine s with the known left endpoint value.
if len(lder)==0:
v_init = [ lval[0], s ]
else:
v_init = [ s, lder[0] ]
# ODE posed as a first-order equation in 2 variables.
def shootivp(x,v):
return array([ v[1], phi(x,v[0],v[1]) ])
x = linspace(xspan[0],xspan[1],400) # make decent plots on return
sol = solve_ivp(shootivp,xspan,v_init,t_eval=x)
x = sol.t; v = sol.y
if len(rder)==0:
return v[0,-1] - rval[0]
else:
return v[1,-1] - rder[0]
# Find the unknown quantity at x=a by rootfinding.
x = []; v = []; # the values will be overwritten
s = root_scalar(objective,x0=init,x1=init+0.05,xtol=optim_opt).root
# Don't need to solve the IVP again. It was done within the
# objective function already.
u = v[0] # solution
dudx = v[1] # derivative
return x,u,dudx
def diffmat2(n,xspan):
"""
diffmat2(n,xspan)
Compute 2nd-order-accurate differentiation matrices on `n`+1 points in the
interval `xspan`. Return a vector of nodes, and the matrices for the first
and second derivatives.
"""
a,b = xspan
h = (b-a)/n
x = linspace(a,b,n+1) # nodes
# Define most of Dx by its diagonals.
dp = 0.5/h*ones(n) # superdiagonal
dm = -0.5/h*ones(n) # subdiagonal
Dx = diag(dm,-1) + diag(dp,1)
# Fix first and last rows.
Dx[0,:3] = array([-1.5,2,-0.5])/h
Dx[-1,-3:] = array([0.5,-2,1.5])/h
# Define most of Dxx by its diagonals.
d0 = -2/h**2*ones(n+1) # main diagonal
dp = ones(n)/h**2 # superdiagonal and subdiagonal
Dxx = diag(d0,0) + diag(dp,-1) + diag(dp,1)
# Fix first and last rows.
Dxx[0,:4] = array([2,-5,4,-1])/h**2
Dxx[-1,-4:] = array([-1,4,-5,2])/h**2
return x,Dx,Dxx
def diffcheb(n,xspan):
"""
diffcheb(n,xspan)
Compute Chebyshev differentiation matrices on `n`+1 points in the
interval `xspan`. Return a vector of nodes, and the matrices for the first
and second derivatives.
"""
x = -cos( arange(n+1)*pi/n ) # nodes in [-1,1]
Dx = zeros([n+1,n+1])
c = hstack([2.,ones(n-1),2.]) # endpoint factors
# Off-diagonal entries
Dx = zeros([n+1,n+1])
for i in range(n+1):
for j in range(n+1):
if i != j:
Dx[i,j] = (-1)**(i+j) * c[i] / (c[j]*(x[i]-x[j]))
# Diagonal entries by the "negative sum trick"
for i in range(n+1):
Dx[i,i] = -sum( [Dx[i,j] for j in range(n+1) if j!=i] )
# Transplant to [a,b]
a,b = xspan
x = a + (b-a)*(x+1)/2
Dx = 2*Dx/(b-a)
# Second derivative
Dxx = Dx @ Dx
return x,Dx,Dxx
def bvplin(p,q,r,xspan,lval,rval,n):
"""
bvplin(p,q,r,xspan,lval,rval,n)
    Use finite differences to solve a linear boundary value problem. The ODE is
u''+`p`(x)u'+`q`(x)u = `r`(x) on the interval `xspan`, with endpoint function
values given as `lval` and `rval`. There will be `n`+1 equally spaced nodes,
including the endpoints.
Return vectors of the nodes and the solution values.
"""
x,Dx,Dxx = diffmat2(n,xspan)
P = diag(p(x))
Q = diag(q(x))
L = Dxx + P@Dx + Q # ODE expressed at the nodes
# Replace first and last rows using boundary conditions.
I = eye(n+1)
A = vstack([ I[0], L[1:-1], I[-1] ] )
b = hstack([ lval, r(x[1:-1]), rval ])
# Solve the system.
u = solve(A,b)
return x,u
def bvp(phi,xspan,lval,lder,rval,rder,init):
"""
bvp(phi,xspan,lval,lder,rval,rder,init)
Use finite differences to solve a two-point boundary value problem. The ODE is
u'' = `phi`(x,u,u') for x in `xspan`. Specify a function value or derivative at
the left endpoint using `lval` and `lder`, respectively, and similarly for the
right endpoint using `rval` and `rder`. (Use an empty array to denote an
unknown quantity.) The value `init` is an initial guess for whichever value is
missing at the left endpoint.
Return vectors for the nodes and the values of u.
"""
n = len(init) - 1
x,Dx,Dxx = diffmat2(n,xspan)
h = x[1]-x[0]
def residual(u):
        # Compute the difference between u'' and phi(x,u,u') at the
        # interior nodes, then replace the first and last entries with
        # the boundary-condition residuals.
dudx = Dx@u # discrete u'
d2udx2 = Dxx@u # discrete u''
f = d2udx2 - phi(x,u,dudx)
# Replace first and last values by boundary conditions.
if len(lder)==0:
f[0] = (u[0] - lval[0])/h**2
else:
f[0] = (dudx[0] - lder[0])/h
if len(rder)==0:
f[-1] = (u[-1] - rval[0])/h**2
else:
f[-1] = (dudx[-1] - rder[0])/h
return f
u = levenberg(residual,init)
return x,u[:,-1]
def fem(c,s,f,a,b,n):
"""
fem(c,s,f,a,b,n)
Use a piecewise linear finite element method to solve a two-point boundary
value problem. The ODE is (`c`(x)u')' + `s`(x)u = `f`(x) on the interval
[`a`,`b`], and the boundary values are zero. The discretization uses `n` equal
subintervals.
Return vectors for the nodes and the values of u.
"""
# Define the grid.
h = (b-a)/n
x = linspace(a,b,n+1)
# Templates for the subinterval matrix and vector contributions.
Ke = array( [[1,-1], [-1,1]] )
Me = (1/6)*array( [[2,1], [1,2]] )
fe = (1/2)*array([1, 1])
    # Evaluate coefficient functions and find average values.
cval = c(x); cbar = (cval[:-1]+cval[1:]) / 2
sval = s(x); sbar = (sval[:-1]+sval[1:]) / 2
fval = f(x); fbar = (fval[:-1]+fval[1:]) / 2
# Assemble global system, one interval at a time.
K = zeros([n-1,n-1]); M = zeros([n-1,n-1]); f = zeros(n-1)
K[0,0] = cbar[0]/h; M[0,0] = sbar[0]*h/3; f[0] = fbar[0]*h/2
K[-1,-1] = cbar[-1]/h; M[-1,-1] = sbar[-1]*h/3; f[-1] = fbar[-1]*h/2
for k in range(1,n-1):
K[k-1:k+1,k-1:k+1] += (cbar[k]/h) * Ke
M[k-1:k+1,k-1:k+1] += (sbar[k]*h) * Me
f[k-1:k+1] += (fbar[k]*h) * fe
# Solve system for the interior values.
u = solve(K+M,f)
u = hstack([0, u, 0]) # put the boundary values into the result
return x,u
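# A quick self-check of ``bvplin`` (illustrative only, guarded so it runs just
# when this file is executed directly): u'' + u = 0 on [0, pi/2] with
# u(0) = 0 and u(pi/2) = 1 has exact solution sin(x), so the finite-difference
# solution should agree with sin(x) to second-order accuracy.
if __name__ == '__main__':
    p = lambda x: zeros_like(x)
    q = lambda x: ones_like(x)
    r = lambda x: zeros_like(x)
    x, u = bvplin(p, q, r, [0, pi/2], 0.0, 1.0, 200)
    print("max bvplin error:", abs(u - sin(x)).max())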
|
mit
|
hyperspy/hyperspyUI
|
hyperspyui/plugins/segmentation.py
|
3
|
4922
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
from hyperspyui.plugins.plugin import Plugin
from hyperspy.signal import BaseSignal
import numpy as np
from hyperspy.signals import Signal1D, Signal2D
from hyperspyui.tools import MultiSelectionTool
from hyperspyui.util import win2sig
from hyperspy.misc.rgb_tools import regular_array2rgbx
import matplotlib.cm as plt_cm
# TODO: Add dialog for manual editing of ROIs + Preview checkbox.
class Segmentation(Plugin):
name = "Segmentation"
def create_tools(self):
self.tool = MultiSelectionTool()
self.tool.name = 'Segmentation tool'
self.tool.icon = 'segmentation.svg'
self.tool.category = 'Image'
self.tool.updated[BaseSignal, list].connect(self._on_update)
self.tool.accepted[BaseSignal, list].connect(self.segment)
self.tool.validator = self._tool_signal_validator
self.add_tool(self.tool, self._select_image)
self.map = {}
self.ui.actions[self.tool.name].triggered.connect(
lambda c=None: self.start())
def _select_image(self, win, action):
"""Signal selection callback for actions that are only valid for
selected Signals.
"""
sw = win2sig(win, self.ui.signals, self.ui._plotting_signal)
if sw is None or not sw.signal.axes_manager.signal_dimension == 2:
action.setEnabled(False)
else:
action.setEnabled(True)
def _tool_signal_validator(self, signal, axes):
if not self.tool._default_validator(signal, axes):
return False
return signal in self.map
def start(self, signal=None):
if signal is None:
signal = self.ui.get_selected_signal()
data = signal()
hist = signal._get_signal_signal(data).get_histogram(1000)
hist.plot()
s_out = Signal1D(self._make_gray(data))
s_out.change_dtype('rgb8')
s_out.plot()
self.map[hist] = (signal, s_out)
def _make_gray(self, data):
data = data.astype(np.float) - np.nanmin(data)
data /= np.nanmax(data)
return (255 * plt_cm.gray(data)).astype('uint8')
def segment(self, signal, rois):
if signal is None:
signal = self.ui.get_selected_signal()
if signal in self.map:
histogram = signal
source, s_out = self.map[signal]
else:
found = False
for h, (s, s_out) in self.map.items():
if signal in (s, s_out):
found = True
histogram = h
source = s
break
if not found:
histogram = None
s_out = None
source = signal
if histogram is not None:
self.ui.lut_signalwrapper[histogram].close()
if s_out is not None:
self.ui.lut_signalwrapper[s_out].close()
N = len(rois)
if N <= 256:
dtype = np.uint8
elif N <= 2**16:
dtype = np.uint16
else:
dtype = np.uint32
src_data = source()
data = np.zeros(src_data.shape, dtype)
data[...] = np.nan
for i, r in enumerate(rois):
# Checks has to be inclusive to catch edges
mask = (src_data <= r.right) & (src_data >= r.left)
data[mask] = i + 1
s_seg = Signal2D(data)
s_seg.plot(cmap=plt_cm.jet)
roi_str = '[' + ',\n'.join(['hs.roi.' + str(r) for r in rois]) + ']'
self.record_code('segment_rois = ' + roi_str)
self.record_code('<p>.segment(None, segment_rois)')
def _on_update(self, histogram, rois):
if histogram not in self.map:
return
source, s_out = self.map[histogram]
N = len(rois)
data = source()
gray = self._make_gray(data)
s_out.data = regular_array2rgbx(gray)
for i in range(N):
color = (255 * plt_cm.hsv([float(i) / max(N, 10)])).astype('uint8')
color = regular_array2rgbx(color)
r = rois[i]
mask = (data < r.right) & (data >= r.left)
s_out.data[mask] = color
s_out.update_plot()
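# A self-contained sketch of the labelling rule used in ``Segmentation.segment``
# above (illustrative only, no GUI involved): every ROI interval [left, right]
# claims the pixels whose value lies inside it, inclusive at both edges, and
# the segment image stores the 1-based index of the ROI that claimed them.
def _label_by_intervals(src_data, intervals):
    labels = np.zeros(src_data.shape, dtype=np.uint8)
    for i, (left, right) in enumerate(intervals):
        mask = (src_data <= right) & (src_data >= left)
        labels[mask] = i + 1
    return labels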
|
gpl-3.0
|
Basvanstein/OWCK
|
OWCK/OWCK.py
|
1
|
30615
|
# -*- coding: utf-8 -*-
# Author: Hao Wang <[email protected]>
# Bas van Stein <[email protected]>
import numpy as np
from numpy import array, ones, inner, dot, diag, size
from numpy.random import shuffle
from copy import deepcopy, copy
from multiprocessing import Pool
from sklearn.utils.validation import check_is_fitted
import itertools
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import skfuzzy as fuzz
from .gprhao import GaussianProcess_extra
from .regressiontree import IncrementalRegressionTree
def train_model( clus, model, training_set):
shape = np.array(training_set[0]).shape
while True:
try:
model = model.fit(*training_set)
break
except ValueError:
#print('Current nugget setting is too small!' +\
# ' It will be tuned up automatically')
model.nugget *= 10
return model
def train_modelstar(inputdata):
return train_model(*inputdata)
MACHINE_EPSILON = np.finfo(np.double).eps
class OWCK(GaussianProcess_extra):
"""The Optimal Weighted Cluster Kriging/Gaussian Process class
    This class inherits from the GaussianProcess class in the sklearn library.
Most of the parameters are contained in sklearn.gaussian_process.
Please check the docstring of Gaussian Process parameters in sklearn.
Only newly introduced parameters are documented below.
Parameters
----------
n_cluster : int, optional
The number of clusters, determines the number of the Gaussian Process
model to build. It is the speed-up factor in OWCK.
min_leaf_size : int, optional
if min_leaf_size > 0, min_leaf_size is used to determine the number of clusters for
the model tree clustering method.
cluster_method : string, optional
The clustering algorithm used to partition the data set.
Built-in clustering algorithm are:
'k-mean', 'GMM', 'fuzzy-c-mean', 'random', 'tree'
        Note that GMM and fuzzy-c-mean are fuzzy clustering algorithms;
        with these algorithms you can set the overlap you desire.
        'tree' is a non-fuzzy method that builds local models per leaf of a
        regression tree, and it is also able to update the model with new records.
overlap : float, optional
The percentage of overlap when using a fuzzy cluster method.
Each cluster will be of the same size.
is_parallel : boolean, optional
        A boolean switching parallel model fitting on. If it is True, then
        all the underlying Gaussian Process models will be fitted in parallel
        using a multiprocessing pool. Otherwise, all the models will be fitted
        sequentially.
Attributes
----------
cluster_label : the cluster label of the training set after clustering
clusterer : the clustering algorithm used.
models : a list of (fitted) Gaussian Process models built on each cluster.
References
----------
.. [SWKBE15] `Bas van Stein, Hao Wang, Wojtek Kowalczyk, Thomas Baeck
and Michael Emmerich. Optimally Weighted Cluster Kriging for Big
Data Regression. In 14th International Symposium, IDA 2015, pages
310-321, 2015`
http://link.springer.com/chapter/10.1007%2F978-3-319-24465-5_27#
"""
def __init__(self, regr='constant', corr='squared_exponential',
n_cluster=8, min_leaf_size=0, cluster_method='k-mean', overlap=0.0, beta0=None,
storage_mode='full', verbose=False, theta0=0.1, thetaL=None,
thetaU=None, sigma2=None, optimizer='BFGS', random_start=1,
normalize=False, nugget=10. * MACHINE_EPSILON, random_state=None,
nugget_estim=True, is_parallel=False):
super(OWCK, self).__init__(regr=regr, corr=corr,
beta0=beta0, verbose=verbose,
theta0=theta0, thetaL=thetaL, thetaU=thetaU, sigma2=sigma2,
optimizer=optimizer, random_start=random_start,
normalize=normalize, nugget=nugget, nugget_estim=nugget_estim,
random_state=random_state)
self.empty_model = GaussianProcess_extra(regr=regr, corr=corr,
beta0=beta0, verbose=verbose,
theta0=theta0, thetaL=thetaL, thetaU=thetaU, sigma2=sigma2,
optimizer=optimizer, random_start=random_start, normalize=normalize,
nugget=nugget, nugget_estim=nugget_estim, random_state=random_state)
self.n_cluster = n_cluster
self.is_parallel = is_parallel
self.verbose = verbose
self.overlap = overlap #overlap for fuzzy clusters
self.min_leaf_size = min_leaf_size
self.regr_label = regr
self.fitted = False
if cluster_method not in ['k-mean', 'GMM', 'fuzzy-c-mean', 'random', 'tree']:
raise Exception('{} clustering is not supported!'.format(cluster_method))
else:
self.cluster_method = cluster_method
def __clustering(self, X, y=None):
"""
The clustering procedure of the Optimal Weighted Clustering Gaussian
Process. This function should not be called externally
"""
self.sizeX = len(X)
if self.cluster_method == 'k-mean':
clusterer = KMeans(n_clusters=self.n_cluster)
clusterer.fit(X)
self.cluster_label = clusterer.labels_
self.clusterer = clusterer
elif self.cluster_method == 'tree':
if (self.min_leaf_size > 0):
self.minsamples = self.min_leaf_size
tree = IncrementalRegressionTree(min_samples_leaf=self.min_leaf_size)
else:
self.minsamples = int(len(X)/(self.n_cluster))
tree = IncrementalRegressionTree(min_samples_leaf=self.minsamples)
tree.fit(X,y)
labels = tree.apply(X)
clusters = np.unique(labels)
k = len(clusters)
if self.verbose:
print ("leafs:",k)
self.n_cluster = k
self.leaf_labels = np.unique(labels)
self.cluster_label = labels
self.clusterer = tree
elif self.cluster_method == 'random':
r = self.n_sample % self.n_cluster
m = (self.n_sample - r) / self.n_cluster
self.cluster_label = array(range(self.n_cluster) * m + range(r))
self.clusterer = None
shuffle(self.cluster_label)
elif self.cluster_method == 'GMM': #GMM from sklearn
self.clusterer = GaussianMixture(n_components=self.n_cluster, n_init=10)
self.clusterer.fit(X)
self.cluster_labels_proba = self.clusterer.predict_proba(X)
self.cluster_label = self.clusterer.predict(X)
elif self.cluster_method == 'fuzzy-c-mean': #Fuzzy C-means from sklearn
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(X.T, self.n_cluster, 2, error=0.000005, maxiter=10000, init=None)
self.clusterer = cntr #save the centers for cmeans_predict
self.cluster_labels_proba = u.T
self.cluster_labels_proba = np.array(self.cluster_labels_proba)
self.cluster_label = np.argmax(u, axis=0)
self.cluster_label = np.array(self.cluster_label)
def __fit(self, X, y):
"""
The Optimal Weighted Cluster Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
ocwk : self
A fitted Cluster Gaussian Process model object awaiting data to
perform predictions.
"""
self.n_sample, self.n_feature = X.shape
if y.shape[0] != self.n_sample:
raise Exception('Training input and target do not match!')
# clustering
self.__clustering(X,y)
# model creation
self.models = None;
if (self.cluster_method == 'tree'):
self.models = [deepcopy(self) for i in self.leaf_labels]
else:
self.models = [deepcopy(self) for i in range(self.n_cluster)]
for m in self.models:
m.__class__ = GaussianProcess_extra
self.X = X
self.y = y
# model fitting
if self.is_parallel: # parallel model fitting
            # fit the per-cluster models in parallel via a multiprocessing
            # Pool (see train_modelstar above)
# prepare the training set for each GP model
if (self.cluster_method=='k-mean' or self.cluster_method=='random'):
idx = [self.cluster_label == i for i in range(self.n_cluster)]
elif (self.cluster_method=='tree'):
idx = [self.cluster_label == self.leaf_labels[i] for i in range(self.n_cluster)]
if (self.verbose):
print "len cluster", len(idx)
else:
targetMemberSize = (len(self.X) / self.n_cluster)*(1.0+self.overlap)
idx = []
minindex = np.argmin(self.y)
maxindex = np.argmax(self.y)
for i in range(self.n_cluster):
idx_temp = np.argsort(self.cluster_labels_proba[:,i])[-targetMemberSize:]
if (minindex not in idx_temp):
idx_temp = np.hstack((idx_temp,[minindex]))
if (maxindex not in idx_temp):
idx_temp = np.hstack((idx_temp,[maxindex]))
idx.append(idx_temp)
training = [(X[index, :], y[index]) for index in idx]
training_set = itertools.izip(range(self.n_cluster),deepcopy(self.models),training )
pool = Pool(self.n_cluster)
models = pool.map(train_modelstar, training_set)
pool.close()
pool.join()
self.models = models
else: # sequential model fitting
for i in range(self.n_cluster):
if (self.cluster_method=='k-mean' or self.cluster_method=='random'):
idx = self.cluster_label == i
elif (self.cluster_method=='tree'):
idx = self.cluster_label == self.leaf_labels[i]
else:
targetMemberSize = (len(self.X) / self.n_cluster)*(1.0+self.overlap)
idx = []
minindex = np.argmin(self.y)
maxindex = np.argmax(self.y)
# TODO: fix line here
idx = np.argsort(self.cluster_labels_proba[:,i])[-int(targetMemberSize):]
if (minindex not in idx):
idx = np.hstack((idx,[minindex]))
if (maxindex not in idx):
idx = np.hstack((idx,[maxindex]))
model = self.models[i]
# dirty fix so that low nugget errors will increase the
# nugget till the model fits
while True:
try:
# super is needed here to call the 'fit' function in the
# parent class (GaussianProcess_extra)
if (self.cluster_method=='tree' and self.verbose):
print 'leaf: ', self.leaf_labels[i]
length_lb = 1e-10
length_ub = 1e2
X = self.X[idx, :]
x_lb, x_ub = X.min(0), X.max(0)
model.thetaL = length_ub ** -2. / (x_ub - x_lb) ** 2. * np.ones(self.n_feature)
model.thetaU = length_lb ** -2. / (x_ub - x_lb) ** 2. * np.ones(self.n_feature)
model.fit(self.X[idx, :], self.y[idx])
break
except Exception as e:
print e
if self.verbose:
print('Current nugget setting is too small!' +\
' It will be tuned up automatically')
#pdb.set_trace()
model.nugget *= 10
def gradient(self, x):
"""
Calculate the gradient of the posterior mean and variance
"""
check_is_fitted(self, 'X')
x = np.atleast_2d(x)
if self.cluster_method == 'tree':
idx = self.clusterer.apply(x.reshape(1, -1))[0]
active_GP_idx = np.nonzero(self.leaf_labels == idx)[0][0]
active_GP = self.models[active_GP_idx]
y_dx, mse_dx = active_GP.gradient(x)
elif self.cluster_method == 'GMM':
# TODO: implement this
pass
elif self.cluster_method in ['random', 'k-mean']:
par = {}
_ = self.predict(x, eval_MSE=False, par_out=par)
weights = par['weights'].reshape(-1, 1)
y = par['y'].reshape(-1, 1)
mse = par['mse'].reshape(-1, 1)
normalized_mse = par['mse_normalized'].reshape(-1, 1)
U = par['U'].reshape(-1, 1)
y_jac, mse_jac = zip(*[model.gradient(x) for model in self.models])
y_jac, mse_jac = np.c_[y_jac], np.c_[mse_jac]
M = (1. / normalized_mse).sum()
tmp = np.dot(mse_jac, (1. / normalized_mse ** 2.) / U)
weights_jacobian = -mse_jac * normalized_mse.T ** -2. / U.T / M \
+ (np.repeat(tmp, len(weights), axis=1) / normalized_mse.T) / M ** 2.
y_dx = np.dot(y_jac, weights) + np.dot(weights_jacobian, y)
mse_dx = np.dot(mse_jac, weights ** 2.) + np.dot(weights.T * weights_jacobian, mse)
return y_dx, mse_dx
def __mse_upper_bound(self, model):
"""
        This function computes the tight upper bound of the Mean Squared Error
        (Kriging variance) for the underlying posterior Gaussian Process model;
        it is only valid for Simple or Ordinary Kriging (constant trend).
Parameters
----------
model : a fitted Gaussian Process/Kriging model, in which 'self.regr'
should be 'constant'
Returns
----------
upper_bound : the upper bound of the Mean Squared Error
"""
if self.regr_label != 'constant':
raise Exception('MSE upper bound only exists for constant trend')
C = model.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if model.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
_, par = model.reduced_likelihood_function()
model.C = par['C']
model.Ft = par['Ft']
model.G = par['G']
n_samples, n_features = model.X.shape
tmp = 1 / model.G ** 2
upper_bound = np.sum(model.sigma2 * (1 + tmp))
return upper_bound
def __check_duplicate(self, X, y):
# TODO: show a warning here
X = np.atleast_2d(X)
new_X = []
new_Y = []
for i, x in enumerate(X):
idx = np.nonzero(np.all(np.isclose(self.X, x), axis=1))[0]
if len(idx) == 0:
new_X.append(x)
new_Y.append(y[i])
if y[i] != self.y[idx]:
raise Exception('The same input can not have different respones!')
return np.array(new_X), new_Y
def updateModel(self, newX, newY):
"""
        Deprecated: just call ``fit`` with the new data instead.
"""
newY = newY.reshape(-1, 1)
#print newY.shape, self.y.shape
X = np.r_[self.X, newX]
y = np.r_[self.y, newY]
self.fit(X, y)
def update_data(self, X, y):
self.X = X
self.y = y
# note that the clusters are not rebuilt
if self.cluster_method == 'tree':
self.cluster_label = self.clusterer.apply(self.X)
for i, model in enumerate(self.models):
idx = self.cluster_label == self.leaf_labels[i]
if not np.any(idx):
raise Exception('No data point in cluster {}!'.format(i+1))
model.update_data(self.X[idx, :], self.y[idx])
else:
# TODO: to implement for the rest options
pass
return self
def fit(self, newX, newY, re_estimate_all=False):
"""
Add several instances to the data and rebuild models
newX is a 2d array of (instances,features) and newY a vector
"""
if not hasattr(self, 'X'):
self.__fit(newX, newY)
return
newX, newY = self.__check_duplicate(newX, newY)
if self.cluster_method == 'tree':
#first update our data
if len(newY) != 0:
self.X = np.r_[self.X, newX]
self.y = np.r_[self.y, newY]
#self.X = np.append(self.X, newX, axis=0)
#self.y = np.append(self.y, newY)
#check the size of the new data
if re_estimate_all:
#in this case build additional models
if self.verbose:
print("refitting all models")
self.__fit(self.X, self.y)
elif len(self.X) > (self.sizeX + self.minsamples*2.0):
#in this case build additional models if needed
if self.verbose:
print("refitting new models")
#print("Current tree")
#print(self.clusterer)
rebuildmodels = np.unique(self.clusterer.apply(newX))
rebuildmodelstemp = []
rebuild_index = 0;
self.cluster_label = self.clusterer.apply(self.X)
new_leaf_labels = []
for i in rebuildmodels:
leafindex = np.where(self.leaf_labels==i)[0][0]
idx = self.cluster_label == i
#check size of model
if (len(idx) > self.minsamples*2.0):
if self.verbose:
print("Trying to split leaf node",i)
#split the leaf and fit 2 additional models
new_labels = []
if self.clusterer.split_terminal(i,self.X[idx, :], self.y[idx]):
self.cluster_label = self.clusterer.apply(self.X)
new_labels = np.unique(self.cluster_label)
self.n_cluster = len(new_labels)
delete_old = False
for n in new_labels:
if n not in self.leaf_labels:
delete_old = True
new_leafindex = np.where(new_labels==n)[0][0]
if self.verbose:
print("New model with id",new_leafindex)
#print self.leaf_labels
new_model = deepcopy(self.empty_model)
self.models.append(new_model)
self.leaf_labels = np.append(self.leaf_labels,n)
#rebuildmodelstemp.append(new_leafindex)
new_leaf_labels.append(n)
if delete_old:
self.leaf_labels = np.delete(self.leaf_labels, leafindex)
del(self.models[leafindex])
else:
new_leaf_labels.append(i) #update current model
else:
#just refit this model
#rebuildmodelstemp.append(leafindex)
new_leaf_labels.append(i)
for n in new_leaf_labels:
rebuildmodelstemp.append(np.where(self.leaf_labels==n)[0][0])
rebuildmodels = np.unique(np.array(rebuildmodelstemp,dtype=int))
labels = self.clusterer.apply(self.X)
self.cluster_label = labels
self.leaf_labels = np.unique(labels)
for i in rebuildmodels:
idx = self.cluster_label == self.leaf_labels[i]
if self.verbose:
print("updating model on position "+str(i)+" attached to leaf id "+str(self.leaf_labels[i])+" and "+str(sum(idx))+" data points")
model = self.models[i]
while True:
try:
# super is needed here to call the 'fit' function in the
# parent class (GaussianProcess)
model.fit(self.X[idx, :], self.y[idx])
break
except ValueError:
if self.verbose:
print('Current nugget setting is too small!' +\
' It will be tuned up automatically')
model.nugget *= 10
else:
rebuildmodels = np.unique(self.clusterer.apply(newX))
rebuildmodelstemp = []
for i in rebuildmodels:
rebuildmodelstemp.append(np.where(self.leaf_labels==i)[0][0])
rebuildmodels = np.array(rebuildmodelstemp,dtype=int)
labels = self.clusterer.apply(self.X)
self.cluster_label = labels
if self.is_parallel: # parallel model fitting
idx = [self.cluster_label == self.leaf_labels[i] for i in rebuildmodels]
modelstosend = [deepcopy(self.models[i]) for i in rebuildmodels]
training = [(self.X[index, :], self.y[index]) for index in idx]
training_set = itertools.izip(rebuildmodels,modelstosend,training )
pool = Pool(self.n_cluster)
models = pool.map(train_modelstar, training_set)
pool.close()
pool.join()
for i in range(len(rebuildmodels)):
self.models[rebuildmodels[i]] = models[i]
else:# is_parralel = false
for i in rebuildmodels:
if self.verbose:
print("updating model "+str(i))
idx = self.cluster_label == self.leaf_labels[i]
model = self.models[i]
while True:
try:
# super is needed here to call the 'fit' function in the
# parent class (GaussianProcess)
model.fit(self.X[idx, :], self.y[idx])
break
except ValueError:
if self.verbose:
print('Current nugget setting is too small!' +\
' It will be tuned up automatically')
model.nugget *= 10
else:
#rebuild all models
self.X = np.r_[self.X, newX]
self.y = np.r_[self.y, newY]
self.__fit(self.X, self.y)
# TODO: implementating batch_size option to reduce the memory usage
def predict(self, X, eval_MSE=False, par_out=None):
"""
This function evaluates the Optimal Weighted Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
        Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, Not available yet
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
X = np.atleast_2d(X)
X = X.T if size(X, 1) != self.n_feature else X
n_eval, n_feature = X.shape
if n_feature != self.n_feature:
raise Exception('Dimensionality does not match!')
if self.cluster_method == 'tree':
pred = np.zeros(n_eval)
if eval_MSE:
mse = np.zeros(n_eval)
for i, x in enumerate(X):
# modelindex = self.clusterer
ix = self.clusterer.apply(x.reshape(1, -1))
model = self.models[np.where(self.leaf_labels == ix)[0][0]]
_ = model.predict(x.reshape(1, -1), eval_MSE)
if eval_MSE:
pred[i], mse[i] = _
else:
pred[i] = _
if eval_MSE:
return pred, mse
else:
return pred
elif self.cluster_method in ['random', 'k-mean']:
# compute predictions and MSE from all underlying GP models
# super is needed here to call the 'predict' function in the
# parent class
res = array([model.predict(X, eval_MSE=True) \
for model in self.models])
# compute the upper bound of MSE from all underlying GP models
mse_upper_bound = array([self.__mse_upper_bound(model) \
for model in self.models])
if np.any(mse_upper_bound == 0):
raise Exception('Something weird happened!')
pred, mse = res[:, 0, :], res[:, 1, :]
normalized_mse = mse / mse_upper_bound.reshape(-1, 1)
# inverse of the MSE matrices
Q_inv = [diag(1.0 / normalized_mse[:, i]) for i in range(n_eval)]
_ones = ones(self.n_cluster)
weight = lambda Q_inv: dot(_ones, Q_inv)
normalizer = lambda Q_inv: dot(dot(_ones, Q_inv), _ones.reshape(-1, 1))
# compute the weights of convex combination
weights = array([weight(q_inv) / normalizer(q_inv) for q_inv in Q_inv])
# make sure the weights sum to 1...
if np.any(abs(np.sum(weights, axis=1) - 1.0) > 1e-8):
raise Exception('Computed weights do not sum to 1!')
# convex combination of predictions from the underlying GP models
pred_combined = array([inner(pred[:, i], weights[i, :]) \
for i in range(n_eval)])
if par_out is not None:
par_out['weights'] = weights
par_out['y'] = pred
par_out['mse'] = mse
par_out['mse_normalized'] = normalized_mse
par_out['U'] = mse / normalized_mse
# if overall MSE is needed
if eval_MSE:
mse_combined = array([inner(mse[:, i], weights[i, :]**2) \
for i in range(n_eval)])
return pred_combined, mse_combined
else:
return pred_combined
elif self.cluster_method == 'GMM':
# TODO: implement the MSE calculation for 'GMM' approach: mixed of Gaussian processes
pass
if __name__ == '__main__':
X = np.linspace(0, 1, 100).reshape(-1, 1)
y = np.sin(X)
model = OWCK(regr='constant', corr='absolute_exponential',
n_cluster=2, min_leaf_size=0, cluster_method='tree',
overlap=0.0, beta0=None,
storage_mode='full', verbose=True, theta0=np.random.rand(), thetaL=[1e-10],
thetaU=[10], optimizer='fmin_cobyla', random_start=1, nugget=0.0001,
normalize=False, is_parallel=False)
model.fit(X, y)
newX = np.array([[1.2]])
newY = np.sin(newX)
model.fit(newX, newY)
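    # Illustrative follow-up, based on the ``predict`` docstring above: query
    # the refitted model at a few points and ask for the MSE as well.
    X_test = np.linspace(0, 1, 5).reshape(-1, 1)
    pred, mse = model.predict(X_test, eval_MSE=True)
    print(pred)
    print(mse)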
|
gpl-2.0
|
alexlib/piv-gui-python
|
piv_gui_python/windowclass.py
|
2
|
2407
|
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
class Window(QtGui.QDialog):
def __init__(self, shape_0, shape_1, corr_mat, frame_number, parent=None):
super(Window, self).__init__(parent)
self.frame_number = frame_number
self.corr_mat = corr_mat
self.shape_0 = shape_0
self.shape_1 = shape_1
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# Just some button connected to `plot` method
self.button = QtGui.QPushButton('Plot')
self.plot()
# set the layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self.canvas)
self.setLayout(layout)
def plot(self):
data = self.corr_mat
self.ax = Axes3D(self.figure)
self.nx, self.ny = self.shape_0,self.shape_1
self.xx = range(self.nx)
self.yy = range(self.ny)
xmax = np.argmax(np.max(data, axis=0))
ymax = np.argmax(np.max(data, axis=1))
self.ax.set_xlabel('x axis. Peak column = ' +str(xmax))
self.ax.set_ylabel('y axis. Peak row = ' + str(ymax))
self.ax.set_title('Frame ' + str(self.frame_number))
self.X, self.Y = np.meshgrid(self.xx, self.yy)
self.ax.plot_surface(self.X , self.Y , data , rstride = 1, cstride = 1,alpha=0.7)
        # TODO: Contours are sometimes not plotted properly. Investigate this.
cset = self.ax.contour(self.X, self.Y, data, zdir='z', offset=0, cmap=cm.coolwarm)
cset = self.ax.contour(self.X, self.Y, data, zdir='x', offset=0, cmap=cm.coolwarm)
cset = self.ax.contour(self.X, self.Y, data, zdir='y', offset=self.shape_0, cmap=cm.coolwarm)
#TODO: Need to make the ticks appear in a reasonable way. Sometimes too dense.
self.ax.set_xticks(np.arange(0,self.shape_1,4))
self.ax.set_yticks(np.arange(0,self.shape_1,4))
self.canvas.draw()
###TODO: Show the mask, show the contours. Show "snr" ?
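# A hedged usage sketch (assumes a working PyQt4 installation; the correlation
# surface below is synthetic): build a Gaussian-shaped correlation peak and
# display it in the 3-D window defined above. Guarded so it only runs when
# this file is executed directly.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    nx = ny = 32
    yy, xx = np.mgrid[0:ny, 0:nx]
    corr = np.exp(-((xx - 12.0) ** 2 + (yy - 20.0) ** 2) / 30.0)
    window = Window(nx, ny, corr, frame_number=1)
    window.show()
    sys.exit(app.exec_())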
|
gpl-3.0
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/examples/inverse/plot_lcmv_beamformer_volume.py
|
18
|
3046
|
"""
===================================================================
Compute LCMV inverse solution on evoked data in volume source space
===================================================================
Compute LCMV inverse solution on an auditory evoked dataset in a volume source
space. It stores the solution in a nifti file for visualisation e.g. with
Freeview.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
# Run free orientation (vector) beamformer. Source orientation can be
# restricted by setting pick_ori to 'max-power' (or 'normal' but only when
# using a surface-based source space)
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, pick_ori=None)
# Save result in stc files
stc.save('lcmv-vol')
stc.crop(0.0, 0.2)
# Save result in a 4D nifti file
img = mne.save_stc_as_volume('lcmv_inverse.nii.gz', stc,
forward['src'], mri_resolution=False)
t1_fname = data_path + '/subjects/sample/mri/T1.mgz'
# Plotting with nilearn ######################################################
plot_stat_map(index_img(img, 61), t1_fname, threshold=0.8,
title='LCMV (t=%.1f s.)' % stc.times[61])
# plot source time courses with the maximum peak amplitudes
plt.figure()
plt.plot(stc.times, stc.data[np.argsort(np.max(stc.data, axis=1))[-40:]].T)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.show()
|
bsd-3-clause
|
openastro/d2d
|
python/plot_lambert_scan_maps.py
|
4
|
6944
|
'''
Copyright (c) 2014-2016 Kartik Kumar, Dinamica Srl ([email protected])
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# Plotting
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
from nlcmap import nlcmap
# I/O
import commentjson
import json
from pprint import pprint
import sqlite3
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
print ""
print "------------------------------------------------------------------"
print " D2D "
print " 0.0.2 "
print " Copyright (c) 2015-2016, K. Kumar ([email protected]) "
print " Copyright (c) 2016, E.J. Hekma ([email protected]) "
print "------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
print ""
print "******************************************************************"
print " Input parameters "
print "******************************************************************"
print ""
# Parse JSON configuration file
# Raise exception if wrong number of inputs are provided to script
if len(sys.argv) != 2:
raise Exception("Only provide a JSON config file as input!")
json_data = open(sys.argv[1])
config = commentjson.load(json_data)
json_data.close()
pprint(config)
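# The JSON configuration is assumed (from the keys accessed below) to provide
# at least: "database" (path to the SQLite scan database), "map_order",
# "map_order_axis_label", "colormap", "tick_label_size", "axis_label_size",
# "output_directory", "scan_figure" and "figure_dpi". For example
# (illustrative values only; the column implied by "map_order" must exist in
# the lambert_scanner_results table):
#
# {
#     "database"             : "lambert_scanner.db",
#     "map_order"            : "semi_major_axis",
#     "map_order_axis_label" : "Departure SMA [km]",
#     "colormap"             : "viridis",
#     "tick_label_size"      : 8,
#     "axis_label_size"      : 12,
#     "output_directory"     : ".",
#     "scan_figure"          : "lambert_scan_map",
#     "figure_dpi"           : 300
# }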
print ""
print "******************************************************************"
print " Operations "
print "******************************************************************"
print ""
print "Fetching scan data from database ..."
# Connect to SQLite database.
try:
database = sqlite3.connect(config['database'])
except sqlite3.Error, e:
print "Error %s:" % e.args[0]
sys.exit(1)
departure_epochs = pd.read_sql("SELECT DISTINCT departure_epoch \
FROM lambert_scanner_results;", \
database)
for i in xrange(0,departure_epochs.size):
c = departure_epochs['departure_epoch'][i]
print "Plotting scan map with departure epoch: ",c,"Julian Date"
# Fetch scan data.
map_order = "departure_" + config['map_order']
scan_data = pd.read_sql("SELECT departure_object_id, arrival_object_id, \
min(transfer_delta_v), "+ map_order + " \
FROM lambert_scanner_results \
WHERE departure_epoch BETWEEN " + str(c-0.00001) + " \
AND "+str(c+0.00001) +" \
GROUP BY departure_object_id, arrival_object_id;", \
database)
scan_data.columns = ['departure_object_id','arrival_object_id', \
'transfer_delta_v',str(map_order)]
scan_order = scan_data.sort_values(str(map_order)) \
.drop_duplicates('departure_object_id')[ \
['departure_object_id',str(map_order)]]
scan_map = scan_data.pivot(index='departure_object_id', \
columns='arrival_object_id',
values='transfer_delta_v')
scan_map = scan_map.reindex(index=scan_order['departure_object_id'], \
columns=scan_order['departure_object_id'])
# Set up color map.
bins = np.linspace(scan_data['transfer_delta_v'].min(), \
scan_data['transfer_delta_v'].max(), 10)
groups = scan_data['transfer_delta_v'].groupby( \
np.digitize(scan_data['transfer_delta_v'], bins))
levels = groups.mean().values
cmap_lin = plt.get_cmap(config['colormap'])
cmap = nlcmap(cmap_lin, levels)
# Plot heat map.
ax1 = plt.subplot2grid((15,15), (2, 0),rowspan=13,colspan=14)
heatmap = ax1.pcolormesh(scan_map.values, cmap=cmap, \
vmin=scan_data['transfer_delta_v'].min(), \
vmax=scan_data['transfer_delta_v'].max())
ax1.set_xticks(np.arange(scan_map.shape[1] + 1)+0.5)
ax1.set_xticklabels(scan_map.columns, rotation=90)
ax1.set_yticks([])
ax1.tick_params(axis='both', which='major', labelsize=config['tick_label_size'])
ax1.set_xlim(0, scan_map.shape[1])
ax1.set_ylim(0, scan_map.shape[0])
ax1.set_xlabel('Departure object',fontsize=config['axis_label_size'])
ax1.set_ylabel('Arrival object',fontsize=config['axis_label_size'])
# Plot axis ordering.
ax2 = plt.subplot2grid((15,15), (0, 0),rowspan=2,colspan=14,sharex=ax1)
ax2.step(np.arange(0.5,scan_map.shape[1]+.5),scan_order[str(map_order)],'k',linewidth=2.0)
ax2.get_yaxis().set_major_formatter(plt.FormatStrFormatter('%.2e'))
ax2.tick_params(axis='both', which='major', labelsize=config['tick_label_size'])
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.set_ylabel(config['map_order_axis_label'],fontsize=config['axis_label_size'])
# Plot color bar.
ax3 = plt.subplot2grid((15,15), (0, 14), rowspan=15)
color_bar = plt.colorbar(heatmap,cax=ax3)
color_bar.ax.get_yaxis().labelpad = 20
color_bar.ax.set_ylabel(r'Total transfer $\Delta V$ [km/s]', rotation=270)
plt.tight_layout()
# Save figure to file.
plt.savefig(config["output_directory"] + "/" + config["scan_figure"] + "_"+str(i+1) + \
".png", dpi=config["figure_dpi"])
plt.close()
print "Figure ",i+1," generated successfully...."
print "Figure generated successfully!"
print ""
# Close SQLite database if it's still open.
if database:
database.close()
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
mit
|
easttosea/old_investor
|
structured_fund/data.py
|
1
|
25978
|
# -*- coding: utf-8 -*-
import re
import socket
import datetime
import urllib.request
import pandas as pd
import tushare as ts
import logging
logging.basicConfig(level=logging.INFO)
class StructuredFund(object):
"""The structured fund here."""
def __init__(self):
self.TODAY_DATE = datetime.date.today()
self.MANUAL_CORRECT_RATE = {'163109': [0.0575, '1年+3.0%'], '161719': [0.055, '3年+1.25%'],
'162215': [0.0358, '国债×1.3']}
self.net_value_date = ''
self.frame_info = None
self.init_fund_info()
self.fund_a_code = list(self.frame_info['a_code'].values)
self.fund_b_code = list(self.frame_info['b_code'].values)
self.i_code = []
for code in set(self.frame_info['i_code']):
if code[0:3] == '399':
self.i_code.append(code)
self.frame_realtime = None
self.update_time = None
def init_fund_info(self):
"""Init the info of the structured fund."""
# 1. Get the basic info.
url = 'http://www.abcfund.cn/style/fundlist.php'
reg_ex = r'<tr.*?><td>(.*?)</td></tr>'
split_str = '</td><td>'
data_list = web_crawler(url, reg_ex, split_str)
frame_info_1 = pd.DataFrame(data_list, columns=[
'm_code', 'm_name', 'establish_date', 'list_date', 'a_code', 'a_name', 'b_code',
'b_name', 'a_to_b', 'delist_date', 'current_annual_rate', 'i_code', 'i_name'])
frame_info_1 = frame_info_1[frame_info_1.a_code.str.contains(r'15|50')]
frame_info_1 = frame_info_1.set_index('m_code')
# 2. Get the info of rate adjustment.
url = 'http://www.abcfund.cn/data/arateadjustment.php'
reg_ex = r'<tr.*?><td>(.*?)</td></tr>'
split_str = '</td><td>'
data_list = web_crawler(url, reg_ex, split_str)
frame_info_2 = pd.DataFrame(data_list, columns=[
'm_code', 'm_name', 'rate_adjustment_condition', 'next_rate_adjustment_date'])
frame_info_2 = frame_info_2.drop('m_name', axis=1)
frame_info_2 = frame_info_2.set_index('m_code')
# 3. Get the conversion condition.
url = 'http://www.abcfund.cn/data/zsinfo.php'
reg_ex = r'onclick.*?><td>(.*?)</td><tr'
replace_str = '</td><td>'
split_str = '<td>'
data_list = web_crawler(url, reg_ex, split_str, replace_str)
frame_info_3 = pd.DataFrame(data_list, columns=[
'm_code', 'm_name', 'next_regular_conversion_date', 'days_to_next_regular_conversion_date',
'ascending_conversion_condition', 'descending_conversion_condition'])
frame_info_3 = frame_info_3.drop('m_name', axis=1)
frame_info_3 = frame_info_3.set_index('m_code')
# 4. Get the net value of m fund, a and b.
url = 'http://www.abcfund.cn/data/premium.php'
reg_ex = r'<tr.*?><td>(.*?)</td></tr>'
split_str = '</td><td>'
# Get the date of the data of net value.
reg_ex_date = r'\d{4}年\d{1,2}月\d{1,2}日'
data_list, date = web_crawler(url, reg_ex, split_str, reg_ex_2=reg_ex_date)
self.net_value_date = datetime.datetime.strptime(date[0], '%Y年%m月%d日').date()
frame_info_4 = pd.DataFrame(data_list, columns=[
'm_code', 'm_name', 'm_net_value', 'a_code', 'a_name', 'a_net_value',
'a_price', 'a_premium', 'a_volume', 'b_code', 'b_name', 'b_net_value', 'b_price', 'b_premium',
'b_volume', 'whole_premium'])
frame_info_4 = frame_info_4.loc[:, ['m_code', 'm_net_value', 'a_net_value', 'b_net_value']]
frame_info_4 = frame_info_4.set_index('m_code')
# 5. Join the data frames together.
self.frame_info = frame_info_1.join([frame_info_2, frame_info_3, frame_info_4], how='inner')
# Get the one-year deposit rate
deposit_name, deposit_rate = ts.get_deposit_rate().loc[6, ['deposit_type', 'rate']]
if deposit_name == '定期存款整存整取(一年)':
deposit_rate = float(deposit_rate) / 100
else:
logging.error('Failure in getting deposit rate!')
deposit_rate = 1.5 / 100
# Format the data of table
establish_date_column = []
list_date_column = []
delist_date_column = []
years_to_delist_date_column = []
a_in_10_column = []
a_to_b_column = []
current_annual_rate_column = []
rate_rule_column = []
next_annual_rate_column = []
next_rate_adjustment_date_column = []
days_to_next_rate_adjustment_date_column = []
rate_adjustment_condition_column = []
next_regular_conversion_date_column = []
ascending_conversion_condition_column = []
descending_conversion_condition_column = []
a_net_value_column = []
b_net_value_column = []
m_net_value_column = []
for index in self.frame_info.index:
fund = self.frame_info.loc[index, :]
try:
establish_date = datetime.datetime.strptime(fund.establish_date, '%Y-%m-%d').date()
except ValueError:
establish_date = None
try:
list_date = datetime.datetime.strptime(fund.list_date, '%Y-%m-%d').date()
except ValueError:
list_date = None
try:
delist_date = datetime.datetime.strptime(fund.delist_date, '%Y-%m-%d').date()
except ValueError:
delist_date = None
try:
years_to_delist_date = (delist_date - self.TODAY_DATE).days / 365
except TypeError:
years_to_delist_date = None
a_in_10 = (int(fund.a_to_b[-3:-2]) / (int(fund.a_to_b[-3:-2]) + int(fund.a_to_b[-1:]))) * 10
a_to_b = '{0}:{1}'.format(int(a_in_10), int(10-a_in_10))
rate_and_rule = fund.current_annual_rate.split('<br><font color=#696969>')
if index in self.MANUAL_CORRECT_RATE:
current_annual_rate, rate_rule = self.MANUAL_CORRECT_RATE[index]
elif len(rate_and_rule) > 1:
current_annual_rate = float(rate_and_rule[0][:-1]) / 100
rate_rule = rate_and_rule[1][:-7]
if rate_rule == '固定':
rate_rule = '固定' + rate_and_rule[0]
if '.' not in rate_rule:
rate_rule = rate_rule[:-1] + '.0%'
else:
current_annual_rate = None
rate_rule = rate_and_rule[0]
if '1年+' in rate_rule:
next_annual_rate = deposit_rate + float(rate_rule[3:-1]) / 100
elif '3年+' in rate_rule:
next_annual_rate = 2.75 / 100 + float(rate_rule[3:-1]) / 100
elif '固定' in rate_rule:
next_annual_rate = float(rate_rule[2:-1]) / 100
elif rate_rule == '特殊情况':
next_annual_rate = None
else:
# This is the rate of mother code '162215'
next_annual_rate = 0.0358
try:
next_rate_adjustment_date = datetime.datetime.strptime(
fund.next_rate_adjustment_date, '%Y-%m-%d').date()
except ValueError:
next_rate_adjustment_date = None
try:
days_to_next_rate_adjustment_date = (next_rate_adjustment_date - self.TODAY_DATE).days
except TypeError:
days_to_next_rate_adjustment_date = None
if '动态调整' in fund.rate_adjustment_condition:
rate_adjustment_condition = '动态调整'
elif '不定期' in fund.rate_adjustment_condition:
rate_adjustment_condition = '折算调整'
elif '不调整' in fund.rate_adjustment_condition:
rate_adjustment_condition = '不调整'
else:
rate_adjustment_condition = '定期调整'
try:
next_regular_conversion_date = datetime.datetime.strptime(
fund.next_regular_conversion_date, '%Y年%m月%d日').date()
except ValueError:
next_regular_conversion_date = None
if fund.ascending_conversion_condition[0] == '母':
ascending_conversion_condition = float(fund.ascending_conversion_condition[7:])
elif fund.ascending_conversion_condition[0] == 'B':
ascending_conversion_condition = float(fund.ascending_conversion_condition[6:]) * (-1)
else:
ascending_conversion_condition = None
if fund.descending_conversion_condition[0] == 'B':
descending_conversion_condition = float(fund.descending_conversion_condition[6:])
elif fund.descending_conversion_condition[0] == '母':
descending_conversion_condition = float(fund.descending_conversion_condition[7:]) * (-1)
else:
descending_conversion_condition = None
try:
a_net_value = float(fund.a_net_value)
except ValueError:
a_net_value = None
try:
b_net_value = float(fund.b_net_value)
except ValueError:
b_net_value = None
try:
m_net_value = float(fund.m_net_value)
except ValueError:
try:
m_net_value = (a_net_value * a_in_10 + b_net_value * (10 - a_in_10)) / 10
except TypeError:
m_net_value = None
establish_date_column.append(establish_date)
list_date_column.append(list_date)
years_to_delist_date_column.append(years_to_delist_date)
delist_date_column.append(delist_date)
a_in_10_column.append(a_in_10)
a_to_b_column.append(a_to_b)
current_annual_rate_column.append(current_annual_rate)
rate_rule_column.append(rate_rule)
next_annual_rate_column.append(next_annual_rate)
next_rate_adjustment_date_column.append(next_rate_adjustment_date)
days_to_next_rate_adjustment_date_column.append(days_to_next_rate_adjustment_date)
rate_adjustment_condition_column.append(rate_adjustment_condition)
next_regular_conversion_date_column.append(next_regular_conversion_date)
ascending_conversion_condition_column.append(ascending_conversion_condition)
descending_conversion_condition_column.append(descending_conversion_condition)
a_net_value_column.append(a_net_value)
b_net_value_column.append(b_net_value)
m_net_value_column.append(m_net_value)
self.frame_info['establish_date'] = establish_date_column
self.frame_info['list_date'] = list_date_column
self.frame_info['delist_date'] = delist_date_column
self.frame_info['years_to_delist_date'] = years_to_delist_date_column
self.frame_info['a_in_10'] = a_in_10_column
self.frame_info['a_to_b'] = a_to_b_column
self.frame_info['current_annual_rate'] = current_annual_rate_column
self.frame_info['rate_rule'] = rate_rule_column
self.frame_info['next_annual_rate'] = next_annual_rate_column
self.frame_info['next_rate_adjustment_date'] = next_rate_adjustment_date_column
self.frame_info['days_to_next_rate_adjustment_date'] = days_to_next_rate_adjustment_date_column
self.frame_info['rate_adjustment_condition'] = rate_adjustment_condition_column
self.frame_info['next_regular_conversion_date'] = next_regular_conversion_date_column
self.frame_info['ascending_conversion_condition'] = ascending_conversion_condition_column
self.frame_info['descending_conversion_condition'] = descending_conversion_condition_column
self.frame_info['a_net_value'] = a_net_value_column
self.frame_info['b_net_value'] = b_net_value_column
self.frame_info['m_net_value'] = m_net_value_column
        # 6. Save the data into a csv file
self.frame_info.to_csv('../data/structured_fund_info.csv')
def update_realtime_quotations(self):
"""Update the realtime quotations of fund a, fund b, and index
Returns:
The boolean.
If there is new data, return True.
If there is no new data, return False.
"""
# 1. Update the data of fund_a
frame_realtime_a = realtime_quotations(self.fund_a_code)
update_time = frame_realtime_a.time[0]
if self.update_time != update_time:
self.update_time = update_time
frame_realtime_a.columns = [
'a_name', 'a_price', 'a_volume', 'a_amount', 'a_b1_p', 'a_b1_v', 'a_b2_p', 'a_b2_v',
'a_b3_p', 'a_b3_v', 'a_b4_p', 'a_b4_v', 'a_b5_p', 'a_b5_v', 'a_a1_p', 'a_a1_v', 'a_a2_p',
'a_a2_v', 'a_a3_p', 'a_a3_v', 'a_a4_p', 'a_a4_v', 'a_a5_p', 'a_a5_v', 'a_high', 'a_low',
'a_pre_close', 'a_open', 'a_date', 'a_time']
frame_realtime_a = frame_realtime_a.drop('a_name', axis=1)
# 2. update the data of the increase rate of index
frame_realtime_i = realtime_quotations(self.i_code)
frame_realtime_i = frame_realtime_i.loc[:, ['price', 'pre_close']]
frame_realtime_i.columns = ['i_price', 'i_pre_close']
self.frame_realtime = self.frame_info.join(frame_realtime_a, on='a_code', how='inner')
self.frame_realtime = self.frame_realtime.join(frame_realtime_i, on='i_code')
a_price_column = []
a_increase_value_column = []
a_increase_rate_column = []
a_premium_rate_column = []
modified_rate_of_return_column = []
i_increase_rate_column = []
m_price_column = []
m_descending_distance_column = []
for index in self.frame_realtime.index:
fund = self.frame_realtime.loc[index, :]
if datetime.time(9, 15) <= update_time <= datetime.time(9, 30):
if fund.a_b1_p == fund.a_a1_p:
a_price = fund.a_b1_p
else:
a_price = fund.a_pre_close
else:
if fund.a_volume == 0:
a_price = fund.a_pre_close
else:
a_price = fund.a_price
a_increase_value = a_price - fund.a_pre_close
a_increase_rate = a_increase_value / fund.a_pre_close
a_premium_rate = (a_price - fund.a_net_value) / fund.a_net_value
if fund.rate_adjustment_condition in ['不调整', '动态调整']:
modified_rate_of_return = fund.next_annual_rate / (a_price - (fund.a_net_value - 1))
else:
modified_rate_of_return = (fund.next_annual_rate / (a_price - (fund.a_net_value - 1) +
fund.days_to_next_rate_adjustment_date / 365 *
(fund.next_annual_rate - fund.current_annual_rate)))
if fund.i_code in self.i_code:
i_increase_rate = (fund.i_price - fund.i_pre_close) / fund.i_pre_close
if self.net_value_date == fund.a_date:
m_price = fund.m_net_value
else:
                    # Calculated here assuming a 95% equity position
m_price = fund.m_net_value * (1 + i_increase_rate * 0.95)
else:
i_increase_rate = None
m_price = fund.m_net_value
if fund.isnull().descending_conversion_condition:
m_descending_distance = None
else:
if fund.descending_conversion_condition > 0:
m_descending_conversion_condition = (fund.descending_conversion_condition + fund.a_net_value) /2
else:
m_descending_conversion_condition = fund.descending_conversion_condition * (-1)
m_descending_distance = (m_price - m_descending_conversion_condition) / m_price
a_price_column.append(a_price)
a_increase_value_column.append(a_increase_value)
a_increase_rate_column.append(a_increase_rate)
a_premium_rate_column.append(a_premium_rate)
modified_rate_of_return_column.append(modified_rate_of_return)
i_increase_rate_column.append(i_increase_rate)
m_price_column.append(m_price)
m_descending_distance_column.append(m_descending_distance)
self.frame_realtime['a_price'] = a_price_column
self.frame_realtime['a_increase_value'] = a_increase_value_column
self.frame_realtime['a_increase_rate'] = a_increase_rate_column
self.frame_realtime['a_premium_rate'] = a_premium_rate_column
self.frame_realtime['modified_rate_of_return'] = modified_rate_of_return_column
self.frame_realtime['i_increase_rate'] = i_increase_rate_column
self.frame_realtime['m_price'] = m_price_column
self.frame_realtime['m_descending_distance'] = m_descending_distance_column
# Write into CSV
self.frame_realtime.to_csv('../data/structured_fund_a.csv')
return True
else:
return False
def web_crawler(url, reg_ex, split_str, replace_str=None, reg_ex_2=None, time_out=10):
"""Crawl from a website, and extract the data into a list.
Args:
url: The url of website.
reg_ex: The regular expression for extracting the data from text.
        split_str: The string that separates cells within each row.
        replace_str: When the website text is not standard, occurrences of replace_str
            are replaced with split_str before splitting.
        reg_ex_2: An optional extra regular expression.
time_out: The time limit of urlopen.
Returns:
A list of row data fetched. Each row is a list of strings. For example:
[['161022', '富国创业板指数分级', '150152', '创业板A', ...]
['164705', '汇添富恒生指数分级', '150169', '恒生A', ...]
...]
If reg_ex_2 exists, return an extra list of data fetched, but this is not split.
"""
try:
with urllib.request.urlopen(url, timeout=time_out) as f:
text = f.read()
except socket.timeout:
logging.info('Timeout when loading this url: {0}'.format(url))
        return web_crawler(url, reg_ex, split_str, replace_str, reg_ex_2, time_out)
except socket.error:
logging.info('Socket error when loading this url: {0}'.format(url))
        return web_crawler(url, reg_ex, split_str, replace_str, reg_ex_2, time_out)
text = text.decode('GBK')
reg = re.compile(reg_ex)
data = reg.findall(text)
data_list = []
if replace_str is not None:
for row in data:
if len(row) > 1:
data_list.append([cell for cell in row.replace(replace_str, split_str).split(split_str)])
else:
for row in data:
if len(row) > 1:
data_list.append([cell for cell in row.split(split_str)])
if reg_ex_2 is None:
return data_list
else:
data_2 = re.findall(reg_ex_2, text)
return data_list, data_2
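# Hedged usage sketch (hypothetical helper, not in the original module): fetch the
# abcfund fund-list table with the same URL, regular expression and cell separator
# that StructuredFund.init_fund_info uses above.
def _example_web_crawler_call():
    url = 'http://www.abcfund.cn/style/fundlist.php'
    rows = web_crawler(url, r'<tr.*?><td>(.*?)</td></tr>', '</td><td>')
    # Each row is a list of strings: [m_code, m_name, establish_date, ...]
    return rows[:3]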
def realtime_quotations(symbols):
"""Get the realtime quotations of stocks/funds.
Args:
        symbols: The code(s) of the stocks/funds, either a string or a list of strings.
Returns:
A data frame of the realtime quotations. The index is the code.
"""
# Get the list split by 30 codes, in the format of [['code1', 'code2', ...], ['code31', 'code32', ...], ...]
if isinstance(symbols, str):
code_list = [[symbols]]
else:
code_list = []
i = 0
while i < len(symbols):
code_list.append(symbols[i:i+30])
i += 30
# Get realtime quotations, in the format of data frame.
frame_realtime = pd.DataFrame()
for code_list_split_30 in code_list:
table = ts.get_realtime_quotes(code_list_split_30).loc[
:, ['code', 'name', 'price', 'volume', 'amount', 'b1_p', 'b1_v', 'b2_p', 'b2_v', 'b3_p',
'b3_v', 'b4_p', 'b4_v', 'b5_p', 'b5_v', 'a1_p', 'a1_v', 'a2_p', 'a2_v', 'a3_p', 'a3_v',
'a4_p', 'a4_v', 'a5_p', 'a5_v', 'high', 'low', 'pre_close', 'open', 'date', 'time']]
table = table.set_index('code')
frame_realtime = pd.concat([frame_realtime, table])
price_column = []
volume_column = []
amount_column = []
b1_p_column = []
b1_v_column = []
b2_p_column = []
b2_v_column = []
b3_p_column = []
b3_v_column = []
b4_p_column = []
b4_v_column = []
b5_p_column = []
b5_v_column = []
a1_p_column = []
a1_v_column = []
a2_p_column = []
a2_v_column = []
a3_p_column = []
a3_v_column = []
a4_p_column = []
a4_v_column = []
a5_p_column = []
a5_v_column = []
high_column = []
low_column = []
pre_close_column = []
open_column = []
date_column = []
time_column = []
for index in frame_realtime.index:
fund = frame_realtime.loc[index, :]
price = float(fund.price)
volume = int(fund.volume)
amount = float(fund.amount)
b1_p = float(fund.b1_p)
if b1_p == 0:
b1_p = None
try:
b1_v = int(fund.b1_v)
except ValueError:
b1_v = None
b2_p = float(fund.b2_p)
if b2_p == 0:
b2_p = None
try:
b2_v = int(fund.b2_v)
except ValueError:
b2_v = None
b3_p = float(fund.b3_p)
if b3_p == 0:
b3_p = None
try:
b3_v = int(fund.b3_v)
except ValueError:
b3_v = None
b4_p = float(fund.b4_p)
if b4_p == 0:
b4_p = None
try:
b4_v = int(fund.b4_v)
except ValueError:
b4_v = None
b5_p = float(fund.b5_p)
if b5_p == 0:
b5_p = None
try:
b5_v = int(fund.b5_v)
except ValueError:
b5_v = None
a1_p = float(fund.a1_p)
if a1_p == 0:
a1_p = None
try:
a1_v = int(fund.a1_v)
except ValueError:
a1_v = None
a2_p = float(fund.a2_p)
if a2_p == 0:
a2_p = None
try:
a2_v = int(fund.a2_v)
except ValueError:
a2_v = None
a3_p = float(fund.a3_p)
if a3_p == 0:
a3_p = None
try:
a3_v = int(fund.a3_v)
except ValueError:
a3_v = None
a4_p = float(fund.a4_p)
if a4_p == 0:
a4_p = None
try:
a4_v = int(fund.a4_v)
except ValueError:
a4_v = None
a5_p = float(fund.a5_p)
if a5_p == 0:
a5_p = None
try:
a5_v = int(fund.a5_v)
except ValueError:
a5_v = None
high = float(fund.high)
if high == 0:
high = None
low = float(fund.low)
if low == 0:
low = None
pre_close = float(fund.pre_close)
if pre_close == 0:
pre_close = None
open_d = float(fund.open)
if open_d == 0:
open_d = None
date = datetime.datetime.strptime(fund.date, '%Y-%m-%d').date()
time = datetime.datetime.strptime(fund.time, '%H:%M:%S').time()
price_column.append(price)
volume_column.append(volume)
amount_column.append(amount)
b1_p_column.append(b1_p)
b1_v_column.append(b1_v)
b2_p_column.append(b2_p)
b2_v_column.append(b2_v)
b3_p_column.append(b3_p)
b3_v_column.append(b3_v)
b4_p_column.append(b4_p)
b4_v_column.append(b4_v)
b5_p_column.append(b5_p)
b5_v_column.append(b5_v)
a1_p_column.append(a1_p)
a1_v_column.append(a1_v)
a2_p_column.append(a2_p)
a2_v_column.append(a2_v)
a3_p_column.append(a3_p)
a3_v_column.append(a3_v)
a4_p_column.append(a4_p)
a4_v_column.append(a4_v)
a5_p_column.append(a5_p)
a5_v_column.append(a5_v)
high_column.append(high)
low_column.append(low)
pre_close_column.append(pre_close)
open_column.append(open_d)
date_column.append(date)
time_column.append(time)
frame_realtime['price'] = price_column
frame_realtime['volume'] = volume_column
frame_realtime['amount'] = amount_column
frame_realtime['b1_p'] = b1_p_column
frame_realtime['b1_v'] = b1_v_column
frame_realtime['b2_p'] = b2_p_column
frame_realtime['b2_v'] = b2_v_column
frame_realtime['b3_p'] = b3_p_column
frame_realtime['b3_v'] = b3_v_column
frame_realtime['b4_p'] = b4_p_column
frame_realtime['b4_v'] = b4_v_column
frame_realtime['b5_p'] = b5_p_column
frame_realtime['b5_v'] = b5_v_column
frame_realtime['a1_p'] = a1_p_column
frame_realtime['a1_v'] = a1_v_column
frame_realtime['a2_p'] = a2_p_column
frame_realtime['a2_v'] = a2_v_column
frame_realtime['a3_p'] = a3_p_column
frame_realtime['a3_v'] = a3_v_column
frame_realtime['a4_p'] = a4_p_column
frame_realtime['a4_v'] = a4_v_column
frame_realtime['a5_p'] = a5_p_column
frame_realtime['a5_v'] = a5_v_column
frame_realtime['high'] = high_column
frame_realtime['low'] = low_column
frame_realtime['pre_close'] = pre_close_column
frame_realtime['open'] = open_column
frame_realtime['date'] = date_column
frame_realtime['time'] = time_column
return frame_realtime
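# Hedged end-to-end sketch (not part of the original file): build the static fund
# table once, then poll realtime quotations. The column names used below are the
# ones assigned in update_realtime_quotations above.
if __name__ == '__main__':
    fund = StructuredFund()
    if fund.update_realtime_quotations():
        print(fund.frame_realtime[['a_price', 'a_premium_rate',
                                   'modified_rate_of_return']].head())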
|
gpl-2.0
|
wzbozon/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
103
|
4394
|
"""
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
|
bsd-3-clause
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/tools/rplot.py
|
3
|
28660
|
import random
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
class Scale:
"""
Base class for mapping between graphical and data attributes.
"""
pass
class ScaleGradient(Scale):
"""
A mapping between a data attribute value and a
point in colour space between two specified colours.
"""
def __init__(self, column, colour1, colour2):
"""Initialize ScaleGradient instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A three element tuple representing an RGB somewhere between colour1 and colour2
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
class ScaleGradient2(Scale):
"""
Create a mapping between a data attribute value and a
point in colour space in a line of three specified colours.
"""
def __init__(self, column, colour1, colour2, colour3):
"""Initialize ScaleGradient2 instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
colour3: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.colour3 = colour3
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A three element tuple representing an RGB somewhere along the line
of colour1, colour2 and colour3
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
r3, g3, b3 = self.colour3
x_scaled = (x - a) / (b - a)
if x_scaled < 0.5:
x_scaled *= 2.0
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
else:
x_scaled = (x_scaled - 0.5) * 2.0
return (r2 + (r3 - r2) * x_scaled,
g2 + (g3 - g2) * x_scaled,
b2 + (b3 - b2) * x_scaled)
class ScaleSize(Scale):
"""
Provide a mapping between a DataFrame column and matplotlib
scatter plot shape size.
"""
def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
"""Initialize ScaleSize instance.
Parameters:
-----------
column: string, a column name
min_size: float, minimum point size
max_size: float, maximum point size
transform: a one argument function of form float -> float (e.g. lambda x: log(x))
"""
self.column = column
self.min_size = min_size
self.max_size = max_size
self.transform = transform
self.categorical = False
def __call__(self, data, index):
"""Return matplotlib scatter plot marker shape size.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
x = data[self.column].iget(index)
a = float(min(data[self.column]))
b = float(max(data[self.column]))
return self.transform(self.min_size + ((x - a) / (b - a)) *
(self.max_size - self.min_size))
class ScaleShape(Scale):
"""
Provides a mapping between matplotlib marker shapes
and attribute values.
"""
def __init__(self, column):
"""Initialize ScaleShape instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
self.legends = set([])
self.categorical = True
def __call__(self, data, index):
"""Returns a matplotlib marker identifier.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
a matplotlib marker identifier
"""
values = sorted(list(set(data[self.column])))
if len(values) > len(self.shapes):
raise ValueError("Too many different values of the categorical attribute for ScaleShape")
x = data[self.column].iget(index)
return self.shapes[values.index(x)]
class ScaleRandomColour(Scale):
"""
Maps a random colour to a DataFrame attribute.
"""
def __init__(self, column):
"""Initialize ScaleRandomColour instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.categorical = True
def __call__(self, data, index):
"""Return a tuple of three floats, representing
an RGB colour.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
random.seed(data[self.column].iget(index))
return [random.random() for _ in range(3)]
class ScaleConstant(Scale):
"""
Constant returning scale. Usually used automatically.
"""
def __init__(self, value):
"""Initialize ScaleConstant instance.
Parameters:
-----------
value: any Python value to be returned when called
"""
self.value = value
self.categorical = False
def __call__(self, data, index):
"""Return the constant value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A constant value specified during initialisation
"""
return self.value
def default_aes(x=None, y=None):
"""Create the default aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
Returns:
--------
a dictionary with aesthetics bindings
"""
return {
'x' : x,
'y' : y,
'size' : ScaleConstant(40.0),
'colour' : ScaleConstant('grey'),
'shape' : ScaleConstant('o'),
'alpha' : ScaleConstant(1.0),
}
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
"""Create an empty aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
size: function, binding for size attribute of Geoms
colour: function, binding for colour attribute of Geoms
shape: function, binding for shape attribute of Geoms
alpha: function, binding for alpha attribute of Geoms
Returns:
--------
a dictionary with aesthetics bindings
"""
if not hasattr(size, '__call__') and size is not None:
size = ScaleConstant(size)
if not hasattr(colour, '__call__') and colour is not None:
colour = ScaleConstant(colour)
if not hasattr(shape, '__call__') and shape is not None:
shape = ScaleConstant(shape)
if not hasattr(alpha, '__call__') and alpha is not None:
alpha = ScaleConstant(alpha)
if any([isinstance(size, scale) for scale in [ScaleConstant, ScaleSize]]) or size is None:
pass
else:
raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
if any([isinstance(colour, scale) for scale in [ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour]]) or colour is None:
pass
else:
raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
if any([isinstance(shape, scale) for scale in [ScaleConstant, ScaleShape]]) or shape is None:
pass
else:
raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
if any([isinstance(alpha, scale) for scale in [ScaleConstant]]) or alpha is None:
pass
else:
raise ValueError('alpha mapping should be done through ScaleConstant')
return {
'x' : x,
'y' : y,
'size' : size,
'colour' : colour,
'shape' : shape,
'alpha' : alpha,
}
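# Hedged illustration (not part of the original module): a typical aesthetics
# binding built with make_aes. The column names 'species', 'height' and 'weight'
# are placeholders, not columns of any real DataFrame.
def _example_aes():
    return make_aes(x='height', y='weight',
                    colour=ScaleRandomColour('species'),
                    size=ScaleSize('weight'),
                    shape='o', alpha=0.7)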
class Layer:
"""
Layer object representing a single plot layer.
"""
def __init__(self, data=None, **kwds):
"""Initialize layer object.
Parameters:
-----------
data: pandas DataFrame instance
aes: aesthetics dictionary with bindings
"""
self.data = data
self.aes = make_aes(**kwds)
self.legend = {}
def work(self, fig=None, ax=None):
"""Do the drawing (usually) work.
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis object
Returns:
--------
a tuple with the same figure and axis instances
"""
return fig, ax
class GeomPoint(Layer):
def work(self, fig=None, ax=None):
"""Render the layer on a matplotlib axis.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
for index in range(len(self.data)):
row = self.data.irow(index)
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
colour_scaler = self.aes['colour']
shape_scaler = self.aes['shape']
alpha = self.aes['alpha']
size_value = size_scaler(self.data, index)
colour_value = colour_scaler(self.data, index)
marker_value = shape_scaler(self.data, index)
alpha_value = alpha(self.data, index)
patch = ax.scatter(x, y,
s=size_value,
c=colour_value,
marker=marker_value,
alpha=alpha_value)
label = []
if colour_scaler.categorical:
label += [colour_scaler.column, row[colour_scaler.column]]
if shape_scaler.categorical:
label += [shape_scaler.column, row[shape_scaler.column]]
self.legend[tuple(label)] = patch
ax.set_xlabel(self.aes['x'])
ax.set_ylabel(self.aes['y'])
return fig, ax
class GeomPolyFit(Layer):
"""
Draw a polynomial fit of specified degree.
"""
def __init__(self, degree, lw=2.0, colour='grey'):
"""Initialize GeomPolyFit object.
Parameters:
-----------
degree: an integer, polynomial degree
lw: line width
colour: matplotlib colour
"""
self.degree = degree
self.lw = lw
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw the polynomial fit on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from numpy.polynomial.polynomial import polyfit
from numpy.polynomial.polynomial import polyval
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
min_x = min(x)
max_x = max(x)
c = polyfit(x, y, self.degree)
x_ = np.linspace(min_x, max_x, len(x))
y_ = polyval(x_, c)
ax.plot(x_, y_, lw=self.lw, c=self.colour)
return fig, ax
class GeomScatter(Layer):
"""
An efficient scatter plot, use this instead of GeomPoint for speed.
"""
def __init__(self, marker='o', colour='lightblue', alpha=1.0):
"""Initialize GeomScatter instance.
Parameters:
-----------
marker: matplotlib marker string
colour: matplotlib colour
alpha: matplotlib alpha
"""
self.marker = marker
self.colour = colour
self.alpha = alpha
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a scatter plot on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
ax.scatter(x, y, marker=self.marker, c=self.colour, alpha=self.alpha)
return fig, ax
class GeomHistogram(Layer):
"""
An efficient histogram, use this instead of GeomBar for speed.
"""
def __init__(self, bins=10, colour='lightblue'):
"""Initialize GeomHistogram instance.
Parameters:
-----------
bins: integer, number of histogram bins
colour: matplotlib colour
"""
self.bins = bins
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a histogram on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
ax.hist(_values_from_object(x), self.bins, facecolor=self.colour)
ax.set_xlabel(self.aes['x'])
return fig, ax
class GeomDensity(Layer):
"""
A kernel density estimation plot.
"""
def work(self, fig=None, ax=None):
"""Draw a one dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from scipy.stats import gaussian_kde
x = self.data[self.aes['x']]
gkde = gaussian_kde(x)
ind = np.linspace(x.min(), x.max(), 200)
ax.plot(ind, gkde.evaluate(ind))
return fig, ax
class GeomDensity2D(Layer):
def work(self, fig=None, ax=None):
"""Draw a two dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
import scipy.stats as stats
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
return fig, ax
class TrellisGrid(Layer):
def __init__(self, by):
"""Initialize TreelisGrid instance.
Parameters:
-----------
by: column names to group by
"""
if len(by) != 2:
raise ValueError("You must give a list of length 2 to group by")
elif by[0] == '.' and by[1] == '.':
raise ValueError("At least one of grouping attributes must be not a dot")
self.by = by
def trellis(self, layers):
"""Create a trellis structure for a list of layers.
Each layer will be cloned with different data in to a two dimensional grid.
Parameters:
-----------
layers: a list of Layer objects
Returns:
--------
        trellised_layers: Clones of each layer in the list arranged in a trellised lattice
"""
trellised_layers = []
for layer in layers:
data = layer.data
if self.by[0] == '.':
grouped = data.groupby(self.by[1])
elif self.by[1] == '.':
grouped = data.groupby(self.by[0])
else:
grouped = data.groupby(self.by)
groups = list(grouped.groups.keys())
if self.by[0] == '.' or self.by[1] == '.':
shingle1 = set([g for g in groups])
else:
shingle1 = set([g[0] for g in groups])
shingle2 = set([g[1] for g in groups])
if self.by[0] == '.':
self.rows = 1
self.cols = len(shingle1)
elif self.by[1] == '.':
self.rows = len(shingle1)
self.cols = 1
else:
self.rows = len(shingle1)
self.cols = len(shingle2)
trellised = [[None for _ in range(self.cols)] for _ in range(self.rows)]
self.group_grid = [[None for _ in range(self.cols)] for _ in range(self.rows)]
row = 0
col = 0
for group, data in grouped:
new_layer = deepcopy(layer)
new_layer.data = data
trellised[row][col] = new_layer
self.group_grid[row][col] = group
col += 1
if col >= self.cols:
col = 0
row += 1
trellised_layers.append(trellised)
return trellised_layers
def dictionary_union(dict1, dict2):
"""Take two dictionaries, return dictionary union.
Parameters:
-----------
dict1: Python dictionary
dict2: Python dictionary
Returns:
--------
A union of the dictionaries. It assumes that values
with the same keys are identical.
"""
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
result = {}
for key1 in keys1:
result[key1] = dict1[key1]
for key2 in keys2:
result[key2] = dict2[key2]
return result
def merge_aes(layer1, layer2):
"""Merges the aesthetics dictionaries for the two layers.
    See the sequence_layers function; the order in which the two layers
    are given matters.
Parameters:
-----------
layer1: Layer object
layer2: Layer object
"""
for key in layer2.aes.keys():
if layer2.aes[key] is None:
layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
"""Go through the list of layers and fill in the missing bits of information.
The basic rules are this:
* If the current layer has data set to None, take the data from previous layer.
* For each aesthetic mapping, if that mapping is set to None, take it from previous layer.
Parameters:
-----------
layers: a list of Layer objects
"""
for layer1, layer2 in zip(layers[:-1], layers[1:]):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layers
def sequence_grids(layer_grids):
"""Go through the list of layer girds and perform the same thing as sequence_layers.
Parameters:
-----------
layer_grids: a list of two dimensional layer grids
"""
for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]):
for row1, row2 in zip(grid1, grid2):
for layer1, layer2 in zip(row1, row2):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layer_grids
def work_grid(grid, fig):
"""Take a two dimensional grid, add subplots to a figure for each cell and do layer work.
Parameters:
-----------
grid: a two dimensional grid of layers
fig: matplotlib figure to draw on
Returns:
--------
axes: a two dimensional list of matplotlib axes
"""
nrows = len(grid)
ncols = len(grid[0])
axes = [[None for _ in range(ncols)] for _ in range(nrows)]
for row in range(nrows):
for col in range(ncols):
axes[row][col] = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
grid[row][col].work(ax=axes[row][col])
return axes
def adjust_subplots(fig, axes, trellis, layers):
"""Adjust the subtplots on matplotlib figure with the
fact that we have a trellis plot in mind.
Parameters:
-----------
fig: matplotlib figure
axes: a two dimensional grid of matplotlib axes
trellis: TrellisGrid object
layers: last grid of layers in the plot
"""
# Flatten the axes grid
axes = [ax for row in axes for ax in row]
min_x = min([ax.get_xlim()[0] for ax in axes])
max_x = max([ax.get_xlim()[1] for ax in axes])
min_y = min([ax.get_ylim()[0] for ax in axes])
max_y = max([ax.get_ylim()[1] for ax in axes])
[ax.set_xlim(min_x, max_x) for ax in axes]
[ax.set_ylim(min_y, max_y) for ax in axes]
for index, axis in enumerate(axes):
if index % trellis.cols == 0:
pass
else:
axis.get_yaxis().set_ticks([])
axis.set_ylabel('')
        if index // trellis.cols == trellis.rows - 1:
pass
else:
axis.get_xaxis().set_ticks([])
axis.set_xlabel('')
if trellis.by[0] == '.':
label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
elif trellis.by[1] == '.':
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
else:
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
if label2 is not None:
axis.table(cellText=[[label1], [label2]],
loc='top', cellLoc='center',
cellColours=[['lightgrey'], ['lightgrey']])
else:
axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
# Flatten the layer grid
layers = [layer for row in layers for layer in row]
legend = {}
for layer in layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
"""
The main plot object. Add layers to an instance of this object to create a plot.
"""
def __init__(self, data, x=None, y=None):
"""Initialize RPlot instance.
Parameters:
-----------
data: pandas DataFrame instance
x: string, DataFrame column name
y: string, DataFrame column name
"""
self.layers = [Layer(data, **default_aes(x=x, y=y))]
        self.trellised = False
def add(self, layer):
"""Add a layer to RPlot instance.
Parameters:
-----------
layer: Layer instance
"""
if not isinstance(layer, Layer):
raise TypeError("The operand on the right side of + must be a Layer instance")
self.layers.append(layer)
def render(self, fig=None):
"""Render all the layers on a matplotlib figure.
Parameters:
-----------
fig: matplotlib figure
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
# Look for the last TrellisGrid instance in the layer list
last_trellis = None
for layer in self.layers:
if isinstance(layer, TrellisGrid):
last_trellis = layer
if last_trellis is None:
# We have a simple, non-trellised plot
new_layers = sequence_layers(self.layers)
for layer in new_layers:
layer.work(fig=fig)
legend = {}
for layer in new_layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
                    raise ValueError("Maximum 2 categorical attributes to display a legend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
else:
# We have a trellised plot.
# First let's remove all other TrellisGrid instances from the layer list,
# including this one.
new_layers = []
for layer in self.layers:
if not isinstance(layer, TrellisGrid):
new_layers.append(layer)
new_layers = sequence_layers(new_layers)
# Now replace the old layers by their trellised versions
new_layers = last_trellis.trellis(new_layers)
# Prepare the subplots and draw on them
new_layers = sequence_grids(new_layers)
axes_grids = [work_grid(grid, fig) for grid in new_layers]
axes_grid = axes_grids[-1]
adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
# And we're done
return fig
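# Hedged usage sketch (not part of the original module): compose a plot the way
# the docstrings above describe, a DataFrame plus layers, optionally trellised.
# The DataFrame columns 'height', 'weight' and 'sex' are placeholders.
def _example_rplot(df):
    import matplotlib.pyplot as plt
    plot = RPlot(df, x='height', y='weight')
    plot.add(TrellisGrid(['sex', '.']))
    plot.add(GeomPoint(colour=ScaleRandomColour('sex'), size=ScaleSize('weight')))
    fig = plt.figure()
    plot.render(fig)
    return fig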
|
gpl-3.0
|
ominux/scikit-learn
|
examples/plot_digits_classification.py
|
2
|
2115
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <getting_started>`.
"""
print __doc__
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 3 images, stored in the `images`
# attribute of the dataset. If we were working from image files, we
# could load them using pylab.imread. For these images, we know which
# digit they represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC()
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples/2], digits.target[:n_samples/2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples/2:]
predicted = classifier.predict(data[n_samples/2:])
print "Classification report for classifier %s:\n%s\n" % (
classifier, metrics.classification_report(expected, predicted))
print "Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)
for index, (image, prediction) in enumerate(
zip(digits.images[n_samples/2:], predicted)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
bsd-3-clause
|
466152112/scikit-learn
|
sklearn/feature_selection/__init__.py
|
244
|
1088
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
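# Hedged usage sketch (illustrative only, not part of the public API of this
# module): a typical univariate selection call using the names exported above.
def _example_select_k_best():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    return SelectKBest(f_classif, k=5).fit_transform(X, y)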
|
bsd-3-clause
|
ccasotto/rmtk
|
rmtk/vulnerability/common/utils.py
|
1
|
76014
|
# -*- coding: utf-8 -*-
import os
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate, optimize, stats
from shapely.geometry import LineString, MultiPoint, Point
def read_capacity_curves(input_file):
    # This function reads one capacity curve, or a set of capacity curves
with open(input_file, 'rU') as f:
data = csv.reader(f)
for line in data:
if line[0] == 'Vb-droof' and line[1] == 'TRUE':
capacity_curves = read_Vbdroof_capacity_curves(data)
break
elif line[0] == 'Vb-dfloor' and line[1] == 'TRUE':
capacity_curves = read_Vbdfloor_capacity_curves(data)
break
elif line[0] == 'Sd-Sa' and line[1] == 'TRUE':
capacity_curves = read_SdSa_capacity_curves(data)
break
return capacity_curves
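# Hedged illustration (not from the original package): the dispatch above keys on
# a small header block at the top of the input CSV. Judging from the writer
# save_SdSa_capacity_curves further down, an Sd-Sa input file starts roughly like
# the sample below (numeric values are placeholders only).
_EXAMPLE_SDSA_CSV = """\
Vb-droof,FALSE
Vb-dfloor,FALSE
Sd-Sa,TRUE
Periods [s],0.5,0.7
Heights [m],9,12
Gamma participation factors,1.3,1.3
Sdy [m],0.05,0.07
Say [g],0.25,0.20
Sd1 [m],0,0.05,0.10
Sa1 [g],0,0.25,0.30
"""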
def read_Vbdfloor_capacity_curves(data):
# This function reads Vb-floor displacements type of capacity curves
idealised = 'FALSE'
periods = []
ground_heights = []
regular_heights = []
gammas = []
no_storeys = []
weights = []
Vb = []
d_floor = []
d_roof = []
id_floor = []
M_star = []
for line in data:
if line[0] == 'Idealised':
idealised = line[1]
if line[0] == 'Periods [s]':
for value in line[1:]:
if isNumber(value):
periods.append(float(value))
if line[0] == 'Ground heights [m]':
for value in line[1:]:
if isNumber(value):
ground_heights.append(float(value))
if line[0] == 'Regular heights [m]':
for value in line[1:]:
if isNumber(value):
regular_heights.append(float(value))
if line[0] == 'Gamma participation factors':
for value in line[1:]:
if isNumber(value):
gammas.append(float(value))
if line[0] == 'Effective modal masses [ton]':
for value in line[1:]:
if isNumber(value):
M_star.append(float(value))
if line[0] == 'Number storeys':
for value in line[1:]:
if isNumber(value):
no_storeys.append(int(value))
if line[0] == 'Weights':
for value in line[1:]:
if isNumber(value):
weights.append(float(value))
if line[0][0:6] == 'dfloor':
subd_floor = []
for value in line[1:]:
if isNumber(value):
subd_floor.append(float(value))
id_floor.append(subd_floor)
if len(id_floor) == no_storeys[-1]:
d_floor.append(id_floor)
d_roof.append(subd_floor)
id_floor = []
if line[0][0:2] == 'Vb':
subVb = []
for value in line[1:]:
if isNumber(value):
subVb.append(float(value))
Vb.append(subVb)
if not weights:
weights = np.ones_like(periods)
average_period = np.average(np.array(periods), weights=np.array(weights))
# Store all the data in the dictionary
capacity_curves = {'type': None, 'idealised': None, 'periods': None,
'mean_period': None, 'ground_heights': None,
'regular_heights': None, 'gamma': None, 'modal_mass': None,
                       'no_storeys': None, 'weights': None, 'dfloor': None,
'droof': None, 'Vb': None}
capacity_curves['type'] = 'Vb-dfloor'
    capacity_curves['idealised'] = idealised
capacity_curves['periods'] = periods
capacity_curves['mean_period'] = average_period
capacity_curves['ground_heights'] = ground_heights
capacity_curves['regular_heights'] = regular_heights
capacity_curves['gamma'] = gammas
capacity_curves['modal_mass'] = M_star
capacity_curves['no_storeys'] = no_storeys
capacity_curves['weights'] = weights
capacity_curves['dfloor'] = d_floor
capacity_curves['droof'] = d_roof
capacity_curves['Vb'] = Vb
return capacity_curves
def read_Vbdroof_capacity_curves(data):
# This function reads Vb-droof type of capacity curves
idealised = 'FALSE'
periods = []
ground_heights = []
regular_heights = []
gammas = []
no_storeys = []
weights = []
Vb = []
d_roof = []
M_star = []
for line in data:
if line[0] == 'Idealised':
idealised = line[1]
if line[0] == 'Periods [s]':
for value in line[1:]:
if isNumber(value):
periods.append(float(value))
if line[0] == 'Ground heights [m]':
for value in line[1:]:
if isNumber(value):
ground_heights.append(float(value))
if line[0] == 'Regular heights [m]':
for value in line[1:]:
if isNumber(value):
regular_heights.append(float(value))
if line[0] == 'Gamma participation factors':
for value in line[1:]:
if isNumber(value):
gammas.append(float(value))
if line[0] == 'Effective modal masses [ton]':
for value in line[1:]:
if isNumber(value):
M_star.append(float(value))
if line[0] == 'Number storeys':
for value in line[1:]:
if isNumber(value):
no_storeys.append(int(value))
if line[0] == 'Weights':
for value in line[1:]:
if isNumber(value):
weights.append(float(value))
if line[0][0:5] == 'droof':
subd_roof = []
for value in line[1:]:
if isNumber(value):
subd_roof.append(float(value))
d_roof.append(subd_roof)
if line[0][0:2] == 'Vb' and isNumber(line[0][2]):
subVb = []
for value in line[1:]:
if isNumber(value):
subVb.append(float(value))
Vb.append(subVb)
if not weights:
weights = np.ones_like(periods)
average_period = np.average(np.array(periods), weights = np.array(weights))
# Store all the data in the dictionary
capacity_curves = {'type': None, 'idealised': None, 'periods': None,
'ground_heights': None, 'regular_heights': None,
'gamma': None, 'no_storeys': None, 'modal_mass': None,
'weights': None,
'droof': None, 'Vb': None}
capacity_curves['type'] = 'Vb-droof'
capacity_curves['idealised'] = idealised
capacity_curves['periods'] = periods
capacity_curves['mean_period'] = average_period
capacity_curves['ground_heights'] = ground_heights
capacity_curves['regular_heights'] = regular_heights
capacity_curves['gamma'] = gammas
capacity_curves['modal_mass'] = M_star
capacity_curves['no_storeys'] = no_storeys
capacity_curves['weights'] = weights
capacity_curves['droof'] = d_roof
capacity_curves['Vb'] = Vb
return capacity_curves
def read_SdSa_capacity_curves(data):
# This function reads Sd-Sa type of capacity curves
periods = []
heights = []
gammas = []
Sdy = []
Say = []
Sd = []
Sa = []
for line in data:
if line[0] == 'Periods [s]':
for value in line[1:]:
if isNumber(value):
periods.append(float(value))
if line[0] == 'Heights [m]':
for value in line[1:]:
if isNumber(value):
heights.append(float(value))
if line[0] == 'Gamma participation factors':
for value in line[1:]:
if isNumber(value):
gammas.append(float(value))
if line[0] == 'Sdy [m]':
for value in line[1:]:
if isNumber(value):
Sdy.append(float(value))
if line[0] == 'Say [g]':
for value in line[1:]:
if isNumber(value):
Say.append(float(value))
if line[0][0:2] == 'Sd' and isNumber(line[0][2]):
subSd = []
for value in line[1:]:
if isNumber(value):
subSd.append(float(value))
Sd.append(subSd)
if line[0][0:2] == 'Sa' and isNumber(line[0][2]):
subSa = []
for value in line[1:]:
if isNumber(value):
subSa.append(float(value))
Sa.append(subSa)
# Store all the data in the dictionary
capacity_curves = {'type': None, 'periods': None, 'heights': None,
'gamma': None, 'Sdy': None, 'Say': None, 'Sd': None, 'Sa': None}
capacity_curves['type'] = 'Sd-Sa'
capacity_curves['periods'] = periods
capacity_curves['heights'] = heights
capacity_curves['gamma'] = gammas
capacity_curves['Sdy'] = Sdy
capacity_curves['Say'] = Say
capacity_curves['Sd'] = Sd
capacity_curves['Sa'] = Sa
return capacity_curves
def save_capacity_curves(capacity_curves, filename):
if capacity_curves['type'] == 'Sd-Sa':
save_SdSa_capacity_curves(capacity_curves, filename)
def save_SdSa_capacity_curves(capacity_curves, filename):
no_capacity_curves = len(capacity_curves['Sd'])
output = open(filename, 'w')
output.write('Vb-droof,FALSE\n')
output.write('Vb-dfloor,FALSE\n')
output.write('Sd-Sa,TRUE\n')
periods = 'Periods [s]'
heights = 'Heights [m]'
gammas = 'Gamma participation factors'
Sdy = 'Sdy [m]'
Say = 'Say [g]'
for icc in range(no_capacity_curves):
        if capacity_curves['periods'] is not None:
            periods = periods + ',' + str(capacity_curves['periods'][icc])
        if capacity_curves['heights'] is not None:
            heights = heights + ',' + str(capacity_curves['heights'][icc])
        if capacity_curves['gamma'] is not None:
            gammas = gammas + ',' + str(capacity_curves['gamma'][icc])
        if capacity_curves['Sdy'] is not None:
            Sdy = Sdy + ',' + str(capacity_curves['Sdy'][icc])
        if capacity_curves['Say'] is not None:
            Say = Say + ',' + str(capacity_curves['Say'][icc])
output.write(periods + '\n')
output.write(heights + '\n')
output.write(gammas + '\n')
output.write(Sdy + '\n')
output.write(Say + '\n')
for icc in range(no_capacity_curves):
Sd = 'Sd'+str(icc+1)+' [m]'
Sa = 'Sa'+str(icc+1)+' [g]'
for ivalue in range(len(capacity_curves['Sd'][icc])):
Sd = Sd + ',' + str(capacity_curves['Sd'][icc][ivalue])
Sa = Sa + ',' + str(capacity_curves['Sa'][icc][ivalue])
output.write(Sd + '\n')
output.write(Sa + '\n')
output.close()
def plot_capacity_curves(capacity_curves):
#This function plots the capacity curves
if capacity_curves['type'] == 'Sd-Sa':
no_capacity_curves = len(capacity_curves['Sa'])
for icc in range(no_capacity_curves):
Sa = capacity_curves['Sa'][icc]
Sd = capacity_curves['Sd'][icc]
plt.plot(Sd, Sa, color = 'g', linewidth = 2)
plt.plot(Sd, Sa, color = 'g', linewidth = 2,
label = 'individual capacity curve')
plt.xlabel('Spectral displacement [m]', fontsize = 10)
plt.ylabel('Spectral acceleration [g]', fontsize = 10)
elif capacity_curves['type'] == 'Vb-dfloor' or capacity_curves['type'] == 'Vb-droof':
no_capacity_curves = len(capacity_curves['Vb'])
for icc in range(no_capacity_curves):
Vb = capacity_curves['Vb'][icc]
droof = capacity_curves['droof'][icc]
plt.plot(droof, Vb, color = 'b', linewidth = 2)
plt.plot(droof, Vb, color = 'b', linewidth = 2,
label = 'individual capacity curve')
plt.xlabel('Roof displacement [m]', fontsize = 10)
plt.ylabel('Base shear [kN]', fontsize = 10)
plt.suptitle('Capacity curves')
plt.legend(loc = 'lower right', frameon = False)
plt.show()
def plot_idealised_capacity(idealised_capacity, capacity_curves, idealised_type):
#This function plots the capacity curves
no_capacity_curves = len(capacity_curves['periods'])
if idealised_type == 'bilinear':
for icc in range(no_capacity_curves):
if capacity_curves['type']== 'Vb-droof' or capacity_curves['type']== 'Vb-dfloor':
droof = capacity_curves['droof'][icc]
Vb = capacity_curves['Vb'][icc]
else:
droof = capacity_curves['Sd'][icc]
Vb = capacity_curves['Sa'][icc]
Vb_idealised = [0, idealised_capacity[icc][2], idealised_capacity[icc][2]]
droof_idealised = [0, idealised_capacity[icc][0], idealised_capacity[icc][1]]
plt.plot(droof, Vb, color = 'g', linewidth = 2)
plt.plot(droof_idealised, Vb_idealised, color = 'r', linewidth = 2)
plt.plot(droof, Vb, color = 'g', linewidth = 2, label = 'capacity curve')
plt.plot(droof_idealised, Vb_idealised, color = 'r', linewidth = 2,
label = 'idealised capacity curve')
plt.xlabel('Roof displacement [m]', fontsize = 10)
plt.ylabel('Base shear [kN]', fontsize = 10)
else:
for icc in range(no_capacity_curves):
if capacity_curves['type']== 'Vb-droof' or capacity_curves['type']== 'Vb-dfloor':
droof = capacity_curves['droof'][icc]
Vb = capacity_curves['Vb'][icc]
else:
droof = capacity_curves['Sd'][icc]
Vb = capacity_curves['Sa'][icc]
Vb_idealised = idealised_capacity[icc][4:]
Vb_idealised.insert(0, 0)
Vb_idealised.append(idealised_capacity[icc][-1])
droof_idealised = idealised_capacity[icc][0:4]
droof_idealised.insert(0, 0)
plt.plot(droof, Vb, color = 'g', linewidth = 2)
plt.plot(droof_idealised, Vb_idealised, color = 'r', linewidth = 2)
plt.plot(droof, Vb, color = 'g', linewidth = 2, label = 'capacity curve')
plt.plot(droof_idealised, Vb_idealised, color = 'r', linewidth = 2,
label = 'idealised capacity curve')
plt.xlabel('Roof displacement [m]', fontsize = 10)
plt.ylabel('Base shear [kN]', fontsize = 10)
plt.suptitle('Capacity curves')
plt.legend(loc = 'lower right', frameon = False)
plt.show()
def read_gmrs(folder):
#This function reads a set of ground motion records
#and stores them in a dictionary
time = []
acc = []
dt = []
no_points = []
name = []
for f in os.listdir(folder):
if f.endswith(".csv"):
itime, iacc = read_gmr(folder, f)
time.append(itime)
acc.append(iacc)
dt.append(itime[1] - itime[0])
no_points.append(len(iacc))
name.append(f)
gmrs = {'time': None, 'acc': None, 'dt': None,
'no_points': None, 'name': None}
gmrs['time'] = time
gmrs['acc'] = acc
gmrs['dt'] = dt
gmrs['no_points'] = no_points
gmrs['name'] = name
return gmrs
def read_gmr(folder, gmr):
time, acc = [], []
with open(folder + '/' + gmr) as f:
for line in f.readlines():
line = line.split(',')
time.append(float(line[0]))
acc.append(float(line[1])*9.81)
return time, acc
def evaluate_optimal_IM(gmrs,PDM,minT,maxT,stepT,damage_model,damping_ratio,method):
T = np.arange(minT, maxT,stepT)
setRsquare = []
for it in range(len(T)):
print str((it+1)*100/len(T))+'%'
fragility_model = calculate_mean_fragility(gmrs, PDM, T[it], damping_ratio, 'Sa', damage_model, method)
setRsquare.append(fragility_model['Rsquare'])
meanRsquare = np.mean(np.array(setRsquare),axis=1)
bestT = T[np.argmax(meanRsquare)]
print 'The best damage-intensity correlation was achieved for Sa at T='+str(bestT)+'s'
plot_correlation(T,setRsquare,meanRsquare,damage_model,bestT)
def plot_correlation(T,setRsquare,meanRsquare,damage_model,bestT):
setRsquare = np.array(setRsquare)
color_scheme = ['g', 'b', 'y', 'orangered', 'r', 'k', 'm', 'c', '0.5', '0.75']
for iDS in range(len(damage_model['damage_states'])):
plt.plot(T,setRsquare[:,iDS],color=color_scheme[iDS],linewidth=2,label=damage_model['damage_states'][iDS])
plt.plot(T,meanRsquare,color='k',linewidth=2,linestyle='dashed',label='Mean correlation')
plt.xlabel('T (s)', fontsize = 12)
plt.ylabel('Coefficient of correlation - R2', fontsize = 12)
plt.legend(loc=4)
plt.legend(frameon=False)
plt.show()
def calculate_correlation(logmeans,logstddev,cumPDM,imls):
Rsquare = []
for iDS in range(len(logmeans)):
sigma = logstddev[iDS]
mu = logmeans[iDS]
        expected = stats.lognorm.cdf(imls, sigma, scale=math.exp(mu))
observed = cumPDM[iDS+1,:]
R = np.corrcoef(observed,expected)
Rsquare.append(R[0][1]**2)
return Rsquare
def plot_response_spectra(gmrs, minT, maxT):
no_gmrs = len(gmrs['time'])
damping = 0.05
T = np.linspace(minT, maxT, 50)
plt.figure(figsize = (15, 5))
for igmr in range(no_gmrs):
acc = gmrs['acc'][igmr]
time = gmrs['time'][igmr]
spectrum = NigamJennings(time, acc, T, damping)
# spectrum = NewmarkBeta(time, acc, T, damping)
plt.subplot(1, 3, 1)
plt.plot(T, spectrum['Sa'], color = 'b', linewidth = 2)
plt.subplot(1, 3, 2)
plt.plot(T, spectrum['Sd'], color = 'g', linewidth = 2)
plt.subplot(1, 3, 3)
plt.plot(spectrum['Sd'], spectrum['Sa'], color = 'r', linewidth = 2)
plt.subplot(1, 3, 1)
plt.xlabel('Periods of vibration (sec)', fontsize = 10)
plt.ylabel('Spectral acceleration (g)', fontsize = 10)
plt.subplot(1, 3, 2)
plt.xlabel('Periods of vibration (sec)', fontsize = 10)
plt.ylabel('Spectral displacement (m)', fontsize = 10)
plt.subplot(1, 3, 3)
plt.xlabel('Spectral displacement (m)', fontsize = 10)
plt.ylabel('Spectral acceleration (g)', fontsize = 10)
plt.show()
def NewmarkBeta(time, acc, T, damping):
u0 = 0
v0 = 0
dt = time[1]-time[0]
no_acc = len(acc)
no_T = len(T)
M = 1
Sd = np.zeros(no_T)
Sa = np.zeros(no_T)
u = np.zeros(no_acc)
a = np.zeros(no_acc)
v = np.zeros(no_acc)
at = np.zeros(no_acc)
for i in range(no_T):
if T[i] == 0:
Sd[i] = 0
Sa[i] = max(abs(np.array(acc)))
else:
wn = 2*math.pi/T[i]
C = damping*2*M*wn
K = ((2*math.pi)/T[i])**2*M
u[0] = u0
v[0] = v0
a[0] = -acc[0]-C*v[0]-K*u[0]
at[0] = acc[0]+a[0]
for j in range(no_acc-1):
u[j+1] = u[j] + dt*v[j] + dt**2 / 2*a[j]
a[j+1] = (1/(M+dt*0.5*C)) * (-M*acc[j+1] - K*u[j+1] - C*(v[j]+dt*0.5*a[j]))
v[j+1] = v[j] + dt*(0.5*a[j] + 0.5*a[j+1])
at[j+1] = acc[j+1] + a[j+1]
Sd[i] = max(abs(u))
Sa[i] = max(abs(at))/9.81
spectrum = {'Sd': None, 'Sa': None}
spectrum['Sd'] = Sd
spectrum['Sa'] = Sa
return spectrum
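# Illustrative sketch (not part of the original module): computing a response
# spectrum with NewmarkBeta for a synthetic 2 Hz sine acceleration record.
# The duration, amplitude and period grid below are arbitrary assumptions.
def _example_newmark_beta_spectrum():
    time = np.arange(0.0, 10.0, 0.01).tolist()
    acc = [2.0*math.sin(2.0*math.pi*2.0*t) for t in time]   # acceleration in m/s2
    periods = np.arange(0.05, 2.05, 0.05)
    spectrum = NewmarkBeta(time, acc, periods, 0.05)
    return spectrum['Sd'], spectrum['Sa']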
def isNumber(s):
try:
float(s)
return True
except ValueError:
return False
def read_damage_model(input_file):
damage_states = []
type_damage_state = []
distribution = []
mean = []
cov = []
median = []
dispersion = []
with open(input_file) as f:
data = f.readlines()
line = data[0]
line = line.strip().split(',')
type_criteria = line[1]
with open(input_file, 'rU') as f:
data = [row for row in csv.reader(f)]
if (type_criteria == 'interstorey drift'):
if data[2][1]== 'TRUE':
path_to_deformed_shape = data[2][2]
index = input_file.rfind('/')
entire_path_to_deformed_shape = input_file[0:index+1]+path_to_deformed_shape
else:
entire_path_to_deformed_shape = 'none'
damage_states = [row[0] for row in data[3:]]
distribution = [row[1] for row in data[3:]]
no_capacity = (len(data[3])-2)/2
for icc in range(0, no_capacity):
submedian = []
subdispersion = []
for iline in data[3:]:
submedian.append(float(iline[icc*2+2]))
subdispersion.append(float(iline[icc*2+3]))
median.append(submedian)
dispersion.append(subdispersion)
damage_model = {'type_criteria': None, 'damage_states': None,
'median': None, 'dispersion': None}
damage_model['type_criteria'] = type_criteria
damage_model['damage_states'] = damage_states
damage_model['distribution'] = distribution
damage_model['median'] = median
damage_model['dispersion'] = dispersion
damage_model['deformed shape'] = entire_path_to_deformed_shape
else:
for iline in range(len(data)-2):
line = data[iline+2]
damage_states.append(line[0])
if type_criteria == 'capacity curve dependent':
print line
type_damage_state.append(line[1])
distribution.append(line[2])
                if isNumber(line[3]):
                    mean.append(float(line[3]))
                else:
                    mean.append(line[3])
cov.append(float(line[4]))
if type_criteria == 'strain dependent':
type_damage_state.append(line[1])
distribution.append(line[2])
mean.append(extract_values_string(line[3]))
cov.append(extract_values_string(line[4]))
if type_criteria == 'spectral displacement':
distribution.append(line[1])
no_capacity = (len(line)-2)/2
mean.append(float(line[2]))
cov.append(float(line[3]))
damage_model = {'type_criteria': None, 'damage_states': None,
'type_damage_state': None, 'mean': None, 'cov': None}
damage_model['type_criteria'] = type_criteria
damage_model['damage_states'] = damage_states
damage_model['type_damage_state'] = type_damage_state
damage_model['distribution'] = distribution
damage_model['mean'] = mean
damage_model['cov'] = cov
return damage_model
def extract_values_string(string):
string = string.split(' ')
values = []
if isNumber(string[0]):
for value in string:
values.append(float(value))
return values
def define_limit_states(capacity_curves, icc, damage_model):
no_damage_states = len(damage_model['damage_states'])
limit_states = []
assert(damage_model['type_criteria'] != 'strain dependent'), 'Strain dependent damage model cannot be used in this methodology'
if damage_model['type_criteria'] == 'capacity curve dependent':
Sd = capacity_curves['Sd'][icc]
Sa = capacity_curves['Sa'][icc]
assert(capacity_curves['Sdy'] != []), 'If you want to use capacity curve dependent damage model you should provide Sdy, Say in capacity curve input file'
Sdy = capacity_curves['Sdy'][icc]
Say = capacity_curves['Say'][icc]
for ids in range(no_damage_states):
type_damage_state = damage_model['type_damage_state'][ids]
distribution = damage_model['distribution'][ids]
mean = damage_model['mean'][ids]
cov = damage_model['cov'][ids]
limit_states.append(define_limit_state(Sd, Sa, Sdy, Say,
type_damage_state,
distribution, mean, cov))
elif damage_model['type_criteria'] == 'spectral displacement':
for ids in range(no_damage_states):
mean = damage_model['mean'][ids]
cov = damage_model['cov'][ids]
limit_states.append(sample_value(damage_model['distribution'][ids], mean, cov,0, float('inf')))
elif damage_model['type_criteria'] == 'interstorey drift':
if len(damage_model['median'])==1:
EDPlim = damage_model['median'][0]
bUthd = damage_model['dispersion'][0]
else:
EDPlim = damage_model['median'][icc]
bUthd = damage_model['dispersion'][icc]
[ISDvec, Sdvec] = read_deformed_shape(damage_model, capacity_curves,icc)
[ISDvec,indy] = np.unique(ISDvec,return_index=True);
Sdvec=Sdvec[indy];
duf = capacity_curves['Sd'][icc][-1]
for ids in range(no_damage_states):
if bUthd[ids]==0: EDPsample = EDPlim[ids]
else: EDPsample = stats.lognorm.rvs(bUthd[ids], scale = EDPlim[ids])
Sdlim = np.interp(EDPsample,ISDvec,Sdvec);
if Sdlim>duf: Sdlim = duf
limit_states.append(Sdlim)
#print limit_states
return limit_states
def define_limit_state(Sd, Sa, Sdy, Say, type_damage_state, distribution, mean, cov):
if type_damage_state == 'Sdy':
mean = Sdy
limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
elif type_damage_state == 'Sdu':
mean = max(Sd)
limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
elif type_damage_state == 'fraction Sdy':
mean = mean*Sdy
limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
    elif type_damage_state == 'fraction Sdu':
        # Sdu is taken here as the ultimate spectral displacement, i.e. max(Sd)
        mean = mean*max(Sd)
        limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
elif type_damage_state == 'mean Sdy Sdu':
mean = (Sdy+max(Sd))/2
limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
elif isNumber(type_damage_state[0]):
values = type_damage_state.split()
weight1 = float(values[0])
weight2 = float(values[2])
mean = (weight1*Sdy + weight2*max(Sd))/(weight1+weight2)
limit_state = sample_value(distribution, mean, cov, 0, float('inf'))
return limit_state
def sample_value(distribution, mean, cov, A, B):
if cov == 0:
result = mean
else:
result = float('-inf')
while result <= A or result > B:
if distribution == 'normal':
result = stats.norm.rvs(mean, mean*cov)
elif distribution == 'lognormal':
variance = (mean*cov)**2.0
mu = math.log(mean ** 2.0 / math.sqrt(variance + mean ** 2.0) )
sigma = math.sqrt(math.log((variance / mean ** 2.0) + 1.0))
result = stats.lognorm.rvs(sigma, scale = math.exp(mu))
elif distribution == 'gamma':
beta = (mean*cov)**2/mean
alpha = mean/beta
result = stats.gamma.rvs(alpha, scale = beta)
return result
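# Illustrative sketch (not part of the original module): drawing a displacement
# limit state from a lognormal distribution with mean 0.10 m and a coefficient
# of variation of 30%, truncated to positive values. The numbers are arbitrary.
def _example_sample_limit_state():
    return sample_value('lognormal', 0.10, 0.30, 0, float('inf'))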
def find_intersection(list1, list2, plot_flag):
line1 = []
for i in range(len(list1[0])):
line1.append([list1[0][i], list1[1][i]])
line2 = []
for i in range(len(list2[0])):
line2.append([list2[0][i], list2[1][i]])
curve1 = LineString(line1)
curve2 = LineString(line2)
intersection = curve1.intersection(curve2)
Sdi = []
Sai = []
if not intersection.is_empty:
if isinstance(intersection, Point):
Sdi.append(intersection.x)
Sai.append(intersection.y)
elif isinstance(intersection, MultiPoint):
for points in intersection:
coords = points.coords
for xy in coords:
Sdi.append(xy[0])
Sai.append(xy[1])
if plot_flag:
plt.plot(list1[0], list1[1], color = 'r', linewidth = 2)
plt.plot(list2[0], list2[1], color = 'b', linewidth = 2)
plt.xlabel('Spectral displacement', fontsize = 10)
plt.ylabel('Spectral acceleration', fontsize = 10)
plt.plot(Sdi, Sai, 'ro', color = 'y')
plt.show()
return Sdi, Sai
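# Illustrative sketch (not part of the original module): intersecting a toy
# capacity curve with a straight demand line via find_intersection. It relies
# on the same shapely objects (LineString, Point, MultiPoint) used above.
def _example_find_intersection():
    capacity = [[0.00, 0.05, 0.10, 0.20], [0.00, 0.20, 0.25, 0.25]]   # [Sd, Sa]
    demand = [[0.00, 0.20], [0.01, 0.61]]                             # straight line
    Sdi, Sai = find_intersection(capacity, demand, False)
    return Sdi, Sai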
def spread(array, no_steps):
result = np.zeros((len(array)-1)*no_steps+1)
for i in range(len(array)-1):
result[i*no_steps] = array[i]
step = (array[i+1] - array[i]) / no_steps
for j in range(no_steps):
result[i*no_steps + j + 1] = array[i] + (j+1)*step
result[-1] = array[-1]
return result
def allocate_damage(igmr, PDM, disp, limitStates):
no_ls = len(limitStates)
#print PDM[igmr, :]
PDM[igmr, 0] = PDM[igmr, 0]+1
ds = 0
for ils in range(no_ls):
if disp > limitStates[no_ls - ils - 1]:
ds = no_ls-ils
PDM[igmr, ds] = PDM[igmr, ds]+1
PDM[igmr, 0] = PDM[igmr, 0]-1
break
#print disp
#print PDM[igmr, :]
return PDM, ds
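# Illustrative sketch (not part of the original module): allocating the damage
# state of a single analysis. Three limit states at 0.02, 0.05 and 0.10 m are
# assumed; a displacement of 0.06 m should land in damage state 2.
def _example_allocate_damage():
    PDM = np.zeros((1, 4))               # 1 record, no damage + 3 damage states
    limit_states = [0.02, 0.05, 0.10]
    PDM, ds = allocate_damage(0, PDM, 0.06, limit_states)
    return PDM, ds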
def residuals(coeffs, y, x):
    # Residuals of the observed fractions y against a lognormal CDF
    # evaluated at the intensity levels x
    res = y - stats.lognorm.cdf(x, coeffs[1], scale = math.exp(coeffs[0]))
    return res
def calculate_imls(gmrs, T, damping):
Sa = []
Sd = []
pga = []
HI = []
for igmr in range(len(gmrs['time'])):
time = gmrs['time'][igmr]
acc = gmrs['acc'][igmr]
try:
spectrum = NigamJennings(time, acc, [T], damping)
Sa.append(spectrum['Sa'][0])
Sd.append(spectrum['Sd'][0])
pga.append(max(abs(np.array(acc)))/9.81)
except (ValueError):
spectrum = NigamJennings(time, acc, T, damping)
dT = T[1]- T[0]
HI.append(sum(spectrum['PSv'])*dT)
return Sa, Sd, pga, HI
def calculate_iml(igmr, gmrs, IMT, T, damping):
time = gmrs['time'][igmr]
acc = gmrs['acc'][igmr]
try:
spectrum = NigamJennings(time, acc, [T], damping)
except (ValueError):
spectrum = NigamJennings(time, acc, T, damping)
if IMT == 'PGA':
iml = max(abs(np.array(acc)))
elif IMT == 'Sa':
if len(spectrum['Sa'])== 1: iml = spectrum['Sa'][0]
else: iml = spectrum['Sa']
elif IMT == 'Sd':
if len(spectrum['Sd'])== 1: iml = spectrum['Sd'][0]
else: iml = spectrum['Sd']
elif IMT == 'Sv':
if len(spectrum['Sv'])== 1: iml = spectrum['Sv'][0]
else: iml = spectrum['Sv']
elif IMT == 'PSv':
if len(spectrum['PSv'])== 1: iml = spectrum['PSv'][0]
else: iml = spectrum['PSv']
elif IMT == 'HI':
# computing Housner Intensity
dT = T[1]- T[0]
iml = sum(spectrum['PSv'])*dT;
return iml
def calculate_mean_fragility(gmrs, PDM, T, damping, IMT, damage_model, method):
imls = calculate_imls(gmrs, T, damping)
if IMT == 'Sa':
imls = imls[0]
IMT = 'Sa('+str(T)+')'
elif IMT == 'Sd':
imls = imls[1]
IMT = 'Sd('+str(T)+')'
elif IMT == 'PGA':
imls = imls[2]
cumPDM = convert_damage_matrix_to_probability(PDM)
logmeans, logstddev = [], []
if method == 'least squares':
for iDS in range(len(damage_model['damage_states'])):
solution, _ = optimize.leastsq(residual_lognormal_dist,[0.1, 0.6],args=(imls, cumPDM[iDS+1]))
logmeans.append(solution[0])
logstddev.append(solution[1])
    elif method == 'max likelihood':
        # the imls and the PDM are sorted to be used in the maximum likelihood approach
        # the PDM should be a list of integers in this approach
        I = np.argsort(imls, axis=0)
        imls = np.array(imls)
        imls = imls[I]
        # number of analyses per ground motion record, assuming each row of the
        # PDM sums to the number of assets analysed for that record
        no_assets = np.sum(PDM, axis=1, dtype=float)[I]
        PDM_mle = []
        for iDS in range(len(damage_model['damage_states'])):
            temp = cumPDM[iDS+1][I]*no_assets
            temp2 = temp.astype(int)
            PDM_mle = temp2.tolist()
            solution = mle(PDM_mle, imls, no_assets, 1)
logmeans.append(solution[0])
logstddev.append(solution[1])
Rsquare = calculate_correlation(logmeans,logstddev,cumPDM,imls)
fragility_model = {'damage_states': None, 'logmean': None,'logstddev': None, 'IMT': None}
fragility_model['damage_states'] = damage_model['damage_states']
fragility_model['logmean'] = logmeans
fragility_model['logstddev'] = logstddev
fragility_model['IMT'] = IMT
fragility_model['Rsquare'] = Rsquare
return fragility_model
def mle(num_collapse, iml, tot, g):
#This function gives maximum likelihood estimate of a lognormal function
def func(x):
p = [stats.lognorm.cdf(i, x[0], loc=0, scale=x[1]) for i in iml]
return -np.sum(stats.binom.logpmf(num_collapse,tot,p))
x0 = np.array([1, g])
x = optimize.fmin(func, x0)
sigma = x[0]
mu = np.log(x[1])
return [mu, sigma]
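# Illustrative sketch (not part of the original module): fitting a lognormal
# fragility curve by maximum likelihood with mle(). The intensity levels,
# exceedance counts and number of analyses per level are made-up numbers.
def _example_mle_fit():
    iml = [0.1, 0.2, 0.3, 0.4, 0.5]        # intensity measure levels
    num_exceed = [1, 4, 10, 16, 19]        # analyses exceeding the damage state
    tot = 20                               # analyses per intensity level
    mu, sigma = mle(num_exceed, iml, tot, 1)
    return math.exp(mu), sigma             # median and dispersion of the fit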
def save_mean_fragility(taxonomy, fragility_model, minIML, maxIML, output_type, output_path):
damage_states = fragility_model['damage_states']
logmeans = fragility_model['logmean']
logstddev = fragility_model['logstddev']
Rsquares = fragility_model['Rsquare']
IMT = fragility_model['IMT']
if output_type == 'csv':
save_mean_fragility_csv(taxonomy, damage_states, logmeans, logstddev, Rsquares, minIML, maxIML, IMT, output_path)
if output_type == 'nrml':
save_mean_fragility_nrml(taxonomy, damage_states, logmeans, logstddev, minIML, maxIML, IMT, output_path)
def save_mean_fragility_csv(taxonomy, damage_states, logmeans, logstddev, Rsquares, minIML, maxIML, IMT, output_path):
    destination_file = os.path.join(output_path, taxonomy)
    with open(destination_file+'_fra.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([taxonomy, IMT, minIML, maxIML])
writer.writerow(['Damage state', 'log mean', 'log stddev', 'mean', 'stddev', 'median', 'cov','Rsquare'])
for iDS in range(len(damage_states)):
mu = logmeans[iDS]
sigma = logstddev[iDS]
mean = math.exp(mu+sigma**2/2)
stddev = math.sqrt(math.exp(2*mu+sigma**2)*(math.exp(sigma**2)-1))
median = math.exp(mu)
cov = abs(stddev/mean)
Rsquare = Rsquares[iDS]
writer.writerow([damage_states[iDS], mu, sigma, mean, stddev, median, cov, Rsquare])
def get_damage_states(csv_data):
damage_states = []
csv_data.next()
csv_data.next()
for line in csv_data:
damage_states.append(line[0])
return damage_states
def save_fragility_set_nrml(folder, destination_file):
path = os.path.abspath(folder)
destination_file = os.path.join(path, destination_file)
nrml_file = open(destination_file, 'w')
nrml_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
nrml_file.write('<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">\n\n')
nrml_file.write('<fragilityModel id="fm" assetCategory="buildings" lossCategory="structural">\n\n')
nrml_file.write(' <description>fragility model</description>\n')
ds_str = ' <limitStates>'
file_index = 0
for f in os.listdir(folder):
if f.endswith(".csv"):
with open(os.path.join(path, f), 'rU') as fr:
data = csv.reader(fr)
if file_index == 0:
damage_states = get_damage_states(data)
fr.seek(0)
ds_str += ' '.join(damage_states)
nrml_file.write(ds_str + '</limitStates>\n\n')
file_index += 1
taxonomy, IMT, minIML, maxIML = data.next()[0:4]
data.next()
if IMT == 'PGA' or IMT[:2] == 'Sa':
IMT_units = 'g'
else:
IMT_units = 'm'
nrml_file.write(' <fragilityFunction format="continuous" id="' + taxonomy + '">\n')
nrml_file.write(' <imls imt="'+IMT+'" minIML="'+minIML+'" maxIML="'+maxIML+'"/>\n')
for line in data:
ds, logmean, logstd, mean, stddev, median, cov = line[0:7]
nrml_file.write(' <params ls="'+ds+'" mean="'+mean+'" stddev="'+stddev+'"/>\n')
nrml_file.write(' </fragilityFunction>\n\n')
nrml_file.write('</fragilityModel>\n\n')
nrml_file.write('</nrml>\n')
nrml_file.close()
def save_mean_fragility_nrml(taxonomy, damage_states, logmeans, logstddev, minIML, maxIML, IMT, output_path):
path = os.path.abspath(output_path)
destination_file = os.path.join(path, taxonomy)
nrml_file = open(destination_file+'_fra.xml', 'w')
nrml_file.write('<?xml version = "1.0" encoding = "UTF-8"?>\n')
nrml_file.write('<nrml xmlns = "http://openquake.org/xmlns/nrml/0.4">\n')
nrml_file.write('<fragilityModel format = "continuous">\n')
nrml_file.write(' <description>fragility model for '+ taxonomy +'</description>\n')
ds_str = ' <limitStates>'
for damage_state in damage_states:
ds_str = ds_str + ' ' + damage_state
nrml_file.write(ds_str + '</limitStates>\n')
nrml_file.close()
save_single_fragility_nrml(taxonomy, damage_states, logmeans, logstddev, minIML, maxIML, IMT)
nrml_file = open(taxonomy+'_fra.xml', 'a')
nrml_file.write(' </fragilityModel>\n')
nrml_file.write('</nrml>\n')
nrml_file.close()
def save_single_fragility_nrml(taxonomy, damage_states, logmeans, logstddev, minIML, maxIML, IMT):
if IMT == 'PGA' or IMT[:2] == 'Sa':
IMT_units = 'g'
else:
IMT_units = 'm'
nrml_file = open(taxonomy+'_fra.xml', 'a')
nrml_file.write(' <ffs type = "lognormal">\n')
nrml_file.write(' <taxonomy>'+taxonomy+'</taxonomy>\n')
nrml_file.write(' <IML IMT = "'+IMT+'" imlUnit = "'+IMT_units+'" minIML = "'+str(minIML)+'" maxIML = "'+str(maxIML)+'"/>\n')
for iDS in range(len(damage_states)):
mu = logmeans[iDS]
sigma = logstddev[iDS]
mean = math.exp(mu+sigma**2/2)
stddev = math.sqrt(math.exp(2*mu+sigma**2)*(math.exp(sigma**2)-1))
nrml_file.write(' <ffc ls = "'+damage_states[iDS]+'">\n')
nrml_file.write(' <params mean = "'+str(mean)+'" stddev = "'+str(stddev)+'"/>\n')
nrml_file.write(' </ffc>\n')
nrml_file.write(' </ffs>\n')
nrml_file.close()
def plot_fragility_model(fragility_model, minIML, maxIML):
imls = np.linspace(minIML, maxIML, 100)
color_scheme = ['g', 'b', 'y', 'orangered', 'r', 'k', 'm', 'c', '0.5', '0.75']
for iDS in range(len(fragility_model['damage_states'])):
mu = fragility_model['logmean'][iDS]
sigma = fragility_model['logstddev'][iDS]
if sigma > 0:
plt.plot(imls, stats.lognorm.cdf(imls, sigma,
scale=math.exp(mu)),
color=color_scheme[iDS],
linewidth=2,
label= fragility_model['damage_states'][iDS])
else: #sigma = 0
PoE = []
for ele in imls:
if ele <=math.exp(mu): PoE.append(0)
else: PoE.append(1)
plt.plot(
imls,PoE,color=color_scheme[iDS],
linewidth=2,label= fragility_model['damage_states'][iDS])
try:
plt.plot(fragility_model['imls'],fragility_model['PoE points'][iDS+1],
marker='o',color=color_scheme[iDS],linestyle='None')
except:
pass
plt.xlabel('Sa(Tel) [g]', fontsize = 12)
    plt.ylabel('Probability of Exceedance', fontsize = 12)
plt.legend(loc=4)
plt.show()
def plot_fragility_MSA(fragility_model, minIML, maxIML):
imls = np.linspace(minIML, maxIML, 100)
color_scheme = ['g', 'b', 'y', 'orangered', 'r', 'k', 'm', 'c', '0.5', '0.75']
for iDS in range(len(fragility_model['damage_states'])):
mu = fragility_model['logmean'][iDS]
sigma = fragility_model['logstddev'][iDS]
if sigma > 0:
plt.plot(imls, stats.lognorm.cdf(imls, sigma,
scale=math.exp(mu)),
color=color_scheme[iDS],
linewidth=2,
label= fragility_model['damage_states'][iDS])
else: #sigma = 0
PoE = []
for ele in imls:
if ele <=math.exp(mu): PoE.append(0)
else: PoE.append(1)
plt.plot(
imls,PoE,color=color_scheme[iDS],
linewidth=2,label= fragility_model['damage_states'][iDS])
try:
plt.plot(fragility_model['imls'],fragility_model['PoE points'][iDS],
marker='o',color=color_scheme[iDS],linestyle='None')
except:
pass
plt.xlabel('Sa(Tel) [g]', fontsize = 12)
    plt.ylabel('Probability of Exceedance', fontsize = 12)
plt.legend(loc=4)
plt.show()
def convert_damage_matrix_to_probability(PDM):
no_assets = np.sum(PDM, axis=1, dtype=float)
cumPDM = np.fliplr(np.cumsum(np.fliplr(PDM), axis=1))
cumPDM = np.transpose(cumPDM)
cumPDM = cumPDM/no_assets
return cumPDM
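# Illustrative sketch (not part of the original module): converting a small
# damage matrix (rows = ground motion records, columns = no damage plus two
# damage states) into fractions of exceedance per damage state.
def _example_convert_PDM():
    PDM = np.array([[8, 2, 0],
                    [5, 3, 2],
                    [1, 4, 5]])
    cumPDM = convert_damage_matrix_to_probability(PDM)
    return cumPDM   # row 0 is always 1.0; rows 1-2 are exceedance fractions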
def export_IMLs_PDM(gmrs,T,PDM,damping_ratio,damage_model,output_file):
cumPDM = convert_damage_matrix_to_probability(PDM)
Sa, Sd, PGA, HI = calculate_imls(gmrs, T, damping_ratio)
header = []
header.append('Sa('+str(T)+')')
header.append('Sd('+str(T)+')')
header.append('PGA')
for iDS in range(len(damage_model['damage_states'])):
header.append(damage_model['damage_states'][iDS])
with open(output_file, 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for irow in range(len(Sa)):
row = []
row.append(Sa[irow])
row.append(Sd[irow])
row.append(PGA[irow])
for iDS in range(len(damage_model['damage_states'])):
row.append(cumPDM[iDS+1][irow])
writer.writerow(row)
def plot_fragility_scatter(fragility_model, minIML, maxIML, PDM, gmrs, IMT, T, damping_ratio):
imls = np.linspace(minIML, maxIML, 100)
Sa, Sd, PGA, HI = calculate_imls(gmrs, T, damping_ratio)
cumPDM = convert_damage_matrix_to_probability(PDM)
color_scheme = ['g', 'b', 'y', 'orangered', 'r', 'k', 'm', 'c', '0.5', '0.75']
for iDS in range(len(fragility_model['damage_states'])):
mu = fragility_model['logmean'][iDS]
sigma = fragility_model['logstddev'][iDS]
plt.scatter(Sa, cumPDM[iDS+1], s=20, c=color_scheme[iDS], alpha=0.5)
if sigma > 0:
plt.plot(imls, stats.lognorm.cdf(imls, sigma,
scale=math.exp(mu)),
color=color_scheme[iDS],
linewidth=2,
label= fragility_model['damage_states'][iDS])
else: #sigma = 0
PoE = []
for ele in imls:
if ele <=math.exp(mu): PoE.append(0)
else: PoE.append(1)
plt.plot(
imls,PoE,color=color_scheme[iDS],
linewidth=2,label= fragility_model['damage_states'][iDS])
try:
plt.plot(fragility_model['imls'],fragility_model['PoE points'][iDS+1],
marker='o',color=color_scheme[iDS],linestyle='None')
except:
pass
plt.xlabel('Sa(Tel) [g]', fontsize = 12)
    plt.ylabel('Probability of Exceedance', fontsize = 12)
plt.legend(loc=4)
plt.ylim((0,1))
plt.xlim((minIML,maxIML))
plt.show()
def save_result(result,output_file):
np.savetxt(output_file,result,delimiter=',')
def import_result(input_file):
result = np.genfromtxt(input_file,delimiter=',')
return result
def output_conversions(fragility_model, output_type):
fragility_model_converted = []
if output_type == 'median-dispersion':
for iDS in range(len(fragility_model)):
median = np.exp(fragility_model[iDS][0][0])
sigma = fragility_model[iDS][0][1]
fragility_model_converted.append([[median, sigma], fragility_model[iDS][1]])
outputs = ['median', 'dispersion']
elif output_type == 'logmean-cov':
for iDS in range(len(fragility_model)):
sigma = fragility_model[iDS][0][1]
median = np.exp(fragility_model[iDS][0][0])
log_mean = median*np.exp(np.power(sigma, 2)/2)
log_st_dev = np.sqrt((np.exp(np.power(sigma, 2))-1)*np.exp(2*np.log(median)+np.power(sigma, 2)))
cov = log_st_dev/log_mean
fragility_model_converted.append([[log_mean, cov], fragility_model[iDS][1]])
outputs = ['mean of x', 'cov of x']
elif output_type == 'mean-sigma':
fragility_model_converted = fragility_model
outputs = ['mean of ln(x)', 'st. dev. of ln(x)']
return [fragility_model_converted, outputs]
#def plot_fragility(fragility_model, minIML, maxIML):
#
# imls = np.linspace(minIML, maxIML, 100)
# txt = []
# colours = ['y', 'g', 'c', 'b', 'r', 'm', 'k']
# for iDS in range(0, len(fragility_model[0])):
# mu = fragility_model[0][iDS]
# sigma = fragility_model[1][iDS]
# txt.append('DS '+str(iDS+1))
# if sigma <= 0:
# y = np.zeros_like(imls)
# y[imls>np.exp(mu)] = 1
# else:
# y = norm(mu, sigma).cdf(np.log(imls))
# plt.plot(imls, y, color = colours[iDS], linewidth = 2)
#
# plt.xlabel('Spectral acceleration at T elastic, Sa(Tel) [g]', fontsize = 12)
# plt.ylabel('Probability of Exceedance', fontsize = 12)
# plt.suptitle('Fragility Curves', fontsize = 12)
# plt.show()
def NigamJennings(time, acc, periods, damping):
add_PGA = False
if periods[0] == 0:
periods = np.delete(periods, 0)
add_PGA = True
dt = time[1]-time[0]
num_steps = len(acc)
num_per = len(periods)
vel, disp = calculate_velocity_displacement(time, acc)
omega = (2. * np.pi) / np.array(periods)
omega2 = omega ** 2.
omega3 = omega ** 3.
omega_d = omega * math.sqrt(1.0 - (damping ** 2.))
const = {'f1': (2.0 * damping) / (omega3 * dt),
'f2': 1.0 / omega2,
'f3': damping * omega,
'f4': 1.0 / omega_d}
const['f5'] = const['f3'] * const['f4']
const['f6'] = 2.0 * const['f3']
const['e'] = np.exp(-const['f3'] * dt)
const['s'] = np.sin(omega_d * dt)
const['c'] = np.cos(omega_d * dt)
const['g1'] = const['e'] * const['s']
const['g2'] = const['e'] * const['c']
const['h1'] = (omega_d * const['g2']) - (const['f3'] * const['g1'])
const['h2'] = (omega_d * const['g1']) + (const['f3'] * const['g2'])
x_a, x_v, x_d = calculate_time_series(num_steps, num_per, time, acc, const, omega2)
spectrum = {'Sa': None, 'Sv': None, 'Sd': None, 'T': None}
spectrum['Sa'] = np.max(np.fabs(x_a)/9.81, axis = 0)
spectrum['Sv'] = np.max(np.fabs(x_v), axis = 0)
spectrum['Sd'] = np.max(np.fabs(x_d), axis = 0)
spectrum['PSv'] = spectrum['Sd']*omega
spectrum['T'] = periods
if add_PGA:
spectrum = add_PGA_spectrum(spectrum, acc)
return spectrum
def add_PGA_spectrum(spectrum, acc):
spectrum['Sa'] = np.append(np.max(np.fabs(acc)/9.81), spectrum['Sa'])
spectrum['Sv'] = np.append(0, spectrum['Sv'])
spectrum['Sd'] = np.append(0, spectrum['Sd'])
spectrum['T'] = np.append(0, spectrum['T'])
return spectrum
def calculate_time_series(num_steps, num_per, time, acc, const, omega2):
"""
Calculates the acceleration, velocity and displacement time series for
the SDOF oscillator
:param dict const:
Constants of the algorithm
:param np.ndarray omega2:
        Square of the oscillator circular frequency
:returns:
x_a = Acceleration time series
x_v = Velocity time series
x_d = Displacement time series
"""
dt = time[1]-time[0]
x_d = np.zeros([num_steps - 1, num_per], dtype = float)
x_v = np.zeros_like(x_d)
x_a = np.zeros_like(x_d)
for k in range(0, num_steps - 1):
yval = k - 1
dug = acc[k + 1] - acc[k]
z_1 = const['f2'] * dug
z_2 = const['f2'] * acc[k]
z_3 = const['f1'] * dug
z_4 = z_1 / dt
if k == 0:
b_val = z_2 - z_3
a_val = (const['f5'] * b_val) + (const['f4'] * z_4)
else:
b_val = x_d[k - 1, :] + z_2 - z_3
a_val = (const['f4'] * x_v[k - 1, :]) +\
(const['f5'] * b_val) + (const['f4'] * z_4)
x_d[k, :] = (a_val * const['g1']) + (b_val * const['g2']) +\
z_3 - z_2 - z_1
x_v[k, :] = (a_val * const['h1']) - (b_val * const['h2']) - z_4
x_a[k, :] = (-const['f6'] * x_v[k, :]) - (omega2 * x_d[k, :])
return x_a, x_v, x_d
def calculate_velocity_displacement(time, acc):
'''
    Returns the velocity and displacement time series using simple integration
:param float time_step:
Time-series time-step (s)
:param np.ndarray acceleration:
Acceleration time-history
:returns:
velocity - Velocity Time series (cm/s)
displacement - Displacement Time series (cm)
'''
dt = time[1]-time[0]
velocity = dt * integrate.cumtrapz(acc, initial = 0.)
displacement = dt * integrate.cumtrapz(velocity, initial = 0.)
return velocity, displacement
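# Illustrative sketch (not part of the original module): integrating a short
# synthetic acceleration trace to velocity and displacement. The record is an
# arbitrary 1 Hz sine in m/s2, so the outputs here are in m/s and m.
def _example_velocity_displacement():
    time = np.arange(0.0, 4.0, 0.01)
    acc = np.sin(2.0*np.pi*time)
    return calculate_velocity_displacement(time, acc)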
def residual_lognormal_dist(coeffs, imls, fractions):
mu = coeffs[0]
sigma = coeffs[1]
residual = abs(np.array(fractions) - stats.lognorm.cdf(imls, sigma, scale = math.exp(mu)))
return residual
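# Illustrative sketch (not part of the original module): least-squares fit of a
# lognormal fragility curve with residual_lognormal_dist, mirroring the call in
# calculate_mean_fragility. Intensities and fractions are made-up numbers.
def _example_least_squares_fit():
    imls = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
    fractions = np.array([0.05, 0.20, 0.50, 0.80, 0.95])
    solution, _ = optimize.leastsq(residual_lognormal_dist, [0.1, 0.6],
                                   args=(imls, fractions))
    return solution   # [log-mean, log-standard deviation]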
def add_information(capacity_curves, attribute, type, data):
    #FIXME: 'type' shadows the Python built-in name; consider renaming it
no_capacity_curves = len(capacity_curves['Sa'])
if attribute == 'heights' or attribute == 'periods' or attribute == 'gamma':
if type == 'value':
values = []
for icc in range(no_capacity_curves):
values.append(data)
capacity_curves[attribute] = values
elif type == 'vector':
capacity_curves[attribute] = data
elif type == 'calculate' and attribute == 'periods':
periods = []
for icc in range(no_capacity_curves):
Sd = capacity_curves['Sd'][icc][data]
Sa = capacity_curves['Sa'][icc][data]
periods.append(2*math.pi*math.sqrt(Sd/(Sa*9.81)))
capacity_curves[attribute] = periods
elif attribute == 'yielding point':
Sdy = []
Say = []
for icc in range(no_capacity_curves):
Sdy.append(capacity_curves['Sd'][icc][data])
Say.append(capacity_curves['Sa'][icc][data])
capacity_curves['Sdy'] = Sdy
capacity_curves['Say'] = Say
else:
print attribute + ' is not a recognized attribute. No information was added.'
return capacity_curves
def idealisation(typ, capacity_curves):
idealised_capacity = []
no_capacity_curves = len(capacity_curves['periods'])
for icc in range(no_capacity_curves):
if capacity_curves['type']== 'Vb-droof' or capacity_curves['type']== 'Vb-dfloor':
droof = capacity_curves['droof'][icc]
Vb = capacity_curves['Vb'][icc]
else:
droof = capacity_curves['Sd'][icc]
Vb = capacity_curves['Sa'][icc]
capacity_curves['idealised'] = 'FALSE'
if typ == 'bilinear':
idealised_capacity.append(bilinear(droof, Vb, capacity_curves['idealised']))
elif typ == 'quadrilinear':
idealised_capacity.append(quadrilinear(droof, Vb, capacity_curves['idealised']))
return idealised_capacity
def bilinear(droof, Vb, idealised):
# FEMA method
if idealised == 'FALSE':
droof = np.array(droof)
Fy = np.max(Vb)
du = np.max(droof)
for index, item in enumerate(Vb):
if item >= Fy:
break
Ay = 0.6*Fy
Ax = np.interp(Ay, Vb[0:index], droof[0:index])
slp = Ay/Ax
dy = Fy/slp
else:
dy = droof[1]
du = droof[2]
Fy = Vb[1]
return [dy, du, Fy]
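# Illustrative sketch (not part of the original module): FEMA-style bilinear
# idealisation of a toy pushover curve with bilinear(). The displacements and
# base shears below are arbitrary numbers.
def _example_bilinear_idealisation():
    droof = [0.00, 0.02, 0.05, 0.10, 0.20]   # roof displacement [m]
    Vb = [0.0, 150.0, 280.0, 300.0, 300.0]   # base shear [kN]
    dy, du, Fy = bilinear(droof, Vb, 'FALSE')
    return dy, du, Fy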
def quadrilinear(droof, Vb, idealised):
if idealised == 'FALSE':
droof = np.array(droof)
Fmax = np.max(Vb)
for index, item in enumerate(Vb):
if item >= Fmax:
break
fmax = index
dmax = droof[index]
# Yielding point:
# Vulnerability guidelines method
        # Find the yielding displacement with the equal energy principle in the interval from 0 to Dmax
Areas = np.array([(Vb[i+1]+Vb[i])/2 for i in range(0, fmax)])
dd = np.array([droof[i+1]-droof[i] for i in range(0, fmax)])
Edmax = np.sum(dd*Areas) #Area under the pushover curve in the interval from 0 to Dmax
dy = 2*(dmax-Edmax/Fmax)
Fy = Fmax
        # Onset of the plateau
        # Find where the slope of the post-peak branch flattens out into the plateau
Vb_norm = Vb[fmax::]/Fy
d_norm = droof[fmax::]/dy
slp = [(Vb_norm[i]-Vb_norm[i-1])/(d_norm[i]-d_norm[i-1]) for i in xrange(1, len(Vb_norm))]
indy_soft = np.nonzero(abs(np.array(slp))>0.3)
if len(indy_soft[0])>1:
fmin = indy_soft[0][-1]+fmax
Fmin = Vb[fmin]
dmin = droof[fmin]
# Onset of softening
            # Find the softening displacement with the equal energy principle in the interval from Dmax to Dmin (onset of the plateau)
Areas = np.array([(Vb[i+1]+Vb[i])/2 for i in range(fmax, fmin)])
dd = np.array([droof[i+1]-droof[i] for i in range(fmax, fmin)])
Edmin = np.sum(dd*Areas)
ds = 2/(Fmax-Fmin)*(Edmin - (dmin-dmax)*Fmax + 0.5*dmin*(Fmax-Fmin))
du = np.max(droof)
if ds<dy: ds = dy
if ds>du: ds = du
            # Residual plateau
if len(indy_soft[0])>0:
Areas = np.array([(Vb[i+1]+Vb[i])/2 for i in range(fmin, len(Vb)-1)])
dd = np.array([droof[i+1]-droof[i] for i in range(fmin, len(Vb)-1)])
Edplat = np.sum(dd*Areas)
Fres = Edplat/(droof[-1]-dmin)
slp_soft = abs((Fmax-Fmin)/(ds-dmin))
dmin = dmin+(Fmin-Fres)/slp_soft
if dmin>du:
dmin = du
Fmin = Vb[-1]
else:
Fmin = Fres
else:
fmin = len(Vb)-1
Fmin = Fmax
dmin = droof[fmin]
ds = dmin
du = dmin
else:
dy = droof[1]
ds = droof[2]
dmin = droof[3]
du = droof[4]
Fy = Vb[1]
        Fmax = Vb[2]
Fmin = Vb[3]
return [dy, ds, dmin, du, Fy, Fmax, Fmin]
def get_spectral_ratios(capacity_curves, input_spectrum):
Tuni = capacity_curves['mean_period']
T = capacity_curves['periods']
with open(input_spectrum, 'rb') as f:
data = f.read()
l = data.rstrip()
lines = l.split('\n')
data = [lines[i].split('\t') for i in range(0, len(lines))]
Ts = np.array([float(ele[0]) for ele in data])
Sa = np.array([float(ele[1]) for ele in data])
S_Tuni = np.interp(Tuni, Ts, Sa)
S_T = np.array([np.interp(ele, Ts, Sa) for ele in T])
Sa_ratios = S_Tuni/S_T
return Sa_ratios
def calculate_fragility_statistics(gmrs, PDM, T, damping, IMT, damage_model,
method, no_samples, size_sample):
imls = calculate_imls(gmrs, T, damping)
if IMT == 'Sa':
imls = imls[0]
IMT = 'Sa('+str(T)+')'
elif IMT == 'Sd':
imls = imls[1]
IMT = 'Sd('+str(T)+')'
elif IMT == 'PGA':
imls = imls[2]
no_gmrs = len(imls)
no_DS = len(damage_model['damage_states'])
cumPDM = convert_damage_matrix_to_probability(PDM)
statistics = []
for isample in range(no_samples):
sample_indices = np.random.random_integers(0, no_gmrs-1, size_sample)
sample_imls = imls[sample_indices]
sample_cumPDM = cumPDM[:, sample_indices]
fragility_model = []
if method == 'least squares':
for iDS in range(no_DS):
solution, _ = optimize.leastsq(residual_lognormal_dist,
[0.1, 0.6],
args=(sample_imls, sample_cumPDM[iDS+1]))
fragility_model.append(solution)
        elif method == 'max likelihood':
            I = np.argsort(sample_imls, axis=0)
            sample_imls = np.array(sample_imls)[I]
            # number of analyses per sampled record, assuming each row of the
            # PDM sums to the number of assets analysed for that record
            no_assets = np.sum(PDM, axis=1, dtype=float)[sample_indices][I]
            sample_PDM_mle = []
            for iDS in range(no_DS):
                temp = sample_cumPDM[iDS+1][I]*no_assets
                temp2 = temp.astype(int)
                sample_PDM_mle = temp2.tolist()
                solution = mle(sample_PDM_mle, sample_imls, no_assets, 1)
                fragility_model.append(solution)
statistics.append(fragility_model)
statistics = np.array(statistics)
means = np.mean(statistics, axis=0)
stddev = np.std(statistics, axis=0)
fragility_stats = {'damage_states': None, 'mean': None, 'stddev': None,
'correlation': None, 'IMT': None}
fragility_stats['damage_states'] = damage_model['damage_states']
fragility_stats['mean'] = np.reshape(means, (no_DS, 2))
fragility_stats['stddev'] = np.reshape(stddev, (no_DS, 2))
fragility_stats['correlation'] = np.corrcoef(np.transpose(statistics))
fragility_stats['statistics'] = statistics
fragility_stats['IMT'] = IMT
return fragility_stats
def plot_fragility_stats(fragility_stats, minIML, maxIML):
imls = np.linspace(minIML, maxIML, 100)
color_scheme = ['g', 'b', 'y', 'orangered', 'r', 'k', 'm', 'c', '0.5', '0.75']
for iDS in range(len(fragility_stats['damage_states'])):
mu = fragility_stats['mean'][iDS][0]
sigma = fragility_stats['mean'][iDS][1]
plt.plot(imls, stats.lognorm.cdf(imls, sigma, scale = math.exp(mu)),
color = color_scheme[iDS], linewidth = 2,
label = fragility_stats['damage_states'][iDS])
plt.legend(loc = 4)
plt.show()
def read_consequence_model(input_file):
damage_states, distribution, mean, cov, A, B = [], [], [], [], [], []
with open(input_file, 'rU') as f:
data = csv.reader(f)
for iline, row in enumerate(data):
if iline > 0:
damage_states.append(row[0])
distribution.append(row[1])
mean.append(float(row[2]))
cov.append(float(row[3]))
A.append(float(row[4]))
B.append(float(row[5]))
cons_model = {'damage_states': None, 'distribution': None, 'mean': None,
'cov': None, 'A': None, 'B': None}
cons_model['damage_states'] = damage_states
cons_model['distribution'] = distribution
cons_model['mean'] = mean
cons_model['cov'] = cov
cons_model['A'] = A
cons_model['B'] = B
return cons_model
def convert_fragility_vulnerability(fragility_model, cons_model, imls, dist_type):
vulnerability_model = []
check_damage_states(fragility_model, cons_model)
no_samples = 100
loss_ratios = []
for isample in range(no_samples):
loss_ratios.append(sample_loss_ratios(fragility_model, cons_model, imls))
if dist_type == 'lognormal' or dist_type == 'beta':
vulnerability_model = create_parametric_vul_model(imls, loss_ratios, dist_type, fragility_model['IMT'])
elif dist_type == 'PMF':
vulnerability_model = create_nonparametric_vul_model(imls, loss_ratios, dist_type, fragility_model['IMT'])
return vulnerability_model
def create_parametric_vul_model(imls, loss_ratios, dist_type, IMT):
mean = np.mean(np.array(loss_ratios), axis=0)
stddev = np.std(np.array(loss_ratios), axis=0)
cov = np.zeros(len(mean))
for icov in range(len(cov)):
if mean[icov] > 0:
if stddev[icov]/mean[icov] > 10**-4:
cov[icov] = stddev[icov]/mean[icov]
vulnerability_model = {'imls': None, 'distribution': None,
'mean': None, 'cov': None, 'IMT': None}
vulnerability_model['imls'] = imls
vulnerability_model['distribution'] = dist_type
vulnerability_model['mean'] = mean
vulnerability_model['cov'] = cov
vulnerability_model['IMT'] = IMT
return vulnerability_model
def create_nonparametric_vul_model(imls, sampled_loss_ratios, dist_type, IMT):
no_values = 20
no_samples = len(sampled_loss_ratios)
loss_ratios = np.linspace(0.0, 1.0, no_values)
probs = np.zeros((no_values, len(imls)))
for isample in range(no_samples):
for iiml in range(len(imls)):
if sampled_loss_ratios[isample][iiml] == 0:
probs[0][iiml] = probs[0][iiml] + 1
elif sampled_loss_ratios[isample][iiml] == 1:
probs[-1][iiml] = probs[-1][iiml] + 1
else:
idx = (np.abs(loss_ratios[1:-1]-sampled_loss_ratios[isample][iiml])).argmin()
probs[idx+1][iiml] = probs[idx+1][iiml] + 1
probs = probs/no_samples
vulnerability_model = {'imls': None, 'distribution': None,
'loss_ratios': None, 'probabilities': None, 'IMT': None}
vulnerability_model['imls'] = imls
vulnerability_model['distribution'] = dist_type
vulnerability_model['loss_ratios'] = loss_ratios
vulnerability_model['probabilities'] = probs
vulnerability_model['IMT'] = IMT
return vulnerability_model
def plot_vulnerability_model(vulnerability_model):
imls = vulnerability_model['imls']
imt = vulnerability_model['IMT']
if vulnerability_model['distribution'] == 'lognormal':
mean_lrs = vulnerability_model['mean']
cov_lrs = vulnerability_model['cov']
plt.errorbar(imls, mean_lrs, yerr=cov_lrs, color='0.4', fmt='o')
elif vulnerability_model['distribution'] == 'beta':
mean_lrs = vulnerability_model['mean']
cov_lrs = vulnerability_model['cov']
plt.errorbar(imls, mean_lrs, yerr=cov_lrs, color='0.4', fmt='o')
elif vulnerability_model['distribution'] == 'PMF':
lrs = vulnerability_model['loss_ratios']
probs = vulnerability_model['probabilities']
mean_lrs = np.dot(lrs, probs)
imls.insert(0, 0)
mean_lrs = np.insert(mean_lrs, 0, 0)
plt.plot(imls, mean_lrs, linewidth=2)
plt.plot(imls, mean_lrs, 'ro')
plt.ylim(0.0, 1.0)
plt.xlabel(imt, fontsize=12)
plt.ylabel('Mean Loss Ratio', fontsize=12)
plt.suptitle('Vulnerability function', fontsize=16)
plt.show()
def check_damage_states(model1, model2):
for iDS in range(len(model1['damage_states'])):
assert(model1['damage_states'][iDS] == model2['damage_states'][iDS]), 'The fragility model and the consequence model are not compatible'
return
def sample_loss_ratios(fragility_model, cons_model, imls):
no_damage_states = len(fragility_model['damage_states'])
loss_ratios = np.zeros(len(imls))
for iDS in range(no_damage_states):
distribution = cons_model['distribution'][iDS]
mean = cons_model['mean'][iDS]
cov = cons_model['cov'][iDS]
A = cons_model['A'][iDS]
B = cons_model['B'][iDS]
damage_ratio = sample_value(distribution, mean, cov, A, B)
mu = fragility_model['logmean'][iDS]
sigma = fragility_model['logstddev'][iDS]
if iDS == no_damage_states-1:
fraction = stats.lognorm.cdf(imls, sigma, scale = math.exp(mu))
else:
mu2 = fragility_model['logmean'][iDS + 1]
sigma2 = fragility_model['logstddev'][iDS + 1]
fraction = stats.lognorm.cdf(imls, sigma, scale = math.exp(mu))-stats.lognorm.cdf(imls, sigma2, scale = math.exp(mu2))
loss_ratios = loss_ratios + fraction*damage_ratio
return loss_ratios
def save_vulnerability(taxonomy, vulnerability_model, output_type, output_path):
imls = vulnerability_model['imls']
IMT = vulnerability_model['IMT']
distribution = vulnerability_model['distribution']
if distribution == 'lognormal' or distribution == 'beta':
values1 = vulnerability_model['mean']
values2 = vulnerability_model['cov']
elif distribution == 'PMF':
values1 = vulnerability_model['loss_ratios']
values2 = vulnerability_model['probabilities']
if output_type == 'csv':
save_vulnerability_csv(taxonomy, imls, values1, values2, distribution, IMT, output_path)
if output_type == 'nrml':
save_vulnerability_nrml(taxonomy, imls, values1, values2, distribution, IMT, output_path)
def save_vulnerability_csv(taxonomy, imls, values1, values2, distribution, IMT, output_path):
path = os.path.abspath(output_path)
destination_file = os.path.join(path, taxonomy)
with open(destination_file+'_vul.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([taxonomy, IMT, distribution])
if distribution == 'lognormal' or distribution == 'beta':
writer.writerow(np.append('imls', imls))
writer.writerow(np.append('mean', values1))
writer.writerow(np.append('cov', values2))
elif distribution == 'PMF':
writer.writerow(['loss ratio', 'probabilities'])
for iLR in range(len(values1)):
writer.writerow(np.append(values1[iLR], values2[iLR, :]))
def save_vulnerability_nrml(taxonomy, imls, values1, values2, distribution, IMT, output_path):
path = os.path.abspath(output_path)
destination_file = os.path.join(path, taxonomy)
nrml_file = open(destination_file+'_vul.xml', 'w')
nrml_file.write('<?xml version = "1.0" encoding = "utf-8"?>\n')
nrml_file.write('<nrml xmlns = "http://openquake.org/xmlns/nrml/0.5">\n')
nrml_file.write('<vulnerabilityModel id = "'+taxonomy+'" assetCategory = "buildings" lossCategory = "economic">\n')
nrml_file.write(' <description>vulnerability model for '+ taxonomy +'</description>\n')
nrml_file.close()
save_single_vulnerability_nrml(taxonomy, imls, values1, values2, distribution, IMT)
nrml_file = open(taxonomy+'_vul.xml', 'a')
nrml_file.write('</vulnerabilityModel>\n')
nrml_file.write('</nrml>\n')
nrml_file.close()
def save_single_vulnerability_nrml(taxonomy, imls, values1, values2, distribution, IMT):
imls_str = convert_array_to_string(imls)
nrml_file = open(taxonomy+'_vul.xml', 'a')
if distribution == 'lognormal':
dist = 'LN'
elif distribution == 'beta':
dist = 'BT'
elif distribution == 'PMF':
dist = 'PM'
nrml_file.write(' <vulnerabilityFunction id = "'
+ taxonomy + '" dist = "' + dist + '">\n')
if distribution == 'lognormal' or distribution == 'beta':
lrs_str = convert_array_to_string(values1)
cov_str = convert_array_to_string(values2)
nrml_file.write(' <imls imt = "'+IMT+'">'+imls_str+'</imls>\n')
nrml_file.write(' <meanLRs>'+lrs_str+'</meanLRs>\n')
nrml_file.write(' <covLRs>'+cov_str+'</covLRs>\n')
elif distribution == 'PMF':
for iLR in range(len(values1)):
probs = convert_array_to_string(values2[iLR])
nrml_file.write(' <probabilities lr = "'
+ str(values1[iLR]) + '">' + probs
+ '</probabilities>)\n')
nrml_file.write(' </vulnerabilityFunction>\n')
def save_vulnerability_set_nrml(folder, destination_file):
path = os.path.abspath(folder)
destination_file = os.path.join(path, destination_file)
nrml_file = open(destination_file, 'w')
nrml_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
nrml_file.write('<nrml xmlns="http://openquake.org/xmlns/nrml/0.5">\n\n')
nrml_file.write('<vulnerabilityModel assetCategory="buildings" lossCategory="economic">\n')
nrml_file.write(' <description>vulnerability model</description>\n\n')
for f in os.listdir(folder):
if f.endswith(".csv"):
with open(os.path.join(path, f), 'rU') as fr:
data = csv.reader(fr)
taxonomy, IMT, distribution = data.next()[0:3]
imls = data.next()
imls.remove("imls")
imls_str = " ".join(imls)
if distribution == 'lognormal':
dist = 'LN'
elif distribution == 'beta':
dist = 'BT'
elif distribution == 'PMF':
dist = 'PM'
nrml_file.write(' <vulnerabilityFunction id="'
+ taxonomy + '" dist="' + dist + '">\n')
nrml_file.write(' <imls imt="'+IMT+'">'+imls_str+'</imls>\n')
if distribution == 'lognormal' or distribution == 'beta':
lrs = data.next()
lrs.remove("mean")
lrs_str = " ".join(lrs)
covs = data.next()
covs.remove("cov")
cov_str = " ".join(covs)
nrml_file.write(' <meanLRs>'+lrs_str+'</meanLRs>\n')
nrml_file.write(' <covLRs>'+cov_str+'</covLRs>\n')
elif distribution == 'PMF':
data.next()
for line in data:
lr = line.pop(0)
probs = " ".join(line)
nrml_file.write(' <probabilities lr="' + lr + '">' + probs + '</probabilities>\n')
nrml_file.write(' </vulnerabilityFunction>\n\n')
nrml_file.write('</vulnerabilityModel>\n\n')
nrml_file.write('</nrml>\n')
nrml_file.close()
def convert_array_to_string(array):
string = ''
for i in range(len(array)):
string = string + ' ' + str(array[i])
return string
def read_hazard(input_file):
#This function reads a hazard curve and stores it in a dictionary
file = open(input_file)
data = csv.reader(file)
IMLs = []
prob_exceedance = []
IM_type = []
for line in data:
if line[0] == 'PGA':
IM_type.append(line[0])
for value in line[1:]:
if isNumber(value):
IMLs.append(float(value))
if line[0] == 'Sa':
IM_type.append(line[0])
for value in line[1:]:
if isNumber(value):
IMLs.append(float(value))
if line[0] == 'PoE':
for value in line[1:]:
if isNumber(value):
prob_exceedance.append(float(value))
#Store all the data in the dictionary
hazard_curve = {'IMLs':None,'PoE':None,'IM_Type':None}
hazard_curve['IMLs'] = IMLs
hazard_curve['PoE'] = prob_exceedance
hazard_curve['IM_Type'] = IM_type
return hazard_curve
def plot_hazard_curve(hazard_curve):
#This function plots the hazard curve
IMLs = hazard_curve['IMLs']
PoE = hazard_curve['PoE']
plt.plot(IMLs,PoE,color='g',linewidth=2)
plt.xlabel(hazard_curve['IM_Type'][0] + ' [g]',fontsize = 10)
plt.ylabel('Probability of Exceedance',fontsize = 10)
def read_frag_model(input_file):
#This function reads a fragility model and stores it in a dictionary
damage_states = []
model_type = []
cent_value = []
dispersion = []
log_mean = []
log_stdev = []
file = open(input_file)
data = file.readlines()
line = data[0]
line = line.strip().split(',')
model_type = line[1] + ' - ' + line[2]
file = open(input_file)
data = csv.reader(file)
data = [row for row in data]
for iline in range(len(data)-1):
line = data[iline+1]
damage_states.append(line[0])
cent_value.append(line[1])
dispersion.append(line[2])
    if model_type == 'median - dispersion':
        for ids in range(len(cent_value)):
            mean = math.log(float(cent_value[ids]))
            stdev = float(dispersion[ids])
            log_mean.append(mean)
            log_stdev.append(stdev)
    elif model_type == 'mean of x - cov of x':
        for ids in range(len(cent_value)):
            mu = float(cent_value[ids])
            cov = float(dispersion[ids])
            # cov is the coefficient of variation of x (see output_conversions)
            median = mu/math.sqrt(1+cov**2)
            mean = math.log(median)
            stdev = math.sqrt(math.log(1+cov**2))
            log_mean.append(mean)
            log_stdev.append(stdev)
    elif model_type == 'mean of ln(x) - st. dev. of ln(x)':
        for ids in range(len(cent_value)):
            mean = float(cent_value[ids])
            stdev = float(dispersion[ids])
            log_mean.append(mean)
            log_stdev.append(stdev)
#Store all the data in the dictionary
    fragility_model = {'Damage_states': None, 'logmean': None, 'logstdev': None}
    fragility_model['Damage_states'] = damage_states
    fragility_model['logmean'] = log_mean
    fragility_model['logstdev'] = log_stdev
return fragility_model
def read_deformed_shape(damage_model, capacity_curves, icc):
if damage_model['deformed shape'] != 'none':
ISDvec, Sdvec = [],[]
with open(damage_model['deformed shape'],'rU') as f:
csv_reader = csv.reader(f)
for row in csv_reader:
if row[0] == 'ISD':
ISDvec.append(np.array(row[1:],dtype = float))
else:
Sdvec.append(np.array(row[1:],dtype = float))
try:
ISDvec = np.array(ISDvec[icc])
Sdvec = np.array(Sdvec[icc])
except IndexError:
ISDvec = np.array(ISDvec[0])
Sdvec = np.array(Sdvec[0])
else:
# This is a simple assumption on the relationship between roof displacement and drift
assert(capacity_curves['heights'] != []), 'If you want to use inter-storey drift damage model you should provide building height in capacity curve input file'
RDvec = np.array(capacity_curves['Sd'][icc])*capacity_curves['gamma'][icc]
Sdvec = np.array(capacity_curves['Sd'][icc])
ISDvec = np.array(RDvec)/capacity_curves['heights'][icc]
return [ISDvec, Sdvec]
def check_SDOF_curves(capacity_curves):
no_capacity_curves = len(capacity_curves['Sd'])
for icc in range(no_capacity_curves):
assert(len(capacity_curves['Sd'][icc]) <= 5), "Idealise capacity curves with a maximum of 5 points, "+str(len(capacity_curves['Sd'][icc]))+" given"
if len(capacity_curves['Sd'][icc]) < 5:
no_points = 5-len(capacity_curves['Sd'][icc])
Sd = capacity_curves['Sd'][icc][:-1]
for i in range(no_points):
Sd.append(capacity_curves['Sd'][icc][-2]+(capacity_curves['Sd'][icc][-1] - capacity_curves['Sd'][icc][-2])/(no_points+1)*(i+1))
Sd.append(capacity_curves['Sd'][icc][-1])
Sa = np.interp(Sd,capacity_curves['Sd'][icc],capacity_curves['Sa'][icc])
capacity_curves['Sd'][icc] = Sd
capacity_curves['Sa'][icc] = Sa.tolist()
return capacity_curves
|
agpl-3.0
|
marinkaz/orange3
|
Orange/data/io.py
|
1
|
26361
|
import re
import warnings
import subprocess
from os import path
from ast import literal_eval
from math import isnan
from numbers import Number
from itertools import chain, repeat
from functools import lru_cache
from collections import OrderedDict
import bottlechest as bn
import numpy as np
from chardet.universaldetector import UniversalDetector
from Orange.data.variable import *
from Orange.util import abstract, Registry, flatten, namegen
_IDENTITY = lambda i: i
class Compression:
GZIP = '.gz'
BZIP2 = '.bz2'
XZ = '.xz'
all = (GZIP, BZIP2, XZ)
def open_compressed(filename, *args, _open=open, **kwargs):
"""Return seamlessly decompressed open file handle for `filename`"""
if isinstance(filename, str):
if filename.endswith(Compression.GZIP):
from gzip import open as _open
elif filename.endswith(Compression.BZIP2):
from bz2 import open as _open
elif filename.endswith(Compression.XZ):
from lzma import open as _open
return _open(filename, *args, **kwargs)
# Else already a file, just pass it through
return filename
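# Illustrative sketch (not part of the original module): reading the first line
# of a hypothetical gzip-compressed file through open_compressed, which picks
# gzip/bz2/lzma transparently from the file extension.
def _example_open_compressed(filename='data.tab.gz'):
    with open_compressed(filename, 'rt', encoding='utf-8') as f:
        return f.readline()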
def detect_encoding(filename):
"""
Detect encoding of `filename`, which can be a ``str`` filename, a
``file``-like object, or ``bytes``.
"""
# Try with Unix file utility first because it's faster (~10ms vs 100ms)
if isinstance(filename, str) and not filename.endswith(Compression.all):
try:
with subprocess.Popen(('file', '--brief', '--mime-encoding', filename),
stdout=subprocess.PIPE) as process:
process.wait()
if process.returncode == 0:
encoding = process.stdout.read().strip()
# file only supports these encodings; for others it says
# unknown-8bit or binary. So we give chardet a chance to do
# better
if encoding in (b'utf-8', b'us-ascii', b'iso-8859-1',
b'utf-7', b'utf-16le', b'utf-16be', b'ebcdic'):
return encoding.decode('us-ascii')
except OSError: pass # windoze
# file not available or unable to guess the encoding, have chardet do it
detector = UniversalDetector()
# We examine only first N 4kB blocks of file because chardet is really slow
MAX_BYTES = 4*1024*12
def _from_file(f):
detector.feed(f.read(MAX_BYTES))
detector.close()
return detector.result.get('encoding')
if isinstance(filename, str):
with open_compressed(filename, 'rb') as f:
return _from_file(f)
elif isinstance(filename, bytes):
detector.feed(filename[:MAX_BYTES])
detector.close()
return detector.result.get('encoding')
elif hasattr(filename, 'encoding'):
return filename.encoding
else: # assume file-like object that you can iter through
return _from_file(filename)
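# Illustrative sketch added for clarity (not part of the original module): the
# same call accepts a filename, raw bytes, or an open file-like object; here we
# let chardet guess from a short byte string.
def _example_detect_encoding():
    return detect_encoding('naïve café'.encode('utf-8'))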
class Flags:
"""Parser for column flags (i.e. third header row)"""
DELIMITER = ' '
_RE_SPLIT = re.compile(r'(?<!\\)' + DELIMITER).split
_RE_ATTR_UNQUOTED_STR = re.compile(r'^[a-zA-Z_]').match
ALL = OrderedDict((
('class', 'c'),
('ignore', 'i'),
('meta', 'm'),
('weight', 'w'),
('.+?=.*?', ''), # general key=value attributes
))
_RE_ALL = re.compile(r'^({})$'.format('|'.join(filter(None, flatten(ALL.items())))))
def __init__(self, flags):
for v in filter(None, self.ALL.values()):
setattr(self, v, False)
self.attributes = {}
for flag in flags or []:
flag = flag.strip()
if self._RE_ALL.match(flag):
if '=' in flag:
k, v = flag.split('=', 1)
self.attributes[k] = (v if Flags._RE_ATTR_UNQUOTED_STR(v) else
literal_eval(v) if v else
'')
else:
setattr(self, flag, True)
setattr(self, self.ALL.get(flag, ''), True)
elif flag:
warnings.warn('Invalid attribute flag \'{}\''.format(flag))
@staticmethod
def join(iterable, *args):
return Flags.DELIMITER.join(i.strip().replace(Flags.DELIMITER, '\\' + Flags.DELIMITER)
for i in chain(iterable, args)).lstrip()
@staticmethod
def split(s):
return [i.replace('\\' + Flags.DELIMITER, Flags.DELIMITER)
for i in Flags._RE_SPLIT(s)]
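# Illustrative sketch added for clarity (not part of the original module): a
# third-header-row cell such as "class color=red" sets the short boolean flag
# and collects the key=value pair into `attributes`.
def _example_flags():
    f = Flags(Flags.split('class color=red'))
    return f.c, f.attributes   # -> (True, {'color': 'red'})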
# Matches discrete specification where all the values are listed, space-separated
_RE_DISCRETE_LIST = re.compile(r'^\s*[^\s]+(\s[^\s]+)+\s*$')
_RE_TYPES = re.compile(r'^\s*({}|{}|)\s*$'.format(_RE_DISCRETE_LIST.pattern,
'|'.join(flatten(getattr(vartype, 'TYPE_HEADERS')
for vartype in Variable.registry.values()))))
_RE_FLAGS = re.compile(r'^\s*( |{}|)*\s*$'.format('|'.join(flatten(filter(None, i) for i in Flags.ALL.items()))))
class FileFormatMeta(Registry):
def __new__(cls, name, bases, attrs):
newcls = super().__new__(cls, name, bases, attrs)
# Optionally add compressed versions of extensions as supported
if getattr(newcls, 'SUPPORT_COMPRESSED', False):
new_extensions = list(getattr(newcls, 'EXTENSIONS', ()))
for compression in Compression.all:
for ext in newcls.EXTENSIONS:
new_extensions.append(ext + compression)
newcls.EXTENSIONS = tuple(new_extensions)
return newcls
@property
def formats(self):
return self.registry.values()
@lru_cache(5)
def _ext_to_attr_if_attr2(self, attr, attr2):
"""
Return ``{ext: `attr`, ...}`` dict if ``cls`` has `attr2`.
If `attr` is '', return ``{ext: cls, ...}`` instead.
"""
return OrderedDict((ext, getattr(cls, attr, cls))
for cls in self.registry.values()
if hasattr(cls, attr2)
for ext in getattr(cls, 'EXTENSIONS', []))
@property
def names(self):
return self._ext_to_attr_if_attr2('DESCRIPTION', '__class__')
@property
def writers(self):
return self._ext_to_attr_if_attr2('', 'write_file')
@property
def readers(self):
return self._ext_to_attr_if_attr2('', 'read_file')
@property
def img_writers(self):
return self._ext_to_attr_if_attr2('', 'write_image')
@property
def graph_writers(self):
return self._ext_to_attr_if_attr2('', 'write_graph')
@abstract
class FileFormat(metaclass=FileFormatMeta):
"""
Subclasses set the following attributes and override the following methods:
EXTENSIONS = ('.ext1', '.ext2', ...)
DESCRIPTION = 'human-readable file format description'
SUPPORT_COMPRESSED = False
@classmethod
def read_file(cls, filename, wrapper=_IDENTITY):
... # load headers, data, ...
return wrapper(self.data_table(data, headers))
@classmethod
def write_file(cls, filename, data):
...
self.write_headers(writer.write, data)
writer.writerows(data)
FileFormat.data_table() returns an Orange.data.Table built from the `data`
iterable (a list of rows, each a list of column values). `wrapper` is the
desired output class (if other than Table).
"""
PRIORITY = 10000 # Sort order in OWSave widget combo box, lower is better
@staticmethod
def open(filename, *args, **kwargs):
"""
Format handlers can use this method instead of the builtin ``open()``
to transparently (de)compress files if requested (according to
`filename` extension). Set ``SUPPORT_COMPRESSED=True`` if you use this.
"""
return open_compressed(filename, *args, **kwargs)
@classmethod
def read(cls, filename, wrapper=None):
for ext, reader in cls.readers.items():
if filename.endswith(ext):
return reader.read_file(filename, wrapper)
else: raise IOError('No readers for file "{}"'.format(filename))
@classmethod
def write(cls, filename, data):
for ext, writer in cls.writers.items():
if filename.endswith(ext):
return writer.write_file(filename, data)
else: raise IOError('No writers for file "{}"'.format(filename))
@staticmethod
def parse_headers(data):
"""Return (header rows, rest of data) as discerned from `data`"""
def is_number(item):
try: float(item)
except ValueError: return False
return True
# Second row items are type identifiers
def header_test2(items):
return all(map(_RE_TYPES.match, items))
# Third row items are flags and column attributes (attr=value)
def header_test3(items):
return all(map(_RE_FLAGS.match, items))
data = iter(data)
header_rows = []
# Try to parse a three-line header
lines = []
try:
lines.append(list(next(data)))
lines.append(list(next(data)))
lines.append(list(next(data)))
except StopIteration:
lines, data = [], chain(lines, data)
if lines:
l1, l2, l3 = lines
# Three-line header if line 2 & 3 match (1st line can be anything)
if header_test2(l2) and header_test3(l3):
header_rows = [l1, l2, l3]
else:
lines, data = [], chain((l1, l2, l3), data)
# Try to parse a single-line header
if not header_rows:
try: lines.append(list(next(data)))
except StopIteration: pass
if lines:
# Header if none of the values in line 1 parses as a number
if not all(is_number(i) for i in lines[0]):
header_rows = [lines[0]]
else:
data = chain(lines, data)
return header_rows, data
@classmethod
def data_table(self, data, headers=None):
"""
Return Orange.data.Table given rows of `headers` (iterable of iterable)
and rows of `data` (iterable of iterable; if ``numpy.ndarray``, might
as well **have it sorted column-major**, e.g. ``order='F'``).
Basically, the idea of subclasses is to produce those two iterables,
however they might.
If `headers` is not provided, the header rows are extracted from `data`,
assuming they precede it.
"""
if not headers:
headers, data = self.parse_headers(data)
# Consider various header types (single-row, two-row, three-row, none)
if 3 == len(headers):
names, types, flags = map(list, headers)
else:
if 1 == len(headers):
HEADER1_FLAG_SEP = '#'
# First row format either:
# 1) delimited column names
# 2) -||- with type and flags prepended, separated by #,
# e.g. d#sex,c#age,cC#IQ
_flags, names = zip(*[i.split(HEADER1_FLAG_SEP, 1) if HEADER1_FLAG_SEP in i else ('', i)
for i in headers[0]])
names = list(names)
elif 2 == len(headers):
names, _flags = map(list, headers)
else:
# Use heuristics for everything
names, _flags = [], []
types = [''.join(filter(str.isupper, flag)).lower() for flag in _flags]
flags = [Flags.join(filter(str.islower, flag)) for flag in _flags]
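# (In the single-row form above, upper-case characters of the '#'-prefix encode
# the column type and lower-case characters its flags, so 'cC#IQ' marks a
# continuous class column named IQ.)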
# Determine maximum row length
rowlen = max(map(len, (names, types, flags)))
def _equal_length(lst):
lst.extend(['']*(rowlen - len(lst)))
return lst
# Ensure all data is of equal width in a column-contiguous array
data = np.array([_equal_length(list(row)) for row in data if any(row)],
copy=False, dtype=object, order='F')
# Data may actually be longer than headers were
try: rowlen = data.shape[1]
except IndexError: pass
else:
for lst in (names, types, flags):
_equal_length(lst)
NAMEGEN = namegen('Feature ', 1)
Xcols, attrs = [], []
Mcols, metas = [], []
Ycols, clses = [], []
Wcols = []
# Iterate through the columns
for col in range(rowlen):
flag = Flags(Flags.split(flags[col]))
if flag.i: continue
type_flag = types and types[col].strip()
try:
orig_values = [np.nan if i in MISSING_VALUES else i
for i in (i.strip() for i in data[:, col])]
except IndexError:
# Having no data instances leads here
orig_values = []
# In this case, coltype could be anything. It's set as-is
# only to satisfy test_table.TableTestCase.test_append
coltype = DiscreteVariable
coltype_kwargs = {}
valuemap = []
values = orig_values
if type_flag in StringVariable.TYPE_HEADERS:
coltype = StringVariable
elif type_flag in ContinuousVariable.TYPE_HEADERS:
coltype = ContinuousVariable
try:
values = [float(i) for i in orig_values]
except ValueError:
for row, num in enumerate(orig_values):
try: float(num)
except ValueError: break
raise ValueError('Non-continuous value in (1-based) '
'line {}, column {}'.format(row + len(headers) + 1,
col + 1))
elif (type_flag in DiscreteVariable.TYPE_HEADERS or
_RE_DISCRETE_LIST.match(type_flag)):
if _RE_DISCRETE_LIST.match(type_flag):
valuemap = Flags.split(type_flag)
coltype_kwargs.update(ordered=True)
else:
valuemap = sorted(set(orig_values) - {np.nan})
else:
# No known type specified, use heuristics
is_discrete = is_discrete_values(orig_values)
if is_discrete:
valuemap = sorted(is_discrete)
else:
try: values = [float(i) for i in orig_values]
except ValueError:
coltype = StringVariable
else:
coltype = ContinuousVariable
if valuemap:
# Map discrete data to ints
def valuemap_index(val):
try: return valuemap.index(val)
except ValueError: return np.nan
values = np.vectorize(valuemap_index, otypes=[float])(orig_values)
coltype = DiscreteVariable
coltype_kwargs.update(values=valuemap)
if coltype is StringVariable:
values = ['' if i is np.nan else i
for i in orig_values]
# Write back the changed data. This is needed to pass the
# correct, converted values into Table.from_numpy below
try: data[:, col] = values
except IndexError: pass
if flag.m or coltype is StringVariable:
append_to = (Mcols, metas)
elif flag.w:
append_to = (Wcols, None)
elif flag.c:
append_to = (Ycols, clses)
else:
append_to = (Xcols, attrs)
cols, domain_vars = append_to
cols.append(col)
if domain_vars is not None:
if names and names[col]:
# Use existing variable if available
var = coltype.make(names[col].strip(), **coltype_kwargs)
else:
# Never use existing for un-named variables
var = coltype(next(NAMEGEN), **coltype_kwargs)
var.attributes.update(flag.attributes)
domain_vars.append(var)
# Reorder discrete values to match existing variable
if var.is_discrete and not var.ordered:
new_order, old_order = var.values, coltype_kwargs.get('values', var.values)
if new_order != old_order:
offset = len(new_order)
column = data[:, col] if data.ndim > 1 else data
column += offset
for i, val in enumerate(var.values):
try: oldval = old_order.index(val)
except ValueError: continue
bn.replace(column, offset + oldval, new_order.index(val))
from Orange.data import Table, Domain
domain = Domain(attrs, clses, metas)
if not data.size:
return Table.from_domain(domain, 0)
table = Table.from_numpy(domain,
data[:, Xcols].astype(float, order='C'),
data[:, Ycols].astype(float, order='C'),
data[:, Mcols].astype(object, order='C'),
data[:, Wcols].astype(float, order='C'))
return table
@staticmethod
def header_names(data):
return ['weights'] * data.has_weights() + \
[v.name for v in chain(data.domain.attributes,
data.domain.class_vars,
data.domain.metas)]
@staticmethod
def header_types(data):
def _vartype(var):
if var.is_continuous or var.is_string:
return var.TYPE_HEADERS[0]
elif var.is_discrete:
return Flags.join(var.values) if var.ordered else var.TYPE_HEADERS[0]
raise NotImplementedError
return ['continuous'] * data.has_weights() + \
[_vartype(v) for v in chain(data.domain.attributes,
data.domain.class_vars,
data.domain.metas)]
@staticmethod
def header_flags(data):
return list(chain(['weight'] * data.has_weights(),
(Flags.join([flag], *('{}={}'.format(*a)
for a in sorted(var.attributes.items())))
for flag, var in chain(zip(repeat(''), data.domain.attributes),
zip(repeat('class'), data.domain.class_vars),
zip(repeat('meta'), data.domain.metas)))))
@classmethod
def write_headers(cls, write, data):
"""`write` is a callback that accepts an iterable"""
write(cls.header_names(data))
write(cls.header_types(data))
write(cls.header_flags(data))
@classmethod
def write_data(cls, write, data):
"""`write` is a callback that accepts an iterable"""
vars = list(chain((ContinuousVariable('_w'),) if data.has_weights() else (),
data.domain.attributes,
data.domain.class_vars,
data.domain.metas))
for row in zip(data.W if data.W.ndim > 1 else data.W[:, np.newaxis],
data.X,
data.Y if data.Y.ndim > 1 else data.Y[:, np.newaxis],
data.metas):
write(['' if isinstance(val, Number) and isnan(val) else
var.values[int(val)] if var.is_discrete else
val
for var, val in zip(vars, flatten(row))])
@classmethod
def write(cls, filename, data):
return cls.write_file(filename, data)
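# Illustrative sketch added for clarity (not part of the original module),
# assuming the standard 'continuous'/'discrete' type headers: parse_headers
# recognises a three-line header (names, types, flags) because rows 2 and 3
# match the type and flag patterns; everything after it is returned as data.
def _example_parse_headers():
    rows = [['sepal length', 'iris'],
            ['continuous', 'discrete'],
            ['', 'class'],
            ['5.1', 'Iris-setosa']]
    headers, rest = CSVFormat.parse_headers(rows)
    return headers, list(rest)   # three header rows, one data row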
class CSVFormat(FileFormat):
EXTENSIONS = ('.csv',)
DESCRIPTION = 'Comma-separated values'
DELIMITERS = ',;:\t$ '
SUPPORT_COMPRESSED = True
PRIORITY = 20
@classmethod
def read_file(cls, filename, wrapper=None):
wrapper = wrapper or _IDENTITY
import csv, sys, locale
for encoding in (lambda: ('us-ascii', None), # fast
lambda: (detect_encoding(filename), None), # precise
lambda: (locale.getpreferredencoding(False), None),
lambda: (sys.getdefaultencoding(), None), # desperate
lambda: ('utf-8', None), # ...
lambda: ('utf-8', 'ignore')): # fallback
encoding, errors = encoding()
# Clear the error flag for all except the last check, because
# the error of the second-to-last check is stored and shown as a warning in owfile
if errors != 'ignore':
error = ''
with cls.open(filename, mode='rt', newline='', encoding=encoding, errors=errors) as file:
# Sniff the CSV dialect (delimiter, quotes, ...)
try:
dialect = csv.Sniffer().sniff(file.read(1024), cls.DELIMITERS)
except UnicodeDecodeError as e:
error = e
continue
except csv.Error:
dialect = csv.excel()
dialect.delimiter = cls.DELIMITERS[0]
file.seek(0)
dialect.skipinitialspace = True
try:
reader = csv.reader(file, dialect=dialect)
data = cls.data_table(reader)
if error and isinstance(error, UnicodeDecodeError):
pos, endpos = error.args[2], error.args[3]
warning = ('Skipped invalid byte(s) in position '
'{}{}').format(pos,
('-' + str(endpos)) if (endpos - pos) > 1 else '')
warnings.warn(warning)
return wrapper(data)
except Exception as e:
error = e
continue
raise ValueError('Cannot parse dataset {}: {}'.format(filename, error))
@classmethod
def write_file(cls, filename, data):
import csv
with cls.open(filename, mode='wt', newline='', encoding='utf-8') as file:
writer = csv.writer(file, delimiter=cls.DELIMITERS[0])
cls.write_headers(writer.writerow, data)
cls.write_data(writer.writerow, data)
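# Illustrative sketch added for clarity (hypothetical filename, not part of the
# original module): write an Orange data table with the CSV handler and read it
# straight back through the same handler.
def _example_csv_roundtrip(table, filename='roundtrip_example.csv'):
    CSVFormat.write_file(filename, table)
    return CSVFormat.read_file(filename)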
class TabFormat(CSVFormat):
EXTENSIONS = ('.tab', '.tsv')
DESCRIPTION = 'Tab-separated values'
DELIMITERS = '\t'
PRIORITY = 10
class PickleFormat(FileFormat):
EXTENSIONS = ('.pickle', '.pkl')
DESCRIPTION = 'Pickled Python object file'
@staticmethod
def read_file(filename, wrapper=None):
wrapper = wrapper or _IDENTITY
import pickle
with open(filename, 'rb') as f:
return wrapper(pickle.load(f))
@staticmethod
def write_file(filename, data):
import pickle
with open(filename, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
class BasketFormat(FileFormat):
EXTENSIONS = ('.basket', '.bsk')
DESCRIPTION = 'Basket file'
@classmethod
def read_file(cls, filename, storage_class=None):
from Orange.data import _io, Table, Domain
import sys
if storage_class is None:
storage_class = Table
def constr_vars(inds):
if inds:
return [ContinuousVariable(x.decode("utf-8")) for _, x in
sorted((ind, name) for name, ind in inds.items())]
X, Y, metas, attr_indices, class_indices, meta_indices = \
_io.sparse_read_float(filename.encode(sys.getdefaultencoding()))
attrs = constr_vars(attr_indices)
classes = constr_vars(class_indices)
meta_attrs = constr_vars(meta_indices)
domain = Domain(attrs, classes, meta_attrs)
return storage_class.from_numpy(
domain, attrs and X, classes and Y, metas and meta_attrs)
class ExcelFormat(FileFormat):
EXTENSIONS = ('.xls', '.xlsx')
DESCRIPTION = 'Microsoft Excel spreadsheet'
@classmethod
def read_file(cls, filename, wrapper=None):
wrapper = wrapper or _IDENTITY
file_name, _, sheet_name = filename.rpartition(':')
if not path.isfile(file_name):
file_name, sheet_name = filename, ''
import xlrd
wb = xlrd.open_workbook(file_name, on_demand=True)
if sheet_name:
ss = wb.sheet_by_name(sheet_name)
else:
ss = wb.sheet_by_index(0)
try:
first_row = next(i for i in range(ss.nrows) if any(ss.row_values(i)))
first_col = next(i for i in range(ss.ncols) if ss.cell_value(first_row, i))
row_len = ss.row_len(first_row)
cells = filter(any,
[[str(ss.cell_value(row, col)) if col < ss.row_len(row) else ''
for col in range(first_col, row_len)]
for row in range(first_row, ss.nrows)])
table = cls.data_table(cells)
except Exception:
raise IOError("Couldn't load spreadsheet from " + file_name)
return wrapper(table)
class DotFormat(FileFormat):
EXTENSIONS = ('.dot', '.gv')
DESCRIPTION = 'Dot graph description'
SUPPORT_COMPRESSED = True
@classmethod
def write_graph(cls, filename, graph):
from sklearn import tree
tree.export_graphviz(graph, out_file=cls.open(filename, 'wt'))
@classmethod
def write(cls, filename, tree):
if type(tree) == dict:
tree = tree['tree']
cls.write_graph(filename, tree)
|
bsd-2-clause
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
hansbrenna/NetCDF_postprocessor
|
control_run_variability.py
|
1
|
4102
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 11:12:51 2016
@author: hanbre
This program takes an input file which lists netCDF files containing
horizontally averaged O3 concentrations in 67 WACCM layers and calculates
total column O3. Each file is treated as a single year of a control simulation
and the program characterizes the natural interannual variability in the run.
It also saves the individual yearly time series as columns in a .csv file for
further processing or ease of reuse. As long as the files contain a single
horizontal grid point, this program is applicable.
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import HB_module.outsourced
import HB_module.colordefs
label_size = 14
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
colors = HB_module.colordefs.colordefs()
input_file = sys.argv[1]
var = sys.argv[2]
gases = {'BRO':95.9,'CLO':51.45,'O3':48.0,'HCL':36.46,'HBR':80.91}
T = ['']
ppt = ['BRO','BROY','HBR']
ppb = ['CLOY','CLO','HCL']
ppm = ['O3']
region = '65N'
in_name = input_file.split('.')[0]
df = pd.DataFrame(index=np.arange(1,13))
#fig = plt.figure(figsize=(20,20))
with open(input_file,'r') as file_in:
c = 4
#sns.set_palette('dark')
for line in file_in:
l = line.strip('\n')
c += 1
ds = xr.open_dataset(l)
O3_vmm = ds[var].squeeze()
if var in gases.keys() or var in T:
g=9.81
P0 = ds.P0
PS = ds.PS
hyai = ds.hyai
hybi = ds.hybi
Plevi = hyai*P0+hybi*PS
Plevi = Plevi.values
dp = np.zeros((66,12))
for i in range(Plevi.shape[1]):
dp[:,i] = Plevi[1:,i]-Plevi[0:66,i]
if var in gases.keys():
O3_mmm = O3_vmm*(gases[var]/28.94)
O3_t = O3_mmm*(dp.transpose()/g)
O3_tot = O3_t.sum(dim='lev')
O3_tot_DU = O3_tot/2.1415e-5
elif var in T:
T = O3_vmm
M =(dp[0:len(T.lev)].transpose()/g)
TM = T*M
Tmean = np.average(TM.values, axis = 1, weights = M)
TM.values = Tmean
O3_tot_DU = TM
df[str(c)]=O3_tot_DU.values
else:
df[str(c)]=O3_vmm.values
df.plot(colormap='winter',figsize=(10,10))
std=df.std(axis=1)
ma = df.mean(axis=1)
#yl = HB_module.outsourced.clb_labels(var)
plt.xlabel('Month',fontsize='18'); plt.ylabel('Column {} (DU)'.format(var),fontsize=18)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize='14')
#plt.title('Arctic column ozone',fontsize=18)
# plt.show()
plt.savefig('{}_{}_natural_variability.svg'.format(in_name,var),dpi=100,bbox_inches='tight')
plt.savefig('{}_{}_natural_variability.png'.format(in_name,var),dpi=100,bbox_inches='tight')
fig=plt.figure()
plt.plot(ma,color=colors['darkblue'])
plt.hold('on')
plt.fill_between(std.index,ma-2*std, ma+2*std, color=colors['deepblue'], alpha=0.2)
plt.savefig('{}_{}_m_std.svg'.format(in_name,var),dpi=100,bbox_inches='tight')
plt.savefig('{}_{}_m_std.png'.format(in_name,var),dpi=100,bbox_inches='tight')
fig3=plt.figure()
#Deseasonalise
df_ds = df.sub(ma,axis=0)
df_ds_s=df_ds.stack()
sns.distplot(df_ds_s)
#plt.show()
plt.savefig('{}_{}_kde.svg'.format(in_name,var),dpi=100,bbox_inches='tight')
plt.savefig('{}_{}_kde.png'.format(in_name,var),dpi=100,bbox_inches='tight')
fig4=plt.figure()
df_ds_s.hist(bins=19)
plt.savefig('{}_{}_histogram.svg'.format(in_name,var),dpi=100,bbox_inches='tight')
plt.savefig('{}_{}_histogram.png'.format(in_name,var),dpi=100,bbox_inches='tight')
df.to_csv('{}_{}_control_run.csv'.format(region,var))
plt.close('all')
|
gpl-3.0
|
Unidata/MetPy
|
v0.12/_downloads/7ce004631a19ab9a125e080a54ef25ce/Advanced_Sounding.py
|
3
|
3136
|
# Copyright (c) 2015,2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
=================
Advanced Sounding
=================
Plot a sounding using MetPy with more advanced features.
Beyond just plotting data, this uses calculations from `metpy.calc` to find the lifted
condensation level (LCL) and the profile of a surface-based parcel. The area between the
ambient profile and the parcel profile is colored as well.
"""
import matplotlib.pyplot as plt
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'), how='all'
).reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
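# (wind_components follows the meteorological convention, roughly
#  u = -speed * sin(direction) and v = -speed * cos(direction).)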
###########################################
# Create a new figure. The dimensions here give a good aspect ratio.
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot.
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Calculate LCL height and plot as black dot. Because `p`'s first value is
# ~1000 mb and its last value is ~250 mb, the `0` index is selected for
# `p`, `T`, and `Td` to lift the parcel from the surface. If `p` were inverted,
# i.e. started from a low value (250 mb) and rose to a high value (1000 mb),
# the `-1` index should be selected instead.
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Calculate full parcel profile and add to plot as black line
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
skew.plot(p, prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, prof)
skew.shade_cape(p, T, prof)
# An example of a slanted line at constant T -- in this case the 0
# isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
|
bsd-3-clause
|
ky822/Data_Bootcamp
|
Code/Python/WEO_all_oldschool_incomplete.py
|
2
|
1562
|
"""
Messing around with the IMF's WEO dataset. The first section is an exploration
of various methods of reading data from a url.
Once we've read in the data, we can slice as needed.
Note: data file is labeled xls but it's really tab-delimited text.
Prepared for the NYU Course "Data Bootcamp."
More at https://github.com/DaveBackus/Data_Bootcamp
References
* http://www.imf.org/external/ns/cs.aspx?id=28
* http://pandas.pydata.org/pandas-docs/stable/generated/pandas.io.parsers.read_table.html
* http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table
* https://docs.python.org/3.4/library/urllib.html
* https://docs.python.org/3.4/library/os.html
Written by Dave Backus @ NYU, September 2014
Created with Python 3.4
"""
import pandas as pd
import urllib.request
import os
"""
1. Read data from url (several approaches illustrated)
"""
# file is labeled xls but it's really tab delimited
url = 'http://www.imf.org/external/pubs/ft/weo/2014/01/weodata/WEOApr2014all.xls'
# two versions (takes 5-10 seconds for both)
df1 = pd.read_table(url) # tab delimited is the default
df2 = pd.read_csv(url, sep='\t') # tab = \t
#%%
# copy to hard drive
file = '../Data/WEOApr2014all.xls'
urllib.request.urlretrieve(url, file)
#%%
# Sarah's version
f = urllib.request.urlopen(url)
file_sbh = file[:-4] + '_sbh' + file[-4:]
with open(file_sbh, 'wb') as local_file:
local_file.write(f.read())
#%%
# cool thing from Sarah: strips filename from url
base = os.path.basename(url)
"""
2. Slice and dice
"""
|
mit
|
jmontgom10/pyPol
|
06b_polCalConstants.py
|
2
|
17987
|
# -*- coding: utf-8 -*-
"""
Computes the polarimetric efficiency (PE), position angle direction (+1 or -1),
and position angle offset (DeltaPA) for the instrument.
"""
# TODO: Should I find a way to use Python's statsmodels to do linear fitting
# with uncertainties in both X and Y?
# Core imports
import os
import sys
import copy
import pdb  # used for the interactive sanity-check breakpoints below
# Scipy/numpy imports
import numpy as np
from scipy import odr
# Import statsmodels for robust linear regression
import statsmodels.api as smapi
# Astropy imports
from astropy.table import Table, Column, hstack, join
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.stats import sigma_clipped_stats
from photutils import (centroid_com, aperture_photometry, CircularAperture,
CircularAnnulus)
# Import plotting utilities
from matplotlib import pyplot as plt
# Import the astroimage package
import astroimage as ai
# This script will compute the photometry of polarization standard stars
# and output a file containing the polarization position angle
# additive correction and the polarization efficiency of the PRISM instrument.
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Define how the font will appear in the plots
font = {'family': 'sans-serif',
'color': 'black',
'weight': 'normal',
'size': 14
}
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201612'
# This is the name of the file in which the calibration constants will be stored
polCalConstantsFile = os.path.join(pyPol_data, 'polCalConstants.csv')
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Group the fileIndex by waveband
fileIndexByWaveband = fileIndex.group_by(['FILTER'])
# Retrieve the waveband values within the specified calibration data
wavebands = np.unique(fileIndexByWaveband['FILTER'])
# Initialize a table to store all the measured polarization calibration constants
calTable = Table(names=('FILTER', 'PE', 's_PE', 'PAsign', 'D_PA', 's_D_PA'),
dtype=('S1', 'f8', 'f8', 'i8', 'f8', 'f8'))
# Also initialize a dictionary to store ALL of the polarization data
allPolCalDict = {}
# Loop through each waveband and compute the calibration constants from the data
# available for that waveband.
for thisFilter in wavebands:
# Update the user on processing status
print('\nProcessing calibration data for')
print('Filter : {0}'.format(thisFilter))
# Define the polarization standard files
thisFilename = 'polStandardTable_{0}.csv'.format(thisFilter)
polTableFile = os.path.join(pyPol_data, thisFilename)
# Read in the polarization calibration data file
polCalTable = Table.read(polTableFile, format='ascii.csv')
###############
# Get PE value
###############
# # Grab the column names of the polarization measurements
# polStart = lambda s: s.startswith('P_' + thisFilter)
# polBool = list(map(polStart, polCalTable.keys()))
# polInds = np.where(polBool)
# polKeys = np.array(polCalTable.keys())[polInds]
# Initialize a dictionary to store all the calibration measurements
tmpDict1 = {
'value':[],
'uncert':[]}
tmpDict2 = {
'expected':copy.deepcopy(tmpDict1),
'measured':copy.deepcopy(tmpDict1)}
polCalDict = {
'P':copy.deepcopy(tmpDict2),
'PA':copy.deepcopy(tmpDict2)}
# Quickly build a list of calibration keys
calKeyList = ['_'.join([prefix, thisFilter])
for prefix in ['P', 'sP', 'PA', 'sPA']]
# Loop over each row in the calibration data table
for istandard, standard in enumerate(polCalTable):
# Grab the appropriate row for this standard (as a table object)
standardTable = polCalTable[np.array([istandard])]
# Trim off unnecessary columns before looping over what remains
standardTable.remove_columns(['Name', 'RA_1950', 'Dec_1950'])
# Now loop over the remaining keys and
for key in standardTable.keys():
# Test if this is a calibration value
if key in calKeyList: continue
# Test if this value is masked
if standardTable[key].data.mask: continue
# If this is an unmasked, non-calibration value, then store it!
# Find out the proper calibration key for polCalTable
calKeyInd = np.where([key.startswith(k) for k in calKeyList])
thisCalKey = calKeyList[calKeyInd[0][0]]
# Begin by parsing which key we're dealing with
dictKey = (key.split('_'))[0]
if dictKey.endswith('A'):
dictKey = 'PA'
elif dictKey.endswith('P'):
dictKey = 'P'
else:
print('funky keys!')
pdb.set_trace()
# Parse whether this is a value or an uncertainty
if key.startswith('s'):
val_sig = 'uncert'
else:
val_sig = 'value'
# Store the expected value
try:
polCalDict[dictKey]['expected'][val_sig].append(
standardTable[thisCalKey].data.data[0])
except:
pdb.set_trace()
# Store the measured value
polCalDict[dictKey]['measured'][val_sig].append(
standardTable[key].data.data[0])
###################
# Identify Outliers
###################
# Grab the FULL set of expected and measured polarization values
expectedPol = np.array(polCalDict['P']['expected']['value'])
uncertInExpectedPol = np.array(polCalDict['P']['expected']['uncert'])
measuredPol = np.array(polCalDict['P']['measured']['value'])
uncertInMeasuredPol = np.array(polCalDict['P']['measured']['uncert'])
# Run a statsmodels linear regression and test for outliers
OLSmodel = smapi.OLS(
expectedPol,
measuredPol,
hasconst=False
)
OLSregression = OLSmodel.fit()
# Find the outliers
outlierTest = OLSregression.outlier_test()
outlierBool = [t[2] < 0.5 for t in outlierTest]
# Grab the FULL set of expected and measured polarization values
expectedPA = np.array(polCalDict['PA']['expected']['value'])
uncertInExpectedPA = np.array(polCalDict['PA']['expected']['uncert'])
measuredPA = np.array(polCalDict['PA']['measured']['value'])
uncertInMeasuredPA = np.array(polCalDict['PA']['measured']['uncert'])
# Run a statsmodels linear regression and test for outliers
OLSmodel = smapi.OLS(
expectedPA,
measuredPA,
hasconst=True
)
OLSregression = OLSmodel.fit()
# Find the outliers
outlierTest = OLSregression.outlier_test()
outlierBool = np.logical_or(
outlierBool,
[t[2] < 0.5 for t in outlierTest]
)
# Cull the list of Ps and PAs
goodInds = np.where(np.logical_not(outlierBool))
expectedPol = expectedPol[goodInds]
uncertInExpectedPol = uncertInExpectedPol[goodInds]
measuredPol = measuredPol[goodInds]
uncertInMeasuredPol = uncertInMeasuredPol[goodInds]
expectedPA = expectedPA[goodInds]
uncertInExpectedPA = uncertInExpectedPA[goodInds]
measuredPA = measuredPA[goodInds]
uncertInMeasuredPA = uncertInMeasuredPA[goodInds]
# TODO: print an update to the user on the polarization values culled
###############
# Get PE value
###############
# Close any remaining plots before proceeding to show the user the graphical
# summary of the calibration data.
plt.close('all')
# Define the model to be used in the fitting
def PE(slope, x):
return slope*x
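# (i.e. the fit assumes measured P = PE * expected P, a zero-intercept line
#  whose slope is the polarization efficiency.)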
# Set up ODR with the model and data.
PEmodel = odr.Model(PE)
data = odr.RealData(
expectedPol,
measuredPol,
sx=uncertInExpectedPol,
sy=uncertInMeasuredPol
)
# Initialize the full ODR model object
odrObj = odr.ODR(data, PEmodel, beta0=[1.])
# Run the regression.
PEout = odrObj.run()
# Use the in-built pprint method to give us results.
print(thisFilter + '-band PE fitting results')
PEout.pprint()
print('\n\nGenerating P plot')
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.errorbar(
polCalDict['P']['expected']['value'],
polCalDict['P']['measured']['value'],
xerr=polCalDict['P']['expected']['uncert'],
yerr=polCalDict['P']['measured']['uncert'],
ecolor='b', linestyle='None', marker=None)
xlim = ax.get_xlim()
ax.plot([0,max(xlim)], PE(PEout.beta[0], np.array([0,max(xlim)])), 'g')
plt.xlabel('Cataloged P [%]')
plt.ylabel('Measured P [%]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ylim = 0, ylim[1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.title(thisFilter + '-band Polarization Efficiency')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PE = {0:4.3g} +/- {1:4.3g}'.format(
PEout.beta[0], PEout.sd_beta[0]), fontdict=font)
import pdb; pdb.set_trace()
# Test if a polarization efficiency greater than one was retrieved...
if PEout.beta[0] > 1.0:
print('Polarization Efficiency greater than one detected.')
print('Forcing PE constant to be 1.0')
PEout.beta[0] = 1.0
###############
# Get PA offset
###############
# Fit a model to the PA1 vs. PA0 data
# Define the model to be used in the fitting
def deltaPA(B, x):
return B[0]*x + B[1]
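# (i.e. measured PA = (+/-1) * expected PA + DeltaPA; the slope is first fit
#  freely to recover its sign, then fixed so that only the offset is refit.)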
# Set up ODR with the model and data.
deltaPAmodel = odr.Model(deltaPA)
data = odr.RealData(
expectedPA,
measuredPA,
sx=uncertInExpectedPA,
sy=uncertInMeasuredPA
)
# On first pass, just figure out what the sign is
odrObj = odr.ODR(data, deltaPAmodel, beta0=[0.0, 90.0])
dPAout = odrObj.run()
PAsign = np.round(dPAout.beta[0])
# Build the proper fitter class with the slope fixed
odrObj = odr.ODR(data, deltaPAmodel, beta0=[PAsign, 90.0], ifixb=[0,1])
# Run the regression.
dPAout = odrObj.run()
# Use the in-built pprint method to give us results.
print(thisFilter + '-band delta PA fitting results')
dPAout.pprint()
# For ease of reference, convert the expected and measured values to arrays
PA0 = np.array(polCalDict['PA']['expected']['value'])
PA1 = np.array(polCalDict['PA']['measured']['value'])
# Apply the correction terms
dPAval = dPAout.beta[1]
PAcor = ((PAsign*(PA1 - dPAval)) + 720.0) % 180.0
# TODO
# Check if PAcor values are closer corresponding PA0_V values
# by adding or subtracting 180
PA0 = np.array(polCalDict['PA']['expected']['value'])
PAminus = np.abs((PAcor - 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAminus) > 0:
PAcor[np.where(PAminus)] = PAcor[np.where(PAminus)] - 180
PAplus = np.abs((PAcor + 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAplus) > 0:
PAcor[np.where(PAplus)] = PAcor[np.where(PAplus)] + 180
# Do a final regression to plot-test if things are right
data = odr.RealData(
PA0,
PAcor,
sx=polCalDict['PA']['expected']['uncert'],
sy=polCalDict['PA']['measured']['uncert']
)
odrObj = odr.ODR(data, deltaPAmodel, beta0=[1.0, 0.0], ifixb=[0,1])
dPAcor = odrObj.run()
# Plot up the results
# PA measured vs. PA true
print('\n\nGenerating PA plot')
fig.delaxes(ax)
ax = fig.add_subplot(1,1,1)
#ax.errorbar(PA0_V, PA1_V, xerr=sPA0_V, yerr=sPA1_V,
# ecolor='b', linestyle='None', marker=None)
#ax.plot([0,max(PA0_V)], deltaPA(dPAout.beta, np.array([0,max(PA0_V)])), 'g')
ax.errorbar(PA0, PAcor,
xerr=polCalDict['PA']['expected']['uncert'],
yerr=polCalDict['PA']['measured']['uncert'],
ecolor='b', linestyle='None', marker=None)
xlim = ax.get_xlim()
ax.plot([0,max(xlim)], deltaPA(dPAcor.beta, np.array([0, max(xlim)])), 'g')
plt.xlabel('Cataloged PA [deg]')
plt.ylabel('Measured PA [deg]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ax.set_xlim(xlim)
plt.title(thisFilter + '-band PA offset')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PA offset = {0:4.3g} +/- {1:4.3g}'.format(
dPAout.beta[1], dPAout.sd_beta[1]), fontdict=font)
pdb.set_trace()
# Now that all the calibration constants have been estimated and the results
# shown to the user (in theory for their sanity-test approval), store the
# final calibration data in the calTable variable
calTable.add_row([thisFilter, PEout.beta[0], PEout.sd_beta[0],
np.int(PAsign), dPAout.beta[1], dPAout.sd_beta[1]])
# Store a copy of polCalDict in allPolCalDict
allPolCalDict[thisFilter] = copy.deepcopy(polCalDict)
# Now double check if the PA offsets are agreeable. If not, keep them separate,
# but otherwise attempt to combine them...
#####################################################
# Check if a single deltaPA value is appropriate
#####################################################
# Extract the originally estimated dPA values from the table
dPAvalues = calTable['D_PA'].data
dPAsigmas = calTable['s_D_PA'].data
# Compute all possible differences in dPAs and their uncertainties
D_dPAmatrix = np.zeros(2*dPAvalues.shape)
s_D_dPAmatrix = np.ones(2*dPAvalues.shape)
for i in range(len(dPAvalues)):
for j in range(len(dPAvalues)):
# Skip over trivial or redundant elements
if j <= i: continue
D_dPAmatrix[i,j] = np.abs(dPAvalues[i] - dPAvalues[j])
s_D_dPAmatrix[i,j] = np.sqrt(dPAsigmas[i]**2 + dPAsigmas[j]**2)
# Check whether these two values are significantly different from each other
if (D_dPAmatrix/s_D_dPAmatrix > 3.0).any():
print('Some of these calibration constants are significantly different.')
print('Leave them as they are.')
else:
PA0 = []
PA1 = []
sPA0 = []
sPA1 = []
for key, val in allPolCalDict.items():
PA0.extend(val['PA']['expected']['value'])
PA1.extend(val['PA']['measured']['value'])
sPA0.extend(val['PA']['expected']['uncert'])
sPA1.extend(val['PA']['measured']['uncert'])
# Do a final regression to plot-test if things are right
data = odr.RealData(PA0, PA1, sx=sPA0, sy=sPA1)
# On first pass, just figure out what the sign is
odrObj = odr.ODR(data, deltaPAmodel, beta0=[0.0, 90.0])
dPAout = odrObj.run()
PAsign = np.round(dPAout.beta[0])
# Build the proper fitter class with the slope fixed
odrObj = odr.ODR(data, deltaPAmodel, beta0=[PAsign, 90.0], ifixb=[0,1])
# Run the regression.
dPAout = odrObj.run()
# Use the in-built pprint method to give us results.
print('Final delta PA fitting results')
dPAout.pprint()
# Apply the correction terms
dPAval = dPAout.beta[1]
PAcor = ((PAsign*(PA1 - dPAval)) + 720.0) % 180.0
# Check if the correct PAs need 180 added or subtracted.
PAminus = np.abs((PAcor - 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAminus) > 0:
PAcor[np.where(PAminus)] = PAcor[np.where(PAminus)] - 180
PAplus = np.abs((PAcor + 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAplus) > 0:
PAcor[np.where(PAplus)] = PAcor[np.where(PAplus)] + 180
# # Save corrected values for possible future use
# PAcor_R = PAcor.copy()
# Do a final regression to plot-test if things are right
data = odr.RealData(PA0, PAcor, sx=sPA0, sy=sPA1)
odrObj = odr.ODR(data, deltaPAmodel, beta0=[1.0, 0.0], ifixb=[0,1])
dPAcor = odrObj.run()
# Plot up the results
# PA measured vs. PA true
print('\n\nGenerating PA plot')
fig.delaxes(ax)
ax = fig.add_subplot(1,1,1)
#ax.errorbar(PA0_R, PA1, xerr=sPA0_R, yerr=sPA1,
# ecolor='b', linestyle='None', marker=None)
#ax.plot([0,max(PA0_R)], deltaPA(dPAout.beta, np.array([0,max(PA0_R)])), 'g')
ax.errorbar(PA0, PAcor, xerr=sPA0, yerr=sPA1,
ecolor='b', linestyle='None', marker=None)
ax.plot([0,max(PA0)], deltaPA(dPAcor.beta, np.array([0, max(PA0)])), 'g')
plt.xlabel('Cataloged PA [deg]')
plt.ylabel('Measured PA [deg]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ax.set_xlim(xlim)
plt.title('Final Combined PA offset')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PA offset = {0:4.3g} +/- {1:4.3g}'.format(
dPAout.beta[1], dPAout.sd_beta[1]))
# Pause for a double check from the user
pdb.set_trace()
# User approves, close the plot and proceed
plt.close()
plt.ioff()
# Update the calibration table
calTable['D_PA'] = dPAout.beta[1]
calTable['s_D_PA'] = dPAout.sd_beta[1]
print('Writing calibration data to disk')
calTable.write(polCalConstantsFile, format='ascii.csv')
print('Calibration tasks completed!')
|
mit
|
mitenjain/signalAlign
|
scripts/makeBuildAlignments.py
|
2
|
5508
|
#!/usr/bin/env python
""" Make build alignments for starting a new HDP
"""
from __future__ import print_function
import glob
import os
import sys
import string
import pandas as pd
import numpy as np
from argparse import ArgumentParser
from random import shuffle
def parse_args():
parser = ArgumentParser(description=__doc__)
# query files
parser.add_argument('--C_alignments', '-C', action='store',
dest='C_alns', required=False, type=str, default=None,
help="C files")
parser.add_argument('--mC_alignments', '-mC', action='store',
dest='mC_alns', required=False, type=str, default=None,
help="mC files")
parser.add_argument('--hmC_alignments', '-hmC', action='store',
dest='hmC_alns', required=False, type=str, default=None,
help="hmC files")
parser.add_argument('--number_of_assignments', '-n', action='store', type=int, default=10000,
dest='max_assignments',
help='total number of assignments to collect FOR EACH GROUP')
parser.add_argument('--threshold', '-t', action='store', type=float, default=0.25, dest='threshold')
parser.add_argument('--out', '-o', action='store', type=str, required=True, dest='out_file')
return parser.parse_args()
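# A hedged usage sketch (hypothetical paths): collect up to 10000 assignments
# per group above a posterior-probability threshold of 0.5 and write them to a
# build-alignment file, e.g.
#   python makeBuildAlignments.py -C "C_alns/*.tsv" -mC "mC_alns/*.tsv" \
#       -t 0.5 -n 10000 -o buildAlignment.tsv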
def randomly_select_alignments(path_to_alignments):
alignments = [x for x in glob.glob(path_to_alignments) if os.stat(x).st_size != 0]
shuffle(alignments)
return alignments
def collect_assignments(alignments, strand, threshold, max_assignments, transtable):
if alignments is None:
return None
else:
assignments_list = []
add_to_assignments = assignments_list.append
total = 0
assert len(alignments) > 0, "Didn't find any alignments"
for alignment in alignments:
try:
data = pd.read_table(alignment, usecols=(4, 9, 12, 13),
dtype={'strand': np.str,
'kmer': np.str,
'posterior_prob': np.float64,
'event_mean': np.float64},
header=None,
names=['strand', 'kmer', 'posterior_prob', 'event_mean'])
selected_rows = data.ix[(data['strand'] == strand) & (data['posterior_prob'] >= threshold)]
total += selected_rows.shape[0]
assignment_table = pd.DataFrame({"kmer": selected_rows['kmer'].str.translate(transtable),
"event_mean": selected_rows["event_mean"]})
add_to_assignments(assignment_table)
except:
print("ERROR: problem with alignment {}".format(alignment))
continue
if total >= max_assignments:
break
assignments = pd.concat(assignments_list)
return assignments
def make_build_alignment(c_alns, mc_alns, hmc_alns, strand, threshold, max_assignments):
# translation tables for methylation
C_trans_table = string.maketrans("C", "C")
mC_trans_table = string.maketrans("C", "E")
hmC_trans_table = string.maketrans("C", "O")
C_table = collect_assignments(c_alns, strand, threshold, max_assignments, C_trans_table)
mC_table = collect_assignments(mc_alns, strand, threshold, max_assignments, mC_trans_table)
hmC_table = collect_assignments(hmc_alns, strand, threshold, max_assignments, hmC_trans_table)
nb_c_assignments = C_table.shape[0] if C_table is not None else "None"
nb_mc_assignments = mC_table.shape[0] if mC_table is not None else "None"
nb_hmc_assignments = hmC_table.shape[0] if hmC_table is not None else "None"
print("[buildAlignments] NOTICE: I found {C} C-assignments, {mC} mC-assignments, and {hmC} hmC-assignments "
"for strand {strand}"
"".format(C=nb_c_assignments, mC=nb_mc_assignments, hmC=nb_hmc_assignments, strand=strand),
file=sys.stderr)
tables = []
for table in (C_table, mC_table, hmC_table):
if table is None:
continue
else:
tables.append(table)
return pd.concat(tables)
def main(arguments):
args = parse_args()
C_alns = randomly_select_alignments(args.C_alns) if args.C_alns is not None else None
mC_alns = randomly_select_alignments(args.mC_alns) if args.mC_alns is not None else None
hmC_alns = randomly_select_alignments(args.hmC_alns) if args.hmC_alns is not None else None
template_build_alignment = make_build_alignment(C_alns, mC_alns, hmC_alns, 't',
args.threshold, args.max_assignments)
complement_build_alignment = make_build_alignment(C_alns, mC_alns, hmC_alns, 'c',
args.threshold, args.max_assignments)
entry_line = "blank\t0\tblank\tblank\t{strand}\t0\t0.0\t0.0\t0.0\t{kmer}\t0.0\t0.0\t0.0\t{event}\t0.0\n"
with open(args.out_file, 'w') as f:
for row in template_build_alignment.itertuples():
f.write(entry_line.format(strand="t", kmer=row[2], event=row[1]))
for row in complement_build_alignment.itertuples():
f.write(entry_line.format(strand="c", kmer=row[2], event=row[1]))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
mit
|
q1ang/scikit-learn
|
sklearn/preprocessing/tests/test_data.py
|
71
|
38516
|
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is used because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
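# Sketch assuming RobustScaler follows the same np.percentile / np.median
# convention as noted above, i.e. (X - median) / (q75 - q25) per feature.
def test_robust_scaler_matches_manual_formula():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    q = np.percentile(X, q=(25, 75), axis=0)
    X_manual = (X - np.median(X, axis=0)) / (q[1] - q[0])
    assert_array_almost_equal(RobustScaler().fit_transform(X), X_manual)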
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
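# Brief sketch: with norm='l2' each row is divided by its Euclidean norm, so
# scaling the rows by a positive constant should not change the output.
def test_normalize_l2_is_scale_invariant_per_row():
    rng = np.random.RandomState(0)
    X = rng.randn(3, 4)
    assert_array_almost_equal(normalize(X, norm='l2'),
                              normalize(10. * X, norm='l2'))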
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
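# Minimal sketch: the binarization threshold is assumed to be strict, so a
# value equal to the threshold maps to 0 and only strictly larger values to 1.
def test_binarizer_threshold_is_strict():
    X = np.array([[2.0, 2.1, -1.0]])
    assert_array_equal(Binarizer(threshold=2.0).transform(X), [[0., 1., 0.]])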
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
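# Sketch of the explicit centering identity the comparison above relies on:
#   K_centered = K - 1_n.K - K.1_n + 1_n.K.1_n   with 1_n = ones((n, n)) / n
def test_center_kernel_explicit_formula():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    K_fit = np.dot(X_fit, X_fit.T)
    one_n = np.ones_like(K_fit) / K_fit.shape[0]
    K_manual = (K_fit - one_n.dot(K_fit) - K_fit.dot(one_n) +
                one_n.dot(K_fit).dot(one_n))
    assert_array_almost_equal(KernelCenterer().fit_transform(K_fit), K_manual)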
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
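# Sketch of the sparse layout asserted above: with automatic n_values each
# input column gets a block of (max value + 1) output columns, delimited by
# feature_indices_, and only the columns listed in active_features_ are kept.
def test_one_hot_encoder_feature_indices_layout():
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder().fit(X)
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
    assert_equal(enc.transform(X).shape[1], len(enc.active_features_))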
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
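# Note on the ordering assumed above: the encoded block is placed first and the
# untouched (non-categorical) columns are appended after it, which is why
# cat=[True, False, False] yields 2 one-hot columns + 2 passthrough columns = 4.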
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
|
bsd-3-clause
|
JohannesUIBK/oggm
|
oggm/sandbox/run_demtests.py
|
2
|
3169
|
"""Run with a subset of benchmark glaciers"""
from __future__ import division
# Log message format
import logging
logging.getLogger("rasterio").setLevel(logging.WARNING)
logging.getLogger("shapely").setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
# Module logger
log = logging.getLogger(__name__)
# Python imports
import os
import glob
# Libs
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import salem
# Locals
import oggm
import oggm.cfg as cfg
from oggm import workflow
from oggm import tasks
from oggm.workflow import execute_entity_task
from oggm import graphics, utils
# Initialize OGGM
cfg.initialize()
# Local paths (where to write output and where to download input)
DATA_DIR = '/home/mowglie/disk/OGGM_INPUT'
WORKING_DIR = '/home/mowglie/disk/OGGM_RUNS/TEST_DEMS'
PLOTS_DIR = os.path.join(WORKING_DIR, 'plots')
cfg.PATHS['working_dir'] = WORKING_DIR
cfg.PATHS['topo_dir'] = os.path.join(DATA_DIR, 'topo')
cfg.PATHS['rgi_dir'] = os.path.join(DATA_DIR, 'rgi')
utils.mkdir(WORKING_DIR)
utils.mkdir(cfg.PATHS['topo_dir'])
utils.mkdir(cfg.PATHS['rgi_dir'])
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 20
cfg.CONTINUE_ON_ERROR = False
# Read in the RGI file
rgisel = os.path.join(WORKING_DIR, 'rgi_selection.shp')
if not os.path.exists(rgisel):
rgi_dir = utils.get_rgi_dir()
regions = ['{:02d}'.format(int(p)) for p in range(1, 20)]
files = [glob.glob(os.path.join(rgi_dir, '*', r + '_rgi50_*.shp'))[0] for r in regions]
rgidf = []
for fs in files:
sh = salem.read_shapefile(os.path.join(rgi_dir, fs), cached=True)
percs = np.asarray([0, 25, 50, 75, 100])
idppercs = np.round(percs * 0.01 * (len(sh)-1)).astype(int)
rgidf.append(sh.sort_values(by='Area').iloc[idppercs])
rgidf.append(sh.sort_values(by='CenLon').iloc[idppercs])
rgidf.append(sh.sort_values(by='CenLat').iloc[idppercs])
rgidf = gpd.GeoDataFrame(pd.concat(rgidf))
rgidf = rgidf.drop_duplicates('RGIId')
rgidf.to_file(rgisel)
else:
rgidf = salem.read_shapefile(rgisel)
rgidf = rgidf.loc[~rgidf.RGIId.isin(['RGI50-10.00012', 'RGI50-17.00850',
'RGI50-19.01497', 'RGI50-19.00990',
'RGI50-19.01440'])]
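# The selection above keeps, per RGI region, the glaciers sitting at the
# 0/25/50/75/100th percentiles of area, longitude and latitude (e.g. for 1001
# entries idppercs = [0, 250, 500, 750, 1000]); duplicates across the three
# sortings are then dropped via drop_duplicates('RGIId').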
log.info('Number of glaciers: {}'.format(len(rgidf)))
# Go - initialize working directories
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)
# Prepro tasks
task_list = [
tasks.glacier_masks,
]
for task in task_list:
execute_entity_task(task, gdirs)
# Plots (if you want)
if PLOTS_DIR == '':
exit()
utils.mkdir(PLOTS_DIR)
for gd in gdirs:
bname = os.path.join(PLOTS_DIR, gd.rgi_id + '_')
demsource = ' (' + gd.read_pickle('dem_source') + ')'
# graphics.plot_googlemap(gd)
# plt.savefig(bname + 'ggl.png')
# plt.close()
graphics.plot_domain(gd, title_comment=demsource)
plt.savefig(bname + 'dom.png')
plt.close()
|
gpl-3.0
|
garbersc/keras-galaxies
|
predict_convnet_keras_10cat.py
|
1
|
49283
|
import matplotlib.lines as mlines
import warnings
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import json
from datetime import timedelta
import os
import matplotlib.pyplot as plt
import skimage.io
from termcolor import colored
import functools
from custom_for_keras import sliced_accuracy_mean, sliced_accuracy_std, rmse,\
lr_function
from ellipse_fit import get_ellipse_kaggle_par
# from custom_keras_model_and_fit_capsels import kaggle_winsol
from custom_keras_model_x_cat import kaggle_x_cat\
as kaggle_winsol
import skimage
from skimage.transform import rotate
starting_time = time.time()
cut_fraktion = 0.9
copy_to_ram_beforehand = False
debug = True
get_winsol_weights = False
BATCH_SIZE = 16 # keep in mind
NUM_INPUT_FEATURES = 3
included_flipped = True
USE_BLENDED_PREDICTIONS = False
PRED_BLENDED_PATH = 'predictions/final/blended/blended_predictions.npy.gz'
if debug:
print os.path.isfile(PRED_BLENDED_PATH)
TRAIN_LOSS_SF_PATH = 'loss_10cat_bw.txt'
# TRAIN_LOSS_SF_PATH = "trainingNmbrs_keras_modular_includeFlip_and_37relu.txt"
# TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH = "analysis/final/try_10cat_wMaxout_next_next_next_next.h5"
TXT_OUTPUT_PATH = 'try_10cat_bw.txt'
WRONG_CAT_IMGS_PATH = 'wrong_categorized_10cat_bw.json'
IMAGE_OUTPUT_PATH = "img_10cat_bw"
postfix = ''
NUM_ELLIPSE_PARAMS = 2
ELLIPSE_FIT = False
# ELLIPSE_FIT = WEIGHTS_PATH.find('ellipse') >= 0
# if ELLIPSE_FIT:
# postfix = '_ellipse'
DONT_LOAD_WEIGHTS = False
input_sizes = [(69, 69), (69, 69)]
PART_SIZE = 45
N_INPUT_VARIATION = 2
# set to True if the prediction and evaluation should be done when the
# prediction file already exists
REPREDICT_EVERYTIME = True
# TODO: build this as functions, not with the if's
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_VALID_CORR = False # not implemented yet
# N_Corr_Filter_Images = np.sum(VALID_CORR_OUTPUT_FILTER)
DO_VALIDSTUFF_ON_TRAIN = True
DO_TEST = False # disable this to not generate predictions on the testset
VALID_CORR_OUTPUT_FILTER = np.ones((10))
output_names = ['round', 'broad_ellipse', 'small_ellipse', 'edge_no_bulge',
'edge_bulge', 'disc', 'spiral_1_arm', 'spiral_2_arm',
'spiral_other', 'other']
question_slices = [slice(0, 10)]
target_filename = os.path.basename(WEIGHTS_PATH).replace(".h5", ".npy.gz")
if get_winsol_weights:
target_filename = os.path.basename(WEIGHTS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join(
"predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join(
"predictions/final/augmented/test", target_filename)
if copy_to_ram_beforehand:
ra.myLoadFrom_RAM = True
import copy_data_to_shm
y_train = np.load("data/solutions_train_10cat.npy")
y_train_cert = np.load('data/solution_certainties_train_10cat.npy')
ra.y_train = y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
# integer division, is defining validation size
ra.num_valid = ra.num_train // 10
ra.num_train -= ra.num_valid
# training num check for EV usage
if ra.num_train != 55420:
print "num_train = %s not %s" % (ra.num_train, 55420)
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train = y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
train_ids = load_data.train_ids
test_ids = load_data.test_ids
num_train = ra.num_train
num_test = len(test_ids)
num_valid = ra.num_valid
y_valid = ra.y_valid
y_train = ra.y_train
y_train_cert = y_train_cert[num_train:]
valid_ids = ra.valid_ids
train_ids = ra.train_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
N_TRAIN = num_train
N_VALID = num_valid
print("validation sample contains %s images. \n" %
(ra.num_valid))
print 'initiate winsol class'
winsol = kaggle_winsol(BATCH_SIZE=BATCH_SIZE,
NUM_INPUT_FEATURES=NUM_INPUT_FEATURES,
PART_SIZE=PART_SIZE,
input_sizes=input_sizes,
LOSS_PATH=TRAIN_LOSS_SF_PATH,
WEIGHTS_PATH=WEIGHTS_PATH,
include_flip=included_flipped)
layer_formats = winsol.layer_formats
layer_names = layer_formats.keys()
print "Build model"
if debug:
print("input size: %s x %s x %s x %s" %
(input_sizes[0][0],
input_sizes[0][1],
NUM_INPUT_FEATURES,
BATCH_SIZE))
winsol.init_models(final_units=10)
if debug:
winsol.print_summary(postfix=postfix)
print winsol.models.keys()
if not DONT_LOAD_WEIGHTS:
if get_winsol_weights:
print "Import weights from run with original kaggle winner solution"
if not winsol.getWinSolWeights(debug=True, path=WEIGHTS_PATH):
raise UserWarning('Importing of the winsol weights did not work')
else:
print "Load model weights"
winsol.load_weights(path=WEIGHTS_PATH, postfix=postfix)
winsol.WEIGHTS_PATH = ((WEIGHTS_PATH.split('.', 1)[0] + '_next.h5'))
print "Set up data loading"
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(
3.0, target_size=input_sizes[1])
+ ra.build_augmentation_transform(rotation=45)
]
def tripple_gray(img):
gray = skimage.color.rgb2gray(img)
return np.stack((gray, gray, gray), axis=-1)
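# skimage.color.rgb2gray returns an (H, W) image; stacking it three times on
# the last axis keeps the 3-channel input shape the network expects while
# discarding the colour information.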
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(
valid_indices,
'train',
ds_transforms=ds_transforms,
chunk_size=N_VALID,
target_sizes=input_sizes)
return data_gen_valid
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
# make to bw
xs_valid = [np.asarray([tripple_gray(x) for x in x_valid])
for x_valid in xs_valid]
# move the colour dimension up
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid]
validation_data = (
[xs_valid[0], xs_valid[1]], y_valid)
validation_data = (
[np.asarray(xs_valid[0]), np.asarray(xs_valid[1])], validation_data[1])
t_val = (time.time() - start_time)
print " took %.2f seconds" % (t_val)
if debug:
print("Free GPU Mem before first step %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0] / 1024. / 1024.))
def save_exit():
    # winsol.save()
    print "Done!"
    print ' run for %s' % timedelta(seconds=(time.time() - starting_time))
    sys.exit(0)
if USE_BLENDED_PREDICTIONS:
predictions = load_data.load_gz(PRED_BLENDED_PATH)
if debug:
print os.path.isfile(PRED_BLENDED_PATH)
print type(predictions)
print predictions
print np.shape(predictions)
elif not REPREDICT_EVERYTIME and os.path.isfile(
target_path_valid) and os.path.isfile(TRAIN_LOSS_SF_PATH):
print 'Loading validation predictions from %s and loss from %s ' % (
target_path_valid, TRAIN_LOSS_SF_PATH)
predictions = load_data.load_gz(target_path_valid)
else:
try:
print ''
        print 'Re-evaluating and predicting'
if DO_VALID:
evalHist = winsol.evaluate(
[xs_valid[0], xs_valid[1]], y_valid=y_valid, postfix='')
# validation_data[0], y_valid=y_valid, postfix=postfix)
winsol.save_loss(modelname='model_norm_metrics',
postfix=postfix)
evalHist = winsol.load_loss(
modelname='model_norm_metrics', postfix=postfix)
print ''
predictions = winsol.predict(
validation_data[0], postfix=postfix)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, predictions)
except KeyboardInterrupt:
        print "\ngot keyboard interruption"
save_exit()
except ValueError, e:
print "\ngot value error, could be the end of the generator in the fit"
print e
save_exit()
evalHist = winsol.load_loss(modelname='model_norm_metrics', postfix=postfix)
print evalHist.keys()
if np.shape(predictions) != np.shape(y_valid):
raise ValueError('prediction and validation set have different shapes, %s to %s ' % (
np.shape(predictions), np.shape(y_valid)))
# FIXME add this counts decision tree dependent
n_global_cat_pred = [0] * len(output_names)
n_global_cat_valid = [0] * len(output_names)
n_global_cat_agrement = [0] * len(output_names)
n_sliced_cat_pred = [0] * len(output_names)
n_sliced_cat_valid = [0] * len(output_names)
n_sliced_cat_agrement = [0] * len(output_names)
n_sliced_cat_pred_wcut = [0] * len(output_names)
n_sliced_cat_valid_wcut = [0] * len(output_names)
n_sliced_cat_agrement_wcut = [0] * len(output_names)
wrong_cat_cutted = []
categories = np.zeros((10, 10))
val_l = []
pred_l = []
val_l_cutted = []
pred_l_cutted = []
n_pred_2nd_agree = [0] * len(output_names)
n_pred_3rd_agree = [0] * len(output_names)
def arg_nthmax(arr, n=2):
    # work on a copy so the caller's array is not modified in place
    arr_ = np.copy(arr)
    for _ in range(n - 1):
        arr_[np.argmax(arr_)] = np.amin(arr_)
    return np.argmax(arr_)
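# e.g. arg_nthmax(np.array([.1, .7, .2]), n=2) -> 2, the index of the second
# largest entry; working on a copy keeps the caller's prediction row untouched.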
for i in range(len(predictions)):
argpred = np.argmax(predictions[i])
argval = np.argmax(y_valid[i])
n_global_cat_pred[argpred] += 1
n_global_cat_valid[argval] += 1
if argval == argpred:
n_global_cat_agrement[argval] += 1
elif argval == arg_nthmax(predictions[i]):
n_pred_2nd_agree[argval] += 1
elif argval == arg_nthmax(predictions[i], 3):
n_pred_3rd_agree[argval] += 1
categories[argval, argpred] += 1.
val_l.append(argval)
pred_l.append(argpred)
c = 0
for slice in question_slices:
sargpred = np.argmax(predictions[i][slice])
cutpred = predictions[i][slice][sargpred] /\
sum(predictions[i][slice]) > cut_fraktion
sargval = np.argmax(y_valid[i][slice])
n_sliced_cat_pred[sargpred + slice.start] += 1
if cutpred:
n_sliced_cat_pred_wcut[sargpred + slice.start] += 1
n_sliced_cat_valid_wcut[sargval + slice.start] += 1
val_l_cutted.append(argval)
pred_l_cutted.append(argpred)
if sargval != sargpred:
# print '%sto%s' % (str(argval), str(argpred))
# print valid_ids[i]
wrong_cat_cutted.append(('%sto%s' % (str(argval),
str(argpred)),
i))
n_sliced_cat_valid[sargval + slice.start] += 1
if sargval == sargpred:
n_sliced_cat_agrement[sargval + slice.start] += 1
if cutpred:
n_sliced_cat_agrement_wcut[sargval + slice.start] += 1
c += 1
print '\nfirst hit precision: %.3f' % (float(np.sum(n_global_cat_agrement)) / float(np.sum(n_global_cat_pred)))
print 'second hit precision: %.3f' % (float(np.sum(n_pred_2nd_agree)) / float(np.sum(n_global_cat_pred) - np.sum(n_global_cat_agrement)))
print 'third hit precision: %.3f\n' % (float(np.sum(n_pred_3rd_agree)) / float(np.sum(n_global_cat_pred) - np.sum(n_global_cat_agrement) - np.sum(n_pred_2nd_agree)))
def pred_to_val_hist(path=IMAGE_OUTPUT_PATH, also_cutted=True):
weights_l = []
for p, v in zip(pred_l, val_l):
weights_l.append(1. / float(n_global_cat_valid[v]))
# print categories
plt.hist2d(pred_l, val_l, bins=10, range=[[0., 10.], [0., 10.]])
cb = plt.colorbar()
cb.set_label('# categorised')
plt.xlabel('predicted category')
plt.ylabel('validation category')
plt.xticks([a + 0.5 for a in range(10)], output_names, rotation=90)
plt.yticks([a + 0.5 for a in range(10)], output_names)
plt.tight_layout()
plt.savefig(path + '/categories.eps')
cb.remove()
plt.hist2d(pred_l, val_l, bins=10, weights=weights_l,
range=[[0., 10.], [0., 10.]])
cb = plt.colorbar()
cb.set_label('# categorised / # validation in this category')
plt.xlabel('predicted category')
plt.ylabel('validation category')
plt.tight_layout()
plt.savefig(path + '/categories_normToVal.eps')
cb.remove()
weights_l_cutted = []
for p, v in zip(pred_l_cutted, val_l_cutted):
weights_l_cutted.append(1. / float(n_sliced_cat_valid_wcut[v]))
# print categories
plt.hist2d(pred_l_cutted, val_l_cutted, bins=10,
range=[[0., 10.], [0., 10.]])
cb = plt.colorbar()
cb.set_label('# categorised')
plt.xlabel('predicted category')
plt.ylabel('validation category')
plt.tight_layout()
plt.savefig(path + '/categories_cutted.eps')
cb.remove()
plt.hist2d(pred_l_cutted, val_l_cutted, bins=10, weights=weights_l_cutted,
range=[[0., 10.], [0., 10.]])
cb = plt.colorbar()
cb.set_label('# categorised / # validation in this category')
plt.xlabel('predicted category')
plt.ylabel('validation category')
plt.tight_layout()
plt.savefig(path + '/categories_normToVal_cutted.eps')
cb.remove()
def P_base(n_pred, n_agree):
return (float(n_agree) / float(n_pred))\
if n_pred else 0.
def P_i(i, n_pred, n_agree):
return P_base(n_pred[i], n_agree[i])
P = functools.partial(P_i, n_pred=n_sliced_cat_pred,
n_agree=n_sliced_cat_agrement)
def R_base(n_pred, n_agree, n_false_neg):
return float(n_agree) / float(
n_pred + n_false_neg) if n_pred or n_false_neg else 0.
def R_i(i, sli, n_pred, n_agree):
if i >= sli.start and i < sli.stop:
false_neg = sum(n_pred[sli]) - n_pred[i] - (
sum(n_agree[sli]) - n_agree[i])
return R_base(n_pred[i], n_agree[i], false_neg)
else:
warnings.warn('question number %i is not in slice %s' % (i, sli))
def R_i_slices(i, slices, n_pred, n_agree):
for sli in slices:
if i >= sli.start and i < sli.stop:
return R_i(i, sli, n_pred, n_agree)
else:
continue
else:
warnings.warn('question number %i is not in one of the slices' % (i))
R = functools.partial(R_i_slices, slices=question_slices,
n_pred=n_sliced_cat_pred, n_agree=n_sliced_cat_agrement)
# def R(i):
# for slice in question_slices:
# if i >= slice.start and i < slice.stop:
# false_neg = sum(n_sliced_cat_pred[slice]) - n_sliced_cat_pred[i] - (
# sum(n_sliced_cat_agrement[slice]) - n_sliced_cat_agrement[i])
# return float(n_sliced_cat_agrement[i]) / float(
# n_sliced_cat_agrement[i] + false_neg)
def P_wcut(i):
return (float(n_sliced_cat_agrement_wcut[i]) / float(
n_sliced_cat_pred_wcut[i])) if n_sliced_cat_pred_wcut[i] else 0.
def R_wcut(i):
for slice in question_slices:
if i >= slice.start and i < slice.stop:
false_neg = sum(n_sliced_cat_pred_wcut[slice]) -\
n_sliced_cat_pred_wcut[i] - (
sum(n_sliced_cat_agrement_wcut[slice]) -
n_sliced_cat_agrement_wcut[i])
return float(n_sliced_cat_agrement_wcut[i]) / float(
n_sliced_cat_agrement_wcut[i] + false_neg) if (
n_sliced_cat_agrement_wcut[i] + false_neg) else 0.
output_dic = {}
output_dic_short_hand_names = {'rmse': 'rmse',
'rmse/mean': 'rmse/mean',
'slice categorized prediction': 'qPred',
'slice categorized valid': 'qVal',
'slice categorized agree': 'qAgree',
'precision': 'P',
'recall': 'R',
}
rmse_valid = evalHist['rmse'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - predictions)**2))
print " RMSE (last iteration):\t%.6f" % float(rmse_valid)
print ' categorical acc. (last iteration):\t%.4f' % float(evalHist['categorical_accuracy'][-1])
print " RMSE (augmented):\t%.6f RMSE/mean: %.2f " % (float(rmse_augmented),
float(rmse_augmented) / float(np.mean(
y_valid)))
print " mean P (augmented):\t%.3f mean R (augmented):\t%.3f " % (
np.mean([P(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
np.mean([R(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]))
print " mean P (with Cut):\t%.3f mean R (with Cut):\t%.3f ,\t cut is on %s, mean cut eff. %.2f" % (
np.mean([P_wcut(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
np.mean([R_wcut(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
cut_fraktion,
np.mean([float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]))
P_wcut_mean_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred_wcut[i]:
P_wcut_mean_noEmpty.append(P_wcut(i))
P_wcut_mean_noEmpty = np.mean(P_wcut_mean_noEmpty)
R_wcut_mean_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred_wcut[i]:
R_wcut_mean_noEmpty.append(R_wcut(i))
R_wcut_mean_noEmpty = np.mean(R_wcut_mean_noEmpty)
cut_eff_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred[i]:
cut_eff_noEmpty.append(float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]))
cut_eff_noEmpty = np.mean(cut_eff_noEmpty)
print " without zero entry classes:\n mean P (with Cut):\t%.3f mean R (with Cut):\t%.3f" % (
P_wcut_mean_noEmpty,
R_wcut_mean_noEmpty)
print 'mean cut eff, without zero uncuted pred. %.2f' % (cut_eff_noEmpty)
print " RMSE output wise (augmented): P(recision), R(ecall)"
qsc = 0
for i in xrange(0, VALID_CORR_OUTPUT_FILTER.shape[0]):
oneMSE = np.sqrt(np.mean((y_valid.T[i] - predictions.T[i])**2))
if not str(qsc) in output_dic.keys():
output_dic[str(qsc)] = {}
output_dic[str(qsc)][output_names[i]] = {'rmse': float(oneMSE),
'rmse/mean': float(oneMSE / np.mean(y_valid.T[i])),
'slice categorized prediction': n_sliced_cat_pred[i],
'slice categorized valid': n_sliced_cat_valid[i],
'slice categorized agree': n_sliced_cat_agrement[i],
'precision': P(i),
'recall': R(i),
}
if i in [slice.start for slice in question_slices]:
print '----------------------------------------------------'
qsc += 1
if P(i) < 0.5: # oneMSE / np.mean(y_valid.T[i]) > 1.2 * rmse_augmented / np.mean(
# y_valid):
print colored(" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f" % (
output_names[i], i, oneMSE, oneMSE /
np.mean(y_valid.T[i]),
# n_global_cat_pred[i], n_global_cat_valid[i],
# n_global_cat_agrement[i],
n_sliced_cat_pred[i], n_sliced_cat_valid[i], n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
),
'red')
elif P(i) > 0.9: # oneMSE / np.mean(y_valid.T[i]) < 0.8 * rmse_augmented / np.mean(
# y_valid):
print colored(" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f " % (
output_names[i], i, oneMSE, oneMSE / np.mean(y_valid.T[i]),
# n_global_cat_pred[i], n_global_cat_valid[i],
# n_global_cat_agrement[i],
n_sliced_cat_pred[i], n_sliced_cat_valid[i], n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
),
'green')
else:
print (" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f " %
(output_names[i], i, oneMSE, oneMSE / np.mean(y_valid.T[i]),
n_sliced_cat_pred[i], n_sliced_cat_valid[i],
n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
)
)
with open(TXT_OUTPUT_PATH, 'a+') as f:
json.dump(output_dic_short_hand_names, f)
f.write('\n')
json.dump(output_dic, f)
f.write('\n')
imshow_c = functools.partial(
plt.imshow, interpolation='none') # , vmin=0.0, vmax=1.0)
imshow_g = functools.partial(
plt.imshow, interpolation='none', cmap=plt.get_cmap('gray')) # , vmin=0.0, vmax=1.0)
def try_different_cut_fraktion(cut_fraktions=map(lambda x: float(x) / 20.,
range(8, 20)),
figname='different_cuts.eps'):
print
print 'Testing different fraction cuts:'
n_wcut_pred = []
n_wcut_valid = []
n_wcut_agree = []
n_agree_total = []
n_selected_total = []
for _ in cut_fraktions:
n_wcut_pred.append([0] * len(output_names))
n_wcut_valid.append([0] * len(output_names))
n_wcut_agree.append([0] * len(output_names))
n_agree_total.append(0)
n_selected_total.append(0)
for i in range(len(predictions)):
for slic in question_slices:
sargpred = np.argmax(predictions[i][slic])
q_frak_pred = predictions[i][slic][sargpred] / \
sum(predictions[i][slic])
sargval = np.argmax(y_valid[i][slic])
for j, cut_val in enumerate(cut_fraktions):
if q_frak_pred > cut_val:
n_wcut_pred[j][sargval + slic.start] += 1
n_wcut_valid[j][sargpred + slic.start] += 1
n_selected_total[j] = n_selected_total[j] + 1
if sargval == sargpred:
n_wcut_agree[j][sargval + slic.start] += 1
n_agree_total[j] = n_agree_total[j] + 1
Ps_no_zero = []
Rs_no_zero = []
effs = []
signigicance = [] # agree/sqrt(pred-agree)
effs_sig = []
P_total = [float(a) / float(s)
for a, s in zip(n_agree_total, n_selected_total)]
signif_total = [float(a) / np.sqrt(s - a)
for a, s in zip(n_agree_total, n_selected_total)]
Ps = [np.mean([P_i(i, param[0], param[1]) for i in range(
VALID_CORR_OUTPUT_FILTER.shape[0])]) for param in zip(n_wcut_pred,
n_wcut_agree)]
Rs = [np.mean([R_i_slices(i, slices=question_slices, n_pred=param[0],
n_agree=param[1]) for i in range(
VALID_CORR_OUTPUT_FILTER.shape[0])])
for param in zip(n_wcut_pred, n_wcut_agree)]
if debug:
print n_sliced_cat_pred[0:3]
print n_wcut_pred[0][0:3]
def _ePReS(n_pred, n_agree):
eff_mean = []
eff_mean_s = []
P_wcut_mean_noEmpty = []
R_wcut_mean_noEmpty = []
signi = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred[i]:
eff_mean.append(float(n_pred[i]) / float(
n_wcut_pred[0][i]))
if n_sliced_cat_agrement[i] and n_wcut_agree[0][i]:
eff_mean_s.append(float(n_agree[i]) / float(
n_wcut_agree[0][i]))
if n_pred[i]:
P_wcut_mean_noEmpty.append(P_i(i, n_pred, n_agree))
R_wcut_mean_noEmpty.append(R_i_slices(
i, question_slices, n_pred, n_agree))
if n_agree[i]:
signi.append(
float(n_agree[i]) / np.sqrt(float(n_pred[i]
- n_agree[i])))
return (np.mean(eff_mean),
np.mean(P_wcut_mean_noEmpty),
np.mean(R_wcut_mean_noEmpty),
np.mean(signi),
np.mean(eff_mean_s))
for p, a in zip(n_wcut_pred, n_wcut_agree):
_e, _P, _R, _s, _es = _ePReS(p, a)
Ps_no_zero.append(_P)
Rs_no_zero.append(_R)
effs.append(_e)
effs_sig.append(_es)
signigicance.append(_s)
if debug:
print 'cut_fraktions'
print cut_fraktions
print 'effs'
print effs
print 'effs_sig'
print effs_sig
print 'signigicance / 120'
print [s / 120. for s in signigicance]
print 'Ps'
print Ps
print 'Rs'
print Rs
print 'Ps_no_zero'
print Ps_no_zero
print 'Rs_no_zero'
print Rs_no_zero
plots = []
label_h = []
# plt.subplot2grid((1, 1), (0, 0), colspan=1)
plots.append(plt.plot(cut_fraktions, effs, 'r-', label="eff"))
label_h.append(mlines.Line2D([], [], color='red', label='eff'))
plots.append(plt.plot(
cut_fraktions, effs_sig, 'b-', label="eff sig"))
label_h.append(mlines.Line2D([], [], color='blue', label='eff sig'))
# plots.append(plt.plot(cut_fraktions, [
# s / 120. for s in signigicance], 'g-', label="signif/120"))
plots.append(plt.plot(cut_fraktions, [
s / 250. for s in signif_total], 'g-', label="signif/250"))
label_h.append(mlines.Line2D([], [], color='green', label='signif/250'))
plots.append(plt.plot(cut_fraktions, Ps_no_zero, 'r.', label="Ps no zero"))
label_h.append(mlines.Line2D([], [], color='red', marker='.',
markersize=15, linewidth=0, label='mean P, no 0.'))
plots.append(plt.plot(cut_fraktions, Rs_no_zero, 'b.', label="Rs no zero"))
label_h.append(mlines.Line2D([], [], color='blue', marker='.',
markersize=15, linewidth=0, label='mean R, no 0.'))
# plots.append(plt.plot(cut_fraktions, Ps, 'r.', label="Ps"))
plots.append(plt.plot(cut_fraktions, P_total, 'ro', label="Ps"))
label_h.append(mlines.Line2D([], [], color='red', marker='o',
markersize=15, linewidth=0, label='P'))
# plots.append(plt.plot(cut_fraktions, Rs, 'b.', label="Rs"))
# label_h.append(mlines.Line2D([], [], color='blue', marker='.',
# markersize=15, linewidth=0, label='R'))
plt.legend(handles=label_h, loc='lower left') # , bbox_to_anchor=(
# 1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel('Value')
plt.xlabel('Cut Value')
# plt.show()
plt.tight_layout()
plt.savefig(figname)
plots = []
label_h = []
plt.subplot(121)
plots.append(plt.plot(Rs_no_zero, Ps_no_zero, 'r-', label="no zero"))
label_h.append(mlines.Line2D([], [], color='red', label='no zero'))
plots.append(plt.plot(Rs, Ps, 'b-', label=""))
label_h.append(mlines.Line2D([], [], color='blue', label='with zero'))
plt.legend(handles=label_h, bbox_to_anchor=(
1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.savefig('ROC_test3.eps')
def pixel_correlations(useTruth=False, dirname='InOutCorr'):
if not useTruth:
predict = predictions
dirname = dirname + '_valid'
else:
predict = y_valid
dirname = dirname + '_truth'
pixels_color0 = []
pixels_color1 = []
pixels_color2 = []
input_img = validation_data[0][0].transpose(1, 2, 3, 0)
# if b==0: print input_img.shape
pixels_color0.append(input_img[0])
pixels_color1.append(input_img[1])
pixels_color2.append(input_img[2])
print "begin correlation calculation"
pixels_color0_stack = np.dstack(pixels_color0)
pixels_color1_stack = np.dstack(pixels_color1)
pixels_color2_stack = np.dstack(pixels_color2)
if not os.path.isdir(IMAGE_OUTPUT_PATH):
os.mkdir(IMAGE_OUTPUT_PATH)
# plt.gray()
os.chdir(IMAGE_OUTPUT_PATH)
if not os.path.isdir(dirname):
os.mkdir(dirname)
os.chdir(dirname)
for i in xrange(0, VALID_CORR_OUTPUT_FILTER.shape[0]):
if not VALID_CORR_OUTPUT_FILTER[i]:
continue
print "begin correlation of output %s" % i
corr_image_line_c0 = np.zeros(
input_sizes[0][0] * input_sizes[0][1])
corr_image_line_c1 = np.zeros(
input_sizes[0][0] * input_sizes[0][1])
corr_image_line_c2 = np.zeros(
input_sizes[0][0] * input_sizes[0][1])
pixels_colors0_line = np.reshape(
pixels_color0_stack, (input_sizes[0][0] * input_sizes[0][1],
pixels_color0_stack.shape[2]))
pixels_colors1_line = np.reshape(
pixels_color1_stack, (input_sizes[0][0] * input_sizes[0][1],
pixels_color1_stack.shape[2]))
pixels_colors2_line = np.reshape(
pixels_color2_stack, (input_sizes[0][0] * input_sizes[0][1],
pixels_color2_stack.shape[2]))
for j in xrange(0, input_sizes[0][0] * input_sizes[0][1]):
if j == 0:
print pixels_colors0_line[j].shape
print predict.T[i].shape
corr_image_line_c0[j] = np.corrcoef(
pixels_colors0_line[j][:predict.shape[0]],
predict.T[i])[1][0]
corr_image_line_c1[j] = np.corrcoef(
pixels_colors1_line[j][:predict.shape[0]],
predict.T[i])[1][0]
corr_image_line_c2[j] = np.corrcoef(
pixels_colors2_line[j][:predict.shape[0]],
predict.T[i])[1][0]
# correlation_output_images.append(np.reshape(corr_image_line,(input_sizes[0][0],input_sizes[0][1])))
# Needs to be in row,col order
plt.imshow(np.reshape(np.fabs(corr_image_line_c0), (
input_sizes[0][0], input_sizes[0][1])), interpolation='none',
vmin=0.0, vmax=0.4)
plt.colorbar()
plt.savefig("inputCorrelationToOutput%s%s_c0_Red.jpg" %
(i, output_names[i]))
plt.close()
skimage.io.imsave("inputCorrelationToOutput%s%s_c0_Red_small.jpg" %
(i, output_names[i]), np.reshape(
np.fabs(corr_image_line_c0), (
input_sizes[0][0], input_sizes[0][1])) / 0.4)
# Needs to be in row,col order
if np.max(np.fabs(np.dstack([corr_image_line_c0,
corr_image_line_c1,
corr_image_line_c2]))) > 0.4:
print np.max(np.fabs(np.dstack([corr_image_line_c0,
corr_image_line_c1,
corr_image_line_c2])))
plt.imshow(np.reshape(np.fabs(np.dstack([corr_image_line_c0,
corr_image_line_c1,
corr_image_line_c2])) / 0.4, (
input_sizes[0][0], input_sizes[0][1], 3)),
interpolation='none',
vmin=0.0, vmax=0.1)
# plt.colorbar()
plt.savefig("inputCorrelationToOutput%s%s_RGB.jpg" %
(i, output_names[i]))
plt.close()
skimage.io.imsave("inputCorrelationToOutput%s%s_RGB_small.jpg" %
(i, output_names[i]), np.reshape(np.fabs(
np.dstack([
corr_image_line_c0,
corr_image_line_c1,
corr_image_line_c2])) / 0.4, (
input_sizes[0][0],
input_sizes[0][1], 3)))
# # Needs to be in row,col order
# plt.imshow(np.reshape(corr_image_line_c2, (
# input_sizes[0][0], input_sizes[0][1])), interpolation='none', vmin=-0.4, vmax=0.4)
# plt.colorbar()
# plt.savefig("inputCorrelationToOutput%s%s_c2_Blue.jpg" %
# (i, output_names[i]))
# plt.close()
os.chdir("../..")
def valid_scatter():
print 'Do scatter plots'
print ' they will be saved in the folder %s ' % IMAGE_OUTPUT_PATH
if not os.path.isdir(IMAGE_OUTPUT_PATH):
os.mkdir(IMAGE_OUTPUT_PATH)
# plt.gray()
os.chdir(IMAGE_OUTPUT_PATH)
if not os.path.isdir("ValidScatter"):
os.mkdir("ValidScatter")
os.chdir("ValidScatter")
for i in xrange(0, VALID_CORR_OUTPUT_FILTER.shape[0]):
y = predictions.T[i]
x = y_valid.T[i]
fig, ax = plt.subplots()
fit = np.polyfit(x, y, deg=1)
ax.plot(x, fit[0] * x + fit[1], color='red')
ax.scatter(x, y)
plt.ylabel('prediction')
plt.xlabel('target')
plt.title("valid %s" % (output_names[i]))
oneMSE = np.sqrt(np.mean((y_valid.T[i] - predictions.T[i])**2))
plt.text(60, .025, 'RMSE: %s , RMSE/mean: %s ' %
(oneMSE, oneMSE / np.mean(y_valid.T[i])))
plt.savefig("validScatter_%s_%s.jpg" % (i, output_names[i]))
plt.close()
os.chdir("../..")
def normalize_img(img):
min = np.amin(img)
max = np.amax(img)
return (img - min) / (max - min)
def _img_wall(img, norm=False):
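    # Added note: tiles a stack of images into one roughly square board with a
    # one-pixel border between tiles; `img` may be (n, w, h) or (n, c, w, h),
    # and each tile is optionally normalized to [0, 1] when `norm` is True.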
dim = len(np.shape(img))
shape = np.shape(img)
n_board_side = int(np.ceil(np.sqrt(shape[0])))
n_board_square = int(n_board_side**2)
if dim == 3:
img_w = shape[1]
img_h = shape[2]
wall = np.zeros((n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
elif dim == 4:
img_w = shape[2]
img_h = shape[3]
wall = np.zeros((shape[1], n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
else:
raise TypeError(
'Wrong dimension %s of the input' % dim)
pos = [0, 0]
for i in img:
if pos[0] >= n_board_side:
pos[0] = 0
pos[1] = pos[1] + 1
x0 = pos[0] * (img_w + 1) + 1
x1 = (pos[0] + 1) * img_w + pos[0] + 1
y0 = pos[1] * (img_h + 1) + 1
y1 = (pos[1] + 1) * img_h + pos[1] + 1
i_ = normalize_img(i) if norm else i
if dim == 3:
wall[x0:x1, y0:y1] = i_
else:
wall[:, x0:x1, y0:y1] = i_
pos[0] = pos[0] + 1
return wall
def highest_conv_activation(img_nr=None, img_id=None, layername='conv_0',
n_highest=5,
order='both', path_base='highest_activations',
verbose=1):
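    # Added note: looks up the `n_highest` strongest activations of
    # `layername` for one validation image, either as the per-filter spatial
    # mean ('global'), the strongest single spatial position ('local'), or
    # both, and writes the result to '<path_base>_<img_id>.json'.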
if img_nr and img_id:
print 'Warning: got image number and id, will use id.'
img_nr = list(valid_ids).index(img_id)
elif img_id:
img_nr = list(valid_ids).index(img_id)
elif img_nr:
img_id = valid_ids[img_nr]
if verbose:
print 'highest activations in layer %s of image %s (%s)' % (layername,
img_id, img_nr)
input_img = [np.asarray([validation_data[0][0][img_nr]]),
np.asarray([validation_data[0][1][img_nr]])]
save_dic = {}
if order == 'both' or order == 'global':
global_max = []
l_out = np.asarray(winsol.get_layer_output(
layer=layername, input_=input_img))
if debug:
print np.shape(l_out)
if verbose:
print '\t global'
l_out = np.mean(l_out, axis=(2, 3))
if debug:
print np.shape(l_out)
for i in range(n_highest):
max_ch = np.argmax(l_out)
val = l_out[0, max_ch]
l_out[0, max_ch] = 0.
global_max.append((max_ch, float(val)))
if verbose:
print '\t filter %i, with mean activation %.3f'\
% global_max[-1]
save_dic['global'] = global_max
if order == 'both' or order == 'local':
local_max = []
l_out = np.asarray(winsol.get_layer_output(
layer=layername, input_=input_img))
if debug:
print np.shape(l_out)
if verbose:
print '\t local:'
for i in range(n_highest):
max_ch = np.argmax(l_out[0]) / l_out.shape[2] / l_out.shape[3]
x = np.argmax(l_out[0, max_ch]) / l_out.shape[3]
y = np.argmax(l_out[0, max_ch, x])
val = l_out[0, max_ch, x, y]
l_out[0, max_ch, x, y] = 0.
x = float(x) / float(l_out.shape[2])
y = float(y) / float(l_out.shape[3])
local_max.append((max_ch, x, y, float(val)))
if verbose:
print '\t filter %i at %.2f %.2f, with activation %.3f'\
% local_max[-1]
save_dic['local'] = local_max
with open(path_base + '_' + str(img_id) + '.json', 'w') as f:
json.dump(save_dic, f)
def print_filters(image_nr=0, norm=False):
if not os.path.isdir(IMAGE_OUTPUT_PATH):
os.mkdir(IMAGE_OUTPUT_PATH)
print "Print filtered"
image_nr = image_nr
if type(image_nr) == int:
input_img = [np.asarray([validation_data[0][0][image_nr]]),
np.asarray([validation_data[0][1][image_nr]])]
elif image_nr == 'ones':
input_img = [np.ones(shape=(np.asarray(
[validation_data[0][0][0]]).shape)), np.ones(
shape=(np.asarray([validation_data[0][0][0]]).shape))]
    elif image_nr == 'zeros':
        input_img = [np.zeros(shape=(np.asarray(
            [validation_data[0][0][0]]).shape)), np.zeros(
            shape=(np.asarray([validation_data[0][0][0]]).shape))]
print ' getting outputs'
intermediate_outputs = {}
for n in layer_names:
intermediate_outputs[n] = np.asarray(winsol.get_layer_output(
n, input_=input_img))
intermediate_outputs[n] = intermediate_outputs[n][0]
if layer_formats[n] <= 0:
board_side = int(np.ceil(np.sqrt(len(intermediate_outputs[n]))))
board_square = int(board_side**2)
intermediate_outputs[n] = np.append(
intermediate_outputs[n], [0] * (board_square - len(
intermediate_outputs[n])))
intermediate_outputs[n] = np.reshape(
intermediate_outputs[n], (board_side, board_side))
os.chdir(IMAGE_OUTPUT_PATH)
intermed_out_dir = 'intermediate_outputs'
if norm:
intermed_out_dir += '_norm'
if not os.path.isdir(intermed_out_dir):
os.mkdir(intermed_out_dir)
os.chdir(intermed_out_dir)
print ' output images will be saved at %s/%s' % (IMAGE_OUTPUT_PATH,
intermed_out_dir)
print ' plotting outputs'
if type(image_nr) == int:
imshow_c(np.transpose(input_img[0][0], (1, 2, 0)))
plt.savefig('input_fig_%s_rotation_0.jpg' % (image_nr))
plt.close()
skimage.io.imsave('input_fig_%s_rotation_0_small.jpg' % (
image_nr), np.transpose(input_img[0][0], (1, 2, 0)) /
np.max(input_img[0][0]))
imshow_c(np.transpose(input_img[1][0], (1, 2, 0)))
plt.savefig('input_fig_%s_rotation_45.jpg' % (image_nr))
plt.close()
skimage.io.imsave('input_fig_%s_rotation_45_small.jpg' % (
image_nr), np.transpose(input_img[1][0], (1, 2, 0)) /
np.max(input_img[1][0]))
for i in range(len(input_img[0][0])):
imshow_g(input_img[0][0][i])
plt.savefig('input_fig_%s_rotation_0_dim_%s.jpg' % (image_nr, i))
plt.close()
skimage.io.imsave('input_fig_%s_rotation_0_dim_%s_small.jpg' %
(image_nr, i), input_img[0][0][i] /
np.max(input_img[0][0][i]))
for i in range(len(input_img[1][0])):
imshow_g(input_img[1][0][i])
plt.savefig('input_fig_%s_rotation_45_dim_%s.jpg' %
(image_nr, i))
plt.close()
skimage.io.imsave('input_fig_%s_rotation_45_dim_%s_small.jpg' %
(image_nr, i), input_img[1][0][i] /
np.max(input_img[1][0][i]))
for n in layer_names:
if layer_formats[n] > 0:
imshow_g(_img_wall(intermediate_outputs[n], norm))
if not norm:
plt.colorbar()
plt.savefig('output_fig_%s_%s.jpg' %
(image_nr, n))
plt.close()
skimage.io.imsave('output_fig_%s_%s_small.jpg' %
(image_nr, n), _img_wall(
intermediate_outputs[n], norm) /
np.max(_img_wall(
intermediate_outputs[n], norm)))
else:
imshow_g(normalize_img(
intermediate_outputs[n]) if norm else intermediate_outputs[n])
if not norm:
plt.colorbar()
plt.savefig('output_fig_%s_%s.jpg' %
(image_nr, n))
plt.close()
skimage.io.imsave('output_fig_%s_%s_small.jpg' %
(image_nr, n), normalize_img(
intermediate_outputs[n]) if norm
else intermediate_outputs[n])
os.chdir('../..')
def print_weights(norm=False):
if not os.path.isdir(IMAGE_OUTPUT_PATH):
os.mkdir(IMAGE_OUTPUT_PATH)
os.chdir(IMAGE_OUTPUT_PATH)
weights_out_dir = 'weights'
if norm:
weights_out_dir += '_normalized'
if not os.path.isdir(weights_out_dir):
os.mkdir(weights_out_dir)
os.chdir(weights_out_dir)
print 'Printing weights'
for name in layer_formats:
if layer_formats[name] == 1:
w, b = winsol.get_layer_weights(layer=name)
w = np.transpose(w, (3, 0, 1, 2))
w = _img_wall(w, norm)
b = _img_wall(b, norm)
# elif layer_formats[name] == 0:
# w, b = winsol.get_layer_weights(layer=name)
# w = _img_wall(w, norm)
# b = _img_wall(b, norm)
else:
continue
for i in range(len(w)):
imshow_g(w[i])
if not norm:
plt.colorbar()
plt.savefig('weight_layer_%s_kernel_channel_%s.jpg' % (name, i))
plt.close()
# print type(w[i])
# print np.shape(w[i])
# print np.max(w[i])
# print np.min(w[i])
skimage.io.imsave(
'weight_layer_%s_kernel_channel_%s_small.jpg' % (name, i), np.clip(w[i], -1., 1.))
imshow_g(b)
if not norm:
plt.colorbar()
plt.savefig('weight_layer_%s_bias.jpg' % (name))
plt.close()
skimage.io.imsave('weight_layer_%s_bias_small.jpg' %
(name), np.clip(b, -1., 1.))
os.chdir('../..')
def get_best_id(category_name, n=1):
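    # Added note: builds a structured array of (image id, per-category
    # predictions) and returns the validation image id (stored in the
    # 'img_nr' field) with the highest prediction for `category_name`, or the
    # ids of the top entries when n > 1.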
dtype = []
dtype.append(('img_nr', int))
for q in output_names:
dtype.append((q, float))
print len(dtype)
print len(predictions[0])
print type(predictions[0])
print len(tuple(np.append(np.array(valid_ids[0]), predictions[0])))
predictions_dtyped = np.array([], dtype=dtype)
for id, line in zip(valid_ids, predictions):
predictions_dtyped = np.append(
predictions_dtyped, np.asarray(
tuple(np.append(np.array(id), line)), dtype=dtype))
return np.sort(predictions_dtyped, order=category_name)['img_nr'][
-1] if n == 1 else np.sort(predictions_dtyped, order=category_name)[
'img_nr'][
-1 - n: len(predictions_dtyped['img_nr'])]
def save_wrong_cat_cutted():
if not os.path.isdir(IMAGE_OUTPUT_PATH + '/wrong_cat'):
os.mkdir(IMAGE_OUTPUT_PATH + '/wrong_cat/')
for i in wrong_cat_cutted:
plt.imsave(IMAGE_OUTPUT_PATH + '/wrong_cat/' +
i[0] + '_' + str(valid_ids[i[1]]) + '.jpg',
np.transpose(validation_data[0][0][i[1]], (1, 2, 0)))
# util function to convert a tensor into a valid image
def _deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def find_filter_max_input(layer_name='conv_0', filter_index=0,
step=0.01):
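    # Added note: gradient-ascent sketch that tries to synthesize an input
    # maximizing the mean activation of one filter, following the usual Keras
    # filter-visualization recipe; as written it mixes numpy arrays with
    # backend tensors, so a working version would build the K.function from
    # the model's symbolic input tensors instead of `input_imgs`.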
from keras import backend as K
# we start from a gray image with some noise
input_img_data_0 = np.random.random(
(1, 3, 141, 141)) * 20 + 128.
input_img_data_45 = rotate(input_img_data_0, 45)
    # assume the standard square input format
n_pix = input_sizes[0][0]
    input_imgs = [input_img_data_0[:, :, 36:36 + n_pix, 36:36 + n_pix],
                  input_img_data_45[:, :, 36:36 + n_pix, 36:36 + n_pix]]
# build a loss function that maximizes the activation
# of the nth filter of the layer considered
layer_output = winsol.models['model_norm'].layers['main_seq']\
.layers[layer_name].output
loss = K.mean(layer_output[:, :, :, filter_index])
# compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_imgs)[0]
# normalization trick: we normalize the gradient
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# this function returns the loss and grads given the input picture
iterate = K.function([input_imgs], [loss, grads])
# run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate(input_imgs)
input_imgs += grads_value * step
    plt.imshow(_deprocess_image(input_imgs[0][0]))
    plt.show()
find_filter_max_input()
# pred_to_val_hist()
# save_wrong_cat_cutted()
# print_weights(norm=True)
# print_weights(norm=False)
# valid_scatter()
# print_filters(2, norm=True)
# print_filters(3, norm=True)
# highest_conv_activation(img_id=get_best_id('RoundCigar'))
# highest_conv_activation(img_id=get_best_id('Spiral2Arm'))
# highest_conv_activation(img_id=get_best_id('Lense'))
# print_filters(list(valid_ids).index(get_best_id('RoundCigar')))
# print_filters(list(valid_ids).index(get_best_id('Spiral2Arm')))
# print_filters(list(valid_ids).index(get_best_id('Lense')))
# print
# print
# print 'RoundCompletly:'
# for id in get_best_id('RoundCompletly', 5):
# print 'predicted with %.3f' % predictions[list(valid_ids).index(id)][
# output_names.index('RoundCompletly')]
# highest_conv_activation(img_id=id)
# print
# print
# print 'Spiral3Arm:'
# for id in get_best_id('Spiral3Arm', 5):
# print 'predicted with %.3f' % predictions[list(valid_ids).index(id)][
# output_names.index('Spiral3Arm')]
# highest_conv_activation(img_id=id)
# print
# try_different_cut_fraktion(cut_fraktions=map(lambda x: float(
# x) / 80., range(32, 80)), figname=IMAGE_OUTPUT_PATH + '/10_cat_new.eps')
# pixel_correlations(True)
# pixel_correlations()
# print_weights()
# print_weights(True)
save_exit()
|
bsd-3-clause
|
kernc/scikit-learn
|
examples/gaussian_process/plot_gpc.py
|
103
|
3927
|
"""
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing the LML have a considerably
larger LML, they perform slightly worse according to the log-loss on test data.
The figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
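# Note (added): log_marginal_likelihood expects the kernel hyperparameters on
# the log scale (theta), hence np.log() below and np.exp(kernel_.theta) when
# marking the two chosen hyperparameter settings.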
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
|
bsd-3-clause
|
merenlab/web
|
data/sar11-saavs/files/model_1_frequency_vector.py
|
1
|
2337
|
#!/usr/bin/env python
# -*- coding: utf-8
import sys
import numpy as np
import pandas as pd
# r is assumed 0
kappa = float(sys.argv[1]) # transition vs transversion bias
from anvio.sequence import Codon
from anvio.constants import codons, codon_to_AA, amino_acids
dist = Codon().get_codon_to_codon_dist_dictionary()
genes = [int(gene.strip()) for gene in open('List-core-799genes.txt').readlines()]
df = pd.read_csv('codon_frequencies.txt', sep='\t')
vector = df.loc[df['gene_callers_id'].isin(genes), :].iloc[:, 1:].sum(axis=0)
vector /= vector.sum()
exchange_rate = {}
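# Added note: the loop below visits every pair of codons that differ by a
# single nucleotide and encode different amino acids; the expected neutral
# exchange rate for the amino-acid substitution type (AAST) accumulates the
# mean codon frequency times the mutation probability P, where transitions
# are weighted by kappa.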
for X in codons:
for Y in codons:
d = dist[X][Y][0]
amino_acid_X = codon_to_AA[X]
amino_acid_Y = codon_to_AA[Y]
AAST = ''.join(sorted([amino_acid_X, amino_acid_Y]))
if d != 1:
continue
if codons.index(Y) <= codons.index(X):
continue
if amino_acid_X == amino_acid_Y:
continue
t = 'transversion' if dist[X][Y][1] else 'transition'
P = kappa / (kappa + 2) if t == 'transition' else 1 / (kappa + 2)
fX = vector[X]
fY = vector[Y]
score = (fX + fY)/2 * P
if AAST not in exchange_rate:
exchange_rate[AAST] = score
else:
exchange_rate[AAST] += score
top_twenty_five = ["IleVal",
"AspGlu",
"AsnAsp",
"AsnLys",
"AsnSer",
"ArgLys",
"GluLys",
"SerThr",
"IleLeu",
"AlaThr",
"IleThr",
"GlnLys",
"AlaSer",
"LeuPhe",
"AlaVal",
"IleMet",
"GlnGlu",
"PheTyr",
"LeuVal",
"GlySer",
"HisTyr",
"LeuSer",
"LysThr",
"AsnThr",
"ProSer"]
# normalize rates
exchange_rate = {aast: rate/sum(exchange_rate.values()) for aast, rate in exchange_rate.items()}
x = top_twenty_five
y = [exchange_rate[aast] for aast in top_twenty_five]
df = pd.DataFrame(list(zip(*[x,y])), columns=('AAST', 'fraction'))
df.to_csv('neutral_freq_dist_model_1.txt', sep='\t')
|
mit
|
wkfwkf/statsmodels
|
statsmodels/datasets/engel/data.py
|
25
|
1971
|
#! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Engel (1857) food expenditure data"""
SOURCE = """
This dataset was used in Koenker and Bassett (1982) and distributed alongside
the ``quantreg`` package for R.
Koenker, R. and Bassett, G (1982) Robust Tests of Heteroscedasticity based on
Regression Quantiles; Econometrica 50, 43-61.
Roger Koenker (2012). quantreg: Quantile Regression. R package version 4.94.
http://CRAN.R-project.org/package=quantreg
"""
DESCRSHORT = """Engel food expenditure data."""
DESCRLONG = """Data on income and food expenditure for 235 working class households in 1857 Belgium."""
#suggested notes
NOTE = """::
Number of observations: 235
Number of variables: 2
Variable name definitions:
income - annual household income (Belgian francs)
foodexp - annual household food expenditure (Belgian francs)
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/engel.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
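if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); assumes
    # the returned Dataset exposes the loaded table as ``.data``, the usual
    # statsmodels convention.
    dataset = load_pandas()
    print(dataset.data.describe())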
|
bsd-3-clause
|
Srisai85/scikit-learn
|
sklearn/linear_model/omp.py
|
127
|
30417
|
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
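    Examples
    --------
    Illustrative sketch with synthetic data (calling convention only; not
    part of the upstream docstring):
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 10)
    >>> X /= np.sqrt(np.sum(X ** 2, axis=0))  # unit-norm columns, as assumed
    >>> w = np.zeros(10)
    >>> w[[2, 5]] = [1.0, -2.0]
    >>> y = np.dot(X, w)
    >>> coef = orthogonal_mp(X, y, n_nonzero_coefs=2)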
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
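    Examples
    --------
    Illustrative sketch (not part of the upstream docstring); the Gram matrix
    and covariance vector are precomputed from a synthetic dictionary:
    >>> import numpy as np
    >>> from sklearn.linear_model import orthogonal_mp_gram
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(30, 10)
    >>> X /= np.sqrt(np.sum(X ** 2, axis=0))
    >>> y = np.dot(X, rng.randn(10))
    >>> G, Xy = np.dot(X.T, X), np.dot(X.T, y)
    >>> coef = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)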
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
        # otherwise subsequent targets would be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
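    Examples
    --------
    Illustrative sketch with synthetic data (not part of the upstream
    docstring):
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuit
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 20)
    >>> y = np.dot(X, rng.randn(20))
    >>> omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5).fit(X, y)
    >>> y_pred = omp.predict(X)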
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, and therefore the maximum
        number of features to include. 10% of ``n_features`` but at least 5
        if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
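    Examples
    --------
    Illustrative sketch with synthetic data (not part of the upstream
    docstring); the best sparsity level is picked by cross-validation:
    >>> import numpy as np
    >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 20)
    >>> y = np.dot(X, rng.randn(20))
    >>> ompcv = OrthogonalMatchingPursuitCV(cv=3).fit(X, y)
    >>> n_selected = ompcv.n_nonzero_coefs_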
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
|
bsd-3-clause
|
YeoLab/anchor
|
anchor/tests/test_infotheory.py
|
1
|
2974
|
import numpy as np
import pandas.util.testing as pdt
import pandas as pd
import pytest
import six
@pytest.fixture
def size():
return 10
@pytest.fixture
def data(size):
df = pd.DataFrame(np.tile(np.arange(size), (size, 1)))
df.index = df.index.astype(str)
df.columns = df.columns.astype(str)
return df
@pytest.fixture
def df1(data):
return data
@pytest.fixture
def df2(data):
return data.T
@pytest.fixture
def p(df1, bins):
from anchor.infotheory import binify
return binify(df1, bins)
@pytest.fixture
def q(df2, bins):
from anchor.infotheory import binify
return binify(df2, bins)
@pytest.fixture
def bins(size):
return np.linspace(0, size, num=5)
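# Added note: with size == 10 these bins are [0, 2.5, 5, 7.5, 10], i.e. the
# four intervals that show up in the expected range strings and frames below.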
@pytest.fixture(
params=((None, ['0-2.5', '2.5-5', '5-7.5', '7.5-10']),
(':.2f', ['0.00-2.50', '2.50-5.00', '5.00-7.50', '7.50-10.00'])))
def fmt_true(request):
return request.param
def test_bin_range_strings(bins, fmt_true):
from anchor.infotheory import bin_range_strings
fmt, true = fmt_true
if fmt is None:
test = bin_range_strings(bins)
else:
test = bin_range_strings(bins, fmt=fmt)
assert test == true
@pytest.fixture(
params=(pytest.mark.xfail(-np.ones(10)),
pytest.mark.xfail(np.zeros(10)),
pytest.mark.xfail(np.ones(10))))
def x(request):
return request.param
def test__check_prob_dist(x):
from anchor.infotheory import _check_prob_dist
# All the tests should raise an error
_check_prob_dist(x)
def test_binify(df1, bins):
from anchor.infotheory import binify
test = binify(df1, bins)
s = ''',0,1,2,3,4,5,6,7,8,9
0-2.5,1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
2.5-5,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0
5-7.5,0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0
7.5-10,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0'''
true = pd.read_csv(six.StringIO(s), index_col=0)
pdt.assert_frame_equal(test, true)
def test_kld(p, q):
from anchor.infotheory import kld
test = kld(p, q)
s = '''0,1.7369655941662063
1,1.7369655941662063
2,1.7369655941662063
3,2.321928094887362
4,2.321928094887362
5,1.7369655941662063
6,1.7369655941662063
7,1.7369655941662063
8,2.321928094887362
9,2.321928094887362'''
true = pd.read_csv(six.StringIO(s), index_col=0, squeeze=True, header=None)
true.index.name = None
true.name = None
true.index = true.index.astype(str)
pdt.assert_series_equal(test, true)
def test_jsd(p, q):
from anchor.infotheory import jsd
test = jsd(p, q)
s = '''0,0.49342260576014463
1,0.49342260576014463
2,0.49342260576014463
3,0.6099865470109875
4,0.6099865470109875
5,0.49342260576014463
6,0.49342260576014463
7,0.49342260576014463
8,0.6099865470109875
9,0.6099865470109875'''
true = pd.read_csv(six.StringIO(s), index_col=0, squeeze=True, header=None)
true.index.name = None
true.name = None
true.index = true.index.astype(str)
pdt.assert_series_equal(test, true)
|
bsd-3-clause
|
tapomayukh/projects_in_python
|
rapid_categorization/haptic_map/online_haptic_map_taxel_based.py
|
1
|
16270
|
# Online haptic_map implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/')
from data_variable_length_force_sample import Fmat_original, temp_num_fol, temp_num_trunk
def callback(data, callback_args):
rospy.loginfo('Getting data!')
# Fixing Transforms
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
# Gathering Force Data
force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
fmags_instant = ut.norm(force_vectors)
threshold = 0.01
force_arr = fmags_instant.reshape((16,24))
fmags_tuned = fmags_instant - threshold
fmags_tuned[np.where(fmags_tuned<0)]=0
fmags_instant_tuned = fmags_tuned
global fmags
for i in range(len(fmags_instant_tuned)):
if fmags_instant_tuned[i] > 0.0:
fmags[i].append(fmags_instant_tuned[i])
else:
fmags[i] = []
# Gathering Contact Data for Haptic Mapping
global global_contact_vector
for i in range(len(fmags_instant_tuned)):
global_contact_vector[i] = r1*((np.column_stack([data.centers_x[i], data.centers_y[i], data.centers_z[i]])).T) + t1
test_data()
global taxel_FLAG
for i in range(len(fmags_instant_tuned)):
if taxel_FLAG[i] > -1:
idx = taxel_FLAG[i]
contact_info = global_contact_vector[i]
pubdata(idx, contact_info)
def test_data():
# Do Stuff For Testing which basically returns which FLAG is true
global taxel_FLAG # -1 for not in Contact, 0 for Unknown (Red), 1 for Foliage (green), 2 for Trunk (brown)
# For Testing
global fmags
for i in range(384):
if (len(fmags[i]) > 0):
ts_obj = fmags[i]
final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
# Find Viterbi Path
global model_ff
global model_tf
path_ff_obj = model_ff.viterbi(final_ts_obj)
path_tf_obj = model_tf.viterbi(final_ts_obj)
print path_ff_obj[1], path_tf_obj[1]
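            # Added note: viterbi() returns (state path, log-likelihood); the
            # gap between the foliage and trunk log-likelihoods is used below
            # as a confidence margin before a class is assigned.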
diff = abs(path_ff_obj[1]-path_tf_obj[1])
obj = max(path_ff_obj[1],path_tf_obj[1])
obj_min = min(abs(path_ff_obj[1]),abs(path_tf_obj[1]))
if ((obj == path_ff_obj[1]) and (diff > 10)):
#if ((obj == path_ff_obj[1]) and (obj_min > 1800)):
#if ((obj == path_ff_obj[1])):
print 'Taxel', i, 'is Foliage !'
taxel_FLAG[i] = 1
#elif ((obj == path_tf_obj[1]) and (obj_min > 1800)):
elif ((obj == path_tf_obj[1]) and (diff > 20)):
#elif ((obj == path_tf_obj[1])):
print 'Taxel', i, 'is Trunk !'
taxel_FLAG[i] = 2
#elif ((obj == path_tf_obj[1]):
#print 'Taxel', i, 'is Uncertain'
#taxel_FLAG[i] = 0
else:
taxel_FLAG[i] = 0
print 'Taxel', i, 'is Unknown'
else:
print 'Taxel', i, 'is not in Contact'
taxel_FLAG[i] = -1
def getdata():
rospy.loginfo('Initializing the Node !')
rospy.init_node('Online_Haptic_Map_Builder', anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.loginfo('Waiting to Subscribe to the Skin Message...')
rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
def pubdata(idx, contact_info):
rospy.loginfo('Publishing data')
marker = Marker()
marker.ns = 'Haptic_Map_Markers'
marker.header.frame_id = '/torso_lift_link'
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.02
marker.scale.y = 0.02
marker.scale.z = 0.02
if idx == 1:
# Green for Foliage
marker.color.a = 1.0;
marker.color.r = 0.0;
marker.color.g = 1.0;
marker.color.b = 0.0;
elif idx == 2:
# Brown for Trunk
marker.color.a = 1.0;
marker.color.r = 0.5;
marker.color.g = 0.25;
marker.color.b = 0.125;
else:
# Red for Unknown
marker.color.a = 0.0;
marker.color.r = 1.0;
marker.color.g = 0.0;
marker.color.b = 0.0;
marker.pose.orientation.w = 1.0
marker.pose.position.x = contact_info[0]
marker.pose.position.y = contact_info[1]
marker.pose.position.z = contact_info[2]
markerArray.markers.append(marker)
# Renumber the marker IDs
id = 0
for m in markerArray.markers:
m.id = id
id += 1
# Publish the MarkerArray
publisher.publish(markerArray)
#rospy.sleep(0.01)
if __name__ == '__main__':
topic = 'visualization_marker_array'
publisher = rospy.Publisher(topic, MarkerArray)
markerArray = MarkerArray()
print "Initializing the HMM Models"
# HMM Implementation
Fmat = Fmat_original
Foliage_Trials = temp_num_fol
Trunk_Trials = temp_num_trunk
# Getting mean / covariance
i = 0
number_states = 10
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < Foliage_Trials):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 0:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_ff_force = np.zeros((number_states,1))
sigma_ff = np.zeros((number_states,1))
while (j < number_states):
mu_ff_force[j] = np.mean(feature_1_final_data[j])
sigma_ff[j] = scp.std(feature_1_final_data[j])
j = j+1
i = Foliage_Trials
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < (Foliage_Trials + Trunk_Trials)):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == Foliage_Trials:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_tf_force = np.zeros((number_states,1))
sigma_tf = np.zeros((number_states,1))
while (j < number_states):
mu_tf_force[j] = np.mean(feature_1_final_data[j])
sigma_tf[j] = scp.std(feature_1_final_data[j])
j = j+1
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Two HMM models are trained here: foliage (FF) and trunk (TF)
    # Transition probabilities form an upper-triangular matrix (to be trained using Baum-Welch)
    # A new contact is classified according to the model it matches most closely (higher Viterbi log-likelihood)
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
if number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_ff = [0.0]*number_states
B_tf = [0.0]*number_states
for num_states in range(number_states):
B_ff[num_states] = [mu_ff_force[num_states][0],sigma_ff[num_states][0]]
B_tf[num_states] = [mu_tf_force[num_states][0],sigma_tf[num_states][0]]
# pi - initial probabilities per state
if number_states == 3:
pi = [1./3.] * 3
elif number_states == 5:
pi = [0.2] * 5
elif number_states == 10:
pi = [0.1] * 10
elif number_states == 15:
pi = [1./15.] * 15
elif number_states == 20:
pi = [0.05] * 20
# generate FF, TF models from parameters
model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained
total_seq = Fmat
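# Flatten each trial's nested observation lists into one 1-D sequence
# (sum(xs, []) concatenates a list of lists).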
for i in range((Foliage_Trials + Trunk_Trials)):
total_seq[i][:] = sum(total_seq[i][:],[])
total_seq_ff = total_seq[0:Foliage_Trials]
total_seq_tf = total_seq[Foliage_Trials:Foliage_Trials + Trunk_Trials]
#print len(total_seq_ff)
#print len(total_seq_tf)
print "Training the HMM Models..."
train_seq_ff = total_seq_ff
train_seq_tf = total_seq_tf
final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
model_ff.baumWelch(final_ts_ff)
model_tf.baumWelch(final_ts_tf)
print "Models Trained: Ready to Collect Data !"
# Gather Data from Robot Online
taxel_FLAG = {}
for i in range(384):
taxel_FLAG[i] = -1 # -1 for not in Contact, 0 for Unknown (Red), 1 for Foliage (green), 2 for Trunk (brown)
fmags = {}
for i in range(384):
fmags[i] = []
global_contact_vector = {}
for i in range(384):
global_contact_vector[i] = []
FLAG_Trunk = False
FLAG_Foliage = False
FLAG_Unknown = True
getdata()
|
mit
|
mhue/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
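    # Draw one ellipse per mixture component: the eigendecomposition of the 2x2
    # covariance block gives the ellipse orientation (eigenvectors) and the
    # relative axis lengths (eigenvalues).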
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
rmcgibbo/msmbuilder
|
msmbuilder/featurizer/featurizer.py
|
1
|
36870
|
# Author: Kyle A. Beauchamp <[email protected]>
# Contributors: Robert McGibbon <[email protected]>,
# Matthew Harrigan <[email protected]>
# Brooke Husic <[email protected]>
# Copyright (c) 2015, Stanford University and the Authors
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
from six.moves import cPickle
import numpy as np
import mdtraj as md
from sklearn.base import TransformerMixin
import sklearn.pipeline
from sklearn.externals.joblib import Parallel, delayed
from msmbuilder import libdistance
from ..base import BaseEstimator
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def featurize_all(filenames, featurizer, topology, chunk=1000, stride=1):
"""Load and featurize many trajectory files.
Parameters
----------
filenames : list of strings
List of paths to MD trajectory files
featurizer : Featurizer
The featurizer to be invoked on each trajectory trajectory as
it is loaded
topology : str, Topology, Trajectory
Topology or path to a topology file, used to load trajectories with
MDTraj
chunk : {int, None}
If chunk is an int, load the trajectories up in chunks using
md.iterload for better memory efficiency (less trajectory data needs
to be in memory at once)
stride : int, default=1
Only read every stride-th frame.
Returns
-------
data : np.ndarray, shape=(total_length_of_all_trajectories, n_features)
indices : np.ndarray, shape=(total_length_of_all_trajectories)
fns : np.ndarray shape=(total_length_of_all_trajectories)
These three arrays all share the same indexing, such that data[i] is
the featurized version of indices[i]-th frame in the MD trajectory
with filename fns[i].
"""
data = []
indices = []
fns = []
for file in filenames:
kwargs = {} if file.endswith('.h5') else {'top': topology}
count = 0
for t in md.iterload(file, chunk=chunk, stride=stride, **kwargs):
x = featurizer.partial_transform(t)
n_frames = len(x)
data.append(x)
indices.append(count + (stride*np.arange(n_frames)))
fns.extend([file] * n_frames)
count += (stride*n_frames)
if len(data) == 0:
raise ValueError("No frames were loaded from the supplied trajectory files")
return np.concatenate(data), np.concatenate(indices), np.array(fns)
def load(filename):
"""Load a featurizer from a cPickle file."""
with open(filename, 'rb') as f:
featurizer = cPickle.load(f)
return featurizer
class Featurizer(BaseEstimator, TransformerMixin):
"""Base class for objects that featurize Trajectories.
Notes
-----
At the bare minimum, a featurizer must implement the `partial_transform(traj)`
member function. A `transform(traj_list)` for featurizing multiple
trajectories in batch will be provided.
"""
def __init__(self):
pass
def featurize(self, traj):
raise NotImplementedError('This API was removed. Use partial_transform instead')
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
pass
def fit(self, traj_list, y=None):
return self
def transform(self, traj_list, y=None):
"""Featurize a several trajectories.
Parameters
----------
traj_list : list(mdtraj.Trajectory)
Trajectories to be featurized.
Returns
-------
features : list(np.ndarray), length = len(traj_list)
The featurized trajectories. features[i] is the featurized
version of traj_list[i] and has shape
(n_samples_i, n_features)
"""
return [self.partial_transform(traj) for traj in traj_list]
def save(self, filename):
with open(filename, 'wb') as f:
cPickle.dump(self, f)
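# Minimal sketch of a custom featurizer (illustrative only, not part of this module):
# only partial_transform() has to be implemented; fit()/transform() come from the
# base class above.
#
#   class RadiusOfGyrationFeaturizer(Featurizer):
#       def partial_transform(self, traj):
#           # md.compute_rg returns one value per frame; reshape to (n_frames, 1)
#           return md.compute_rg(traj).reshape(-1, 1)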
class SuperposeFeaturizer(Featurizer):
"""Featurizer based on euclidian atom distances to reference structure.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing the distances from a specified set of atoms to
the 'reference position' of those atoms, in ``reference_traj``.
Parameters
----------
atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
The indices of the atoms to superpose and compute the distances with
reference_traj : md.Trajectory
The reference conformation to superpose each frame with respect to
(only the first frame in reference_traj is used)
superpose_atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
If not None, these atom_indices are used for the superposition
"""
def __init__(self, atom_indices, reference_traj, superpose_atom_indices=None):
self.atom_indices = atom_indices
if superpose_atom_indices is None:
self.superpose_atom_indices = atom_indices
else:
self.superpose_atom_indices = superpose_atom_indices
self.reference_traj = reference_traj
self.n_features = len(self.atom_indices)
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via distance
after superposition
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
traj.superpose(self.reference_traj, atom_indices=self.superpose_atom_indices)
diff2 = (traj.xyz[:, self.atom_indices] -
self.reference_traj.xyz[0, self.atom_indices]) ** 2
x = np.sqrt(np.sum(diff2, axis=2))
return x
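# Hypothetical usage (sketch; file names and selections are placeholders):
#   ref = md.load('reference.pdb')
#   ca = ref.topology.select('name CA')
#   feat = SuperposeFeaturizer(atom_indices=ca, reference_traj=ref)
#   X = feat.partial_transform(md.load('traj.dcd', top='reference.pdb'))
#   # X has shape (n_frames, len(ca))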
class StrucRMSDFeaturizer(Featurizer):
"""Featurizer based on RMSD to one or more reference structures.
This featurizer inputs a trajectory to be analyzed ('traj') and a
reference trajectory ('ref') and outputs the RMSD of each frame of
traj with respect to each frame in ref. The output is a numpy array
with n_rows = traj.n_frames and n_columns = ref.n_frames.
Parameters
----------
atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
The indices of the atoms to superpose and compute the distances with
reference_traj : md.Trajectory
The reference conformation to superpose each frame with respect to
(only the first frame in reference_traj is used)
superpose_atom_indices : np.ndarray, shape=(n_atoms,), dtype=int
If not None, these atom_indices are used for the superposition
"""
def __init__(self, atom_indices, reference_traj, superpose_atom_indices=None):
self.atom_indices = atom_indices
if superpose_atom_indices is None:
self.superpose_atom_indices = atom_indices
else:
self.superpose_atom_indices = superpose_atom_indices
self.reference_traj = reference_traj
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via distance
after superposition
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
result = libdistance.cdist(traj, self.reference_traj, 'rmsd')
return result
class AtomPairsFeaturizer(Featurizer):
"""Featurizer based on distances between specified pairs of atoms.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the distances between the specified pairs of atoms.
Parameters
----------
pair_indices : np.ndarray, shape=(n_pairs, 2), dtype=int
Each row gives the indices of two atoms involved in the interaction.
periodic : bool, default=False
If `periodic` is True and the trajectory contains unitcell
information, we will compute distances under the minimum image
convention.
exponent : float
Modify the distances by raising them to this exponent.
"""
def __init__(self, pair_indices, periodic=False, exponent=1.):
# TODO: We might want to implement more error checking here. Or during
# featurize(). E.g. are the pair_indices supplied valid?
self.pair_indices = pair_indices
self.atom_indices = pair_indices
self.n_features = len(self.pair_indices)
self.periodic = periodic
self.exponent = exponent
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via pairwise
atom-atom distances
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
d = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic)
return d ** self.exponent
class DihedralFeaturizer(Featurizer):
"""Featurizer based on dihedral angles.
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing one or more of the backbone or side-chain dihedral
angles, or the sin and cosine of these angles.
Parameters
----------
types : list
One or more of ['phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4']
sincos : bool
Instead of outputting the angle, return the sine and cosine of the
angle as separate features.
"""
def __init__(self, types=['phi', 'psi'], sincos=True):
if isinstance(types, str):
types = [types]
self.types = list(types) # force a copy
self.sincos = sincos
known = {'phi', 'psi', 'omega', 'chi1', 'chi2', 'chi3', 'chi4'}
if not set(types).issubset(known):
raise ValueError('angles must be a subset of %s. you supplied %s' % (
str(known), str(types)))
def describe_features(self, traj):
"""Return a list of dictionaries describing the Dihderal features."""
x = []
for a in self.types:
func = getattr(md, 'compute_%s' % a)
aind, y = func(traj)
n = len(aind)
resSeq = [(np.unique([traj.top.atom(j).residue.resSeq for j in i])) for i in aind]
resid = [(np.unique([traj.top.atom(j).residue.index for j in i])) for i in aind]
resnames = [[traj.topology.residue(j).name for j in i ] for i in resid]
bigclass = ["dihedral"] * n
smallclass = [a] * n
if self.sincos:
#x.extend([np.sin(y), np.cos(y)])
aind = list(aind) * 2
resnames = resnames * 2
resSeq = resSeq * 2
resid = resid * 2
otherInfo = (["sin"] * n) + (["cos"] * n)
bigclass = bigclass * 2
smallclass = smallclass * 2
else:
otherInfo = ["nosincos"] * n
for i in range(len(resnames)):
d_i = dict(resname=resnames[i], atomind=aind[i],resSeq=resSeq[i], resid=resid[i],\
otherInfo=otherInfo[i], bigclass=bigclass[i], smallclass=smallclass[i])
x.append(d_i)
return x
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of dihedral (torsion) angles
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
x = []
for a in self.types:
func = getattr(md, 'compute_%s' % a)
_,y = func(traj)
if self.sincos:
x.extend([np.sin(y), np.cos(y)])
else:
x.append(y)
return np.hstack(x)
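# Note: with sincos=True each selected dihedral contributes two columns (sin and cos),
# so the feature count is twice the number of angles; with sincos=False the raw
# angles in radians are returned.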
class AlphaAngleFeaturizer(Featurizer):
"""Featurizer to extract alpha (dihedral) angles.
The alpha angle of residue `i` is the dihedral formed by the four CA atoms
of residues `i-1`, `i`, `i+1` and `i+2`.
Parameters
----------
sincos : bool
Instead of outputting the angle, return the sine and cosine of the
angle as separate features.
"""
def __init__(self, sincos=True):
self.sincos = sincos
self.atom_indices = None
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of dihedral (torsion) angles of alpha carbon backbone
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
"""
ca = [a.index for a in traj.top.atoms if a.name == 'CA']
if len(ca) < 4:
return np.zeros((len(traj), 0), dtype=np.float32)
alpha_indices = np.array(
[(ca[i - 1], ca[i], ca[i+1], ca[i + 2]) for i in range(1, len(ca) - 2)])
result = md.compute_dihedrals(traj, alpha_indices)
x = []
if self.atom_indices is None:
self.atom_indices = np.vstack(alpha_indices)
if self.sincos:
x.extend([np.cos(result), np.sin(result)])
else:
x.append(result)
return np.hstack(x)
def describe_features(self, traj):
"""Return a list of dictionaries describing the alpha dihedral angle features."""
x = []
#fill in the atom indices using just the first frame
res_ = self.partial_transform(traj[0])
if self.atom_indices is not None:
aind = self.atom_indices
n = len(aind)
resSeq = [(np.unique([traj.top.atom(j).residue.resSeq for j in i])) for i in aind]
resid = [(np.unique([traj.top.atom(j).residue.index for j in i])) for i in aind]
resnames = [[traj.topology.residue(j).name for j in i ] for i in resid]
bigclass = ["dihedral"] * n
smallclass = ["alpha"] * n
if self.sincos:
#x.extend([np.sin(y), np.cos(y)])
aind = list(aind) * 2
resnames = resnames * 2
resSeq = resSeq * 2
resid = resid * 2
otherInfo = (["sin"] * n) + (["cos"] * n)
bigclass = bigclass * 2
smallclass = smallclass * 2
else:
otherInfo = ["nosincos"] * n
for i in range(len(resnames)):
d_i = dict(resname=resnames[i], atomind=aind[i],resSeq=resSeq[i], resid=resid[i],\
otherInfo=otherInfo[i], bigclass=bigclass[i], smallclass=smallclass[i])
x.append(d_i)
return x
else:
raise UserWarning("Cannot describe features for trajectories with fewer than 4 alpha carbon\
using AlphaAngleFeaturizer")
class KappaAngleFeaturizer(Featurizer):
"""Featurizer to extract kappa angles.
The kappa angle of residue `i` is the angle formed by the three CA atoms
of residues `i-2`, `i` and `i+2`. This featurizer extracts the
`n_residues - 4` kappa angles of each frame in a trajectory.
Parameters
----------
cos : bool
Compute the cosine of the angle instead of the angle itself.
"""
def __init__(self, cos=True):
self.cos = cos
self.atom_indices = None
def partial_transform(self, traj):
ca = [a.index for a in traj.top.atoms if a.name == 'CA']
if len(ca) < 5:
return np.zeros((len(traj), 0), dtype=np.float32)
angle_indices = np.array(
[(ca[i - 2], ca[i], ca[i + 2]) for i in range(2, len(ca) - 2)])
result = md.compute_angles(traj, angle_indices)
if self.atom_indices is None:
self.atom_indices = np.vstack(angle_indices)
if self.cos:
return np.cos(result)
assert result.shape == (traj.n_frames, traj.n_residues - 4)
return result
def describe_features(self, traj):
"""Return a list of dictionaries describing the Kappa angle features."""
x = []
#fill in the atom indices using just the first frame
res_ = self.partial_transform(traj[0])
if self.atom_indices is not None:
aind = self.atom_indices
n = len(aind)
resSeq = [(np.unique([traj.top.atom(j).residue.resSeq for j in i])) for i in aind]
resid = [(np.unique([traj.top.atom(j).residue.index for j in i])) for i in aind]
resnames = [[traj.topology.residue(j).name for j in i ] for i in resid]
bigclass = ["angle"] * n
smallclass = ["kappa"] * n
if self.cos:
otherInfo = (["cos"] * n)
else:
otherInfo = ["nocos"] * n
assert len(self.atom_indices)==len(resnames)
for i in range(len(resnames)):
d_i = dict(resname=resnames[i], atomind=aind[i],resSeq=resSeq[i], resid=resid[i],\
otherInfo=otherInfo[i], bigclass=bigclass[i], smallclass=smallclass[i])
x.append(d_i)
return x
else:
raise UserWarning("Cannot describe features for trajectories with fewer than 5 alpha carbon\
using KappaAngle Featurizer")
class SASAFeaturizer(Featurizer):
"""Featurizer based on solvent-accessible surface areas.
Parameters
----------
mode : {'atom', 'residue'}, default='residue'
In mode == 'atom', the extracted features are the per-atom
SASA. In mode == 'residue', this is consolidated down to
the per-residue SASA by summing over the atoms in each
residue.
Other Parameters
----------------
probe_radius : float
n_sphere_points : int
If supplied, these arguments will be passed directly to
`mdtraj.shrake_rupley`, overriding default values.
See Also
--------
mdtraj.shrake_rupley
"""
def __init__(self, mode='residue', **kwargs):
self.mode = mode
self.kwargs = kwargs
def partial_transform(self, traj):
return md.shrake_rupley(traj, mode=self.mode, **self.kwargs)
class ContactFeaturizer(Featurizer):
"""Featurizer based on residue-residue distances
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector of the distances between pairs of amino-acid residues.
The exact method for computing the distance between two residues
is configurable with the ``scheme`` parameter.
Parameters
----------
contacts : np.ndarray or 'all'
array containing (0-indexed) indices of the residues to compute the
contacts for. (e.g. np.array([[0, 10], [0, 11]]) would compute
the contact between residue 0 and residue 10 as well as
the contact between residue 0 and residue 11.) [NOTE: if no
array is passed then 'all' contacts are calculated. This means
that the result will contain all contacts between residues
separated by at least 3 residues.]
scheme : {'ca', 'closest', 'closest-heavy'}
scheme to determine the distance between two residues:
'ca' : distance between two residues is given by the distance
between their alpha carbons
'closest' : distance is the closest distance between any
two atoms in the residues
'closest-heavy' : distance is the closest distance between
any two non-hydrogen atoms in the residues
ignore_nonprotein : bool
When using ``contacts='all'``, don't compute contacts between
"residues" which are not protein (i.e. do not contain an alpha
carbon).
"""
def __init__(self, contacts='all', scheme='closest-heavy', ignore_nonprotein=True):
self.contacts = contacts
self.scheme = scheme
self.ignore_nonprotein = ignore_nonprotein
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via of residue-residue
distances
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
distances, _ = md.compute_contacts(traj, self.contacts, self.scheme, self.ignore_nonprotein)
return distances
def describe_features(self, traj):
"""Return a list of dictionaries describing the features in Contacts."""
x = []
#fill in the atom indices using just the first frame
distances,residue_indices = md.compute_contacts(traj, self.contacts, self.scheme, self.ignore_nonprotein)
n = residue_indices.shape[0]
aind = ["N/A"] * n
resSeq = [np.array([traj.top.residue(j).resSeq for j in i]) for i in residue_indices]
resid = [np.array([traj.top.residue(j).index for j in i]) for i in residue_indices]
resnames = [[traj.topology.residue(j).name for j in i ] for i in resid]
bigclass = [self.contacts] * n
smallclass = [self.scheme] * n
otherInfo = [self.ignore_nonprotein]*n
for i in range(n):
d_i = dict(resname=resnames[i], atomind=aind[i],resSeq=resSeq[i], resid=resid[i],\
otherInfo=otherInfo[i], bigclass=bigclass[i], smallclass=smallclass[i])
x.append(d_i)
return x
class GaussianSolventFeaturizer(Featurizer):
"""Featurizer on weighted pairwise distance between solute and solvent.
We apply a Gaussian kernel to each solute-solvent pairwise distance
and sum the kernels for each solute atom, resulting in a vector
of len(solute_indices).
The values can be physically interpreted as the degree of solvation
of each solute atom.
Parameters
----------
solute_indices : np.ndarray, shape=(n_solute,)
Indices of solute atoms
solvent_indices : np.ndarray, shape=(n_solvent,)
Indices of solvent atoms
sigma : float
Sets the length scale for the gaussian kernel
periodic : bool
Whether to consider a periodic system in distance calculations
References
----------
..[1] Gu, Chen, et al. BMC Bioinformatics 14, no. Suppl 2
(January 21, 2013): S8. doi:10.1186/1471-2105-14-S2-S8.
"""
def __init__(self, solute_indices, solvent_indices, sigma, periodic=False):
self.solute_indices = solute_indices
self.solvent_indices = solvent_indices
self.sigma = sigma
self.periodic = periodic
self.n_features = len(self.solute_indices)
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space via calculation
of solvent fingerprints
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
# The result vector
fingerprints = np.zeros((traj.n_frames, self.n_features))
atom_pairs = np.zeros((len(self.solvent_indices), 2))
sigma = self.sigma
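# The loop below maps each solute-solvent distance d to exp(-d / (2 * sigma**2))
# and sums over solvent atoms, so larger values indicate a more solvated solute atom
# (cf. the class docstring).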
for i, solute_i in enumerate(self.solute_indices):
# For each solute atom, calculate distance to all solvent
# molecules
atom_pairs[:, 0] = solute_i
atom_pairs[:, 1] = self.solvent_indices
distances = md.compute_distances(traj, atom_pairs, periodic=self.periodic)
distances = np.exp(-distances / (2 * sigma * sigma))
# Sum over water atoms for all frames
fingerprints[:, i] = np.sum(distances, axis=1)
return fingerprints
class RawPositionsFeaturizer(Featurizer):
"""Featurize an MD trajectory into a vector space with the raw
cartesian coordinates
Parameters
----------
atom_indices : None or array-like, dtype=int, shape=(n_atoms)
If specified, only return the coordinates for the atoms
given by atom_indices. Otherwise return all atoms
ref_traj : None or md.Trajectory
If specified, superpose each trajectory to the first frame of
ref_traj before getting positions. If atom_indices is also
specified, only superpose based on those atoms. The superposition
will modify each transformed trajectory *in place*.
"""
def __init__(self, atom_indices=None, ref_traj=None):
super(RawPositionsFeaturizer, self).__init__()
self.atom_indices = atom_indices
if atom_indices is not None and ref_traj is not None:
self.ref_traj = ref_traj.atom_slice(atom_indices)
else:
self.ref_traj = ref_traj
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space with the raw
cartesian coordinates.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
Notes
-----
If you requested superposition (gave `ref_traj` in __init__) the
input trajectory will be modified.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
# Optionally take only certain atoms
if self.atom_indices is not None:
p_traj = traj.atom_slice(self.atom_indices)
else:
p_traj = traj
# Optionally superpose to a reference trajectory.
if self.ref_traj is not None:
p_traj.superpose(self.ref_traj, parallel=False)
# Get the positions and reshape.
value = p_traj.xyz.reshape(len(p_traj), -1)
return value
class RMSDFeaturizer(Featurizer):
"""Featurizer based on RMSD to a series of reference frames.
Parameters
----------
trj0 : mdtraj.Trajectory
Reference trajectory. trj0.n_frames gives the number of features
in this Featurizer.
atom_indices : np.ndarray, default=None
Which atom indices to use during RMSD calculation. If None, MDTraj
should default to all atoms.
"""
def __init__(self, trj0, atom_indices=None):
self.n_features = trj0.n_frames
self.trj0 = trj0
self.atom_indices = atom_indices
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space by calculating
the RMSD to each frame in a reference trajectory.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
X = np.zeros((traj.n_frames, self.n_features))
for frame in range(self.n_features):
X[:, frame] = md.rmsd(traj, self.trj0, atom_indices=self.atom_indices, frame=frame)
return X
class DRIDFeaturizer(Featurizer):
"""Featurizer based on distribution of reciprocal interatomic
distances (DRID)
This featurizer transforms a dataset containing MD trajectories into
a vector dataset by representing each frame in each of the MD trajectories
by a vector containing the first three moments of a collection of
reciprocal interatomic distances. For details, see [1].
References
----------
.. [1] Zhou, Caflisch; Distribution of Reciprocal of Interatomic Distances:
A Fast Structural Metric. JCTC 2012 doi:10.1021/ct3003145
Parameters
----------
atom_indices : array-like of ints, default=None
Which atom indices to use during DRID featurization. If None,
all atoms are used
"""
def __init__(self, atom_indices=None):
self.atom_indices = atom_indices
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space using the distribution
of reciprocal interatomic distance (DRID) method.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
return md.geometry.compute_drid(traj, self.atom_indices)
class TrajFeatureUnion(BaseEstimator, sklearn.pipeline.FeatureUnion):
"""Mixtape version of sklearn.pipeline.FeatureUnion
Notes
-----
Works on lists of trajectories.
"""
def fit_transform(self, traj_list, y=None, **fit_params):
"""Fit all transformers using `trajectories`, transform the data
and concatenate results.
Parameters
----------
traj_list : list (of mdtraj.Trajectory objects)
Trajectories to featurize
y : Unused
Unused
Returns
-------
Y : list (of np.ndarray)
Y[i] is the featurized version of X[i]
Y[i] will have shape (n_samples_i, n_features), where
n_samples_i is the length of trajectory i and n_features
is the total (concatenated) number of features in the
concatenated list of featurizers.
"""
self.fit(traj_list, y, **fit_params)
return self.transform(traj_list)
def transform(self, traj_list):
"""Transform traj_list separately by each transformer, concatenate results.
Parameters
----------
trajectories : list (of mdtraj.Trajectory objects)
Trajectories to featurize
Returns
-------
Y : list (of np.ndarray)
Y[i] is the featurized version of X[i]
Y[i] will have shape (n_samples_i, n_features), where
n_samples_i is the length of trajectory i and n_features
is the total (concatenated) number of features in the
concatenated list of featurizers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(sklearn.pipeline._transform_one)(trans, name, traj_list, self.transformer_weights)
for name, trans in self.transformer_list)
X_i_stacked = [np.hstack([Xs[feature_ind][trj_ind] for feature_ind in range(len(Xs))]) for trj_ind in range(len(Xs[0]))]
return X_i_stacked
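# Hypothetical usage (sketch; the featurizer choices are placeholders):
#   union = TrajFeatureUnion([('dihedrals', DihedralFeaturizer()),
#                             ('contacts', ContactFeaturizer(scheme='ca'))])
#   features = union.fit_transform(list_of_trajectories)
#   # features[i] horizontally concatenates the outputs of the individual featurizers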
class Slicer(Featurizer):
"""Extracts slices (e.g. subsets) from data along the feature dimension.
Parameters
----------
index : list of integers, optional, default=None
These indices are the feature indices that will be selected
by the Slicer.transform() function.
"""
def __init__(self, index=None):
self.index = index
def partial_transform(self, X):
"""Slice a single input array along to select a subset of features.
Parameters
----------
X : np.ndarray, shape=(n_samples, n_features)
A sample to slice.
Returns
-------
X2 : np.ndarray shape=(n_samples, n_feature_subset)
Slice of X
"""
return X[:, self.index]
class FirstSlicer(Slicer):
"""Extracts slices (e.g. subsets) from data along the feature dimension.
Parameters
----------
first : int, optional, default=None
Select the first N features. This is essentially a shortcut for
`Slicer(index=arange(first))`
"""
def __init__(self, first=None):
self.first = first
@property
def index(self):
return np.arange(self.first)
|
lgpl-2.1
|
ghisvail/vispy
|
vispy/visuals/axis.py
|
1
|
18201
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .visual import CompoundVisual
from .line import LineVisual
from .text import TextVisual
# XXX TODO list (see code, plus):
# 1. Automated tick direction?
# 2. Expand to 3D (only 2D supported currently)
# 3. Input validation
# 4. Property support
# 5. Reactivity to resizing (current tick lengths grow/shrink w/zoom)
# 6. Improve tick label naming (str(x) is not good) and tick selection
class AxisVisual(CompoundVisual):
"""Axis visual
Parameters
----------
pos : array
Co-ordinates of start and end of the axis.
domain : tuple
The data values at the beginning and end of the axis, used for tick
labels. i.e. (5, 10) means the axis starts at 5 and ends at 10. Default
is (0, 1).
tick_direction : array
The tick direction to use (in document coordinates).
scale_type : str
The type of scale. For now only 'linear' is supported.
axis_color : tuple
RGBA values for the axis colour. Default is black.
tick_color : tuple
RGBA values for the tick colours. The colour for the major and minor
ticks is currently fixed to be the same. Default is a dark grey.
text_color : Color
The color to use for drawing tick values.
font_size : float
The font size to use for rendering tick values.
**kwargs : dict
Keyword arguments to pass to `Visual`.
"""
def __init__(self, pos=None, domain=(0., 1.), tick_direction=(-1., 0.),
scale_type="linear", axis_color=(1, 1, 1),
tick_color=(0.7, 0.7, 0.7), text_color='w', font_size=8):
if scale_type != 'linear':
raise NotImplementedError('only linear scaling is currently '
'supported')
self._pos = None
self._domain = None
# If True, then axis stops at the first / last major tick.
# If False, then axis extends to edge of *pos*
# (private until we come up with a better name for this)
self._stop_at_major = (False, False)
self.ticker = Ticker(self)
self.tick_direction = np.array(tick_direction, float)
self.scale_type = scale_type
self.axis_color = axis_color
self.tick_color = tick_color
self.minor_tick_length = 5 # px
self.major_tick_length = 10 # px
self.label_margin = 5 # px
self._need_update = True
self._line = LineVisual(method='gl', width=3.0)
self._ticks = LineVisual(method='gl', width=2.0, connect='segments')
self._text = TextVisual(font_size=font_size, color=text_color)
CompoundVisual.__init__(self, [self._line, self._text, self._ticks])
if pos is not None:
self.pos = pos
self.domain = domain
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, pos):
self._pos = np.array(pos, float)
self._need_update = True
self.update()
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, d):
if self._domain is None or d != self._domain:
self._domain = d
self._need_update = True
self.update()
@property
def _vec(self):
"""Vector in the direction of the axis line"""
return self.pos[1] - self.pos[0]
def _update_subvisuals(self):
tick_pos, labels, label_pos, anchors = self.ticker.get_update()
self._line.set_data(pos=self.pos, color=self.axis_color)
self._ticks.set_data(pos=tick_pos, color=self.tick_color)
self._text.text = list(labels)
self._text.pos = label_pos
self._text.anchors = anchors
self._need_update = False
def _prepare_draw(self, view):
if self._pos is None:
return False
if self._need_update:
self._update_subvisuals()
def _compute_bounds(self, axis, view):
if axis == 2:
return (0., 0.)
# now axis in (0, 1)
return self.pos[:, axis].min(), self.pos[:, axis].max()
class Ticker(object):
"""Class to determine tick marks
Parameters
----------
axis : instance of AxisVisual
The AxisVisual to generate ticks for.
"""
def __init__(self, axis):
self.axis = axis
def get_update(self):
major_tick_fractions, minor_tick_fractions, tick_labels = \
self._get_tick_frac_labels()
tick_pos, label_pos, anchors = self._get_tick_positions(
major_tick_fractions, minor_tick_fractions)
return tick_pos, tick_labels, label_pos, anchors
def _get_tick_positions(self, major_tick_fractions, minor_tick_fractions):
# tick direction is defined in visual coords, but use document
# coords to determine the tick length
trs = self.axis.transforms
visual_to_document = trs.get_transform('visual', 'document')
direction = np.array(self.axis.tick_direction)
direction /= np.linalg.norm(direction)
# use the document (pixel) coord system to set text anchors
anchors = []
if direction[0] < 0:
anchors.append('right')
elif direction[0] > 0:
anchors.append('left')
else:
anchors.append('center')
if direction[1] < 0:
anchors.append('bottom')
elif direction[1] > 0:
anchors.append('top')
else:
anchors.append('middle')
# now figure out the tick positions in visual (data) coords
doc_unit = visual_to_document.map([[0, 0], direction[:2]])
doc_unit = doc_unit[1] - doc_unit[0]
doc_len = np.linalg.norm(doc_unit)
vectors = np.array([[0., 0.],
direction * self.axis.minor_tick_length / doc_len,
direction * self.axis.major_tick_length / doc_len,
direction * (self.axis.major_tick_length +
self.axis.label_margin) / doc_len],
dtype=float)
minor_vector = vectors[1] - vectors[0]
major_vector = vectors[2] - vectors[0]
label_vector = vectors[3] - vectors[0]
major_origins, major_endpoints = self._tile_ticks(
major_tick_fractions, major_vector)
minor_origins, minor_endpoints = self._tile_ticks(
minor_tick_fractions, minor_vector)
tick_label_pos = major_origins + label_vector
num_major = len(major_tick_fractions)
num_minor = len(minor_tick_fractions)
c = np.empty([(num_major + num_minor) * 2, 2])
c[0:(num_major-1)*2+1:2] = major_origins
c[1:(num_major-1)*2+2:2] = major_endpoints
c[(num_major-1)*2+2::2] = minor_origins
c[(num_major-1)*2+3::2] = minor_endpoints
return c, tick_label_pos, anchors
def _tile_ticks(self, frac, tickvec):
"""Tiles tick marks along the axis."""
origins = np.tile(self.axis._vec, (len(frac), 1))
origins = self.axis.pos[0].T + (origins.T*frac).T
endpoints = tickvec + origins
return origins, endpoints
def _get_tick_frac_labels(self):
"""Get the major ticks, minor ticks, and major labels"""
minor_num = 4 # number of minor ticks per major division
if (self.axis.scale_type == 'linear'):
domain = self.axis.domain
if domain[1] < domain[0]:
flip = True
domain = domain[::-1]
else:
flip = False
offset = domain[0]
scale = domain[1] - domain[0]
transforms = self.axis.transforms
length = self.axis.pos[1] - self.axis.pos[0] # in logical coords
n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
# major = np.linspace(domain[0], domain[1], num=11)
# major = MaxNLocator(10).tick_values(*domain)
major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)
labels = ['%g' % x for x in major]
majstep = major[1] - major[0]
minor = []
minstep = majstep / (minor_num + 1)
minstart = 0 if self.axis._stop_at_major[0] else -1
minstop = -1 if self.axis._stop_at_major[1] else 0
for i in range(minstart, len(major) + minstop):
maj = major[0] + i * majstep
minor.extend(np.linspace(maj + minstep,
maj + majstep - minstep,
minor_num))
major_frac = (major - offset) / scale
minor_frac = (np.array(minor) - offset) / scale
major_frac = major_frac[::-1] if flip else major_frac
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
major_frac = major_frac[use_mask]
labels = [l for li, l in enumerate(labels) if use_mask[li]]
minor_frac = minor_frac[(minor_frac > -0.0001) &
(minor_frac < 1.0001)]
elif self.axis.scale_type == 'logarithmic':
raise NotImplementedError('logarithmic scaling is not yet supported')
elif self.axis.scale_type == 'power':
raise NotImplementedError('power scaling is not yet supported')
return major_frac, minor_frac, labels
# #############################################################################
# Translated from matplotlib
class MaxNLocator(object):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins=10, steps=None, trim=True, integer=False,
symmetric=False, prune=None):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if steps is None:
steps = [1, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
self._integer = integer
if self._integer:
self._steps = [n for n in self._steps
if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return locs
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(np.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(np.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(np.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
# #############################################################################
# Translated from http://www.justintalbot.com/research/axis-labeling/
# See "An Extension of Wilkinson's Algorithm for Positioning Tick Labels
# on Axes" # by Justin Talbot, Sharon Lin, and Pat Hanrahan, InfoVis 2010.
def _coverage(dmin, dmax, lmin, lmax):
return 1 - 0.5 * ((dmax - lmax) ** 2 +
(dmin - lmin) ** 2) / (0.1 * (dmax - dmin)) ** 2
def _coverage_max(dmin, dmax, span):
range_ = dmax - dmin
if span <= range_:
return 1.
else:
half = (span - range_) / 2.0
return 1 - half ** 2 / (0.1 * range_) ** 2
def _density(k, m, dmin, dmax, lmin, lmax):
r = (k-1.0) / (lmax-lmin)
rt = (m-1.0) / (max(lmax, dmax) - min(lmin, dmin))
return 2 - max(r / rt, rt / r)
def _density_max(k, m):
return 2 - (k-1.0) / (m-1.0) if k >= m else 1.
def _simplicity(q, Q, j, lmin, lmax, lstep):
eps = 1e-10
n = len(Q)
i = Q.index(q) + 1
if ((lmin % lstep) < eps or
(lstep - lmin % lstep) < eps) and lmin <= 0 and lmax >= 0:
v = 1
else:
v = 0
return (n - i) / (n - 1.0) + v - j
def _simplicity_max(q, Q, j):
n = len(Q)
i = Q.index(q) + 1
return (n - i)/(n - 1.0) + 1. - j
def _get_ticks_talbot(dmin, dmax, n_inches, density=1.):
# density * size gives target number of intervals,
# density * size + 1 gives target number of tick marks,
# the density function converts this back to a density in data units
# (not inches)
n_inches = max(n_inches, 2.0) # Set minimum otherwise code can crash :(
m = density * n_inches + 1.0
only_inside = False # we cull values outside ourselves
Q = [1, 5, 2, 2.5, 4, 3]
w = [0.25, 0.2, 0.5, 0.05]
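# Weights for the four scoring terms combined below:
# simplicity, coverage, density, legibility (see score = w[0]*s + w[1]*c + w[2]*d + w[3]*l).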
best_score = -2.0
best = None
j = 1.0
n_max = 1000
while j < n_max:
for q in Q:
sm = _simplicity_max(q, Q, j)
if w[0] * sm + w[1] + w[2] + w[3] < best_score:
j = n_max
break
k = 2.0
while k < n_max:
dm = _density_max(k, n_inches)
if w[0] * sm + w[1] + w[2] * dm + w[3] < best_score:
break
delta = (dmax-dmin)/(k+1.0)/j/q
z = np.ceil(np.log10(delta))
while z < float('infinity'):
step = j * q * 10 ** z
cm = _coverage_max(dmin, dmax, step*(k-1.0))
if (w[0] * sm +
w[1] * cm +
w[2] * dm +
w[3] < best_score):
break
min_start = np.floor(dmax/step)*j - (k-1.0)*j
max_start = np.ceil(dmin/step)*j
if min_start > max_start:
z = z+1
break
for start in range(int(min_start), int(max_start)+1):
lmin = start * (step/j)
lmax = lmin + step*(k-1.0)
lstep = step
s = _simplicity(q, Q, j, lmin, lmax, lstep)
c = _coverage(dmin, dmax, lmin, lmax)
d = _density(k, m, dmin, dmax, lmin, lmax)
l = 1. # _legibility(lmin, lmax, lstep)
score = w[0] * s + w[1] * c + w[2] * d + w[3] * l
if (score > best_score and
(not only_inside or (lmin >= dmin and
lmax <= dmax))):
best_score = score
best = (lmin, lmax, lstep, q, k)
z += 1
k += 1
if k == n_max:
raise RuntimeError('could not converge on ticks')
j += 1
if j == n_max:
raise RuntimeError('could not converge on ticks')
if best is None:
raise RuntimeError('could not converge on ticks')
return np.arange(best[4]) * best[2] + best[0]
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
examples/model_selection/plot_learning_curve.py
|
33
|
4505
|
"""
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is common for
more complex datasets: the training score is very high at the beginning and
decreases, while the cross-validation score is very low at the beginning and
increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
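    # Shaded bands: +/- one standard deviation of the scores across the CV splits.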
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
|
bsd-3-clause
|
rolandwz/pymisc
|
ustrader/voters/pool.py
|
2
|
4648
|
# -*- coding: utf-8 -*-
import os, datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.dates import DateFormatter
from matplotlib.widgets import MultiCursor
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from utils.rwlogging import tradesLogger as logtr
from utils.rwlogging import balLogger as logb
SLOPE_PERIOD, PROFIT_DAYS, FEE = 12, 24, 0.01
SLOPE_GUAGE, STD_GUAGE = 10, 0.15
#SLOPE_GUAGE, STD_GUAGE = 0.01, 0.1 # 15
AREA_NUM = 4
class VoterPool():
def __init__(self, num, prices):
self.num = num
self.voters = [0] * AREA_NUM
for area in range(AREA_NUM):
self.voters[area] = [['', 0, float('inf'), []]]
self.prices = prices
self.directs, self.slopes, self.stds, self.areas = calc_variants(prices)
def estimate(self, vname, ops, front):
l = len(self.directs)
front = max(front, SLOPE_PERIOD)
end = l - PROFIT_DAYS
goods = [0] * AREA_NUM
bads = [0] * AREA_NUM
for i in range(front, end):
if not checkTime(self.prices[i]['dt']): continue
area = self.areas[i]
goods[area] += ops[i] * self.directs[i]
if ops[i] != 0: goods[area] -= 1
#if ops[i] == self.directs[i]:
# goods[area] += 1
#elif self.directs[i] == 0:
# pass
#else:
# bads[area] += 1
for area in range(AREA_NUM):
#logb.info('------' + str(area) + ', ' + str(AREA_NUM))
vts, good, bad = self.voters[area], goods[area], bads[area]
#logb.info('--' + str(area) + ',' + vname + ',' + str(good) + ',' + str(bad))
gd = good - bad
if gd < vts[-1][1] - vts[-1][2]: continue
l = len(vts)
for i in range(l):
vgd = vts[i][1] - vts[i][2]
if gd > vgd:
logb.info(str(area) + ',' + vname + ',' + str(good) + ',' + str(bad))
vts.insert(i, [vname, good, bad, ops])
if l + 1 > self.num:
vts.pop(-1)
break
def showVoters(self):
logs.info('AREA, VOTER, GOOD, BAD')
for area in range(AREA_NUM):
vts = self.voters[area]
l = len(vts)
#print l
for i in range(l):
logs.info(str(area) + ',' + vts[i][0] + ',' + str(vts[i][1]) + ',' + str(vts[i][2]))
self.graph(area, i + 1, vts[i])
def graph(self, area, no, vt):
fname = str(area) + '_' + str(no) + '_' + vt[0]
dts = [p['dt'] for p in self.prices]
ps = [p['close'] for p in self.prices]
l = len(ps)
directs = []
ops = []
for i in range(l):
if self.areas[i] == area:
directs.append(self.directs[i] * 0.02)
if vt[3][i] == 0: ops.append(None)
else: ops.append(vt[3][i])
xs = range(len(directs))
fig = plt.figure()
ax1 = fig.add_subplot(211)
#ax1.xaxis.set_major_formatter(DateFormatter('%m-%d'))
#ax1.plot_date(dts, self.areas, 'y-')
#ax1.plot_date(dts, directs, 'b-')
#ax1.plot_date(dts, ops, 'r-')
ax1.plot(xs, directs, 'b-', xs, ops, 'r.')
ax1.grid()
ax1.set_ylim(-2, 2)
#ax1.plot_date(dts, stds, 'r-')
ax2 = fig.add_subplot(212)
ax2.xaxis.set_major_formatter(DateFormatter('%m-%d'))
ax2.plot_date(dts, ps, 'b-')
plt.savefig(os.path.join(os.path.dirname(__file__), '../result/' + fname + '.png'), dpi=150)
plt.close(fig)
#plt.show()
def calc_variants(prices):
ps = [p['close'] for p in prices]
l = len(prices)
directs = [0] * l
slopes = [0.0] * l
stds = [0.0] * l
areas = [0] * l
for i in range(l):
price = prices[i]
if i + PROFIT_DAYS < l:
p = prices[i]['rmb']
nextp = prices[i + PROFIT_DAYS]['rmb']
directs[i] = nextp - p
#if nextp - p > 2 * FEE:
# directs[i] = 1
#elif nextp - p < -2 * FEE:
# directs[i] = -1
#else:
# directs[i] = 0
if i < SLOPE_PERIOD - 1: continue
slopes[i] = calc_slope(ps[i - SLOPE_PERIOD + 1 : i + 1])
stds[i] = np.std(ps[i - SLOPE_PERIOD + 1 : i + 1], dtype=np.float64, ddof=0)
if slopes[i] >= SLOPE_GUAGE:
areas[i] = 3
elif slopes[i] <= -SLOPE_GUAGE:
areas[i] = 0
elif slopes[i] > -SLOPE_GUAGE and slopes[i] < SLOPE_GUAGE and stds[i] >= STD_GUAGE:
areas[i] = 2
elif slopes[i] > -SLOPE_GUAGE and slopes[i] < SLOPE_GUAGE and stds[i] < STD_GUAGE:
areas[i] = 1
else:
areas[i] = -1
return directs, slopes, stds, areas
def calc_slope(datas):
#print datas
l = len(datas)
xs = np.arange(l)
xsT = np.array([xs, np.ones(l)]).T
m, c = np.linalg.lstsq(xsT, datas)[0]
return m
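# Editorial note (illustrative, not part of the original module): calc_slope
# fits y = m*x + c by least squares and returns the slope m, so a perfectly
# linear series returns its step size:
#
#     calc_slope([1.0, 3.0, 5.0, 7.0])   # -> 2.0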
def checkTime(dt):
td = datetime.timedelta(hours=6)
ndt = dt + td
return True
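# NOTE (editorial): the unconditional return above disables the trading-hour
# filter; the weekday/hour checks below are currently unreachable.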
#print ndt.weekday, ndt.hour
if ndt.weekday() >= 5:
return False
if ndt.hour in [9, 10, 21, 22, 23, ]:
return True
if ndt.hour in [11, 15, ] and ndt.minute < 30:
return True
if ndt.hour in [13, ] and ndt.minute >= 30:
return True
return False
|
mit
|
HirokiNakahara/GUINNESS
|
gen_training_data.py
|
1
|
6214
|
# -----------------------------------------------------------------------
# gen_training_data.py:
# Training File Generator for prepared image files
#
# Creation Date : 04/Aug./2017
# Copyright (C) <2017> Hiroki Nakahara, All rights reserved.
#
# Released under the GPL v2.0 License.
#
# -----------------------------------------------------------------------
from chainer.datasets import tuple_dataset
from PIL import Image
import numpy as np
import glob
import cv2
#import cPickle as pickle # python 2.7
import _pickle as pickle # python 3.5
import matplotlib.pyplot as plt
import argparse
import random
from scipy import ndimage
import sys
parser = argparse.ArgumentParser(description='training dataset generator')
parser.add_argument('--pathfile', '-p', type=str, default='./imglist.txt',
help='Image File List (test file)')
parser.add_argument('--dataset', '-d', type=str, default='./hoge',
help='Pickle object for dataset output file name')
parser.add_argument('--size', '-s', type=int, default=32,
help='dataset size (default 32x32)')
# options for argumentation
parser.add_argument('--rotate', '-r', type=int, default=1,
help='Rotate')
parser.add_argument('--flip', '-f', type=str, default='no',
help='Flip')
parser.add_argument('--crop', '-c', type=int, default=1,
help='Crop')
parser.add_argument('--keepaspect', '-k', type=str, default='no',
help='Keep aspect ratio (default no)')
args = parser.parse_args()
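# Editorial usage sketch (hypothetical paths/options, not from the original
# script): a typical invocation reads an image list and writes the three
# output files derived from --dataset, e.g.
#
#     python gen_training_data.py -p ./imglist.txt -d ./mydataset -s 32 \
#         -r 4 -c 2 -f yes -k yes
#
# which would produce mydataset_dataset.pkl, mydataset_label.pkl and
# mydataset_tag.txt.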
dataset_fname = args.dataset + '_dataset.pkl'
label_fname = args.dataset + '_label.pkl'
tag_fname = args.dataset + '_tag.txt'
print("[INFO] IMAGE PATH FILE %s" % args.pathfile)
print("[INFO] DATASET FILE %s" % dataset_fname)
print("[INFO] LABEL FILE %s" % label_fname)
print("[INFO] TAG FILE %s" % tag_fname)
print("[INFO] DATASET SIZE %dx%d" % (int(args.size),int(args.size)))
print("[INFO] ROTATION %s" % args.rotate)
print("[INFO] FLIPPING %s" % args.flip)
print("[INFO] CROPPING %s" % args.crop)
print("[INFO] KEEP ASPECT RATIO %s" % args.keepaspect)
with open(args.pathfile, mode='r') as f:
lines2 = f.readlines()
pathsAndLabels = []
label_idx = 0
tags = []
for line in lines2:
words = line.split()
tags.append(words[1])
choped_line = words[0].rstrip('\n\r') + '/'
pathsAndLabels.append(np.asarray([choped_line, label_idx]))
print("[INFO] %s* are assigned to %d" % (choped_line, label_idx))
label_idx = label_idx + 1
# fileout tags
f = open(tag_fname, 'w')
for x in tags:
f.write(str(x) + "\n")
f.close()
# set data size
width = args.size
height = args.size
# get image path
allData = []
for pathAndLabel in pathsAndLabels:
path = pathAndLabel[0]
label = pathAndLabel[1]
imagelist = glob.glob(path + "*")
for imgName in imagelist:
allData.append([imgName, label])
allData = np.random.permutation(allData)
# set augmentation options
n_crop = args.crop
n_rotate = args.rotate
if args.flip == 'yes' or args.rotate > 1:
n_flip = 2
else:
n_flip = 1
# register all images, and apply normalization if needed
imageData = np.zeros((len(allData)*n_crop*n_rotate*n_flip,3,width,height))
labelData = np.zeros(len(allData)*n_crop*n_rotate*n_flip)
idx = 0
for pathAndLabel in allData:
sys.stderr.write('\r\033[K' + "CONVERTING IMAGE %d/%d" % (idx,len(allData)*n_crop*n_rotate*n_flip))
sys.stderr.flush()
org_img = cv2.imread(pathAndLabel[0])
if org_img is None:
print("ERROR %s CANNOT BE OPENED" % pathAndLabel[0])
exit()
for i in range(n_crop):
for k in range(n_flip):
for j in range(n_rotate):
# padding empty pixels to keep aspect ratio
if args.keepaspect == 'yes':
h, w = org_img.shape[:2]
if h > w:
dst_img = np.zeros((h,h,3)).astype(np.uint8) #* 128
d = int((h-w)/2)
dst_img[0:h,d:d+w] = org_img[:,:]
else:
dst_img = np.zeros((w,w,3)).astype(np.uint8) #* 128
d = int((w-h)/2)
dst_img[d:d+h,0:w] = org_img[:,:]
org_img = dst_img
# cropping
if i > 0:
h, w = org_img.shape[:2]
if args.keepaspect == 'no':
h4 = h // 4
w4 = w // 4
left = random.randint(0,w4)
right = random.randint(w-w4,w)
top = random.randint(0,h4)
bottom = random.randint(h - h4,h)
img = org_img[top:bottom,left:right] # y:y+h,x:x+h
else:
rows,cols = org_img.shape[:2]
# resize with cropping
dd = random.randint(0, rows // 8)
org_img = org_img[dd:rows-dd,dd:cols-dd]
rows = rows - dd
cols = cols - dd
# sliding
h4 = rows // 4
w4 = cols // 4
dw = random.randint(w4*(-1),w4)
dh = random.randint(h4*(-1),h4)
M = np.float32([[1,0,dw],[0,1,dh]])
img = cv2.warpAffine(org_img,M,(cols,rows))
else:
img = org_img
#flipping (if rotate, then flipping is also applied)
if k == 0:
pass
else:
img = cv2.flip(img, 1)
# rotation
img = ndimage.rotate( img, 2 * j, reshape=False)
# Resize
img = cv2.resize(img,(width,height))
# Transpose for Chainer dataset
reshaped = img.transpose(2, 0, 1) # (Y,X,BGR) -> (BGR,Y,X)
# store temporary memory
imageData[idx] = reshaped #bench
labelData[idx] = np.int32(pathAndLabel[1])
idx = idx + 1
imageData = imageData.astype(np.uint8)
# generate pickle file
threshold = np.int32(len(imageData)/10*9)
image = {}
label = {}
image['train'] = imageData[0:threshold]
image['test'] = imageData[threshold:]
label['train'] = labelData[0:threshold]
label['test'] = labelData[threshold:]
print("[INFO] SAVE %s as an image dataset" % dataset_fname)
with open(dataset_fname, mode='wb') as f:
pickle.dump(image, f)
print("[INFO] SAVE %s as a label dataset" % label_fname)
with open(label_fname, mode='wb') as f:
pickle.dump(label, f)
# -----------------------------------------------------------------------
# END OF PROGRAM
# -----------------------------------------------------------------------
|
gpl-2.0
|
sonnyhu/scikit-learn
|
benchmarks/bench_plot_ward.py
|
117
|
1283
|
"""
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(int(n), int(p)))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
|
bsd-3-clause
|
afritzler/keras-examples
|
simple-example.py
|
1
|
2649
|
# Python 2.7 on Jupyter
# Libraries: Keras, pandas, numpy, matplotlib, seaborn
# For compatibility
from __future__ import absolute_import
from __future__ import print_function
# For manipulating data
import pandas as pd
import numpy as np
from keras.utils import np_utils # For y values
# For plotting
%matplotlib inline
import seaborn as sns
# For Keras
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
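# NOTE (editorial): this example targets the old Keras 0.x API
# (Dense(input_dim, output_dim, init=...), show_accuracy=..., nb_epoch=...);
# newer Keras versions use a different Dense signature and fit() arguments.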
# Set data
data = np.array([
[0, 0, 0],
[1, 1, 0],
[2, 2, 0],
[3, 3, 0],
[4, 4, 0],
[5, 5, 1],
[6, 6, 1],
[7, 7, 1],
[8, 8, 1],
[9, 9, 1],
])
data = np.vstack((data, data, data, data)) # Just for sufficient input
data = pd.DataFrame(data, columns=['x', 'y', 'class'])
# Split X and y
X = data.iloc[:, :-1].values
y = data.iloc[:, -1:].values
# Get dimensions of input and output
dimof_input = X.shape[1]
dimof_output = len(set(y.flat))
print('dimof_input: ', dimof_input)
print('dimof_output: ', dimof_output)
# Set y categorical
y = np_utils.to_categorical(y, dimof_output)
# Set constants
batch_size = 128
dimof_middle = 100
dropout = 0.2
countof_epoch = 100
verbose = 0
print('batch_size: ', batch_size)
print('dimof_middle: ', dimof_middle)
print('dropout: ', dropout)
print('countof_epoch: ', countof_epoch)
print('verbose: ', verbose)
print()
# Set model
model = Sequential()
model.add(Dense(dimof_input, dimof_middle, init='uniform', activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(dimof_middle, dimof_middle, init='uniform', activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(dimof_middle, dimof_output, init='uniform', activation='softmax'))
model.compile(loss='mse', optimizer='sgd')
# Train
model.fit(
X, y,
show_accuracy=True, validation_split=0.2,
batch_size=batch_size, nb_epoch=countof_epoch, verbose=verbose)
# Evaluate
loss, accuracy = model.evaluate(X, y, show_accuracy=True, verbose=verbose)
print('loss: ', loss)
print('accuracy: ', accuracy)
print()
# Predict
# model.predict_classes(X, verbose=verbose)
print('prediction of [1, 1]: ', model.predict_classes(np.array([[1, 1]]), verbose=verbose))
print('prediction of [8, 8]: ', model.predict_classes(np.array([[8, 8]]), verbose=verbose))
# Plot
sns.lmplot('x', 'y', data, 'class', fit_reg=False).set(title='Data')
data_ = data.copy()
data_['class'] = model.predict_classes(X, verbose=0)
sns.lmplot('x', 'y', data_, 'class', fit_reg=False).set(title='Trained Result')
data_['class'] = [ 'Error' if is_error else 'Non Error' for is_error in data['class'] != data_['class']]
sns.lmplot('x', 'y', data_, 'class', fit_reg=False).set(title='Errors')
None
|
mit
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/numpy/lib/tests/test_type_check.py
|
25
|
11269
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
assert_(np.all(x), x)
class TestCommonType(TestCase):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)
acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)
assert_(common_type(ai32) == np.float64)
assert_(common_type(af16) == np.float16)
assert_(common_type(af32) == np.float32)
assert_(common_type(af64) == np.float64)
assert_(common_type(acs) == np.csingle)
assert_(common_type(acd) == np.cdouble)
class TestMintypecode(TestCase):
def test_default_1(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype), 'd')
assert_equal(mintypecode('f'), 'f')
assert_equal(mintypecode('d'), 'd')
assert_equal(mintypecode('F'), 'F')
assert_equal(mintypecode('D'), 'D')
def test_default_2(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype+'f'), 'f')
assert_equal(mintypecode(itype+'d'), 'd')
assert_equal(mintypecode(itype+'F'), 'F')
assert_equal(mintypecode(itype+'D'), 'D')
assert_equal(mintypecode('ff'), 'f')
assert_equal(mintypecode('fd'), 'd')
assert_equal(mintypecode('fF'), 'F')
assert_equal(mintypecode('fD'), 'D')
assert_equal(mintypecode('df'), 'd')
assert_equal(mintypecode('dd'), 'd')
#assert_equal(mintypecode('dF',savespace=1),'F')
assert_equal(mintypecode('dF'), 'D')
assert_equal(mintypecode('dD'), 'D')
assert_equal(mintypecode('Ff'), 'F')
#assert_equal(mintypecode('Fd',savespace=1),'F')
assert_equal(mintypecode('Fd'), 'D')
assert_equal(mintypecode('FF'), 'F')
assert_equal(mintypecode('FD'), 'D')
assert_equal(mintypecode('Df'), 'D')
assert_equal(mintypecode('Dd'), 'D')
assert_equal(mintypecode('DF'), 'D')
assert_equal(mintypecode('DD'), 'D')
def test_default_3(self):
assert_equal(mintypecode('fdF'), 'D')
#assert_equal(mintypecode('fdF',savespace=1),'F')
assert_equal(mintypecode('fdD'), 'D')
assert_equal(mintypecode('fFD'), 'D')
assert_equal(mintypecode('dFD'), 'D')
assert_equal(mintypecode('ifd'), 'd')
assert_equal(mintypecode('ifF'), 'F')
assert_equal(mintypecode('ifD'), 'D')
assert_equal(mintypecode('idF'), 'D')
#assert_equal(mintypecode('idF',savespace=1),'F')
assert_equal(mintypecode('idD'), 'D')
class TestIsscalar(TestCase):
def test_basic(self):
assert_(np.isscalar(3))
assert_(not np.isscalar([3]))
assert_(not np.isscalar((3,)))
assert_(np.isscalar(3j))
assert_(np.isscalar(long(10)))
assert_(np.isscalar(4.0))
class TestReal(TestCase):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(y, np.real(y))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.real, np.real(y))
class TestImag(TestCase):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(0, np.imag(y))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.imag, np.imag(y))
class TestIscomplex(TestCase):
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
assert_(not np.sometrue(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
class TestIsreal(TestCase):
def test_pass(self):
z = np.array([-1, 0, 1j])
res = isreal(z)
assert_array_equal(res, [1, 1, 0])
def test_fail(self):
z = np.array([-1j, 1, 0])
res = isreal(z)
assert_array_equal(res, [0, 1, 1])
class TestIscomplexobj(TestCase):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(not iscomplexobj(z))
z = np.array([-1j, 0, -1])
assert_(iscomplexobj(z))
def test_scalar(self):
assert_(not iscomplexobj(1.0))
assert_(iscomplexobj(1+0j))
def test_list(self):
assert_(iscomplexobj([3, 1+0j, True]))
assert_(not iscomplexobj([3, 1, True]))
def test_duck(self):
class DummyComplexArray:
@property
def dtype(self):
return np.dtype(complex)
dummy = DummyComplexArray()
assert_(iscomplexobj(dummy))
def test_pandas_duck(self):
# This tests a custom np.dtype duck-typed class, such as used by pandas
# (pandas.core.dtypes)
class PdComplex(np.complex128):
pass
class PdDtype(object):
name = 'category'
names = None
type = PdComplex
kind = 'c'
str = '<c16'
base = np.dtype('complex128')
class DummyPd:
@property
def dtype(self):
return PdDtype
dummy = DummyPd()
assert_(iscomplexobj(dummy))
class TestIsrealobj(TestCase):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
z = np.array([-1j, 0, -1])
assert_(not isrealobj(z))
class TestIsnan(TestCase):
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isnan(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array((0.,))/0.) == 1)
def test_integer(self):
assert_all(np.isnan(1) == 0)
def test_complex(self):
assert_all(np.isnan(1+1j) == 0)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite(TestCase):
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isfinite(z) == 1
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((0.,))/0.) == 0)
def test_integer(self):
assert_all(np.isfinite(1) == 1)
def test_complex(self):
assert_all(np.isfinite(1+1j) == 1)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf(TestCase):
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isinf(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((1.,))/0.) == 1)
def test_posinf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(1.,)/0.) == 1)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((-1.,))/0.) == 1)
def test_neginf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(-1.)/0.) == 1)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isposinf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
class TestIsneginf(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isneginf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
class TestNanToNum(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1))/0.)
assert_all(vals[0] < -1e10)
assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10)
assert_all(np.isfinite(vals[2]))
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
vals = nan_to_num([1])
assert_array_equal(vals, np.array([1], np.int))
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(0+1.j)/0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(TestCase):
def test_basic(self):
a = np.random.rand(10)
b = real_if_close(a+1e-15j)
assert_all(isrealobj(b))
assert_array_equal(a, b)
b = real_if_close(a+1e-7j)
assert_all(iscomplexobj(b))
b = real_if_close(a+1e-7j, tol=1e-6)
assert_all(isrealobj(b))
class TestArrayConversion(TestCase):
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
assert_(np.issubdtype(a.dtype, np.float))
if __name__ == "__main__":
run_module_suite()
|
mit
|
rvraghav93/scikit-learn
|
sklearn/manifold/t_sne.py
|
8
|
35910
|
# Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
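# Editorial note (illustrative, not part of the upstream implementation): the
# "condensed" layout used above is the flat upper-triangular form produced by
# scipy.spatial.distance.pdist, e.g. for three samples pdist returns
# [d01, d02, d12]; squareform converts between this vector and the full
# symmetric matrix, which is how P is expanded and re-condensed here:
#
#     from scipy.spatial.distance import pdist, squareform
#     D = pdist(X)          # shape (n * (n - 1) / 2,)
#     squareform(D)         # full (n, n) symmetric matrix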
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(np.ravel(PQd[i], order='K'), X_embedded[i] - X_embedded,
out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors array is not actually required to calculate the
divergence, but is here to match the signature of the
gradient function.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = neighbors.astype(np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
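# Editorial usage sketch (toy objective, not part of the upstream code): the
# optimizer only needs a callable returning (cost, gradient). For example, a
# simple quadratic converges towards zero with a small learning rate:
#
#     def toy_objective(p):
#         return 0.5 * np.dot(p, p), p          # cost, gradient
#
#     p_opt, error, it = _gradient_descent(toy_objective, np.array([1.0, -2.0]),
#                                          it=0, n_iter=100, learning_rate=0.1)
#
# Note that the learning_rate default of 1000.0 is tuned for t-SNE embeddings,
# not for generic objectives.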
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
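# Editorial usage sketch (random data, not part of the upstream code):
#
#     rng = np.random.RandomState(0)
#     X = rng.rand(20, 5)
#     X_embedded = X[:, :2]                               # a trivial "embedding"
#     t = trustworthiness(X, X_embedded, n_neighbors=3)   # float in [0, 1]
#
# Values close to 1.0 indicate that the k nearest neighbours in the embedding
# are also close neighbours in the original space.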
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Only used if method='exact'
Maximum number of iterations without progress before we abort the
optimization. If method='barnes_hut' this parameter is fixed to
a value of 30 and cannot be changed.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
Only used if method='exact'
If the gradient norm is below this threshold, the optimization will
be aborted. If method='barnes_hut' this parameter is fixed to a value
of 1e-3 and cannot be changed.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017619, 0.00004014],
[ 0.00010268, 0.00020546],
[ 0.00018298, -0.00008335],
[ 0.00009501, -0.00001388]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if not ((isinstance(init, string_types) and
init in ["pca", "random"]) or
isinstance(init, np.ndarray)):
msg = "'init' must be 'pca', 'random', or a numpy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'n_iter_' instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"n_iter_without_progress": self.n_iter_without_progress,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = self.min_grad_norm
# Early exaggeration
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Save the final number of iterations
self.n_iter_ = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
|
bsd-3-clause
|
Sentient07/scikit-learn
|
examples/svm/plot_rbf_parameters.py
|
20
|
8048
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
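# Editorial note (illustrative): any matplotlib image call accepts this
# normalizer through the ``norm`` keyword, exactly as done for the heatmap
# further below, e.g.
#
#     plt.imshow(scores, cmap=plt.cm.hot,
#                norm=MidpointNormalize(vmin=0.2, midpoint=0.92))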
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
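# The docstring above suggests two refinements: more cross-validation
# iterations (``n_splits``) to smooth out split-to-split noise in the scores,
# and denser ``C_range`` / ``gamma_range`` grids for a higher-resolution heat
# map. The sketch below is purely illustrative: the ranges are arbitrary, it
# reuses the scaled data prepared earlier in this script, and it is noticeably
# slower than the search above.
cv_fine = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
grid_fine = GridSearchCV(SVC(),
                         param_grid=dict(gamma=np.logspace(-9, 3, 25),
                                         C=np.logspace(-2, 10, 25)),
                         cv=cv_fine)
grid_fine.fit(X, y)
print("Refined best parameters are %s with a score of %0.2f"
      % (grid_fine.best_params_, grid_fine.best_score_))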
|
bsd-3-clause
|
alan-unravel/bokeh
|
bokeh/sampledata/gapminder.py
|
41
|
2655
|
from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides pandas DataFrame instances for four
of the datasets from gapminder.org.
These are read in from CSVs that have been downloaded from Bokeh's
sample data on S3. The original code that generated the CSVs from the
raw gapminder data is available at the bottom of this file.
'''
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
    # Get the data from the url and return only the years 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/sklearn/metrics/cluster/unsupervised.py
|
15
|
10182
|
"""Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils.fixes import bincount
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
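    Examples
    --------
    A small illustrative call (added here for clarity, it is not part of the
    original docstring): two tight, well separated clusters on a line give a
    coefficient close to 1.
    >>> X = [[0.], [1.], [9.], [10.]]
    >>> labels = [0, 0, 1, 1]
    >>> bool(silhouette_score(X, labels) > 0.8)
    True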
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
    The score is defined as the ratio of the between-cluster dispersion to
    the within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
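    Examples
    --------
    A small worked example (an illustrative addition, not from the original
    docstring): for the clusters {0, 1} and {9, 10} the between-cluster
    dispersion is 81 and the within-cluster dispersion is 1, so with 4 samples
    and 2 labels the score is 81 * (4 - 2) / (1 * (2 - 1)) = 162.
    >>> X = [[0.], [1.], [9.], [10.]]
    >>> labels = [0, 0, 1, 1]
    >>> float(calinski_harabaz_score(X, labels))
    162.0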
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
|
mit
|
charmie11/ShapeDTW
|
demo_shapedtw.py
|
1
|
1228
|
#!/usr/bin/python
from sklearn.metrics.pairwise import manhattan_distances
from load_data import load_MHAD_Mocap, sample_from_data
from shapedtw import matching_shapedtw
from visualizer import show_correspondences
if __name__ == '__main__':
dir_mocap = './'
id_subject = [1, 2]
id_action = 2
id_repeat = 1
parts = 3
for dim in range(1, 4):
# load MHAD dataset
x_train = load_MHAD_Mocap(dir_mocap,
id_subject[0],
id_action,
id_repeat,
parts=parts)[:, 0:dim]
x_test = load_MHAD_Mocap(dir_mocap,
id_subject[1],
id_action,
id_repeat,
parts=parts)[:, 0:dim]
x_train = sample_from_data(x_train, skip=10)
x_test = sample_from_data(x_test, skip=10)
dist, correspondences = matching_shapedtw(x_train,
x_test,
manhattan_distances)
show_correspondences(x_train, x_test, correspondences)
|
gpl-3.0
|
MartinDelzant/scikit-learn
|
benchmarks/bench_covertype.py
|
120
|
7381
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/datasets/samples_generator.py
|
26
|
56311
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
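    Examples
    --------
    A minimal illustrative call (an addition to this docstring); only the
    output shapes are shown because the values depend on the pseudo-random
    draw.
    >>> X, y = make_classification(n_samples=100, n_features=20,
    ...                            n_informative=2, n_redundant=2,
    ...                            random_state=42)
    >>> X.shape
    (100, 20)
    >>> y.shape
    (100,)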
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
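    Examples
    --------
    An illustrative call in the label-indicator output format (this example
    is an addition, not part of the original docstring).
    >>> X, Y = make_multilabel_classification(n_samples=20, n_features=15,
    ...                                       n_classes=4,
    ...                                       return_indicator=True,
    ...                                       random_state=0)
    >>> X.shape
    (20, 15)
    >>> Y.shape
    (20, 4)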
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
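    Examples
    --------
    A minimal illustrative call (an addition to this docstring); with this
    seed both target values occur, and they are the +/-1 labels defined by
    the threshold above.
    >>> import numpy as np
    >>> X, y = make_hastie_10_2(n_samples=100, random_state=1)
    >>> X.shape
    (100, 10)
    >>> sorted(np.unique(y).tolist())
    [-1.0, 1.0]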
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
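    Examples
    --------
    A minimal illustrative call (not part of the original docstring); with
    ``coef=True`` the ground-truth coefficients are returned alongside the
    data, so only the output shapes are shown here.
    >>> X, y, w = make_regression(n_samples=50, n_features=7, n_informative=3,
    ...                           coef=True, random_state=0)
    >>> X.shape
    (50, 7)
    >>> y.shape
    (50,)
    >>> w.shape
    (7,)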
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
    algorithms.
    Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
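    Examples
    --------
    An illustrative check (added here, not in the original docstring) that
    the returned signal really is the product of the dictionary and the
    sparse code, with the requested total number of non-zero coefficients.
    >>> import numpy as np
    >>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
    ...                                    n_features=10, n_nonzero_coefs=3,
    ...                                    random_state=0)
    >>> Y.shape, D.shape, X.shape
    ((10, 5), (10, 8), (8, 5))
    >>> int((X != 0).sum())
    15
    >>> bool(np.allclose(Y, np.dot(D, X)))
    True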
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
        elements all 1.
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
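# --- Added usage sketch (illustrative, not part of the original module) ---
# As the Notes above explain, alpha imposes sparsity through the Cholesky
# factor: a larger alpha zeroes more entries of the factor, which on average
# leaves fewer non-zeros in the resulting precision matrix.
def _demo_make_sparse_spd_matrix():
    denser = make_sparse_spd_matrix(dim=20, alpha=0.5, random_state=0)
    sparser = make_sparse_spd_matrix(dim=20, alpha=0.95, random_state=0)
    return np.count_nonzero(denser), np.count_nonzero(sparser)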
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
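# --- Added usage sketch (illustrative, not part of the original module) ---
# The second return value t is the coordinate along the roll, so colouring
# the 3-D points by t makes the underlying 1-D structure visible.
def _demo_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=1000, noise=0.05, random_state=0)
    return X.shape, t.shape   # (1000, 3) and (1000,)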
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
        The number of classes.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
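# --- Added usage sketch (illustrative, not part of the original module) ---
# The quantile construction above yields (almost) equally populated classes:
# when n_samples is divisible by n_classes the label counts are exactly equal.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    return np.bincount(y)   # array([100, 100, 100])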
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
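# --- Added usage sketch (illustrative, not part of the original module) ---
# make_biclusters returns the data plus one boolean row indicator and one
# boolean column indicator per bicluster, matching the shapes documented
# above.
def _demo_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.5, random_state=0)
    return data.shape, rows.shape, cols.shape   # (30, 20), (3, 30), (3, 20)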
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
|
bsd-3-clause
|
JosmanPS/scikit-learn
|
examples/model_selection/plot_train_error_vs_test_error.py
|
349
|
2577
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
|
bsd-3-clause
|
russellgeoff/blog
|
DMPs/dmp_rhythmic.py
|
7
|
5145
|
'''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from dmp import DMPs
import numpy as np
class DMPs_rhythmic(DMPs):
"""An implementation of discrete DMPs"""
def __init__(self, **kwargs):
"""
"""
# call super class constructor
super(DMPs_rhythmic, self).__init__(pattern='rhythmic', **kwargs)
self.gen_centers()
# set variance of Gaussian basis functions
# trial and error to find this spacing
        self.h = np.ones(self.bfs) * self.bfs  # 1.75
self.check_offset()
def gen_centers(self):
"""Set the centre of the Gaussian basis
functions be spaced evenly throughout run time"""
c = np.linspace(0, 2*np.pi, self.bfs+1)
c = c[0:-1]
self.c = c
def gen_front_term(self, x, dmp_num):
"""Generates the front term on the forcing term.
For rhythmic DMPs it's non-diminishing, so this
function is just a placeholder to return 1.
x float: the current value of the canonical system
dmp_num int: the index of the current dmp
"""
if isinstance(x, np.ndarray):
return np.ones(x.shape)
return 1
def gen_goal(self, y_des):
"""Generate the goal for path imitation.
        For rhythmic DMPs the goal is the midpoint of the range
        (average of the min and max) of the desired trajectory.
y_des np.array: the desired trajectory to follow
"""
goal = np.zeros(self.dmps)
for n in range(self.dmps):
num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = .5 * (y_des[n,num_idx].min() + \
y_des[n,num_idx].max())
return goal
def gen_psi(self, x):
"""Generates the activity of the basis functions for a given
canonical system state or path.
x float, array: the canonical system state or path
"""
if isinstance(x, np.ndarray):
x = x[:,None]
return np.exp(self.h * (np.cos(x - self.c) - 1))
def gen_weights(self, f_target):
"""Generate a set of weights over the basis functions such
that the target forcing term trajectory is matched.
f_target np.array: the desired forcing term trajectory
"""
# calculate x and psi
x_track = self.cs.rollout()
psi_track = self.gen_psi(x_track)
#efficiently calculate weights for BFs using weighted linear regression
for d in range(self.dmps):
for b in range(self.bfs):
self.w[d,b] = np.dot(psi_track[:,b], f_target[:,d]) / \
(np.sum(psi_track[:,b]) + 1e-10)
#==============================
# Test code
#==============================
if __name__ == "__main__":
# test normal run
dmp = DMPs_rhythmic(dmps=1, bfs=10, w=np.zeros((1,10)))
y_track,dy_track,ddy_track = dmp.rollout()
import matplotlib.pyplot as plt
plt.figure(1, figsize=(6,3))
plt.plot(np.ones(len(y_track))*dmp.goal, 'r--', lw=2)
plt.plot(y_track, lw=2)
plt.title('DMP system - no forcing term')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend(['goal', 'system state'], loc='lower right')
plt.tight_layout()
# test imitation of path run
import matplotlib.pyplot as plt
plt.figure(2, figsize=(6,4))
num_bfs = [10, 30, 50, 100, 10000]
    # a sine wave path to imitate
    path1 = np.sin(np.arange(0, 2*np.pi, .01) * 5)
    # a step function path to imitate
    path2 = np.zeros(path1.shape)
    path2[int(len(path2) / 2):] = .5
for ii, bfs in enumerate(num_bfs):
dmp = DMPs_rhythmic(dmps=2, bfs=bfs)
dmp.imitate_path(y_des=np.array([path1, path2]))
# change the scale of the movement
#dmp.goal[0] = 3; dmp.goal[1] = 2
y_track,dy_track,ddy_track = dmp.rollout()
plt.figure(2)
plt.subplot(211)
plt.plot(y_track[:,0], lw=2)
plt.subplot(212)
plt.plot(y_track[:,1], lw=2)
plt.subplot(211)
a = plt.plot(path1, 'r--', lw=2)
plt.title('DMP imitate path')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend([a[0]], ['desired path'], loc='lower right')
plt.subplot(212)
b = plt.plot(path2, 'r--', lw=2)
plt.title('DMP imitate path')
plt.xlabel('time (ms)')
plt.ylabel('system trajectory')
plt.legend(['%i BFs'%i for i in num_bfs], loc='lower right')
plt.tight_layout()
plt.show()
|
gpl-3.0
|
fredhusser/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
2823
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
|
bsd-3-clause
|
ngoix/OCRF
|
sklearn/tests/test_isotonic.py
|
13
|
13122
|
import warnings
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check that y_min and y_max are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
|
bsd-3-clause
|
huobaowangxi/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_truncated_svd.py
|
4
|
6076
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_almost_equal,
assert_greater, assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
    # Assert that the first 10 explained variance ratios agree between
    # the 10- and 20-component models
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
|
apache-2.0
|
mlskit/astromlskit
|
KNN/knnfront.py
|
2
|
7198
|
from PyQt4 import QtCore, QtGui
from kNN import *
from DM import *
import numpy as np
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(249, 398)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(20, 10, 221, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(20, 80, 221, 80))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setGeometry(QtCore.QRect(30, 20, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.checkBox = QtGui.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(30, 50, 171, 17))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.spinBox = QtGui.QSpinBox(self.groupBox_2)
self.spinBox.setGeometry(QtCore.QRect(150, 20, 42, 22))
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.spinBox.valueChanged.connect(self.setk)
self.groupBox_3 = QtGui.QGroupBox(Form)
self.groupBox_3.setGeometry(QtCore.QRect(20, 170, 221, 111))
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
## self.comboBox = QtGui.QComboBox(self.groupBox_3)
## self.comboBox.setGeometry(QtCore.QRect(30, 20, 161, 22))
## self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.checkBox_2 = QtGui.QCheckBox(self.groupBox_3)
self.checkBox_2.setGeometry(QtCore.QRect(30, 50, 151, 17))
self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
self.groupBox_4 = QtGui.QGroupBox(self.groupBox_3)
self.groupBox_4.setGeometry(QtCore.QRect(0, 0, 221, 111))
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.comboBox_2 = QtGui.QComboBox(self.groupBox_4)
self.comboBox_2.setGeometry(QtCore.QRect(30, 20, 161, 22))
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.activated[str].connect(self.getdis)
self.checkBox_3 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_3.setGeometry(QtCore.QRect(30, 50, 151, 17))
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.checkBox_4 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_4.setGeometry(QtCore.QRect(30, 80, 181, 17))
self.checkBox_4.setObjectName(_fromUtf8("checkBox_4"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(50, 300, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.takeinput)
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(50, 330, 161, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.clicked.connect(self.takeoutput)
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(50, 360, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_3.clicked.connect(self.startknn)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def startknn(self):
#print len(np.array(self.tr).shape),len(np.array(self.classlabels).shape)
a=train(self.tr,self.classlabels,self.k)
#print self.k
out=open("output.txt","w+")
for i in self.te:
print>>out,i,classify(a,i,distance_fn=self.dm)
# print classify(a,i,distance_fn=self.dm)
print "Done------------"
def takeinput(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
self.classlabels=list(df[list(df)[2]])
print self.classlabels
self.tr=(zip(x,y))
def takeoutput(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
#print x,y
self.te=(zip(x,y))
#print (self.te)
#print len(np.array(self.te).shape)
def getdis(self,dis="euclidean"):
if dis=="cityblock":
self.dm=cityblock_distance
elif dis=="euclidean":
self.dm=euclidean_distance
else:
self.dm=chebyshev_distance
print self.dm
def setk(self):
self.k=self.spinBox.value()
print self.k
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Learner/Classifier Name", None))
self.lineEdit.setText(_translate("Form", "K Nearest Neighbors", None))
self.groupBox_2.setTitle(_translate("Form", "Neighbors", None))
self.label.setText(_translate("Form", "Number of neighbors", None))
        self.checkBox.setText(_translate("Form", "Weigh by ranks not distances", None))
self.groupBox_3.setTitle(_translate("Form", "Distance", None))
self.checkBox_2.setText(_translate("Form", " Ignore Unknown Values", None))
self.groupBox_4.setTitle(_translate("Form", "Distance", None))
self.comboBox_2.setItemText(0, _translate("Form", "euclidean", None))
        self.comboBox_2.setItemText(1, _translate("Form", "chebyshev", None))
self.comboBox_2.setItemText(2, _translate("Form", "cityblock", None))
self.checkBox_3.setText(_translate("Form", " Ignore Unknown Values", None))
        self.checkBox_4.setText(_translate("Form", " Normalize continuous attributes", None))
self.pushButton.setText(_translate("Form", "Input train File", None))
self.pushButton_2.setText(_translate("Form", "Input test File", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
gpl-3.0
|
eranchetz/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/cm.py
|
70
|
5385
|
"""
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
        set the norm limits for image scaling; if *vmin* is a length-2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
|
agpl-3.0
|
jmetzen/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can
be decreased from 56436 down to 10000 while reasonably preserving pairwise
distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
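# Added sanity check (an illustrative sketch, not part of the original
# example): for one concrete case, n_samples = 500 and eps = 0.1, the bound
# quoted in the docstring, 4 * log(500) / (0.1**2 / 2 - 0.1**3 / 3), is
# roughly 5.3e3 components, which is why the Remarks above state that several
# thousand dimensions are needed regardless of the input dimensionality.
assert johnson_lindenstrauss_min_dim(n_samples=500, eps=0.1) > 5000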
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
moijes12/oh-mainline
|
vendor/packages/mechanize/test/test_performance.py
|
22
|
2573
|
import os
import time
import sys
import unittest
import mechanize
from mechanize._testcase import TestCase, TempDirMaker
from mechanize._rfc3986 import urljoin
KB = 1024
MB = 1024**2
GB = 1024**3
def time_it(operation):
t = time.time()
operation()
return time.time() - t
def write_data(filename, nr_bytes):
block_size = 4096
block = "01234567" * (block_size // 8)
fh = open(filename, "w")
try:
for i in range(nr_bytes // block_size):
fh.write(block)
finally:
fh.close()
def time_retrieve_local_file(temp_maker, size, retrieve_fn):
temp_dir = temp_maker.make_temp_dir()
filename = os.path.join(temp_dir, "data")
write_data(filename, size)
def operation():
retrieve_fn(urljoin("file://", filename),
os.path.join(temp_dir, "retrieved"))
return time_it(operation)
class PerformanceTests(TestCase):
def test_retrieve_local_file(self):
def retrieve(url, filename):
br = mechanize.Browser()
br.retrieve(url, filename)
size = 100 * MB
# size = 1 * KB
desired_rate = 2*MB # per second
desired_time = size / float(desired_rate)
fudge_factor = 2.
self.assert_less_than(
time_retrieve_local_file(self, size, retrieve),
desired_time * fudge_factor)
def show_plot(rows):
import matplotlib.pyplot
figure = matplotlib.pyplot.figure()
axes = figure.add_subplot(111)
axes.plot([row[0] for row in rows], [row[1] for row in rows])
matplotlib.pyplot.show()
def power_2_range(start, stop):
n = start
while n <= stop:
yield n
n *= 2
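# Hedged mini-example (not part of the original test module): power_2_range
# yields doubling sizes from start up to and including stop, which is how
# performance_plot below picks its file sizes.
assert list(power_2_range(256 * KB, 1 * MB)) == [256 * KB, 512 * KB, 1 * MB]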
def performance_plot():
def retrieve(url, filename):
br = mechanize.Browser()
br.retrieve(url, filename)
# import urllib2
# def retrieve(url, filename):
# urllib2.urlopen(url).read()
# from mechanize import _useragent
# ua = _useragent.UserAgent()
# ua.set_seekable_responses(True)
# ua.set_handle_equiv(False)
# def retrieve(url, filename):
# ua.retrieve(url, filename)
rows = []
for size in power_2_range(256 * KB, 256 * MB):
temp_maker = TempDirMaker()
try:
elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
finally:
temp_maker.tear_down()
rows.append((size//float(MB), elapsed))
show_plot(rows)
if __name__ == "__main__":
args = sys.argv[1:]
if "--plot" in args:
performance_plot()
else:
unittest.main()
|
agpl-3.0
|
bearing/dosenet-analysis
|
Programming Lesson Modules/Module 7- Data Sorting, Searching, and Manipulation.py
|
1
|
4227
|
# -*- coding: utf-8 -*-
"""
#### Module 7- Data Sorting and Searching
Computer scripts excel at performing repetitive tasks that would normally be tedious or uninteresting to do by hand. There are many useful jobs that programs can perform, but in this module I will demonstrate three common data-processing techniques: sorting, searching, and manipulation. These jobs are fundamental functions of computer scripts and are encountered in nearly any field of computational data analysis.
For this module I will be using AirMonitor's archived weather data from July 23 2015 to July 23 2016
https://www.wunderground.com/history/airport/KOAK/2015/7/23/CustomHistory.html?dayend=23&monthend=7&yearend=2016&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&format=1
"""
import csv
import io
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
url = 'https://www.wunderground.com/history/airport/KOAK/2015/7/23/CustomHistory.html?dayend=23&monthend=7&yearend=2016&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&format=1'
response= urllib.request.urlopen(url)
reader = csv.reader(io.TextIOWrapper(response))
datalist = []
timedata = []
meantemp = []
meanwind = []
rain = []
line = 0
for row in reader:
if line != 0:
        datalist.append(row)  # intermediate step: collect the rows into one list, since the URL returns comma-delimited text
line += 1
for i in range(len(datalist)):
if i !=0:
timedata.append(datetime.strptime(datalist[i][0], '%Y-%m-%d'))
meantemp.append(float(datalist[i][2]))
meanwind.append(float(datalist[i][17]))
rain.append(datalist[i][19])
data = np.array((timedata,meantemp,meanwind,rain))
# now all the data is gathered in a multidimensional array in which the 1st column has dates, 2nd column has mean temperature, 3rd column has mean wind velocity, and 4th column has precipitation data.
def sort_func(type):
    # INPUT: type is a string, either 'temp', 'wind', or 'rain', determining which row the array is sorted by
if type == 'temp':
        sorted_index = np.argsort(data[1])  # argsort returns the indices that sort the 2nd row (mean temperature) from lowest to highest
sorted_data = data[:,sorted_index] # which is used to sort the columns in the multi-dimensional array
elif type == 'wind':
        sorted_index = np.argsort(data[2])  # indices that sort the 3rd row (mean wind) from lowest to highest
sorted_data = data[:,sorted_index]
elif type == 'rain':
        sorted_index = np.argsort(data[3])  # indices that sort the 4th row (precipitation) from lowest to highest
sorted_data = data[:,sorted_index]
    else:
        raise ValueError('invalid input string')
    return sorted_data  # the function returns the array with its columns sorted
# Note: this sort function is not entirely correct. When rainfall is detectable but not measurable, Wunderground stores the value as 'T' for trace. Thus a properly sorted rain column would read [0, ..., 0, T, ..., T, 0.1, ...], etc.
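# A minimal, self-contained illustration (not from the original lesson) of the
# argsort trick used in sort_func above: argsort gives the column order that
# sorts one row, and that order is then applied to every row at once.
_demo = np.array([[10, 30, 20],
                  [1, 3, 2]])
_order = np.argsort(_demo[1])  # -> array([0, 2, 1]), the indices sorting row 1
print(_demo[:, _order])        # columns reordered so the 2nd row is ascending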
def printed_sort():
print(sort_func('temp')[1:,:9])
print(sort_func('wind')[1:,:9])
print(sort_func('rain')[1:,:9])
def search_func():
# Let's make a function that searches for the dates in which rainfall is detected (including trace amounts of rainfall, 'T')
# To do this, we can use a concept called list comprehension:
rainfall = list(data[3:,].flatten())
indices_trace = [i for i, target in enumerate(rainfall) if target == 'T']
    # Next, we replace the entries where 'T' appears with 0 so they don't interfere with the next search
for index in indices_trace:
rainfall[index] = 0
rainfall = [float(j) for j in rainfall]
indices_rain = [i for i, target in enumerate(rainfall) if target > 0]
# When we combine the two indices, we place T before the numerical values
search_index = indices_trace + indices_rain
return search_index
def printed_search():
search_index = search_func()
print(data[:,search_index])
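# A tiny illustration (not part of the original lesson) of the enumerate-based
# list comprehension used in search_func: collect the indices of the items
# that satisfy a condition.
_rain_demo = ['0', 'T', '0', '0.25', 'T']
_trace_idx = [i for i, target in enumerate(_rain_demo) if target == 'T']
print(_trace_idx)  # -> [1, 4]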
|
mit
|
jskDr/jamespy_py3
|
dl/mnist_r0.py
|
1
|
16472
|
# mnist.py
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Input, UpSampling2D
from keras.models import Model
from keras.utils import np_utils
from keras import backend as K
from mgh import recon
from . import kkeras
np.random.seed(1337) # for reproducibility
class CNN():
def __init__(self):
"""
        Invoking run() executes the full training pipeline.
"""
(X_train, y_train), (X_test, y_test) = mnist.load_data()
self.Org = (X_train, y_train), (X_test, y_test)
self.Data = self.Org
def run(self, nb_epoch=12):
batch_size = 128
nb_classes = 10
nb_epoch = nb_epoch
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
# the data, shuffled and split between train and test sets
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train, y_train), (X_test, y_test) = self.Data
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
def holo_transform(Org):
# Transform X_train and X_test using hologram filtering.
(X_train, dump_train), (X_test, dump_test) = Org
print('Performing hologram transformation...')
sim = recon.Simulator(X_train.shape[1:])
X_train_holo = np.array([sim.diffract(x) for x in X_train])
X_test_holo = np.array([sim.diffract(x) for x in X_test])
Data = (X_train_holo, dump_train), (X_test_holo, dump_test)
return Data
def recon_transform(Holo):
"""
One-shot Recon with Hologram Image
"""
(X_train_holo, dump_train), (X_test_holo, dump_test) = Holo
print('Performing first-shot recon...')
sim = recon.Simulator(X_train_holo.shape[1:])
X_train_recon = np.array([np.abs(sim.reconstruct(x))
for x in X_train_holo])
X_test_recon = np.array([np.abs(sim.reconstruct(x))
for x in X_test_holo])
Data = (X_train_recon, dump_train), (X_test_recon, dump_test)
return Data
def update2(x_train, x_test):
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 1, 28, 28))
x_test = np.reshape(x_test, (len(x_test), 1, 28, 28))
return x_train, x_test
class CNN_HOLO(CNN):
def __init__(self):
"""
        This CNN applies a hologram transformation to the input images.
        After the transformation, the CNN itself works the same way as the base class.
"""
super().__init__()
def _holo_transform_r0(self):
# Transform X_train and X_test using hologram filtering.
(X_train, y_train), (X_test, y_test) = self.Org
print('Performing hologram transformation...')
sim = recon.Simulator(X_train.shape[1:])
X_train_holo = np.array([sim.diffract(x) for x in X_train])
X_test_holo = np.array([sim.diffract(x) for x in X_test])
self.Data = (X_train_holo, y_train), (X_test_holo, y_test)
self.Holo = self.Data
def holo_transform(self):
self.Data = holo_transform(self.Org)
self.Holo = self.Data
def holo_complex_transform(self):
# Transform X_train and X_test using hologram filtering.
(X_train, y_train), (X_test, y_test) = self.Org
print('Performing complex hologram transformation...')
sim = recon.Simulator(X_train.shape[1:])
def holo(X_train):
X_train_holo_abs_l = []
X_train_holo_ang_l = []
for x in X_train:
X_train_h = sim.diffract_full(x)
X_train_holo_abs_l.append(np.abs(X_train_h))
X_train_holo_ang_l.append(np.angle(X_train_h))
X_train_holo = np.zeros(
(X_train.shape[0], 2, X_train.shape[1], X_train.shape[2]))
X_train_holo[:, 0, :, :] = np.array(X_train_holo_abs_l)
X_train_holo[:, 1, :, :] = np.array(X_train_holo_ang_l)
return X_train_holo
X_train_holo = holo(X_train)
X_test_holo = holo(X_test)
self.Data = (X_train_holo, y_train), (X_test_holo, y_test)
self.Holo_complex = self.Data
self.complex_flag = True
def _recon_transform_r0(self):
if not hasattr(self, 'Holo'):
self.holo_transform()
(X_train_holo, y_train), (X_test_holo, y_test) = self.Holo
print('Performing first-shot recon...')
sim = recon.Simulator(X_train_holo.shape[1:])
X_train_recon = np.array([np.abs(sim.reconstruct(x))
for x in X_train_holo])
X_test_recon = np.array([np.abs(sim.reconstruct(x))
for x in X_test_holo])
self.Data = (X_train_recon, y_train), (X_test_recon, y_test)
self.Recon = self.Data
def recon_transform(self):
"""
        Perform the one-shot reconstruction using the module-level recon_transform() helper.
"""
if not hasattr(self, 'Holo'):
self.holo_transform()
self.Data = recon_transform(self.Holo)
self.Recon = self.Data
def run(self, nb_epoch=12):
if hasattr(self, 'complex_flag') and self.complex_flag:
print('Classification for complex input data...')
self.run_complex(nb_epoch=nb_epoch)
else:
            print('Classification for real input data...')
super().run(nb_epoch=nb_epoch)
def run_complex(self, nb_epoch=12, kernel_size_1=None):
batch_size = 128
nb_classes = 10
nb_epoch = nb_epoch
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
if kernel_size_1 is None:
kernel_size_1 = kernel_size
# the data, shuffled and split between train and test sets
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train, y_train), (X_test, y_test) = self.Data
# number of input data sets - abs and angle
nb_rgb = X_train.shape[1]
if K.image_dim_ordering() == 'th':
input_shape = (nb_rgb, img_rows, img_cols)
else:
            raise ValueError("Only 'th' ordering is supported yet for RGB data")
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size_1[0], kernel_size_1[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
class AE:
def __init__(self):
(X_train, y_train), (X_test, y_test) = mnist.load_data()
        # Modify input and output data to be appropriate for the AE
self.Org = (X_train, X_train), (X_test, X_test)
self.Data = self.Org
def modeling(self):
input_img = Input(shape=(1, 28, 28))
# set-1
x = Convolution2D(16, 3, 3, activation='relu',
border_mode='same')(input_img) # 16,28,28
x = MaxPooling2D((2, 2), border_mode='same')(x) # 16,14,14
        x = Dropout(0.25)(x)  # Use dropout after max pooling
# set-2
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,14,14
x = MaxPooling2D((2, 2), border_mode='same')(x) # 8,7,7
        x = Dropout(0.25)(x)  # Use dropout after max pooling
# set-3
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,7,7
encoded = x
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(encoded) # 8,7,7
# x = Dropout(0.25)(x) # Use dropout after maxpolling
x = UpSampling2D((2, 2))(x) # 8,14,14
x = Convolution2D(8, 3, 3, activation='relu',
border_mode='same')(x) # 8,14,14
# x = Dropout(0.25)(x) # Use dropout after maxpolling
x = UpSampling2D((2, 2))(x) # 8, 28, 28
x = Convolution2D(16, 3, 3, activation='relu',
border_mode='same')(x) # 16, 28, 28
# x = Dropout(0.25)(x) # Use dropout after maxpolling
decoded = Convolution2D(
1, 3, 3, activation='sigmoid', border_mode='same')(x) # 1, 28, 28
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
self.autoencoder = autoencoder
def run(self, nb_epoch=100):
(x_train_in, x_train), (x_test_in, x_test) = self.Data
x_train_in, x_test_in = update2(x_train_in, x_test_in)
x_train, x_test = update2(x_train, x_test)
self.modeling()
autoencoder = self.autoencoder
history = autoencoder.fit(x_train_in, x_train,
nb_epoch=nb_epoch,
batch_size=128,
shuffle=True,
verbose=1,
validation_data=(x_test, x_test))
kkeras.plot_loss(history)
self.imshow()
#def imshow(self, x_test, x_test_in):
def imshow(self):
(_, _), (x_test_in, x_test) = self.Data
x_test_in, x_test = update2(x_test_in, x_test)
autoencoder = self.autoencoder
decoded_imgs = autoencoder.predict(x_test_in)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + n + 1)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
class _AE_HOLO_r0(AE):
def __init__(self):
"""
Hologram transformation is performed
"""
super().__init__()
def holo_transform(self):
(x_train, _), (x_test, _) = self.Org
(x_train_in, _), (x_test_in, _) = holo_transform(self.Org)
self.Data = (x_train_in, x_train), (x_test_in, x_test)
self.Holo = self.Data
def imshow(self):
(_, _), (x_test_in, x_test) = self.Data
x_test_in, x_test = update2(x_test_in, x_test)
autoencoder = self.autoencoder
decoded_imgs = autoencoder.predict(x_test_in)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, n + i + 1)
plt.imshow(x_test_in[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, n * 2 + i + 1)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
class AE_HOLO(AE):
def __init__(self):
"""
Hologram transformation is performed
"""
super().__init__()
(x_train, _), (x_test, _) = self.Org
x_train_in, x_test_in = x_train, x_test
self.Org = (x_train_in, x_train), (x_test_in, x_test)
def holo_transform(self):
CNN_HOLO.holo_transform(self)
def recon_transform(self):
CNN_HOLO.recon_transform(self)
def imshow(self):
(_, _), (x_test_in, x_test) = self.Data
x_test_in, x_test = update2(x_test_in, x_test)
autoencoder = self.autoencoder
decoded_imgs = autoencoder.predict(x_test_in)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, n + i + 1)
plt.imshow(x_test_in[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, n * 2 + i + 1)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
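# Hedged usage sketch (assumption only; the original module just defines the
# classes above and, because of the relative kkeras import, is meant to be
# imported or run with `python -m` from the package root). Guarded so that
# importing the module stays side-effect free.
if __name__ == "__main__":
    cnn = CNN_HOLO()
    cnn.holo_transform()   # replace the raw digits with their holograms
    cnn.run(nb_epoch=1)    # a single epoch is enough to exercise the pipeline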
|
mit
|
Lab603/PicEncyclopedias
|
jni-build/jni-build/jni/include/tensorflow/examples/skflow/iris_run_config.py
|
5
|
2035
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to the
  # estimator to control session configurations, e.g. num_cores
  # and gpu_memory_fraction.
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
mit
|
buddyd16/Structural-Engineering
|
Wood/e_test.py
|
1
|
1178
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 18:39:51 2019
@author: DonB
"""
from __future__ import division
import wood_classes as wood
import matplotlib.pyplot as plt
wall = wood.wood_stud_wall(1.5,5.5,10,12,"No. 2",875,150,1150,1400000,510000,565,19,90,0,0, [1,1,1,1,1,1], 0, 48, 0, 0)
max_e = (5.5/6.0)
step_e = max_e/10.0
ecc = [0+(i*step_e) for i in range(11)]
ecc.extend([1,1.5,2,2.5,3,3.5,4])
fig, ax1 = plt.subplots()
fig.set_size_inches(17, 11)
ax1.minorticks_on()
ax1.grid(b=True, which='major', color='k', linestyle='-', alpha=0.3)
ax1.grid(b=True, which='minor', color='g', linestyle='-', alpha=0.1)
ax2 = ax1.twinx()
for x in ecc:
w,p,d = wall.wall_pm_diagram_cd_stud(1,x)
if x == max_e:
ax1.plot(w,p, color='r')
ax2.plot(w,d, color='r')
else:
ax1.plot(w,p, color='k')
ax2.plot(w,d, color='k', alpha=0.4)
ax1.set_ylabel('Axial (lbs)')
ax1.set_xlabel('Moment (in-lbs)')
ax2.set_ylabel('Mid Height Deflection (in)')
plt.title('2x6 SPF No.2 - 10 ft tall - 12" spacing - variable ecc')
fig.tight_layout()
plt.savefig('wall_e_test.jpg', dpi=100)
plt.show()
|
bsd-3-clause
|
malthejorgensen/31c3-bottle-topple-supercut
|
detector.py
|
1
|
5951
|
import subprocess
import argparse
import re
import av
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Detect the sound of toppled over Club Mate bottles at 31c3 talks.')
parser.add_argument('file', help='Ogg/Opus audio file to search for sound of toppled over bottles.')
parser.add_argument('-b', '--begin', help='Start playback or analysis at specified time, e.g. 17m9s or 41s or 12392424t ("t" is libav\'s AV_TIME_BASE).')
parser.add_argument('-d', '--duration', default='1s', help='Limit playback or analysis to specified duration, e.g. 300ms or 20s')
parser.add_argument('-p', '--play', action='store_true', default=False, help='Play the specified audio file')
parser.add_argument('-n', '--no-analyze', action='store_true', default=False, help='Don\'t do spectrum (Fourier) analysis')
parser.add_argument('-v', '--verbose', action='count', default=0, help='Verboseness, e.g. -v: a little debug output -vvv: A LOT of debug output.')
args = parser.parse_args()
# CONSTANTS
# http://ffmpeg-users.933282.n4.nabble.com/Duration-format-td935367.html
AV_TIME_BASE = av.time_base
def parse_time(time_str):
m = re.match(r'^((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?((?P<milliseconds>\d+)ms)?((?P<ticks>\d+)t)?$', time_str)
if m is None or time_str == '':
raise Exception('Could not parse time: Must be of the form [MINUTESm][SECONDSs][MILLISECONDSms][TICKSt], e.g. "30m11s", "28s", "12392424t" or "4m300ms90090t".')
minutes, seconds, milliseconds, ticks = 0, 0, 0, 0
if m.group('ticks') is not None:
ticks = int(m.group('ticks'))
if m.group('milliseconds') is not None:
milliseconds = int(m.group('milliseconds'))
if m.group('seconds') is not None:
seconds = int(m.group('seconds'))
if m.group('minutes') is not None:
minutes = int(m.group('minutes'))
return (minutes, seconds, milliseconds, ticks)
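# Hedged examples of the parser above (illustrative values only): fields are
# picked out by name and missing fields default to zero.
assert parse_time('4m300ms') == (4, 0, 300, 0)
assert parse_time('28s') == (0, 28, 0, 0)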
# OPEN FILE
container = av.open(args.file)
audio_stream = [s for s in container.streams if s.type == 'audio'][0]
sample_rate = audio_stream.rate
seek_to_secs = None
if args.begin is not None:
try:
minutes, seconds, milliseconds, ticks = parse_time(args.begin)
print( parse_time(args.begin) )
except Exception as e:
print('Could not parse --begin argument: %s' % e)
exit(1)
seek_to_secs = 0
seek_to_secs += float(ticks) / AV_TIME_BASE
seek_to_secs += float(milliseconds) / 1000
seek_to_secs += seconds
seek_to_secs += 60 * minutes
seek_to_ts = int(seek_to_secs * AV_TIME_BASE)
if args.verbose >= 1:
print('Seeking to:', seek_to_ts)
container.seek(seek_to_ts)
# audio_stream.seek(seek_to_ts)
if args.duration is not None:
try:
minutes, seconds, milliseconds, ticks = parse_time(args.duration)
except Exception as e:
print('Could not parse --duration argument: %s' % e)
exit(1)
duration = 0
duration += float(ticks) / AV_TIME_BASE
duration += float(milliseconds) / 1000
duration += seconds
duration += 60 * minutes
# `audio_stream.rate` is 48000 -- 48kHz sampling rate
    sample_length = int(audio_stream.rate * duration)  # number of samples to analyze
sample = np.zeros(sample_length, dtype='float32')
i = 0
# sample2_length = 48000 * 1 # 1 second worth of samples
# sample2 = np.zeros(sample2_length, dtype='float32')
# j = 0
if args.play:
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1,
rate=48000,
output=True)
stream.start_stream()
for packet in container.demux(audio_stream):
if i >= sample.shape[0]:
break
# if j >= sample2.shape[0]:
# break
for frame in packet.decode():
if args.verbose >= 3:
print('dts:', frame.dts)
print('pts:', frame.pts)
print('time:', frame.time)
print('time_base:', frame.time_base)
if seek_to_secs is not None and frame.time < seek_to_secs:
if args.verbose >= 3:
print('Skipping %.3f < %.3f' % (frame.time, seek_to_secs))
continue
if args.play:
stream.write(frame.planes[0].to_bytes())
# '<f4': 4-byte floats little endian
frame_sample = np.fromstring(frame.planes[0].to_bytes(), dtype='<f4')
if i != sample_length:
if i + frame_sample.shape[0] > sample_length:
                sample[i:sample_length] = frame_sample[:sample_length - i]  # keep only the part that fits
i = sample_length
break
else:
sample[i:i+frame_sample.shape[0]] = frame_sample
i += frame_sample.shape[0]
# else:
# if j + frame_sample.shape[0] > sample2_length:
# sample[j:sample2_length] = frame_sample[:j-sample2_length]
# j = sample2_length
# break
# else:
# sample2[j:j+frame_sample.shape[0]] = frame_sample
# j += frame_sample.shape[0]
if not args.no_analyze:
# Plot sound wave
# plot(np.indices(sample.shape)[0]/48000, sample)
# xlabel('time [s]')
# # ylabel('amplitude [?]')
# show()
# Plot spectogram
spectrogram = plt.specgram(sample)
plt.title('Spectrogram')
plt.show()
exit()
# Plot frequencies (Fourier transform)
fourier = np.fft.rfft(sample)
freq = np.fft.rfftfreq(sample.shape[0], d=1./sample_rate)
# fourier2 = np.fft.rfft(sample2)
fig, ax = plt.subplots()
# ax.plot(freq, fourier2, color="blue", alpha=0.5) # half-transparant blue
ax.plot(freq, fourier, color="red", alpha=0.5) # half-transparant red
# plot([freq, freq], [fourier, fourier2])
ax.set_xlabel('frequency [Hz]')
ax.set_ylabel('amplitude [?]')
plt.show()
if args.play:
# stop PyAudio output stream and close PyAudio
stream.stop_stream()
stream.close()
p.terminate()
|
isc
|
iagapov/ocelot
|
mint/orbit_gui.py
|
2
|
7006
|
#from __future__ import unicode_literals
import os
import sys
from PyQt4 import QtGui, QtCore
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import ocelot.mint.swig.dcs as dcs
#from tune_common import get_sase, blm_names, init_blms, get_alarms
from ocelot.mint.flash1_interface import FLASH1MachineInterface
progname = os.path.basename(sys.argv[0])
bpm_names = ['BPM1TCOL',
'BPM6TCOL',
'BPM8TCOL',
'BPM3ECOL',
'BPM5ECOL',
'BPM2ORS',
'BPM7ORS',
'BPM9ORS',
'BPM12ORS',
'BPM1SFUND2',
'BPM1SFUND3',
'BPM1SFUND4',
'BPM1SFELC',
'BPM1SMATCH',
'BPM6SMATCH',
'BPM13SMATCH',
'BPM14SMATCH',
'BPM2UND1',
'BPM4UND1',
'BPM5UND1',
'BPM2UND2',
'BPM4UND2',
'BPM5UND2',
'BPM2UND3',
'BPM4UND3',
'BPM5UND3',
'BPM2UND4',
'BPM4UND4',
'BPM5UND4',
'BPM2UND5',
'BPM4UND5',
'BPM5UND5',
'BPM2UND6',
'BPM4UND6',
'BPM5UND6']
for i in xrange(len(bpm_names)):
bpm_names[i] = bpm_names[i].replace('BPM','')
'''
blm_names = ['14L.SMATCH',
'14R.SMATCH',
'1L.UND1',
'1R.UND1',
'1L.UND2',
'1R.UND2',
'1L.UND3',
'1R.UND3',
'1L.UND4',
'1R.UND4',
'1L.UND5',
'1R.UND5',
'1L.UND6',
'1R.UND6',
'1SFUND1','1SFUND2','1SFUND3','1SFUND4',
'1SFELC','3SFELC','4SFELC',
'10SMATCH','3SDUMP']
#blm_names = ['1L.UND1']
blm_names = ['3.1FL2SASE3','3.2FL2SASE3']
'''
bpms = []
for bpm_name in bpm_names:
bpm = dcs.BPM("TTF2.DIAG/BPM/" + bpm_name)
bpms.append(bpm)
mi = FLASH1MachineInterface()
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
self.compute_initial_figure()
#
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
class XYCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.bar(range(14), range(14), width=0.2, color='b')
self.axes.set_ylim((-4,4))
def update_figure(self):
for bpm in bpms:
dcs.get_bpm_val(bpm)
print ('bpm read:', bpm.id, bpm.x, bpm.y)
x = [bpm.x for bpm in bpms]
y = [bpm.y for bpm in bpms]
z = [bpm.z_pos for bpm in bpms]
self.axes.bar(z, x, width=0.2, color='b', alpha=0.5)
self.axes.hold(True)
self.axes.bar(z, y, width=0.2, color='g', alpha=0.5)
self.axes.hold(False)
self.axes.set_ylim((-4,4))
self.axes.set_title('Orbit')
self.draw()
class BLMCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
self.plane = 'x'
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.bar(range(14), range(14), width=0.2, color='b')
#self.axes.set_ylim((-4,4))
def update_figure(self):
y = mi.get_alarms()
z = range(0, len(y))
self.axes.bar(z, y, width=0.2, color='b')
self.axes.set_title('Beam loss (threshold perc.)')
self.draw()
class SASECanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
self.data = []
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(100)
def compute_initial_figure(self):
self.axes.plot(range(14), range(14), color='b')
#self.axes.set_ylim((-4,4))
def update_figure(self):
val = mi.get_sase(detector = 'gmd_default')
self.data.append(val)
self.axes.plot(xrange(len(self.data)), self.data, color='b')
self.axes.set_title('SASE')
self.draw()
class ApplicationWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.file_menu = QtGui.QMenu('&File', self)
self.file_menu.addAction('&Quit', self.fileQuit,
QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
self.file_menu.addAction('&Save', self.fileSave,
QtCore.Qt.CTRL + QtCore.Qt.Key_S)
self.menuBar().addMenu(self.file_menu)
self.help_menu = QtGui.QMenu('&Help', self)
self.menuBar().addSeparator()
self.menuBar().addMenu(self.help_menu)
self.main_widget = QtGui.QWidget(self)
l = QtGui.QVBoxLayout(self.main_widget)
dc1 = XYCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(dc1)
dc2 = BLMCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(dc2)
dc3 = SASECanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(dc3)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
def fileQuit(self):
self.close()
def fileSave(self):
print ('saving...')
def closeEvent(self, ce):
self.fileQuit()
qApp = QtGui.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
#qApp.exec_()
|
gpl-3.0
|
fulmicoton/pylearn2
|
pylearn2/scripts/datasets/browse_small_norb.py
|
44
|
6901
|
#!/usr/bin/env python
import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
from pylearn2.datasets import norb
warnings.warn("This script is deprecated. Please use ./browse_norb.py "
"instead. It is kept around as a tester for deprecated class "
"datasets.norb.SmallNORB",
exceptions.DeprecationWarning)
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
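    # Hedged mini-example (illustrative values, not from the original script)
    # of the inverse-lookup trick remap_instances uses above: scatter arange()
    # into an array indexed by the old labels, so indexing with an old label
    # returns its compact 0..n-1 replacement.
    _lookup = numpy.full(10, -1, dtype='int')
    _lookup[[4, 6, 7, 8, 9]] = numpy.arange(5)
    assert list(_lookup[numpy.array([6, 9, 4])]) == [1, 4, 0]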
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
zfrenchee/pandas
|
pandas/tests/indexes/timedeltas/test_astype.py
|
1
|
3308
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (TimedeltaIndex, timedelta_range, Int64Index, Float64Index,
Index, Timedelta)
from ..datetimelike import DatetimeLike
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def test_numeric_compat(self):
# Dummy method to override super's version; this test is now done
# in test_arithmetic.py
pass
def setup_method(self, method):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize('dtype', [
float, 'datetime64', 'datetime64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
msg = 'Cannot cast TimedeltaIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
def test_pickle_compat_construction(self):
pass
def test_shift(self):
# test shift for TimedeltaIndex
# err8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
|
bsd-3-clause
|
kmather73/ggplot
|
ggplot/stats/stat_summary.py
|
12
|
5323
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six import string_types
import numpy as np
import scipy.stats
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from .stat import stat
def bootstrap_statistics(series, statistic, n_samples=1000, confidence_interval=0.95):
"""
Default parameters taken from
R's Hmisc smean.cl.boot
"""
alpha = 1 - confidence_interval
inds = np.random.randint(0, len(series), size=(n_samples, len(series)))
samples = series.values[inds]
means = np.sort(statistic(samples, axis=1))
return pd.Series({'ymin': means[int((alpha/2)*n_samples)],
'ymax': means[int((1-alpha/2)*n_samples)],
'y': statistic(series)})
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95):
return bootstrap_statistics(series, np.mean, n_samples=n_samples, confidence_interval=confidence_interval)
def mean_cl_normal(series, confidence_interval=0.95):
"""
Adapted from http://stackoverflow.com/a/15034143
"""
a = np.asarray(series)
m = np.mean(a)
se = scipy.stats.sem(a)
h = se * scipy.stats.t._ppf((1+confidence_interval)/2, len(a)-1)
return pd.Series({'y': m,
'ymin': m-h,
'ymax': m+h})
def mean_sdl(series, mult=2):
m = series.mean()
s = series.std()
return pd.Series({'y': m,
'ymin': m-mult*s,
'ymax': m+mult*s})
def median_hilow(series, confidence_interval=0.95):
tail = (1 - confidence_interval) / 2
return pd.Series({'y': np.median(series),
'ymin': np.percentile(series, 100 * tail),
'ymax': np.percentile(series, 100 * (1 - tail))})
def mean_se(series, mult=1):
m = np.mean(series)
se = mult * np.sqrt(np.var(series) / len(series))
return pd.Series({'y': m,
'ymin': m-se,
'ymax': m+se})
function_dict = {'mean_cl_boot': mean_cl_boot,
'mean_cl_normal': mean_cl_normal,
'mean_sdl': mean_sdl,
'median_hilow': median_hilow,
'mean_se': mean_se}
def combined_fun_data(series, fun_y, fun_ymin, fun_ymax):
d = {}
if fun_y:
d['y'] = fun_y(series)
if fun_ymin:
d['ymin'] = fun_ymin(series)
if fun_ymax:
d['ymax'] = fun_ymax(series)
return pd.Series(d)
class stat_summary(stat):
"""
Calculate summary statistics depending on x, usually by
calculating three values ymin, y and ymax for each value of x.
Parameters
----------
fun_data : string or function
One of `"mean_cl_boot"`, `"mean_cl_normal"`, `"mean_sdl"`, `"median_hilow"` or
any function that takes a pandas series and returns a series with three
rows indexed as `y`, `ymin` and `ymax`. Defaults to `"mean_cl_boot"`.
fun_y, fun_ymin, fun_ymax : function
Any function that takes a pandas series and returns a value
Notes
-----
If any of `fun_y`, `fun_ymin` or `fun_ymax` are provided, the value of
`fun_data` will be ignored.
As R's syntax `fun.data = some_function` is not valid in python, here
`fun_data = somefunction` is used for now.
Examples
--------
General usage:
.. plot::
:include-source:
from ggplot import *
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_data = 'mean_cl_boot')
Provide own function:
.. plot::
:include-source:
import numpy as np
from ggplot import *
def median_quantile(series):
return pd.Series({'y': np.median(series),
'ymin': np.percentile(series, 5),
'ymax': np.percentile(series, 95)})
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_data = median_quantile)
    Provide different functions for y, ymin and ymax:
    .. plot::
:include-source:
import numpy as np
from ggplot import *
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_y = np.median, fun_ymin=np.min, fun_ymax=np.max)
"""
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'pointrange', 'position': 'identity', 'fun_data': 'mean_cl_boot',
'fun_y': None, 'fun_ymin': None, 'fun_ymax': None}
CREATES = {'ymin', 'ymax'}
def _calculate(self, data):
if self.params['fun_y'] or self.params['fun_ymin'] or self.params['fun_ymax']:
fun_data = lambda s: combined_fun_data(s, self.params['fun_y'], self.params['fun_ymin'], self.params['fun_ymax'])
elif isinstance(self.params['fun_data'], string_types):
fun_data = function_dict[self.params['fun_data']]
else:
fun_data = self.params['fun_data']
new_data = data.groupby('x').apply(lambda df: fun_data(df['y'])).reset_index()
data.pop('x')
data.pop('y')
# Copy the other aesthetics into the new dataframe
n = len(new_data.x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
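# Hedged illustration (not part of the original module): each summary helper
# above returns a Series indexed by 'y', 'ymin' and 'ymax'. Guarded so that
# importing the module stays side-effect free.
if __name__ == '__main__':
    _s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    print(mean_sdl(_s, mult=1))     # mean +/- one standard deviation
    print(median_hilow(_s, 0.90))   # median with 5th / 95th percentiles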
|
bsd-2-clause
|
yl565/statsmodels
|
statsmodels/sandbox/tsa/examples/ex_mle_arma.py
|
33
|
4587
|
# -*- coding: utf-8 -*-
"""
TODO: broken because of changes to arguments and import paths
fixing this needs a closer look
Created on Thu Feb 11 23:41:53 2010
Author: josef-pktd
copyright: Simplified BSD see license.txt
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import statsmodels.api as sm
from statsmodels.sandbox import tsa
from statsmodels.tsa.arma_mle import Arma # local import
from statsmodels.tsa.arima_process import arma_generate_sample
examples = ['arma']
if 'arma' in examples:
print("\nExample 1")
print('----------')
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arma_generate_sample(ar,ma,1000,0.1)
y1 -= y1.mean() #no mean correction/constant in estimation so far
arma1 = Arma(y1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit_mle(order=(1,1), method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
arma2.nar = 1
arma2.nma = 1
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
arest = tsa.arima.ARIMA(y1)
resls = arest.fit((1,0,1))
print(resls[0])
print(resls[1])
print('\nparameter estimate - comparing methods')
print('---------------------------------------')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
print(res2.params)
print('cond. least squares uses optim.leastsq ?')
errls = arest.error_estimate
print(resls[0], np.sqrt(np.dot(errls,errls)/errls.shape[0]))
err = arma1.geterrors(res2.params)
print('cond least squares parameter cov')
#print(np.dot(err,err)/err.shape[0] * resls[1])
#errls = arest.error_estimate
print(np.dot(errls,errls)/errls.shape[0] * resls[1])
# print('fmin hessian')
# print(arma1res.model.optimresults['Hopt'][:2,:2])
print('bfgs hessian')
print(res2.model.optimresults['Hopt'][:2,:2])
print('numdifftools inverse hessian')
print(-np.linalg.inv(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2,:2])
print('\nFitting Arma(1,1) to squared data')
arma3 = Arma(y1**2)
res3 = arma3.fit(method='bfgs')
print(res3.params)
print('\nFitting Arma(3,3) to data from DGP Arma(1,1)')
arma4 = Arma(y1)
arma4.nar = 3
arma4.nma = 3
#res4 = arma4.fit(method='bfgs')
res4 = arma4.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5])
print(res4.params)
print('numdifftools inverse hessian')
pcov = -np.linalg.inv(ndt.Hessian(arma4.loglike, stepMax=1e-2)(res4.params))
#print(pcov)
print('standard error of parameter estimate from Hessian')
pstd = np.sqrt(np.diag(pcov))
print(pstd)
print('t-values')
print(res4.params/pstd)
print('eigenvalues of pcov:')
print(np.linalg.eigh(pcov)[0])
print('sometimes they are negative')
print("\nExample 2 - DGP is Arma(3,3)")
print('-----------------------------')
ar = [1.0, -0.6, -0.2, -0.1]
ma = [1.0, 0.5, 0.1, 0.1]
y2 = arest.generate_sample(ar,ma,1000,0.1)
y2 -= y2.mean() #no mean correction/constant in estimation so far
print('\nFitting Arma(3,3) to data from DGP Arma(3,3)')
arma4 = Arma(y2)
arma4.nar = 3
arma4.nma = 3
#res4 = arma4.fit(method='bfgs')
print('\ntrue parameters')
print('ar', ar[1:])
print('ma', ma[1:])
res4 = arma4.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5])
print(res4.params)
print('numdifftools inverse hessian')
pcov = -np.linalg.inv(ndt.Hessian(arma4.loglike, stepMax=1e-2)(res4.params))
#print(pcov)
print('standard error of parameter estimate from Hessian')
pstd = np.sqrt(np.diag(pcov))
print(pstd)
print('t-values')
print(res4.params/pstd)
print('eigenvalues of pcov:')
print(np.linalg.eigh(pcov)[0])
print('sometimes they are negative')
arma6 = Arma(y2)
arma6.nar = 3
arma6.nma = 3
res6 = arma6.fit(start_params=[-0.5, -0.1,-0.1,0.2,0.1,0.1,0.5],
method='bfgs')
print('\nmle with bfgs')
print(res6.params)
print('pstd with bfgs hessian')
hopt = res6.model.optimresults['Hopt']
print(np.sqrt(np.diag(hopt)))
#fmin estimates for coefficients in ARMA(3,3) look good
#but not inverse Hessian, sometimes negative values for variance
|
bsd-3-clause
|