# coding:utf-8

import numpy as np


def direct_sigmoid(x):
    """Apply the sigmoid operation.
            y = 1 / (1 + exp(-x))
    """
    y = 1. / (1. + np.exp(-x))
    return y


def inverse_sigmoid(x):
    """Apply the inverse sigmoid (logit) operation.
            y = -ln((1 - x) / x)
    """
    y = -1 * np.log((1 - x) / x)
    return y

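
# Hedged usage sketch (not part of the original module): a quick round-trip check
# showing that inverse_sigmoid recovers the input passed to direct_sigmoid.
def _sigmoid_roundtrip_example():
    x = np.array([-2.0, 0.0, 3.0])              # arbitrary illustrative values
    y = direct_sigmoid(x)
    return np.allclose(inverse_sigmoid(y), x)   # expected: True
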

def transform(X, components_, explained_variance_, mean_=None, whiten=False):
    """Apply dimensionality reduction to X.

    X is projected on the first principal components previously extracted
    from a training set.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.
    components_ : array-like, shape (n_components, n_features)
        Principal axes in feature space.
    explained_variance_ : array-like, shape (n_components,)
        Variance explained by each of the selected components.
    mean_ : array-like, shape (n_features,), optional
        Per-feature mean subtracted from X before projection.
    whiten : bool, optional
        When True (False by default) the projected data are divided by the
        square root of ``explained_variance_`` to ensure uncorrelated outputs
        with unit component-wise variances.
        Whitening removes some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of downstream estimators by
        making data respect some hard-wired assumptions.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """

    if mean_ is not None:
        X = X - mean_
    X_transformed = np.dot(X, components_.T)
    if whiten:
        X_transformed /= np.sqrt(explained_variance_)
    return X_transformed


def inverse_transform(X, components_, explained_variance_, mean_=None, whiten=False):
    """Transform data back to its original space.

    In other words, return an input X_original whose transform would be X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_components)
        New data, where n_samples is the number of samples
        and n_components is the number of components.
    components_ : array-like, shape (n_components, n_features)
        Principal axes in feature space.
    explained_variance_ : array-like, shape (n_components,)
        Variance explained by each of the selected components.
    mean_ : array-like, shape (n_features,), optional
        Per-feature mean added back after the inverse projection.
    whiten : bool, optional
        When True (False by default) the whitening applied by ``transform`` is
        undone: the components are rescaled by the square root of
        ``explained_variance_`` before the inverse projection.

    Returns
    -------
    X_original : array-like, shape (n_samples, n_features)
    """
    if whiten:
        X_transformed = np.dot(X, np.sqrt(explained_variance_[:, np.newaxis]) * components_)
    else:
        X_transformed = np.dot(X, components_)

    if mean_ is not None:
        X_transformed = X_transformed + mean_

    return X_transformed

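
# Hedged usage sketch (not part of the original module): build ``components_`` and
# ``explained_variance_`` with a plain SVD-based PCA, then check that
# inverse_transform(transform(X)) recovers X when all components are kept.
def _pca_roundtrip_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)                          # illustrative data
    mean_ = X.mean(axis=0)
    # Rows of Vt are the principal axes of the centered data.
    _, S, Vt = np.linalg.svd(X - mean_, full_matrices=False)
    components_ = Vt                              # shape (n_components, n_features)
    explained_variance_ = (S ** 2) / (X.shape[0] - 1)
    Z = transform(X, components_, explained_variance_, mean_=mean_, whiten=True)
    X_rec = inverse_transform(Z, components_, explained_variance_, mean_=mean_, whiten=True)
    return np.allclose(X_rec, X)                  # expected: True
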

class IOUMetric(object):
    """
    Class to calculate mean IoU (and related metrics) by accumulating a
    confusion matrix with the fast_hist method.
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # Confusion matrix: entry (i, j) counts pixels with ground-truth
        # class i that were predicted as class j.
        mask = (label_true >= 0) & (label_true < self.num_classes)
        hist = np.bincount(
            self.num_classes * label_true[mask].astype(int) +
            label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
        return hist

    def add_batch(self, predictions, gts):
        for lp, lt in zip(predictions, gts):
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())

    def evaluate(self):
        # Overall pixel accuracy.
        acc = np.diag(self.hist).sum() / self.hist.sum()
        # Per-class accuracy (recall), averaged over classes.
        acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        # Per-class intersection over union and its mean.
        iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
        mean_iu = np.nanmean(iu)
        # Frequency-weighted IoU.
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        return acc, acc_cls, iu, mean_iu, fwavacc
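

# Hedged usage sketch (not part of the original module): accumulate a toy
# two-class segmentation batch and read back the metrics.
def _iou_metric_example():
    metric = IOUMetric(num_classes=2)
    gts = [np.array([[0, 0], [1, 1]])]            # ground-truth label map
    predictions = [np.array([[0, 1], [1, 1]])]    # predicted label map
    metric.add_batch(predictions, gts)
    acc, acc_cls, iu, mean_iu, fwavacc = metric.evaluate()
    return mean_iu                                # ~0.583 for this toy batch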