|
""" |
|
Adapted from https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/evaluations/evaluator.py |
|
""" |
|
|
|
|
|
import warnings |
|
|
|
import numpy as np |
|
from scipy import linalg |
|
|
|
|
|
class InvalidFIDException(Exception):
    """Exception type for invalid FID computations (not raised in this chunk;
    presumably used by callers elsewhere in the evaluator — TODO confirm)."""

    pass
|
|
|
|
|
class FIDStatistics:
    """Mean and covariance of a feature distribution, used to compute FID."""

    def __init__(self, mu: np.ndarray, sigma: np.ndarray):
        # mu: mean feature vector; sigma: covariance matrix of the features.
        self.mu = mu
        self.sigma = sigma

    def frechet_distance(self, other, eps=1e-6):
        """
        Compute the Frechet distance between two sets of statistics.
        """
        # Promote to at least 1-D / 2-D so scalar statistics also work.
        mu1 = np.atleast_1d(self.mu)
        mu2 = np.atleast_1d(other.mu)
        sigma1 = np.atleast_2d(self.sigma)
        sigma2 = np.atleast_2d(other.sigma)

        assert (
            mu1.shape == mu2.shape
        ), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
        assert (
            sigma1.shape == sigma2.shape
        ), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"

        mean_diff = mu1 - mu2

        # Matrix square root of the covariance product; can blow up when the
        # product is (near-)singular, in which case we regularize and retry.
        sqrt_prod, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if not np.isfinite(sqrt_prod).all():
            warnings.warn(
                "fid calculation produces singular product; adding %s to diagonal of cov estimates"
                % eps
            )
            regularizer = np.eye(sigma1.shape[0]) * eps
            sqrt_prod = linalg.sqrtm((sigma1 + regularizer).dot(sigma2 + regularizer))

        # Numerical error can leave a tiny imaginary component; strip it if
        # negligible, otherwise treat the result as invalid.
        if np.iscomplexobj(sqrt_prod):
            if not np.allclose(np.diagonal(sqrt_prod).imag, 0, atol=1e-3):
                m = np.max(np.abs(sqrt_prod.imag))
                raise ValueError("Imaginary component {}".format(m))
            sqrt_prod = sqrt_prod.real

        return (
            mean_diff.dot(mean_diff)
            + np.trace(sigma1)
            + np.trace(sigma2)
            - 2 * np.trace(sqrt_prod)
        )
|
|
|
|
|
def compute_statistics(feats: np.ndarray) -> FIDStatistics:
    """Summarize a (num_samples, dim) feature array as FID statistics
    (per-dimension mean and the feature covariance matrix)."""
    return FIDStatistics(
        mu=np.mean(feats, axis=0),
        sigma=np.cov(feats, rowvar=False),
    )
|
|
|
|
|
def compute_inception_score(preds: np.ndarray, split_size: int = 5000) -> float:
    """Compute the Inception Score from softmax predictions.

    preds is split into consecutive chunks of at most split_size rows; each
    chunk's score is exp(mean KL(p(y|x) || p(y))) with the marginal p(y)
    estimated within the chunk, and the chunk scores are averaged.
    """
    split_scores = []
    for start in range(0, len(preds), split_size):
        chunk = preds[start : start + split_size]
        # Marginal distribution over classes within this chunk.
        marginal = np.mean(chunk, axis=0, keepdims=True)
        kl_div = chunk * (np.log(chunk) - np.log(marginal))
        split_scores.append(np.exp(np.mean(np.sum(kl_div, axis=1))))
    return float(np.mean(split_scores))
|
|