import cv2
import numpy as np
import tensorflow.keras as keras
from lime import lime_image
from skimage.segmentation import mark_boundaries

from base_explainer import BaseExplainer


class LIMEExplainer(BaseExplainer):
    """Produces LIME explanations for image classifiers."""

    # A single LimeImageExplainer is shared by all instances of this class.
    explainer = lime_image.LimeImageExplainer()

    def get_explanation(self, img, model, img_size, props, preprocess_input=None, index=None):
        """Return a PIL image with LIME superpixel boundaries drawn on it."""
        # Load the image and apply the model's preprocessing (batch of size 1).
        img_array = self.__transform_img_fn([img], img_size, preprocess_input)

        # Perturb the image, query the model, and fit LIME's local surrogate model.
        explanation = self.explainer.explain_instance(
            img_array[0].astype('double'), model.predict,
            top_labels=6, hide_color=0, num_samples=500, random_seed=101)

        # Keep the ten most influential superpixels for the top predicted label.
        temp, mask = explanation.get_image_and_mask(
            explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)

        # Undo the model-specific preprocessing, draw superpixel boundaries,
        # and scale the result to 0-255.
        explanation_img = np.uint8(mark_boundaries(props["undo_changes"](temp), mask) * 255)
        # Swap the first and last colour channels (BGR <-> RGB).
        explanation_img = cv2.cvtColor(explanation_img, cv2.COLOR_BGR2RGB)

        return keras.preprocessing.image.array_to_img(explanation_img)

    def __transform_img_fn(self, path_list, size, preprocess_input):
        """Load, resize and preprocess each image path into a single batch array."""
        out = []
        for img_path in path_list:
            img = keras.preprocessing.image.load_img(img_path, target_size=size)
            x = keras.preprocessing.image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            if preprocess_input is not None:
                x = preprocess_input(x)
            out.append(x)
        return np.vstack(out)
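

# Usage sketch: a minimal example of how this explainer might be wired up.
# The model choice (MobileNetV2), the image path "cat.jpg", and the
# "undo_changes" lambda are illustrative assumptions; "undo_changes" must map
# preprocessed pixels back to the [0, 1] range expected by mark_boundaries.
if __name__ == "__main__":
    from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input

    model = MobileNetV2(weights="imagenet")
    # MobileNetV2 preprocessing maps pixels to [-1, 1]; undo it back to [0, 1].
    props = {"undo_changes": lambda x: x / 2.0 + 0.5}

    explainer = LIMEExplainer()
    explanation = explainer.get_explanation(
        "cat.jpg", model, (224, 224), props, preprocess_input=preprocess_input)
    explanation.save("lime_explanation.png")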