from base_explainer import BaseExplainer
import tensorflow.keras as keras
import numpy as np
from lime import lime_image
from skimage.segmentation import mark_boundaries
import cv2


class LIMEExplainer(BaseExplainer):
    explainer = lime_image.LimeImageExplainer()

    # implementation of the abstract method
    def get_explanation(self, img, model, img_size, props, preprocess_input=None, index=None):
        # convert the image into a preprocessed array
        img_array = self.__transform_img_fn([img], img_size, preprocess_input)
        # build the LIME explanation
        explanation = self.explainer.explain_instance(img_array[0].astype('double'), model.predict, top_labels=6, hide_color=0, num_samples=500, random_seed=101)
        # get the image and the mask for the top predicted label
        temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)
        # use mark_boundaries to produce an image ready for display
        explanation_img = np.uint8(mark_boundaries(props["undo_changes"](temp), mask) * 255)

        # convert from BGR to RGB
        explanation_img = cv2.cvtColor(explanation_img, cv2.COLOR_BGR2RGB)

        # return the explanation as a PIL image
        return keras.preprocessing.image.array_to_img(explanation_img)

    def __transform_img_fn(self, path_list, size, preprocess_input):
        # load each image, resize it, preprocess it and stack everything into a single batch
        out = []
        for img_path in path_list:
            img = keras.preprocessing.image.load_img(img_path, target_size=size)
            x = keras.preprocessing.image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            out.append(x)
        return np.vstack(out)
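

# A minimal usage sketch (not part of the original file) showing how get_explanation
# might be called. The MobileNetV2 model, the image path "example.jpg" and the
# "undo_changes" lambda are hypothetical stand-ins for the arguments the method expects.
if __name__ == "__main__":
    from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input

    model = MobileNetV2(weights="imagenet")  # any Keras image classifier works here
    explainer = LIMEExplainer()
    explanation = explainer.get_explanation(
        "example.jpg",                                  # hypothetical input image path
        model,
        img_size=(224, 224),
        props={"undo_changes": lambda x: x / 2 + 0.5},  # maps [-1, 1] pixels back to [0, 1] for display
        preprocess_input=preprocess_input,
    )
    explanation.save("lime_explanation.png")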