Create app.py
app.py
ADDED
@@ -0,0 +1,209 @@
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 18:31:26 2022

@author: gabri
"""

import numpy as np
import tensorflow as tf
import gradio as gr
import requests
from PIL import Image
import matplotlib.cm as cm
# import matplotlib.pyplot as plt

models_links = {
    'xception': r'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%205/pesos.h5',
    'resnet': r'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%209/pesos.h5',
    'efficientnet': 'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%2010/pesos.h5'}

# cache: model name -> local path of the downloaded weights
model_weights = {}

model_last_convolutional_layer = {
    'xception': 'block14_sepconv2_act',
    'resnet': 'conv5_block3_3_conv',
    'efficientnet': 'top_conv'}

classes = ['Códega', 'Moscatel Galego', 'Rabigato', 'Tinta Roriz', 'Tinto Cao', 'Touriga Nacional']

# constants for inference
target_size_dimension = 300


def define_model(model):
    weights = get_weights(model)
    if model == 'efficientnet':
        preprocessing_function = tf.keras.applications.efficientnet.preprocess_input
        model = tf.keras.applications.EfficientNetB3(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg')
    elif model == 'resnet':
        preprocessing_function = tf.keras.applications.resnet_v2.preprocess_input
        model = tf.keras.applications.resnet_v2.ResNet101V2(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg')
    else:
        preprocessing_function = tf.keras.applications.xception.preprocess_input
        model = tf.keras.applications.Xception(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg')

    x = tf.keras.layers.Dense(512, activation='relu')(model.output)
    x = tf.keras.layers.Dropout(0.25)(x)
    x = tf.keras.layers.Dense(512, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.25)(x)
    output = tf.keras.layers.Dense(len(classes), activation='softmax')(x)
    nmodel = tf.keras.models.Model(model.input, output)
    nmodel.load_weights(weights)
    return preprocessing_function, nmodel
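
# Note: the 'imagenet' weights requested above are immediately overwritten by
# nmodel.load_weights(weights). Assuming each pesos.h5 checkpoint stores weights
# for the full network (as the successful load implies), weights=None would skip
# the extra ImageNet download with the same end result.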


def get_weights(model):
    if model not in model_weights:
        r = requests.get(models_links[model], allow_redirects=True)
        with open(model + '.h5', 'wb') as f:
            f.write(r.content)
        model_weights[model] = model + '.h5'
    return model_weights[model]
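
# Alternative download path (a sketch, not what the app uses): since the weights
# live on the Hugging Face Hub, huggingface_hub's hf_hub_download could fetch and
# cache them instead of a raw requests.get. The repo_id is taken from models_links;
# the filename mirrors the URL path and the experiment numbering is an assumption.
def get_weights_via_hub(model):
    from huggingface_hub import hf_hub_download
    experiment = {'xception': 5, 'resnet': 9, 'efficientnet': 10}[model]
    return hf_hub_download(
        repo_id='gabri14el/grapevine_classification',
        filename='experimentos/classificacao/Experimento %d/pesos.h5' % experiment)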


def get_img_array(img_path, size, expand=True):
    # `img` is a PIL image resized to `size`
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=size)
    # `array` is a float32 NumPy array of shape (size[0], size[1], 3)
    array = tf.keras.preprocessing.image.img_to_array(img)
    # optionally add a dimension to turn the array into a "batch"
    # of shape (1, size[0], size[1], 3)
    if expand:
        array = np.expand_dims(array, axis=0)
    return array


def make_gradcam_heatmap(img_array, grad_model, last_conv_layer_name, pred_index=None):
    # `grad_model` (built by the caller) maps the input image to the activations
    # of the last conv layer as well as the output predictions.

    # Compute the gradient of the top predicted class (or the chosen
    # `pred_index`) for the input image with respect to the activations
    # of the last conv layer
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    # This is the gradient of the output neuron (top predicted or chosen)
    # with regard to the output feature map of the last conv layer
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature-map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # Multiply each channel in the feature-map array by "how important this
    # channel is" with regard to the predicted class, then sum all the
    # channels to obtain the class-activation heatmap
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, normalize the heatmap between 0 and 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
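
# In symbols: with A_k the k-th channel of the last conv feature map and
# w_k the mean over spatial positions (i, j) of d(class score)/d(A_k[i, j]),
# the function above computes
#     heatmap[i, j] = ReLU(sum_k w_k * A_k[i, j]) / max(heatmap)
# which is exactly the matrix product and rescaling in its last few lines.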


def save_and_display_gradcam(img, heatmap, cam_path="cam.jpg", alpha=0.4):
    # Rescale heatmap to the 0-255 range
    heatmap = np.uint8(255 * heatmap)

    # Binary mask of the activated region, resized to the input image
    im = Image.fromarray(heatmap)
    im = im.resize((img.shape[1], img.shape[0]))
    im = np.asarray(im)
    im = np.where(im > 0, 1, im)

    # Use the jet colormap to colorize the heatmap
    jet = cm.get_cmap("jet")

    # Use the RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an image with the RGB-colorized heatmap
    jet_heatmap = tf.keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = tf.keras.preprocessing.image.img_to_array(jet_heatmap)

    # Superimpose the heatmap on the original image
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = tf.keras.preprocessing.image.array_to_img(superimposed_img)

    # Save the superimposed image
    # superimposed_img.save(cam_path)

    return superimposed_img, im


def infer(model_name, input_image):
    preprocess, model = define_model(model_name)

    img_processed = preprocess(np.expand_dims(input_image, axis=0))

    predictions = model.predict(img_processed)
    predictions = np.squeeze(predictions)

    result = {}
    for i in range(len(classes)):
        result[classes[i]] = float(predictions[i])

    # For Grad-CAM, drop the softmax on the output layer and build a model that
    # maps the input to the last conv layer's activations plus the predictions
    model.layers[-1].activation = None
    grad_model = tf.keras.models.Model(
        [model.inputs],
        [model.get_layer(model_last_convolutional_layer[model_name]).output, model.output])

    heatmap = make_gradcam_heatmap(img_processed, grad_model, model_last_convolutional_layer[model_name])
    heat, mask = save_and_display_gradcam(input_image, heatmap)

    return result, heat
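
# Local smoke test for the pipeline above (a sketch; the image path is an
# assumption, any RGB photo on disk would do):
#
#   img = get_img_array('leaf.jpg', (target_size_dimension, target_size_dimension), expand=False)
#   result, heat = infer('xception', img.astype(np.uint8))
#   print(result)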


# the inputs: a model selector plus an image, resized by Gradio to the target size
css = ".output-image, .input-image, .image-preview {height: 300px !important}"
inputs = [gr.Radio(["resnet", "efficientnet", "xception"], label='Choose a model'), gr.inputs.Image(shape=(target_size_dimension, target_size_dimension), label='Select an image')]
# the app outputs the class probabilities and the Grad-CAM overlay
output = [gr.outputs.Label(label="Result"), gr.outputs.Image(type="pil", label="Heatmap (Grad-CAM)")]
# it's good practice to pass examples, a description and a title to guide users
# (these example files are not wired into the Interface below; each example
# would also need a model choice to match the two inputs)
examples = [["./content/examples/Frog.jpg"], ["./content/examples/Truck.jpg"]]
title = "Grapevine image classification"
description = "Upload an image to classify it. The allowed classes are: Códega, Moscatel Galego, Rabigato, Tinta Roriz, Tinto Cao, Touriga Nacional <p><b>Space author: Gabriel Carneiro</b> <br><b> [email protected] </b> </p>"

gr_interface = gr.Interface(infer, inputs, output, allow_flagging=False, analytics_enabled=False, css=css, title=title, description=description).launch(enable_queue=True, debug=False)
# gr_interface.launch()
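
# Note: the Interface is launched at import time, which is how a Hugging Face
# Space executes app.py. For local runs one would more conventionally guard the
# launch with `if __name__ == '__main__':` (a convention, not a Spaces requirement).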