import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras import backend as K
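
# Note (assumption): the FixedDropout layer defined below mirrors the custom
# dropout layer used by the third-party `efficientnet` Keras package, which is
# presumably how 'modelo_treinado.h5' was exported. Keras can only reload an
# H5 model containing a custom layer if a layer with that name is available at
# load time, hence the stand-in implementation here.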
# Define the custom FixedDropout layer needed to deserialize the saved model.
class FixedDropout(tf.keras.layers.Layer):
    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super(FixedDropout, self).__init__(**kwargs)
        self.rate = rate
        self.noise_shape = noise_shape
        self.seed = seed

    def call(self, inputs, training=None):
        if training is None:
            training = K.learning_phase()
        # Apply dropout only in training mode; pass inputs through unchanged otherwise.
        return K.in_train_phase(
            lambda: K.dropout(inputs, self.rate, noise_shape=self.noise_shape, seed=self.seed),
            inputs,
            training=training,
        )

    def get_config(self):
        # Serialize the constructor arguments so the layer can be saved and reloaded.
        config = super(FixedDropout, self).get_config()
        config['rate'] = self.rate
        config['noise_shape'] = self.noise_shape
        config['seed'] = self.seed
        return config
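
# Alternative (sketch): instead of wrapping every load in a custom_object_scope
# as done in load_model below, the layer could be registered globally, e.g.:
#   tf.keras.utils.get_custom_objects()['FixedDropout'] = FixedDropout
# Either approach lets tf.keras.models.load_model resolve the custom layer.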

class ImageClassifierCataract:
    def __init__(self, model_path):
        self.model_path = model_path
        self.model = self.load_model()
        self.class_labels = ["Normal", "Cataract"]

    def load_model(self):
        # Load the trained TensorFlow model, making the custom FixedDropout
        # layer available during deserialization.
        with tf.keras.utils.custom_object_scope({'FixedDropout': FixedDropout}):
            model = tf.keras.models.load_model(self.model_path)
        return model

    def classify_image(self, input_image):
        # Preprocess: resize to the model's expected input size and scale to [0, 1].
        input_image = tf.image.resize(input_image, (192, 256))
        input_image = input_image / 255.0
        input_image = np.expand_dims(input_image, axis=0)

        # Run inference and pick the most likely class.
        prediction = self.model.predict(input_image)
        class_index = np.argmax(prediction)
        predicted_class = self.class_labels[class_index]

        # Rebuild a displayable uint8 image, add a white strip at the bottom,
        # and write the predicted label onto it.
        output_image = (input_image[0] * 255).astype('uint8')
        output_image = cv2.copyMakeBorder(output_image, 0, 50, 0, 0, cv2.BORDER_CONSTANT, value=(255, 255, 255))
        cv2.putText(output_image, predicted_class, (10, output_image.shape[0] - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)

        # Draw a centered reference box on the image.
        image_height, image_width, _ = output_image.shape
        box_size = 100
        box_x = (image_width - box_size) // 2
        box_y = (image_height - box_size) // 2
        object_box_color = (255, 0, 0)
        cv2.rectangle(output_image, (box_x, box_y), (box_x + box_size, box_y + box_size), object_box_color, 2)
        # Formatted HTML block with background information about the application.
        info_html = """
        <div style="background-color: #f2f2f2; padding: 10px;">
        <h2>Cataract Detection</h2>
        <p>Cataract is one of the most serious eye diseases and can cause blindness if left untreated. Detecting the disease at an early stage, rather than at an advanced one, can keep the patient from going blind.</p>
        <p>For this reason, suspected patients must be examined regularly. Continuous monitoring and follow-up of patients is a tiring and laborious process. To support the work and procedures of ophthalmologists, two different deep learning models are proposed here for the diagnosis and detection of cataract.</p>
        <p>The proposed deep learning models were run on a fundus image dataset containing normal and cataract cases.</p>
        </div>
        """
        # Combine the output elements.
        output_text = f"Predicted Class: {predicted_class}"
        return output_image, output_text, info_html
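
    # Note: the three returned values are consumed positionally by the Gradio
    # interface below, so their order must match the `outputs` list declared
    # in run_interface (image, text, html).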
    def run_interface(self):
        input_interface = gr.Interface(
            fn=self.classify_image,
            inputs="image",
            outputs=["image", "text", "html"],
            live=True
        )
        input_interface.launch()


if __name__ == "__main__":
    model_path = 'modelo_treinado.h5'  # Replace with the path to your trained model
    app = ImageClassifierCataract(model_path)
    app.run_interface()
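
# Quick local test without launching the web UI (illustrative sketch; assumes a
# sample fundus image named 'example.jpg' exists next to this script):
#
#   from PIL import Image
#   classifier = ImageClassifierCataract('modelo_treinado.h5')
#   img = np.array(Image.open('example.jpg').convert('RGB'))
#   image_out, label, html = classifier.classify_image(img)
#   print(label)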