import tensorflow as tf
import efficientnet.tfkeras as efn
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
import numpy as np
import gradio as gr
# Image dimensions
IMG_HEIGHT = 224
IMG_WIDTH = 224
# Function to build the object detection model
def build_object_detection_model(img_height, img_width):
    # Replace this with your object detection model architecture and weights.
    # For example, you can use a model from TensorFlow Hub or any other source.
    object_detection_model = None  # Load your object detection model here
    return object_detection_model
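
# --- Hedged sketch (not part of the original app) -----------------------------
# The placeholder above mentions TensorFlow Hub as a possible source. The helper
# below shows one way that could look; it assumes the `tensorflow_hub` package
# and the public SSD MobileNet V2 detector, and it is never called by this app.
def build_object_detection_model_from_hub(
    model_url="https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2",
):
    import tensorflow_hub as hub  # local import: only needed if this sketch is used
    # The loaded detector expects a uint8 image batch of shape [1, H, W, 3] and
    # returns a dict with keys such as 'detection_boxes', 'detection_scores'
    # and 'detection_classes'.
    return hub.load(model_url)
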
# Function to build the classification model (EfficientNetB0 backbone + softmax head)
def build_classification_model(img_height, img_width, n):
    inp = Input(shape=(img_height, img_width, n))
    efnet = efn.EfficientNetB0(
        input_shape=(img_height, img_width, n),
        weights='imagenet',
        include_top=False
    )
    x = efnet(inp)
    x = GlobalAveragePooling2D()(x)
    x = Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    opt = tf.keras.optimizers.Adam(learning_rate=0.000003)
    loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.01)
    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model
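
# --- Hedged sketch (not part of the original app) -----------------------------
# The compile settings above suggest the weights in 'modelo_treinado.h5' were
# produced by a training run roughly like the one below. `train_ds` and `val_ds`
# are hypothetical tf.data datasets yielding (image, one-hot label) batches; they
# are not defined anywhere in this app.
def train_classification_model(train_ds, val_ds, epochs=10):
    model = build_classification_model(IMG_HEIGHT, IMG_WIDTH, 3)
    model.fit(train_ds, validation_data=val_ds, epochs=epochs)
    model.save_weights('modelo_treinado.h5')
    return model
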
# Load the object detection and classification models
object_detection_model = build_object_detection_model(IMG_HEIGHT, IMG_WIDTH)
classification_model = build_classification_model(IMG_HEIGHT, IMG_WIDTH, 3)
classification_model.load_weights('modelo_treinado.h5')
# Function to preprocess the image for classification
def preprocess_image(input_image):
    input_image = tf.image.resize(input_image, (IMG_HEIGHT, IMG_WIDTH))
    input_image = input_image / 255.0
    return input_image
# Function to perform object detection and classification
def predict_image(input_image):
    # Preprocess the input image
    input_image_classification = preprocess_image(input_image)
    # Make a prediction using the loaded classification model
    input_image_classification = tf.expand_dims(input_image_classification, axis=0)
    classification_prediction = classification_model.predict(input_image_classification)
    # Perform object detection here using the object_detection_model.
    # Replace this with your object detection logic to get bounding box coordinates.
    # The output is an array of predictions (for two-class classification it looks
    # like [[probability_class_0, probability_class_1]]).
    # Interpret the result and format it for display.
    class_names = ["Normal", "Cataract"]
    predicted_class = class_names[np.argmax(classification_prediction)]
    probability = classification_prediction[0][np.argmax(classification_prediction)]
    # You can format the result with the object detection bounding box and label here,
    # for example:
    # formatted_text = f"Predicted Class: {predicted_class}\nProbability: {probability:.2%}\nObject Detection: {bounding_box_coordinates}"
    # Return the formatted result
    formatted_text = f"Predicted Class: {predicted_class}\nProbability: {probability:.2%}"
    return formatted_text
# Create a Gradio interface for making predictions
iface = gr.Interface(
    fn=predict_image,
    inputs="image",
    outputs="text",
    interpretation="default"
)
# Launch the Gradio interface
iface.launch()
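
# Note (assumption, not part of the original app): when running inside a container
# or a hosted Space you may need to bind the server explicitly, e.g.:
#   iface.launch(server_name="0.0.0.0", server_port=7860)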