Spaces:
Sleeping
Sleeping
File size: 3,417 Bytes
adf2111 08cbfae 2d807c6 259195c 2d807c6 08cbfae 2d807c6 08cbfae 6c07df1 2d807c6 08cbfae 2d807c6 08cbfae 6c07df1 08cbfae 6c07df1 7e43527 b0a8cf0 6c07df1 2d807c6 977a5b3 6c07df1 08cbfae 6c07df1 08cbfae 2d807c6 6c07df1 bcdae47 6c07df1 08cbfae 6c07df1 2d807c6 6a61f29 08cbfae 2d807c6 7e43527 08cbfae 8d9c7cb 08cbfae 6c07df1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 |
import tensorflow as tf
import efficientnet.tfkeras as efn
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
import numpy as np
import gradio as gr
# Image dimensions (pixels) expected by the models below.
IMG_HEIGHT = 224
IMG_WIDTH = 224
def build_object_detection_model(img_height, img_width):
    """Build the object-detection model.

    Currently a stub: no detector has been wired in yet, so this
    always returns ``None``. Replace the body with a real model
    (e.g. one loaded from TensorFlow Hub) when available.
    """
    # TODO: load an actual detector; dimensions are accepted but unused.
    detector = None
    return detector
# Build the image-classification model.
def build_classification_model(img_height, img_width, n):
    """Build and compile a 2-class EfficientNetB0 classifier.

    Args:
        img_height: input image height in pixels.
        img_width: input image width in pixels.
        n: number of input channels (e.g. 3 for RGB).

    Returns:
        A compiled ``tf.keras.Model`` producing softmax probabilities
        over two classes.
    """
    shape = (img_height, img_width, n)
    inp = Input(shape=shape)
    # ImageNet-pretrained backbone without its classification head.
    backbone = efn.EfficientNetB0(
        input_shape=shape,
        weights='imagenet',
        include_top=False,
    )
    pooled = GlobalAveragePooling2D()(backbone(inp))
    probs = Dense(2, activation='softmax')(pooled)
    model = tf.keras.Model(inputs=inp, outputs=probs)
    model.compile(
        # Very low LR — presumably for fine-tuning the pretrained weights.
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.000003),
        loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.01),
        metrics=['accuracy'],
    )
    return model
# Load the object detection and classification models
# (the detector is currently a stub and comes back as None).
object_detection_model = build_object_detection_model(IMG_HEIGHT, IMG_WIDTH)
# 3 input channels — assumes RGB images; confirm against the training setup.
classification_model = build_classification_model(IMG_HEIGHT, IMG_WIDTH, 3)
# Pretrained weights file must sit next to this script at startup.
classification_model.load_weights('modelo_treinado.h5')
# Prepare a raw image for the classifier.
def preprocess_image(input_image):
    """Resize the image to the model's input size and scale pixels to [0, 1]."""
    resized = tf.image.resize(input_image, (IMG_HEIGHT, IMG_WIDTH))
    return resized / 255.0
# Run classification (and, eventually, object detection) on one image.
def predict_image(input_image):
    """Classify an input image as Normal or Cataract.

    Returns a human-readable string with the predicted class and its
    softmax probability. Object detection is not implemented yet —
    hook the ``object_detection_model`` output in here once it exists.
    """
    # Preprocess and add a leading batch dimension of 1.
    batch = tf.expand_dims(preprocess_image(input_image), axis=0)
    # Shape (1, 2): per-class softmax probabilities.
    probs = classification_model.predict(batch)
    class_names = ["Normal", "Cataract"]
    winner = np.argmax(probs)
    predicted_class = class_names[winner]
    probability = probs[0][winner]
    # Once detection exists, append bounding-box info to this string.
    formatted_text = f"Predicted Class: {predicted_class}\nProbability: {probability:.2%}"
    return formatted_text
# Build a Gradio interface for making predictions.
iface = gr.Interface(
    fn=predict_image,
    inputs="image",
    outputs="text",
    # NOTE(review): `interpretation` was removed in Gradio 4.x — confirm the
    # pinned Gradio version still accepts this kwarg.
    interpretation="default"
)
# Launch the Gradio app.
iface.launch()
|