import tensorflow as tf
import efficientnet.tfkeras as efn
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
import numpy as np
import gradio as gr
from PIL import Image, ImageDraw, ImageFont

# Image dimensions
IMG_HEIGHT = 224
IMG_WIDTH = 224

# Function to build the object detection model
def build_object_detection_model(img_height, img_width):
    # Replace this with your object detection model architecture and weights
    # For example, you can use a model from TensorFlow Hub or any other source
    object_detection_model = None  # Load your object detection model here
    return object_detection_model
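
# Hedged sketch (not part of the original app): one way to fill the placeholder above
# is a pre-trained detector from TensorFlow Hub. The handle below points to a public
# SSD MobileNet V2 detector; whether it suits this eye-image use case is an assumption.
def build_object_detection_model_tfhub():
    import tensorflow_hub as hub  # local import so this optional sketch stays self-contained
    # The loaded SavedModel expects a uint8 tensor of shape [1, height, width, 3] and
    # returns a dict with 'detection_boxes', 'detection_scores' and 'detection_classes'.
    return hub.load("https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2")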

# Function to build the classification model
def build_classification_model(img_height, img_width, n):
    inp = Input(shape=(img_height, img_width, n))
    efnet = efn.EfficientNetB0(
        input_shape=(img_height, img_width, n),
        weights='imagenet',
        include_top=False
    )
    x = efnet(inp)
    x = GlobalAveragePooling2D()(x)
    x = Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    opt = tf.keras.optimizers.Adam(learning_rate=0.000003)
    loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.01)
    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model
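
# Hedged sketch (assumption, not from the original script): how a weights file like
# 'modelo_treinado.h5' could be produced. The tf.data datasets passed in are
# hypothetical placeholders; only the builder above and standard Keras calls are used.
def train_classification_model(train_ds, val_ds, epochs=10):
    model = build_classification_model(IMG_HEIGHT, IMG_WIDTH, 3)
    # Fine-tune the EfficientNetB0 backbone end to end on the two-class dataset.
    model.fit(train_ds, validation_data=val_ds, epochs=epochs)
    model.save_weights('modelo_treinado.h5')
    return model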

# Load the object detection and classification models
object_detection_model = build_object_detection_model(IMG_HEIGHT, IMG_WIDTH)
classification_model = build_classification_model(IMG_HEIGHT, IMG_WIDTH, 3)
classification_model.load_weights('modelo_treinado.h5')

# Function to preprocess the image for classification
def preprocess_image(input_image):
    input_image = tf.image.resize(input_image, (IMG_HEIGHT, IMG_WIDTH))
    input_image = input_image / 255.0
    return input_image

# Function to perform object detection and classification
def predict_image(input_image):
    # Preprocess the input image for classification
    input_image_classification = preprocess_image(input_image)

    # Run a prediction with the loaded classification model
    input_image_classification = tf.expand_dims(input_image_classification, axis=0)
    classification_prediction = classification_model.predict(input_image_classification)

    # Perform object detection here using the object_detection_model
    # Replace this with your object detection logic to get bounding box coordinates

    # The output is an array of predictions (for two-class classification, something like [[prob_class_0, prob_class_1]])
    # Interpret the result and format it for display

    class_names = ["Normal", "Cataract"]
    predicted_class = class_names[np.argmax(classification_prediction)]
    probability = classification_prediction[0][np.argmax(classification_prediction)]

    # You can format the result with object detection bounding box and label here
    # For example:
    # formatted_text = f"Predicted Class: {predicted_class}\nProbability: {probability:.2%}\nObject Detection: {bounding_box_coordinates}"

    # Create an output image with object detection
    output_image = input_image  # Replace this with your object detection visualization

    # Convert the output image (a uint8 RGB array from Gradio) to a PIL image
    output_pil_image = Image.fromarray(np.uint8(output_image))

    # Draw the label "Normal" or "Cataract" on the image
    draw = ImageDraw.Draw(output_pil_image)
    font = ImageFont.load_default()  # You can customize the font and size here
    label_text = f"Predicted Class: {predicted_class}"
    left, top, right, bottom = draw.textbbox((0, 0), label_text, font=font)
    label_size = (right - left, bottom - top)
    label_position = (10, 10)  # You can adjust the label position
    draw.rectangle([label_position, (label_position[0] + label_size[0], label_position[1] + label_size[1])], fill="white")
    draw.text(label_position, label_text, fill="black", font=font)

    # Return both the labeled PIL image and the prediction text
    return [output_pil_image, f"Predicted Class: {predicted_class}\nProbability: {probability:.2%}"]
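
# Hedged sketch (assumption): a helper for the "format the result with object detection
# bounding box" step mentioned above. It expects normalized [ymin, xmin, ymax, xmax]
# boxes, the format returned by typical TensorFlow detection models; the output format
# of this app's eventual detector is unknown.
def draw_detection_boxes(pil_image, boxes, color="red", width=2):
    draw = ImageDraw.Draw(pil_image)
    img_width, img_height = pil_image.size
    for ymin, xmin, ymax, xmax in boxes:
        # Scale normalized coordinates to pixel positions before drawing the box.
        draw.rectangle(
            [(xmin * img_width, ymin * img_height), (xmax * img_width, ymax * img_height)],
            outline=color,
            width=width,
        )
    return pil_image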

# Create a Gradio interface for making predictions
iface = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(label="Upload an Image", type="numpy"),
    outputs=[gr.Image(type="pil", label="Labeled Image"), gr.Textbox(label="Prediction")]
)

# Launch the Gradio interface
iface.launch()