import gradio as gr
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import datetime  # used to timestamp each prediction
# Define the custom FixedDropout layer
class FixedDropout(tf.keras.layers.Dropout):
    def _get_noise_shape(self, inputs):
        if self.noise_shape is None:
            return self.noise_shape
        symbolic_shape = tf.shape(inputs)
        noise_shape = [symbolic_shape[axis] if shape is None else shape
                       for axis, shape in enumerate(self.noise_shape)]
        return tuple(noise_shape)

# Register the custom FixedDropout layer
tf.keras.utils.get_custom_objects()['FixedDropout'] = FixedDropout
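# Note: FixedDropout is presumably the custom dropout layer shipped with the
# 'efficientnet' package; re-declaring and registering it lets load_model
# deserialize models that were saved with it.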
# Load the trained TensorFlow model
with tf.keras.utils.custom_object_scope({'FixedDropout': FixedDropout}):
    model = tf.keras.models.load_model('modelo_treinado.h5')
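# Optional sanity check (uncomment to confirm the input shape this script
# assumes, i.e. (None, 192, 256, 3)):
# print(model.input_shape)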
# Define a function to make predictions
def classify_image(input_image):
    # Log the input shape
    print(f"Input shape: {input_image.shape}")
    # Resize the image to the dimensions the model expects (192x256)
    input_image = tf.image.resize(input_image, (192, 256))
    input_image = input_image / 255.0  # Normalize to [0, 1]
    input_image = np.expand_dims(input_image, axis=0)  # Add the batch dimension
    # Log the input shape after resizing
    print(f"Input shape after resizing: {input_image.shape}")
    # Get the current time
    current_time = datetime.datetime.now()
    # Make a prediction with the model
    prediction = model.predict(input_image)
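    # 'prediction' should be a (1, 2) array: one row of class probabilities
    # for the single batched image.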
    # Assuming the model returns probabilities for two classes, keep the
    # class with the highest probability
    class_index = np.argmax(prediction)
    class_labels = ["Normal", "Cataract"]  # Replace with your actual class labels
    predicted_class = class_labels[class_index]
    # Build a composite image that carries the prediction label
    output_image = (input_image[0] * 255).astype('uint8')
    # Paint a white strip at the bottom of the image to hold the label text
    output_image[-50:] = 255
    # Write the prediction label on the image
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.4  # Reduced font size so both lines fit in the strip
    cv2.putText(output_image, f"Analysis Time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}", (10, output_image.shape[0] - 30), font, font_scale, (0, 0, 0), 1)
    cv2.putText(output_image, f"Predicted Class: {predicted_class}", (10, output_image.shape[0] - 10), font, font_scale, (0, 0, 0), 1)  # Black text
    # Calculate the box size as a percentage of the image size
    box_percentage = 0.1  # Adjust as needed
    box_size = int(min(output_image.shape[1], output_image.shape[0]) * box_percentage)
    # Center the box both horizontally and vertically
    box_x = (output_image.shape[1] - box_size) // 2
    box_y = (output_image.shape[0] - box_size) // 2
    # Color-code the object box based on the predicted class
    object_box_color = (0, 255, 0) if predicted_class == "Normal" else (255, 0, 0)  # Green for Normal, red for Cataract (RGB, since Gradio passes RGB arrays)
    # Draw a centered object identification box
    cv2.rectangle(output_image, (box_x, box_y), (box_x + box_size, box_y + box_size), object_box_color, 2)
    return output_image
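
# Example of calling classify_image directly, outside Gradio (a sketch;
# 'example.jpg' is a hypothetical local file):
# img = np.array(Image.open('example.jpg').convert('RGB'))
# result = classify_image(img)
# Image.fromarray(result).save('result.jpg')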
# Create a Gradio interface
input_interface = gr.Interface(
    fn=classify_image,
    inputs="image",  # Input type: image (Gradio delivers an RGB numpy array)
    outputs="image",  # Output type: image
    live=True
)

# Launch the Gradio app
input_interface.launch()
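# To also get a temporary public URL, launch with share=True:
# input_interface.launch(share=True)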