import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
import torch
import requests
from torchvision import transforms

# Load the pre-trained ResNet-18 model (ImageNet weights) from PyTorch Hub
resnet_model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
# Download the 1,000 ImageNet class labels used by ResNet-18
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")

# Load the TensorFlow model
tf_model_path = 'modelo_treinado.h5'
tf_model = tf.keras.models.load_model(tf_model_path)

# Output classes of the TensorFlow model
class_labels = ["Normal", "Cataract"]

def predict(inp):
    # First, use the ResNet-18 model to score the image against the 1,000 ImageNet labels
    inp_resized = transforms.ToTensor()(inp).unsqueeze(0)
    with torch.no_grad():
        prediction_resnet = torch.nn.functional.softmax(resnet_model(inp_resized)[0], dim=0)
        confidences_resnet = {labels[i]: float(prediction_resnet[i]) for i in range(1000)}

    # Then, use the TensorFlow model to predict Normal or Cataract
    img_array = cv2.cvtColor(np.array(inp), cv2.COLOR_RGB2BGR)
    img_array = cv2.resize(img_array, (224, 224))  # Resize to match the input size expected by the TF model
    img_array = img_array / 255.0  # Normalize pixel values
    img_array = np.expand_dims(img_array, axis=0)  # Add batch dimension

    # Run the TensorFlow classifier and keep its most confident class
    prediction_tf = tf_model.predict(img_array)
    label_index = np.argmax(prediction_tf)
    confidence_tf = float(prediction_tf[0, label_index])

    # Prefer the TensorFlow prediction when it is confident; otherwise fall back to the top ResNet-18 label
    resnet_label = max(confidences_resnet, key=confidences_resnet.get)
    if confidence_tf >= 0.5:
        final_label = class_labels[label_index]
        confidence = confidence_tf
    else:
        final_label = resnet_label
        confidence = confidences_resnet[resnet_label]

    return final_label, confidence

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=["label", "number"]
)

demo.launch()
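
# Optional local sanity check (a minimal sketch; assumes an example image named
# "example_eye.jpg" sits next to this script -- adjust the path before uncommenting):
#
# from PIL import Image
# test_img = Image.open("example_eye.jpg").convert("RGB")
# label, confidence = predict(test_img)
# print(f"Predicted: {label} ({confidence:.2%})")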