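"""Gradio demo combining a Keras image classifier with an Ultralytics YOLO detector:
one tab classifies a whole image, the other detects animals, classifies each detected
crop, and draws labelled boxes on the result."""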
import numpy as np
import gradio as gr
import tensorflow as tf
from PIL import Image
from ultralytics import YOLO
import cv2

# Classifier (Keras/EfficientNet) and detector (Ultralytics YOLO) weights are
# expected next to this script.
classification_model = tf.keras.models.load_model('./models.h5')
detection_model = YOLO('./best.pt')

# One class label per line in labels.txt, in the classifier's output order.
with open("labels.txt") as f:
    labels = [line.rstrip('\n') for line in f]

def classify_image(inp):
    # Work on a copy so the caller's array is left untouched
    inp_copy = np.copy(inp)
    # Resize the input image to the classifier's expected shape (224, 224)
    inp_copy = Image.fromarray(inp_copy).resize((224, 224))
    # Batch of one, then EfficientNet preprocessing
    inp_copy = np.array(inp_copy).reshape((-1, 224, 224, 3))
    inp_copy = tf.keras.applications.efficientnet.preprocess_input(inp_copy)
    prediction = classification_model.predict(inp_copy).flatten()
    # Pair every label with its predicted probability
    confidences = {label: float(score) for label, score in zip(labels, prediction)}
    return confidences

def animal_detect_and_classify(img, detect_results):
    img = np.array(img)
    combined_results = []
    # Classify every region the detector found
    for result in detect_results:
        for box in result.boxes:
            # Crop the region of interest
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            detect_img = img[y1:y2, x1:x2]
            # Skip degenerate boxes that would make cv2.resize fail
            if detect_img.size == 0:
                continue

            # Resize the crop to the classifier's expected shape (224, 224)
            detect_img = cv2.resize(detect_img, (224, 224))

            # Reshape to a batch of one and apply EfficientNet preprocessing
            inp_array = np.array(detect_img).reshape((-1, 224, 224, 3))
            inp_array = tf.keras.applications.efficientnet.preprocess_input(inp_array)

            # Classify the crop
            prediction = classification_model.predict(inp_array)

            # Keep the most confident label, falling back to the generic
            # "animal" label when the classifier is unsure
            threshold = 0.66
            predicted_labels = [
                labels[np.argmax(pred)] if np.max(pred) >= threshold else "animal"
                for pred in prediction
            ]
            combined_results.append(((x1, y1, x2, y2), predicted_labels))
    return combined_results

def generate_color(class_name):
    # Derive a repeatable color from the class name so each class keeps the
    # same box color within a run
    color_hash = abs(hash(class_name)) % 16777216  # 24-bit color space
    # Split the 24-bit value into R, G, B channels (0-255 each)
    R = color_hash // (256 * 256)
    G = (color_hash // 256) % 256
    B = color_hash % 256
    return (R, G, B)

def plot_detected_rectangles(image, detections):
    # Create a copy of the image to draw on
    image = np.array(image)
    img_with_rectangles = image.copy()

    # Iterate over each detected rectangle and its corresponding class name
    for rectangle, class_names in detections:
        if class_names[0] == "unknown":
            continue
        # Extract the coordinates of the rectangle
        x1, y1, x2, y2 = rectangle

        # Pick a deterministic color for this class
        color = generate_color(class_names[0])

        # Draw the rectangle on the image
        cv2.rectangle(img_with_rectangles, (x1, y1), (x2, y2), color, 2)

        # Put the class names above the rectangle
        for i, class_name in enumerate(class_names):
            cv2.putText(img_with_rectangles, class_name, (x1, y1 - 10 - i*20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return img_with_rectangles

def detection_image(img, conf_threshold, iou_threshold):
    results = detection_model.predict(
        source=img,
        conf=conf_threshold,
        iou=iou_threshold,
        show_labels=True,
        show_conf=True,
        imgsz=640,
    )
    combined_results = animal_detect_and_classify(img, results)
    plotted_image = plot_detected_rectangles(img, combined_results)
    return Image.fromarray(plotted_image)

io1 = gr.Interface(classify_image, gr.Image(), gr.Label(num_top_classes=3))
io2 = gr.Interface(
    fn=detection_image,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Slider(minimum=0, maximum=1, value=0.25, label="Confidence threshold"),
        gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold")
    ],
    outputs=gr.Image(type="pil", label="Result"),
    title="Animal Detection",
    description="Upload images for inference. The Ultralytics YOLOv8n model is used as pretrained model",
)

if __name__ == "__main__":
    gr.TabbedInterface(
        [io1, io2], ["Classification", "Object Detection"]
    ).launch(debug=True)
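
# Example usage (a sketch, assuming models.h5, best.pt and labels.txt sit next
# to this script):
#   python app.py
# Gradio prints a local URL; open it in a browser to try the "Classification"
# and "Object Detection" tabs.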