|
import gradio as gr |
|
from dotenv import load_dotenv |
|
from roboflow import Roboflow |
|
import tempfile |
|
import os |
|
import requests |
|
import numpy as np |
|
from sahi.predict import get_sliced_prediction |
|
import supervision as sv |
|
|
|
|
|
# Load credentials and model configuration from the environment (.env file).
load_dotenv()

api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
_version_raw = os.getenv("ROBOFLOW_MODEL_VERSION")

# BUGFIX: previously a missing ROBOFLOW_MODEL_VERSION crashed with an opaque
# "int() argument must be a string..." TypeError, and the other variables were
# silently passed through as None. Fail fast with a message naming exactly
# which variables are absent.
_missing = [name for name, value in (
    ("ROBOFLOW_API_KEY", api_key),
    ("ROBOFLOW_WORKSPACE", workspace),
    ("ROBOFLOW_PROJECT", project_name),
    ("ROBOFLOW_MODEL_VERSION", _version_raw),
) if not value]
if _missing:
    raise RuntimeError(
        "Missing required environment variables: " + ", ".join(_missing)
    )

model_version = int(_version_raw)

# Resolve the hosted Roboflow model used by the slicer callback below.
rf = Roboflow(api_key=api_key)
project = rf.workspace(workspace).project(project_name)
model = project.version(model_version).model
|
|
|
|
|
def detect_objects(image):
    """Run sliced (SAHI-style) object detection on an input image.

    The image is tiled into overlapping 320x320 slices, each slice is sent to
    the Roboflow model, and overlapping detections are merged with NMS.

    Args:
        image: PIL image supplied by the Gradio ``Image(type="pil")`` input.

    Returns:
        tuple[str, str]: path to the annotated image (or the original input
        image if inference failed) and a human-readable per-class count
        summary (or an error description).
    """
    # Persist the raw input so we can still return a viewable image when
    # inference fails.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_file_path = temp_file.name

    # Fallback shown on error; overwritten on success.
    output_image_path = temp_file_path

    try:
        def callback(image_slice: np.ndarray) -> sv.Detections:
            # NOTE(review): assumes the model object exposes an `.infer()`
            # method with inference-sdk style results — confirm against the
            # Roboflow SDK version in use.
            results = model.infer(image_slice)[0]
            return sv.Detections.from_inference(results)

        slicer = sv.InferenceSlicer(
            callback=callback,
            slice_wh=(320, 320),
            overlap_wh=(64, 64),
            # Merge duplicate boxes produced by overlapping tiles.
            overlap_filter=sv.OverlapFilter.NON_MAX_SUPPRESSION,
            iou_threshold=0.5,
        )

        detections = slicer(image)

        # Draw boxes then labels on a copy so the caller's image is untouched.
        box_annotator = sv.BoxAnnotator()
        label_annotator = sv.LabelAnnotator()
        annotated_image = box_annotator.annotate(
            scene=image.copy(), detections=detections)
        annotated_image = label_annotator.annotate(
            scene=annotated_image, detections=detections)

        output_image_path = "/tmp/prediction_visual.png"
        annotated_image.save(output_image_path)

        # BUGFIX: iterating sv.Detections yields plain tuples, so the old
        # `prediction.class_id` attribute access raised AttributeError and
        # every request ended in the generic error branch. Count from the
        # class_id array instead, preferring human-readable class names when
        # the model provides them in `detections.data`.
        class_names = detections.data.get("class_name") if detections.data else None
        class_count = {}
        total_count = 0
        for idx, class_id in enumerate(detections.class_id):
            label = class_names[idx] if class_names is not None else class_id
            class_count[label] = class_count.get(label, 0) + 1
            total_count += 1

        result_text = "Detected Objects:\n\n"
        for class_name, count in class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal objects detected: {total_count}"

    except requests.exceptions.HTTPError as http_err:
        # Roboflow API errors: report and fall back to the unannotated input.
        result_text = f"HTTP error occurred: {http_err}"
        output_image_path = temp_file_path
    except Exception as err:
        result_text = f"An error occurred: {err}"
        output_image_path = temp_file_path
    finally:
        # BUGFIX: the original removed the temp file unconditionally, so the
        # error branches returned a path that no longer existed. Only delete
        # it when it is not the image being returned.
        if output_image_path != temp_file_path:
            os.remove(temp_file_path)

    return output_image_path, result_text
|
|
|
|
|
# Gradio UI: three columns (input image | annotated output | count summary),
# a trigger button, and a click handler wired to detect_objects. Component
# creation order defines the rendered layout — do not reorder.
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            # PIL mode so detect_objects receives a PIL.Image directly.
            input_image = gr.Image(type="pil", label="Input Image")
        with gr.Column():
            output_image = gr.Image(label="Detected Objects")
        with gr.Column():
            output_text = gr.Textbox(label="Object Count")

    detect_button = gr.Button("Detect Objects")

    # detect_objects returns (image_path, summary_text), matching the two
    # outputs in order.
    detect_button.click(
        fn=detect_objects,
        inputs=input_image,
        outputs=[output_image, output_text]
    )

# Start the local Gradio server (blocks until shut down).
iface.launch()
|
|