# plants_yolo / app.py
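"""Gradio demo for houseplant detection.

Downloads an image from a user-supplied URL, runs a YOLO model over it,
draws the predicted bounding boxes and class names with matplotlib, uploads
the annotated image to Cloudinary, and returns the uploaded image's URL.
"""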
import requests
from ultralytics import YOLO
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import gradio as gr
# Fine-tuned YOLO weights (expected alongside this file).
model = YOLO('best (5).pt')
def plot_img_bbox(img, target, save_path, classes):
    """Draw predicted boxes and class names on the image, save it, and upload it."""
    fig, a = plt.subplots(1, 1)
    fig.set_size_inches(10, 10)
    classes_ = {0: 'anthurium', 1: 'clivia', 2: 'dieffenbachia', 3: 'dracaena', 4: 'gloxinia', 5: 'kalanchoe', 6: 'orchid', 7: 'sansevieria', 8: 'violet', 9: 'zamioculcas'}
    a.imshow(img)
    for i, box in enumerate(target):
        # Convert xyxy coordinates to the (x, y, width, height) form Rectangle expects.
        x, y, width, height = box[0], box[1], box[2] - box[0], box[3] - box[1]
        rect = patches.Rectangle((x, y), width, height, linewidth=2, edgecolor='r', facecolor='none')
        # Class ids come back as floats from .tolist(), so cast before the dict lookup.
        a.text(x, y - 20, classes_[int(classes[i])], color='b', verticalalignment='top')
        a.add_patch(rect)
    plt.savefig(save_path)
    plt.close()
    upload_url = upload_to_cloudinary(save_path)
    return upload_url
def upload_to_cloudinary(local_file_path):
    """Upload a local image to Cloudinary via an unsigned upload preset; return its URL."""
    upload_url = 'https://api.cloudinary.com/v1_1/ddvajyjou/image/upload'
    params = {'upload_preset': 'nb6tvi1b'}
    with open(local_file_path, 'rb') as f:
        response = requests.post(upload_url, files={'file': f}, params=params)
    if response.status_code == 200:
        return response.json()['secure_url']
    else:
        print(f"Error uploading to Cloudinary: {response.status_code}")
        return None
def index(img_url):
    """Download an image from a URL, run detection, and return the annotated image URL."""
    response = requests.get(img_url, stream=True)
    img_array = np.asarray(bytearray(response.content), dtype=np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    print(img_url)
    # Ultralytics treats numpy arrays as BGR, so predict on the decoded image as-is.
    results = model.predict(source=img, conf=0.4)
    boxes = results[0].boxes.xyxy.tolist()
    classes = results[0].boxes.cls.tolist()
    names = results[0].names
    confidences = results[0].boxes.conf.tolist()
    print(boxes)
    print(classes)
    print(names)
    print(confidences)
    # matplotlib expects RGB, so convert the BGR OpenCV image before plotting.
    final_url = plot_img_bbox(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), boxes, 'image.png', classes)
    return final_url
inputs_image_url = [
    gr.Textbox(type="text", label="Image URL"),
]
outputs_result_dict = [
    gr.Textbox(type="text", label="Annotated image URL"),
]
interface_image_url = gr.Interface(
    fn=index,
    inputs=inputs_image_url,
    outputs=outputs_result_dict,
    title="Plant detection",
    cache_examples=False,
)
gr.TabbedInterface(
    [interface_image_url],
    tab_names=['Image inference']
).queue().launch()
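# Hedged usage sketch (not part of the original app): to sanity-check the
# pipeline without the UI, index() can be called directly with any publicly
# reachable image URL, e.g.
#   print(index("https://example.com/orchid.jpg"))  # hypothetical URL
# Note that gr.TabbedInterface(...).launch() above blocks, so such a check
# would have to run before it or in a separate interpreter.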