AlshimaaGamalAlsaied commited on
Commit
a47293c
Β·
1 Parent(s): 56d1b77
Files changed (6) hide show
  1. app.py +74 -0
  2. img1.png +0 -0
  3. img2.png +0 -0
  4. img3.png +0 -0
  5. requirements.txt +4 -0
  6. yolo5_epoch100 +1 -0
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ #import torch
3
+ import yolov5
4
+ import subprocess
5
+ import tempfile
6
+ import time
7
+ from pathlib import Path
8
+ import uuid
9
+ import cv2
10
+ import gradio as gr
11
+
12
+
13
+
14
+ # Images
15
+ #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
16
+ #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
17
+
18
def image_fn(
    image=None,
    model_path=None,
    image_size=640,
    conf_threshold=0.25,
    iou_threshold=0.45,
):
    """
    YOLOv5 inference function (the original docstring said YOLOv7, but the
    code loads and runs a YOLOv5 model via the `yolov5` package).

    Args:
        image: Input image (PIL.Image, as produced by the Gradio Image input).
        model_path: Hugging Face Hub model id or local path to YOLOv5 weights.
        image_size: Inference image size in pixels.
        conf_threshold: Confidence threshold for keeping detections.
        iou_threshold: IOU threshold used by non-maximum suppression.

    Returns:
        The input image with detections rendered on it (numpy array, first
        element of ``results.render()``).
    """
    # NOTE(review): the model is re-loaded from the Hub on every request,
    # which is slow; consider caching the loaded model per model_path.
    model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
    model.conf = conf_threshold  # detection confidence cutoff
    model.iou = iou_threshold    # NMS IOU cutoff
    results = model([image], size=image_size)
    return results.render()[0]
42
+
43
+
44
+
45
# Gradio interface for single-image detection.
#
# Bug fixed here: the examples below pass FIVE values per row
# (image, model, size, conf, iou) but the original interface declared only
# TWO inputs (the sliders were commented out). With cache_examples=True,
# Gradio runs the examples at startup and the arity mismatch breaks caching.
# The three sliders are restored with the same defaults image_fn uses.
image_interface = gr.Interface(
    fn=image_fn,
    inputs=[
        gr.inputs.Image(type="pil", label="Input Image"),
        gr.inputs.Dropdown(
            choices=[
                "alshimaa/yolo5_epoch100",
            ],
            default="alshimaa/yolo5_epoch100",
            label="Model",
        ),
        gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
        gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
        gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
    ],
    # NOTE(review): image_fn returns a rendered numpy array, not a file path;
    # legacy Gradio coerces arrays for Image outputs, but type="numpy" would
    # describe the actual return value more accurately — confirm before changing.
    outputs=gr.outputs.Image(type="filepath", label="Output Image"),
    title="Object detection model",  # typo fix: was "Obiect detection model p"
    examples=[
        ["img1.png", "alshimaa/yolo5_epoch100", 640, 0.25, 0.45],
        ["img2.png", "alshimaa/yolo5_epoch100", 640, 0.25, 0.45],
        ["img3.png", "alshimaa/yolo5_epoch100", 640, 0.25, 0.45],
    ],
    cache_examples=True,
    theme="huggingface",
)
67
+
68
+
69
+
70
if __name__ == "__main__":
    # Wrap the single image interface in a tabbed layout and start the app.
    demo = gr.TabbedInterface(
        interface_list=[image_interface],
        tab_names=["Detect Images"],
    )
    demo.launch()
img1.png ADDED
img2.png ADDED
img3.png ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ yolov5
4
+ huggingface_hub
yolo5_epoch100 ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit c2212ddb924a66157b32a3af4f71b35c3d4c23fb