sayedM committed on
Commit
f77fbe2
1 Parent(s): 73fedc4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -24,14 +24,15 @@ def yolov8_inference(
24
  image: gr.inputs.Image = None,
25
  model_name: gr.inputs.Dropdown = None,
26
  image_size: gr.inputs.Slider = 1280,
27
- conf_threshold: gr.inputs.Slider = 0.25,
28
  iou_threshold: gr.inputs.Slider = 0.45,
29
  ):
30
 
31
-
32
  model = YOLO("https://huggingface.co/spaces/devisionx/Amazon_demo/blob/main/amazon.pt")
33
 
34
  results = model(image,conf=conf_threshold,iou=iou_threshold ,imgsz=1280)[0]
 
35
  detections = sv.Detections.from_yolov8(results)
36
  annotated_image = annotatorbbox.annotate(scene=image, detections=detections)
37
  # annotated_image = annotatormask.annotate(scene=annotated_image, detections=detections)
@@ -44,7 +45,7 @@ def yolov8_inference(
44
  image_input = gr.inputs.Image() # Adjust the shape according to your requirements
45
 
46
  inputs = [
47
- gr.inputs.Image(label="Input Image"),
48
  gr.Slider(
49
  minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
50
  ),
@@ -52,11 +53,11 @@ inputs = [
52
  ]
53
 
54
  outputs = gr.Image(type="filepath", label="Output Image")
55
- title = "Ultralytics YOLOv8 Segmentation Demo"
56
  import os
57
  examples = [
58
- ["tu2.png", 0.25, 0.45],
59
- ["tu3.jpg", 0.25, 0.45],
60
  ]
61
  demo_app = gr.Interface(examples=examples,
62
  fn=yolov8_inference,
 
24
  image: gr.inputs.Image = None,
25
  model_name: gr.inputs.Dropdown = None,
26
  image_size: gr.inputs.Slider = 1280,
27
+ conf_threshold: gr.inputs.Slider = 0.5,
28
  iou_threshold: gr.inputs.Slider = 0.45,
29
  ):
30
 
31
+ image=image[:, :, ::-1].astype(np.uint8)
32
  model = YOLO("https://huggingface.co/spaces/devisionx/Amazon_demo/blob/main/amazon.pt")
33
 
34
  results = model(image,conf=conf_threshold,iou=iou_threshold ,imgsz=1280)[0]
35
+ image=image[:, :, ::-1].astype(np.uint8)
36
  detections = sv.Detections.from_yolov8(results)
37
  annotated_image = annotatorbbox.annotate(scene=image, detections=detections)
38
  # annotated_image = annotatormask.annotate(scene=annotated_image, detections=detections)
 
45
  image_input = gr.inputs.Image() # Adjust the shape according to your requirements
46
 
47
  inputs = [
48
+ gr.Image(label="Input Image"),
49
  gr.Slider(
50
  minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
51
  ),
 
53
  ]
54
 
55
  outputs = gr.Image(type="filepath", label="Output Image")
56
+ title = "YOLOv8 Segmentation Demo"
57
  import os
58
  examples = [
59
+ ["tu2.png", 0.5, 0.45],
60
+ ["tu3.jpg", 0.5, 0.45],
61
  ]
62
  demo_app = gr.Interface(examples=examples,
63
  fn=yolov8_inference,