nehulagrawal committed on
Commit
cd236be
1 Parent(s): b28d162

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -61
app.py CHANGED
@@ -74,64 +74,4 @@ interface_image = gr.Interface(
74
  theme='huggingface'
75
  )
76
 
77
- ##################################################Video Inference################################################################
78
- def show_preds_video(
79
- video_path: str = None,
80
- model_path: str = None,
81
- image_size: int = 640,
82
- conf_threshold: float = 0.25,
83
- iou_threshold: float = 0.45,
84
- ):
85
- cap = cv2.VideoCapture(video_path)
86
-
87
- while cap.isOpened():
88
- success, frame = cap.read()
89
-
90
- if success:
91
- model = YOLO(model_path)
92
- model.overrides['conf'] = conf_threshold
93
- model.overrides['iou'] = iou_threshold
94
- model.overrides['agnostic_nms'] = False
95
- model.overrides['max_det'] = 1000
96
- results = model.predict(frame)
97
- annotated_frame = results[0].plot()
98
-
99
- # Do not display the frame using cv2.imshow
100
- # cv2.imshow("YOLOv8 Inference", annotated_frame)
101
-
102
- # Break the loop if 'q' is pressed
103
- if cv2.waitKey(1) & 0xFF == ord("q"):
104
- break
105
- else:
106
- break
107
-
108
- cap.release()
109
- cv2.destroyAllWindows()
110
-
111
-
112
- inputs_video = [
113
- gr.components.Video(type="filepath", label="Input Video"),
114
- gr.inputs.Dropdown(["foduucom/stockmarket-future-prediction"],
115
- default="foduucom/stockmarket-future-prediction", label="Model"),
116
- gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
117
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
118
- gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
119
-
120
- ]
121
- outputs_video = gr.outputs.Image(type="filepath", label="Output Video")
122
- video_path=[['test/testvideo.mp4','foduucom/stockmarket-future-prediction', 640, 0.25, 0.45]]
123
- interface_video = gr.Interface(
124
- fn=show_preds_video,
125
- inputs=inputs_video,
126
- outputs=outputs_video,
127
- title=model_heading,
128
- description=description,
129
- examples=video_path,
130
- cache_examples=False,
131
- theme='huggingface'
132
- )
133
-
134
- gr.TabbedInterface(
135
- [interface_image, interface_video],
136
- tab_names=['Image inference', 'Video inference']
137
- ).queue().launch()
 
74
  theme='huggingface'
75
  )
76
 
77
+ interface_image.launch(debug=True, enable_queue=True)