Update app.py
app.py
CHANGED
@@ -35,13 +35,14 @@ def image_preprocess(image, target_size, gt_boxes=None):
 
 input_size = 416
 
+os.system("wget https://github.com/AK391/models/raw/main/vision/object_detection_segmentation/yolov4/model/yolov4.onnx")
 
 # Start from ORT 1.10, ORT requires explicitly setting the providers parameter if you want to use execution providers
 # other than the default CPU provider (as opposed to the previous behavior of providers getting set/registered by default
 # based on the build flags) when instantiating InferenceSession.
 # For example, if NVIDIA GPU is available and ORT Python package is built with CUDA, then call API as following:
 # rt.InferenceSession(path/to/model, providers=['CUDAExecutionProvider'])
-sess = rt.InferenceSession("
+sess = rt.InferenceSession("yolov4.onnx")
 
 outputs = sess.get_outputs()
 
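For context, a minimal sketch of what app.py does after this commit: it shells out to wget to fetch the YOLOv4 ONNX model, then loads it with ONNX Runtime. The MODEL_URL/MODEL_PATH names, the os.path.exists guard, and the explicit providers argument are illustrative assumptions, not part of the diff; the committed code calls rt.InferenceSession("yolov4.onnx") with no providers argument, which falls back to the default CPU provider.

import os
import onnxruntime as rt  # the diff's "rt" alias; assumed to be imported earlier in app.py

MODEL_URL = "https://github.com/AK391/models/raw/main/vision/object_detection_segmentation/yolov4/model/yolov4.onnx"
MODEL_PATH = "yolov4.onnx"

# The diff runs wget unconditionally on startup; guarding on os.path.exists
# (an addition here, not in the diff) avoids re-downloading on every restart.
if not os.path.exists(MODEL_PATH):
    os.system(f"wget {MODEL_URL}")

# Per the comment in the diff: since ORT 1.10, execution providers other than
# the default CPU provider must be requested explicitly, e.g.
# providers=['CUDAExecutionProvider'] on a CUDA-enabled build. Passing
# CPUExecutionProvider explicitly here just makes the default visible.
sess = rt.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])

# Inspect the model's output metadata, as the code after the hunk does.
outputs = sess.get_outputs()
output_names = [o.name for o in outputs]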