from custom_torch_module.deploy_utils import Onnx_deploy_model
import gradio as gr
import time
from PIL import Image
import os
from pathlib import Path
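
# Pick up the first exported ONNX model file in the deployment folder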
model_path = list(Path("deploying model/").glob("*.onnx"))[0]
input_size = [1, 3, 224, 224]
img_size = input_size[-1]
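
# Text shown on the Gradio demo page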
title = "Gender Vision mini"
description = "A resnet50_clip_gap-based model, fine-tuned on a custom dataset (around 800 training images and 200 test images). F1 score: 1.00 (100%) on the custom test set. Optimized with ONNX (around 1.7x faster than the PyTorch version on CPU)."
article = "Built through many rounds of fine-tuning and experimentation. !REMEMBER! This model can be wrong."
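
# Inference function: returns class probabilities and prediction speed (FPS)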
def predict(img):
    start_time = time.time()
    output = onnx_model.run(img, return_prob=True)
    end_time = time.time()
    elapsed_time = end_time - start_time
    prediction_fps = 1 / elapsed_time
    pred_label_and_probs = {"Men": output[0], "Women": output[1]}
    return pred_label_and_probs, prediction_fps
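
# Load the ONNX model and gather example images from the examples/ folder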
onnx_model = Onnx_deploy_model(model_path=model_path, img_size=img_size)
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create the Gradio demo
demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=2, label="Predictions"),
                             gr.Number(label="Prediction speed (FPS)")],
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)
# Launch the demo
demo.launch()