from custom_torch_module.deploy_utils import Onnx_deploy_model
import gradio as gr
import time
from PIL import Image
model_path = "deploying model/" + "vit_xsmall_patch16_clip_224(trainble_0.15) (Acc 98.44%, Loss 0.168152).onnx"
input_size = [1, 3, 224, 224]
img_size = input_size[-1]
title = "Gender Vision mini"
description = "A ViT (xsmall_clip) based model, fine-tuned on a custom dataset of around 800 train images & 200 test images. Accuracy: around 98.4% on the custom test dataset. Optimized with ONNX (around 1.7 times faster than the PyTorch version on CPU)."
article = "Built through a bunch of fine-tuning and experiments. !REMEMBER! This model can be wrong."
def predict(img):
    # Time a single forward pass through the ONNX model.
    start_time = time.time()
    output = onnx_model.run(img, return_prob=True)
    end_time = time.time()
    elapsed_time = end_time - start_time
    # output is expected to hold the class probabilities in the order ["Men", "Women"].
    pred_label_and_probs = {"Men": output[0], "Women": output[1]}
    return pred_label_and_probs, elapsed_time
onnx_model = Onnx_deploy_model(model_path=model_path, img_size=img_size)
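# Optional local smoke test (a sketch; "sample.jpg" is a placeholder path, not part
# of the project -- point it at any RGB image on disk before uncommenting):
# sample = Image.open("sample.jpg").convert("RGB")
# probs, secs = predict(sample)
# print(probs, f"Inference took {secs:.3f}s")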
# Create the Gradio demo
demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=2, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],
                    title=title,
                    description=description,
                    article=article)
# Launch the demo
demo.launch()
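# Note: demo.launch(share=True) would additionally expose a temporary public Gradio URL.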