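"""Gradio demo that serves an Intel Geti deployment for image inference.

Assumes a deployment exported from the Geti platform lives in a local
"deployment" folder next to this script; models are loaded for CPU
inference through the Geti SDK.
"""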
import gradio as gr
import cv2
from geti_sdk.deployment import Deployment
from geti_sdk.utils import show_image_with_annotation_scene


# Step 1: Load the deployment exported from the Geti platform.
# The "deployment" folder must contain the exported models; the device
# string is handed to the OpenVINO backend, so "GPU" or "AUTO" may work
# in place of "CPU" where available.
deployment = Deployment.from_folder("deployment")
deployment.load_inference_models(device="CPU")


def resize_image(image, target_dimension):
    """Resize `image` so that its longest side equals `target_dimension`,
    preserving the aspect ratio.
    """
    height, width = image.shape[:2]
    scale_factor = target_dimension / max(height, width)
    new_width = int(width * scale_factor)
    new_height = int(height * scale_factor)
    return cv2.resize(image, (new_width, new_height))
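
# For example (illustrative numbers): a 4000x3000 input gets
# scale_factor = 1200 / 4000 = 0.3 and comes back as 1200x900; inputs
# smaller than the target are scaled up by the same rule.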


def infer(image=None):
    """Run the deployed model on `image` and return the annotated image
    together with a text overview of the prediction.
    """
    if image is None:
        return [None, "Error: no image provided"]

    # Cap the longest side at 1200 px to keep inference time reasonable.
    image = resize_image(image, 1200)
    prediction = deployment.infer(image)
    # Draw the predicted annotations on the image instead of displaying it.
    output = show_image_with_annotation_scene(image, prediction, show_results=False)
    # Swap the channel order so the colors render correctly in Gradio.
    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    return [output, prediction.overview]




def run():
    demo = gr.Interface(
        fn=infer,
        inputs=["image"],
        outputs=["image", "text"],
    )

    # Bind to all interfaces so the app is reachable when containerized.
    demo.launch(server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    run()
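
# To try this locally (assuming this file is saved as app.py and the
# gradio, geti-sdk, and opencv-python packages are installed):
#
#   python app.py
#
# then open http://localhost:7860 in a browser.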