import gradio as gr
# import torch
# import requests
# from torchvision import transforms
import cv2
from geti_sdk.deployment import Deployment
from geti_sdk.utils import show_image_with_annotation_scene

# model = torch.hub.load("pytorch/vision:v0.6.0", "resnet18", pretrained=True).eval()
# response = requests.get("https://git.io/JJkYN")
# labels = response.text.split("\n")

# Step 1: Load the deployment
deployment = Deployment.from_folder("deployment")
deployment.load_inference_models(device="CPU")


def resize_image(image, target_dimension):
    """Resize an image so its longest side equals target_dimension, preserving the aspect ratio."""
    height, width = image.shape[:2]
    max_dimension = max(height, width)
    scale_factor = target_dimension / max_dimension
    new_width = int(width * scale_factor)
    new_height = int(height * scale_factor)
    resized_image = cv2.resize(image, (new_width, new_height))
    return resized_image


def infer(image=None):
    """Run the Geti deployment on the input image and return the annotated image plus a text summary."""
    if image is None:
        return [None, "Error: No image provided"]
    # Limit the longest side to 1200 px to keep inference time reasonable
    image = resize_image(image, 1200)
    prediction = deployment.infer(image)
    # Draw the predicted annotations onto the image without displaying a window
    output = show_image_with_annotation_scene(image, prediction, show_results=False)
    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    return [output, prediction.overview]
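
# A minimal sketch of calling infer() outside the Gradio UI, assuming a local
# image file "sample.jpg" exists (hypothetical path):
#
#   img = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)
#   annotated, summary = infer(img)
#   print(summary)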

# def predict(inp):
#     inp = transforms.ToTensor()(inp).unsqueeze(0)
#     with torch.no_grad():
#         prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
#         confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
#     return confidences


def run():
    # demo = gr.Interface(
    #     fn=predict,
    #     inputs=gr.inputs.Image(type="pil"),
    #     outputs=gr.outputs.Label(num_top_classes=3),
    # )
    demo = gr.Interface(
        fn=infer,
        inputs=["image"],
        outputs=["image", "text"],
    )
    demo.launch(server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    run()
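
# To serve the demo locally (assuming this file is the Space's entry point,
# e.g. "app.py"): run `python app.py` and open http://0.0.0.0:7860 in a browser.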