from openvino.runtime import Core
import gradio as gr
import numpy as np
from PIL import Image
import cv2
from torchvision import transforms
core = Core()
# Read the quantized ONNX model and compile it for CPU inference with OpenVINO.
model_ir = core.read_model(model="Davinci_eye.onnx")
compiled_model_ir = core.compile_model(model=model_ir, device_name="CPU")
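# Assumption (not stated in this file): the ONNX export expects a float32
# (1, 3, 512, 512) NCHW input and returns per-class logits shaped
# (1, num_classes, 512, 512); segment_image below resizes frames to 512x512
# to match. The actual layout can be checked at runtime with:
#   print(compiled_model_ir.input(0).shape, compiled_model_ir.output(0).shape)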
# Convert HWC uint8 images to normalized CHW tensors (ImageNet mean/std).
tfms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
# Maps overlay RGB colors to class ids; the inverse (class id -> color) is
# built below as `colormap`.
color_map = {
    (251, 244, 5): 1,
    (37, 250, 5): 2,
    (0, 21, 209): 3,
    (172, 21, 2): 4,
    (172, 21, 229): 5,
    (6, 254, 249): 6,
    (141, 216, 23): 7,
    (96, 13, 13): 8,
    (65, 214, 24): 9,
    (124, 3, 252): 10,
    (214, 55, 153): 11,
    (48, 61, 173): 12,
    (110, 31, 254): 13,
    (249, 37, 14): 14,
    (249, 137, 254): 15,
    (34, 255, 113): 16,
    (169, 52, 14): 17,
    (124, 49, 176): 18,
    (4, 88, 238): 19,
    (115, 214, 178): 20,
    (115, 63, 178): 21,
    (115, 214, 235): 22,
    (63, 63, 178): 23,
    (130, 34, 26): 24,
    (220, 158, 161): 25,
    (201, 117, 56): 26,
    (121, 16, 40): 27,
    (15, 126, 0): 28,
    (0, 50, 70): 29,
    # NOTE: (20, 20, 0) is duplicated for classes 30 and 31. Dict keys are
    # unique, so only the last entry (31) survives and class 30 is never
    # drawn in the overlay; the intended color for class 30 is not
    # recoverable from this file.
    (20, 20, 0): 30,
    (20, 20, 0): 31,
}
items = {
    1: "HarmonicAce_Head",
    2: "HarmonicAce_Body",
    3: "MarylandBipolarForceps_Head",
    4: "MarylandBipolarForceps_Wrist",
    5: "MarylandBipolarForceps_Body",
    6: "CadiereForceps_Head",
    7: "CadiereForceps_Wrist",
    8: "CadiereForceps_Body",
    9: "CurvedAtraumaticGrasper_Head",
    10: "CurvedAtraumaticGrasper_Body",
    11: "Stapler_Head",
    12: "Stapler_Body",
    13: "MediumLargeClipApplier_Head",
    14: "MediumLargeClipApplier_Wrist",
    15: "MediumLargeClipApplier_Body",
    16: "SmallClipApplier_Head",
    17: "SmallClipApplier_Wrist",
    18: "SmallClipApplier_Body",
    19: "SuctionIrrigation",
    20: "Needle",
    21: "Endotip",
    22: "Specimenbag",
    23: "DrainTube",
    24: "Liver",
    25: "Stomach",
    26: "Pancreas",
    27: "Spleen",
    28: "Gallbladder",
    29: "Gauze",
    30: "TheOther_Instruments",
    31: "TheOther_Tissues",
}
# Invert color_map: class id -> [R, G, B].
colormap = {v: list(k) for k, v in color_map.items()}
def convert_mask_to_rgb(pred_mask):
    """Paint each class id in an (H, W) mask with its RGB color."""
    rgb_mask = np.zeros((pred_mask.shape[0], pred_mask.shape[1], 3), dtype=np.uint8)
    for k, v in colormap.items():
        rgb_mask[pred_mask == k] = v
    return rgb_mask
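# A vectorized alternative (our sketch, not part of the original Space): build
# a lookup table once, then fancy-index it with the mask instead of looping
# over classes. The helper name convert_mask_to_rgb_fast is hypothetical.
_LUT = np.zeros((max(items) + 1, 3), dtype=np.uint8)
for cls_id, rgb in colormap.items():
    _LUT[cls_id] = rgb

def convert_mask_to_rgb_fast(pred_mask):
    # Indexing an (H, W) id mask into the (32, 3) table yields (H, W, 3) uint8.
    return _LUT[pred_mask]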
def segment_image(filepath):
    # Read the frame, convert OpenCV's BGR to RGB, and resize to the model input.
    image = cv2.imread(filepath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (512, 512))
    # Normalize to a CHW tensor.
    x = tfms(image.copy())
    # Add a batch dimension and run inference; out[0] is the logit map of the
    # compiled model's first output.
    out = compiled_model_ir(x.unsqueeze(0).float().cpu().numpy())
    # Argmax over the class axis yields a (512, 512) mask of class ids.
    pred_mask = np.squeeze(np.argmax(out[0], 1)).astype(np.uint8)
    color_mask = convert_mask_to_rgb(pred_mask)
    # Blend the color mask over the frame (60% image, 40% mask).
    masked_image = cv2.addWeighted(image, 0.6, color_mask, 0.4, 0.1)
    # Report each detected class once, skipping background (id 0).
    present = [int(k) for k in np.unique(pred_mask) if k in items]
    surg = ", ".join(items[k] for k in present)
    return Image.fromarray(masked_image), surg
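# Example usage outside the Gradio UI (filename taken from the examples below):
#   overlay, labels = segment_image("R001_ch1_video_03_00-29-13-03.jpg")
#   overlay.save("overlay.png")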
demo = gr.Interface(
    fn=segment_image,
    inputs=gr.Image(type="filepath"),
    outputs=[gr.Image(type="pil"), gr.Text()],
    examples=["R001_ch1_video_03_00-29-13-03.jpg",
              "R002_ch1_video_01_01-07-25-19.jpg",
              "R003_ch1_video_05_00-22-42-23.jpg",
              "R004_ch1_video_01_01-12-22-00.jpg",
              "R005_ch1_video_03_00-19-10-11.jpg",
              "R006_ch1_video_01_00-45-02-10.jpg",
              "R013_ch1_video_03_00-40-17-11.jpg"],
    title="Davinci Eye (Quantized for CPU)",
)
demo.launch()
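# launch() above uses Gradio defaults; for a temporary public URL one could
# instead call demo.launch(share=True).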