mkthoma committed on
Commit 74963fc · 1 Parent(s): f742771

app update

Files changed (1)
  1. app.py +92 -1
app.py CHANGED
@@ -1 +1,92 @@
- print("hello")
+ import torch
+ import torch.optim as optim
+ import lightning.pytorch as pl
+ from torchvision import transforms
+ from custom_yolo.custom_library.utils import cells_to_bboxes, non_max_suppression
+ from custom_yolo.custom_library import config
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ from custom_yolo.custom_library.lightning_model import YOLOv3Lightning
+ import cv2
+ import numpy as np
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ from custom_yolo.custom_library.gradio_utils import draw_predictions, YoloCAM
+ import gradio as gr
+ import os
+ import albumentations as A
+ from albumentations.pytorch import ToTensorV2
+
+
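+ # Load the trained weights on CPU; strict=False skips any keys in the saved
+ # state dict that do not match the current model definition.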
+ model = YOLOv3Lightning(config=config)
+ model.load_state_dict(torch.load("custom_yolo_model.pth", map_location=torch.device('cpu')), strict=False)
+ model.setup(stage="test")
+ classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
+
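+ # Anchors in config.ANCHORS are given as fractions of the image; multiplying by
+ # the grid sizes in config.S rescales them to cell units for each prediction scale.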
+ scaled_anchors = (torch.tensor(config.ANCHORS) * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(config.DEVICE)
+
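+ # Inference-time preprocessing: resize the longest side to IMAGE_SIZE, pad to a
+ # square, scale pixel values to [0, 1], and convert to a CHW tensor.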
+ transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=config.IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=config.IMAGE_SIZE, min_width=config.IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+ )
+
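+ # Run detection on one image and return the annotated image, optionally followed
+ # by a Grad-CAM overlay computed from the chosen layer of the model.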
+ def model_inference(image, iou_threshold=0.5, threshold=0.4, show_cam="No", transparency=0.5, target_layer=-2):
+     # Transforming image
+     transformed_image = transforms(image=image)["image"].unsqueeze(0)
+     output = model(transformed_image)
+     # Selecting layer for gradCAM
+     if target_layer == -2:
+         layer = [model.model.layers[-2]]
+     else:
+         layer = [model.model.layers[-1]]
+
+     cam = YoloCAM(model=model, target_layers=layer, use_cuda=False)
+
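+     # Decode the raw predictions from all three scales into a single list of
+     # box candidates for the one image in the batch.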
+     bboxes = [[] for _ in range(1)]
+     for i in range(3):
+         batch_size, num_anchors, S, _, _ = output[i].shape
+         anchor = scaled_anchors[i]
+         boxes_scale_i = cells_to_bboxes(output[i], anchor, S=S, is_preds=True)
+         for idx, box in enumerate(boxes_scale_i):
+             bboxes[idx] += box
+
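+     # Keep only the highest-confidence, non-overlapping boxes and draw them on the image.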
+     nms_boxes = non_max_suppression(bboxes[0], iou_threshold=iou_threshold, threshold=threshold, box_format="midpoint")
+     plot_img = draw_predictions(image.copy(), nms_boxes, class_labels=config.PASCAL_CLASSES)
+     if show_cam == "No":
+         return [plot_img]
+     else:
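+         # Blend the Grad-CAM heatmap with the resized input at the requested opacity.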
+         grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
+         img = cv2.resize(image, (416, 416))
+         img = np.float32(img) / 255
+         cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True, image_weight=transparency)
+         return [plot_img, cam_image]
+
+
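+ # Example (illustrative sketch only, assuming a local test image such as images/000014.jpg):
+ #   img = cv2.cvtColor(cv2.imread("images/000014.jpg"), cv2.COLOR_BGR2RGB)
+ #   detections, cam_overlay = model_inference(img, iou_threshold=0.5, threshold=0.4, show_cam="Yes")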
+ title = "Custom YOLOv3"
+ description = "PyTorch Lightning implementation of YOLOv3 on the Pascal VOC dataset. \
+ Supported classes are aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, dining table, dog, horse, motorbike, person, potted plant, sheep, sofa, train, and TV/monitor."
+
+ # examples = [
+ #     ["images/000014.jpg", 0.5, 0.4, True, 0.5],
+ #     ["images/000017.jpg", 0.6, 0.5, True, 0.5],
+ #     ["images/000018.jpg", 0.55, 0.45, True, 0.5],
+ #     ["images/000030.jpg", 0.5, 0.4, True, 0.5],
+ #     ["images/Puppies.jpg", 0.6, 0.7, True, 0.5],
+ # ]
+
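+ # Gradio UI: the image input and the slider/radio controls map one-to-one onto the
+ # model_inference arguments; results are shown in a gallery (detections, plus the
+ # Grad-CAM overlay when enabled).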
+ demo = gr.Interface(model_inference, inputs=[gr.Image(shape=(416, 416), label="Input an image"),
+                                              gr.Slider(0, 1, value=0.5, label="IOU Threshold"),
+                                              gr.Slider(0, 1, value=0.4, label="Threshold"),
+                                              gr.Radio(["Yes", "No"], value="No", label="Show GradCAM outputs"),
+                                              gr.Slider(0, 1, value=0.5, label="Opacity of GradCAM"),
+                                              gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?")],
+                     outputs=[gr.Gallery(rows=2, columns=1)],
+                     title=title, description=description)
+ demo.launch()
+
+