Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -18,8 +18,33 @@ inv_normalize = transforms.Normalize(
|
|
18 |
classes = ('plane', 'car', 'bird', 'cat', 'deer',
|
19 |
'dog', 'frog', 'horse', 'ship', 'truck')
|
20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
def inference(input_img, transparency = 0.5, target_layer_number = -1):
|
|
|
|
|
|
|
22 |
org_img = input_img
|
|
|
23 |
transform = transforms.ToTensor()
|
24 |
input_img = transform(input_img)
|
25 |
input_img = input_img
|
@@ -39,20 +64,23 @@ def inference(input_img, transparency = 0.5, target_layer_number = -1):
|
|
39 |
visualization = show_cam_on_image(org_img/255, grayscale_cam, use_rgb=True, image_weight=transparency)
|
40 |
return classes[prediction[0].item()], visualization, confidences
|
41 |
|
|
|
|
|
|
|
42 |
demo = gr.Interface(
|
43 |
inference,
|
44 |
inputs = [
|
45 |
-
gr.Image(
|
46 |
(0, 1, value = 0.5, label="Overall Opacity of Image"),
|
47 |
gr.Slider(-2, -1, value = -2, step=1, label="Which Layer?")
|
48 |
],
|
49 |
outputs = [
|
50 |
"text",
|
51 |
-
gr.Image(
|
52 |
gr.Label(num_top_classes=3)
|
53 |
],
|
54 |
-
title =
|
55 |
-
description =
|
56 |
-
examples =
|
57 |
)
|
58 |
demo.launch()
|
|
|
18 |
# CIFAR-10 class labels, index-aligned with the model's output logits
# (classes[i] names the class predicted by logit i).
classes = (
    'plane', 'car', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
)
|
20 |
|
21 |
+
def resize_image_pil(image, new_width, new_height):
    """Fit *image* into a (new_width x new_height) canvas, preserving aspect.

    The input (array-like or PIL image) is converted to a PIL image,
    uniformly scaled by the smaller of the two axis ratios (so it never
    exceeds the target box), then cropped to exactly the requested size.

    NOTE(review): when the aspect ratios differ, the crop extends past the
    scaled image, and PIL fills the excess with black — presumably
    acceptable for the 32x32 CIFAR pipeline; confirm.

    Returns the resulting PIL image of size (new_width, new_height).
    """
    pil_img = Image.fromarray(np.array(image))
    src_w, src_h = pil_img.size

    # The tighter of the two axis ratios keeps the aspect ratio intact.
    scale = min(new_width / src_w, new_height / src_h)

    # NEAREST resampling — fast, no interpolation artifacts at tiny sizes.
    scaled = pil_img.resize((int(src_w * scale), int(src_h * scale)), Image.NEAREST)

    # Crop (and implicitly pad) to the exact requested dimensions.
    return scaled.crop((0, 0, new_width, new_height))
|
41 |
+
|
42 |
def inference(input_img, transparency = 0.5, target_layer_number = -1):
|
43 |
+
input_img = resize_image_pil(input_img, 32, 32)
|
44 |
+
|
45 |
+
input_img = np.array(input_img)
|
46 |
org_img = input_img
|
47 |
+
input_img = input_img.reshape((32, 32, 3))
|
48 |
transform = transforms.ToTensor()
|
49 |
input_img = transform(input_img)
|
50 |
input_img = input_img
|
|
|
64 |
visualization = show_cam_on_image(org_img/255, grayscale_cam, use_rgb=True, image_weight=transparency)
|
65 |
return classes[prediction[0].item()], visualization, confidences
|
66 |
|
67 |
+
# --- Gradio UI metadata -------------------------------------------------
title = "CIFAR10 trained on ResNet18 Model with GradCAM"
description = "A simple Gradio interface to infer on ResNet model, and get GradCAM results"
# Each example row mirrors inference()'s signature: image, opacity, layer.
examples = [["cat.jpg", 0.5, -1], ["dog.jpg", 0.5, -1]]

# Wire the inference function to image/slider inputs and
# text/image/label outputs.
demo = gr.Interface(
    inference,
    inputs=[
        gr.Image(width=256, height=256, label="Input Image"),
        gr.Slider(0, 1, value=0.5, label="Overall Opacity of Image"),
        gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?"),
    ],
    outputs=[
        "text",
        gr.Image(width=256, height=256, label="Output"),
        gr.Label(num_top_classes=3),
    ],
    title=title,
    description=description,
    examples=examples,
)
demo.launch()
|