exorcist123 committed
Commit b5955cc · Parent: 11748c1

make a program more efficient

Files changed (2)
  1. app.py +25 -25
  2. crowd_counter/__init__.py +32 -12
app.py CHANGED
@@ -1,25 +1,25 @@
- import gradio as gr
- from crowd_counter import CrowdCounter
- from PIL import Image
- import cv2
-
- crowd_counter_engine = CrowdCounter()
-
- def predict(inp):
-     inp = Image.fromarray(inp.astype('uint8'), 'RGB')
-     response = crowd_counter_engine.inference(inp)
-     crowd_count = response[0]
-     pred_img= response[1]
-     return cv2.cvtColor(pred_img, cv2.COLOR_BGR2RGB), crowd_count
-
- title = "Crowd Counter Demo"
- desc = "A Demo of Proposal Point Prediction for Crowd Counting - Powered by P2PNet"
- examples = [
-     ["images/img-1.jpg"],
-     ["images/img-2.jpg"],
-     ["images/img-3.jpg"],
- ]
- inputs = gr.inputs.Image(label="Image of Crowd")
- outputs = [gr.outputs.Image(label="Proposal Points Prediction",type = "numpy"), gr.outputs.Label(label="Predicted Count",type = "numpy")]
- gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=title, description=desc, examples=examples,
-              allow_flagging=False).launch()
+ import gradio as gr
+ from crowd_counter import CrowdCounter
+ from PIL import Image
+ import cv2
+
+ crowd_counter_engine = CrowdCounter()
+
+ def predict(inp):
+     inp = Image.fromarray(inp.astype('uint8'), 'RGB')
+     response = crowd_counter_engine.inference(inp)
+     crowd_count = response[0]
+     pred_img= response[1]
+     return cv2.cvtColor(pred_img, cv2.COLOR_BGR2RGB), crowd_count
+
+ title = "Crowd Counter Demo"
+ desc = "A Demo of Proposal Point Prediction for Crowd Counting - Powered by P2PNet"
+ examples = [
+     ["images/img-1.jpg"],
+     ["images/img-2.jpg"],
+     ["images/img-3.jpg"],
+ ]
+ inputs = gr.inputs.Image(label="Image of Crowd")
+ outputs = [gr.outputs.Image(label="Proposal Points Prediction",type = "numpy"), gr.outputs.Label(label="Predicted Count",type = "numpy")]
+ gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=title, description=desc, examples=examples,
+              allow_flagging=False).launch(share=True)
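
Note: the only functional change in app.py is the last line, where launch() becomes launch(share=True) so Gradio opens a temporary public share link in addition to the local server. The gr.inputs / gr.outputs namespaces used above are the legacy Gradio 2.x API and were removed in Gradio 3.x; a rough, untested sketch of the same interface on the current component API (reusing the predict, title, desc, and examples objects defined above) could look like:

import gradio as gr

# Hypothetical port to the post-3.x Gradio component API; predict, title,
# desc, and examples are the objects defined in app.py above.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(label="Image of Crowd", type="numpy"),
    outputs=[
        gr.Image(label="Proposal Points Prediction", type="numpy"),
        gr.Label(label="Predicted Count"),
    ],
    title=title,
    description=desc,
    examples=examples,
    allow_flagging="never",
)
demo.launch(share=True)  # tunnels a temporary public URL to the locally running app
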
crowd_counter/__init__.py CHANGED
@@ -40,7 +40,6 @@ class CrowdCounter:
            line=2,
            output_dir="./crowd_counter/preds",
            weight_path="./crowd_counter/weights/SHTechA.pth",
-             # gpu_id=0,
        )

        # device = torch.device('cuda')
@@ -68,14 +67,25 @@ class CrowdCounter:
    def test(
        self, args: Args, img_raw: Image.Image , debug: bool = False,
    ) -> tuple[any, Image.Image, torch.Tensor]:
-
-         # round the size
-         width, height = img_raw.size
-         new_width = width // 128 * 128
-         new_height = height // 128 * 128
-         img_raw = img_raw.resize((new_width, new_height), Image.LANCZOS)
+
+         ori_width, ori_height = img_raw.size
+         max_dimension = 384
+
+         if ori_width > max_dimension or ori_height > max_dimension:
+             scale_factor = max_dimension / max(ori_width, ori_height)
+             new_width = int(ori_width * scale_factor)
+             new_height = int(ori_height * scale_factor)
+             new_width = new_width // 128 * 128
+             new_height = new_height // 128 * 128
+             img_resized = img_raw.resize((new_width, new_height), Image.LANCZOS)
+         else:
+             img_resized = img_raw
+             new_width = ori_width
+             new_height = ori_height
+
+         print(new_width, new_height)
        # pre-proccessing
-         img = self.transform(img_raw)
+         img = self.transform(img_resized)

        samples = torch.Tensor(img).unsqueeze(0)
        samples = samples.to(self.device)
@@ -94,15 +104,25 @@ class CrowdCounter:
            outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist()
        )

+         scale_factor_width = ori_width / new_width
+         scale_factor_height = ori_height / new_height
+
+         adjusted_points = []
+         for p in points:
+             # Adjust each point's coordinates
+             adjusted_x = int(p[0] * scale_factor_width)
+             adjusted_y = int(p[1] * scale_factor_height)
+             adjusted_points.append((adjusted_x, adjusted_y))
+
        # draw the predictions
-         size = 5
+         size = 3
        img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)

-         for p in points:
+         for p in adjusted_points:
            img_to_draw = cv2.circle(
-                 img_to_draw, (int(p[0]), int(p[1])), size, (255, 0, 0), -1
+                 img_to_draw, (int(p[0]), int(p[1])), size, (0, 0, 255), -1
            )
-         return points, img_to_draw, conf
+         return adjusted_points, img_to_draw, conf

    # Function to process and save images
    def inference(self, img_raw: Image.Image) -> tuple[int, Image.Image]:
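
Taken together, the changes to test() downscale large inputs so the longer side is at most 384 px (still rounded down to a multiple of 128 in that branch, as before) before running the network, then rescale the predicted points back to original-image coordinates so they are drawn on the full-resolution photo. Running the backbone on a smaller tensor is where the efficiency gain comes from. A minimal standalone sketch of the same idea, with hypothetical helper names and an extra guard against a zero-sized dimension that the commit itself does not add:

from PIL import Image

def resize_for_inference(img, max_dim=384, multiple=128):
    # Downscale so the longer side is at most max_dim, keeping both sides a
    # multiple of `multiple`; images already small enough pass through unchanged.
    w, h = img.size
    if max(w, h) <= max_dim:
        return img, (w, h)
    scale = max_dim / max(w, h)
    new_w = max(multiple, int(w * scale) // multiple * multiple)  # floor to 128, but never 0 (guard not in the commit)
    new_h = max(multiple, int(h * scale) // multiple * multiple)
    return img.resize((new_w, new_h), Image.LANCZOS), (new_w, new_h)

def rescale_points(points, ori_size, new_size):
    # Map (x, y) points predicted on the resized image back to original pixels.
    (ow, oh), (nw, nh) = ori_size, new_size
    return [(int(x * ow / nw), int(y * oh / nh)) for x, y in points]

The trade-off is accuracy rather than correctness: very dense crowds can lose small heads once the image is squeezed to 384 px, so the cap is a tunable compromise between speed and count quality.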