Jie Hu committed
Commit 0bfe798 · 1 Parent(s): abf5f3c

init project

Files changed (1)
  1. modules/pe3r/images.py +27 -27
modules/pe3r/images.py CHANGED
@@ -53,32 +53,32 @@ class Images:
             self.np_images_size.append(np_shape)
 
 
-        # # -- sam2 images --
-        # img_mean = torch.tensor((0.485, 0.456, 0.406))[:, None, None]
-        # img_std = torch.tensor((0.229, 0.224, 0.225))[:, None, None]
-        # self.sam2_images = []
-        # # TODO
-        # self.sam2_video_size = (self.pil_images_size[0][1], self.pil_images_size[0][0])
-        # self.sam2_input_size = 512
-        # for pil_image in self.pil_images:
-        #     np_image = np.array(pil_image.resize((self.sam2_input_size, self.sam2_input_size)))
-        #     np_image = np_image / 255.0
-        #     sam2_image = torch.from_numpy(np_image).permute(2, 0, 1)
-        #     self.sam2_images.append(sam2_image)
-        # self.sam2_images = torch.stack(self.sam2_images)
-        # self.sam2_images -= img_mean
-        # self.sam2_images /= img_std
-        # self.sam2_images.to(device)
+        # -- sam2 images --
+        img_mean = torch.tensor((0.485, 0.456, 0.406))[:, None, None]
+        img_std = torch.tensor((0.229, 0.224, 0.225))[:, None, None]
+        self.sam2_images = []
+        # TODO
+        self.sam2_video_size = (self.pil_images_size[0][1], self.pil_images_size[0][0])
+        self.sam2_input_size = 512
+        for pil_image in self.pil_images:
+            np_image = np.array(pil_image.resize((self.sam2_input_size, self.sam2_input_size)))
+            np_image = np_image / 255.0
+            sam2_image = torch.from_numpy(np_image).permute(2, 0, 1)
+            self.sam2_images.append(sam2_image)
+        self.sam2_images = torch.stack(self.sam2_images)
+        self.sam2_images -= img_mean
+        self.sam2_images /= img_std
+        self.sam2_images.to(device)
 
-        # # -- sam1 images --
-        # self.sam1_images = []
-        # self.sam1_images_size = []
-        # self.sam1_input_size = 1024
-        # self.sam1_transform = ResizeLongestSide(self.sam1_input_size)
-        # for np_image in self.np_images:
-        #     sam1_image = self.sam1_transform.apply_image(np_image)
-        #     sam1_image_torch = torch.as_tensor(sam1_image, device=device)
-        #     transformed_image = sam1_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
+        # -- sam1 images --
+        self.sam1_images = []
+        self.sam1_images_size = []
+        self.sam1_input_size = 1024
+        self.sam1_transform = ResizeLongestSide(self.sam1_input_size)
+        for np_image in self.np_images:
+            sam1_image = self.sam1_transform.apply_image(np_image)
+            sam1_image_torch = torch.as_tensor(sam1_image, device=device)
+            transformed_image = sam1_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
 
-        #     self.sam1_images.append(transformed_image)
-        #     self.sam1_images_size.append(tuple(transformed_image.shape[-2:]))
+            self.sam1_images.append(transformed_image)
+            self.sam1_images_size.append(tuple(transformed_image.shape[-2:]))
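For reference, the SAM2 branch enabled here is standard ImageNet-style preprocessing: resize each PIL image to a fixed 512x512, scale to [0, 1], normalize per channel, and stack into a batch. A minimal standalone sketch of that logic follows (the helper name preprocess_sam2 and its parameters are illustrative, not part of this repository). One detail worth flagging: Tensor.to(device) is not in-place, so the final self.sam2_images.to(device) in the committed code discards its result and leaves the batch on its original device; the sketch assigns the return value instead.

import numpy as np
import torch

# ImageNet channel statistics, shaped (C, 1, 1) to broadcast over (C, H, W).
IMG_MEAN = torch.tensor((0.485, 0.456, 0.406))[:, None, None]
IMG_STD = torch.tensor((0.229, 0.224, 0.225))[:, None, None]

def preprocess_sam2(pil_images, input_size=512, device="cpu"):
    # Hypothetical helper mirroring the committed SAM2 branch.
    batch = []
    for pil_image in pil_images:
        # Resize to a fixed square, convert HWC uint8 -> [0, 1] floats.
        np_image = np.array(pil_image.resize((input_size, input_size))) / 255.0
        batch.append(torch.from_numpy(np_image).permute(2, 0, 1).float())  # HWC -> CHW
    images = (torch.stack(batch) - IMG_MEAN) / IMG_STD
    return images.to(device)  # .to() returns a new tensor; the result must be kept

The SAM1 branch instead relies on ResizeLongestSide (presumably segment_anything.utils.transforms.ResizeLongestSide; the import is outside this hunk), whose apply_image scales a NumPy image so its longest side equals the target length (1024 here) while preserving aspect ratio. That is why, unlike the fixed-size SAM2 batch, each transformed image's size is recorded separately in self.sam1_images_size.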