crystal-technologies committed on
Commit 2880d52 · 1 Parent(s): a23ef1a

Upload 6 files

Files changed (6)
  1. analyze.py +31 -0
  2. dataset.py +115 -0
  3. dense.py +85 -0
  4. describe.py +239 -0
  5. evaluate.py +83 -0
  6. faces.py +84 -0
analyze.py ADDED
@@ -0,0 +1,31 @@
+ from CircumSpect.dense import describe_image
+ from CircumSpect.faces import recognize_users
+ import cv2
+ import time
+
+
+ def analyze_image(cap):
+     users, recognized = recognize_users(cap)
+
+     captions, annotated = describe_image(recognized)
+
+     if not users:
+         users = "No faces identified"
+     elif isinstance(users, list):
+         users = ", ".join(users)
+
+     output = f"""Faces: {users}
+ View description: {", ".join(captions)}"""
+     return output, annotated
+
+
+ # cap = cv2.VideoCapture(0)
+ # while True:
+ #     output, image = analyze_image(cap)
+ #     print(output)
+
+ #     cv2.imshow("Annotated Image", image)
+ #     if cv2.waitKey(1) & 0xFF == ord('q'):
+ #         break
+
+ # cap.release()
+ # cv2.destroyAllWindows()
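
A minimal driver sketch for analyze_image, assuming a webcam at index 0 and that this file is importable as CircumSpect.analyze (the import path is an assumption about how the repo is packaged):

import cv2
from CircumSpect.analyze import analyze_image  # import path assumed from this commit's layout

cap = cv2.VideoCapture(0)  # webcam index 0 is an assumption
try:
    output, annotated = analyze_image(cap)   # one face-recognition + dense-captioning pass
    print(output)
    cv2.imwrite("annotated.png", annotated)  # write to disk instead of cv2.imshow for headless runs
finally:
    cap.release()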
dataset.py ADDED
@@ -0,0 +1,115 @@
+ import os
+ import pickle
+
+ import h5py
+ import torch
+ from torch.utils.data import Dataset
+ import torchvision.transforms as transforms
+ from PIL import Image
+
+
+ class DenseCapDataset(Dataset):
+
+     @staticmethod
+     def collate_fn(batch):
+         """Use in torch.utils.data.DataLoader."""
+
+         return tuple(zip(*batch))  # as tuples instead of stacked tensors
+
+     @staticmethod
+     def get_transform():
+         """More elaborate transform utils live in torchvision/references/detection/transforms.py."""
+
+         transform = transforms.Compose([
+             transforms.ToTensor(),
+             transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+         ])
+
+         return transform
+
+     def __init__(self, img_dir_root, vg_data_path, look_up_tables_path, dataset_type=None, transform=None):
+
+         assert dataset_type in {None, 'train', 'test', 'val'}
+
+         super(DenseCapDataset, self).__init__()
+
+         self.img_dir_root = img_dir_root
+         self.vg_data_path = vg_data_path
+         self.look_up_tables_path = look_up_tables_path
+         self.dataset_type = dataset_type  # if dataset_type is None, all data will be used
+         self.transform = transform
+
+         # === load data here ====
+
+         self.vg_data = h5py.File(vg_data_path, 'r')
+         with open(look_up_tables_path, 'rb') as f:
+             self.look_up_tables = pickle.load(f)
+
+     def set_dataset_type(self, dataset_type, verbose=True):
+
+         assert dataset_type in {None, 'train', 'test', 'val'}
+
+         if verbose:
+             print('[DenseCapDataset]: {} switch to {}'.format(self.dataset_type, dataset_type))
+
+         self.dataset_type = dataset_type
+
+     def __getitem__(self, idx):
+
+         vg_idx = self.look_up_tables['split'][self.dataset_type][idx] if self.dataset_type else idx
+
+         img_path = os.path.join(self.img_dir_root, self.look_up_tables['idx_to_directory'][vg_idx],
+                                 self.look_up_tables['idx_to_filename'][vg_idx])
+
+         img = Image.open(img_path).convert("RGB")
+         if self.transform is not None:
+             img = self.transform(img)
+         else:
+             img = transforms.ToTensor()(img)
+
+         first_box_idx = self.vg_data['img_to_first_box'][vg_idx]
+         last_box_idx = self.vg_data['img_to_last_box'][vg_idx]
+
+         boxes = torch.as_tensor(self.vg_data['boxes'][first_box_idx: last_box_idx + 1], dtype=torch.float32)
+         caps = torch.as_tensor(self.vg_data['captions'][first_box_idx: last_box_idx + 1], dtype=torch.long)
+         caps_len = torch.as_tensor(self.vg_data['lengths'][first_box_idx: last_box_idx + 1], dtype=torch.long)
+
+         targets = {
+             'boxes': boxes,
+             'caps': caps,
+             'caps_len': caps_len,
+         }
+
+         info = {
+             'idx': vg_idx,
+             'dir': self.look_up_tables['idx_to_directory'][vg_idx],
+             'file_name': self.look_up_tables['idx_to_filename'][vg_idx]
+         }
+
+         return img, targets, info
+
+     def __len__(self):
+
+         if self.dataset_type:
+             return len(self.look_up_tables['split'][self.dataset_type])
+         else:
+             return len(self.look_up_tables['filename_to_idx'])
+
+
+ if __name__ == '__main__':
+
+     IMG_DIR_ROOT = './data/visual-genome'
+     VG_DATA_PATH = './data/VG-regions.h5'
+     LOOK_UP_TABLES_PATH = './data/VG-regions-dicts.pkl'
+
+     dcd = DenseCapDataset(IMG_DIR_ROOT, VG_DATA_PATH, LOOK_UP_TABLES_PATH)
+
+     print('all', len(dcd))
+     print(dcd[0])
+
+     for data_type in {'train', 'test', 'val'}:
+
+         dcd.set_dataset_type(data_type)
+
+         print(data_type, len(dcd))
+         print(dcd[0])
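
A sketch of how DenseCapDataset plugs into a standard DataLoader with its collate_fn; the data paths below are placeholders and must point at Visual Genome images plus the preprocessed VG-regions HDF5/pickle files this dataset expects:

from torch.utils.data import DataLoader
from dataset import DenseCapDataset  # dataset.py from this commit

# Placeholder paths; replace with your Visual Genome layout.
ds = DenseCapDataset('./data/visual-genome',
                     './data/VG-regions.h5',
                     './data/VG-regions-dicts.pkl',
                     dataset_type='train',
                     transform=DenseCapDataset.get_transform())

loader = DataLoader(ds, batch_size=2, shuffle=True,
                    collate_fn=DenseCapDataset.collate_fn)  # batches stay tuples, not stacked tensors

imgs, targets, infos = next(iter(loader))
print(len(imgs), targets[0]['boxes'].shape, infos[0]['file_name'])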
dense.py ADDED
@@ -0,0 +1,85 @@
+ from matplotlib.patches import Rectangle
+ from CircumSpect.describe import process_image
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ import ocrmac
+ import json
+ import time
+ import cv2
+ import os
+
+ time.sleep(2)
+
+ with open('pwd.txt', 'r') as pwd:
+     folder_location = pwd.read()
+
+
+ def crop_and_save_image(img, box, output_path):
+
+     # Convert box coordinates to integers
+     box = [int(coord) for coord in box]
+
+     # Crop the image to the specified region of interest
+     cropped_img = img[box[1]:box[3], box[0]:box[2]]
+     cv2.imwrite(output_path, cropped_img)
+
+
+ def visualize_result(image_file_path, result):
+     assert isinstance(result, list)
+
+     og_img = cv2.imread(image_file_path)
+     img = cv2.imread(image_file_path)
+
+     captions = []
+
+     for r in result:
+         box = r['box']
+         caption = r['cap']
+         if "<unk>" in caption:
+             # OCR the cropped region that was just written and substitute the recognized text
+             crop_and_save_image(og_img, box, "ocr.png")
+             recognized = ocrmac.OCR("ocr.png").recognize()
+             caption = caption.replace("<unk>", recognized[0][0])
+         cv2.rectangle(img, (int(box[0]), int(box[1])),
+                       (int(box[2]), int(box[3])), (0, 0, 255), 2)
+         cv2.rectangle(img, (int(box[0]), int(box[1])),
+                       (int(box[2]), int(box[1]) - 50), (200, 200, 200), -1)
+         cv2.rectangle(img, (int(box[0]), int(box[1])),
+                       (int(box[2]), int(box[1]) - 50), (0, 0, 0), 2)
+         cv2.putText(img, caption, (int(box[0]), int(box[1]) - 10),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
+         captions.append(caption)
+
+     cv2.imwrite("output.png", img)
+
+     return captions, img
+
+
+ def describe_image(frame):
+     IMG_FILE_PATH = f'{folder_location}image.png'
+
+     cv2.imwrite(IMG_FILE_PATH, frame)
+     process_image(IMG_FILE_PATH, folder_location)
+
+     RESULT_JSON_PATH = f'{folder_location}CircumSpect/result.json'
+     with open(RESULT_JSON_PATH, 'r') as f:
+         results = json.load(f)
+
+     TOP_K = 10
+     assert IMG_FILE_PATH in results.keys()
+     captions, frame = visualize_result(
+         IMG_FILE_PATH, results[IMG_FILE_PATH][:TOP_K])
+
+     return captions, frame
+
+
+ if __name__ == "__main__":
+     cap = cv2.VideoCapture(0)
+     time.sleep(2)
+     start = time.time()
+     while True:
+         end = time.time()
+         print(end - start)
+         _, img = cap.read()
+         caption, frame = describe_image(img)
+         cv2.imshow("CircumSpect", frame)
+         cv2.waitKey(1)
+         start = time.time()
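
A single-image sketch of describe_image without the webcam loop; it assumes the package layout matching the imports above and that pwd.txt holds the project root (with a trailing slash) before this module is imported:

import cv2
from CircumSpect.dense import describe_image  # import path assumed from this commit's layout

frame = cv2.imread('Sample Images/Image.jpeg')   # any BGR image loaded with OpenCV
captions, annotated = describe_image(frame)      # writes image.png and CircumSpect/result.json under the project root
print(captions)
cv2.imwrite('annotated.png', annotated)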
describe.py ADDED
@@ -0,0 +1,239 @@
+ import os
+ import h5py
+ import json
+ import pickle
+ import argparse
+
+ import torch
+ import numpy as np
+ from PIL import Image
+ from tqdm import tqdm
+ import torchvision.transforms as transforms
+
+ from CircumSpect.model.densecap import densecap_resnet50_fpn
+
+ model = None
+ first_run = True
+
+
+ def load_model(console_args):
+
+     with open(console_args.config_json, 'r') as f:
+         model_args = json.load(f)
+
+     model = densecap_resnet50_fpn(backbone_pretrained=model_args['backbone_pretrained'],
+                                   return_features=console_args.extract,
+                                   feat_size=model_args['feat_size'],
+                                   hidden_size=model_args['hidden_size'],
+                                   max_len=model_args['max_len'],
+                                   emb_size=model_args['emb_size'],
+                                   rnn_num_layers=model_args['rnn_num_layers'],
+                                   vocab_size=model_args['vocab_size'],
+                                   fusion_type=model_args['fusion_type'],
+                                   box_detections_per_img=console_args.box_per_img)
+
+     checkpoint = torch.load(console_args.model_checkpoint, map_location=torch.device('cpu'))
+     model.load_state_dict(checkpoint['model'])
+
+     if console_args.verbose and 'results_on_val' in checkpoint.keys():
+         print('[INFO]: checkpoint {} loaded'.format(console_args.model_checkpoint))
+         print('[INFO]: corresponding performance on val set:')
+         for k, v in checkpoint['results_on_val'].items():
+             if not isinstance(v, dict):
+                 print('        {}: {:.3f}'.format(k, v))
+
+     return model
+
+
+ def get_image_path(console_args):
+
+     img_list = []
+
+     if os.path.isdir(console_args.img_path):
+         for file_name in os.listdir(console_args.img_path):
+             img_list.append(os.path.join(console_args.img_path, file_name))
+     else:
+         img_list.append(console_args.img_path)
+
+     return img_list
+
+
+ def img_to_tensor(img_list):
+
+     assert isinstance(img_list, list) and len(img_list) > 0
+
+     img_tensors = []
+
+     for img_path in img_list:
+
+         img = Image.open(img_path).convert("RGB")
+
+         img_tensors.append(transforms.ToTensor()(img))
+
+     return img_tensors
+
+
+ def describe_images(model, img_list, device, console_args):
+
+     assert isinstance(img_list, list)
+     assert isinstance(console_args.batch_size, int) and console_args.batch_size > 0
+
+     all_results = []
+
+     with torch.no_grad():
+
+         model.to(device)
+         model.eval()
+
+         for i in tqdm(range(0, len(img_list), console_args.batch_size), disable=not console_args.verbose):
+
+             image_tensors = img_to_tensor(img_list[i:i + console_args.batch_size])
+             input_ = [t.to(device) for t in image_tensors]
+
+             results = model(input_)
+
+             all_results.extend([{k: v.cpu() for k, v in r.items()} for r in results])
+
+     return all_results
+
+
+ def save_results_to_file(img_list, all_results, console_args):
+
+     with open(os.path.join(console_args.lut_path), 'rb') as f:
+         look_up_tables = pickle.load(f)
+
+     idx_to_token = look_up_tables['idx_to_token']
+
+     results_dict = {}
+     if console_args.extract:
+         total_box = sum(len(r['boxes']) for r in all_results)
+         start_idx = 0
+         img_idx = 0
+         h = h5py.File(os.path.join(console_args.result_dir, 'box_feats.h5'), 'w')
+         h.create_dataset('feats', (total_box, all_results[0]['feats'].shape[1]), dtype=np.float32)
+         h.create_dataset('boxes', (total_box, 4), dtype=np.float32)
+         h.create_dataset('start_idx', (len(img_list),), dtype=np.int64)
+         h.create_dataset('end_idx', (len(img_list),), dtype=np.int64)
+
+     for img_path, results in zip(img_list, all_results):
+
+         if console_args.verbose:
+             print('[Result] ==== {} ====='.format(img_path))
+
+         results_dict[img_path] = []
+         for box, cap, score in zip(results['boxes'], results['caps'], results['scores']):
+
+             r = {
+                 'box': [round(c, 2) for c in box.tolist()],
+                 'score': round(score.item(), 2),
+                 'cap': ' '.join(idx_to_token[idx] for idx in cap.tolist()
+                                 if idx_to_token[idx] not in ['<pad>', '<bos>', '<eos>'])
+             }
+
+             if console_args.verbose and r['score'] > 0.9:
+                 print('        SCORE {}  BOX {}'.format(r['score'], r['box']))
+                 print('        CAP {}\n'.format(r['cap']))
+
+             results_dict[img_path].append(r)
+
+         if console_args.extract:
+             box_num = len(results['boxes'])
+             h['feats'][start_idx: start_idx + box_num] = results['feats'].cpu().numpy()
+             h['boxes'][start_idx: start_idx + box_num] = results['boxes'].cpu().numpy()
+             h['start_idx'][img_idx] = start_idx
+             h['end_idx'][img_idx] = start_idx + box_num - 1
+             start_idx += box_num
+             img_idx += 1
+
+     if console_args.extract:
+         h.close()
+         # save order of img to a txt
+         if len(img_list) > 1:
+             with open(os.path.join(console_args.result_dir, 'feat_img_mappings.txt'), 'w') as f:
+                 for img_path in img_list:
+                     f.writelines(os.path.split(img_path)[1] + '\n')
+
+     if not os.path.exists(console_args.result_dir):
+         os.mkdir(console_args.result_dir)
+     with open(os.path.join(console_args.result_dir, 'result.json'), 'w') as f:
+         json.dump(results_dict, f, indent=2)
+
+     if console_args.verbose:
+         print('[INFO] results saved to {}'.format(os.path.join(console_args.result_dir, 'result.json')))
+         if console_args.extract:
+             print('[INFO] feats saved to {}'.format(os.path.join(console_args.result_dir, 'box_feats.h5')))
+             print('[INFO] order saved to {}'.format(os.path.join(console_args.result_dir, 'feat_img_mappings.txt')))
+
+
+ def validate_box_feat(model, all_results, device, console_args):
+
+     with torch.no_grad():
+
+         box_describer = model.roi_heads.box_describer
+         box_describer.to(device)
+         box_describer.eval()
+
+         if console_args.verbose:
+             print('[INFO] start validating box features...')
+         for results in tqdm(all_results, disable=not console_args.verbose):
+
+             captions = box_describer(results['feats'].to(device))
+
+             assert (captions.cpu() == results['caps']).all().item(), 'caption mismatch'
+
+     if console_args.verbose:
+         print('[INFO] validate box feat done, no problem')
+
+
+ def main(console_args):
+     global model
+     global first_run
+     device = torch.device("cuda" if torch.cuda.is_available() and not console_args.cpu else "cpu")
+
+     # === prepare images ====
+     img_list = get_image_path(console_args)
+
+     # === prepare model ====
+     if first_run:
+         model = load_model(console_args)
+         first_run = False
+
+     # === inference ====
+     all_results = describe_images(model, img_list, device, console_args)
+
+     # === save results ====
+     save_results_to_file(img_list, all_results, console_args)
+
+     if console_args.extract and console_args.check:
+         validate_box_feat(model, all_results, device, console_args)
+
+
+ def process_image(image, folder_location):
+     global args
+     # The parser documents the available options; its defaults are overridden by the
+     # hard-coded Namespace below, so no command-line parsing actually happens here.
+     parser = argparse.ArgumentParser(description='Do dense captioning')
+     parser.add_argument('--config_json', type=str, help="path of the json file which stores the model configuration")
+     parser.add_argument('--lut_path', type=str, default=f'{folder_location}CircumSpect/data/VG-regions-dicts-lite.pkl', help='look up table path')
+     parser.add_argument('--model_checkpoint', type=str, help="path of the trained model checkpoint")
+     parser.add_argument('--img_path', type=str, help="path of images; should be a file or a directory containing only images")
+     parser.add_argument('--result_dir', type=str, default='.',
+                         help="path of the directory to save the output file")
+     parser.add_argument('--box_per_img', type=int, default=100, help='max boxes to describe per image')
+     parser.add_argument('--batch_size', type=int, default=1, help="useful when img_path is a directory")
+     parser.add_argument('--extract', action='store_true', help='whether to extract features')
+     parser.add_argument('--cpu', action='store_true', help='whether to use the cpu to compute')
+     parser.add_argument('--verbose', action='store_true', help='whether to output info')
+     parser.add_argument('--check', action='store_true', help='whether to validate box feats by regenerating sentences')
+     args = argparse.Namespace()
+
+     args.config_json = f'{folder_location}CircumSpect/model_params/train_all_val_all_bz_2_epoch_10_inject_init/config.json'
+     args.lut_path = f'{folder_location}CircumSpect/data/VG-regions-dicts-lite.pkl'
+     args.model_checkpoint = f'{folder_location}models/train_all_val_all_bz_2_epoch_10_inject_init.pth.tar'
+     args.img_path = image
+     args.result_dir = f'{folder_location}CircumSpect/'
+     args.box_per_img = 100
+     args.batch_size = 2
+     args.extract = False
+     args.cpu = False
+     args.verbose = True
+     args.check = False
+
+     main(args)
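
A small sketch of consuming the result.json written by save_results_to_file(); the path below assumes the result_dir hard-coded in process_image():

import json

# result.json maps each processed image path to the list of region dicts built above.
with open('CircumSpect/result.json') as f:
    results = json.load(f)

for img_path, regions in results.items():
    best = max(regions, key=lambda r: r['score'])
    print(img_path, best['box'], best['score'], best['cap'])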
evaluate.py ADDED
@@ -0,0 +1,83 @@
+ import torch
+ from tqdm import tqdm
+
+ from utils.data_loader import DenseCapDataset, DataLoaderPFG
+ from model.evaluator import DenseCapEvaluator
+
+
+ def quality_check(model, dataset, idx_to_token, device, max_iter=-1):
+
+     model.to(device)
+     data_loader = DataLoaderPFG(dataset, batch_size=1, shuffle=False, num_workers=1,
+                                 pin_memory=True, collate_fn=DenseCapDataset.collate_fn)
+
+     print('[quality check]')
+     for i, (img, targets, info) in enumerate(data_loader):
+
+         img = [img_tensor.to(device) for img_tensor in img]
+         targets = [{k: v.to(device) for k, v in target.items()} for target in targets]
+
+         with torch.no_grad():
+             model.eval()
+             model.return_features = False
+             detections = model(img)
+
+         for j in range(len(targets)):
+             print('<{}>'.format(info[j]['file_name']))
+             print('=== ground truth ===')
+             for box, cap, cap_len in zip(targets[j]['boxes'], targets[j]['caps'], targets[j]['caps_len']):
+                 print('box:', box.tolist())
+                 print('len:', cap_len.item())
+                 print('cap:', ' '.join(idx_to_token[idx] for idx in cap.tolist() if idx_to_token[idx] != '<pad>'))
+                 print('-' * 20)
+
+             print('=== predict ===')
+             for box, cap, score in zip(detections[j]['boxes'], detections[j]['caps'], detections[j]['scores']):
+                 print('box:', [round(c, 2) for c in box.tolist()])
+                 print('score:', round(score.item(), 2))
+                 print('cap:', ' '.join(idx_to_token[idx] for idx in cap.tolist() if idx_to_token[idx] != '<pad>'))
+                 print('-' * 20)
+
+         if i >= max_iter > 0:
+             break
+
+
+ def quantity_check(model, dataset, idx_to_token, device, max_iter=-1, verbose=True):
+
+     model.to(device)
+     data_loader = DataLoaderPFG(dataset, batch_size=4, shuffle=False, num_workers=2,
+                                 pin_memory=True, collate_fn=DenseCapDataset.collate_fn)
+
+     evaluator = DenseCapEvaluator(list(model.roi_heads.box_describer.special_idx.keys()))
+
+     print('[quantity check]')
+     for i, (img, targets, info) in tqdm(enumerate(data_loader), total=len(data_loader)):
+
+         img = [img_tensor.to(device) for img_tensor in img]
+         targets = [{k: v.to(device) for k, v in target.items()} for target in targets]
+
+         with torch.no_grad():
+             model.eval()
+             model.return_features = False
+             detections = model(img)
+
+         for j in range(len(targets)):
+             scores = detections[j]['scores']
+             boxes = detections[j]['boxes']
+             text = [' '.join(idx_to_token[idx] for idx in cap.tolist() if idx_to_token[idx] != '<pad>')
+                     for cap in detections[j]['caps']]
+             target_boxes = targets[j]['boxes']
+             target_text = [' '.join(idx_to_token[idx] for idx in cap.tolist() if idx_to_token[idx] != '<pad>')
+                            for cap in targets[j]['caps']]
+             img_id = info[j]['file_name']
+
+             evaluator.add_result(scores, boxes, text, target_boxes, target_text, img_id)
+
+         if i >= max_iter > 0:
+             break
+
+     results = evaluator.evaluate(verbose)
+     if verbose:
+         print('MAP: {:.3f} DET_MAP: {:.3f}'.format(results['map'], results['detmap']))
+
+     return results
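
A sketch of running quantity_check on the validation split. It assumes the utils.data_loader and model packages imported by evaluate.py are present in the checkout, and all paths are placeholders mirroring the defaults hard-coded in describe.py:

import json
import pickle
import torch

from dataset import DenseCapDataset                # dataset.py from this commit
from model.densecap import densecap_resnet50_fpn   # same factory describe.py wraps
from evaluate import quantity_check

# Placeholder paths; adjust to your checkout.
CONFIG = 'model_params/train_all_val_all_bz_2_epoch_10_inject_init/config.json'
CKPT = 'models/train_all_val_all_bz_2_epoch_10_inject_init.pth.tar'
LUT = './data/VG-regions-dicts.pkl'

with open(CONFIG) as f:
    p = json.load(f)
model = densecap_resnet50_fpn(backbone_pretrained=p['backbone_pretrained'],
                              feat_size=p['feat_size'], hidden_size=p['hidden_size'],
                              max_len=p['max_len'], emb_size=p['emb_size'],
                              rnn_num_layers=p['rnn_num_layers'], vocab_size=p['vocab_size'],
                              fusion_type=p['fusion_type'])
model.load_state_dict(torch.load(CKPT, map_location='cpu')['model'])

with open(LUT, 'rb') as f:
    idx_to_token = pickle.load(f)['idx_to_token']

dataset = DenseCapDataset('./data/visual-genome', './data/VG-regions.h5', LUT, dataset_type='val')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

results = quantity_check(model, dataset, idx_to_token, device, max_iter=100)
print(results['map'], results['detmap'])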
faces.py ADDED
@@ -0,0 +1,84 @@
+ import face_recognition
+ from PIL import Image
+ import numpy as np
+ import pickle
+ import cv2
+ import os
+
+ with open('pwd.txt', 'r') as pwd:
+     folder_location = pwd.read()
+
+
+ def find_encodings(images_):
+     encode_list = []
+     for imgs in images_:
+         imgs = np.array(Image.open(f'{folder_location}img/face_recognition/{imgs}'))
+         imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)
+         encode = face_recognition.face_encodings(imgs)[0]
+         encode_list.append(encode)
+     return encode_list
+
+
+ def recognize_users(cap):
+     path = f'{folder_location}img/face_recognition'
+     recognized_users = []  # List to store names of recognized users
+     images = []
+     classNames = []
+     myList = os.listdir(path)
+
+     for cl in myList:
+         curImg = cv2.imread(f'{path}/{cl}')
+         images.append(curImg)
+         classNames.append(os.path.splitext(cl)[0])
+     try:
+         with open(f'{folder_location}models/face_rec', 'rb') as file:
+             encodeListKnown = pickle.load(file)
+     except Exception:
+         # No cached encodings yet (or the cache is unreadable): rebuild it from the reference images.
+         myList = os.listdir(path)
+         classNames = [os.path.splitext(cl)[0] for cl in myList]
+         images = myList
+
+         encodeListKnown = find_encodings(images)
+         print(len(encodeListKnown))
+         print('Encoding Complete')
+
+         with open(f'{folder_location}models/face_rec', 'wb') as file:
+             pickle.dump(encodeListKnown, file)
+
+     _, img = cap.read()
+     img = cv2.flip(img, 2)
+     imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
+     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
+
+     facesCurFrame = face_recognition.face_locations(imgS)
+     encodeCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
+
+     for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
+         matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
+         faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
+         print(faceDis)
+         matchIndices = np.where(matches)[0]  # Get indices of all matched faces
+
+         for matchIndex in matchIndices:
+             name = classNames[matchIndex].upper()
+             recognized_users.append(name)  # Append recognized user to the list
+             print(name)
+             y1, x2, y2, x1 = faceLoc
+             y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
+             cv2.rectangle(img, (x1, y1), (x2, y2), (205, 154, 79), 2)
+             cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 2)
+
+         if matchIndices.size == 0:  # no known face matched this detection
+             name = "UNKNOWN"
+             print(name)
+             y1, x2, y2, x1 = faceLoc
+             y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
+             cv2.rectangle(img, (x1, y1), (x2, y2), (205, 154, 79))
+             cv2.putText(img, 'UNKNOWN', (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 2)
+
+     # cv2.imshow("Face Recognition", img)
+     # cv2.waitKey(1)
+
+     return recognized_users, img
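
A sketch for pre-building the known-face cache so the first recognize_users() call does not have to encode every reference photo; it assumes this file is importable as faces and that each reference image contains exactly one clear face:

import os
import pickle
from faces import find_encodings, folder_location  # module name assumed from this commit

# Rebuild the known-face cache from the reference photos on disk.
ref_dir = f'{folder_location}img/face_recognition'
filenames = os.listdir(ref_dir)

encodings = find_encodings(filenames)  # order matches os.listdir(), like recognize_users()
with open(f'{folder_location}models/face_rec', 'wb') as f:
    pickle.dump(encodings, f)

print('cached', len(encodings), 'encodings for', [os.path.splitext(n)[0] for n in filenames])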