import numpy as np import os import h5py import cv2 import os.path as osp import torch import pandas as pd from pprint import pprint import matplotlib import matplotlib.pyplot as plt import torch.nn.functional as F def resize(image, size): image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0) return image def read_labels(label_path): labels_ids = [] all_true_boxes = [] with open(label_path, 'r') as sequence_ids: labels_ids.extend(map(str.strip, sequence_ids.readlines())) for i, sequence_id in enumerate(labels_ids): true_bounding_boxes = sequence_id.split(' ')[1:] if true_bounding_boxes == ['None']: all_true_boxes.append(None) else: # print("boxesnum", len(true_bounding_boxes)) true_bounding_boxes = [(int(box.split(',')[4]), int(box.split(',')[0]), int(box.split(',')[1]), int(box.split(',')[2])-int(box.split(',')[0]), int(box.split(',')[3])-int(box.split(',')[1])) for box in true_bounding_boxes] # boxes_type={'names':('class_id','x','y','w','h'), 'formats':('u4','u4', 'u4','u4','u4')} true_bounding_boxes=np.array(true_bounding_boxes) # ,dtype=boxes_type all_true_boxes.append(true_bounding_boxes) return all_true_boxes def read_PKUdataset_info_to_dict(file_path): xls = pd.ExcelFile(file_path) data_dict = {} for sheet_name in xls.sheet_names: df = xls.parse(sheet_name) sheet_data = df.set_index('sequence name').to_dict(orient='index') for key, value in sheet_data.items(): if key not in data_dict: data_dict[key] = {} data_dict[key]['time length'] = value['time length (s)'] data_dict[key]['start number'] = value['Vidar start number'] data_dict[key]['end number'] = value['Vidar end number'] return data_dict def vis_event_image(image_data): cmap = matplotlib.colormaps['coolwarm'] plt.imshow(image_data, cmap=cmap, vmin=-1, vmax=1) cbar = plt.colorbar(ticks=[-1, 0, 1]) cbar.set_ticklabels(['Red', 'Neutral', 'Blue']) plt.title('Visualization of Image with -1, 0, 1') plt.savefig("vis_event.png", format='png') plt.close() def get_sequences_ids(data_path): """ getting ids from .txt file. args : - data_path: .txt file. return: - ids: the sequences from .txt file. """ ids = [] with open(data_path, 'r') as sequence_ids: ids.extend(map(str.strip, sequence_ids.readlines())) return ids def load_vidar_dat(filename, frame_cnt=None, size=(480, 680), reverse_spike=True): ''' output: (frame_cnt, height, width) {0,1} float32 ''' array = np.fromfile(filename, dtype=np.uint8) height, width = size len_per_frame = height * width // 8 framecnt = frame_cnt if frame_cnt != None else len(array) // len_per_frame spikes = [] for i in range(framecnt): compr_frame = array[i * len_per_frame: (i + 1) * len_per_frame] blist = [] for b in range(8): blist.append(np.right_shift(np.bitwise_and( compr_frame, np.left_shift(1, b)), b)) frame_ = np.stack(blist).transpose() frame_ = frame_.reshape((height, width), order='C') if reverse_spike: frame_ = np.flipud(frame_) spikes.append(frame_) return np.array(spikes).astype(np.float32) def make_dir(path): if not osp.exists(path): os.makedirs(path) return def RawToSpike(video_seq, h, w, flipud=True): video_seq = np.array(video_seq).astype(np.uint8) img_size = h*w img_num = len(video_seq)//(img_size//8) SpikeMatrix = np.zeros([img_num, h, w], np.uint8) pix_id = np.arange(0,h*w) pix_id = np.reshape(pix_id, (h, w)) comparator = np.left_shift(1, np.mod(pix_id, 8)) byte_id = pix_id // 8 for img_id in np.arange(img_num): id_start = img_id*img_size//8 id_end = id_start + img_size//8 cur_info = video_seq[id_start:id_end] data = cur_info[byte_id] result = np.bitwise_and(data, comparator) if flipud: SpikeMatrix[img_id, :, :] = np.flipud((result == comparator)) else: SpikeMatrix[img_id, :, :] = (result == comparator) return SpikeMatrix def save_to_h5(SpikeMatrix, h5path): f = h5py.File(h5path, 'w') f['raw_spike'] = SpikeMatrix f.close() def dat_to_h5(dat_path, h5path, size=[436, 1024]): f = open(dat_path, 'rb') video_seq = f.read() video_seq = np.frombuffer(video_seq, 'b') sp_mat = RawToSpike(video_seq, size[0], size[1]) save_to_h5(sp_mat, h5path) ############################################################################ ## General Read Function def read_gen(file_name): ext = osp.splitext(file_name)[-1] if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': # return Image.open(file_name) return cv2.imread(file_name) elif ext == '.bin' or ext == '.raw': return np.load(file_name) elif ext == '.flo': return readFlow(file_name).astype(np.float32) return [] def im2gray(im): # im = np.array(im).astype(np.float32)[..., :3] / 255. # return cv2.cvtColor(im, cv2.COLOR_RGB2GRAY) im = im.astype(np.float32) / 255. im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) im = np.expand_dims(im, axis=0) return im def im_color(im): im = im.astype(np.float32) / 255. im = im.transpose([2, 0, 1]) return im TAG_CHAR = np.array([202021.25], np.float32) def readFlow(fn): """ Read .flo file in Middlebury format""" # Code adapted from: # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy # WARNING: this will work on little-endian architectures (eg Intel x86) only! # print 'fn = %s'%(fn) with open(fn, 'rb') as f: magic = np.fromfile(f, np.float32, count=1) if 202021.25 != magic: print('Magic number incorrect. Invalid .flo file') return None else: w = np.fromfile(f, np.int32, count=1) h = np.fromfile(f, np.int32, count=1) # print 'Reading %d x %d flo file\n' % (w, h) data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) # Reshape data into 3D array (columns, rows, bands) # The reshape here is for visualization, the original code is (w,h,2) return np.resize(data, (int(h), int(w), 2)) from contextlib import contextmanager import time import torch @contextmanager def timeblock(label): torch.cuda.synchronize() start = time.perf_counter() try: yield finally: torch.cuda.synchronize() end = time.perf_counter() print('{} : {}'.format(label, end - start)) if __name__ == '__main__': img_unpadded = torch.randn(8, 1, 260, 346) img_padded = pad_to_square_and_32(img_unpadded, 0) print(img_unpadded.shape, img_padded.shape) info_dict = read_PKUdataset_info_to_dict('Dtracker/myaflow/dataset/statistics/PKU_Vidar_DVS_statistics.xlsx') scene = '00047_rotation_5000K_800r' ######## ## spike # spike_dir = 'Dtracker_data/test/Vidar/00047_rotation_5000K_800r/' # spikes_paths = sorted(os.listdir(spike_dir), key=lambda x: int(os.path.basename(x).split(".")[0])) # spikes_paths_list = [osp.join(spike_dir, spikes_paths[i]) for i in range(len(spikes_paths))] # # pprint(spikes_paths_list) # spikes = [dat_to_spmat(p, size=(250, 400)) for p in spikes_paths_list] # print("len(spikes)", len(spikes), type(spikes), type(spikes[-1])) ######## ## event # scene = '00505_UAV_outdoor6' # event_path = 'Dtracker_data/test/DVS/00047_rotation_5000K_800r.hdf5' # # event_path = 'Dtracker_data/test/DVS/00505_UAV_outdoor6.hdf5' # events = event_to_bins(event_path, temporal_length=info_dict[scene]['time length'], interval=0.02) # events = event_bins_to_image(events, height=260, width=346) # print("len(events)", len(events), type(events), type(events[-1])) ######## ## label # label_path = 'Dtracker_data/test/labels/00047_rotation_5000K_800r.txt' # labels = read_labels(label_path) # # pprint(labels) # print("len(labels)", len(labels), type(labels), type(labels[-1]))