diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..4febadb2842277d1fc595a1735ab131a7529f8ce 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +README.md filter=lfs diff=lfs merge=lfs -text +florence_sam filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..82f927558a3dff0ea8c20858856e70779fd02c93 --- /dev/null +++ b/.gitignore @@ -0,0 +1,162 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/README.md b/README.md index 8a731574eb49e7b01b71753df071d461e86ec98d..45999447eb4089ac0042635efe9f798ad44fdfeb 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,3 @@ ---- -title: Video Object Removal -emoji: 🌖 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 4.42.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +version https://git-lfs.github.com/spec/v1 +oid sha256:0c027f5ae5bf8ef0109fc9b40cf66d656334abf88fda4cabdad99fab6cb9e840 +size 240 diff --git a/florence_sam/RAFT/__init__.py b/florence_sam/RAFT/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e7179ea3ce4ad81425c619772d4bc47bc7ceea3a --- /dev/null +++ b/florence_sam/RAFT/__init__.py @@ -0,0 +1,2 @@ +# from .demo import RAFT_infer +from .raft import RAFT diff --git a/florence_sam/RAFT/corr.py b/florence_sam/RAFT/corr.py new file mode 100755 index 0000000000000000000000000000000000000000..449dbd963b8303eda242a65063ca857b95475721 --- /dev/null +++ b/florence_sam/RAFT/corr.py @@ -0,0 +1,111 @@ +import torch +import torch.nn.functional as F +from .utils.utils import bilinear_sampler, coords_grid + +try: + import alt_cuda_corr +except: + # alt_cuda_corr is not compiled + pass + + +class CorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + self.corr_pyramid = [] + + # all pairs correlation + corr = CorrBlock.corr(fmap1, fmap2) + + batch, h1, w1, dim, h2, w2 = corr.shape + corr = corr.reshape(batch*h1*w1, dim, h2, w2) + + self.corr_pyramid.append(corr) + for i in range(self.num_levels-1): + corr = F.avg_pool2d(corr, 2, stride=2) + self.corr_pyramid.append(corr) + + def __call__(self, coords): + r = self.radius + coords = coords.permute(0, 2, 3, 1) + batch, h1, w1, _ = coords.shape + + out_pyramid = [] + for i in range(self.num_levels): + corr = self.corr_pyramid[i] + dx = torch.linspace(-r, r, 2*r+1) + dy = torch.linspace(-r, r, 2*r+1) + delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device) + + centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i + delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) + coords_lvl = centroid_lvl + delta_lvl + + corr = bilinear_sampler(corr, coords_lvl) + corr = corr.view(batch, h1, w1, -1) + out_pyramid.append(corr) + + out = torch.cat(out_pyramid, dim=-1) + return out.permute(0, 3, 1, 2).contiguous().float() + + @staticmethod + def corr(fmap1, fmap2): + batch, dim, ht, wd = fmap1.shape + fmap1 = fmap1.view(batch, dim, ht*wd) + fmap2 = fmap2.view(batch, dim, ht*wd) + + corr = 
torch.matmul(fmap1.transpose(1,2), fmap2) + corr = corr.view(batch, ht, wd, 1, ht, wd) + return corr / torch.sqrt(torch.tensor(dim).float()) + + +class CorrLayer(torch.autograd.Function): + @staticmethod + def forward(ctx, fmap1, fmap2, coords, r): + fmap1 = fmap1.contiguous() + fmap2 = fmap2.contiguous() + coords = coords.contiguous() + ctx.save_for_backward(fmap1, fmap2, coords) + ctx.r = r + corr, = correlation_cudaz.forward(fmap1, fmap2, coords, ctx.r) + return corr + + @staticmethod + def backward(ctx, grad_corr): + fmap1, fmap2, coords = ctx.saved_tensors + grad_corr = grad_corr.contiguous() + fmap1_grad, fmap2_grad, coords_grad = \ + correlation_cudaz.backward(fmap1, fmap2, coords, grad_corr, ctx.r) + return fmap1_grad, fmap2_grad, coords_grad, None + + +class AlternateCorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + + self.pyramid = [(fmap1, fmap2)] + for i in range(self.num_levels): + fmap1 = F.avg_pool2d(fmap1, 2, stride=2) + fmap2 = F.avg_pool2d(fmap2, 2, stride=2) + self.pyramid.append((fmap1, fmap2)) + + def __call__(self, coords): + + coords = coords.permute(0, 2, 3, 1) + B, H, W, _ = coords.shape + + corr_list = [] + for i in range(self.num_levels): + r = self.radius + fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1) + fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1) + + coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous() + corr = alt_cuda_corr(fmap1_i, fmap2_i, coords_i, r) + corr_list.append(corr.squeeze(1)) + + corr = torch.stack(corr_list, dim=1) + corr = corr.reshape(B, -1, H, W) + return corr / 16.0 diff --git a/florence_sam/RAFT/datasets.py b/florence_sam/RAFT/datasets.py new file mode 100755 index 0000000000000000000000000000000000000000..3411fdacfb900024005e8997d07c600e963a95ca --- /dev/null +++ b/florence_sam/RAFT/datasets.py @@ -0,0 +1,235 @@ +# Data loading based on https://github.com/NVIDIA/flownet2-pytorch + +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import os +import math +import random +from glob import glob +import os.path as osp + +from utils import frame_utils +from utils.augmentor import FlowAugmentor, SparseFlowAugmentor + + +class FlowDataset(data.Dataset): + def __init__(self, aug_params=None, sparse=False): + self.augmentor = None + self.sparse = sparse + if aug_params is not None: + if sparse: + self.augmentor = SparseFlowAugmentor(**aug_params) + else: + self.augmentor = FlowAugmentor(**aug_params) + + self.is_test = False + self.init_seed = False + self.flow_list = [] + self.image_list = [] + self.extra_info = [] + + def __getitem__(self, index): + + if self.is_test: + img1 = frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + img1 = np.array(img1).astype(np.uint8)[..., :3] + img2 = np.array(img2).astype(np.uint8)[..., :3] + img1 = torch.from_numpy(img1).permute(2, 0, 1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + return img1, img2, self.extra_info[index] + + if not self.init_seed: + worker_info = torch.utils.data.get_worker_info() + if worker_info is not None: + torch.manual_seed(worker_info.id) + np.random.seed(worker_info.id) + random.seed(worker_info.id) + self.init_seed = True + + index = index % len(self.image_list) + valid = None + if self.sparse: + flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) + else: + flow = frame_utils.read_gen(self.flow_list[index]) + + img1 = 
frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + + flow = np.array(flow).astype(np.float32) + img1 = np.array(img1).astype(np.uint8) + img2 = np.array(img2).astype(np.uint8) + + # grayscale images + if len(img1.shape) == 2: + img1 = np.tile(img1[...,None], (1, 1, 3)) + img2 = np.tile(img2[...,None], (1, 1, 3)) + else: + img1 = img1[..., :3] + img2 = img2[..., :3] + + if self.augmentor is not None: + if self.sparse: + img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid) + else: + img1, img2, flow = self.augmentor(img1, img2, flow) + + img1 = torch.from_numpy(img1).permute(2, 0, 1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + flow = torch.from_numpy(flow).permute(2, 0, 1).float() + + if valid is not None: + valid = torch.from_numpy(valid) + else: + valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000) + + return img1, img2, flow, valid.float() + + + def __rmul__(self, v): + self.flow_list = v * self.flow_list + self.image_list = v * self.image_list + return self + + def __len__(self): + return len(self.image_list) + + +class MpiSintel(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'): + super(MpiSintel, self).__init__(aug_params) + flow_root = osp.join(root, split, 'flow') + image_root = osp.join(root, split, dstype) + + if split == 'test': + self.is_test = True + + for scene in os.listdir(image_root): + image_list = sorted(glob(osp.join(image_root, scene, '*.png'))) + for i in range(len(image_list)-1): + self.image_list += [ [image_list[i], image_list[i+1]] ] + self.extra_info += [ (scene, i) ] # scene and frame_id + + if split != 'test': + self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo'))) + + +class FlyingChairs(FlowDataset): + def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'): + super(FlyingChairs, self).__init__(aug_params) + + images = sorted(glob(osp.join(root, '*.ppm'))) + flows = sorted(glob(osp.join(root, '*.flo'))) + assert (len(images)//2 == len(flows)) + + split_list = np.loadtxt('chairs_split.txt', dtype=np.int32) + for i in range(len(flows)): + xid = split_list[i] + if (split=='training' and xid==1) or (split=='validation' and xid==2): + self.flow_list += [ flows[i] ] + self.image_list += [ [images[2*i], images[2*i+1]] ] + + +class FlyingThings3D(FlowDataset): + def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'): + super(FlyingThings3D, self).__init__(aug_params) + + for cam in ['left']: + for direction in ['into_future', 'into_past']: + image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*'))) + image_dirs = sorted([osp.join(f, cam) for f in image_dirs]) + + flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*'))) + flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs]) + + for idir, fdir in zip(image_dirs, flow_dirs): + images = sorted(glob(osp.join(idir, '*.png')) ) + flows = sorted(glob(osp.join(fdir, '*.pfm')) ) + for i in range(len(flows)-1): + if direction == 'into_future': + self.image_list += [ [images[i], images[i+1]] ] + self.flow_list += [ flows[i] ] + elif direction == 'into_past': + self.image_list += [ [images[i+1], images[i]] ] + self.flow_list += [ flows[i+1] ] + + +class KITTI(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/KITTI'): + super(KITTI, self).__init__(aug_params, sparse=True) + if split == 'testing': + self.is_test = True + + 
root = osp.join(root, split) + images1 = sorted(glob(osp.join(root, 'image_2/*_10.png'))) + images2 = sorted(glob(osp.join(root, 'image_2/*_11.png'))) + + for img1, img2 in zip(images1, images2): + frame_id = img1.split('/')[-1] + self.extra_info += [ [frame_id] ] + self.image_list += [ [img1, img2] ] + + if split == 'training': + self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png'))) + + +class HD1K(FlowDataset): + def __init__(self, aug_params=None, root='datasets/HD1k'): + super(HD1K, self).__init__(aug_params, sparse=True) + + seq_ix = 0 + while 1: + flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix))) + images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix))) + + if len(flows) == 0: + break + + for i in range(len(flows)-1): + self.flow_list += [flows[i]] + self.image_list += [ [images[i], images[i+1]] ] + + seq_ix += 1 + + +def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'): + """ Create the data loader for the corresponding trainign set """ + + if args.stage == 'chairs': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True} + train_dataset = FlyingChairs(aug_params, split='training') + + elif args.stage == 'things': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True} + clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass') + final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass') + train_dataset = clean_dataset + final_dataset + + elif args.stage == 'sintel': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True} + things = FlyingThings3D(aug_params, dstype='frames_cleanpass') + sintel_clean = MpiSintel(aug_params, split='training', dstype='clean') + sintel_final = MpiSintel(aug_params, split='training', dstype='final') + + if TRAIN_DS == 'C+T+K+S+H': + kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True}) + hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True}) + train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things + + elif TRAIN_DS == 'C+T+K/S': + train_dataset = 100*sintel_clean + 100*sintel_final + things + + elif args.stage == 'kitti': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False} + train_dataset = KITTI(aug_params, split='training') + + train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, + pin_memory=False, shuffle=True, num_workers=4, drop_last=True) + + print('Training with %d image pairs' % len(train_dataset)) + return train_loader + diff --git a/florence_sam/RAFT/demo.py b/florence_sam/RAFT/demo.py new file mode 100755 index 0000000000000000000000000000000000000000..096963bdbb36aed3df673f131d6e044d8c6f95ea --- /dev/null +++ b/florence_sam/RAFT/demo.py @@ -0,0 +1,79 @@ +import sys +import argparse +import os +import cv2 +import glob +import numpy as np +import torch +from PIL import Image + +from .raft import RAFT +from .utils import flow_viz +from .utils.utils import InputPadder + + + +DEVICE = 'cuda' + +def load_image(imfile): + img = np.array(Image.open(imfile)).astype(np.uint8) + img = torch.from_numpy(img).permute(2, 0, 1).float() + return img + + +def load_image_list(image_files): + images = [] + for imfile in sorted(image_files): + images.append(load_image(imfile)) + + images = torch.stack(images, dim=0) + images = images.to(DEVICE) + + 
padder = InputPadder(images.shape) + return padder.pad(images)[0] + + +def viz(img, flo): + img = img[0].permute(1,2,0).cpu().numpy() + flo = flo[0].permute(1,2,0).cpu().numpy() + + # map flow to rgb image + flo = flow_viz.flow_to_image(flo) + # img_flo = np.concatenate([img, flo], axis=0) + img_flo = flo + + cv2.imwrite('/home/chengao/test/flow.png', img_flo[:, :, [2,1,0]]) + # cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0) + # cv2.waitKey() + + +def demo(args): + model = torch.nn.DataParallel(RAFT(args)) + model.load_state_dict(torch.load(args.model)) + + model = model.module + model.to(DEVICE) + model.eval() + + with torch.no_grad(): + images = glob.glob(os.path.join(args.path, '*.png')) + \ + glob.glob(os.path.join(args.path, '*.jpg')) + + images = load_image_list(images) + for i in range(images.shape[0]-1): + image1 = images[i,None] + image2 = images[i+1,None] + + flow_low, flow_up = model(image1, image2, iters=20, test_mode=True) + viz(image1, flow_up) + + +def RAFT_infer(args): + model = torch.nn.DataParallel(RAFT(args)) + model.load_state_dict(torch.load(args.model)) + + model = model.module + model.to(DEVICE) + model.eval() + + return model diff --git a/florence_sam/RAFT/extractor.py b/florence_sam/RAFT/extractor.py new file mode 100755 index 0000000000000000000000000000000000000000..9a9c759d1243d4694e8656c2f6f8a37e53edd009 --- /dev/null +++ b/florence_sam/RAFT/extractor.py @@ -0,0 +1,267 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm2 = 
nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes//4) + self.norm2 = nn.BatchNorm2d(planes//4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes//4) + self.norm2 = nn.InstanceNorm2d(planes//4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(BasicEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(64) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(64) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 64 + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(96, stride=2) + self.layer3 = self._make_layer(128, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + + +class SmallEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(SmallEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32) + + elif self.norm_fn == 'batch': + self.norm1 = 
nn.BatchNorm2d(32) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(32) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 32 + self.layer1 = self._make_layer(32, stride=1) + self.layer2 = self._make_layer(64, stride=2) + self.layer3 = self._make_layer(96, stride=2) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x diff --git a/florence_sam/RAFT/raft.py b/florence_sam/RAFT/raft.py new file mode 100755 index 0000000000000000000000000000000000000000..829ef97b8d3e280aac59ebef7bb2eaf06274b62a --- /dev/null +++ b/florence_sam/RAFT/raft.py @@ -0,0 +1,146 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .update import BasicUpdateBlock, SmallUpdateBlock +from .extractor import BasicEncoder, SmallEncoder +from .corr import CorrBlock, AlternateCorrBlock +from .utils.utils import bilinear_sampler, coords_grid, upflow8 + +try: + autocast = torch.cuda.amp.autocast +except: + # dummy autocast for PyTorch < 1.6 + class autocast: + def __init__(self, enabled): + pass + def __enter__(self): + pass + def __exit__(self, *args): + pass + + +class RAFT(nn.Module): + def __init__(self, args): + super(RAFT, self).__init__() + self.args = args + + if args.small: + self.hidden_dim = hdim = 96 + self.context_dim = cdim = 64 + args.corr_levels = 4 + args.corr_radius = 3 + + else: + self.hidden_dim = hdim = 128 + self.context_dim = cdim = 128 + args.corr_levels = 4 + args.corr_radius = 4 + + if 'dropout' not in args._get_kwargs(): + args.dropout = 0 + + if 'alternate_corr' not in args._get_kwargs(): + args.alternate_corr = False + + # feature network, context network, and update block + if args.small: + self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) + self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout) + self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim) + + else: + self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) + self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout) + self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim) + + + def freeze_bn(self): + for m in self.modules(): + if 
isinstance(m, nn.BatchNorm2d): + m.eval() + + def initialize_flow(self, img): + """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0""" + N, C, H, W = img.shape + coords0 = coords_grid(N, H//8, W//8).to(img.device) + coords1 = coords_grid(N, H//8, W//8).to(img.device) + + # optical flow computed as difference: flow = coords1 - coords0 + return coords0, coords1 + + def upsample_flow(self, flow, mask): + """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """ + N, _, H, W = flow.shape + mask = mask.view(N, 1, 9, 8, 8, H, W) + mask = torch.softmax(mask, dim=2) + + up_flow = F.unfold(8 * flow, [3,3], padding=1) + up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) + + up_flow = torch.sum(mask * up_flow, dim=2) + up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) + return up_flow.reshape(N, 2, 8*H, 8*W) + + + def forward(self, image1, image2, iters=12, flow_init=None, test_mode=True): + """ Estimate optical flow between pair of frames """ + + # image1 = 2 * (image1 / 255.0) - 1.0 + # image2 = 2 * (image2 / 255.0) - 1.0 + + image1 = image1.contiguous() + image2 = image2.contiguous() + + hdim = self.hidden_dim + cdim = self.context_dim + + # run the feature network + with autocast(enabled=self.args.mixed_precision): + fmap1, fmap2 = self.fnet([image1, image2]) + + fmap1 = fmap1.float() + fmap2 = fmap2.float() + + if self.args.alternate_corr: + corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + else: + corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + + # run the context network + with autocast(enabled=self.args.mixed_precision): + cnet = self.cnet(image1) + net, inp = torch.split(cnet, [hdim, cdim], dim=1) + net = torch.tanh(net) + inp = torch.relu(inp) + + coords0, coords1 = self.initialize_flow(image1) + + if flow_init is not None: + coords1 = coords1 + flow_init + + flow_predictions = [] + for itr in range(iters): + coords1 = coords1.detach() + corr = corr_fn(coords1) # index correlation volume + + flow = coords1 - coords0 + with autocast(enabled=self.args.mixed_precision): + net, up_mask, delta_flow = self.update_block(net, inp, corr, flow) + + # F(t+1) = F(t) + \Delta(t) + coords1 = coords1 + delta_flow + + # upsample predictions + if up_mask is None: + flow_up = upflow8(coords1 - coords0) + else: + flow_up = self.upsample_flow(coords1 - coords0, up_mask) + + flow_predictions.append(flow_up) + + if test_mode: + return coords1 - coords0, flow_up + + return flow_predictions diff --git a/florence_sam/RAFT/update.py b/florence_sam/RAFT/update.py new file mode 100755 index 0000000000000000000000000000000000000000..f940497f9b5eb1c12091574fe9a0223a1b196d50 --- /dev/null +++ b/florence_sam/RAFT/update.py @@ -0,0 +1,139 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class FlowHead(nn.Module): + def __init__(self, input_dim=128, hidden_dim=256): + super(FlowHead, self).__init__() + self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) + self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + return self.conv2(self.relu(self.conv1(x))) + +class ConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(ConvGRU, self).__init__() + self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + + def forward(self, h, x): + hx = torch.cat([h, x], 
dim=1) + + z = torch.sigmoid(self.convz(hx)) + r = torch.sigmoid(self.convr(hx)) + q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1))) + + h = (1-z) * h + z * q + return h + +class SepConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(SepConvGRU, self).__init__() + self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + + self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + + + def forward(self, h, x): + # horizontal + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz1(hx)) + r = torch.sigmoid(self.convr1(hx)) + q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + # vertical + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz2(hx)) + r = torch.sigmoid(self.convr2(hx)) + q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + return h + +class SmallMotionEncoder(nn.Module): + def __init__(self, args): + super(SmallMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0) + self.convf1 = nn.Conv2d(2, 64, 7, padding=3) + self.convf2 = nn.Conv2d(64, 32, 3, padding=1) + self.conv = nn.Conv2d(128, 80, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class BasicMotionEncoder(nn.Module): + def __init__(self, args): + super(BasicMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0) + self.convc2 = nn.Conv2d(256, 192, 3, padding=1) + self.convf1 = nn.Conv2d(2, 128, 7, padding=3) + self.convf2 = nn.Conv2d(128, 64, 3, padding=1) + self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + cor = F.relu(self.convc2(cor)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class SmallUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=96): + super(SmallUpdateBlock, self).__init__() + self.encoder = SmallMotionEncoder(args) + self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64) + self.flow_head = FlowHead(hidden_dim, hidden_dim=128) + + def forward(self, net, inp, corr, flow): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + return net, None, delta_flow + +class BasicUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=128, input_dim=128): + super(BasicUpdateBlock, self).__init__() + self.args = args + self.encoder = BasicMotionEncoder(args) + self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim) + self.flow_head = FlowHead(hidden_dim, hidden_dim=256) + + self.mask = nn.Sequential( + nn.Conv2d(128, 256, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 64*9, 1, 
padding=0)) + + def forward(self, net, inp, corr, flow, upsample=True): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + # scale mask to balence gradients + mask = .25 * self.mask(net) + return net, mask, delta_flow + + + diff --git a/florence_sam/RAFT/utils/__init__.py b/florence_sam/RAFT/utils/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..0437149bfee42718973728158641020ccc1906ad --- /dev/null +++ b/florence_sam/RAFT/utils/__init__.py @@ -0,0 +1,2 @@ +from .flow_viz import flow_to_image +from .frame_utils import writeFlow diff --git a/florence_sam/RAFT/utils/augmentor.py b/florence_sam/RAFT/utils/augmentor.py new file mode 100755 index 0000000000000000000000000000000000000000..e81c4f2b5c16c31c0ae236d744f299d430228a04 --- /dev/null +++ b/florence_sam/RAFT/utils/augmentor.py @@ -0,0 +1,246 @@ +import numpy as np +import random +import math +from PIL import Image + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +import torch +from torchvision.transforms import ColorJitter +import torch.nn.functional as F + + +class FlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True): + + # spatial augmentation params + self.crop_size = crop_size + self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + """ Photometric augmentation """ + + # asymmetric + if np.random.rand() < self.asymmetric_color_aug_prob: + img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8) + img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8) + + # symmetric + else: + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + + return img1, img2 + + def eraser_transform(self, img1, img2, bounds=[50, 100]): + """ Occlusion augmentation """ + + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(bounds[0], bounds[1]) + dy = np.random.randint(bounds[0], bounds[1]) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def spatial_transform(self, img1, img2, flow): + # randomly sample scale + ht, wd = img1.shape[:2] + min_scale = np.maximum( + (self.crop_size[0] + 8) / float(ht), + (self.crop_size[1] + 8) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = scale + scale_y = scale + if np.random.rand() < self.stretch_prob: + scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + + scale_x = np.clip(scale_x, min_scale, None) + scale_y = np.clip(scale_y, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, 
interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = flow * [scale_x, scale_y] + + if self.do_flip: + if np.random.rand() < self.h_flip_prob: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + + if np.random.rand() < self.v_flip_prob: # v-flip + img1 = img1[::-1, :] + img2 = img2[::-1, :] + flow = flow[::-1, :] * [1.0, -1.0] + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0]) + x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + + return img1, img2, flow + + def __call__(self, img1, img2, flow): + img1, img2 = self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow = self.spatial_transform(img1, img2, flow) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + + return img1, img2, flow + +class SparseFlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False): + # spatial augmentation params + self.crop_size = crop_size + self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + return img1, img2 + + def eraser_transform(self, img1, img2): + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(50, 100) + dy = np.random.randint(50, 100) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0): + ht, wd = flow.shape[:2] + coords = np.meshgrid(np.arange(wd), np.arange(ht)) + coords = np.stack(coords, axis=-1) + + coords = coords.reshape(-1, 2).astype(np.float32) + flow = flow.reshape(-1, 2).astype(np.float32) + valid = valid.reshape(-1).astype(np.float32) + + coords0 = coords[valid>=1] + flow0 = flow[valid>=1] + + ht1 = int(round(ht * fy)) + wd1 = int(round(wd * fx)) + + coords1 = coords0 * [fx, fy] + flow1 = flow0 * [fx, fy] + + xx = np.round(coords1[:,0]).astype(np.int32) + yy = np.round(coords1[:,1]).astype(np.int32) + + v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1) + xx = xx[v] + yy = yy[v] + flow1 = flow1[v] + + flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32) + valid_img = np.zeros([ht1, wd1], dtype=np.int32) + + flow_img[yy, xx] = flow1 + valid_img[yy, xx] = 1 + + return flow_img, valid_img + + def spatial_transform(self, img1, img2, flow, valid): + # randomly sample scale + + ht, wd = img1.shape[:2] + min_scale = np.maximum( + (self.crop_size[0] + 1) / 
float(ht), + (self.crop_size[1] + 1) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = np.clip(scale, min_scale, None) + scale_y = np.clip(scale, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y) + + if self.do_flip: + if np.random.rand() < 0.5: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + valid = valid[:, ::-1] + + margin_y = 20 + margin_x = 50 + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y) + x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x) + + y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0]) + x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + return img1, img2, flow, valid + + + def __call__(self, img1, img2, flow, valid): + img1, img2 = self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + valid = np.ascontiguousarray(valid) + + return img1, img2, flow, valid diff --git a/florence_sam/RAFT/utils/flow_viz.py b/florence_sam/RAFT/utils/flow_viz.py new file mode 100755 index 0000000000000000000000000000000000000000..dcee65e89b91b07ee0496aeb4c7e7436abf99641 --- /dev/null +++ b/florence_sam/RAFT/utils/flow_viz.py @@ -0,0 +1,132 @@ +# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization + + +# MIT License +# +# Copyright (c) 2018 Tom Runia +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to conditions. +# +# Author: Tom Runia +# Date Created: 2018-08-03 + +import numpy as np + +def make_colorwheel(): + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf + + Code follows the original C++ source code of Daniel Scharstein. + Code follows the the Matlab source code of Deqing Sun. 
+ + Returns: + np.ndarray: Color wheel + """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = np.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) + col = col+RY + # YG + colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) + colorwheel[col:col+YG, 1] = 255 + col = col+YG + # GC + colorwheel[col:col+GC, 1] = 255 + colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) + col = col+GC + # CB + colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) + colorwheel[col:col+CB, 2] = 255 + col = col+CB + # BM + colorwheel[col:col+BM, 2] = 255 + colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) + col = col+BM + # MR + colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) + colorwheel[col:col+MR, 0] = 255 + return colorwheel + + +def flow_uv_to_colors(u, v, convert_to_bgr=False): + """ + Applies the flow color wheel to (possibly clipped) flow components u and v. + + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + + Args: + u (np.ndarray): Input horizontal flow of shape [H,W] + v (np.ndarray): Input vertical flow of shape [H,W] + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. + + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) + colorwheel = make_colorwheel() # shape [55x3] + ncols = colorwheel.shape[0] + rad = np.sqrt(np.square(u) + np.square(v)) + a = np.arctan2(-v, -u)/np.pi + fk = (a+1) / 2*(ncols-1) + k0 = np.floor(fk).astype(np.int32) + k1 = k0 + 1 + k1[k1 == ncols] = 0 + f = fk - k0 + for i in range(colorwheel.shape[1]): + tmp = colorwheel[:,i] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1-f)*col0 + f*col1 + idx = (rad <= 1) + col[idx] = 1 - rad[idx] * (1-col[idx]) + col[~idx] = col[~idx] * 0.75 # out of range + # Note the 2-i => BGR instead of RGB + ch_idx = 2-i if convert_to_bgr else i + flow_image[:,:,ch_idx] = np.floor(255 * col) + return flow_image + + +def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): + """ + Expects a two dimensional flow image of shape. + + Args: + flow_uv (np.ndarray): Flow UV image of shape [H,W,2] + clip_flow (float, optional): Clip maximum of flow values. Defaults to None. + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
+ + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + assert flow_uv.ndim == 3, 'input flow must have three dimensions' + assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' + if clip_flow is not None: + flow_uv = np.clip(flow_uv, 0, clip_flow) + u = flow_uv[:,:,0] + v = flow_uv[:,:,1] + rad = np.sqrt(np.square(u) + np.square(v)) + rad_max = np.max(rad) + epsilon = 1e-5 + u = u / (rad_max + epsilon) + v = v / (rad_max + epsilon) + return flow_uv_to_colors(u, v, convert_to_bgr) \ No newline at end of file diff --git a/florence_sam/RAFT/utils/flow_viz_pt.py b/florence_sam/RAFT/utils/flow_viz_pt.py new file mode 100644 index 0000000000000000000000000000000000000000..12e666a40fa49c11592e311b141aa2a522e567fd --- /dev/null +++ b/florence_sam/RAFT/utils/flow_viz_pt.py @@ -0,0 +1,118 @@ +# Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization +import torch +torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 + +@torch.no_grad() +def flow_to_image(flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a flow to an RGB image. + + Args: + flow (Tensor): Flow of shape (N, 2, H, W) or (2, H, W) and dtype torch.float. + + Returns: + img (Tensor): Image Tensor of dtype uint8 where each color corresponds + to a given flow direction. Shape is (N, 3, H, W) or (3, H, W) depending on the input. + """ + + if flow.dtype != torch.float: + raise ValueError(f"Flow should be of dtype torch.float, got {flow.dtype}.") + + orig_shape = flow.shape + if flow.ndim == 3: + flow = flow[None] # Add batch dim + + if flow.ndim != 4 or flow.shape[1] != 2: + raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.") + + max_norm = torch.sum(flow**2, dim=1).sqrt().max() + epsilon = torch.finfo((flow).dtype).eps + normalized_flow = flow / (max_norm + epsilon) + img = _normalized_flow_to_image(normalized_flow) + + if len(orig_shape) == 3: + img = img[0] # Remove batch dim + return img + +@torch.no_grad() +def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor: + + """ + Converts a batch of normalized flow to an RGB image. + + Args: + normalized_flow (torch.Tensor): Normalized flow tensor of shape (N, 2, H, W) + Returns: + img (Tensor(N, 3, H, W)): Flow visualization image of dtype uint8. + """ + + N, _, H, W = normalized_flow.shape + device = normalized_flow.device + flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device) + colorwheel = _make_colorwheel().to(device) # shape [55x3] + num_cols = colorwheel.shape[0] + norm = torch.sum(normalized_flow**2, dim=1).sqrt() + a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi + fk = (a + 1) / 2 * (num_cols - 1) + k0 = torch.floor(fk).to(torch.long) + k1 = k0 + 1 + k1[k1 == num_cols] = 0 + f = fk - k0 + + for c in range(colorwheel.shape[1]): + tmp = colorwheel[:, c] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1 - f) * col0 + f * col1 + col = 1 - norm * (1 - col) + flow_image[:, c, :, :] = torch.floor(255. * col) + return flow_image + + +@torch.no_grad() +def _make_colorwheel() -> torch.Tensor: + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf. + + Returns: + colorwheel (Tensor[55, 3]): Colorwheel Tensor. 
+ """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = torch.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = torch.floor(255. * torch.arange(0., RY) / RY) + col = col + RY + # YG + colorwheel[col : col + YG, 0] = 255 - torch.floor(255. * torch.arange(0., YG) / YG) + colorwheel[col : col + YG, 1] = 255 + col = col + YG + # GC + colorwheel[col : col + GC, 1] = 255 + colorwheel[col : col + GC, 2] = torch.floor(255. * torch.arange(0., GC) / GC) + col = col + GC + # CB + colorwheel[col : col + CB, 1] = 255 - torch.floor(255. * torch.arange(CB) / CB) + colorwheel[col : col + CB, 2] = 255 + col = col + CB + # BM + colorwheel[col : col + BM, 2] = 255 + colorwheel[col : col + BM, 0] = torch.floor(255. * torch.arange(0., BM) / BM) + col = col + BM + # MR + colorwheel[col : col + MR, 2] = 255 - torch.floor(255. * torch.arange(MR) / MR) + colorwheel[col : col + MR, 0] = 255 + return colorwheel diff --git a/florence_sam/RAFT/utils/frame_utils.py b/florence_sam/RAFT/utils/frame_utils.py new file mode 100755 index 0000000000000000000000000000000000000000..6c491135efaffc25bd61ec3ecde99d236f5deb12 --- /dev/null +++ b/florence_sam/RAFT/utils/frame_utils.py @@ -0,0 +1,137 @@ +import numpy as np +from PIL import Image +from os.path import * +import re + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +TAG_CHAR = np.array([202021.25], np.float32) + +def readFlow(fn): + """ Read .flo file in Middlebury format""" + # Code adapted from: + # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy + + # WARNING: this will work on little-endian architectures (eg Intel x86) only! + # print 'fn = %s'%(fn) + with open(fn, 'rb') as f: + magic = np.fromfile(f, np.float32, count=1) + if 202021.25 != magic: + print('Magic number incorrect. Invalid .flo file') + return None + else: + w = np.fromfile(f, np.int32, count=1) + h = np.fromfile(f, np.int32, count=1) + # print 'Reading %d x %d flo file\n' % (w, h) + data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) + # Reshape data into 3D array (columns, rows, bands) + # The reshape here is for visualization, the original code is (w,h,2) + return np.resize(data, (int(h), int(w), 2)) + +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + +def writeFlow(filename,uv,v=None): + """ Write optical flow to file. + + If v is None, uv is assumed to contain both u and v channels, + stacked in depth. + Original code by Deqing Sun, adapted from Daniel Scharstein. 
+ """ + nBands = 2 + + if v is None: + assert(uv.ndim == 3) + assert(uv.shape[2] == 2) + u = uv[:,:,0] + v = uv[:,:,1] + else: + u = uv + + assert(u.shape == v.shape) + height,width = u.shape + f = open(filename,'wb') + # write the header + f.write(TAG_CHAR) + np.array(width).astype(np.int32).tofile(f) + np.array(height).astype(np.int32).tofile(f) + # arrange into matrix form + tmp = np.zeros((height, width*nBands)) + tmp[:,np.arange(width)*2] = u + tmp[:,np.arange(width)*2 + 1] = v + tmp.astype(np.float32).tofile(f) + f.close() + + +def readFlowKITTI(filename): + flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR) + flow = flow[:,:,::-1].astype(np.float32) + flow, valid = flow[:, :, :2], flow[:, :, 2] + flow = (flow - 2**15) / 64.0 + return flow, valid + +def readDispKITTI(filename): + disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0 + valid = disp > 0.0 + flow = np.stack([-disp, np.zeros_like(disp)], -1) + return flow, valid + + +def writeFlowKITTI(filename, uv): + uv = 64.0 * uv + 2**15 + valid = np.ones([uv.shape[0], uv.shape[1], 1]) + uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16) + cv2.imwrite(filename, uv[..., ::-1]) + + +def read_gen(file_name, pil=False): + ext = splitext(file_name)[-1] + if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': + return Image.open(file_name) + elif ext == '.bin' or ext == '.raw': + return np.load(file_name) + elif ext == '.flo': + return readFlow(file_name).astype(np.float32) + elif ext == '.pfm': + flow = readPFM(file_name).astype(np.float32) + if len(flow.shape) == 2: + return flow + else: + return flow[:, :, :-1] + return [] \ No newline at end of file diff --git a/florence_sam/RAFT/utils/utils.py b/florence_sam/RAFT/utils/utils.py new file mode 100755 index 0000000000000000000000000000000000000000..5f32d281c1c46353a0a2bf36b0550adb74125c65 --- /dev/null +++ b/florence_sam/RAFT/utils/utils.py @@ -0,0 +1,82 @@ +import torch +import torch.nn.functional as F +import numpy as np +from scipy import interpolate + + +class InputPadder: + """ Pads images such that dimensions are divisible by 8 """ + def __init__(self, dims, mode='sintel'): + self.ht, self.wd = dims[-2:] + pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8 + pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8 + if mode == 'sintel': + self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2] + else: + self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht] + + def pad(self, *inputs): + return [F.pad(x, self._pad, mode='replicate') for x in inputs] + + def unpad(self,x): + ht, wd = x.shape[-2:] + c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]] + return x[..., c[0]:c[1], c[2]:c[3]] + +def forward_interpolate(flow): + flow = flow.detach().cpu().numpy() + dx, dy = flow[0], flow[1] + + ht, wd = dx.shape + x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht)) + + x1 = x0 + dx + y1 = y0 + dy + + x1 = x1.reshape(-1) + y1 = y1.reshape(-1) + dx = dx.reshape(-1) + dy = dy.reshape(-1) + + valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht) + x1 = x1[valid] + y1 = y1[valid] + dx = dx[valid] + dy = dy[valid] + + flow_x = interpolate.griddata( + (x1, y1), dx, (x0, y0), method='nearest', fill_value=0) + + flow_y = interpolate.griddata( + (x1, y1), dy, (x0, y0), method='nearest', fill_value=0) + + flow = np.stack([flow_x, flow_y], axis=0) + return torch.from_numpy(flow).float() + + +def bilinear_sampler(img, coords, mode='bilinear', mask=False): + """ Wrapper for grid_sample, uses pixel coordinates """ + H, W = img.shape[-2:] + 
xgrid, ygrid = coords.split([1,1], dim=-1) + xgrid = 2*xgrid/(W-1) - 1 + ygrid = 2*ygrid/(H-1) - 1 + + grid = torch.cat([xgrid, ygrid], dim=-1) + img = F.grid_sample(img, grid, align_corners=True) + + if mask: + mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) + return img, mask.float() + + return img + + +def coords_grid(batch, ht, wd): + coords = torch.meshgrid(torch.arange(ht), torch.arange(wd)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +def upflow8(flow, mode='bilinear'): + new_size = (8 * flow.shape[2], 8 * flow.shape[3]) + return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True) diff --git a/florence_sam/README.md b/florence_sam/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c951c84e1aed32d1277c92c1d36854dbbbd7824b --- /dev/null +++ b/florence_sam/README.md @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:031c6f979f28b8b0f3b42af389fd1f741046ca89de9bc05a33912db7e30e8741 +size 258 diff --git a/florence_sam/app.py b/florence_sam/app.py new file mode 100644 index 0000000000000000000000000000000000000000..85905ea180a6c68281a5e0db44b168bd939e176f --- /dev/null +++ b/florence_sam/app.py @@ -0,0 +1,397 @@ +import os +from typing import Tuple, Optional + +import cv2 +import gradio as gr +import numpy as np +import spaces +import supervision as sv +import torch +from PIL import Image +from tqdm import tqdm +from utils.video import generate_unique_name, create_directory, delete_directory + +from utils.florence import load_florence_model, run_florence_inference, \ + FLORENCE_DETAILED_CAPTION_TASK, \ + FLORENCE_CAPTION_TO_PHRASE_GROUNDING_TASK, FLORENCE_OPEN_VOCABULARY_DETECTION_TASK +from utils.modes import IMAGE_INFERENCE_MODES, IMAGE_OPEN_VOCABULARY_DETECTION_MODE, \ + IMAGE_CAPTION_GROUNDING_MASKS_MODE, VIDEO_INFERENCE_MODES +from utils.sam import load_sam_image_model, run_sam_inference, load_sam_video_model + +MARKDOWN = """ +# Florence2 + SAM2 🔥 + +
+ +This demo integrates Florence2 and SAM2 by creating a two-stage inference pipeline. In +the first stage, Florence2 performs tasks such as object detection, open-vocabulary +object detection, image captioning, or phrase grounding. In the second stage, SAM2 +performs object segmentation on the image. +""" + +IMAGE_PROCESSING_EXAMPLES = [ + [IMAGE_OPEN_VOCABULARY_DETECTION_MODE, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg", 'straw, white napkin, black napkin, hair'], + [IMAGE_OPEN_VOCABULARY_DETECTION_MODE, "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", 'tail'], + [IMAGE_CAPTION_GROUNDING_MASKS_MODE, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg", None], + [IMAGE_CAPTION_GROUNDING_MASKS_MODE, "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", None], +] +VIDEO_PROCESSING_EXAMPLES = [ + ["videos/clip-07-camera-1.mp4", "player in white outfit, player in black outfit, ball, rim"], + ["videos/clip-07-camera-2.mp4", "player in white outfit, player in black outfit, ball, rim"], + ["videos/clip-07-camera-3.mp4", "player in white outfit, player in black outfit, ball, rim"] +] + +VIDEO_SCALE_FACTOR = 0.5 +VIDEO_TARGET_DIRECTORY = "tmp" +create_directory(directory_path=VIDEO_TARGET_DIRECTORY) + +DEVICE = torch.device("cuda") +# DEVICE = torch.device("cpu") + +torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__() +if torch.cuda.get_device_properties(0).major >= 8: + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + + +FLORENCE_MODEL, FLORENCE_PROCESSOR = load_florence_model(device=DEVICE) +SAM_IMAGE_MODEL = load_sam_image_model(device=DEVICE) +SAM_VIDEO_MODEL = load_sam_video_model(device=DEVICE) +COLORS = ['#FF1493', '#00BFFF', '#FF6347', '#FFD700', '#32CD32', '#8A2BE2'] +COLOR_PALETTE = sv.ColorPalette.from_hex(COLORS) +BOX_ANNOTATOR = sv.BoxAnnotator(color=COLOR_PALETTE, color_lookup=sv.ColorLookup.INDEX) +LABEL_ANNOTATOR = sv.LabelAnnotator( + color=COLOR_PALETTE, + color_lookup=sv.ColorLookup.INDEX, + text_position=sv.Position.CENTER_OF_MASS, + text_color=sv.Color.from_hex("#000000"), + border_radius=5 +) +MASK_ANNOTATOR = sv.MaskAnnotator( + color=COLOR_PALETTE, + color_lookup=sv.ColorLookup.INDEX +) + + +def annotate_image(image, detections): + output_image = image.copy() + output_image = MASK_ANNOTATOR.annotate(output_image, detections) + output_image = BOX_ANNOTATOR.annotate(output_image, detections) + output_image = LABEL_ANNOTATOR.annotate(output_image, detections) + return output_image + + +def on_mode_dropdown_change(text): + return [ + gr.Textbox(visible=text == IMAGE_OPEN_VOCABULARY_DETECTION_MODE), + gr.Textbox(visible=text == IMAGE_CAPTION_GROUNDING_MASKS_MODE), + ] + + +@spaces.GPU +@torch.inference_mode() +@torch.autocast(device_type="cuda", dtype=torch.bfloat16) +def process_image( + mode_dropdown, image_input, text_input +) -> Tuple[Optional[Image.Image], Optional[str]]: + if not image_input: + gr.Info("Please upload an image.") + return None, None + + if mode_dropdown == IMAGE_OPEN_VOCABULARY_DETECTION_MODE: + if not text_input: + gr.Info("Please enter a text prompt.") + return None, None + + texts = [prompt.strip() for prompt in text_input.split(",")] + detections_list = [] + for text in texts: + _, result = run_florence_inference( + model=FLORENCE_MODEL, + processor=FLORENCE_PROCESSOR, + device=DEVICE, + image=image_input, + task=FLORENCE_OPEN_VOCABULARY_DETECTION_TASK, + text=text + ) + detections = sv.Detections.from_lmm( + lmm=sv.LMM.FLORENCE_2, + result=result, + 
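+                # resolution_wh hands supervision the image size used to
+                # interpret the raw Florence-2 result before SAM2 refines it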
resolution_wh=image_input.size + ) + detections = run_sam_inference(SAM_IMAGE_MODEL, image_input, detections) + detections_list.append(detections) + + detections = sv.Detections.merge(detections_list) + detections = run_sam_inference(SAM_IMAGE_MODEL, image_input, detections) + return annotate_image(image_input, detections), None + + if mode_dropdown == IMAGE_CAPTION_GROUNDING_MASKS_MODE: + _, result = run_florence_inference( + model=FLORENCE_MODEL, + processor=FLORENCE_PROCESSOR, + device=DEVICE, + image=image_input, + task=FLORENCE_DETAILED_CAPTION_TASK + ) + caption = result[FLORENCE_DETAILED_CAPTION_TASK] + _, result = run_florence_inference( + model=FLORENCE_MODEL, + processor=FLORENCE_PROCESSOR, + device=DEVICE, + image=image_input, + task=FLORENCE_CAPTION_TO_PHRASE_GROUNDING_TASK, + text=caption + ) + detections = sv.Detections.from_lmm( + lmm=sv.LMM.FLORENCE_2, + result=result, + resolution_wh=image_input.size + ) + detections = run_sam_inference(SAM_IMAGE_MODEL, image_input, detections) + return annotate_image(image_input, detections), caption + + +@spaces.GPU(duration=300) +@torch.inference_mode() +@torch.autocast(device_type="cuda", dtype=torch.bfloat16) +def process_video( + video_input, text_input, progress=gr.Progress(track_tqdm=True) +) -> Optional[str]: + if not video_input: + gr.Info("Please upload a video.") + return None + + if not text_input: + gr.Info("Please enter a text prompt.") + return None + + frame_generator = sv.get_video_frames_generator(video_input) + frame = next(frame_generator) + frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + + texts = [prompt.strip() for prompt in text_input.split(",")] + detections_list = [] + for text in texts: + _, result = run_florence_inference( + model=FLORENCE_MODEL, + processor=FLORENCE_PROCESSOR, + device=DEVICE, + image=frame, + task=FLORENCE_OPEN_VOCABULARY_DETECTION_TASK, + text=text + ) + detections = sv.Detections.from_lmm( + lmm=sv.LMM.FLORENCE_2, + result=result, + resolution_wh=frame.size + ) + detections = run_sam_inference(SAM_IMAGE_MODEL, frame, detections) + detections_list.append(detections) + + detections = sv.Detections.merge(detections_list) + detections = run_sam_inference(SAM_IMAGE_MODEL, frame, detections) + + if len(detections.mask) == 0: + gr.Info( + "No objects of class {text_input} found in the first frame of the video. " + "Trim the video to make the object appear in the first frame or try a " + "different text prompt." 
+ ) + return None + + name = generate_unique_name() + frame_directory_path = os.path.join(VIDEO_TARGET_DIRECTORY, name) + frames_sink = sv.ImageSink( + target_dir_path=frame_directory_path, + image_name_pattern="{:05d}.jpeg" + ) + + video_info = sv.VideoInfo.from_video_path(video_input) + video_info.width = int(video_info.width * VIDEO_SCALE_FACTOR) + video_info.height = int(video_info.height * VIDEO_SCALE_FACTOR) + + frames_generator = sv.get_video_frames_generator(video_input) + with frames_sink: + for frame in tqdm( + frames_generator, + total=video_info.total_frames, + desc="splitting video into frames" + ): + frame = sv.scale_image(frame, VIDEO_SCALE_FACTOR) + frames_sink.save_image(frame) + + inference_state = SAM_VIDEO_MODEL.init_state( + video_path=frame_directory_path, + device=DEVICE + ) + + for mask_index, mask in enumerate(detections.mask): + _, object_ids, mask_logits = SAM_VIDEO_MODEL.add_new_mask( + inference_state=inference_state, + frame_idx=0, + obj_id=mask_index, + mask=mask + ) + + video_path = os.path.join(VIDEO_TARGET_DIRECTORY, f"{name}.mp4") + frames_generator = sv.get_video_frames_generator(video_input) + masks_generator = SAM_VIDEO_MODEL.propagate_in_video(inference_state) + with sv.VideoSink(video_path, video_info=video_info) as sink: + for frame, (_, tracker_ids, mask_logits) in zip(frames_generator, masks_generator): + frame = sv.scale_image(frame, VIDEO_SCALE_FACTOR) + masks = (mask_logits > 0.0).cpu().numpy().astype(bool) + if len(masks.shape) == 4: + masks = np.squeeze(masks, axis=1) + + detections = sv.Detections( + xyxy=sv.mask_to_xyxy(masks=masks), + mask=masks, + class_id=np.array(tracker_ids) + ) + annotated_frame = frame.copy() + annotated_frame = MASK_ANNOTATOR.annotate( + scene=annotated_frame, detections=detections) + annotated_frame = BOX_ANNOTATOR.annotate( + scene=annotated_frame, detections=detections) + sink.write_frame(annotated_frame) + + delete_directory(frame_directory_path) + return video_path + + +with gr.Blocks() as demo: + gr.Markdown(MARKDOWN) + with gr.Tab("Image"): + image_processing_mode_dropdown_component = gr.Dropdown( + choices=IMAGE_INFERENCE_MODES, + value=IMAGE_INFERENCE_MODES[0], + label="Mode", + info="Select a mode to use.", + interactive=True + ) + with gr.Row(): + with gr.Column(): + image_processing_image_input_component = gr.Image( + type='pil', label='Upload image') + image_processing_text_input_component = gr.Textbox( + label='Text prompt', + placeholder='Enter comma separated text prompts') + image_processing_submit_button_component = gr.Button( + value='Submit', variant='primary') + with gr.Column(): + image_processing_image_output_component = gr.Image( + type='pil', label='Image output') + image_processing_text_output_component = gr.Textbox( + label='Caption output', visible=False) + + with gr.Row(): + gr.Examples( + fn=process_image, + examples=IMAGE_PROCESSING_EXAMPLES, + inputs=[ + image_processing_mode_dropdown_component, + image_processing_image_input_component, + image_processing_text_input_component + ], + outputs=[ + image_processing_image_output_component, + image_processing_text_output_component + ], + run_on_click=True + ) + with gr.Tab("Video"): + video_processing_mode_dropdown_component = gr.Dropdown( + choices=VIDEO_INFERENCE_MODES, + value=VIDEO_INFERENCE_MODES[0], + label="Mode", + info="Select a mode to use.", + interactive=True + ) + with gr.Row(): + with gr.Column(): + video_processing_video_input_component = gr.Video( + label='Upload video') + video_processing_text_input_component = gr.Textbox( 
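+                    # process_video above splits this prompt on commas and runs
+                    # Florence-2 once per phrase, so several objects can be
+                    # segmented and tracked in one submission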
+ label='Text prompt', + placeholder='Enter comma separated text prompts') + video_processing_submit_button_component = gr.Button( + value='Submit', variant='primary') + with gr.Column(): + video_processing_video_output_component = gr.Video( + label='Video output') + with gr.Row(): + gr.Examples( + fn=process_video, + examples=VIDEO_PROCESSING_EXAMPLES, + inputs=[ + video_processing_video_input_component, + video_processing_text_input_component + ], + outputs=video_processing_video_output_component, + run_on_click=True + ) + + image_processing_submit_button_component.click( + fn=process_image, + inputs=[ + image_processing_mode_dropdown_component, + image_processing_image_input_component, + image_processing_text_input_component + ], + outputs=[ + image_processing_image_output_component, + image_processing_text_output_component + ] + ) + image_processing_text_input_component.submit( + fn=process_image, + inputs=[ + image_processing_mode_dropdown_component, + image_processing_image_input_component, + image_processing_text_input_component + ], + outputs=[ + image_processing_image_output_component, + image_processing_text_output_component + ] + ) + image_processing_mode_dropdown_component.change( + on_mode_dropdown_change, + inputs=[image_processing_mode_dropdown_component], + outputs=[ + image_processing_text_input_component, + image_processing_text_output_component + ] + ) + video_processing_submit_button_component.click( + fn=process_video, + inputs=[ + video_processing_video_input_component, + video_processing_text_input_component + ], + outputs=video_processing_video_output_component + ) + video_processing_text_input_component.submit( + fn=process_video, + inputs=[ + video_processing_video_input_component, + video_processing_text_input_component + ], + outputs=video_processing_video_output_component + ) + +demo.launch(debug=False, show_error=True) diff --git a/florence_sam/checkpoints/sam2_hiera_base_plus.pt b/florence_sam/checkpoints/sam2_hiera_base_plus.pt new file mode 100644 index 0000000000000000000000000000000000000000..604440531fd79487b117597f70fe7031899b499e --- /dev/null +++ b/florence_sam/checkpoints/sam2_hiera_base_plus.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0bb7f236400a49669ffdd1be617959a8b1d1065081789d7bbff88eded3a8071 +size 323493298 diff --git a/florence_sam/checkpoints/sam2_hiera_large.pt b/florence_sam/checkpoints/sam2_hiera_large.pt new file mode 100644 index 0000000000000000000000000000000000000000..7198ee4779a9e91db4d79bdc80e188cc182482e0 --- /dev/null +++ b/florence_sam/checkpoints/sam2_hiera_large.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7442e4e9b732a508f80e141e7c2913437a3610ee0c77381a66658c3a445df87b +size 897952466 diff --git a/florence_sam/checkpoints/sam2_hiera_small.pt b/florence_sam/checkpoints/sam2_hiera_small.pt new file mode 100644 index 0000000000000000000000000000000000000000..2750fc4d2d6598be8d0e9a49f7514cf4ebc8fee2 --- /dev/null +++ b/florence_sam/checkpoints/sam2_hiera_small.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95949964d4e548409021d47b22712d5f1abf2564cc0c3c765ba599a24ac7dce3 +size 184309650 diff --git a/florence_sam/checkpoints/sam2_hiera_tiny.pt b/florence_sam/checkpoints/sam2_hiera_tiny.pt new file mode 100644 index 0000000000000000000000000000000000000000..b40cafb7f49e2ec805913bc4182e4abba996a097 --- /dev/null +++ b/florence_sam/checkpoints/sam2_hiera_tiny.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:65b50056e05bcb13694174f51bb6da89c894b57b75ccdf0ba6352c597c5d1125 +size 155906050 diff --git a/florence_sam/configs/__init__.py b/florence_sam/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/florence_sam/configs/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/florence_sam/configs/sam2_hiera_b+.yaml b/florence_sam/configs/sam2_hiera_b+.yaml new file mode 100644 index 0000000000000000000000000000000000000000..58f3eb81554018e873f8515ecb98e36d16ac29e4 --- /dev/null +++ b/florence_sam/configs/sam2_hiera_b+.yaml @@ -0,0 +1,113 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 112 + num_heads: 2 + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [896, 448, 224, 112] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the 
encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/florence_sam/configs/sam2_hiera_l.yaml b/florence_sam/configs/sam2_hiera_l.yaml new file mode 100644 index 0000000000000000000000000000000000000000..918667f50c3e1ad2dcf77c0c14cb4dd114cfd080 --- /dev/null +++ b/florence_sam/configs/sam2_hiera_l.yaml @@ -0,0 +1,117 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 144 + num_heads: 2 + stages: [2, 6, 36, 4] + global_att_blocks: [23, 33, 43] + window_pos_embed_bkg_spatial_size: [7, 7] + window_spec: [8, 4, 16, 8] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [1152, 576, 288, 144] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend to object pointers 
from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/florence_sam/configs/sam2_hiera_s.yaml b/florence_sam/configs/sam2_hiera_s.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26e5d4d39f7b2892396106005c37c7ffe6c83bc2 --- /dev/null +++ b/florence_sam/configs/sam2_hiera_s.yaml @@ -0,0 +1,116 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 11, 2] + global_att_blocks: [7, 10, 13] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + iou_prediction_use_sigmoid: True + # cross-attend 
to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + compile_image_encoder: False diff --git a/florence_sam/configs/sam2_hiera_t.yaml b/florence_sam/configs/sam2_hiera_t.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a62c903aaa5f80828077c6e06a59626926570ed6 --- /dev/null +++ b/florence_sam/configs/sam2_hiera_t.yaml @@ -0,0 +1,118 @@ +# @package _global_ + +# Model +model: + _target_: sam2.modeling.sam2_base.SAM2Base + image_encoder: + _target_: sam2.modeling.backbones.image_encoder.ImageEncoder + scalp: 1 + trunk: + _target_: sam2.modeling.backbones.hieradet.Hiera + embed_dim: 96 + num_heads: 1 + stages: [1, 2, 7, 2] + global_att_blocks: [5, 7, 9] + window_pos_embed_bkg_spatial_size: [7, 7] + neck: + _target_: sam2.modeling.backbones.image_encoder.FpnNeck + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 256 + normalize: true + scale: null + temperature: 10000 + d_model: 256 + backbone_channel_list: [768, 384, 192, 96] + fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features + fpn_interp_model: nearest + + memory_attention: + _target_: sam2.modeling.memory_attention.MemoryAttention + d_model: 256 + pos_enc_at_input: true + layer: + _target_: sam2.modeling.memory_attention.MemoryAttentionLayer + activation: relu + dim_feedforward: 2048 + dropout: 0.1 + pos_enc_at_attn: false + self_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + d_model: 256 + pos_enc_at_cross_attn_keys: true + pos_enc_at_cross_attn_queries: false + cross_attention: + _target_: sam2.modeling.sam.transformer.RoPEAttention + rope_theta: 10000.0 + feat_sizes: [32, 32] + rope_k_repeat: True + embedding_dim: 256 + num_heads: 1 + downsample_rate: 1 + dropout: 0.1 + kv_in_dim: 64 + num_layers: 4 + + memory_encoder: + _target_: sam2.modeling.memory_encoder.MemoryEncoder + out_dim: 64 + position_encoding: + _target_: sam2.modeling.position_encoding.PositionEmbeddingSine + num_pos_feats: 64 + normalize: true + scale: null + temperature: 10000 + mask_downsampler: + _target_: sam2.modeling.memory_encoder.MaskDownSampler + kernel_size: 3 + stride: 2 + padding: 1 + fuser: + _target_: sam2.modeling.memory_encoder.Fuser + layer: + _target_: sam2.modeling.memory_encoder.CXBlock + dim: 256 + kernel_size: 7 + padding: 3 + layer_scale_init_value: 1e-6 + use_dwconv: True # depth-wise convs + num_layers: 2 + + num_maskmem: 7 + image_size: 1024 + # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask + # SAM decoder + sigmoid_scale_for_mem_enc: 20.0 + sigmoid_bias_for_mem_enc: -10.0 + use_mask_input_as_output_without_sam: true + # Memory + directly_add_no_mem_embed: true + # use high-resolution feature map in the SAM mask decoder + use_high_res_features_in_sam: true + # output 3 masks on the first click on initial conditioning frames + multimask_output_in_sam: true + # SAM heads + 
iou_prediction_use_sigmoid: True + # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder + use_obj_ptrs_in_encoder: true + add_tpos_enc_to_obj_ptrs: false + only_obj_ptrs_in_the_past_for_eval: true + # object occlusion prediction + pred_obj_scores: true + pred_obj_scores_mlp: true + fixed_no_obj_ptr: true + # multimask tracking settings + multimask_output_for_tracking: true + use_multimask_token_for_obj_ptr: true + multimask_min_pt_num: 0 + multimask_max_pt_num: 1 + use_mlp_for_obj_ptr_proj: true + # Compilation flag + # HieraT does not currently support compilation, should always be set to False + compile_image_encoder: False diff --git a/florence_sam/configs/train_flowcomp.json b/florence_sam/configs/train_flowcomp.json new file mode 100644 index 0000000000000000000000000000000000000000..c3c9ca8043b88611c1e0579ee2f469d3eee987b6 --- /dev/null +++ b/florence_sam/configs/train_flowcomp.json @@ -0,0 +1,40 @@ +{ + "seed": 2023, + "save_dir": "experiments_model/", + "train_data_loader": { + "name": "youtube-vos", + "video_root": "your_video_root", + "flow_root": "your_flow_root", + "w": 432, + "h": 240, + "num_local_frames": 10, + "num_ref_frames": 1, + "load_flow": 0 + }, + "losses": { + "flow_weight": 0.25 + }, + "model": { + "net": "recurrent_flow_completion" + }, + "trainer": { + "version": "trainer_flow_w_edge", + "type": "Adam", + "beta1": 0, + "beta2": 0.99, + "lr": 5e-5, + "batch_size": 8, + "num_workers": 4, + "num_prefetch_queue": 4, + "log_freq": 100, + "save_freq": 5e3, + "iterations": 700e3, + "scheduler": { + "type": "MultiStepLR", + "milestones": [ + 300e3, 400e3, 500e3, 600e3 + ], + "gamma": 0.2 + } + } +} \ No newline at end of file diff --git a/florence_sam/configs/train_propainter.json b/florence_sam/configs/train_propainter.json new file mode 100644 index 0000000000000000000000000000000000000000..c0c29ba7a6ad02d6983206d530f6256d8b120ec7 --- /dev/null +++ b/florence_sam/configs/train_propainter.json @@ -0,0 +1,48 @@ +{ + "seed": 2023, + "save_dir": "experiments_model/", + "train_data_loader": { + "name": "youtube-vos", + "video_root": "your_video_root", + "flow_root": "your_flow_root", + "w": 432, + "h": 240, + "num_local_frames": 10, + "num_ref_frames": 6, + "load_flow": 0 + }, + "losses": { + "hole_weight": 1, + "valid_weight": 1, + "flow_weight": 1, + "adversarial_weight": 0.01, + "GAN_LOSS": "hinge", + "perceptual_weight": 0 + }, + "model": { + "net": "propainter", + "no_dis": 0, + "load_d": 1, + "interp_mode": "nearest" + }, + "trainer": { + "version": "trainer", + "type": "Adam", + "beta1": 0, + "beta2": 0.99, + "lr": 1e-4, + "batch_size": 8, + "num_workers": 8, + "num_prefetch_queue": 8, + "log_freq": 100, + "save_freq": 1e4, + "iterations": 700e3, + "scheduler": { + "type": "MultiStepLR", + "milestones": [ + 400e3 + ], + "gamma": 0.1 + } + } +} \ No newline at end of file diff --git a/florence_sam/core/dataset.py b/florence_sam/core/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..27b135bb7716f0e89d9a3ec9fd4411dfe3eb94eb --- /dev/null +++ b/florence_sam/core/dataset.py @@ -0,0 +1,232 @@ +import os +import json +import random + +import cv2 +from PIL import Image +import numpy as np + +import torch +import torchvision.transforms as transforms + +from utils.file_client import FileClient +from utils.img_util import imfrombytes +from utils.flow_util import resize_flow, flowread +from core.utils import (create_random_shape_with_random_motion, Stack, + ToTorchFormatTensor, 
GroupRandomHorizontalFlip,GroupRandomHorizontalFlowFlip) + + +class TrainDataset(torch.utils.data.Dataset): + def __init__(self, args: dict): + self.args = args + self.video_root = args['video_root'] + self.flow_root = args['flow_root'] + self.num_local_frames = args['num_local_frames'] + self.num_ref_frames = args['num_ref_frames'] + self.size = self.w, self.h = (args['w'], args['h']) + + self.load_flow = args['load_flow'] + if self.load_flow: + assert os.path.exists(self.flow_root) + + json_path = os.path.join('./datasets', args['name'], 'train.json') + + with open(json_path, 'r') as f: + self.video_train_dict = json.load(f) + self.video_names = sorted(list(self.video_train_dict.keys())) + + # self.video_names = sorted(os.listdir(self.video_root)) + self.video_dict = {} + self.frame_dict = {} + + for v in self.video_names: + frame_list = sorted(os.listdir(os.path.join(self.video_root, v))) + v_len = len(frame_list) + if v_len > self.num_local_frames + self.num_ref_frames: + self.video_dict[v] = v_len + self.frame_dict[v] = frame_list + + + self.video_names = list(self.video_dict.keys()) # update names + + self._to_tensors = transforms.Compose([ + Stack(), + ToTorchFormatTensor(), + ]) + self.file_client = FileClient('disk') + + def __len__(self): + return len(self.video_names) + + def _sample_index(self, length, sample_length, num_ref_frame=3): + complete_idx_set = list(range(length)) + pivot = random.randint(0, length - sample_length) + local_idx = complete_idx_set[pivot:pivot + sample_length] + remain_idx = list(set(complete_idx_set) - set(local_idx)) + ref_index = sorted(random.sample(remain_idx, num_ref_frame)) + + return local_idx + ref_index + + def __getitem__(self, index): + video_name = self.video_names[index] + # create masks + all_masks = create_random_shape_with_random_motion( + self.video_dict[video_name], imageHeight=self.h, imageWidth=self.w) + + # create sample index + selected_index = self._sample_index(self.video_dict[video_name], + self.num_local_frames, + self.num_ref_frames) + + # read video frames + frames = [] + masks = [] + flows_f, flows_b = [], [] + for idx in selected_index: + frame_list = self.frame_dict[video_name] + img_path = os.path.join(self.video_root, video_name, frame_list[idx]) + img_bytes = self.file_client.get(img_path, 'img') + img = imfrombytes(img_bytes, float32=False) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, self.size, interpolation=cv2.INTER_LINEAR) + img = Image.fromarray(img) + + frames.append(img) + masks.append(all_masks[idx]) + + if len(frames) <= self.num_local_frames-1 and self.load_flow: + current_n = frame_list[idx][:-4] + next_n = frame_list[idx+1][:-4] + flow_f_path = os.path.join(self.flow_root, video_name, f'{current_n}_{next_n}_f.flo') + flow_b_path = os.path.join(self.flow_root, video_name, f'{next_n}_{current_n}_b.flo') + flow_f = flowread(flow_f_path, quantize=False) + flow_b = flowread(flow_b_path, quantize=False) + flow_f = resize_flow(flow_f, self.h, self.w) + flow_b = resize_flow(flow_b, self.h, self.w) + flows_f.append(flow_f) + flows_b.append(flow_b) + + if len(frames) == self.num_local_frames: # random reverse + if random.random() < 0.5: + frames.reverse() + masks.reverse() + if self.load_flow: + flows_f.reverse() + flows_b.reverse() + flows_ = flows_f + flows_f = flows_b + flows_b = flows_ + + if self.load_flow: + frames, flows_f, flows_b = GroupRandomHorizontalFlowFlip()(frames, flows_f, flows_b) + else: + frames = GroupRandomHorizontalFlip()(frames) + + # normalizate, to tensors + 
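+        # Stack/ToTorchFormatTensor (from core.utils, assumed to produce values
+        # in [0, 1]) feed the *2 - 1 below, which rescales frames to [-1, 1]
+        # while masks stay in [0, 1] (see the "img [-1,1] mask [0,1]" note)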
frame_tensors = self._to_tensors(frames) * 2.0 - 1.0 + mask_tensors = self._to_tensors(masks) + if self.load_flow: + flows_f = np.stack(flows_f, axis=-1) # H W 2 T-1 + flows_b = np.stack(flows_b, axis=-1) + flows_f = torch.from_numpy(flows_f).permute(3, 2, 0, 1).contiguous().float() + flows_b = torch.from_numpy(flows_b).permute(3, 2, 0, 1).contiguous().float() + + # img [-1,1] mask [0,1] + if self.load_flow: + return frame_tensors, mask_tensors, flows_f, flows_b, video_name + else: + return frame_tensors, mask_tensors, 'None', 'None', video_name + + +class TestDataset(torch.utils.data.Dataset): + def __init__(self, args): + self.args = args + self.size = self.w, self.h = args['size'] + + self.video_root = args['video_root'] + self.mask_root = args['mask_root'] + self.flow_root = args['flow_root'] + + self.load_flow = args['load_flow'] + if self.load_flow: + assert os.path.exists(self.flow_root) + self.video_names = sorted(os.listdir(self.mask_root)) + + self.video_dict = {} + self.frame_dict = {} + + for v in self.video_names: + frame_list = sorted(os.listdir(os.path.join(self.video_root, v))) + v_len = len(frame_list) + self.video_dict[v] = v_len + self.frame_dict[v] = frame_list + + self._to_tensors = transforms.Compose([ + Stack(), + ToTorchFormatTensor(), + ]) + self.file_client = FileClient('disk') + + def __len__(self): + return len(self.video_names) + + def __getitem__(self, index): + video_name = self.video_names[index] + selected_index = list(range(self.video_dict[video_name])) + + # read video frames + frames = [] + masks = [] + flows_f, flows_b = [], [] + for idx in selected_index: + frame_list = self.frame_dict[video_name] + frame_path = os.path.join(self.video_root, video_name, frame_list[idx]) + + img_bytes = self.file_client.get(frame_path, 'input') + img = imfrombytes(img_bytes, float32=False) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, self.size, interpolation=cv2.INTER_LINEAR) + img = Image.fromarray(img) + + frames.append(img) + + mask_path = os.path.join(self.mask_root, video_name, str(idx).zfill(5) + '.png') + mask = Image.open(mask_path).resize(self.size, Image.NEAREST).convert('L') + + # origin: 0 indicates missing. 
now: 1 indicates missing + mask = np.asarray(mask) + m = np.array(mask > 0).astype(np.uint8) + + m = cv2.dilate(m, + cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)), + iterations=4) + mask = Image.fromarray(m * 255) + masks.append(mask) + + if len(frames) <= len(selected_index)-1 and self.load_flow: + current_n = frame_list[idx][:-4] + next_n = frame_list[idx+1][:-4] + flow_f_path = os.path.join(self.flow_root, video_name, f'{current_n}_{next_n}_f.flo') + flow_b_path = os.path.join(self.flow_root, video_name, f'{next_n}_{current_n}_b.flo') + flow_f = flowread(flow_f_path, quantize=False) + flow_b = flowread(flow_b_path, quantize=False) + flow_f = resize_flow(flow_f, self.h, self.w) + flow_b = resize_flow(flow_b, self.h, self.w) + flows_f.append(flow_f) + flows_b.append(flow_b) + + # normalizate, to tensors + frames_PIL = [np.array(f).astype(np.uint8) for f in frames] + frame_tensors = self._to_tensors(frames) * 2.0 - 1.0 + mask_tensors = self._to_tensors(masks) + + if self.load_flow: + flows_f = np.stack(flows_f, axis=-1) # H W 2 T-1 + flows_b = np.stack(flows_b, axis=-1) + flows_f = torch.from_numpy(flows_f).permute(3, 2, 0, 1).contiguous().float() + flows_b = torch.from_numpy(flows_b).permute(3, 2, 0, 1).contiguous().float() + + if self.load_flow: + return frame_tensors, mask_tensors, flows_f, flows_b, video_name, frames_PIL + else: + return frame_tensors, mask_tensors, 'None', 'None', video_name \ No newline at end of file diff --git a/florence_sam/core/dist.py b/florence_sam/core/dist.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4e9e670a3b853fac345618d3557d648d813902 --- /dev/null +++ b/florence_sam/core/dist.py @@ -0,0 +1,47 @@ +import os +import torch + + +def get_world_size(): + """Find OMPI world size without calling mpi functions + :rtype: int + """ + if os.environ.get('PMI_SIZE') is not None: + return int(os.environ.get('PMI_SIZE') or 1) + elif os.environ.get('OMPI_COMM_WORLD_SIZE') is not None: + return int(os.environ.get('OMPI_COMM_WORLD_SIZE') or 1) + else: + return torch.cuda.device_count() + + +def get_global_rank(): + """Find OMPI world rank without calling mpi functions + :rtype: int + """ + if os.environ.get('PMI_RANK') is not None: + return int(os.environ.get('PMI_RANK') or 0) + elif os.environ.get('OMPI_COMM_WORLD_RANK') is not None: + return int(os.environ.get('OMPI_COMM_WORLD_RANK') or 0) + else: + return 0 + + +def get_local_rank(): + """Find OMPI local rank without calling mpi functions + :rtype: int + """ + if os.environ.get('MPI_LOCALRANKID') is not None: + return int(os.environ.get('MPI_LOCALRANKID') or 0) + elif os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') is not None: + return int(os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') or 0) + else: + return 0 + + +def get_master_ip(): + if os.environ.get('AZ_BATCH_MASTER_NODE') is not None: + return os.environ.get('AZ_BATCH_MASTER_NODE').split(':')[0] + elif os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE') is not None: + return os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE') + else: + return "127.0.0.1" diff --git a/florence_sam/core/loss.py b/florence_sam/core/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d94d0ce9433b66ce2dce7adb24acb16051e8da --- /dev/null +++ b/florence_sam/core/loss.py @@ -0,0 +1,180 @@ +import torch +import torch.nn as nn +import lpips +from model.vgg_arch import VGGFeatureExtractor + +class PerceptualLoss(nn.Module): + """Perceptual loss with commonly used style loss. 
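+
+    The perceptual term compares VGG feature maps directly, while the style
+    term compares their Gram matrices (see _gram_mat below).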
+ + Args: + layer_weights (dict): The weight for each layer of vgg feature. + Here is an example: {'conv5_4': 1.}, which means the conv5_4 + feature layer (before relu5_4) will be extracted with weight + 1.0 in calculting losses. + vgg_type (str): The type of vgg network used as feature extractor. + Default: 'vgg19'. + use_input_norm (bool): If True, normalize the input image in vgg. + Default: True. + range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. + Default: False. + perceptual_weight (float): If `perceptual_weight > 0`, the perceptual + loss will be calculated and the loss will multiplied by the + weight. Default: 1.0. + style_weight (float): If `style_weight > 0`, the style loss will be + calculated and the loss will multiplied by the weight. + Default: 0. + criterion (str): Criterion used for perceptual loss. Default: 'l1'. + """ + + def __init__(self, + layer_weights, + vgg_type='vgg19', + use_input_norm=True, + range_norm=False, + perceptual_weight=1.0, + style_weight=0., + criterion='l1'): + super(PerceptualLoss, self).__init__() + self.perceptual_weight = perceptual_weight + self.style_weight = style_weight + self.layer_weights = layer_weights + self.vgg = VGGFeatureExtractor( + layer_name_list=list(layer_weights.keys()), + vgg_type=vgg_type, + use_input_norm=use_input_norm, + range_norm=range_norm) + + self.criterion_type = criterion + if self.criterion_type == 'l1': + self.criterion = torch.nn.L1Loss() + elif self.criterion_type == 'l2': + self.criterion = torch.nn.L2loss() + elif self.criterion_type == 'mse': + self.criterion = torch.nn.MSELoss(reduction='mean') + elif self.criterion_type == 'fro': + self.criterion = None + else: + raise NotImplementedError(f'{criterion} criterion has not been supported.') + + def forward(self, x, gt): + """Forward function. + + Args: + x (Tensor): Input tensor with shape (n, c, h, w). + gt (Tensor): Ground-truth tensor with shape (n, c, h, w). + + Returns: + Tensor: Forward results. + """ + # extract vgg features + x_features = self.vgg(x) + gt_features = self.vgg(gt.detach()) + + # calculate perceptual loss + if self.perceptual_weight > 0: + percep_loss = 0 + for k in x_features.keys(): + if self.criterion_type == 'fro': + percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] + else: + percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] + percep_loss *= self.perceptual_weight + else: + percep_loss = None + + # calculate style loss + if self.style_weight > 0: + style_loss = 0 + for k in x_features.keys(): + if self.criterion_type == 'fro': + style_loss += torch.norm( + self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] + else: + style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( + gt_features[k])) * self.layer_weights[k] + style_loss *= self.style_weight + else: + style_loss = None + + return percep_loss, style_loss + + def _gram_mat(self, x): + """Calculate Gram matrix. + + Args: + x (torch.Tensor): Tensor with shape of (n, c, h, w). + + Returns: + torch.Tensor: Gram matrix. 
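+
+        Note: for input of shape (n, c, h, w) the result has shape (n, c, c),
+        normalized by c * h * w as in the code below.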
+ """ + n, c, h, w = x.size() + features = x.view(n, c, w * h) + features_t = features.transpose(1, 2) + gram = features.bmm(features_t) / (c * h * w) + return gram + +class LPIPSLoss(nn.Module): + def __init__(self, + loss_weight=1.0, + use_input_norm=True, + range_norm=False,): + super(LPIPSLoss, self).__init__() + self.perceptual = lpips.LPIPS(net="vgg", spatial=False).eval() + self.loss_weight = loss_weight + self.use_input_norm = use_input_norm + self.range_norm = range_norm + + if self.use_input_norm: + # the mean is for image with range [0, 1] + self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) + # the std is for image with range [0, 1] + self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) + + def forward(self, pred, target): + if self.range_norm: + pred = (pred + 1) / 2 + target = (target + 1) / 2 + if self.use_input_norm: + pred = (pred - self.mean) / self.std + target = (target - self.mean) / self.std + lpips_loss = self.perceptual(target.contiguous(), pred.contiguous()) + return self.loss_weight * lpips_loss.mean(), None + + +class AdversarialLoss(nn.Module): + r""" + Adversarial loss + https://arxiv.org/abs/1711.10337 + """ + def __init__(self, + type='nsgan', + target_real_label=1.0, + target_fake_label=0.0): + r""" + type = nsgan | lsgan | hinge + """ + super(AdversarialLoss, self).__init__() + self.type = type + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + + if type == 'nsgan': + self.criterion = nn.BCELoss() + elif type == 'lsgan': + self.criterion = nn.MSELoss() + elif type == 'hinge': + self.criterion = nn.ReLU() + + def __call__(self, outputs, is_real, is_disc=None): + if self.type == 'hinge': + if is_disc: + if is_real: + outputs = -outputs + return self.criterion(1 + outputs).mean() + else: + return (-outputs).mean() + else: + labels = (self.real_label + if is_real else self.fake_label).expand_as(outputs) + loss = self.criterion(outputs, labels) + return loss diff --git a/florence_sam/core/lr_scheduler.py b/florence_sam/core/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd1341cdcc64aa1c2a416b837551590ded4a43d --- /dev/null +++ b/florence_sam/core/lr_scheduler.py @@ -0,0 +1,112 @@ +""" + LR scheduler from BasicSR https://github.com/xinntao/BasicSR +""" +import math +from collections import Counter +from torch.optim.lr_scheduler import _LRScheduler + + +class MultiStepRestartLR(_LRScheduler): + """ MultiStep with restarts learning rate scheme. + Args: + optimizer (torch.nn.optimizer): Torch optimizer. + milestones (list): Iterations that will decrease learning rate. + gamma (float): Decrease ratio. Default: 0.1. + restarts (list): Restart iterations. Default: [0]. + restart_weights (list): Restart weights at each restart iteration. + Default: [1]. + last_epoch (int): Used in _LRScheduler. Default: -1. + """ + def __init__(self, + optimizer, + milestones, + gamma=0.1, + restarts=(0, ), + restart_weights=(1, ), + last_epoch=-1): + self.milestones = Counter(milestones) + self.gamma = gamma + self.restarts = restarts + self.restart_weights = restart_weights + assert len(self.restarts) == len( + self.restart_weights), 'restarts and their weights do not match.' 
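+        # milestones is a Counter, so a milestone listed more than once decays
+        # the LR by gamma multiple times at that iteration (see get_lr below)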
+ super(MultiStepRestartLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch in self.restarts: + weight = self.restart_weights[self.restarts.index(self.last_epoch)] + return [ + group['initial_lr'] * weight + for group in self.optimizer.param_groups + ] + if self.last_epoch not in self.milestones: + return [group['lr'] for group in self.optimizer.param_groups] + return [ + group['lr'] * self.gamma**self.milestones[self.last_epoch] + for group in self.optimizer.param_groups + ] + + +def get_position_from_periods(iteration, cumulative_period): + """Get the position from a period list. + It will return the index of the right-closest number in the period list. + For example, the cumulative_period = [100, 200, 300, 400], + if iteration == 50, return 0; + if iteration == 210, return 2; + if iteration == 300, return 2. + Args: + iteration (int): Current iteration. + cumulative_period (list[int]): Cumulative period list. + Returns: + int: The position of the right-closest number in the period list. + """ + for i, period in enumerate(cumulative_period): + if iteration <= period: + return i + + +class CosineAnnealingRestartLR(_LRScheduler): + """ Cosine annealing with restarts learning rate scheme. + An example of config: + periods = [10, 10, 10, 10] + restart_weights = [1, 0.5, 0.5, 0.5] + eta_min=1e-7 + It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the + scheduler will restart with the weights in restart_weights. + Args: + optimizer (torch.nn.optimizer): Torch optimizer. + periods (list): Period for each cosine anneling cycle. + restart_weights (list): Restart weights at each restart iteration. + Default: [1]. + eta_min (float): The mimimum lr. Default: 0. + last_epoch (int): Used in _LRScheduler. Default: -1. + """ + def __init__(self, + optimizer, + periods, + restart_weights=(1, ), + eta_min=1e-7, + last_epoch=-1): + self.periods = periods + self.restart_weights = restart_weights + self.eta_min = eta_min + assert (len(self.periods) == len(self.restart_weights) + ), 'periods and restart_weights should have the same length.' + self.cumulative_period = [ + sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) + ] + super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + idx = get_position_from_periods(self.last_epoch, + self.cumulative_period) + current_weight = self.restart_weights[idx] + nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1] + current_period = self.periods[idx] + + return [ + self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) * + (1 + math.cos(math.pi * ( + (self.last_epoch - nearest_restart) / current_period))) + for base_lr in self.base_lrs + ] diff --git a/florence_sam/core/metrics.py b/florence_sam/core/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..d0dfb73f1d09a249f801770eada5e133c8148df2 --- /dev/null +++ b/florence_sam/core/metrics.py @@ -0,0 +1,569 @@ +import numpy as np +from skimage import measure +from scipy import linalg + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.utils import to_tensors + + +def calculate_epe(flow1, flow2): + """Calculate End point errors.""" + + epe = torch.sum((flow1 - flow2)**2, dim=1).sqrt() + epe = epe.view(-1) + return epe.mean().item() + + +def calculate_psnr(img1, img2): + """Calculate PSNR (Peak Signal-to-Noise Ratio). + Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio + Args: + img1 (ndarray): Images with range [0, 255]. 
+ img2 (ndarray): Images with range [0, 255]. + Returns: + float: psnr result. + """ + + assert img1.shape == img2.shape, \ + (f'Image shapes are differnet: {img1.shape}, {img2.shape}.') + + mse = np.mean((img1 - img2)**2) + if mse == 0: + return float('inf') + return 20. * np.log10(255. / np.sqrt(mse)) + + +def calc_psnr_and_ssim(img1, img2): + """Calculate PSNR and SSIM for images. + img1: ndarray, range [0, 255] + img2: ndarray, range [0, 255] + """ + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + + psnr = calculate_psnr(img1, img2) + ssim = measure.compare_ssim(img1, + img2, + data_range=255, + multichannel=True, + win_size=65) + + return psnr, ssim + + +########################### +# I3D models +########################### + + +def init_i3d_model(i3d_model_path): + print(f"[Loading I3D model from {i3d_model_path} for FID score ..]") + i3d_model = InceptionI3d(400, in_channels=3, final_endpoint='Logits') + i3d_model.load_state_dict(torch.load(i3d_model_path)) + i3d_model.to(torch.device('cuda:0')) + return i3d_model + + +def calculate_i3d_activations(video1, video2, i3d_model, device): + """Calculate VFID metric. + video1: list[PIL.Image] + video2: list[PIL.Image] + """ + video1 = to_tensors()(video1).unsqueeze(0).to(device) + video2 = to_tensors()(video2).unsqueeze(0).to(device) + video1_activations = get_i3d_activations( + video1, i3d_model).cpu().numpy().flatten() + video2_activations = get_i3d_activations( + video2, i3d_model).cpu().numpy().flatten() + + return video1_activations, video2_activations + + +def calculate_vfid(real_activations, fake_activations): + """ + Given two distribution of features, compute the FID score between them + Params: + real_activations: list[ndarray] + fake_activations: list[ndarray] + """ + m1 = np.mean(real_activations, axis=0) + m2 = np.mean(fake_activations, axis=0) + s1 = np.cov(real_activations, rowvar=False) + s2 = np.cov(fake_activations, rowvar=False) + return calculate_frechet_distance(m1, s1, m2, s2) + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + Stable version by Dougal J. Sutherland. + Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on an + representive data set. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on an + representive data set. + Returns: + -- : The Frechet Distance. 
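+
+    Implementation note: sqrt(C_1*C_2) is taken with scipy.linalg.sqrtm; if the
+    product is near-singular, eps is added to the covariance diagonals and any
+    small imaginary residue in the result is dropped (see the code below).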
+ """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + # NOQA + np.trace(sigma2) - 2 * tr_covmean) + + +def get_i3d_activations(batched_video, + i3d_model, + target_endpoint='Logits', + flatten=True, + grad_enabled=False): + """ + Get features from i3d model and flatten them to 1d feature, + valid target endpoints are defined in InceptionI3d.VALID_ENDPOINTS + VALID_ENDPOINTS = ( + 'Conv3d_1a_7x7', + 'MaxPool3d_2a_3x3', + 'Conv3d_2b_1x1', + 'Conv3d_2c_3x3', + 'MaxPool3d_3a_3x3', + 'Mixed_3b', + 'Mixed_3c', + 'MaxPool3d_4a_3x3', + 'Mixed_4b', + 'Mixed_4c', + 'Mixed_4d', + 'Mixed_4e', + 'Mixed_4f', + 'MaxPool3d_5a_2x2', + 'Mixed_5b', + 'Mixed_5c', + 'Logits', + 'Predictions', + ) + """ + with torch.set_grad_enabled(grad_enabled): + feat = i3d_model.extract_features(batched_video.transpose(1, 2), + target_endpoint) + if flatten: + feat = feat.view(feat.size(0), -1) + + return feat + + +# This code is from https://github.com/piergiaj/pytorch-i3d/blob/master/pytorch_i3d.py +# I only fix flake8 errors and do some cleaning here + + +class MaxPool3dSamePadding(nn.MaxPool3d): + def compute_pad(self, dim, s): + if s % self.stride[dim] == 0: + return max(self.kernel_size[dim] - self.stride[dim], 0) + else: + return max(self.kernel_size[dim] - (s % self.stride[dim]), 0) + + def forward(self, x): + # compute 'same' padding + (batch, channel, t, h, w) = x.size() + pad_t = self.compute_pad(0, t) + pad_h = self.compute_pad(1, h) + pad_w = self.compute_pad(2, w) + + pad_t_f = pad_t // 2 + pad_t_b = pad_t - pad_t_f + pad_h_f = pad_h // 2 + pad_h_b = pad_h - pad_h_f + pad_w_f = pad_w // 2 + pad_w_b = pad_w - pad_w_f + + pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b) + x = F.pad(x, pad) + return super(MaxPool3dSamePadding, self).forward(x) + + +class Unit3D(nn.Module): + def __init__(self, + in_channels, + output_channels, + kernel_shape=(1, 1, 1), + stride=(1, 1, 1), + padding=0, + activation_fn=F.relu, + use_batch_norm=True, + use_bias=False, + name='unit_3d'): + """Initializes Unit3D module.""" + super(Unit3D, self).__init__() + + self._output_channels = output_channels + self._kernel_shape = kernel_shape + self._stride = stride + self._use_batch_norm = use_batch_norm + self._activation_fn = activation_fn + self._use_bias = use_bias + self.name = name + self.padding = padding + + self.conv3d = nn.Conv3d( + in_channels=in_channels, + out_channels=self._output_channels, + kernel_size=self._kernel_shape, + stride=self._stride, + padding=0, # we always want padding to be 0 here. 
We will + # dynamically pad based on input size in forward function + bias=self._use_bias) + + if self._use_batch_norm: + self.bn = nn.BatchNorm3d(self._output_channels, + eps=0.001, + momentum=0.01) + + def compute_pad(self, dim, s): + if s % self._stride[dim] == 0: + return max(self._kernel_shape[dim] - self._stride[dim], 0) + else: + return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0) + + def forward(self, x): + # compute 'same' padding + (batch, channel, t, h, w) = x.size() + pad_t = self.compute_pad(0, t) + pad_h = self.compute_pad(1, h) + pad_w = self.compute_pad(2, w) + + pad_t_f = pad_t // 2 + pad_t_b = pad_t - pad_t_f + pad_h_f = pad_h // 2 + pad_h_b = pad_h - pad_h_f + pad_w_f = pad_w // 2 + pad_w_b = pad_w - pad_w_f + + pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b) + x = F.pad(x, pad) + + x = self.conv3d(x) + if self._use_batch_norm: + x = self.bn(x) + if self._activation_fn is not None: + x = self._activation_fn(x) + return x + + +class InceptionModule(nn.Module): + def __init__(self, in_channels, out_channels, name): + super(InceptionModule, self).__init__() + + self.b0 = Unit3D(in_channels=in_channels, + output_channels=out_channels[0], + kernel_shape=[1, 1, 1], + padding=0, + name=name + '/Branch_0/Conv3d_0a_1x1') + self.b1a = Unit3D(in_channels=in_channels, + output_channels=out_channels[1], + kernel_shape=[1, 1, 1], + padding=0, + name=name + '/Branch_1/Conv3d_0a_1x1') + self.b1b = Unit3D(in_channels=out_channels[1], + output_channels=out_channels[2], + kernel_shape=[3, 3, 3], + name=name + '/Branch_1/Conv3d_0b_3x3') + self.b2a = Unit3D(in_channels=in_channels, + output_channels=out_channels[3], + kernel_shape=[1, 1, 1], + padding=0, + name=name + '/Branch_2/Conv3d_0a_1x1') + self.b2b = Unit3D(in_channels=out_channels[3], + output_channels=out_channels[4], + kernel_shape=[3, 3, 3], + name=name + '/Branch_2/Conv3d_0b_3x3') + self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3], + stride=(1, 1, 1), + padding=0) + self.b3b = Unit3D(in_channels=in_channels, + output_channels=out_channels[5], + kernel_shape=[1, 1, 1], + padding=0, + name=name + '/Branch_3/Conv3d_0b_1x1') + self.name = name + + def forward(self, x): + b0 = self.b0(x) + b1 = self.b1b(self.b1a(x)) + b2 = self.b2b(self.b2a(x)) + b3 = self.b3b(self.b3a(x)) + return torch.cat([b0, b1, b2, b3], dim=1) + + +class InceptionI3d(nn.Module): + """Inception-v1 I3D architecture. + The model is introduced in: + Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset + Joao Carreira, Andrew Zisserman + https://arxiv.org/pdf/1705.07750v1.pdf. + See also the Inception architecture, introduced in: + Going deeper with convolutions + Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, + Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich. + http://arxiv.org/pdf/1409.4842v1.pdf. + """ + + # Endpoints of the model in order. During construction, all the endpoints up + # to a designated `final_endpoint` are returned in a dictionary as the + # second return value. 
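+    # Usage sketch (illustrative, based on the methods defined below; `video`
+    # is a placeholder name): the network is built only up to `final_endpoint`,
+    # and `extract_features` walks VALID_ENDPOINTS until `target_endpoint`:
+    #     i3d = InceptionI3d(400, in_channels=3, final_endpoint='Logits')
+    #     feats = i3d.extract_features(video, target_endpoint='Logits')
+    # where `video` is a (batch, channels, time, height, width) float tensor,
+    # as expected by the 3D convolutions in Unit3D.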
+ VALID_ENDPOINTS = ( + 'Conv3d_1a_7x7', + 'MaxPool3d_2a_3x3', + 'Conv3d_2b_1x1', + 'Conv3d_2c_3x3', + 'MaxPool3d_3a_3x3', + 'Mixed_3b', + 'Mixed_3c', + 'MaxPool3d_4a_3x3', + 'Mixed_4b', + 'Mixed_4c', + 'Mixed_4d', + 'Mixed_4e', + 'Mixed_4f', + 'MaxPool3d_5a_2x2', + 'Mixed_5b', + 'Mixed_5c', + 'Logits', + 'Predictions', + ) + + def __init__(self, + num_classes=400, + spatial_squeeze=True, + final_endpoint='Logits', + name='inception_i3d', + in_channels=3, + dropout_keep_prob=0.5): + """Initializes I3D model instance. + Args: + num_classes: The number of outputs in the logit layer (default 400, which + matches the Kinetics dataset). + spatial_squeeze: Whether to squeeze the spatial dimensions for the logits + before returning (default True). + final_endpoint: The model contains many possible endpoints. + `final_endpoint` specifies the last endpoint for the model to be built + up to. In addition to the output at `final_endpoint`, all the outputs + at endpoints up to `final_endpoint` will also be returned, in a + dictionary. `final_endpoint` must be one of + InceptionI3d.VALID_ENDPOINTS (default 'Logits'). + name: A string (optional). The name of this module. + Raises: + ValueError: if `final_endpoint` is not recognized. + """ + + if final_endpoint not in self.VALID_ENDPOINTS: + raise ValueError('Unknown final endpoint %s' % final_endpoint) + + super(InceptionI3d, self).__init__() + self._num_classes = num_classes + self._spatial_squeeze = spatial_squeeze + self._final_endpoint = final_endpoint + self.logits = None + + if self._final_endpoint not in self.VALID_ENDPOINTS: + raise ValueError('Unknown final endpoint %s' % + self._final_endpoint) + + self.end_points = {} + end_point = 'Conv3d_1a_7x7' + self.end_points[end_point] = Unit3D(in_channels=in_channels, + output_channels=64, + kernel_shape=[7, 7, 7], + stride=(2, 2, 2), + padding=(3, 3, 3), + name=name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'MaxPool3d_2a_3x3' + self.end_points[end_point] = MaxPool3dSamePadding( + kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0) + if self._final_endpoint == end_point: + return + + end_point = 'Conv3d_2b_1x1' + self.end_points[end_point] = Unit3D(in_channels=64, + output_channels=64, + kernel_shape=[1, 1, 1], + padding=0, + name=name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Conv3d_2c_3x3' + self.end_points[end_point] = Unit3D(in_channels=64, + output_channels=192, + kernel_shape=[3, 3, 3], + padding=1, + name=name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'MaxPool3d_3a_3x3' + self.end_points[end_point] = MaxPool3dSamePadding( + kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_3b' + self.end_points[end_point] = InceptionModule(192, + [64, 96, 128, 16, 32, 32], + name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_3c' + self.end_points[end_point] = InceptionModule( + 256, [128, 128, 192, 32, 96, 64], name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'MaxPool3d_4a_3x3' + self.end_points[end_point] = MaxPool3dSamePadding( + kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_4b' + self.end_points[end_point] = InceptionModule( + 128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_4c' + 
self.end_points[end_point] = InceptionModule( + 192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_4d' + self.end_points[end_point] = InceptionModule( + 160 + 224 + 64 + 64, [128, 128, 256, 24, 64, 64], name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_4e' + self.end_points[end_point] = InceptionModule( + 128 + 256 + 64 + 64, [112, 144, 288, 32, 64, 64], name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_4f' + self.end_points[end_point] = InceptionModule( + 112 + 288 + 64 + 64, [256, 160, 320, 32, 128, 128], + name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'MaxPool3d_5a_2x2' + self.end_points[end_point] = MaxPool3dSamePadding( + kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_5b' + self.end_points[end_point] = InceptionModule( + 256 + 320 + 128 + 128, [256, 160, 320, 32, 128, 128], + name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Mixed_5c' + self.end_points[end_point] = InceptionModule( + 256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128], + name + end_point) + if self._final_endpoint == end_point: + return + + end_point = 'Logits' + self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1)) + self.dropout = nn.Dropout(dropout_keep_prob) + self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, + output_channels=self._num_classes, + kernel_shape=[1, 1, 1], + padding=0, + activation_fn=None, + use_batch_norm=False, + use_bias=True, + name='logits') + + self.build() + + def replace_logits(self, num_classes): + self._num_classes = num_classes + self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, + output_channels=self._num_classes, + kernel_shape=[1, 1, 1], + padding=0, + activation_fn=None, + use_batch_norm=False, + use_bias=True, + name='logits') + + def build(self): + for k in self.end_points.keys(): + self.add_module(k, self.end_points[k]) + + def forward(self, x): + for end_point in self.VALID_ENDPOINTS: + if end_point in self.end_points: + x = self._modules[end_point]( + x) # use _modules to work with dataparallel + + x = self.logits(self.dropout(self.avg_pool(x))) + if self._spatial_squeeze: + logits = x.squeeze(3).squeeze(3) + # logits is batch X time X classes, which is what we want to work with + return logits + + def extract_features(self, x, target_endpoint='Logits'): + for end_point in self.VALID_ENDPOINTS: + if end_point in self.end_points: + x = self._modules[end_point](x) + if end_point == target_endpoint: + break + if target_endpoint == 'Logits': + return x.mean(4).mean(3).mean(2) + else: + return x diff --git a/florence_sam/core/prefetch_dataloader.py b/florence_sam/core/prefetch_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..5088425050d4cc98114a9b93eb50ea60273f35a0 --- /dev/null +++ b/florence_sam/core/prefetch_dataloader.py @@ -0,0 +1,125 @@ +import queue as Queue +import threading +import torch +from torch.utils.data import DataLoader + + +class PrefetchGenerator(threading.Thread): + """A general prefetch generator. + + Ref: + https://stackoverflow.com/questions/7323664/python-generator-pre-fetch + + Args: + generator: Python generator. + num_prefetch_queue (int): Number of prefetch queue. 
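+
+    Example (illustrative only; `my_generator` is a placeholder):
+        gen = PrefetchGenerator(my_generator, num_prefetch_queue=4)
+        for item in gen:
+            ...  # items are pre-fetched by a background thread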
+ """ + + def __init__(self, generator, num_prefetch_queue): + threading.Thread.__init__(self) + self.queue = Queue.Queue(num_prefetch_queue) + self.generator = generator + self.daemon = True + self.start() + + def run(self): + for item in self.generator: + self.queue.put(item) + self.queue.put(None) + + def __next__(self): + next_item = self.queue.get() + if next_item is None: + raise StopIteration + return next_item + + def __iter__(self): + return self + + +class PrefetchDataLoader(DataLoader): + """Prefetch version of dataloader. + + Ref: + https://github.com/IgorSusmelj/pytorch-styleguide/issues/5# + + TODO: + Need to test on single gpu and ddp (multi-gpu). There is a known issue in + ddp. + + Args: + num_prefetch_queue (int): Number of prefetch queue. + kwargs (dict): Other arguments for dataloader. + """ + + def __init__(self, num_prefetch_queue, **kwargs): + self.num_prefetch_queue = num_prefetch_queue + super(PrefetchDataLoader, self).__init__(**kwargs) + + def __iter__(self): + return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue) + + +class CPUPrefetcher(): + """CPU prefetcher. + + Args: + loader: Dataloader. + """ + + def __init__(self, loader): + self.ori_loader = loader + self.loader = iter(loader) + + def next(self): + try: + return next(self.loader) + except StopIteration: + return None + + def reset(self): + self.loader = iter(self.ori_loader) + + +class CUDAPrefetcher(): + """CUDA prefetcher. + + Ref: + https://github.com/NVIDIA/apex/issues/304# + + It may consums more GPU memory. + + Args: + loader: Dataloader. + opt (dict): Options. + """ + + def __init__(self, loader, opt): + self.ori_loader = loader + self.loader = iter(loader) + self.opt = opt + self.stream = torch.cuda.Stream() + self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu') + self.preload() + + def preload(self): + try: + self.batch = next(self.loader) # self.batch is a dict + except StopIteration: + self.batch = None + return None + # put tensors to gpu + with torch.cuda.stream(self.stream): + for k, v in self.batch.items(): + if torch.is_tensor(v): + self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True) + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + batch = self.batch + self.preload() + return batch + + def reset(self): + self.loader = iter(self.ori_loader) + self.preload() diff --git a/florence_sam/core/trainer.py b/florence_sam/core/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6b6a669e92131697fcb12fd548509ce1b81080 --- /dev/null +++ b/florence_sam/core/trainer.py @@ -0,0 +1,509 @@ +import os +import glob +import logging +import importlib +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.nn.functional as F +from core.prefetch_dataloader import PrefetchDataLoader, CPUPrefetcher +from torch.utils.data.distributed import DistributedSampler +from torch.nn.parallel import DistributedDataParallel as DDP +import torchvision +from torch.utils.tensorboard import SummaryWriter + +from core.lr_scheduler import MultiStepRestartLR, CosineAnnealingRestartLR +from core.loss import AdversarialLoss, PerceptualLoss, LPIPSLoss +from core.dataset import TrainDataset + +from model.modules.flow_comp_raft import RAFT_bi, FlowLoss, EdgeLoss +from model.recurrent_flow_completion import RecurrentFlowCompleteNet + +from RAFT.utils.flow_viz_pt import flow_to_image + + +class Trainer: + def __init__(self, config): + self.config = config + self.epoch = 0 + self.iteration = 0 + 
self.num_local_frames = config['train_data_loader']['num_local_frames'] + self.num_ref_frames = config['train_data_loader']['num_ref_frames'] + + # setup data set and data loader + self.train_dataset = TrainDataset(config['train_data_loader']) + + self.train_sampler = None + self.train_args = config['trainer'] + if config['distributed']: + self.train_sampler = DistributedSampler( + self.train_dataset, + num_replicas=config['world_size'], + rank=config['global_rank']) + + dataloader_args = dict( + dataset=self.train_dataset, + batch_size=self.train_args['batch_size'] // config['world_size'], + shuffle=(self.train_sampler is None), + num_workers=self.train_args['num_workers'], + sampler=self.train_sampler, + drop_last=True) + + self.train_loader = PrefetchDataLoader(self.train_args['num_prefetch_queue'], **dataloader_args) + self.prefetcher = CPUPrefetcher(self.train_loader) + + # set loss functions + self.adversarial_loss = AdversarialLoss(type=self.config['losses']['GAN_LOSS']) + self.adversarial_loss = self.adversarial_loss.to(self.config['device']) + self.l1_loss = nn.L1Loss() + # self.perc_loss = PerceptualLoss( + # layer_weights={'conv3_4': 0.25, 'conv4_4': 0.25, 'conv5_4': 0.5}, + # use_input_norm=True, + # range_norm=True, + # criterion='l1' + # ).to(self.config['device']) + + if self.config['losses']['perceptual_weight'] > 0: + self.perc_loss = LPIPSLoss(use_input_norm=True, range_norm=True).to(self.config['device']) + + # self.flow_comp_loss = FlowCompletionLoss().to(self.config['device']) + # self.flow_comp_loss = FlowCompletionLoss(self.config['device']) + + # set raft + self.fix_raft = RAFT_bi(device = self.config['device']) + self.fix_flow_complete = RecurrentFlowCompleteNet('weights/recurrent_flow_completion.pth') + for p in self.fix_flow_complete.parameters(): + p.requires_grad = False + self.fix_flow_complete.to(self.config['device']) + self.fix_flow_complete.eval() + + # self.flow_loss = FlowLoss() + + # setup models including generator and discriminator + net = importlib.import_module('model.' 
+ config['model']['net']) + self.netG = net.InpaintGenerator() + # print(self.netG) + self.netG = self.netG.to(self.config['device']) + if not self.config['model'].get('no_dis', False): + if self.config['model'].get('dis_2d', False): + self.netD = net.Discriminator_2D( + in_channels=3, + use_sigmoid=config['losses']['GAN_LOSS'] != 'hinge') + else: + self.netD = net.Discriminator( + in_channels=3, + use_sigmoid=config['losses']['GAN_LOSS'] != 'hinge') + self.netD = self.netD.to(self.config['device']) + + self.interp_mode = self.config['model']['interp_mode'] + # setup optimizers and schedulers + self.setup_optimizers() + self.setup_schedulers() + self.load() + + if config['distributed']: + self.netG = DDP(self.netG, + device_ids=[self.config['local_rank']], + output_device=self.config['local_rank'], + broadcast_buffers=True, + find_unused_parameters=True) + if not self.config['model']['no_dis']: + self.netD = DDP(self.netD, + device_ids=[self.config['local_rank']], + output_device=self.config['local_rank'], + broadcast_buffers=True, + find_unused_parameters=False) + + # set summary writer + self.dis_writer = None + self.gen_writer = None + self.summary = {} + if self.config['global_rank'] == 0 or (not config['distributed']): + if not self.config['model']['no_dis']: + self.dis_writer = SummaryWriter( + os.path.join(config['save_dir'], 'dis')) + self.gen_writer = SummaryWriter( + os.path.join(config['save_dir'], 'gen')) + + def setup_optimizers(self): + """Set up optimizers.""" + backbone_params = [] + for name, param in self.netG.named_parameters(): + if param.requires_grad: + backbone_params.append(param) + else: + print(f'Params {name} will not be optimized.') + + optim_params = [ + { + 'params': backbone_params, + 'lr': self.config['trainer']['lr'] + }, + ] + + self.optimG = torch.optim.Adam(optim_params, + betas=(self.config['trainer']['beta1'], + self.config['trainer']['beta2'])) + + if not self.config['model']['no_dis']: + self.optimD = torch.optim.Adam( + self.netD.parameters(), + lr=self.config['trainer']['lr'], + betas=(self.config['trainer']['beta1'], + self.config['trainer']['beta2'])) + + def setup_schedulers(self): + """Set up schedulers.""" + scheduler_opt = self.config['trainer']['scheduler'] + scheduler_type = scheduler_opt.pop('type') + + if scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']: + self.scheG = MultiStepRestartLR( + self.optimG, + milestones=scheduler_opt['milestones'], + gamma=scheduler_opt['gamma']) + if not self.config['model']['no_dis']: + self.scheD = MultiStepRestartLR( + self.optimD, + milestones=scheduler_opt['milestones'], + gamma=scheduler_opt['gamma']) + elif scheduler_type == 'CosineAnnealingRestartLR': + self.scheG = CosineAnnealingRestartLR( + self.optimG, + periods=scheduler_opt['periods'], + restart_weights=scheduler_opt['restart_weights'], + eta_min=scheduler_opt['eta_min']) + if not self.config['model']['no_dis']: + self.scheD = CosineAnnealingRestartLR( + self.optimD, + periods=scheduler_opt['periods'], + restart_weights=scheduler_opt['restart_weights'], + eta_min=scheduler_opt['eta_min']) + else: + raise NotImplementedError( + f'Scheduler {scheduler_type} is not implemented yet.') + + def update_learning_rate(self): + """Update learning rate.""" + self.scheG.step() + if not self.config['model']['no_dis']: + self.scheD.step() + + def get_lr(self): + """Get current learning rate.""" + return self.optimG.param_groups[0]['lr'] + + def add_summary(self, writer, name, val): + """Add tensorboard summary.""" + if name not in self.summary: + 
self.summary[name] = 0 + self.summary[name] += val + n = self.train_args['log_freq'] + if writer is not None and self.iteration % n == 0: + writer.add_scalar(name, self.summary[name] / n, self.iteration) + self.summary[name] = 0 + + def load(self): + """Load netG (and netD).""" + # get the latest checkpoint + model_path = self.config['save_dir'] + # TODO: add resume name + if os.path.isfile(os.path.join(model_path, 'latest.ckpt')): + latest_epoch = open(os.path.join(model_path, 'latest.ckpt'), + 'r').read().splitlines()[-1] + else: + ckpts = [ + os.path.basename(i).split('.pth')[0] + for i in glob.glob(os.path.join(model_path, '*.pth')) + ] + ckpts.sort() + latest_epoch = ckpts[-1][4:] if len(ckpts) > 0 else None + + if latest_epoch is not None: + gen_path = os.path.join(model_path, + f'gen_{int(latest_epoch):06d}.pth') + dis_path = os.path.join(model_path, + f'dis_{int(latest_epoch):06d}.pth') + opt_path = os.path.join(model_path, + f'opt_{int(latest_epoch):06d}.pth') + + if self.config['global_rank'] == 0: + print(f'Loading model from {gen_path}...') + dataG = torch.load(gen_path, map_location=self.config['device']) + self.netG.load_state_dict(dataG) + if not self.config['model']['no_dis'] and self.config['model']['load_d']: + dataD = torch.load(dis_path, map_location=self.config['device']) + self.netD.load_state_dict(dataD) + + data_opt = torch.load(opt_path, map_location=self.config['device']) + self.optimG.load_state_dict(data_opt['optimG']) + # self.scheG.load_state_dict(data_opt['scheG']) + if not self.config['model']['no_dis'] and self.config['model']['load_d']: + self.optimD.load_state_dict(data_opt['optimD']) + # self.scheD.load_state_dict(data_opt['scheD']) + self.epoch = data_opt['epoch'] + self.iteration = data_opt['iteration'] + else: + gen_path = self.config['trainer'].get('gen_path', None) + dis_path = self.config['trainer'].get('dis_path', None) + opt_path = self.config['trainer'].get('opt_path', None) + if gen_path is not None: + if self.config['global_rank'] == 0: + print(f'Loading Gen-Net from {gen_path}...') + dataG = torch.load(gen_path, map_location=self.config['device']) + self.netG.load_state_dict(dataG) + + if dis_path is not None and not self.config['model']['no_dis'] and self.config['model']['load_d']: + if self.config['global_rank'] == 0: + print(f'Loading Dis-Net from {dis_path}...') + dataD = torch.load(dis_path, map_location=self.config['device']) + self.netD.load_state_dict(dataD) + if opt_path is not None: + data_opt = torch.load(opt_path, map_location=self.config['device']) + self.optimG.load_state_dict(data_opt['optimG']) + self.scheG.load_state_dict(data_opt['scheG']) + if not self.config['model']['no_dis'] and self.config['model']['load_d']: + self.optimD.load_state_dict(data_opt['optimD']) + self.scheD.load_state_dict(data_opt['scheD']) + else: + if self.config['global_rank'] == 0: + print('Warnning: There is no trained model found.' 
+ 'An initialized model will be used.') + + def save(self, it): + """Save parameters every eval_epoch""" + if self.config['global_rank'] == 0: + # configure path + gen_path = os.path.join(self.config['save_dir'], + f'gen_{it:06d}.pth') + dis_path = os.path.join(self.config['save_dir'], + f'dis_{it:06d}.pth') + opt_path = os.path.join(self.config['save_dir'], + f'opt_{it:06d}.pth') + print(f'\nsaving model to {gen_path} ...') + + # remove .module for saving + if isinstance(self.netG, torch.nn.DataParallel) or isinstance(self.netG, DDP): + netG = self.netG.module + if not self.config['model']['no_dis']: + netD = self.netD.module + else: + netG = self.netG + if not self.config['model']['no_dis']: + netD = self.netD + + # save checkpoints + torch.save(netG.state_dict(), gen_path) + if not self.config['model']['no_dis']: + torch.save(netD.state_dict(), dis_path) + torch.save( + { + 'epoch': self.epoch, + 'iteration': self.iteration, + 'optimG': self.optimG.state_dict(), + 'optimD': self.optimD.state_dict(), + 'scheG': self.scheG.state_dict(), + 'scheD': self.scheD.state_dict() + }, opt_path) + else: + torch.save( + { + 'epoch': self.epoch, + 'iteration': self.iteration, + 'optimG': self.optimG.state_dict(), + 'scheG': self.scheG.state_dict() + }, opt_path) + + latest_path = os.path.join(self.config['save_dir'], 'latest.ckpt') + os.system(f"echo {it:06d} > {latest_path}") + + def train(self): + """training entry""" + pbar = range(int(self.train_args['iterations'])) + if self.config['global_rank'] == 0: + pbar = tqdm(pbar, + initial=self.iteration, + dynamic_ncols=True, + smoothing=0.01) + + os.makedirs('logs', exist_ok=True) + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(filename)s[line:%(lineno)d]" + "%(levelname)s %(message)s", + datefmt="%a, %d %b %Y %H:%M:%S", + filename=f"logs/{self.config['save_dir'].split('/')[-1]}.log", + filemode='w') + + while True: + self.epoch += 1 + self.prefetcher.reset() + if self.config['distributed']: + self.train_sampler.set_epoch(self.epoch) + self._train_epoch(pbar) + if self.iteration > self.train_args['iterations']: + break + print('\nEnd training....') + + def _train_epoch(self, pbar): + """Process input and calculate loss every training epoch""" + device = self.config['device'] + train_data = self.prefetcher.next() + while train_data is not None: + self.iteration += 1 + frames, masks, flows_f, flows_b, _ = train_data + frames, masks = frames.to(device), masks.to(device).float() + l_t = self.num_local_frames + b, t, c, h, w = frames.size() + gt_local_frames = frames[:, :l_t, ...] + local_masks = masks[:, :l_t, ...].contiguous() + + masked_frames = frames * (1 - masks) + masked_local_frames = masked_frames[:, :l_t, ...] + # get gt optical flow + if flows_f[0] == 'None' or flows_b[0] == 'None': + gt_flows_bi = self.fix_raft(gt_local_frames) + else: + gt_flows_bi = (flows_f.to(device), flows_b.to(device)) + + # ---- complete flow ---- + pred_flows_bi, _ = self.fix_flow_complete.forward_bidirect_flow(gt_flows_bi, local_masks) + pred_flows_bi = self.fix_flow_complete.combine_flow(gt_flows_bi, pred_flows_bi, local_masks) + # pred_flows_bi = gt_flows_bi + + # ---- image propagation ---- + prop_imgs, updated_local_masks = self.netG.module.img_propagation(masked_local_frames, pred_flows_bi, local_masks, interpolation=self.interp_mode) + updated_masks = masks.clone() + updated_masks[:, :l_t, ...] 
= updated_local_masks.view(b, l_t, 1, h, w) + updated_frames = masked_frames.clone() + prop_local_frames = gt_local_frames * (1-local_masks) + prop_imgs.view(b, l_t, 3, h, w) * local_masks # merge + updated_frames[:, :l_t, ...] = prop_local_frames + + # ---- feature propagation + Transformer ---- + pred_imgs = self.netG(updated_frames, pred_flows_bi, masks, updated_masks, l_t) + pred_imgs = pred_imgs.view(b, -1, c, h, w) + + # get the local frames + pred_local_frames = pred_imgs[:, :l_t, ...] + comp_local_frames = gt_local_frames * (1. - local_masks) + pred_local_frames * local_masks + comp_imgs = frames * (1. - masks) + pred_imgs * masks + + gen_loss = 0 + dis_loss = 0 + # optimize net_g + if not self.config['model']['no_dis']: + for p in self.netD.parameters(): + p.requires_grad = False + + self.optimG.zero_grad() + + # generator l1 loss + hole_loss = self.l1_loss(pred_imgs * masks, frames * masks) + hole_loss = hole_loss / torch.mean(masks) * self.config['losses']['hole_weight'] + gen_loss += hole_loss + self.add_summary(self.gen_writer, 'loss/hole_loss', hole_loss.item()) + + valid_loss = self.l1_loss(pred_imgs * (1 - masks), frames * (1 - masks)) + valid_loss = valid_loss / torch.mean(1-masks) * self.config['losses']['valid_weight'] + gen_loss += valid_loss + self.add_summary(self.gen_writer, 'loss/valid_loss', valid_loss.item()) + + # perceptual loss + if self.config['losses']['perceptual_weight'] > 0: + perc_loss = self.perc_loss(pred_imgs.view(-1,3,h,w), frames.view(-1,3,h,w))[0] * self.config['losses']['perceptual_weight'] + gen_loss += perc_loss + self.add_summary(self.gen_writer, 'loss/perc_loss', perc_loss.item()) + + # gan loss + if not self.config['model']['no_dis']: + # generator adversarial loss + gen_clip = self.netD(comp_imgs) + gan_loss = self.adversarial_loss(gen_clip, True, False) + gan_loss = gan_loss * self.config['losses']['adversarial_weight'] + gen_loss += gan_loss + self.add_summary(self.gen_writer, 'loss/gan_loss', gan_loss.item()) + gen_loss.backward() + self.optimG.step() + + if not self.config['model']['no_dis']: + # optimize net_d + for p in self.netD.parameters(): + p.requires_grad = True + self.optimD.zero_grad() + + # discriminator adversarial loss + real_clip = self.netD(frames) + fake_clip = self.netD(comp_imgs.detach()) + dis_real_loss = self.adversarial_loss(real_clip, True, True) + dis_fake_loss = self.adversarial_loss(fake_clip, False, True) + dis_loss += (dis_real_loss + dis_fake_loss) / 2 + self.add_summary(self.dis_writer, 'loss/dis_vid_real', dis_real_loss.item()) + self.add_summary(self.dis_writer, 'loss/dis_vid_fake', dis_fake_loss.item()) + dis_loss.backward() + self.optimD.step() + + self.update_learning_rate() + + # write image to tensorboard + if self.iteration % 200 == 0: + # img to cpu + t = 0 + gt_local_frames_cpu = ((gt_local_frames.view(b,-1,3,h,w) + 1)/2.0).cpu() + masked_local_frames = ((masked_local_frames.view(b,-1,3,h,w) + 1)/2.0).cpu() + prop_local_frames_cpu = ((prop_local_frames.view(b,-1,3,h,w) + 1)/2.0).cpu() + pred_local_frames_cpu = ((pred_local_frames.view(b,-1,3,h,w) + 1)/2.0).cpu() + img_results = torch.cat([masked_local_frames[0][t], gt_local_frames_cpu[0][t], + prop_local_frames_cpu[0][t], pred_local_frames_cpu[0][t]], 1) + img_results = torchvision.utils.make_grid(img_results, nrow=1, normalize=True) + if self.gen_writer is not None: + self.gen_writer.add_image(f'img/img:inp-gt-res-{t}', img_results, self.iteration) + + t = 5 + if masked_local_frames.shape[1] > 5: + img_results = torch.cat([masked_local_frames[0][t], 
gt_local_frames_cpu[0][t], + prop_local_frames_cpu[0][t], pred_local_frames_cpu[0][t]], 1) + img_results = torchvision.utils.make_grid(img_results, nrow=1, normalize=True) + if self.gen_writer is not None: + self.gen_writer.add_image(f'img/img:inp-gt-res-{t}', img_results, self.iteration) + + # flow to cpu + gt_flows_forward_cpu = flow_to_image(gt_flows_bi[0][0]).cpu() + masked_flows_forward_cpu = (gt_flows_forward_cpu[0] * (1-local_masks[0][0].cpu())).to(gt_flows_forward_cpu) + pred_flows_forward_cpu = flow_to_image(pred_flows_bi[0][0]).cpu() + + flow_results = torch.cat([gt_flows_forward_cpu[0], masked_flows_forward_cpu, pred_flows_forward_cpu[0]], 1) + if self.gen_writer is not None: + self.gen_writer.add_image('img/flow:gt-pred', flow_results, self.iteration) + + # console logs + if self.config['global_rank'] == 0: + pbar.update(1) + if not self.config['model']['no_dis']: + pbar.set_description((f"d: {dis_loss.item():.3f}; " + f"hole: {hole_loss.item():.3f}; " + f"valid: {valid_loss.item():.3f}")) + else: + pbar.set_description((f"hole: {hole_loss.item():.3f}; " + f"valid: {valid_loss.item():.3f}")) + + if self.iteration % self.train_args['log_freq'] == 0: + if not self.config['model']['no_dis']: + logging.info(f"[Iter {self.iteration}] " + f"d: {dis_loss.item():.4f}; " + f"hole: {hole_loss.item():.4f}; " + f"valid: {valid_loss.item():.4f}") + else: + logging.info(f"[Iter {self.iteration}] " + f"hole: {hole_loss.item():.4f}; " + f"valid: {valid_loss.item():.4f}") + + # saving models + if self.iteration % self.train_args['save_freq'] == 0: + self.save(int(self.iteration)) + + if self.iteration > self.train_args['iterations']: + break + + train_data = self.prefetcher.next() \ No newline at end of file diff --git a/florence_sam/core/trainer_flow_w_edge.py b/florence_sam/core/trainer_flow_w_edge.py new file mode 100644 index 0000000000000000000000000000000000000000..d4eba04c8a5fa56bce3e335e6036bc0e0a1e848a --- /dev/null +++ b/florence_sam/core/trainer_flow_w_edge.py @@ -0,0 +1,380 @@ +import os +import glob +import logging +import importlib +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.nn.functional as F +from core.prefetch_dataloader import PrefetchDataLoader, CPUPrefetcher +from torch.utils.data.distributed import DistributedSampler +from torch.nn.parallel import DistributedDataParallel as DDP + +from torch.utils.tensorboard import SummaryWriter + +from core.lr_scheduler import MultiStepRestartLR, CosineAnnealingRestartLR +from core.dataset import TrainDataset + +from model.modules.flow_comp_raft import RAFT_bi, FlowLoss, EdgeLoss + +# from skimage.feature import canny +from model.canny.canny_filter import Canny +from RAFT.utils.flow_viz_pt import flow_to_image + + +class Trainer: + def __init__(self, config): + self.config = config + self.epoch = 0 + self.iteration = 0 + self.num_local_frames = config['train_data_loader']['num_local_frames'] + self.num_ref_frames = config['train_data_loader']['num_ref_frames'] + + # setup data set and data loader + self.train_dataset = TrainDataset(config['train_data_loader']) + + self.train_sampler = None + self.train_args = config['trainer'] + if config['distributed']: + self.train_sampler = DistributedSampler( + self.train_dataset, + num_replicas=config['world_size'], + rank=config['global_rank']) + + dataloader_args = dict( + dataset=self.train_dataset, + batch_size=self.train_args['batch_size'] // config['world_size'], + shuffle=(self.train_sampler is None), + num_workers=self.train_args['num_workers'], + 
sampler=self.train_sampler, + drop_last=True) + + self.train_loader = PrefetchDataLoader(self.train_args['num_prefetch_queue'], **dataloader_args) + self.prefetcher = CPUPrefetcher(self.train_loader) + + # set raft + self.fix_raft = RAFT_bi(device = self.config['device']) + self.flow_loss = FlowLoss() + self.edge_loss = EdgeLoss() + self.canny = Canny(sigma=(2,2), low_threshold=0.1, high_threshold=0.2) + + # setup models including generator and discriminator + net = importlib.import_module('model.' + config['model']['net']) + self.netG = net.RecurrentFlowCompleteNet() + # print(self.netG) + self.netG = self.netG.to(self.config['device']) + + # setup optimizers and schedulers + self.setup_optimizers() + self.setup_schedulers() + self.load() + + if config['distributed']: + self.netG = DDP(self.netG, + device_ids=[self.config['local_rank']], + output_device=self.config['local_rank'], + broadcast_buffers=True, + find_unused_parameters=True) + + # set summary writer + self.dis_writer = None + self.gen_writer = None + self.summary = {} + if self.config['global_rank'] == 0 or (not config['distributed']): + self.gen_writer = SummaryWriter( + os.path.join(config['save_dir'], 'gen')) + + def setup_optimizers(self): + """Set up optimizers.""" + backbone_params = [] + for name, param in self.netG.named_parameters(): + if param.requires_grad: + backbone_params.append(param) + else: + print(f'Params {name} will not be optimized.') + + optim_params = [ + { + 'params': backbone_params, + 'lr': self.config['trainer']['lr'] + }, + ] + + self.optimG = torch.optim.Adam(optim_params, + betas=(self.config['trainer']['beta1'], + self.config['trainer']['beta2'])) + + + def setup_schedulers(self): + """Set up schedulers.""" + scheduler_opt = self.config['trainer']['scheduler'] + scheduler_type = scheduler_opt.pop('type') + + if scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']: + self.scheG = MultiStepRestartLR( + self.optimG, + milestones=scheduler_opt['milestones'], + gamma=scheduler_opt['gamma']) + elif scheduler_type == 'CosineAnnealingRestartLR': + self.scheG = CosineAnnealingRestartLR( + self.optimG, + periods=scheduler_opt['periods'], + restart_weights=scheduler_opt['restart_weights']) + else: + raise NotImplementedError( + f'Scheduler {scheduler_type} is not implemented yet.') + + def update_learning_rate(self): + """Update learning rate.""" + self.scheG.step() + + def get_lr(self): + """Get current learning rate.""" + return self.optimG.param_groups[0]['lr'] + + def add_summary(self, writer, name, val): + """Add tensorboard summary.""" + if name not in self.summary: + self.summary[name] = 0 + self.summary[name] += val + n = self.train_args['log_freq'] + if writer is not None and self.iteration % n == 0: + writer.add_scalar(name, self.summary[name] / n, self.iteration) + self.summary[name] = 0 + + def load(self): + """Load netG.""" + # get the latest checkpoint + model_path = self.config['save_dir'] + if os.path.isfile(os.path.join(model_path, 'latest.ckpt')): + latest_epoch = open(os.path.join(model_path, 'latest.ckpt'), + 'r').read().splitlines()[-1] + else: + ckpts = [ + os.path.basename(i).split('.pth')[0] + for i in glob.glob(os.path.join(model_path, '*.pth')) + ] + ckpts.sort() + latest_epoch = ckpts[-1][4:] if len(ckpts) > 0 else None + + if latest_epoch is not None: + gen_path = os.path.join(model_path, f'gen_{int(latest_epoch):06d}.pth') + opt_path = os.path.join(model_path,f'opt_{int(latest_epoch):06d}.pth') + + if self.config['global_rank'] == 0: + print(f'Loading model from 
{gen_path}...') + dataG = torch.load(gen_path, map_location=self.config['device']) + self.netG.load_state_dict(dataG) + + + data_opt = torch.load(opt_path, map_location=self.config['device']) + self.optimG.load_state_dict(data_opt['optimG']) + self.scheG.load_state_dict(data_opt['scheG']) + + self.epoch = data_opt['epoch'] + self.iteration = data_opt['iteration'] + + else: + if self.config['global_rank'] == 0: + print('Warnning: There is no trained model found.' + 'An initialized model will be used.') + + def save(self, it): + """Save parameters every eval_epoch""" + if self.config['global_rank'] == 0: + # configure path + gen_path = os.path.join(self.config['save_dir'], + f'gen_{it:06d}.pth') + opt_path = os.path.join(self.config['save_dir'], + f'opt_{it:06d}.pth') + print(f'\nsaving model to {gen_path} ...') + + # remove .module for saving + if isinstance(self.netG, torch.nn.DataParallel) or isinstance(self.netG, DDP): + netG = self.netG.module + else: + netG = self.netG + + # save checkpoints + torch.save(netG.state_dict(), gen_path) + torch.save( + { + 'epoch': self.epoch, + 'iteration': self.iteration, + 'optimG': self.optimG.state_dict(), + 'scheG': self.scheG.state_dict() + }, opt_path) + + latest_path = os.path.join(self.config['save_dir'], 'latest.ckpt') + os.system(f"echo {it:06d} > {latest_path}") + + def train(self): + """training entry""" + pbar = range(int(self.train_args['iterations'])) + if self.config['global_rank'] == 0: + pbar = tqdm(pbar, + initial=self.iteration, + dynamic_ncols=True, + smoothing=0.01) + + os.makedirs('logs', exist_ok=True) + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(filename)s[line:%(lineno)d]" + "%(levelname)s %(message)s", + datefmt="%a, %d %b %Y %H:%M:%S", + filename=f"logs/{self.config['save_dir'].split('/')[-1]}.log", + filemode='w') + + while True: + self.epoch += 1 + self.prefetcher.reset() + if self.config['distributed']: + self.train_sampler.set_epoch(self.epoch) + self._train_epoch(pbar) + if self.iteration > self.train_args['iterations']: + break + print('\nEnd training....') + + # def get_edges(self, flows): # fgvc + # # (b, t, 2, H, W) + # b, t, _, h, w = flows.shape + # flows = flows.view(-1, 2, h, w) + # flows_list = flows.permute(0, 2, 3, 1).cpu().numpy() + # edges = [] + # for f in list(flows_list): + # flows_gray = (f[:, :, 0] ** 2 + f[:, :, 1] ** 2) ** 0.5 + # if flows_gray.max() < 1: + # flows_gray = flows_gray*0 + # else: + # flows_gray = flows_gray / flows_gray.max() + + # edge = canny(flows_gray, sigma=2, low_threshold=0.1, high_threshold=0.2) # fgvc + # edge = torch.from_numpy(edge).view(1, 1, h, w).float() + # edges.append(edge) + # edges = torch.stack(edges, dim=0).to(self.config['device']) + # edges = edges.view(b, t, 1, h, w) + # return edges + + def get_edges(self, flows): + # (b, t, 2, H, W) + b, t, _, h, w = flows.shape + flows = flows.view(-1, 2, h, w) + flows_gray = (flows[:, 0, None] ** 2 + flows[:, 1, None] ** 2) ** 0.5 + if flows_gray.max() < 1: + flows_gray = flows_gray*0 + else: + flows_gray = flows_gray / flows_gray.max() + + magnitude, edges = self.canny(flows_gray.float()) + edges = edges.view(b, t, 1, h, w) + return edges + + def _train_epoch(self, pbar): + """Process input and calculate loss every training epoch""" + device = self.config['device'] + train_data = self.prefetcher.next() + while train_data is not None: + self.iteration += 1 + frames, masks, flows_f, flows_b, _ = train_data + frames, masks = frames.to(device), masks.to(device) + masks = masks.float() + + l_t = 
self.num_local_frames + b, t, c, h, w = frames.size() + gt_local_frames = frames[:, :l_t, ...] + local_masks = masks[:, :l_t, ...].contiguous() + + # get gt optical flow + if flows_f[0] == 'None' or flows_b[0] == 'None': + gt_flows_bi = self.fix_raft(gt_local_frames) + else: + gt_flows_bi = (flows_f.to(device), flows_b.to(device)) + + # get gt edge + gt_edges_forward = self.get_edges(gt_flows_bi[0]) + gt_edges_backward = self.get_edges(gt_flows_bi[1]) + gt_edges_bi = [gt_edges_forward, gt_edges_backward] + + # complete flow + pred_flows_bi, pred_edges_bi = self.netG.module.forward_bidirect_flow(gt_flows_bi, local_masks) + + # optimize net_g + self.optimG.zero_grad() + + # compulte flow_loss + flow_loss, warp_loss = self.flow_loss(pred_flows_bi, gt_flows_bi, local_masks, gt_local_frames) + flow_loss = flow_loss * self.config['losses']['flow_weight'] + warp_loss = warp_loss * 0.01 + self.add_summary(self.gen_writer, 'loss/flow_loss', flow_loss.item()) + self.add_summary(self.gen_writer, 'loss/warp_loss', warp_loss.item()) + + # compute edge loss + edge_loss = self.edge_loss(pred_edges_bi, gt_edges_bi, local_masks) + edge_loss = edge_loss*1.0 + self.add_summary(self.gen_writer, 'loss/edge_loss', edge_loss.item()) + + loss = flow_loss + warp_loss + edge_loss + loss.backward() + self.optimG.step() + self.update_learning_rate() + + # write image to tensorboard + # if self.iteration % 200 == 0: + if self.iteration % 200 == 0 and self.gen_writer is not None: + t = 5 + # forward to cpu + gt_flows_forward_cpu = flow_to_image(gt_flows_bi[0][0]).cpu() + masked_flows_forward_cpu = (gt_flows_forward_cpu[t] * (1-local_masks[0][t].cpu())).to(gt_flows_forward_cpu) + pred_flows_forward_cpu = flow_to_image(pred_flows_bi[0][0]).cpu() + + flow_results = torch.cat([gt_flows_forward_cpu[t], masked_flows_forward_cpu, pred_flows_forward_cpu[t]], 1) + self.gen_writer.add_image('img/flow-f:gt-pred', flow_results, self.iteration) + + # backward to cpu + gt_flows_backward_cpu = flow_to_image(gt_flows_bi[1][0]).cpu() + masked_flows_backward_cpu = (gt_flows_backward_cpu[t] * (1-local_masks[0][t+1].cpu())).to(gt_flows_backward_cpu) + pred_flows_backward_cpu = flow_to_image(pred_flows_bi[1][0]).cpu() + + flow_results = torch.cat([gt_flows_backward_cpu[t], masked_flows_backward_cpu, pred_flows_backward_cpu[t]], 1) + self.gen_writer.add_image('img/flow-b:gt-pred', flow_results, self.iteration) + + # TODO: show edge + # forward + gt_edges_forward_cpu = gt_edges_bi[0][0].cpu() + masked_edges_forward_cpu = (gt_edges_forward_cpu[t] * (1-local_masks[0][t].cpu())).to(gt_edges_forward_cpu) + pred_edges_forward_cpu = pred_edges_bi[0][0].cpu() + + edge_results = torch.cat([gt_edges_forward_cpu[t], masked_edges_forward_cpu, pred_edges_forward_cpu[t]], 1) + self.gen_writer.add_image('img/edge-f:gt-pred', edge_results, self.iteration) + # backward + gt_edges_backward_cpu = gt_edges_bi[1][0].cpu() + masked_edges_backward_cpu = (gt_edges_backward_cpu[t] * (1-local_masks[0][t+1].cpu())).to(gt_edges_backward_cpu) + pred_edges_backward_cpu = pred_edges_bi[1][0].cpu() + + edge_results = torch.cat([gt_edges_backward_cpu[t], masked_edges_backward_cpu, pred_edges_backward_cpu[t]], 1) + self.gen_writer.add_image('img/edge-b:gt-pred', edge_results, self.iteration) + + # console logs + if self.config['global_rank'] == 0: + pbar.update(1) + pbar.set_description((f"flow: {flow_loss.item():.3f}; " + f"warp: {warp_loss.item():.3f}; " + f"edge: {edge_loss.item():.3f}; " + f"lr: {self.get_lr()}")) + + if self.iteration % self.train_args['log_freq'] == 
0: + logging.info(f"[Iter {self.iteration}] " + f"flow: {flow_loss.item():.4f}; " + f"warp: {warp_loss.item():.4f}") + + # saving models + if self.iteration % self.train_args['save_freq'] == 0: + self.save(int(self.iteration)) + + if self.iteration > self.train_args['iterations']: + break + + train_data = self.prefetcher.next() \ No newline at end of file diff --git a/florence_sam/core/utils.py b/florence_sam/core/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..37dccb2d26e6916aacbd530ab03726a7c54f8ec8 --- /dev/null +++ b/florence_sam/core/utils.py @@ -0,0 +1,371 @@ +import os +import io +import cv2 +import random +import numpy as np +from PIL import Image, ImageOps +import zipfile +import math + +import torch +import matplotlib +import matplotlib.patches as patches +from matplotlib.path import Path +from matplotlib import pyplot as plt +from torchvision import transforms + +# matplotlib.use('agg') + +# ########################################################################### +# Directory IO +# ########################################################################### + + +def read_dirnames_under_root(root_dir): + dirnames = [ + name for i, name in enumerate(sorted(os.listdir(root_dir))) + if os.path.isdir(os.path.join(root_dir, name)) + ] + print(f'Reading directories under {root_dir}, num: {len(dirnames)}') + return dirnames + + +class TrainZipReader(object): + file_dict = dict() + + def __init__(self): + super(TrainZipReader, self).__init__() + + @staticmethod + def build_file_dict(path): + file_dict = TrainZipReader.file_dict + if path in file_dict: + return file_dict[path] + else: + file_handle = zipfile.ZipFile(path, 'r') + file_dict[path] = file_handle + return file_dict[path] + + @staticmethod + def imread(path, idx): + zfile = TrainZipReader.build_file_dict(path) + filelist = zfile.namelist() + filelist.sort() + data = zfile.read(filelist[idx]) + # + im = Image.open(io.BytesIO(data)) + return im + + +class TestZipReader(object): + file_dict = dict() + + def __init__(self): + super(TestZipReader, self).__init__() + + @staticmethod + def build_file_dict(path): + file_dict = TestZipReader.file_dict + if path in file_dict: + return file_dict[path] + else: + file_handle = zipfile.ZipFile(path, 'r') + file_dict[path] = file_handle + return file_dict[path] + + @staticmethod + def imread(path, idx): + zfile = TestZipReader.build_file_dict(path) + filelist = zfile.namelist() + filelist.sort() + data = zfile.read(filelist[idx]) + file_bytes = np.asarray(bytearray(data), dtype=np.uint8) + im = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) + im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB)) + # im = Image.open(io.BytesIO(data)) + return im + + +# ########################################################################### +# Data augmentation +# ########################################################################### + + +def to_tensors(): + return transforms.Compose([Stack(), ToTorchFormatTensor()]) + + +class GroupRandomHorizontalFlowFlip(object): + """Randomly horizontally flips the given PIL.Image with a probability of 0.5 + """ + def __call__(self, img_group, flowF_group, flowB_group): + v = random.random() + if v < 0.5: + ret_img = [ + img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group + ] + ret_flowF = [ff[:, ::-1] * [-1.0, 1.0] for ff in flowF_group] + ret_flowB = [fb[:, ::-1] * [-1.0, 1.0] for fb in flowB_group] + return ret_img, ret_flowF, ret_flowB + else: + return img_group, flowF_group, flowB_group + + +class 
GroupRandomHorizontalFlip(object): + """Randomly horizontally flips the given PIL.Image with a probability of 0.5 + """ + def __call__(self, img_group, is_flow=False): + v = random.random() + if v < 0.5: + ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] + if is_flow: + for i in range(0, len(ret), 2): + # invert flow pixel values when flipping + ret[i] = ImageOps.invert(ret[i]) + return ret + else: + return img_group + + +class Stack(object): + def __init__(self, roll=False): + self.roll = roll + + def __call__(self, img_group): + mode = img_group[0].mode + if mode == '1': + img_group = [img.convert('L') for img in img_group] + mode = 'L' + if mode == 'L': + return np.stack([np.expand_dims(x, 2) for x in img_group], axis=2) + elif mode == 'RGB': + if self.roll: + return np.stack([np.array(x)[:, :, ::-1] for x in img_group], + axis=2) + else: + return np.stack(img_group, axis=2) + else: + raise NotImplementedError(f"Image mode {mode}") + + +class ToTorchFormatTensor(object): + """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] + to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ + def __init__(self, div=True): + self.div = div + + def __call__(self, pic): + if isinstance(pic, np.ndarray): + # numpy img: [L, C, H, W] + img = torch.from_numpy(pic).permute(2, 3, 0, 1).contiguous() + else: + # handle PIL Image + img = torch.ByteTensor(torch.ByteStorage.from_buffer( + pic.tobytes())) + img = img.view(pic.size[1], pic.size[0], len(pic.mode)) + # put it from HWC to CHW format + # yikes, this transpose takes 80% of the loading time/CPU + img = img.transpose(0, 1).transpose(0, 2).contiguous() + img = img.float().div(255) if self.div else img.float() + return img + + +# ########################################################################### +# Create masks with random shape +# ########################################################################### + + +def create_random_shape_with_random_motion(video_length, + imageHeight=240, + imageWidth=432): + # get a random shape + height = random.randint(imageHeight // 3, imageHeight - 1) + width = random.randint(imageWidth // 3, imageWidth - 1) + edge_num = random.randint(6, 8) + ratio = random.randint(6, 8) / 10 + + region = get_random_shape(edge_num=edge_num, + ratio=ratio, + height=height, + width=width) + region_width, region_height = region.size + # get random position + x, y = random.randint(0, imageHeight - region_height), random.randint( + 0, imageWidth - region_width) + velocity = get_random_velocity(max_speed=3) + m = Image.fromarray(np.zeros((imageHeight, imageWidth)).astype(np.uint8)) + m.paste(region, (y, x, y + region.size[0], x + region.size[1])) + masks = [m.convert('L')] + # return fixed masks + if random.uniform(0, 1) > 0.5: + return masks * video_length + # return moving masks + for _ in range(video_length - 1): + x, y, velocity = random_move_control_points(x, + y, + imageHeight, + imageWidth, + velocity, + region.size, + maxLineAcceleration=(3, + 0.5), + maxInitSpeed=3) + m = Image.fromarray( + np.zeros((imageHeight, imageWidth)).astype(np.uint8)) + m.paste(region, (y, x, y + region.size[0], x + region.size[1])) + masks.append(m.convert('L')) + return masks + + +def create_random_shape_with_random_motion_zoom_rotation(video_length, zoomin=0.9, zoomout=1.1, rotmin=1, rotmax=10, imageHeight=240, imageWidth=432): + # get a random shape + assert zoomin < 1, "Zoom-in parameter must be smaller than 1" + assert zoomout > 1, "Zoom-out parameter must be larger than 1" + 
assert rotmin < rotmax, "Minimum value of rotation must be smaller than maximum value!"
+    height = random.randint(imageHeight//3, imageHeight-1)
+    width = random.randint(imageWidth//3, imageWidth-1)
+    edge_num = random.randint(6, 8)
+    ratio = random.randint(6, 8)/10
+    region = get_random_shape(
+        edge_num=edge_num, ratio=ratio, height=height, width=width)
+    region_width, region_height = region.size
+    # get random position
+    x, y = random.randint(
+        0, imageHeight-region_height), random.randint(0, imageWidth-region_width)
+    velocity = get_random_velocity(max_speed=3)
+    m = Image.fromarray(np.zeros((imageHeight, imageWidth)).astype(np.uint8))
+    m.paste(region, (y, x, y+region.size[0], x+region.size[1]))
+    masks = [m.convert('L')]
+    # return fixed masks
+    if random.uniform(0, 1) > 0.5:
+        return masks*video_length  # -> directly copy all the base masks
+    # return moving masks
+    for _ in range(video_length-1):
+        x, y, velocity = random_move_control_points(
+            x, y, imageHeight, imageWidth, velocity, region.size, maxLineAcceleration=(3, 0.5), maxInitSpeed=3)
+        m = Image.fromarray(
+            np.zeros((imageHeight, imageWidth)).astype(np.uint8))
+        ### added by kaidong, to simulate zoom-in, zoom-out and rotation
+        extra_transform = random.uniform(0, 1)
+        # zoom in and zoom out
+        if extra_transform > 0.75:
+            resize_coefficient = random.uniform(zoomin, zoomout)
+            region = region.resize((math.ceil(region_width * resize_coefficient), math.ceil(region_height * resize_coefficient)), Image.NEAREST)
+            m.paste(region, (y, x, y + region.size[0], x + region.size[1]))
+            region_width, region_height = region.size
+        # rotation
+        elif extra_transform > 0.5:
+            m.paste(region, (y, x, y + region.size[0], x + region.size[1]))
+            m = m.rotate(random.randint(rotmin, rotmax))
+            # region_width, region_height = region.size
+        ### end
+        else:
+            m.paste(region, (y, x, y+region.size[0], x+region.size[1]))
+        masks.append(m.convert('L'))
+    return masks
+
+
+def get_random_shape(edge_num=9, ratio=0.7, width=432, height=240):
+    '''
+      There is the initial point and 3 points per cubic bezier curve.
+      Thus, the curve will only pass through n points, which will be the sharp edges.
+      The other 2 modify the shape of the bezier curve.
+ edge_num, Number of possibly sharp edges + points_num, number of points in the Path + ratio, (0, 1) magnitude of the perturbation from the unit circle, + ''' + points_num = edge_num*3 + 1 + angles = np.linspace(0, 2*np.pi, points_num) + codes = np.full(points_num, Path.CURVE4) + codes[0] = Path.MOVETO + # Using this instead of Path.CLOSEPOLY avoids an innecessary straight line + verts = np.stack((np.cos(angles), np.sin(angles))).T * \ + (2*ratio*np.random.random(points_num)+1-ratio)[:, None] + verts[-1, :] = verts[0, :] + path = Path(verts, codes) + # draw paths into images + fig = plt.figure() + ax = fig.add_subplot(111) + patch = patches.PathPatch(path, facecolor='black', lw=2) + ax.add_patch(patch) + ax.set_xlim(np.min(verts)*1.1, np.max(verts)*1.1) + ax.set_ylim(np.min(verts)*1.1, np.max(verts)*1.1) + ax.axis('off') # removes the axis to leave only the shape + fig.canvas.draw() + # convert plt images into numpy images + data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) + data = data.reshape((fig.canvas.get_width_height()[::-1] + (3,))) + plt.close(fig) + # postprocess + data = cv2.resize(data, (width, height))[:, :, 0] + data = (1 - np.array(data > 0).astype(np.uint8))*255 + corrdinates = np.where(data > 0) + xmin, xmax, ymin, ymax = np.min(corrdinates[0]), np.max( + corrdinates[0]), np.min(corrdinates[1]), np.max(corrdinates[1]) + region = Image.fromarray(data).crop((ymin, xmin, ymax, xmax)) + return region + + +def random_accelerate(velocity, maxAcceleration, dist='uniform'): + speed, angle = velocity + d_speed, d_angle = maxAcceleration + if dist == 'uniform': + speed += np.random.uniform(-d_speed, d_speed) + angle += np.random.uniform(-d_angle, d_angle) + elif dist == 'guassian': + speed += np.random.normal(0, d_speed / 2) + angle += np.random.normal(0, d_angle / 2) + else: + raise NotImplementedError( + f'Distribution type {dist} is not supported.') + return (speed, angle) + + +def get_random_velocity(max_speed=3, dist='uniform'): + if dist == 'uniform': + speed = np.random.uniform(max_speed) + elif dist == 'guassian': + speed = np.abs(np.random.normal(0, max_speed / 2)) + else: + raise NotImplementedError( + f'Distribution type {dist} is not supported.') + angle = np.random.uniform(0, 2 * np.pi) + return (speed, angle) + + +def random_move_control_points(X, + Y, + imageHeight, + imageWidth, + lineVelocity, + region_size, + maxLineAcceleration=(3, 0.5), + maxInitSpeed=3): + region_width, region_height = region_size + speed, angle = lineVelocity + X += int(speed * np.cos(angle)) + Y += int(speed * np.sin(angle)) + lineVelocity = random_accelerate(lineVelocity, + maxLineAcceleration, + dist='guassian') + if ((X > imageHeight - region_height) or (X < 0) + or (Y > imageWidth - region_width) or (Y < 0)): + lineVelocity = get_random_velocity(maxInitSpeed, dist='guassian') + new_X = np.clip(X, 0, imageHeight - region_height) + new_Y = np.clip(Y, 0, imageWidth - region_width) + return new_X, new_Y, lineVelocity + + +if __name__ == '__main__': + + trials = 10 + for _ in range(trials): + video_length = 10 + # The returned masks are either stationary (50%) or moving (50%) + masks = create_random_shape_with_random_motion(video_length, + imageHeight=240, + imageWidth=432) + + for m in masks: + cv2.imshow('mask', np.array(m)) + cv2.waitKey(500) diff --git a/florence_sam/datasets/davis/test.json b/florence_sam/datasets/davis/test.json new file mode 100644 index 0000000000000000000000000000000000000000..54875df42cba3451a6c3f2642706652ae087996a --- /dev/null +++ 
b/florence_sam/datasets/davis/test.json @@ -0,0 +1 @@ +{"bear": 82, "blackswan": 50, "bmx-bumps": 90, "bmx-trees": 80, "boat": 75, "breakdance": 84, "breakdance-flare": 71, "bus": 80, "camel": 90, "car-roundabout": 75, "car-shadow": 40, "car-turn": 80, "cows": 104, "dance-jump": 60, "dance-twirl": 90, "dog": 60, "dog-agility": 25, "drift-chicane": 52, "drift-straight": 50, "drift-turn": 64, "elephant": 80, "flamingo": 80, "goat": 90, "hike": 80, "hockey": 75, "horsejump-high": 50, "horsejump-low": 60, "kite-surf": 50, "kite-walk": 80, "libby": 49, "lucia": 70, "mallard-fly": 70, "mallard-water": 80, "motocross-bumps": 60, "motocross-jump": 40, "motorbike": 43, "paragliding": 70, "paragliding-launch": 80, "parkour": 100, "rhino": 90, "rollerblade": 35, "scooter-black": 43, "scooter-gray": 75, "soapbox": 99, "soccerball": 48, "stroller": 91, "surf": 55, "swing": 60, "tennis": 70, "train": 80} \ No newline at end of file diff --git a/florence_sam/datasets/davis/train.json b/florence_sam/datasets/davis/train.json new file mode 100644 index 0000000000000000000000000000000000000000..3f63b2d95553e8ab606d9c207a6a8ae56a28035c --- /dev/null +++ b/florence_sam/datasets/davis/train.json @@ -0,0 +1 @@ +{"baseball": 90, "basketball-game": 77, "bears-ball": 78, "bmx-rider": 85, "butterfly": 80, "car-competition": 66, "cat": 52, "chairlift": 99, "circus": 73, "city-ride": 70, "crafting": 45, "curling": 76, "dog-competition": 85, "dolphins-show": 74, "dribbling": 49, "drone-flying": 70, "ducks": 75, "elephant-hyenas": 55, "giraffes": 88, "gym-ball": 69, "helicopter-landing": 77, "horse-race": 80, "horses-kids": 78, "hurdles-race": 55, "ice-hockey": 52, "jet-ski": 83, "juggling-selfie": 78, "kayak-race": 63, "kids-robot": 75, "landing": 35, "luggage": 83, "mantaray": 73, "marbles": 70, "mascot": 78, "mermaid": 78, "monster-trucks": 99, "motorbike-indoors": 79, "motorbike-race": 88, "music-band": 87, "obstacles": 81, "obstacles-race": 48, "peacock": 75, "plane-exhibition": 73, "puppet": 100, "robot-battle": 85, "robotic-arm": 82, "rodeo": 85, "sea-turtle": 90, "skydiving-jumping": 75, "snowboard-race": 75, "snowboard-sand": 55, "surfer": 80, "swimmer": 86, "table-tennis": 70, "tram": 84, "trucks-race": 78, "twist-dance": 83, "volleyball-beach": 73, "water-slide": 88, "weightlifting": 90} \ No newline at end of file diff --git a/florence_sam/datasets/youtube-vos/test.json b/florence_sam/datasets/youtube-vos/test.json new file mode 100644 index 0000000000000000000000000000000000000000..c4d79d915bd9171830d7b10de53f433dc92ca81d --- /dev/null +++ b/florence_sam/datasets/youtube-vos/test.json @@ -0,0 +1 @@ +{"0070461469": 91, "00bd64cb00": 180, "00fef116ee": 96, "012257ffcf": 180, "01475d1fe7": 180, "0163b18674": 96, "017fa2adaa": 180, "0232ba85ed": 180, "02b1a46f42": 180, "02caec8ac0": 91, "047436c72c": 96, "0481e165b4": 150, "04f98557e7": 144, "05e73c3ecb": 96, "08f95ce1ff": 144, "0b6db1c6fd": 96, "0bd8c18197": 180, "0c6d13ee2c": 91, "0c7ba00455": 96, "0cba3e52eb": 91, "0d16524447": 150, "0d4827437d": 150, "0d62fa582a": 180, "0e1f91c0d7": 91, "0ef454b3f0": 91, "10e18fcf0c": 96, "11105e147e": 91, "11444b16da": 91, "11a4df37a4": 180, "11b3298d6a": 96, "13006c4c7e": 96, "1345523ba1": 180, "144a16eb12": 180, "15a6536e74": 180, "1616507c9e": 180, "1655f4782a": 92, "16608ccef6": 96, "16bc05b66c": 150, "16f1e1779b": 96, "17caf00e26": 96, "18f1e2f716": 91, "191a0bfcdf": 180, "19d4acf831": 91, "1a1dc21969": 96, "1a72d9fcea": 150, "1a92c81edd": 180, "1b2c2022a3": 96, "1d1601d079": 180, "1db7b25d1c": 180, "1dee5b7b5a": 
150, "1e0c2e54f2": 96, "1e458b1539": 92, "1e6ac08c86": 91, "1e790eae99": 56, "1ed0c6ca5b": 96, "1edbdb6d18": 180, "1f2015e056": 96, "215ac56b15": 180, "2233485b49": 96, "224d171af6": 180, "237c6ebaf4": 91, "2462c51412": 96, "24bf968338": 180, "250d5953a0": 150, "25bcf222fb": 180, "25ea8feecf": 150, "25fc493839": 92, "262f69837e": 180, "264ca20298": 180, "26d8d48248": 51, "270f84c5e5": 91, "27889bc0fe": 180, "29b87846e7": 96, "29d2e79171": 180, "2a44411a3d": 180, "2b426fd330": 180, "2c4c4e2d5b": 180, "2c4c718eda": 180, "2c962c1bbe": 180, "2cc841341c": 92, "2cf6c4d17e": 91, "2d7ef0be04": 180, "2e5e52c6c8": 150, "2ef6fce8c6": 144, "3014e769bf": 180, "30d5f163b6": 180, "318df73d6a": 90, "31fbb9df3c": 96, "3255fcad2f": 180, "3303eea8e4": 91, "3447c30052": 150, "362722660c": 180, "37e0b4642b": 91, "383e51ed93": 180, "386b050bd0": 41, "3876ba3136": 180, "388ec2934c": 180, "38b45d9c6b": 96, "396680839c": 150, "39ffa3a4a4": 180, "3b0291b2be": 150, "3b333693f4": 180, "3bde1da2cf": 96, "3c5f4e6672": 91, "3c80682cc6": 92, "3ce634a1c1": 180, "3d6a761295": 96, "3da878c317": 91, "3db571b7ee": 96, "3e2336812c": 180, "3f16b04d6d": 96, "3fbbc75c5e": 180, "4015a1e1cc": 87, "406cd7bd48": 91, "407b87ba26": 91, "40a5628dcc": 91, "41af239f5e": 180, "42c671b285": 180, "42de37f462": 180, "4381c60a2f": 180, "4445dc0af5": 180, "44a3419d24": 180, "4566034eaf": 51, "45877fd086": 180, "4595935b88": 91, "4923010cfe": 96, "49b6d81ee8": 180, "4a39c34139": 180, "4a5a9fde01": 144, "4a90394892": 180, "4af10534e4": 180, "4af307f5bc": 180, "4be0ac97df": 91, "4be9025726": 91, "4c18a7bfab": 91, "4c269afea9": 91, "4c3db058db": 179, "4e1ef26a1e": 96, "50f4c0195b": 150, "50f89963c0": 96, "5105c5e4b8": 180, "51d60e4f93": 46, "51ee638399": 96, "522ea1a892": 180, "528e9f30e7": 91, "532efb206a": 180, "544b1486ac": 91, "5592eb680c": 180, "562fadda3a": 91, "568b30cf93": 150, "575f0e2d8e": 91, "5767fe466c": 150, "581c78d558": 180, "5a0ddcf128": 96, "5adf056317": 144, "5b33c701ce": 180, "5b8f636b33": 150, "5b9d26b1d7": 180, "5c24813a0b": 180, "5d0b35f30f": 46, "5e130392e1": 96, "5e41efe5bc": 180, "5e75de78ae": 91, "5fc34880f7": 180, "60912d6bab": 96, "612c96383d": 180, "61e5fd2205": 144, "620e350d23": 180, "62c27fcaaf": 180, "637c22d967": 91, "63eaebe4a2": 96, "63fd6b311e": 180, "64099f32ab": 180, "65643c4b34": 96, "660a88feb5": 180, "664b8d0c9f": 150, "665a7947b0": 180, "66affc2e86": 180, "673b1c03c9": 96, "67780f49c2": 91, "679a24b7bd": 180, "680d35b75b": 144, "68364a69ef": 180, "683bfaf498": 180, "68e883ff28": 180, "691f63f681": 180, "69f2d3146c": 96, "6c5c018237": 91, "6caa33f43a": 96, "6d2c7cc107": 180, "6d55effbbe": 144, "6d6b09b420": 51, "6d715acc3e": 180, "6e89b7359d": 96, "6e9428d555": 150, "6e9feafa2b": 91, "6eced45fee": 180, "6ef0b3282c": 96, "6f9019f0ea": 91, "6fe0ee9b7c": 180, "6ff74d4995": 180, "712b6ec68e": 96, "71680a627f": 96, "716aad4b56": 180, "721c2cda07": 180, "72218d52ac": 96, "7286b8aac9": 91, "728ba7998d": 91, "73b2b9af5f": 96, "7452941f4f": 180, "759d8249dd": 91, "75a55907dc": 150, "75f3a2a19e": 150, "77e7e4b1a1": 144, "7898e6542c": 180, "78e639c2c4": 91, "79091168f8": 180, "7ad5af3fe6": 180, "7b1a7dec16": 150, "7b36c4c3db": 180, "7b455d07cc": 150, "7bce4cfa48": 180, "7c064444d0": 144, "7c8014406a": 91, "7cb70182e5": 96, "7d04e540f5": 91, "7d5df020bf": 96, "7dfda4322c": 96, "7e6a27cc7c": 96, "7e9e344bf4": 180, "7eb9424a53": 180, "7ec8ea61f4": 91, "7fd2806fb0": 180, "8006501830": 150, "8014aeb412": 180, "80d1d22999": 180, "812f31be15": 144, "81312af68f": 92, "82843a1676": 150, "835aea9584": 36, "8366c67e9b": 180, 
"8467aa6c5c": 180, "8470ee5f48": 180, "8473ae2c60": 180, "8519765a65": 150, "851f73e4fc": 96, "85621c2c81": 150, "85b045995c": 180, "860c0a7cf8": 92, "861bd4b31e": 180, "8639adb930": 180, "8683e4d414": 150, "8687e892ff": 180, "86c5907811": 180, "870c197c8b": 180, "87de455fb7": 180, "87e1975888": 96, "87f5d4903c": 96, "883ede763d": 150, "88b84fe107": 91, "88ee198ce0": 91, "89d148a39f": 96, "89f3d789c5": 180, "8a22bb6c32": 180, "8a76048654": 180, "8a99d63296": 97, "8b0697f61a": 96, "8b722babfb": 180, "8ba5691030": 180, "8bdd52a66b": 150, "8c427b6a57": 180, "8cb68f36f6": 91, "8cbf0d6194": 180, "8d1ab4a2ed": 91, "8d55a5aebb": 180, "8d8c5906bd": 180, "8eb95e2e56": 150, "8f99788aa7": 180, "8fa5b3778f": 91, "9009ab4811": 91, "90c10e44cf": 91, "90c2c5c336": 96, "9124189275": 91, "91ee8300e7": 144, "9246556dfd": 91, "9323741e3b": 150, "94a33d3d20": 180, "9584210f86": 91, "9637e3b658": 51, "966c4c022e": 180, "9781e083b5": 180, "990d358980": 180, "995c087687": 150, "99a7d42674": 144, "99f056c109": 180, "9a29032b9c": 180, "9b07fc4cf6": 180, "9b5aa49509": 96, "9b5abb8108": 91, "9be210e984": 150, "9c3c28740e": 180, "9cace717c5": 180, "9d3ff7c1c1": 91, "9d8c66d92c": 150, "9eaa2f1fcc": 91, "9f1967f60f": 96, "9fa359e1cb": 150, "9fca469ddd": 96, "9ff11b620a": 180, "9ff655b9a3": 180, "a029b21901": 180, "a0c7eedeb8": 144, "a15e70486b": 180, "a35bef8bbf": 180, "a4309379a2": 91, "a51335af59": 96, "a5690fb3bf": 180, "a5b71f76fb": 86, "a5c8b1f945": 150, "a635426233": 150, "a73cc75b81": 144, "a7863d3903": 180, "a88f1fd4e3": 144, "aa2e90aa98": 144, "aab5ecf878": 91, "aafc5edf08": 96, "ab49400ffe": 180, "acd7b890f6": 91, "ad3ee9b86b": 180, "ad5fda372c": 144, "adb2040e5f": 91, "ae30aed29d": 180, "ae57b941a0": 180, "aeb9de8f66": 41, "af658a277c": 91, "af881cd801": 150, "b016a85236": 180, "b0313efe37": 96, "b19d6e149a": 120, "b19f091836": 180, "b2304e81df": 144, "b2d23dcf3a": 150, "b3cee57f31": 36, "b41a7ebfc6": 180, "b455f801b5": 46, "b47336c07b": 96, "b499ce791f": 180, "b52d26ddf9": 96, "b5c525cb08": 180, "b5d3b9be03": 91, "b6386bc3ce": 96, "b748b0f3be": 180, "b75e9ea782": 180, "b8237af453": 180, "b8a2104720": 96, "b8d6f92a65": 96, "b8f93a4094": 180, "bb0a1708ea": 180, "bb2245ab94": 180, "bb4ae8019f": 180, "bbdc38baa0": 76, "bbfe438d63": 96, "bc2be9fdc8": 96, "bcc00265f4": 96, "bd42cc48e4": 150, "bd43315417": 180, "bd85b04982": 51, "bda3146a46": 96, "be2b40d82a": 150, "c0f856e4de": 96, "c1bfacba4a": 91, "c1dcd30fb2": 96, "c285ede7f3": 180, "c2a6163d39": 150, "c3517ebed5": 86, "c3aabac30c": 180, "c3bb62a2f7": 144, "c454f19e90": 150, "c4c410ccd7": 180, "c5b94822e3": 180, "c64e9d1f7e": 91, "c682d1748f": 150, "c6d04b1ca3": 180, "c6dda81d86": 180, "c71623ab0c": 180, "c7db88a9db": 144, "c80ecb97d6": 150, "c8dd4de705": 180, "c915c8cbba": 150, "cb25a994d8": 144, "cba3e31e88": 91, "cc43a853e2": 180, "cc6c653874": 180, "cc718c7746": 180, "cc7e050f7f": 144, "cd14ed8653": 144, "cd5e4efaad": 46, "cddf78284d": 86, "cde37afe57": 144, "ce358eaf23": 150, "ce45145721": 91, "ce7d4af66d": 180, "ce9fb4bd8e": 91, "cec4db17a0": 180, "cecdd82d3c": 180, "ceea39e735": 180, "cf3e28c92a": 180, "cf8c671dab": 150, "cfd1e8166f": 96, "cfe7d98e50": 150, "cff0bbcba8": 96, "d1219663b7": 180, "d18ea7cd51": 180, "d1ed509b94": 91, "d22c5d5908": 81, "d2c6c7d8f6": 96, "d380084b7c": 91, "d3a2586e34": 180, "d3b1039c67": 180, "d3b25a44b3": 180, "d3f1d615b1": 180, "d7203fdab6": 96, "d76e963754": 96, "d7b3892660": 66, "d8b3e257da": 150, "d8b93e6bb1": 180, "d949468ad6": 180, "da553b619f": 180, "daac20af89": 180, "db8bf2430a": 180, "dbd729449a": 180, 
"dc0928b157": 91, "dc9aa0b8c0": 180, "dcc0637430": 180, "dcd3e1b53e": 86, "de1854f657": 101, "deb31e46cf": 96, "debccf2743": 150, "decf924833": 150, "e08b241b91": 180, "e0daa3b339": 180, "e1a52251b7": 180, "e1fc6d5237": 91, "e228ce16fd": 96, "e36dbb2ab7": 91, "e3dcf7a45e": 180, "e411e957af": 180, "e412e6a76b": 180, "e45a003b97": 179, "e60826ddf9": 91, "e6295c843b": 96, "e62c23b62b": 150, "e6b7a8fe73": 180, "e6f0e3131c": 180, "e7a3f8884e": 180, "e7c176739c": 180, "e965cd989b": 86, "e989440f7b": 150, "e98d115b9c": 81, "ea5f8c74d6": 180, "ea8a5b5a78": 96, "eaad295e8c": 150, "eaf4947f74": 180, "eb65451f4b": 92, "eb79c39e8e": 180, "eb92c92912": 96, "ebbb88e5f5": 180, "ec9b46eb6c": 180, "eca0be379d": 180, "ed33e8efb7": 66, "eda3a7bbb1": 150, "ee3ff10184": 180, "eec8403cc8": 91, "eee2db8829": 150, "ef22b8a227": 91, "ef8737ca22": 180, "eff7c1c098": 180, "f00dc892b2": 96, "f019c9ff98": 96, "f01edcbffb": 179, "f0866da89c": 180, "f12eb5256e": 180, "f1df2ea2dc": 180, "f29119c644": 180, "f3419f3a62": 150, "f35029f76d": 180, "f39dc2240d": 180, "f3aa63fa74": 150, "f3f3c201bd": 180, "f4865471b4": 96, "f505ae958c": 91, "f7605e73cd": 150, "f7917687d6": 180, "f7d310e219": 180, "f7e25f87b2": 180, "f94cd39525": 91, "f9f9aa431c": 180, "fa666fcc95": 66, "fb10740465": 180, "fb25b14e48": 91, "fb28ec1ba3": 150, "fbdda5ec7b": 96, "fbdf2180ee": 150, "fc0db37221": 91, "fd237cf4fb": 180, "fe36582e18": 180, "fef14bb2f2": 180, "ffe59ed1c1": 150} \ No newline at end of file diff --git a/florence_sam/datasets/youtube-vos/train.json b/florence_sam/datasets/youtube-vos/train.json new file mode 100644 index 0000000000000000000000000000000000000000..ac43202f1016619010595d602908690b2be9fddc --- /dev/null +++ b/florence_sam/datasets/youtube-vos/train.json @@ -0,0 +1 @@ +{"003234408d": 180, "0043f083b5": 96, "0044fa5fba": 87, "005a527edd": 144, "0065b171f9": 180, "00917dcfc4": 96, "00a23ccf53": 180, "00ad5016a4": 91, "01082ae388": 150, "011ac0a06f": 180, "013099c098": 91, "0155498c85": 180, "01694ad9c8": 91, "017ac35701": 180, "01b80e8e1a": 61, "01baa5a4e1": 150, "01c3111683": 180, "01c4cb5ffe": 180, "01c76f0a82": 96, "01c783268c": 180, "01e64dd36a": 91, "01ed275c6e": 96, "01ff60d1fa": 180, "020cd28cd2": 150, "02264db755": 180, "0248626d9a": 91, "02668dbffa": 150, "0274193026": 144, "02d28375aa": 180, "02f3a5c4df": 46, "031ccc99b1": 91, "0321b18c10": 92, "0348a45bca": 180, "0355e92655": 92, "0358b938c1": 91, "0368107cf1": 96, "0379ddf557": 180, "038b2cc71d": 91, "038c15a5dd": 178, "03a06cc98a": 96, "03a63e187f": 180, "03c95b4dae": 92, "03e2b57b0e": 150, "04194e1248": 180, "04259896e2": 180, "0444918a5f": 96, "04460a7a52": 180, "04474174a4": 180, "0450095513": 150, "045f00aed2": 180, "04667fabaa": 180, "04735c5030": 91, "04990d1915": 92, "04d62d9d98": 96, "04f21da964": 180, "04fbad476e": 180, "04fe256562": 96, "0503bf89c9": 150, "0536c9eed0": 92, "054acb238f": 180, "05579ca250": 150, "056c200404": 96, "05774f3a2c": 180, "058a7592c8": 96, "05a0a513df": 96, "05a569d8aa": 91, "05aa652648": 150, "05d7715782": 96, "05e0b0f28f": 150, "05fdbbdd7a": 66, "05ffcfed85": 180, "0630391881": 150, "06840b2bbe": 91, "068f7dce6f": 180, "0693719753": 150, "06ce2b51fb": 91, "06e224798e": 180, "06ee361788": 91, "06fbb3fa2c": 90, "0700264286": 96, "070c918ca7": 180, "07129e14a4": 180, "07177017e9": 86, "07238ffc58": 180, "07353b2a89": 150, "0738493cbf": 87, "075926c651": 87, "075c701292": 180, "0762ea9a30": 96, "07652ee4af": 150, "076f206928": 96, "077d32af19": 96, "079049275c": 144, "07913cdda7": 92, "07a11a35e8": 180, "07ac33b6df": 150, 
"07b6e8fda8": 46, "07c62c3d11": 180, "07cc1c7d74": 180, "080196ef01": 180, "081207976e": 96, "081ae4fa44": 150, "081d8250cb": 96, "082900c5d4": 96, "0860df21e2": 180, "0866d4c5e3": 91, "0891ac2eb6": 81, "08931bc458": 180, "08aa2705d5": 180, "08c8450db7": 96, "08d50b926c": 180, "08e1e4de15": 180, "08e48c1a48": 92, "08f561c65e": 180, "08feb87790": 96, "09049f6fe3": 150, "092e4ff450": 180, "09338adea8": 180, "093c335ccc": 144, "0970d28339": 180, "0974a213dc": 96, "097b471ed8": 96, "0990941758": 180, "09a348f4fa": 150, "09a6841288": 96, "09c5bad17b": 96, "09c9ce80c7": 180, "09ff54fef4": 150, "0a23765d15": 91, "0a275e7f12": 96, "0a2f2bd294": 96, "0a7a2514aa": 96, "0a7b27fde9": 180, "0a8c467cc3": 180, "0ac8c560ae": 96, "0b1627e896": 96, "0b285c47f6": 144, "0b34ec1d55": 180, "0b5b5e8e5a": 96, "0b68535614": 180, "0b6f9105fc": 180, "0b7dbfa3cb": 91, "0b9cea51ca": 180, "0b9d012be8": 180, "0bcfc4177d": 96, "0bd37b23c1": 96, "0bd864064c": 158, "0c11c6bf7b": 180, "0c26bc77ac": 180, "0c3a04798c": 96, "0c44a9d545": 180, "0c817cc390": 180, "0ca839ee9a": 180, "0cd7ac0ac0": 150, "0ce06e0121": 180, "0cfe974a89": 180, "0d2fcc0dcd": 96, "0d3aad05d2": 144, "0d40b015f4": 180, "0d97fba242": 91, "0d9cc80d7e": 51, "0dab85b6d3": 144, "0db5c427a5": 96, "0dbaf284f1": 97, "0de4923598": 97, "0df28a9101": 150, "0e04f636c4": 150, "0e05f0e232": 180, "0e0930474b": 91, "0e27472bea": 180, "0e30020549": 144, "0e621feb6c": 180, "0e803c7d73": 91, "0e9ebe4e3c": 92, "0e9f2785ec": 96, "0ea68d418b": 96, "0eb403a222": 96, "0ee92053d6": 97, "0eefca067f": 150, "0f17fa6fcb": 180, "0f1ac8e9a3": 180, "0f202e9852": 91, "0f2ab8b1ff": 180, "0f51a78756": 150, "0f5fbe16b0": 180, "0f6072077b": 91, "0f6b69b2f4": 180, "0f6c2163de": 144, "0f74ec5599": 180, "0f9683715b": 96, "0fa7b59356": 180, "0fb173695b": 96, "0fc958cde2": 150, "0fe7b1a621": 180, "0ffcdb491c": 96, "101caff7d4": 96, "1022fe8417": 96, "1032e80b37": 96, "103f501680": 180, "104e64565f": 96, "104f1ab997": 91, "106242403f": 96, "10b31f5431": 180, "10eced835e": 91, "110d26fa3a": 150, "1122c1d16a": 180, "1145b49a5f": 180, "11485838c2": 96, "114e7676ec": 180, "1157472b95": 180, "115ee1072c": 91, "1171141012": 150, "117757b4b8": 180, "1178932d2f": 180, "117cc76bda": 180, "1180cbf814": 180, "1187bbd0e3": 96, "1197e44b26": 180, "119cf20728": 180, "119dd54871": 180, "11a0c3b724": 91, "11a6ba8c94": 180, "11c722a456": 180, "11cbcb0b4d": 96, "11ccf5e99d": 96, "11ce6f452e": 91, "11e53de6f2": 46, "11feabe596": 150, "120cb9514d": 180, "12156b25b3": 180, "122896672d": 180, "1232b2f1d4": 36, "1233ac8596": 97, "1239c87234": 180, "1250423f7c": 96, "1257a1bc67": 180, "125d1b19dd": 180, "126d203967": 180, "1295e19071": 96, "12ad198c54": 144, "12bddb2bcb": 150, "12ec9b93ee": 180, "12eebedc35": 91, "132852e094": 180, "1329409f2a": 180, "13325cfa14": 96, "1336440745": 180, "134d06dbf9": 97, "135625b53d": 144, "13870016f9": 92, "13960b3c84": 96, "13adaad9d9": 180, "13ae097e20": 180, "13e3070469": 96, "13f6a8c20d": 144, "1416925cf2": 92, "142d2621f5": 91, "145d5d7c03": 180, "145fdc3ac5": 180, "1471274fa7": 76, "14a6b5a139": 180, "14c21cea0d": 180, "14dae0dc93": 96, "14f9bd22b5": 180, "14fd28ae99": 180, "15097d5d4e": 144, "150ea711f2": 180, "1514e3563f": 180, "152aaa3a9e": 180, "152b7d3bd7": 150, "15617297cc": 180, "15abbe0c52": 150, "15d1fb3de5": 180, "15f67b0fab": 180, "161eb59aad": 96, "16288ea47f": 180, "164410ce62": 91, "165c3c8cd4": 96, "165c42b41b": 91, "165ec9e22b": 144, "1669502269": 91, "16763cccbb": 150, "16adde065e": 96, "16af445362": 96, "16afd538ad": 150, "16c3fa4d5d": 96, "16d1d65c27": 180, 
"16e8599e94": 180, "16fe9fb444": 91, "1705796b02": 96, "1724db7671": 144, "17418e81ea": 180, "175169edbb": 144, "17622326fd": 180, "17656bae77": 91, "17b0d94172": 61, "17c220e4f6": 180, "17c7bcd146": 96, "17cb4afe89": 180, "17cd79a434": 180, "17d18604c3": 96, "17d8ca1a37": 150, "17e33f4330": 180, "17f7a6d805": 150, "180abc8378": 180, "183ba3d652": 96, "185bf64702": 96, "18913cc690": 91, "1892651815": 180, "189ac8208a": 91, "189b44e92c": 97, "18ac264b76": 150, "18b245ab49": 91, "18b5cebc34": 150, "18bad52083": 180, "18bb5144d5": 180, "18c6f205c5": 96, "1903f9ea15": 96, "1917b209f2": 91, "191e74c01d": 150, "19367bb94e": 180, "193ffaa217": 91, "19696b67d3": 96, "197f3ab6f3": 180, "1981e763cc": 180, "198afe39ae": 144, "19a6e62b9b": 150, "19b60d5335": 180, "19c00c11f9": 150, "19e061eb88": 91, "19e8bc6178": 86, "19ee80dac6": 180, "1a25a9170a": 180, "1a359a6c1a": 150, "1a3e87c566": 150, "1a5fe06b00": 91, "1a6c0fbd1e": 144, "1a6f3b5a4b": 96, "1a8afbad92": 92, "1a8bdc5842": 150, "1a95752aca": 150, "1a9c131cb7": 180, "1aa3da3ee3": 150, "1ab27ec7ea": 56, "1abf16d21d": 150, "1acd0f993b": 180, "1ad202e499": 180, "1af8d2395d": 180, "1afd39a1fa": 91, "1b2d31306f": 180, "1b3fa67f0e": 92, "1b43fa74b4": 150, "1b73ea9fc2": 92, "1b7e8bb255": 96, "1b8680f8cd": 180, "1b883843c0": 91, "1b8898785b": 180, "1b88ba1aa4": 180, "1b96a498e5": 150, "1bbc4c274f": 96, "1bd87fe9ab": 66, "1c4090c75b": 180, "1c41934f84": 96, "1c72b04b56": 180, "1c87955a3a": 150, "1c9f9eb792": 180, "1ca240fede": 96, "1ca5673803": 180, "1cada35274": 180, "1cb44b920d": 180, "1cd10e62be": 150, "1d3087d5e5": 180, "1d3685150a": 92, "1d6ff083aa": 96, "1d746352a6": 66, "1da256d146": 91, "1da4e956b1": 180, "1daf812218": 150, "1dba687bce": 180, "1dce57d05d": 86, "1de4a9e537": 97, "1dec5446c8": 180, "1dfbe6f586": 150, "1e1a18c45a": 180, "1e1e42529d": 76, "1e4be70796": 96, "1eb60959c8": 180, "1ec8b2566b": 180, "1ecdc2941c": 180, "1ee0ac70ff": 87, "1ef8e17def": 91, "1f1a2a9fc0": 86, "1f1beb8daa": 150, "1f2609ee13": 180, "1f3876f8d0": 144, "1f4ec0563d": 150, "1f64955634": 96, "1f7d31b5b2": 96, "1f8014b7fd": 96, "1f9c7d10f1": 180, "1fa350df76": 96, "1fc9538993": 180, "1fe2f0ec59": 150, "2000c02f9d": 180, "20142b2f05": 180, "201a8d75e5": 150, "2023b3ee4f": 180, "202b767bbc": 92, "203594a418": 180, "2038987336": 150, "2039c3aecb": 96, "204a90d81f": 150, "207bc6cf01": 144, "208833d1d1": 180, "20c6d8b362": 46, "20e3e52e0a": 96, "2117fa0c14": 180, "211bc5d102": 150, "2120d9c3c3": 150, "2125235a49": 180, "21386f5978": 92, "2142af8795": 150, "215dfc0f73": 96, "217bae91e5": 180, "217c0d44e4": 150, "219057c87b": 150, "21d0edbf81": 96, "21df87ad76": 96, "21f1d089f5": 96, "21f4019116": 180, "222597030f": 91, "222904eb5b": 92, "223a0e0657": 180, "223bd973ab": 92, "22472f7395": 150, "224e7c833e": 96, "225aba51d9": 86, "2261d421ea": 180, "2263a8782b": 180, "2268cb1ffd": 150, "2268e93b0a": 61, "2293c99f3f": 180, "22a1141970": 91, "22b13084b2": 180, "22d9f5ab0c": 180, "22f02efe3a": 144, "232c09b75b": 150, "2350d71b4b": 180, "2376440551": 180, "2383d8aafd": 144, "238b84e67f": 96, "238d4b86f6": 91, "238d947c6b": 46, "23993ce90d": 180, "23b0c8a9ab": 150, "23b3beafcc": 156, "23d80299fe": 92, "23f404a9fc": 96, "240118e58a": 178, "2431dec2fd": 180, "24440e0ac7": 97, "2457274dbc": 180, "2465bf515d": 91, "246b142c4d": 180, "247d729e36": 96, "2481ceafeb": 150, "24866b4e6a": 150, "2489d78320": 180, "24ab0b83e8": 180, "24b0868d92": 180, "24b5207cd9": 96, "24ddf05c03": 92, "250116161c": 71, "256ad2e3fc": 180, "256bd83d5e": 180, "256dcc8ab8": 180, "2589956baa": 150, "258b3b33c6": 91, 
"25ad437e29": 96, "25ae395636": 180, "25c750c6db": 150, "25d2c3fe5d": 180, "25dc80db7c": 96, "25f97e926f": 180, "26011bc28b": 150, "260846ffbe": 180, "260dd9ad33": 66, "267964ee57": 92, "2680861931": 96, "268ac7d3fc": 180, "26b895d91e": 71, "26bc786d4f": 91, "26ddd2ef12": 180, "26de3d18ca": 150, "26f7784762": 180, "2703e52a6a": 180, "270ed80c12": 180, "2719b742ab": 180, "272f4163d0": 180, "27303333e1": 96, "27659fa7d6": 180, "279214115d": 180, "27a5f92a9c": 97, "27cf2af1f3": 150, "27f0d5f8a2": 86, "28075f33c1": 180, "281629cb41": 96, "282b0d51f5": 96, "282fcab00b": 96, "28449fa0dc": 180, "28475208ca": 96, "285580b7c4": 180, "285b69e223": 150, "288c117201": 150, "28a8eb9623": 180, "28bf9c3cf3": 180, "28c6b8f86a": 180, "28c972dacd": 144, "28d9fa6016": 96, "28e392de91": 144, "28f4a45190": 150, "298c844fc9": 91, "29a0356a2b": 180, "29d779f9e3": 76, "29dde5f12b": 86, "29de7b6579": 150, "29e630bdd0": 144, "29f2332d30": 144, "2a18873352": 92, "2a3824ff31": 91, "2a559dd27f": 96, "2a5c09acbd": 76, "2a63eb1524": 96, "2a6a30a4ea": 150, "2a6d9099d1": 180, "2a821394e3": 81, "2a8c5b1342": 96, "2abc8d66d2": 96, "2ac9ef904a": 46, "2b08f37364": 150, "2b351bfd7d": 180, "2b659a49d7": 66, "2b69ee5c26": 96, "2b6c30bbbd": 180, "2b88561cf2": 144, "2b8b14954e": 180, "2ba621c750": 150, "2bab50f9a7": 180, "2bb00c2434": 91, "2bbde474ef": 92, "2bdd82fb86": 150, "2be06fb855": 96, "2bf545c2f5": 180, "2bffe4cf9a": 96, "2c04b887b7": 144, "2c05209105": 180, "2c0ad8cf39": 180, "2c11fedca8": 56, "2c1a94ebfb": 91, "2c1e8c8e2f": 180, "2c29fabcf1": 96, "2c2c076c01": 180, "2c3ea7ee7d": 92, "2c41fa0648": 87, "2c44bb6d1c": 96, "2c54cfbb78": 180, "2c5537eddf": 180, "2c6e63b7de": 150, "2cb10c6a7e": 180, "2cbcd5ccd1": 180, "2cc5d9c5f6": 180, "2cd01cf915": 180, "2cdbf5f0a7": 91, "2ce660f123": 96, "2cf114677e": 150, "2d01eef98e": 180, "2d03593bdc": 96, "2d183ac8c4": 180, "2d33ad3935": 96, "2d3991d83e": 150, "2d4333577b": 180, "2d4d015c64": 96, "2d8f5e5025": 144, "2d900bdb8e": 180, "2d9a1a1d49": 46, "2db0576a5c": 180, "2dc0838721": 180, "2dcc417f82": 150, "2df005b843": 180, "2df356de14": 180, "2e00393d96": 61, "2e03b8127a": 180, "2e0f886168": 96, "2e2bf37e6d": 180, "2e42410932": 87, "2ea78f46e4": 180, "2ebb017a26": 180, "2ee2edba2a": 96, "2efb07554a": 180, "2f17e4fc1e": 96, "2f2c65c2f3": 144, "2f2d9b33be": 150, "2f309c206b": 180, "2f53822e88": 144, "2f53998171": 96, "2f5b0c89b1": 180, "2f680909e6": 180, "2f710f66bd": 180, "2f724132b9": 91, "2f7e3517ae": 91, "2f96f5fc6f": 180, "2f97d9fecb": 96, "2fbfa431ec": 96, "2fc9520b53": 180, "2fcd9f4c62": 180, "2feb30f208": 87, "2ff7f5744f": 150, "30085a2cc6": 96, "30176e3615": 56, "301f72ee11": 92, "3026bb2f61": 180, "30318465dc": 150, "3054ca937d": 180, "306121e726": 92, "3064ad91e8": 180, "307444a47f": 180, "307bbb7409": 91, "30a20194ab": 144, "30c35c64a4": 150, "30dbdb2cd6": 91, "30fc77d72f": 150, "310021b58b": 96, "3113140ee8": 144, "3150b2ee57": 180, "31539918c4": 180, "318dfe2ce2": 144, "3193da4835": 91, "319f725ad9": 180, "31bbd0d793": 91, "322505c47f": 180, "322b237865": 92, "322da43910": 97, "3245e049fb": 66, "324c4c38f6": 180, "324e35111a": 150, "3252398f09": 150, "327dc4cabf": 180, "328d918c7d": 180, "3290c0de97": 96, "3299ae3116": 180, "32a7cd687b": 150, "33098cedb4": 92, "3332334ac4": 180, "334cb835ac": 180, "3355e056eb": 180, "33639a2847": 180, "3373891cdc": 180, "337975816b": 180, "33e29d7e91": 96, "34046fe4f2": 180, "3424f58959": 180, "34370a710f": 92, "343bc6a65a": 179, "3450382ef7": 144, "3454303a08": 180, "346aacf439": 180, "346e92ff37": 180, "34a5ece7dd": 144, "34b109755a": 
180, "34d1b37101": 96, "34dd2c70a7": 180, "34efa703df": 180, "34fbee00a6": 150, "3504df2fda": 96, "35195a56a1": 150, "351c822748": 180, "351cfd6bc5": 180, "3543d8334c": 180, "35573455c7": 96, "35637a827f": 96, "357a710863": 92, "358bf16f9e": 96, "35ab34cc34": 180, "35c6235b8d": 91, "35d01a438a": 180, "3605019d3b": 96, "3609bc3f88": 92, "360e25da17": 97, "36299c687c": 96, "362c5bc56e": 180, "3649228783": 150, "365b0501ea": 92, "365f459863": 180, "369893f3ad": 180, "369c9977e1": 180, "369dde050a": 96, "36c7dac02f": 180, "36d5b1493b": 180, "36f5cc68fd": 91, "3735480d18": 180, "374b479880": 97, "375a49d38f": 180, "375a5c0e09": 180, "376bda9651": 144, "377db65f60": 144, "37c19d1087": 46, "37d4ae24fc": 96, "37ddce7f8b": 180, "37e10d33af": 180, "37e45c6247": 96, "37fa0001e8": 180, "3802d458c0": 150, "382caa3cb4": 91, "383bb93111": 91, "388843df90": 180, "38924f4a7f": 92, "38b00f93d7": 92, "38c197c10e": 96, "38c9c3d801": 180, "38eb2bf67f": 92, "38fe9b3ed1": 180, "390352cced": 180, "390c51b987": 96, "390ca6f1d6": 144, "392bc0f8a1": 96, "392ecb43bd": 92, "3935291688": 150, "3935e63b41": 180, "394454fa9c": 180, "394638fc8b": 96, "39545e20b7": 180, "397abeae8f": 180, "3988074b88": 91, "398f5d5f19": 174, "39bc49a28c": 180, "39befd99fb": 144, "39c3c7bf55": 180, "39d584b09f": 91, "39f6f6ffb1": 180, "3a079fb484": 180, "3a0d3a81b7": 150, "3a1d55d22b": 82, "3a20a7583e": 96, "3a2c1f66e5": 150, "3a33f4d225": 180, "3a3bf84b13": 144, "3a4565e5ec": 144, "3a4e32ed5e": 180, "3a7ad86ce0": 180, "3a7bdde9b8": 180, "3a98867cbe": 91, "3aa3f1c9e8": 150, "3aa7fce8b6": 91, "3aa876887d": 96, "3ab807ded6": 96, "3ab9b1a85a": 96, "3adac8d7da": 180, "3ae1a4016f": 96, "3ae2deaec2": 180, "3ae81609d6": 144, "3af847e62f": 92, "3b23792b84": 144, "3b3b0af2ee": 150, "3b512dad74": 144, "3b6c7988f6": 91, "3b6e983b5b": 180, "3b74a0fc20": 180, "3b7a50b80d": 180, "3b96d3492f": 180, "3b9ad0c5a9": 150, "3b9ba0894a": 180, "3bb4e10ed7": 144, "3bd9a9b515": 150, "3beef45388": 96, "3c019c0a24": 96, "3c090704aa": 96, "3c2784fc0d": 144, "3c47ab95f8": 150, "3c4db32d74": 91, "3c5ff93faf": 180, "3c700f073e": 180, "3c713cbf2f": 91, "3c8320669c": 180, "3c90d225ee": 180, "3cadbcc404": 96, "3cb9be84a5": 150, "3cc37fd487": 91, "3cc6f90cb2": 92, "3cd5e035ef": 180, "3cdf03531b": 178, "3cdf828f59": 180, "3d254b0bca": 180, "3d5aeac5ba": 180, "3d690473e1": 180, "3d69fed2fb": 96, "3d8997aeb6": 96, "3db0d6b07e": 96, "3db1ddb8cf": 180, "3db907ac77": 180, "3dcbc0635b": 150, "3dd48ed55f": 144, "3de4ac4ec4": 92, "3decd63d88": 180, "3e04a6be11": 180, "3e108fb65a": 96, "3e1448b01c": 150, "3e16c19634": 180, "3e2845307e": 61, "3e38336da5": 96, "3e3a819865": 180, "3e3e4be915": 96, "3e680622d7": 91, "3e7d2aeb07": 96, "3e7d8f363d": 180, "3e91f10205": 26, "3ea4c49bbe": 144, "3eb39d11ab": 180, "3ec273c8d5": 96, "3ed3f91271": 76, "3ee062a2fd": 180, "3eede9782c": 180, "3ef2fa99cb": 180, "3efc6e9892": 92, "3f0b0dfddd": 96, "3f0c860359": 91, "3f18728586": 180, "3f3b15f083": 96, "3f45a470ad": 46, "3f4f3bc803": 150, "3fd96c5267": 91, "3fea675fab": 91, "3fee8cbc9f": 96, "3fff16d112": 180, "401888b36c": 144, "4019231330": 150, "402316532d": 180, "402680df52": 180, "404d02e0c0": 150, "40709263a8": 81, "4083cfbe15": 150, "40a96c5cb1": 96, "40b8e50f82": 91, "40f4026bf5": 144, "4100b57a3a": 150, "41059fdd0b": 180, "41124e36de": 144, "4122aba5f9": 180, "413bab0f0d": 96, "4164faee0b": 180, "418035eec9": 180, "4182d51532": 96, "418bb97e10": 144, "41a34c20e7": 96, "41dab05200": 180, "41ff6d5e2a": 77, "420caf0859": 56, "42264230ba": 96, "425a0c96e0": 91, "42da96b87c": 180, "42eb5a5b0f": 180, 
"42f17cd14d": 91, "42f5c61c49": 180, "42ffdcdee9": 180, "432f9884f9": 91, "43326d9940": 150, "4350f3ab60": 144, "4399ffade3": 96, "43a6c21f37": 150, "43b5555faa": 180, "43d63b752a": 180, "4416bdd6ac": 92, "4444753edd": 76, "444aa274e7": 150, "444d4e0596": 150, "446b8b5f7a": 96, "4478f694bb": 91, "44b1da0d87": 92, "44b4dad8c9": 96, "44b5ece1b9": 180, "44d239b24e": 150, "44eaf8f51e": 180, "44f4f57099": 96, "44f7422af2": 180, "450787ac97": 180, "4523656564": 96, "4536c882e5": 180, "453b65daa4": 180, "454f227427": 91, "45636d806a": 180, "456fb9362e": 91, "457e717a14": 150, "45a89f35e1": 180, "45bf0e947d": 150, "45c36a9eab": 150, "45d9fc1357": 174, "45f8128b97": 180, "4607f6c03c": 91, "46146dfd39": 92, "4620e66b1e": 150, "4625f3f2d3": 96, "462b22f263": 96, "4634736113": 180, "463c0f4fdd": 180, "46565a75f8": 96, "46630b55ae": 56, "466839cb37": 91, "466ba4ae0c": 180, "4680236c9d": 180, "46bf4e8709": 91, "46e18e42f1": 150, "46f5093c59": 180, "47269e0499": 92, "472da1c484": 144, "47354fab09": 180, "4743bb84a7": 92, "474a796272": 180, "4783d2ab87": 96, "479cad5da3": 180, "479f5d7ef6": 96, "47a05fbd1d": 96, "4804ee2767": 97, "4810c3fbca": 180, "482fb439c2": 150, "48375af288": 96, "484ab44de4": 96, "485f3944cd": 96, "4867b84887": 150, "486a8ac57e": 180, "486e69c5bd": 180, "48812cf33e": 150, "4894b3b9ea": 180, "48bd66517d": 180, "48d83b48a4": 91, "49058178b8": 46, "4918d10ff0": 91, "4932911f80": 150, "49405b7900": 180, "49972c2d14": 150, "499bf07002": 96, "49b16e9377": 180, "49c104258e": 144, "49c879f82d": 96, "49e7326789": 180, "49ec3e406a": 91, "49fbf0c98a": 96, "4a0255c865": 180, "4a088fe99a": 96, "4a341402d0": 180, "4a3471bdf5": 96, "4a4b50571c": 144, "4a50f3d2e9": 96, "4a6e3faaa1": 180, "4a7191f08a": 150, "4a86fcfc30": 180, "4a885fa3ef": 144, "4a8af115de": 21, "4aa2e0f865": 180, "4aa9d6527f": 180, "4abb74bb52": 96, "4ae13de1cd": 91, "4af8cb323f": 97, "4b02c272b3": 180, "4b19c529fb": 96, "4b2974eff4": 180, "4b3154c159": 95, "4b54d2587f": 180, "4b556740ff": 144, "4b67aa9ef6": 178, "4b97cc7b8d": 96, "4baa1ed4aa": 91, "4bc8c676bb": 96, "4beaea4dbe": 180, "4bf5763d24": 96, "4bffa92b67": 138, "4c25dfa8ec": 96, "4c397b6fd4": 180, "4c51e75d66": 150, "4c7710908f": 180, "4c9b5017be": 180, "4ca2ffc361": 92, "4cad2e93bc": 150, "4cd427b535": 180, "4cd9a4b1ef": 180, "4cdfe3c2b2": 180, "4cef87b649": 96, "4cf208e9b3": 180, "4cf5bc3e60": 92, "4cfdd73249": 91, "4cff5c9e42": 180, "4d26d41091": 96, "4d5c23c554": 180, "4d67c59727": 150, "4d983cad9f": 180, "4da0d00b55": 144, "4daa179861": 91, "4dadd57153": 92, "4db117e6c5": 91, "4de4ce4dea": 180, "4dfaee19e5": 180, "4dfdd7fab0": 180, "4e3f346aa5": 92, "4e49c2a9c7": 56, "4e4e06a749": 180, "4e70279712": 96, "4e72856cc7": 91, "4e752f8075": 180, "4e7a28907f": 66, "4e824b9247": 180, "4e82b1df57": 180, "4e87a639bc": 180, "4ea77bfd15": 150, "4eb6fc23a2": 180, "4ec9da329e": 96, "4efb9a0720": 180, "4f062fbc63": 96, "4f35be0e0b": 96, "4f37e86797": 91, "4f414dd6e7": 180, "4f424abded": 180, "4f470cc3ae": 144, "4f601d255a": 150, "4f7386a1ab": 144, "4f824d3dcd": 91, "4f827b0751": 144, "4f8db33a13": 180, "4fa160f8a3": 180, "4fa9c30a45": 180, "4facd8f0e8": 96, "4fca07ad01": 91, "4fded94004": 180, "4fdfef4dea": 91, "4feb3ac01f": 92, "4fffec8479": 96, "500c835a86": 180, "50168342bf": 180, "50243cffdc": 180, "5031d5a036": 180, "504dd9c0fd": 96, "50568fbcfb": 180, "5069c7c5b3": 180, "508189ac91": 180, "50b6b3d4b7": 91, "50c6f4fe3e": 86, "50cce40173": 180, "50efbe152f": 180, "50f290b95d": 91, "5104aa1fea": 96, "5110dc72c0": 180, "511e8ecd7f": 150, "513aada14e": 92, "5158d6e985": 180, 
"5161e1fa57": 180, "51794ddd58": 96, "517d276725": 91, "51a597ee04": 51, "51b37b6d97": 96, "51b5dc30a0": 96, "51e85b347b": 180, "51eea1fdac": 150, "51eef778af": 91, "51f384721c": 76, "521cfadcb4": 180, "52355da42f": 96, "5247d4b160": 180, "524b470fd0": 180, "524cee1534": 96, "5252195e8a": 91, "5255c9ca97": 144, "525928f46f": 96, "526df007a7": 180, "529b12de78": 91, "52c7a3d653": 150, "52c8ec0373": 91, "52d225ed52": 96, "52ee406d9e": 180, "52ff1ccd4a": 96, "53143511e8": 180, "5316d11eb7": 96, "53253f2362": 180, "534a560609": 91, "5352c4a70e": 180, "536096501f": 92, "536b17bcea": 180, "5380eaabff": 144, "5390a43a54": 180, "53af427bb2": 91, "53bf5964ce": 180, "53c30110b5": 96, "53cad8e44a": 150, "53d9c45013": 91, "53e274f1b5": 150, "53e32d21ea": 96, "540850e1c7": 96, "540cb31cfe": 180, "541c4da30f": 91, "541d7935d7": 180, "545468262b": 180, "5458647306": 144, "54657855cd": 96, "547b3fb23b": 180, "5497dc3712": 150, "549c56f1d4": 96, "54a4260bb1": 150, "54b98b8d5e": 180, "54e1054b0f": 91, "54e8867b83": 180, "54ebe34f6e": 180, "5519b4ad13": 86, "551acbffd5": 150, "55341f42da": 180, "5566ab97e1": 91, "556c79bbf2": 144, "5589637cc4": 180, "558aa072f0": 180, "559824b6f6": 91, "55c1764e90": 180, "55eda6c77e": 180, "562d173565": 150, "5665c024cb": 96, "566cef4959": 91, "5675d78833": 144, "5678a91bd8": 180, "567a2b4bd0": 180, "569c282890": 86, "56cc449917": 150, "56e71f3e07": 150, "56f09b9d92": 180, "56fc0e8cf9": 144, "571ca79c71": 91, "57243657cf": 144, "57246af7d1": 91, "57427393e9": 96, "574b682c19": 180, "578f211b86": 180, "5790ac295d": 91, "579393912d": 180, "57a344ab1a": 180, "57bd3bcda4": 180, "57bfb7fa4c": 150, "57c010175e": 180, "57c457cc75": 180, "57c7fc2183": 150, "57d5289a01": 61, "58045fde85": 96, "58163c37cd": 150, "582d463e5c": 180, "5851739c15": 180, "585dd0f208": 66, "587250f3c3": 180, "589e4cc1de": 180, "589f65f5d5": 180, "58a07c17d5": 180, "58adc6d8b6": 76, "58b9bcf656": 96, "58c374917e": 96, "58fc75fd42": 87, "5914c30f05": 96, "59323787d5": 150, "5937b08d69": 96, "594065ddd7": 96, "595a0ceea6": 91, "59623ec40b": 91, "597ff7ef78": 150, "598935ef05": 46, "598c2ad3b2": 180, "59a6459751": 180, "59b175e138": 96, "59bf0a149f": 180, "59d53d1649": 180, "59e3e6fae7": 180, "59fe33e560": 180, "5a13a73fe5": 96, "5a25c22770": 150, "5a4a785006": 96, "5a50640995": 180, "5a75f7a1cf": 96, "5a841e59ad": 180, "5a91c5ab6d": 150, "5ab49d9de0": 96, "5aba1057fe": 180, "5abe46ba6d": 91, "5ac7c88d0c": 180, "5aeb95cc7d": 92, "5af15e4fc3": 91, "5afe381ae4": 96, "5b07b4229d": 51, "5b1001cc4f": 180, "5b1df237d2": 180, "5b263013bf": 91, "5b27d19f0b": 180, "5b48ae16c5": 96, "5b5babc719": 180, "5baaebdf00": 180, "5bab55cdbe": 180, "5bafef6e79": 96, "5bc77844da": 180, "5bd1f84545": 180, "5bddc3ba25": 180, "5bdf7c20d2": 180, "5bf23bc9d3": 180, "5c01f6171a": 144, "5c021681b7": 96, "5c185cff1d": 180, "5c42aba280": 180, "5c44bf8ab6": 180, "5c4c574894": 144, "5c52fa4662": 76, "5c6ea7dac3": 96, "5c74315dc2": 180, "5c7668855e": 92, "5c83e96778": 180, "5ca36173e4": 96, "5cac477371": 97, "5cb0cb1b2f": 96, "5cb0cfb98f": 144, "5cb49a19cf": 180, "5cbf7dc388": 180, "5d0e07d126": 96, "5d1e24b6e3": 81, "5d663000ff": 150, "5da6b2dc5d": 180, "5de9b90f24": 61, "5e08de0ed7": 180, "5e1011df9a": 87, "5e1ce354fd": 150, "5e35512dd7": 180, "5e418b25f9": 96, "5e4849935a": 144, "5e4ee19663": 96, "5e886ef78f": 96, "5e8d00b974": 180, "5e8d59dc31": 180, "5ed838bd5c": 96, "5edda6ee5a": 180, "5ede4d2f7a": 144, "5ede9767da": 144, "5ee23ca60e": 87, "5eec4d9fe5": 96, "5eecf07824": 180, "5eef7ed4f4": 91, "5ef5860ac6": 144, "5ef6573a99": 96, 
"5f1193e72b": 91, "5f29ced797": 96, "5f32cf521e": 150, "5f51876986": 96, "5f6ebe94a9": 86, "5f6f14977c": 91, "5f808d0d2d": 91, "5fb8aded6a": 180, "5fba90767d": 96, "5fd1c7a3df": 92, "5fd3da9f68": 91, "5fee2570ae": 180, "5ff66140d6": 180, "5ff8b85b53": 180, "600803c0f6": 180, "600be7f53e": 96, "6024888af8": 180, "603189a03c": 96, "6057307f6e": 180, "6061ddbb65": 96, "606c86c455": 180, "60c61cc2e5": 180, "60e51ff1ae": 150, "610e38b751": 150, "61344be2f6": 180, "6135e27185": 96, "614afe7975": 150, "614e571886": 180, "614e7078db": 96, "619812a1a7": 96, "61b481a78b": 96, "61c7172650": 180, "61cf7e40d2": 96, "61d08ef5a1": 46, "61da008958": 96, "61ed178ecb": 61, "61f5d1282c": 92, "61fd977e49": 144, "621584cffe": 180, "625817a927": 180, "625892cf0b": 96, "625b89d28a": 91, "629995af95": 150, "62a0840bb5": 180, "62ad6e121c": 87, "62d6ece152": 91, "62ede7b2da": 91, "62f025e1bc": 180, "6316faaebc": 97, "63281534dc": 150, "634058dda0": 144, "6353f09384": 180, "6363c87314": 180, "636e4872e0": 180, "637681cd6b": 180, "6376d49f31": 180, "6377809ec2": 180, "63936d7de5": 96, "639bddef11": 150, "63d37e9fd3": 180, "63d90c2bae": 96, "63e544a5d6": 180, "63ebbcf874": 96, "63fff40b31": 180, "6406c72e4d": 61, "64148128be": 96, "6419386729": 150, "643092bc41": 96, "644081b88d": 144, "64453cf61d": 180, "644bad9729": 96, "6454f548fd": 180, "645913b63a": 180, "64750b825f": 180, "64a43876b7": 96, "64dd6c83e3": 92, "64e05bf46e": 96, "64f55f1478": 150, "650b0165e4": 180, "651066ed39": 180, "652b67d960": 180, "653821d680": 180, "6538d00d73": 180, "65866dce22": 150, "6589565c8c": 150, "659832db64": 180, "65ab7e1d98": 180, "65b7dda462": 180, "65bd5eb4f5": 180, "65dcf115ab": 91, "65e9825801": 180, "65f9afe51c": 91, "65ff12bcb5": 180, "666b660284": 180, "6671643f31": 180, "668364b372": 96, "66852243cb": 96, "6693a52081": 180, "669b572898": 180, "66e98e78f5": 91, "670f12e88f": 180, "674c12c92d": 91, "675c27208a": 180, "675ed3e1ca": 144, "67741db50a": 96, "678a2357eb": 70, "67b0f4d562": 180, "67cfbff9b1": 180, "67e717d6bd": 91, "67ea169a3b": 92, "67ea809e0e": 180, "681249baa3": 180, "683de643d9": 180, "6846ac20df": 96, "6848e012ef": 96, "684bcd8812": 96, "684dc1c40c": 96, "685a1fa9cf": 91, "686dafaac9": 144, "68807d8601": 96, "6893778c77": 96, "6899d2dabe": 91, "68a2fad4ab": 180, "68cb45fda3": 180, "68cc4a1970": 96, "68dcb40675": 180, "68ea4a8c3d": 180, "68f6e7fbf0": 96, "68fa8300b4": 180, "69023db81f": 96, "6908ccf557": 91, "691a111e7c": 180, "6927723ba5": 180, "692ca0e1a2": 97, "692eb57b63": 180, "69340faa52": 96, "693cbf0c9d": 180, "6942f684ad": 96, "6944fc833b": 180, "69491c0ebf": 91, "695b61a2b0": 96, "6979b4d83f": 180, "697d4fdb02": 144, "69910460a4": 180, "6997636670": 180, "69a436750b": 96, "69aebf7669": 180, "69b8c17047": 180, "69c67f109f": 180, "69e0e7b868": 180, "69ea9c09d1": 180, "69f0af42a6": 97, "6a078cdcc7": 144, "6a37a91708": 71, "6a42176f2e": 180, "6a48e4aea8": 96, "6a5977be3a": 180, "6a5de0535f": 180, "6a80d2e2e5": 96, "6a96c8815d": 180, "6a986084e2": 96, "6aa8e50445": 92, "6ab9dce449": 150, "6abf0ba6b2": 180, "6acc6049d9": 96, "6adb31756c": 180, "6ade215eb0": 96, "6afb7d50e4": 144, "6afd692f1a": 180, "6b0b1044fe": 91, "6b17c67633": 180, "6b1b6ef28b": 92, "6b1e04d00d": 180, "6b2261888d": 96, "6b25d6528a": 144, "6b3a24395c": 150, "6b685eb75b": 96, "6b79be238c": 92, "6b928b7ba6": 96, "6b9c43c25a": 180, "6ba99cc41f": 91, "6bdab62bcd": 86, "6bf2e853b1": 180, "6bf584200f": 180, "6bf95df2b9": 150, "6c0949c51c": 180, "6c11a5f11f": 96, "6c23d89189": 61, "6c4387daf5": 96, "6c4ce479a4": 86, "6c5123e4bc": 96, 
"6c54265f16": 92, "6c56848429": 96, "6c623fac5f": 36, "6c81b014e9": 96, "6c99ea7c31": 92, "6c9d29d509": 91, "6c9e3b7d1a": 91, "6ca006e283": 96, "6caeb928d6": 180, "6cb2ee722a": 180, "6cbfd32c5e": 180, "6cc791250b": 150, "6cccc985e0": 96, "6d12e30c48": 180, "6d4bf200ad": 180, "6d6d2b8843": 91, "6d6eea5682": 180, "6d7a3d0c21": 96, "6d7efa9b9e": 180, "6da21f5c91": 180, "6da6adabc0": 150, "6dd2827fbb": 96, "6dd36705b9": 131, "6df3637557": 180, "6dfe55e9e5": 150, "6e1a21ba55": 96, "6e2f834767": 180, "6e36e4929a": 96, "6e4f460caf": 96, "6e618d26b6": 56, "6ead4670f7": 180, "6eaff19b9f": 180, "6eb2e1cd9e": 180, "6eb30b3b5a": 96, "6eca26c202": 91, "6ecad29e52": 96, "6ef0b44654": 96, "6efcfe9275": 180, "6f4789045c": 180, "6f49f522ef": 96, "6f67d7c4c4": 180, "6f96e91d81": 144, "6fc6fce380": 180, "6fc9b44c00": 96, "6fce7f3226": 150, "6fdf1ca888": 150, "702fd8b729": 180, "70405185d2": 180, "7053e4f41e": 180, "707bf4ce41": 87, "7082544248": 81, "708535b72a": 96, "7094ac0f60": 180, "70a6b875fa": 180, "70c3e97e41": 180, "7106b020ab": 91, "711dce6fe2": 96, "7136a4453f": 180, "7143fb084f": 180, "714d902095": 150, "7151c53b32": 150, "715357be94": 180, "7163b8085f": 150, "716df1aa59": 150, "71caded286": 150, "71d2665f35": 91, "71d67b9e19": 96, "71e06dda39": 180, "720b398b9c": 91, "720e3fa04c": 150, "720e7a5f1e": 91, "721bb6f2cb": 91, "722803f4f2": 92, "72552a07c9": 91, "726243a205": 96, "72690ef572": 46, "728cda9b65": 86, "728e81c319": 91, "72a810a799": 180, "72acb8cdf6": 180, "72b01281f9": 180, "72cac683e4": 91, "72cadebbce": 180, "72cae058a5": 180, "72d8dba870": 180, "72e8d1c1ff": 96, "72edc08285": 180, "72f04f1a38": 81, "731b825695": 144, "7320b49b13": 180, "732626383b": 87, "732df1eb05": 150, "73329902ab": 150, "733798921e": 150, "733824d431": 150, "734ea0d7fb": 91, "735a7cf7b9": 144, "7367a42892": 91, "7368d5c053": 180, "738e5a0a14": 180, "73c6ae7711": 96, "73e1852735": 150, "73e4e5cc74": 150, "73eac9156b": 180, "73f8441a88": 91, "7419e2ab3f": 91, "74267f68b9": 91, "7435690c8c": 46, "747c44785c": 81, "747f1b1f2f": 144, "748b2d5c01": 96, "74d4cee0a4": 91, "74ec2b3073": 91, "74ef677020": 96, "750be4c4d8": 96, "75172d4ac8": 96, "75285a7eb1": 180, "75504539c3": 91, "7550949b1d": 96, "7551cbd537": 150, "75595b453d": 91, "7559b4b0ec": 91, "755bd1fbeb": 96, "756f76f74d": 180, "7570ca7f3c": 180, "757a69746e": 180, "757cac96c6": 180, "7584129dc3": 144, "75a058dbcd": 91, "75b09ce005": 96, "75cae39a8f": 180, "75cee6caf0": 180, "75cf58fb2c": 91, "75d5c2f32a": 180, "75eaf5669d": 96, "75f7937438": 180, "75f99bd3b3": 96, "75fa586876": 92, "7613df1f84": 150, "762e1b3487": 96, "76379a3e69": 180, "764271f0f3": 92, "764503c499": 86, "7660005554": 46, "7666351b84": 96, "76693db153": 51, "767856368b": 92, "768671f652": 180, "768802b80d": 180, "76962c7ed2": 71, "76a75f4eee": 150, "76b90809f7": 180, "770a441457": 96, "772a0fa402": 180, "772f2ffc3e": 91, "774f6c2175": 180, "77610860e0": 56, "777e58ff3d": 96, "77920f1708": 150, "7799df28e7": 180, "779e847a9a": 81, "77ba4edc72": 96, "77c834dc43": 41, "77d8aa8691": 180, "77e7f38f4d": 144, "77eea6845e": 96, "7806308f33": 91, "78254660ea": 91, "7828af8bff": 180, "784398620a": 71, "784d201b12": 96, "78613981ed": 180, "78896c6baf": 92, "78aff3ebc0": 150, "78c7c03716": 91, "78d3676361": 91, "78e29dd4c3": 150, "78f1a1a54f": 91, "79208585cd": 180, "792218456c": 180, "7923bad550": 150, "794e6fc49f": 96, "796e6762ce": 180, "797cd21f71": 150, "79921b21c2": 150, "79a5778027": 180, "79bc006280": 180, "79bf95e624": 91, "79d9e00c55": 91, "79e20fc008": 96, "79e9db913e": 180, "79f014085e": 91, 
"79fcbb433a": 150, "7a13a5dfaa": 180, "7a14bc9a36": 96, "7a3c535f70": 96, "7a446a51e9": 91, "7a56e759c5": 91, "7a5f46198d": 86, "7a626ec98d": 92, "7a802264c4": 180, "7a8b5456ca": 180, "7abdff3086": 150, "7aecf9f7ac": 150, "7b0fd09c28": 96, "7b18b3db87": 180, "7b39fe7371": 144, "7b49e03d4c": 180, "7b5388c9f1": 180, "7b5cf7837f": 180, "7b733d31d8": 180, "7b74fd7b98": 180, "7b918ccb8a": 150, "7ba3ce3485": 96, "7bb0abc031": 180, "7bb5bb25cd": 180, "7bb7dac673": 92, "7bc7761b8c": 180, "7bf3820566": 96, "7c03a18ec1": 96, "7c078f211b": 150, "7c37d7991a": 71, "7c4ec17eff": 144, "7c649c2aaf": 180, "7c73340ab7": 91, "7c78a2266d": 180, "7c88ce3c5b": 180, "7ca6843a72": 180, "7cc9258dee": 96, "7cec7296ae": 46, "7d0ffa68a4": 96, "7d11b4450f": 81, "7d1333fcbe": 96, "7d18074fef": 91, "7d18c8c716": 96, "7d508fb027": 180, "7d55f791f0": 180, "7d74e3c2f6": 150, "7d783f67a9": 96, "7d83a5d854": 150, "7dd409947e": 180, "7de45f75e5": 150, "7e0cd25696": 150, "7e1922575c": 96, "7e1e3bbcc1": 180, "7e24023274": 180, "7e2f212fd3": 96, "7e6d1cc1f4": 180, "7e7cdcb284": 144, "7e9b6bef69": 66, "7ea5b49283": 92, "7eb2605d96": 91, "7eb26b8485": 180, "7ecd1f0c69": 96, "7f02b3cfe2": 180, "7f1723f0d5": 97, "7f21063c3a": 81, "7f3658460e": 91, "7f54132e48": 144, "7f559f9d4a": 144, "7f5faedf8b": 96, "7f838baf2b": 180, "7fa5f527e3": 96, "7ff84d66dd": 150, "802b45c8c4": 180, "804382b1ad": 180, "804c558adb": 96, "804f6338a4": 180, "8056117b89": 150, "806b6223ab": 96, "8088bda461": 46, "80b790703b": 180, "80c4a94706": 96, "80ce2e351b": 180, "80db581acd": 96, "80e12193df": 150, "80e41b608f": 180, "80f16b016d": 91, "81541b3725": 91, "8175486e6a": 96, "8179095000": 180, "8193671178": 180, "81a58d2c6b": 150, "81aa1286fb": 96, "81dffd30fb": 96, "8200245704": 41, "823e7a86e8": 46, "824973babb": 144, "824ca5538f": 180, "827171a845": 180, "8273a03530": 180, "827cf4f886": 91, "82b865c7dd": 180, "82c1517708": 91, "82d15514d6": 150, "82e117b900": 179, "82fec06574": 150, "832b5ef379": 97, "83424c9fbf": 180, "8345358fb8": 71, "834b50b31b": 180, "835e3b67d7": 97, "836ea92b15": 90, "837c618777": 144, "838eb3bd89": 180, "839381063f": 91, "839bc71489": 180, "83a8151377": 180, "83ae88d217": 180, "83ca8bcad0": 180, "83ce590d7f": 180, "83d3130ba0": 36, "83d40bcba5": 86, "83daba503a": 144, "83de906ec0": 180, "84044f37f3": 180, "84696b5a5e": 96, "84752191a3": 91, "847eeeb2e0": 180, "848e7835a0": 96, "84a4b29286": 180, "84a4bf147d": 66, "84be115c09": 144, "84d95c4350": 180, "84e0922cf7": 150, "84f0cfc665": 96, "8515f6db22": 180, "851f2f32c1": 91, "852a4d6067": 150, "854c48b02a": 96, "857a387c86": 180, "859633d56a": 96, "85a4f4a639": 144, "85ab85510c": 180, "85b1eda0d9": 92, "85dc1041c6": 96, "85e081f3c7": 150, "85f75187ad": 96, "8604bb2b75": 96, "860745b042": 150, "863b4049d7": 180, "8643de22d0": 180, "8647d06439": 46, "864ffce4fe": 180, "8662d9441a": 180, "8666521b13": 76, "868d6a0685": 91, "869fa45998": 91, "86a40b655d": 150, "86a8ae4223": 92, "86b2180703": 180, "86c85d27df": 180, "86d3755680": 180, "86e61829a1": 180, "871015806c": 91, "871e409c5c": 180, "8744b861ce": 96, "8749369ba0": 180, "878a299541": 144, "8792c193a0": 96, "8799ab0118": 96, "87d1f7d741": 180, "882b9e4500": 180, "885673ea17": 180, "8859dedf41": 96, "8873ab2806": 91, "887a93b198": 180, "8883e991a9": 86, "8891aa6dfa": 91, "8899d8cbcd": 91, "88b8274d67": 180, "88d3b80af6": 91, "88ede83da2": 180, "88f345941b": 180, "890976d6da": 91, "8909bde9ab": 91, "8929c7d5d9": 180, "89363acf76": 150, "89379487e0": 96, "8939db6354": 180, "893f658345": 144, "8953138465": 180, "895c96d671": 180, 
"895cbf96f9": 180, "895e8b29a7": 91, "898fa256c8": 180, "89986c60be": 180, "89b874547b": 180, "89bdb021d5": 144, "89c802ff9c": 96, "89d6336c2b": 180, "89ebb27334": 91, "8a27e2407c": 96, "8a31f7bca5": 96, "8a4a2fc105": 96, "8a5d6c619c": 96, "8a75ad7924": 180, "8aa817e4ed": 87, "8aad0591eb": 180, "8aca214360": 180, "8ae168c71b": 96, "8b0cfbab97": 21, "8b3645d826": 96, "8b3805dbd4": 180, "8b473f0f5d": 180, "8b4f6d1186": 180, "8b4fb018b7": 66, "8b518ee936": 92, "8b523bdfd6": 150, "8b52fb5fba": 91, "8b91036e5c": 144, "8b99a77ac5": 180, "8ba04b1e7b": 96, "8ba782192f": 180, "8bbeaad78b": 96, "8bd1b45776": 180, "8bd7a2dda6": 150, "8bdb091ccf": 180, "8be56f165d": 96, "8be950d00f": 96, "8bf84e7d45": 180, "8bffc4374b": 66, "8bfff50747": 180, "8c09867481": 144, "8c0a3251c3": 180, "8c3015cccb": 180, "8c469815cf": 96, "8c9ccfedc7": 91, "8ca1af9f3c": 150, "8ca3f6e6c1": 96, "8ca6a4f60f": 96, "8cac6900fe": 96, "8cba221a1e": 180, "8cbbe62ccd": 180, "8d064b29e2": 92, "8d167e7c08": 91, "8d4ab94e1c": 96, "8d81f6f899": 180, "8d87897d66": 91, "8dcccd2bd2": 180, "8dcfb878a8": 150, "8dd3ab71b9": 91, "8dda6bf10f": 96, "8ddd51ca94": 180, "8dea22c533": 180, "8def5bd3bf": 96, "8e1848197c": 91, "8e3a83cf2d": 91, "8e478e73f3": 91, "8e98ae3c84": 96, "8ea6687ab0": 180, "8eb0d315c1": 91, "8ec10891f9": 150, "8ec3065ec2": 180, "8ecf51a971": 150, "8eddbab9f7": 91, "8ee198467a": 180, "8ee2368f40": 180, "8ef595ce82": 150, "8f0a653ad7": 150, "8f1204a732": 150, "8f1600f7f6": 91, "8f16366707": 96, "8f1ce0a411": 92, "8f2e05e814": 91, "8f320d0e09": 96, "8f3b4a84ad": 91, "8f3fdad3da": 96, "8f5d3622d8": 96, "8f62a2c633": 180, "8f81c9405a": 97, "8f8c974d53": 120, "8f918598b6": 96, "8ff61619f6": 96, "9002761b41": 96, "90107941f3": 92, "90118a42ee": 96, "902bc16b37": 91, "903e87e0d6": 144, "9041a0f489": 96, "9047bf3222": 51, "9057bfa502": 150, "90617b0954": 92, "9076f4b6db": 180, "9077e69b08": 144, "909655b4a6": 96, "909c2eca88": 180, "909dbd1b76": 180, "90bc4a319a": 180, "90c7a87887": 96, "90cc785ddd": 96, "90d300f09b": 180, "9101ea9b1b": 96, "9108130458": 150, "911ac9979b": 150, "9151cad9b5": 97, "9153762797": 180, "91634ee0c9": 91, "916942666f": 76, "9198cfb4ea": 180, "919ac864d6": 180, "91b67d58d4": 180, "91bb8df281": 150, "91be106477": 91, "91c33b4290": 180, "91ca7dd9f3": 144, "91d095f869": 180, "91f107082e": 180, "920329dd5e": 180, "920c959958": 150, "92128fbf4b": 144, "9223dacb40": 150, "923137bb7f": 61, "9268e1f88a": 180, "927647fe08": 150, "9276f5ba47": 150, "92a28cd233": 71, "92b5c1fc6d": 144, "92c46be756": 180, "92dabbe3a0": 96, "92e3159361": 180, "92ebab216a": 180, "934bdc2893": 180, "9359174efc": 180, "935d97dd2f": 91, "935feaba1b": 96, "93901858ee": 150, "939378f6d6": 91, "939bdf742e": 96, "93a22bee7e": 96, "93da9aeddf": 91, "93e2feacce": 180, "93e6f1fdf9": 96, "93e811e393": 180, "93e85d8fd3": 180, "93f623d716": 180, "93ff35e801": 46, "94031f12f2": 96, "94091a4873": 180, "94125907e3": 87, "9418653742": 91, "941c870569": 101, "94209c86f0": 180, "9437c715eb": 76, "9445c3eca2": 91, "9467c8617c": 96, "946d71fb5d": 96, "948f3ae6fb": 180, "9498baa359": 96, "94a33abeab": 91, "94bf1af5e3": 144, "94cf3a8025": 96, "94db712ac8": 180, "94e4b66cff": 92, "94e76cbaf6": 180, "950be91db1": 180, "952058e2d0": 92, "952633c37f": 96, "952ec313fe": 87, "9533fc037c": 96, "9574b81269": 92, "9579b73761": 180, "957f7bc48b": 180, "958073d2b0": 150, "9582e0eb33": 71, "9584092d0b": 91, "95b58b8004": 150, "95bd88da55": 180, "95f74a9959": 180, "962781c601": 180, "962f045bf5": 91, "964ad23b44": 91, "967b90590e": 144, "967bffe201": 86, "96825c4714": 81, 
"968492136a": 96, "9684ef9d64": 86, "968c41829e": 91, "96a856ef9a": 180, "96dfc49961": 180, "96e1a5b4f8": 180, "96e6ff0917": 150, "96fb88e9d7": 96, "96fbe5fc23": 150, "96fc924050": 96, "9715cc83dc": 180, "9720eff40f": 180, "972c187c0d": 180, "97476eb38d": 180, "97659ed431": 180, "9773492949": 96, "97756b264f": 96, "977bff0d10": 96, "97ab569ff3": 96, "97ba838008": 180, "97d9d008c7": 150, "97e59f09fa": 96, "97eb642e56": 96, "98043e2d14": 96, "981ff580cf": 180, "983e66cbfc": 96, "984f0f1c36": 180, "98595f2bb4": 91, "985c3be474": 91, "9869a12362": 180, "986b5a5e18": 180, "9877af5063": 180, "98911292da": 180, "9893a3cf77": 97, "9893d9202d": 91, "98a8b06e7f": 91, "98ac6f93d9": 150, "98b6974d12": 96, "98ba3c9417": 180, "98c7c00a19": 96, "98d044f206": 96, "98e909f9d1": 150, "98fe7f0410": 150, "990f2742c7": 96, "992bd0779a": 180, "994b9b47ba": 150, "9955b76bf5": 91, "9966f3adac": 46, "997117a654": 180, "999d53d841": 150, "99c04108d3": 180, "99c4277aee": 96, "99c6b1acf2": 96, "99dc8bb20b": 180, "99fcba71e5": 150, "99fecd4efb": 92, "9a02c70ba2": 96, "9a08e7a6f8": 180, "9a2f2c0f86": 81, "9a3254a76e": 92, "9a3570a020": 180, "9a39112493": 180, "9a4e9fd399": 180, "9a50af4bfb": 180, "9a68631d24": 150, "9a72318dbf": 92, "9a767493b7": 180, "9a7fc1548b": 96, "9a84ccf6a7": 150, "9a9c0e15b7": 96, "9adf06d89b": 150, "9b22b54ee4": 91, "9b473fc8fe": 96, "9b4f081782": 180, "9b997664ba": 180, "9bc454e109": 180, "9bccfd04de": 96, "9bce4583a2": 96, "9bebf1b87f": 158, "9bfc50d261": 180, "9c166c86ff": 96, "9c293ef4d7": 144, "9c29c047b0": 91, "9c3bc2e2a7": 96, "9c3ce23bd1": 91, "9c404cac0c": 180, "9c5180d23a": 144, "9c7feca6e4": 144, "9caa49d3ff": 180, "9cb2f1b646": 180, "9ce6f765c3": 91, "9cfee34031": 180, "9d01f08ec6": 180, "9d04c280b8": 91, "9d12ceaddc": 180, "9d15f8cb3c": 180, "9d2101e9bf": 180, "9d407c3aeb": 96, "9ddefc6165": 180, "9df0b1e298": 96, "9e16f115d8": 144, "9e249b4982": 96, "9e29b1982c": 92, "9e493e4773": 180, "9e4c752cd0": 91, "9e4de40671": 96, "9e6319faeb": 96, "9e6ddbb52d": 91, "9eadcea74f": 180, "9ecec5f8ea": 46, "9efb47b595": 96, "9f30bfe61e": 72, "9f3734c3a4": 180, "9f5b858101": 180, "9f66640cda": 180, "9f913803e9": 180, "9f97bc74c8": 180, "9fbad86e20": 180, "9fc2bad316": 180, "9fc5c3af78": 150, "9fcb310255": 92, "9fcc256871": 91, "9fd2fd4d47": 180, "a0071ae316": 96, "a023141022": 56, "a046399a74": 96, "a066e739c1": 150, "a06722ba82": 96, "a07a15dd64": 180, "a07b47f694": 180, "a09c39472e": 144, "a0b208fe2e": 91, "a0b61c959e": 96, "a0bc6c611d": 180, "a0e6da5ba2": 91, "a1193d6490": 96, "a14ef483ff": 91, "a14f709908": 180, "a15ccc5658": 96, "a16062456f": 180, "a174e8d989": 91, "a177c2733c": 150, "a17c62e764": 92, "a18ad065fc": 150, "a1aaf63216": 96, "a1bb65fb91": 150, "a1bd8e5349": 91, "a1dfdd0cac": 180, "a2052e4f6c": 96, "a20fd34693": 96, "a21ffe4d81": 150, "a22349e647": 180, "a235d01ec1": 180, "a24f63e8a2": 180, "a2554c9f6d": 46, "a263ce8a87": 180, "a29bfc29ec": 91, "a2a80072d4": 150, "a2a800ab63": 180, "a2bcd10a33": 180, "a2bdaff3b0": 91, "a2c146ab0d": 91, "a2c996e429": 96, "a2dc51ebe8": 180, "a2e6608bfa": 180, "a2f2a55f01": 96, "a301869dea": 180, "a31fccd2cc": 180, "a34f440f33": 180, "a35e0206da": 180, "a36bdc4cab": 180, "a36e8c79d8": 71, "a378053b20": 144, "a37db3a2b3": 91, "a38950ebc2": 180, "a39a0eb433": 91, "a39c9bca52": 180, "a3a945dc8c": 91, "a3b40a0c1e": 150, "a3b8588550": 91, "a3c502bec3": 180, "a3f2878017": 180, "a3f4d58010": 180, "a3f51855c3": 150, "a402dc0dfe": 21, "a4065a7eda": 180, "a412bb2fef": 180, "a416b56b53": 96, "a41ec95906": 91, "a43299e362": 180, "a4757bd7af": 96, 
"a48c53c454": 180, "a49dcf9ad5": 150, "a4a506521f": 180, "a4ba7753d9": 180, "a4bac06849": 91, "a4f05d681c": 91, "a50c10060f": 150, "a50eb5a0ea": 150, "a5122c6ec6": 150, "a522b1aa79": 96, "a590915345": 180, "a5b5b59139": 96, "a5b77abe43": 180, "a5c2b2c3e1": 96, "a5cd17bb11": 180, "a5da03aef1": 180, "a5dd11de0d": 150, "a5ea2b93b6": 150, "a5eaeac80b": 180, "a5ec5b0265": 144, "a5f350a87e": 180, "a5f472caf4": 96, "a6027a53cf": 180, "a61715bb1b": 180, "a61cf4389d": 150, "a61d9bbd9b": 180, "a6470dbbf5": 150, "a64a40f3eb": 76, "a653d5c23b": 180, "a65bd23cb5": 150, "a66e0b7ad4": 180, "a66fc5053c": 91, "a68259572b": 180, "a6a810a92c": 150, "a6bc36937f": 91, "a6c3a374e9": 180, "a6d8a4228d": 180, "a6f4e0817f": 180, "a71e0481f5": 96, "a7203deb2d": 150, "a7392d4438": 150, "a73d3c3902": 180, "a7491f1578": 150, "a74b9ca19c": 180, "a77b7a91df": 150, "a78195a5f5": 150, "a78758d4ce": 180, "a7e6d6c29a": 96, "a800d85e88": 51, "a832fa8790": 180, "a83d06410d": 150, "a8999af004": 180, "a8f78125b9": 180, "a907b18df1": 150, "a919392446": 150, "a965504e88": 96, "a96b84b8d2": 96, "a973f239cd": 91, "a977126596": 180, "a9804f2a08": 91, "a984e56893": 96, "a99738f24c": 91, "a99bdd0079": 144, "a9c9c1517e": 178, "a9cbf9c41b": 150, "a9e42e3c0c": 150, "aa07b7c1c0": 180, "aa175e5ec7": 96, "aa1a338630": 96, "aa27d7b868": 96, "aa45f1caaf": 91, "aa49e46432": 96, "aa51934e1b": 180, "aa6287bb6c": 96, "aa6d999971": 180, "aa85278334": 96, "aab33f0e2a": 180, "aaba004362": 180, "aade4cf385": 180, "aae78feda4": 91, "aaed233bf3": 180, "aaff16c2db": 96, "ab199e8dfb": 96, "ab23b78715": 96, "ab2e1b5577": 180, "ab33a18ded": 96, "ab45078265": 180, "ab56201494": 180, "ab90f0d24b": 180, "abab2e6c20": 180, "abb50c8697": 92, "abbe2d15a0": 180, "abbe73cd21": 150, "abe61a11bb": 180, "abeae8ce21": 150, "ac2b431d5f": 150, "ac2cb1b9eb": 150, "ac31fcd6d0": 91, "ac3d3a126d": 180, "ac46bd8087": 180, "ac783ef388": 180, "acb73e4297": 150, "acbf581760": 180, "accafc3531": 96, "acf2c4b745": 96, "acf44293a2": 96, "acf736a27b": 90, "acff336758": 180, "ad1fe56886": 92, "ad28f9b9d9": 91, "ad2de9f80e": 180, "ad397527b2": 97, "ad3d1cfbcb": 86, "ad3fada9d9": 180, "ad4108ee8e": 180, "ad54468654": 66, "ad573f7d31": 96, "ad6255bc29": 180, "ad65ebaa07": 144, "ad97cc064a": 96, "adabbd1cc4": 180, "adb0b5a270": 180, "adc648f890": 150, "add21ee467": 180, "adfd15ceef": 180, "adfdd52eac": 96, "ae01cdab63": 180, "ae0b50ff4f": 96, "ae13ee3d70": 180, "ae1bcbd423": 180, "ae20d09dea": 180, "ae2cecf5f6": 56, "ae3bc4a0ef": 180, "ae499c7514": 92, "ae628f2cd4": 150, "ae8545d581": 86, "ae93214fe6": 150, "ae9cd16dbf": 46, "aeba9ac967": 180, "aebb242b5c": 150, "aed4e0b4c4": 86, "aedd71f125": 180, "aef3e2cb0e": 180, "af0b54cee3": 96, "af3de54c7a": 180, "af5fd24a36": 150, "af8826d084": 91, "af8ad72057": 180, "afb71e22c5": 92, "afcb331e1f": 96, "afe1a35c1e": 150, "b01080b5d3": 180, "b05ad0d345": 96, "b0623a6232": 91, "b064dbd4b7": 96, "b06ed37831": 96, "b06f5888e6": 92, "b08dcc490e": 91, "b0a68228dc": 92, "b0aece727f": 144, "b0b0731606": 96, "b0c7f11f9f": 180, "b0cca8b830": 180, "b0dd580a89": 180, "b0de66ca08": 180, "b0df7c5c5c": 96, "b0f5295608": 96, "b11099eb09": 180, "b132a53086": 91, "b1399fac64": 180, "b13abc0c69": 96, "b1457e3b5e": 180, "b15bf4453b": 91, "b179c4a82d": 96, "b17ee70e8c": 180, "b190b1aa65": 96, "b19b3e22c0": 180, "b19c561fab": 180, "b1d1cd2e6e": 92, "b1d7c03927": 91, "b1d7fe2753": 180, "b1f540a4bd": 96, "b1fc9c64e1": 96, "b1fcbb3ced": 180, "b220939e93": 96, "b22099b419": 180, "b241e95235": 96, "b2432ae86d": 180, "b2456267df": 180, "b247940d01": 150, "b24af1c35c": 180, 
"b24f600420": 97, "b24fe36b2a": 150, "b258fb0b7d": 180, "b26b219919": 96, "b26d9904de": 96, "b274456ce1": 180, "b27b28d581": 72, "b2a26bc912": 180, "b2a9c51e1b": 180, "b2b0baf470": 180, "b2b2756fe7": 96, "b2ce7699e3": 180, "b2edc76bd2": 150, "b2f6b52100": 180, "b30bf47bcd": 180, "b34105a4e9": 91, "b372a82edf": 150, "b3779a1962": 96, "b379ab4ff5": 46, "b37a1d69e3": 150, "b37c01396e": 180, "b382b09e25": 150, "b3996e4ba5": 180, "b3d9ca2aee": 180, "b3dde1e1e9": 180, "b3eb7f05eb": 86, "b40b25055c": 91, "b41e0f1f19": 91, "b44e32a42b": 91, "b4805ae9cd": 46, "b4807569a5": 97, "b48efceb3e": 150, "b493c25c7f": 180, "b4b565aba1": 150, "b4b715a15b": 180, "b4d0c90bf4": 91, "b4d84bc371": 180, "b4e5ad97aa": 180, "b4eaea9e6b": 150, "b50f4b90d5": 180, "b53f675641": 150, "b54278cd43": 180, "b554843889": 150, "b573c0677a": 180, "b58d853734": 180, "b5943b18ab": 180, "b5a09a83f3": 71, "b5aae1fe25": 91, "b5b9da5364": 97, "b5eb64d419": 91, "b5ebb1d000": 96, "b5f1c0c96a": 96, "b5f7fece90": 180, "b6070de1bb": 180, "b60a76fe73": 86, "b61f998772": 96, "b62c943664": 96, "b63094ba0c": 180, "b64fca8100": 96, "b673e7dcfb": 96, "b678b7db00": 180, "b68fc1b217": 180, "b69926d9fa": 96, "b6a1df3764": 180, "b6a4859528": 96, "b6b4738b78": 96, "b6b4f847b7": 150, "b6b8d502d4": 150, "b6bb00e366": 180, "b6d65a9eef": 180, "b6d79a0845": 180, "b6e9ec577f": 91, "b6ec609f7b": 163, "b6f92a308d": 180, "b70a2c0ab1": 46, "b70a5a0d50": 180, "b70c052f2f": 150, "b70d231781": 92, "b72ac6e10b": 180, "b7302d8226": 92, "b73867d769": 150, "b751e767f2": 180, "b76df6e059": 96, "b77e5eddef": 92, "b7a2c2c83c": 96, "b7bcbe6466": 180, "b7c2a469c4": 180, "b7d69da8f0": 144, "b7f31b7c36": 61, "b7f675fb98": 46, "b7fb871660": 51, "b82e5ad1c9": 91, "b841cfb932": 96, "b84b8ae665": 180, "b85b78ac2b": 180, "b86c17caa6": 180, "b86e50d82d": 96, "b871db031a": 66, "b87d56925a": 96, "b8aaa59b75": 92, "b8c03d1091": 180, "b8c3210036": 46, "b8e16df00b": 144, "b8f34cf72e": 91, "b8fb75864e": 150, "b9004db86c": 180, "b9166cbae9": 92, "b920b256a6": 180, "b938d79dff": 20, "b93963f214": 180, "b941aef1a0": 144, "b94d34d14e": 96, "b964c57da4": 96, "b96a95bc7a": 180, "b96c57d2c7": 144, "b9b6bdde0c": 180, "b9bcb3e0f2": 96, "b9d3b92169": 180, "b9dd4b306c": 180, "b9f43ef41e": 92, "ba1f03c811": 96, "ba3a775d7b": 180, "ba3c7f2a31": 150, "ba3fcd417d": 180, "ba5e1f4faa": 150, "ba795f3089": 96, "ba8a291e6a": 150, "ba98512f97": 92, "bac9db04f5": 180, "baedae3442": 180, "baff40d29d": 180, "bb04e28695": 96, "bb1b0ee89f": 96, "bb1c770fe7": 150, "bb1fc34f99": 150, "bb2d220506": 180, "bb334e5cdb": 91, "bb337f9830": 81, "bb721eb9aa": 96, "bb87ff58bd": 96, "bb89a6b18a": 87, "bbaa9a036a": 144, "bbb4302dda": 180, "bbd31510cf": 96, "bbe0256a75": 180, "bc141b9ad5": 91, "bc17ab8a99": 150, "bc318160de": 180, "bc3b9ee033": 91, "bc4240b43c": 96, "bc4ce49105": 91, "bc4f71372d": 96, "bc6b8d6371": 180, "bcaad44ad7": 150, "bcc241b081": 91, "bcc5d8095e": 96, "bcd1d39afb": 96, "bd0d849da4": 180, "bd0e9ed437": 150, "bd2c94730f": 180, "bd321d2be6": 61, "bd3ec46511": 91, "bd5b2e2848": 41, "bd7e02b139": 96, "bd96f9943a": 180, "bda224cb25": 91, "bda4a82837": 96, "bdb74e333f": 180, "bdccd69dde": 96, "bddcc15521": 180, "be116aab29": 150, "be15e18f1e": 150, "be1a284edb": 180, "be2a367a7b": 180, "be376082d0": 150, "be3e3cffbd": 51, "be5d1d89a0": 180, "be8b72fe37": 180, "be9b29e08e": 91, "bea1f6e62c": 97, "bea83281b5": 92, "beb921a4c9": 96, "bec5e9edcd": 180, "beeb8a3f92": 150, "bf2232b58d": 96, "bf28751739": 150, "bf443804e8": 180, "bf461df850": 150, "bf5374f122": 180, "bf551a6f60": 180, "bf8d0f5ada": 96, 
"bf961167a6": 92, "bfab1ad8f9": 150, "bfcb05d88d": 96, "bfd8f6e6c9": 92, "bfd91d0742": 150, "bfe262322f": 87, "c013f42ed7": 180, "c01878083f": 180, "c01faff1ed": 180, "c046fd0edb": 150, "c053e35f97": 91, "c079a6482d": 96, "c0847b521a": 96, "c0a1e06710": 180, "c0e8d4635c": 96, "c0e973ad85": 96, "c0f49c6579": 92, "c0f5b222d7": 96, "c10d07c90d": 180, "c1268d998c": 96, "c130c3fc0c": 180, "c14826ad5e": 180, "c15b922281": 180, "c16f09cb63": 180, "c18e19d922": 180, "c1c830a735": 96, "c1e8aeea45": 180, "c20a5ccc99": 180, "c20fd5e597": 180, "c219d6f8dc": 150, "c2406ae462": 96, "c26f7b5824": 180, "c279e641ee": 96, "c27adaeac5": 180, "c2a35c1cda": 96, "c2a9903b8b": 180, "c2b62567c1": 96, "c2b974ec8c": 150, "c2baaff7bf": 91, "c2be6900f2": 180, "c304dd44d5": 180, "c307f33da2": 96, "c30a7b62c9": 92, "c3128733ee": 180, "c31fa6c598": 180, "c325c8201e": 96, "c32d4aa5d1": 180, "c33f28249a": 144, "c34365e2d7": 180, "c3457af795": 96, "c34d120a88": 180, "c3509e728d": 96, "c35e4fa6c4": 180, "c36240d96f": 150, "c3641dfc5a": 92, "c37b17a4a9": 180, "c39559ddf6": 180, "c3b0c6e180": 96, "c3b3d82e6c": 180, "c3be369fdb": 91, "c3bf1e40c2": 97, "c3c760b015": 96, "c3dd38bf98": 150, "c3e4274614": 91, "c3edc48cbd": 180, "c41e6587f5": 96, "c4272227b0": 96, "c42917fe82": 86, "c438858117": 180, "c44676563f": 180, "c44beb7472": 180, "c45411dacb": 91, "c4571bedc8": 91, "c46deb2956": 180, "c479ee052e": 180, "c47d551843": 180, "c49f07d46d": 180, "c4cc40c1fc": 97, "c4f256f5d5": 144, "c4f5b1ddcc": 180, "c4ff9b4885": 150, "c52bce43db": 66, "c544da6854": 180, "c55784c766": 180, "c557b69fbf": 180, "c593a3f7ab": 92, "c598faa682": 180, "c5ab1f09c8": 180, "c5b6da8602": 96, "c5b9128d94": 96, "c5e845c6b7": 150, "c5fba7b341": 150, "c60897f093": 96, "c61fe6ed7c": 96, "c62188c536": 96, "c64035b2e2": 150, "c69689f177": 180, "c6a12c131f": 51, "c6bb6d2d5c": 180, "c6c18e860f": 150, "c6d9526e0d": 180, "c6e55c33f0": 96, "c7030b28bd": 96, "c70682c7cc": 180, "c70f9be8c5": 87, "c71f30d7b6": 180, "c73c8e747f": 180, "c760eeb8b3": 144, "c7637cab0a": 150, "c7a1a17308": 87, "c7bf937af5": 91, "c7c2860db3": 180, "c7cef4aee2": 91, "c7ebfc5d57": 180, "c813dcf13c": 91, "c82235a49a": 96, "c82a7619a1": 180, "c82ecb90cb": 180, "c844f03dc7": 96, "c8557963f3": 91, "c89147e6e8": 180, "c8a46ff0c8": 150, "c8ab107dd5": 97, "c8b869a04a": 96, "c8c7b306a6": 91, "c8c8b28781": 180, "c8d79e3163": 180, "c8edab0415": 150, "c8f494f416": 96, "c8f6cba9fd": 150, "c909ceea97": 92, "c9188f4980": 180, "c922365dd4": 96, "c92c8c3c75": 96, "c937eb0b83": 91, "c94b31b5e5": 180, "c95cd17749": 180, "c96379c03c": 180, "c96465ee65": 180, "c965afa713": 144, "c9734b451f": 92, "c9862d82dc": 180, "c98b6fe013": 180, "c9999b7c48": 180, "c99e92aaf0": 97, "c9b3a8fbda": 150, "c9bf64e965": 96, "c9c3cb3797": 91, "c9d1c60cd0": 144, "c9de9c22c4": 96, "ca1828fa54": 96, "ca346f17eb": 180, "ca3787d3d3": 150, "ca4b99cbac": 96, "ca91c69e3b": 71, "ca91e99105": 46, "caa8e97f81": 96, "caac5807f8": 180, "cabba242c2": 96, "cad5a656a9": 180, "cad673e375": 180, "cad8a85930": 150, "cae7b0a02b": 180, "cae7ef3184": 180, "caeb6b6cbb": 150, "caecf0a5db": 91, "cb15312003": 76, "cb2e35d610": 150, "cb35a87504": 150, "cb3f22b0cf": 96, "cbb410da64": 91, "cc8728052e": 150, "cc892997b8": 180, "cce03c2a9b": 144, "cd47a23e31": 92, "cd4dc03dc0": 180, "cd5ae611da": 96, "cd603bb9d1": 144, "cd8f49734c": 180, "cdc6b1c032": 92, "cdcfe008ad": 144, "cdd57027c2": 96, "ce1af99b4b": 150, "ce1bc5743a": 150, "ce25872021": 97, "ce2776f78f": 180, "ce49b1f474": 180, "ce4f0a266f": 180, "ce5641b195": 180, "ce6866aa19": 180, "ce712ed3c9": 91, 
"ce7d1c8117": 144, "ce7dbeaa88": 180, "ce9b015a5e": 180, "cea7697b25": 96, "cebbd826cf": 150, "cec3415361": 150, "cec41ad4f4": 180, "ced49d26df": 180, "ced7705ab2": 144, "cef824a1e1": 92, "cf13f5c95a": 144, "cf4376a52d": 180, "cf85ab28b5": 180, "cfc2e50b9d": 150, "cfcd571fff": 144, "cfd9d4ae47": 180, "cfda2dcce5": 150, "cff035928b": 91, "cff8191891": 46, "d01608c2a5": 96, "d01a8f1f83": 144, "d021d68bca": 180, "d04258ca14": 150, "d0483573dc": 150, "d04a90aaff": 180, "d05279c0bd": 180, "d0696bd5fc": 91, "d072fda75b": 178, "d0a83bcd9f": 150, "d0ab39112e": 180, "d0acde820f": 96, "d0b4442c71": 144, "d0c65e9e95": 180, "d0fb600c73": 150, "d107a1457c": 61, "d123d674c1": 66, "d14d1e9289": 96, "d154e3388e": 96, "d177e9878a": 96, "d1802f69f8": 150, "d182c4483a": 180, "d195d31128": 180, "d200838929": 180, "d205e3cff5": 180, "d247420c4c": 180, "d2484bff33": 66, "d26f6ed9b0": 150, "d280fcd1cb": 180, "d2857f0faa": 180, "d292a50c7f": 46, "d295ea2dc7": 96, "d2a58b4fa6": 91, "d2b026739a": 150, "d2ebe0890f": 180, "d2ede5d862": 91, "d301ca58cc": 150, "d3069da8bb": 91, "d343d4a77d": 150, "d355e634ef": 86, "d367fb5253": 91, "d36d16358e": 76, "d38bc77e2c": 101, "d38d1679e2": 144, "d3932ad4bd": 97, "d3987b2930": 180, "d39934abe3": 144, "d3ae1c3f4c": 92, "d3b088e593": 87, "d3e6e05e16": 150, "d3eefae7c5": 144, "d3f55f5ab8": 180, "d3f5c309cc": 61, "d4034a7fdf": 180, "d4193011f3": 144, "d429c67630": 180, "d42c0ff975": 180, "d44a764409": 180, "d44e6acd1d": 66, "d45158c175": 150, "d454e8444f": 150, "d45f62717e": 180, "d48ebdcf74": 180, "d49ab52a25": 86, "d4a607ad81": 92, "d4b063c7db": 144, "d4da13e9ba": 96, "d4dd1a7d00": 180, "d4f4f7c9c3": 96, "d521aba02e": 180, "d535bb1b97": 92, "d53b955f78": 96, "d55cb7a205": 92, "d55f247a45": 150, "d5695544d8": 180, "d5853d9b8b": 180, "d5b6c6d94a": 96, "d5cae12834": 150, "d5df027f0c": 144, "d5ee40e5d0": 180, "d600046f73": 144, "d632fd3510": 144, "d6476cad55": 180, "d65a7bae86": 150, "d664c89912": 150, "d689658f06": 180, "d6917db4be": 96, "d69967143e": 96, "d699d3d798": 91, "d69f757a3f": 180, "d6ac0e065c": 91, "d6c02bfda5": 96, "d6c1b5749e": 92, "d6e12ef6cc": 92, "d6eed152c4": 180, "d6faaaf726": 96, "d704766646": 180, "d708e1350c": 180, "d7135cf104": 180, "d7157a9f44": 46, "d719cf9316": 96, "d724134cfd": 144, "d73a60a244": 180, "d7411662da": 144, "d74875ea7c": 96, "d756f5a694": 91, "d7572b7d8a": 180, "d763bd6d96": 180, "d7697c8b13": 96, "d7797196b4": 150, "d79c834768": 180, "d7b34e5d73": 91, "d7bb6b37a7": 150, "d7c7e064a6": 180, "d7fbf545b3": 96, "d82a0aa15b": 180, "d847e24abd": 144, "d8596701b7": 144, "d86101499c": 144, "d87069ba86": 150, "d87160957b": 144, "d874654b52": 91, "d88a403092": 96, "d8aee40f3f": 144, "d8e77a222d": 91, "d8eb07c381": 180, "d9010348a1": 66, "d90e3cf281": 91, "d92532c7b2": 180, "d927fae122": 150, "d95707bca8": 91, "d973b31c00": 144, "d991cb471d": 180, "d992c69d37": 150, "d99d770820": 180, "d9b63abc11": 180, "d9db6f1983": 144, "d9e52be2d2": 96, "d9edc82650": 150, "da01070697": 96, "da070ea4b7": 180, "da080507b9": 150, "da0e944cc4": 180, "da28d94ff4": 96, "da5d78b9d1": 180, "da6003fc72": 150, "da690fee9f": 180, "da6c68708f": 180, "da7a816676": 144, "dac361e828": 180, "dac71659b8": 144, "dad980385d": 96, "daebc12b77": 150, "db0968cdd3": 150, "db231a7100": 92, "db59282ace": 91, "db7f267c3f": 180, "dba35b87fd": 96, "dbba735a50": 86, "dbca076acd": 180, "dbd66dc3ac": 180, "dbdc3c292b": 180, "dbf4a5b32b": 180, "dbfc417d28": 180, "dc1745e0a2": 91, "dc32a44804": 180, "dc34b35e30": 150, "dc504a4f79": 92, "dc704dd647": 180, "dc71bc6918": 92, "dc7771b3be": 180, 
"dcf8c93617": 96, "dd0f4c9fb9": 180, "dd415df125": 120, "dd601f9a3f": 144, "dd61d903df": 150, "dd77583736": 150, "dd8636bd8b": 180, "dd9fe6c6ac": 92, "ddb2da4c14": 180, "ddcd450d47": 144, "dde8e67fb4": 76, "ddfc3f04d3": 150, "de2ab79dfa": 180, "de2f35b2fd": 91, "de30990a51": 180, "de36b216da": 96, "de37403340": 180, "de46e4943b": 96, "de4ddbccb1": 180, "de5e480f05": 96, "de6a9382ca": 96, "de74a601d3": 180, "de827c510d": 92, "ded6069f7b": 180, "defb71c741": 96, "df01f277f1": 180, "df05214b82": 92, "df0638b0a0": 46, "df11931ffe": 180, "df1b0e4620": 180, "df20a8650d": 92, "df2bc56d7c": 180, "df365282c6": 180, "df39a0d9df": 96, "df3c430c24": 91, "df5536cfb9": 180, "df59cfd91d": 97, "df5e2152b3": 66, "df741313c9": 96, "df7626172f": 92, "df8ad5deb9": 180, "df96aa609a": 180, "df9705605c": 180, "df9c91c4da": 180, "dfc0d3d27a": 180, "dfdbf91a99": 180, "e00baaae9b": 180, "e0a938c6e7": 91, "e0b2ceee6f": 150, "e0bdb5dfae": 36, "e0be1f6e17": 96, "e0c478f775": 150, "e0de82caa7": 180, "e0f217dd59": 91, "e0f7208874": 180, "e0fb58395e": 180, "e1194c2e9d": 150, "e11adcd05d": 180, "e128124b9d": 87, "e1495354e4": 180, "e1561d6d4b": 180, "e158805399": 91, "e16945b951": 46, "e19edcd34b": 180, "e1a1544285": 180, "e1ab7957f4": 150, "e1d26d35be": 96, "e1e957085b": 96, "e1f14510fa": 180, "e214b160f4": 180, "e2167379b8": 150, "e21acb20ab": 180, "e221105579": 180, "e22ddf8a1b": 180, "e22de45950": 96, "e22ffc469b": 180, "e23cca5244": 96, "e252f46f0b": 180, "e25fa6cf39": 180, "e26e486026": 150, "e275760245": 96, "e27bbedbfe": 92, "e29e9868a8": 180, "e2b37ff8af": 96, "e2b608d309": 180, "e2bef4da9a": 96, "e2c87a6421": 96, "e2ea25542c": 144, "e2fb1d6497": 178, "e2fcc99117": 91, "e33c18412a": 71, "e348377191": 91, "e352cb59c8": 180, "e36ac982f0": 91, "e391bc981e": 96, "e39e3e0a06": 96, "e3bf38265f": 51, "e3d5b2cd21": 150, "e3d60e82d5": 46, "e3e3245492": 96, "e3e4134877": 150, "e3f4635e03": 180, "e4004ee048": 180, "e402d1afa5": 180, "e415093d27": 71, "e41ceb5d81": 180, "e424653b78": 96, "e42b6d3dbb": 96, "e42d60f0d4": 180, "e436d0ff1e": 180, "e43d7ae2c5": 92, "e4428801bc": 97, "e44e0b4917": 180, "e470345ede": 180, "e48e8b4263": 180, "e4922e3726": 180, "e4936852bb": 96, "e495f32c60": 41, "e499228f26": 150, "e4af66e163": 180, "e4b2095f58": 180, "e4d19c8283": 180, "e4d4872dab": 96, "e4e2983570": 41, "e4eaa63aab": 91, "e4ef0a3a34": 91, "e4f8e5f46e": 96, "e4ffb6d0dd": 71, "e53e21aa02": 180, "e57f4f668b": 180, "e588433c1e": 96, "e597442c99": 150, "e5abc0e96b": 91, "e5be628030": 180, "e5ce96a55d": 61, "e5d6b70a9f": 81, "e5fde1574c": 92, "e625e1d27b": 180, "e6261d2348": 91, "e6267d46bc": 96, "e6295f223f": 180, "e63463d8c6": 96, "e6387bd1e0": 180, "e653883384": 96, "e65f134e0b": 150, "e668ef5664": 180, "e672ccd250": 92, "e674510b20": 91, "e676107765": 150, "e699da0cdf": 180, "e6be243065": 46, "e6deab5e0b": 76, "e6f065f2b9": 96, "e71629e7b5": 96, "e72a7d7b0b": 150, "e72f6104e1": 92, "e75a466eea": 72, "e76c55933f": 150, "e7784ec8ad": 180, "e78922e5e6": 47, "e78d450a9c": 91, "e7c6354e77": 91, "e7c8de1fce": 150, "e7ea10db28": 150, "e803918710": 180, "e8073a140b": 180, "e828dd02db": 150, "e845994987": 150, "e8485a2615": 96, "e85c5118a7": 180, "e88b6736e4": 180, "e8962324e3": 91, "e8b3018d36": 91, "e8cee8bf0b": 150, "e8d97ebece": 144, "e8da49ea6a": 96, "e8ed1a3ccf": 180, "e8f7904326": 72, "e8f8341dec": 180, "e8fa21eb13": 180, "e90c10fc4c": 150, "e914b8cac8": 180, "e92b6bfea4": 46, "e92e1b7623": 150, "e93f83e512": 92, "e9422ad240": 46, "e9460b55f9": 180, "e9502628f6": 180, "e950befd5f": 180, "e9582bdd1b": 91, "e95e5afe0f": 96, 
"e97cfac475": 96, "e98d57d99c": 91, "e98eda8978": 92, "e99706b555": 41, "e9bc0760ba": 91, "e9d3c78bf3": 87, "e9ec1b7ea8": 144, "ea065cc205": 180, "ea138b6617": 150, "ea16d3fd48": 180, "ea2545d64b": 180, "ea286a581c": 150, "ea320da917": 96, "ea345f3627": 91, "ea3b94a591": 180, "ea444a37eb": 71, "ea4a01216b": 180, "ea5672ffa8": 81, "eaa99191cb": 150, "eaab4d746c": 91, "eac7a59bc1": 150, "ead5d3835a": 96, "eaec65cfa7": 180, "eaed1a87be": 180, "eb2f821c6f": 180, "eb383cb82e": 91, "eb6992fe02": 150, "eb6ac20a01": 92, "eb6d7ab39e": 96, "eb7921facd": 180, "eb8fce51a6": 180, "ebbb90e9f9": 91, "ebbf5c9ee1": 180, "ebc4ec32e6": 91, "ebe56e5ef8": 180, "ec1299aee4": 97, "ec139ff675": 180, "ec193e1a01": 180, "ec28252938": 150, "ec387be051": 180, "ec3d4fac00": 91, "ec4186ce12": 95, "ec579c2f96": 91, "ecae59b782": 180, "ecb33a0448": 180, "ece6bc9e92": 150, "ecfedd4035": 92, "ecfff22fd6": 180, "ed3291c3d6": 180, "ed3cd5308d": 180, "ed3e6fc1a5": 180, "ed72ae8825": 180, "ed7455da68": 92, "ed844e879f": 150, "ed8f814b2b": 92, "ed911a1f63": 180, "ed9ff4f649": 180, "eda8ab984b": 180, "edb8878849": 96, "edbfdfe1b4": 180, "edd22c46a2": 96, "edd663afa3": 180, "ede3552eae": 96, "edeab61ee0": 174, "ee07583fc0": 150, "ee316eaed6": 91, "ee3f509537": 150, "ee40a1e491": 92, "ee4bf100f1": 180, "ee6f9b01f9": 180, "ee947ed771": 96, "ee9706ac7f": 91, "ee9a7840ae": 180, "eeb90cb569": 180, "eebf45e5c5": 92, "eeed0c7d73": 87, "ef0061a309": 96, "ef07f1a655": 96, "ef0a8e8f35": 56, "ef232a2aed": 150, "ef308ad2e9": 180, "ef44945428": 96, "ef45ce3035": 180, "ef5dde449d": 180, "ef5e770988": 144, "ef6359cea3": 96, "ef65268834": 180, "ef6cb5eae0": 86, "ef78972bc2": 150, "ef8cfcfc4f": 82, "ef96501dd0": 150, "ef9a2e976b": 91, "efb24f950f": 180, "efce0c1868": 180, "efe5ac6901": 91, "efe828affa": 180, "efea4e0523": 144, "f0268aa627": 180, "f0483250c8": 180, "f04cf99ee6": 62, "f05b189097": 96, "f08928c6d3": 96, "f09d74856f": 150, "f0a7607d63": 180, "f0ad38da27": 71, "f0c34e1213": 92, "f0c7f86c29": 180, "f0dfa18ba7": 150, "f0eb3179f7": 180, "f119bab27d": 150, "f14409b6a3": 180, "f1489baff4": 86, "f14c18cf6a": 180, "f15c607b92": 180, "f1af214222": 97, "f1b77bd309": 180, "f1ba9e1a3e": 180, "f1d99239eb": 66, "f1dc710cf4": 180, "f1ec5c08fa": 97, "f22648fe12": 180, "f22d21f1f1": 144, "f233257395": 91, "f23e95dbe5": 96, "f2445b1572": 150, "f253b3486d": 144, "f277c7a6a4": 91, "f2ab2b84d6": 87, "f2b7c9b1f3": 150, "f2b83d5ce5": 180, "f2c276018f": 150, "f2cfd94d64": 150, "f2dd6e3add": 150, "f2e7653f16": 180, "f2f333ad06": 96, "f2f55d6713": 180, "f2fdb6abec": 180, "f305a56d9f": 46, "f3085d6570": 96, "f3325c3338": 180, "f3400f1204": 180, "f34497c932": 97, "f34a56525e": 91, "f36483c824": 96, "f3704d5663": 91, "f3734c4913": 150, "f38e5aa5b4": 86, "f3986fba44": 180, "f3a0ffc7d9": 180, "f3b24a7d28": 96, "f3e6c35ec3": 180, "f3fc0ea80b": 96, "f40a683fbe": 180, "f4207ca554": 180, "f4377499c2": 150, "f46184f393": 144, "f46c2d0a6d": 180, "f46c364dca": 180, "f46f7a0b63": 180, "f46fe141b0": 91, "f470b9aeb0": 180, "f47eb7437f": 96, "f48b535719": 92, "f49e4866ac": 180, "f4aa882cfd": 180, "f4daa3dbd5": 96, "f4dd51ac35": 91, "f507a1b9dc": 96, "f51c5ac84b": 86, "f52104164b": 180, "f54c67b9bb": 96, "f5966cadd2": 180, "f5bddf5598": 91, "f5d85cfd17": 92, "f5e2e7d6a0": 96, "f5f051e9b4": 180, "f5f8a93a76": 150, "f6283e8af5": 96, "f635e9568b": 180, "f6474735be": 144, "f659251be2": 150, "f66981af4e": 96, "f6708fa398": 87, "f697fe8e8f": 96, "f6adb12c42": 76, "f6c7906ca4": 180, "f6cd0a8016": 144, "f6d6f15ae7": 144, "f6e501892c": 96, "f6f59d986f": 180, "f6fe8c90a5": 180, 
"f714160545": 144, "f74c3888d7": 180, "f7782c430e": 150, "f7783ae5f2": 96, "f77ab47923": 97, "f788a98327": 91, "f7961ac1f0": 96, "f7a71e7574": 150, "f7a8521432": 180, "f7afbf4947": 150, "f7b7cd5f44": 81, "f7cf4b4a39": 92, "f7d49799ad": 150, "f7e0c9bb83": 180, "f7e5b84928": 96, "f7e6bd58be": 96, "f7f2a38ac6": 96, "f7f6cb2d6d": 150, "f83f19e796": 76, "f85796a921": 91, "f8603c26b2": 180, "f8819b42ec": 144, "f891f8eaa1": 96, "f89288d10c": 92, "f895ae8cc1": 180, "f8af30d4b6": 97, "f8b4ac12f1": 180, "f8c3fb2b01": 180, "f8c8de2764": 180, "f8db369b40": 92, "f8fcb6a78c": 180, "f94aafdeef": 180, "f95d217b70": 96, "f9681d5103": 92, "f9750192a4": 91, "f9823a32c2": 96, "f991ddb4c2": 96, "f99d535567": 96, "f9ae3d98b7": 144, "f9b6217959": 91, "f9bd1fabf5": 96, "f9c68eaa64": 180, "f9d3e04c4f": 92, "f9daf64494": 180, "f9e4cc5a0a": 96, "f9ea6b7f31": 96, "f9f3852526": 180, "fa04c615cf": 150, "fa08e00a56": 180, "fa4370d74d": 180, "fa67744af3": 180, "fa88d48a92": 150, "fa8b904cc9": 92, "fa9526bdf1": 150, "fa9b9d2426": 150, "fad633fbe1": 150, "faf5222dc3": 91, "faff0e15f1": 180, "fb08c64e8c": 180, "fb23455a7f": 150, "fb2e19fa6e": 180, "fb34dfbb77": 180, "fb47fcea1e": 96, "fb49738155": 180, "fb4cbc514b": 71, "fb4e6062f7": 180, "fb5ba7ad6e": 96, "fb63cd1236": 96, "fb81157a07": 180, "fb92abdaeb": 180, "fba22a6848": 92, "fbaca0c9df": 180, "fbc645f602": 96, "fbd77444cd": 96, "fbe53dc8e8": 96, "fbe541dd73": 97, "fbe8488798": 91, "fbfd25174f": 96, "fc28cb305e": 97, "fc33b1ffd6": 150, "fc6186f0bb": 180, "fc918e3a40": 150, "fc96cda9d8": 150, "fc9832eea4": 150, "fcb10d0f81": 180, "fcd20a2509": 180, "fcf637e3ab": 92, "fcfd81727f": 96, "fd31890379": 180, "fd33551c28": 144, "fd542da05e": 144, "fd6789b3fe": 180, "fd77828200": 180, "fd7af75f4d": 150, "fdb28d0fbb": 150, "fdb3d1fb1e": 82, "fdb8b04124": 96, "fdc6e3d581": 91, "fdfce7e6fc": 180, "fe0f76d41b": 180, "fe24b0677d": 180, "fe3c02699d": 144, "fe58b48235": 96, "fe6a5596b8": 91, "fe6c244f63": 66, "fe7afec086": 180, "fe985d510a": 144, "fe9db35d15": 96, "fea8ffcd36": 144, "feb1080388": 180, "fed208bfca": 180, "feda5ad1c2": 180, "feec95b386": 91, "ff15a5eff6": 144, "ff204daf4b": 96, "ff25f55852": 180, "ff2ada194f": 180, "ff2ce142e8": 96, "ff49d36d20": 180, "ff5a1ec4f3": 180, "ff66152b25": 180, "ff692fdc56": 180, "ff773b1a1e": 96, "ff97129478": 144, "ffb904207d": 180, "ffc43fc345": 150, "fffe5f8df6": 180} \ No newline at end of file diff --git a/florence_sam/experiment.ipynb b/florence_sam/experiment.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e7d4b4a125b48248baac923a4d6f7f040ef34e06 --- /dev/null +++ b/florence_sam/experiment.ipynb @@ -0,0 +1,785 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# import os\n", + "# import numpy as np\n", + "# from PIL import Image\n", + "\n", + "# def load_images_from_folder(folder, image_type):\n", + "# images = []\n", + "# for filename in sorted(os.listdir(folder)):\n", + "# img_path = os.path.join(folder, filename)\n", + "# if os.path.isfile(img_path):\n", + "# img = Image.open(img_path)\n", + "\n", + "# # Resize the image to make it divisble by 8 but keep the aspect ratio same.\n", + "# width, height = img.size\n", + "# new_width = (width//8)*8\n", + "# new_height = (height//8)*8\n", + "# img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)\n", + "# if image_type == 'mask':\n", + "# img = img.convert('L')\n", + "# img_array = np.array(img)\n", + "# if image_type == 'mask':\n", + "# img_array = np.expand_dims(img_array, 
axis=-1)\n", + "# images.append(img_array)\n", + "# return np.array(images)\n", + "\n", + "# input_frames = 'input_frames' # Directory for video frames\n", + "# input_masks = 'output_frames' # Directory for mask frames\n", + "\n", + "# # Load video frames\n", + "# video_sequence = load_images_from_folder(input_frames, 'video')\n", + "# # Load mask frames\n", + "# mask_sequence = load_images_from_folder(input_masks, 'mask')\n", + "\n", + "# # Save as .npy files\n", + "# np.save('images.npy', video_sequence)\n", + "# np.save('masks.npy', mask_sequence)\n", + "\n", + "# print(\"Video sequence and mask sequence have been saved as .npy files.\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Video sequence shape: (12, 360, 640, 3)\n", + "Mask sequence shape: (12, 360, 640, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# # load .npy file and check the images and there dimenstions\n", + "\n", + "# import os\n", + "# import numpy as np\n", + "# import matplotlib.pyplot as plt\n", + "\n", + "# # Load the .npy files\n", + "# video_sequence = np.load('images.npy')\n", + "# mask_sequence = np.load('masks.npy')\n", + "\n", + "# # Check the dimensions of the video sequence and mask sequence\n", + "# print('Video sequence shape:', video_sequence.shape)\n", + "# print('Mask sequence shape:', mask_sequence.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "FPS of the video: 59.94005994005994\n" + ] + } + ], + "source": [ + "import cv2, os\n", + "os.makedirs(\"input_frames\", exist_ok=True)\n", + "\n", + "# Video input\n", + "VIDEO_INPUT = \"videos/clip-07-camera-2.mp4\"\n", + "\n", + "# Video Scale Factor\n", + "VIDEO_SCALE_FACTOR = 0.5\n", + "\n", + "# open the video file\n", + "cap = cv2.VideoCapture(VIDEO_INPUT)\n", + "\n", + "# Get FPS of the video\n", + "fps = cap.get(cv2.CAP_PROP_FPS)\n", + "print(f\"FPS of the video: {fps}\")\n", + "\n", + "# get the video frame width and height\n", + "frame_width = int(cap.get(3) * VIDEO_SCALE_FACTOR)\n", + "frame_height = int(cap.get(4) * VIDEO_SCALE_FACTOR)\n", + "\n", + "# Now save all the frames to input_frames folder\n", + "count = 0\n", + "while True:\n", + " ret, frame = cap.read()\n", + " if not ret:\n", + " break\n", + " frame = cv2.resize(frame, (frame_width, frame_height))\n", + " cv2.imwrite(f\"input_frames/frame_{count:04d}.jpg\", frame)\n", + " count += 1" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n" + ] + } + ], + "source": [ + "import os\n", + "import cv2\n", + "import numpy as np\n", + "import torch\n", + "from PIL import Image\n", + "from tqdm import tqdm\n", + "\n", + "import supervision as sv\n", + "from utils.video import generate_unique_name, create_directory, delete_directory\n", + "from utils.florence import load_florence_model, run_florence_inference, FLORENCE_OPEN_VOCABULARY_DETECTION_TASK\n", + "from utils.sam import load_sam_image_model, load_sam_video_model, run_sam_inference\n", + "\n", + "# Constants\n", + "VIDEO_INPUT = \"videos/clip-07-camera-2.mp4\"\n", + "TEXT_INPUT = \"players, basketball, rim, players shadow\"\n", + "VIDEO_SCALE_FACTOR = 0.5\n", + "VIDEO_TARGET_DIRECTORY = \"tmp\"\n", + "\n", + "# Create target directory\n", + "create_directory(directory_path=VIDEO_TARGET_DIRECTORY)\n", + "\n", + "# Set device\n", + "DEVICE = torch.device(\"cuda\")\n", + "# DEVICE = torch.device(\"cpu\")\n", + "\n", + "# Enable mixed precision and TF32 for Ampere GPUs\n", + "torch.autocast(device_type=\"cuda\", dtype=torch.bfloat16).__enter__()\n", + "if torch.cuda.get_device_properties(0).major >= 8:\n", + " torch.backends.cuda.matmul.allow_tf32 = True\n", + " torch.backends.cudnn.allow_tf32 = True\n", + "\n", + "# Load models\n", + "FLORENCE_MODEL, FLORENCE_PROCESSOR = load_florence_model(device=DEVICE)\n", + "SAM_IMAGE_MODEL = load_sam_image_model(device=DEVICE)\n", + "SAM_VIDEO_MODEL = load_sam_video_model(device=DEVICE)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the first frame of the video\n", + "frame_generator = sv.get_video_frames_generator(VIDEO_INPUT)\n", + "frame = next(frame_generator)\n", + "frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Process text input\n", + "texts = [prompt.strip() for prompt in TEXT_INPUT.split(\",\")]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "detections_list = []\n", + "for text in texts:\n", + " _, result = run_florence_inference(\n", + " model=FLORENCE_MODEL,\n", + " processor=FLORENCE_PROCESSOR,\n", + " device=DEVICE,\n", + " image=frame,\n", + " task=FLORENCE_OPEN_VOCABULARY_DETECTION_TASK,\n", + " text=text\n", + " )\n", + " detections = sv.Detections.from_lmm(\n", + " lmm=sv.LMM.FLORENCE_2,\n", + " result=result,\n", + " resolution_wh=frame.size\n", + " )\n", + " detections = run_sam_inference(SAM_IMAGE_MODEL, frame, detections)\n", + " detections_list.append(detections)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "detections = sv.Detections.merge(detections_list)\n", + "detections = run_sam_inference(SAM_IMAGE_MODEL, frame, detections)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate unique name for video processing\n", + " name = generate_unique_name()\n", + " frame_directory_path = os.path.join(\"tmp\", name)\n", + " create_directory(frame_directory_path)\n", + " frames_sink = sv.ImageSink(\n", + " target_dir_path=frame_directory_path,\n", + " image_name_pattern=\"{:05d}.jpeg\"\n", + " )\n", + " # Get video info and scale\n", + " video_info = sv.VideoInfo.from_video_path(video_path)\n", + " video_info.width = int(video_info.width * 
self.scale_factor)\n", + " video_info.height = int(video_info.height * self.scale_factor)\n", + "\n", + " # Initialize SAM video model state\n", + " inference_state = self.sam_video_model.init_state(\n", + " video_path=frame_directory_path,\n", + " device=self.device\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Splitting video into frames: 5%|▍ | 18/397 [00:00<00:03, 100.21it/s]" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Splitting video into frames: 100%|██████████| 397/397 [00:02<00:00, 192.86it/s]\n", + "frame loading (JPEG): 100%|██████████| 397/397 [00:14<00:00, 27.67it/s]\n" + ] + } + ], + "source": [ + "# Generate unique name for video processing\n", + "name = generate_unique_name()\n", + "frame_directory_path = os.path.join(VIDEO_TARGET_DIRECTORY, name)\n", + "frames_sink = sv.ImageSink(\n", + " target_dir_path=frame_directory_path,\n", + " image_name_pattern=\"{:05d}.jpeg\"\n", + ")\n", + "\n", + "# Get video info and scale\n", + "video_info = sv.VideoInfo.from_video_path(VIDEO_INPUT)\n", + "video_info.width = int(video_info.width * VIDEO_SCALE_FACTOR)\n", + "video_info.height = int(video_info.height * VIDEO_SCALE_FACTOR)\n", + "\n", + "# Split video into frames\n", + "frames_generator = sv.get_video_frames_generator(VIDEO_INPUT)\n", + "with frames_sink:\n", + " for frame in tqdm(frames_generator, total=video_info.total_frames, desc=\"Splitting video into frames\"):\n", + " frame = sv.scale_image(frame, VIDEO_SCALE_FACTOR)\n", + " frames_sink.save_image(frame)\n", + "\n", + "# Initialize SAM video model\n", + "inference_state = SAM_VIDEO_MODEL.init_state(\n", + " video_path=frame_directory_path,\n", + " device=DEVICE\n", + ")\n", + "\n", + "# Add masks to inference state\n", + "for mask_index, mask in enumerate(detections.mask):\n", + " _, object_ids, mask_logits = SAM_VIDEO_MODEL.add_new_mask(\n", + " inference_state=inference_state,\n", + " frame_idx=0,\n", + " obj_id=mask_index,\n", + " mask=mask\n", + " )\n", + "\n", + "# Create output video path\n", + "video_path = os.path.join(VIDEO_TARGET_DIRECTORY, f\"{name}.mp4\")\n", + "frames_generator = sv.get_video_frames_generator(VIDEO_INPUT)\n", + "masks_generator = SAM_VIDEO_MODEL.propagate_in_video(inference_state)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "COLORS = ['#FFFFFF']\n", + "\n", + "COLOR_PALETTE = sv.ColorPalette.from_hex(COLORS)\n", + "\n", + "MASK_ANNOTATOR = sv.MaskAnnotator(\n", + " color=COLOR_PALETTE,\n", + " color_lookup=sv.ColorLookup.INDEX\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "propagate in video: 1%| | 4/397 [00:00<00:25, 15.67it/s]" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "propagate in video: 100%|█████████▉| 396/397 [00:32<00:00, 12.26it/s]" + ] + } + ], + "source": [ + "counter = 0\n", + "with sv.VideoSink(video_path, video_info=video_info) as sink:\n", + " for frame, (_, tracker_ids, mask_logits) in zip(frames_generator, masks_generator):\n", + " frame = sv.scale_image(frame, VIDEO_SCALE_FACTOR)\n", + " masks = (mask_logits > 0.0).cpu().numpy().astype(bool)\n", + " if len(masks.shape) == 4:\n", + " masks = np.squeeze(masks, axis=1)\n", + "\n", + " detections = sv.Detections(\n", + " 
xyxy=sv.mask_to_xyxy(masks=masks),\n", + " mask=masks,\n", + " class_id=np.array(tracker_ids)\n", + " )\n", + " # create a black image with same size as original frame\n", + " annotated_frame = frame.copy()\n", + " # make all pixels of annotated_frame black\n", + " annotated_frame[:, :, :] = 0\n", + " annotated_frame = MASK_ANNOTATOR.annotate(\n", + " scene=annotated_frame, detections=detections)\n", + " annotated_frame = (annotated_frame > 0).astype(np.uint8) * 255\n", + " # Image.fromarray(annotated_frame).save(f\"output_frames/{counter}.jpeg\")\n", + " counter += 1\n", + " sink.write_frame(annotated_frame)\n", + "\n", + "delete_directory(frame_directory_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "delete_directory(\"input_frames\")\n", + "delete_directory(\"output_frames\")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "FPS of the video: 59.94005994005994\n" + ] + } + ], + "source": [ + "import cv2, os\n", + "os.makedirs(\"input_frames\", exist_ok=True)\n", + "\n", + "# open the video file\n", + "cap = cv2.VideoCapture(VIDEO_INPUT)\n", + "\n", + "# Get FPS of the video\n", + "fps = cap.get(cv2.CAP_PROP_FPS)\n", + "print(f\"FPS of the video: {fps}\")\n", + "\n", + "# get the video frame width and height\n", + "frame_width = int(cap.get(3) * VIDEO_SCALE_FACTOR)\n", + "frame_height = int(cap.get(4) * VIDEO_SCALE_FACTOR)\n", + "\n", + "# Now save all the frames to input_frames folder\n", + "count = 0\n", + "while True:\n", + " ret, frame = cap.read()\n", + " if not ret:\n", + " break\n", + " frame = cv2.resize(frame, (frame_width, frame_height))\n", + " cv2.imwrite(f\"input_frames/frame_{count:04d}.jpg\", frame)\n", + " count += 1" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2, os\n", + "os.makedirs(\"output_frames\", exist_ok=True)\n", + "\n", + "# Get FPS of the video\n", + "fps = video_info.fps\n", + "\n", + "# get the video frame width and height\n", + "frame_width = video_info.width\n", + "frame_height = video_info.height\n", + "\n", + "# open the video file\n", + "cap = cv2.VideoCapture(video_path)\n", + "\n", + "# Now save all the frames to output_frames folder\n", + "count = 0\n", + "while True:\n", + " ret, frame = cap.read()\n", + " if not ret:\n", + " break\n", + " frame = cv2.resize(frame, (frame_width, frame_height))\n", + " cv2.imwrite(f\"output_frames/frame_{count:04d}.jpg\", frame)\n", + " count += 1" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# # Create video sink and write annotated frames\n", + "# counter = 0\n", + "# with sv.VideoSink(video_path, video_info=video_info) as sink:\n", + "# for frame, (_, tracker_ids, mask_logits) in zip(frames_generator, masks_generator):\n", + "# frame = sv.scale_image(frame, VIDEO_SCALE_FACTOR)\n", + "# masks = (mask_logits > 0.0).cpu().numpy().astype(bool)\n", + "# if len(masks.shape) == 4:\n", + "# masks = np.squeeze(masks, axis=1)\n", + "\n", + "# # Now combine all masks\n", + "# mask = np.zeros((frame.shape[0], frame.shape[1], 3), dtype=np.uint8)\n", + "# for individual_mask in masks:\n", + "# mask[individual_mask] = 255\n", + "\n", + "# Image.fromarray(mask).save(f\"output_frames/{counter}.jpeg\")\n", + "# counter += 1" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + 
"outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "propagate in video: 100%|█████████▉| 396/397 [00:52<00:00, 12.26it/s]" + ] + } + ], + "source": [ + "# import cv2\n", + "# import numpy as np\n", + "# import os\n", + "\n", + "# # input frames (RGB video frames)\n", + "# input_frames = \"input_frames\"\n", + "\n", + "# # output frames (Mask frames)\n", + "# output_frames = \"output_frames\"\n", + "\n", + "# # output video\n", + "# output_video = \"output_video.mp4\"\n", + "\n", + "# # Load the input frames\n", + "# input_frame_files = sorted(os.listdir(input_frames))\n", + "# input_frames = [cv2.imread(os.path.join(input_frames, file)) for file in input_frame_files]\n", + "\n", + "# # Load the mask frames\n", + "# mask_frame_files = sorted(os.listdir(output_frames))\n", + "# mask_frames = [cv2.imread(os.path.join(output_frames, file)) for file in mask_frame_files]\n", + "\n", + "# fps = 60\n", + "\n", + "# # New based on each masked frame replaced the masked area of the input frame with the mask frame.\n", + "# fourcc = cv2.VideoWriter_fourcc(*'avc1')\n", + "\n", + "# # Get the height and width of the frames\n", + "# height, width, _ = input_frames[0].shape\n", + "\n", + "# # Create the output video writer\n", + "# out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))\n", + "\n", + "# # Iterate over each frame\n", + "# for i in range(len(input_frames)):\n", + "# # Get the input frame and mask frame\n", + "# input_frame = input_frames[i]\n", + "# mask_frame = mask_frames[i]\n", + "\n", + "# # Replace the masked area of the input frame with the mask frame\n", + "# masked_frame = input_frame.copy()\n", + "# masked_frame[mask_frame == 255] = mask_frame[mask_frame == 255]\n", + "\n", + "# # Write the frame to the output video\n", + "# out.write(masked_frame)\n", + "\n", + "# # Release the video writer\n", + "# out.release()" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "Splitting video into frames: 100%|██████████| 397/397 [00:02<00:00, 195.57it/s]\n", + "frame loading (JPEG): 100%|██████████| 397/397 [00:14<00:00, 26.87it/s]\n", + "propagate in video: 100%|█████████▉| 396/397 [00:32<00:00, 12.22it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processed video saved at: tmp/20240827202744_da1ab9da-7b6a-4b23-83b8-a1c4474c4d97\n" + ] + } + ], + "source": [ + "import cv2\n", + "import os\n", + "import torch\n", + "import numpy as np\n", + "from PIL import Image\n", + "import supervision as sv\n", + "from tqdm import tqdm\n", + "from utils.video import generate_unique_name, create_directory, delete_directory\n", + "from utils.florence import load_florence_model, run_florence_inference, FLORENCE_OPEN_VOCABULARY_DETECTION_TASK\n", + "from utils.sam import load_sam_image_model, load_sam_video_model, run_sam_inference\n", + "\n", + "\n", + "class VideoProcessor:\n", + " def __init__(self, device=None):\n", + " self.device = device or torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", + " self._enable_mixed_precision()\n", + "\n", + " # Load models\n", + " self.florence_model, self.florence_processor = load_florence_model(device=self.device)\n", + " self.sam_image_model = load_sam_image_model(device=self.device)\n", + " self.sam_video_model = load_sam_video_model(device=self.device)\n", + "\n", + " # Set up mask annotator with a white color palette\n", + " self.mask_annotator = sv.MaskAnnotator(\n", + " color=sv.ColorPalette.from_hex([\"#FFFFFF\"]),\n", + " color_lookup=sv.ColorLookup.INDEX\n", + " )\n", + "\n", + " def _enable_mixed_precision(self):\n", + " torch.autocast(device_type=self.device.type, dtype=torch.bfloat16).__enter__()\n", + " if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:\n", + " torch.backends.cuda.matmul.allow_tf32 = True\n", + " torch.backends.cudnn.allow_tf32 = True\n", + "\n", + " def process_video(self, video_path, scale_factor, prompt):\n", + " self.scale_factor = scale_factor\n", + "\n", + " # Process video based on the prompt\n", + " output_video_path, session_path = self._process_prompt(video_path, prompt)\n", + "\n", + " # Create frames from the output video\n", + " self._create_frames(output_video_path, os.path.join(session_path, \"output_frames\"))\n", + " \n", + " # Delete the output video\n", + " os.remove(output_video_path)\n", + "\n", + " return session_path\n", + "\n", + " def _create_frames(self, video_path, output_dir):\n", + " create_directory(output_dir)\n", + " # get the video frame width and height\n", + " cap = cv2.VideoCapture(video_path)\n", + " frame_width = int(cap.get(3))\n", + " frame_height = int(cap.get(4))\n", + "\n", + " # open the video file\n", + " cap = cv2.VideoCapture(video_path)\n", + "\n", + " # Now save all the frames to output_frames folder\n", + " count = 0\n", + " while True:\n", + " ret, frame = cap.read()\n", + " if not ret:\n", + " break\n", + " frame = cv2.resize(frame, (frame_width, frame_height))\n", + " cv2.imwrite(f\"{output_dir}/frame_{count:04d}.jpg\", frame)\n", + " count += 1\n", + "\n", + "\n", + " def _process_prompt(self, video_path, prompt):\n", + " # Process the first frame with the prompt using the loaded models\n", + " frame_generator = sv.get_video_frames_generator(video_path)\n", + " frame = next(frame_generator)\n", + " frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n", + " \n", + " texts = [p.strip() for p 
in prompt.split(\",\")]\n", + " detections_list = []\n", + "\n", + " for text in texts:\n", + " _, result = run_florence_inference(\n", + " model=self.florence_model,\n", + " processor=self.florence_processor,\n", + " device=self.device,\n", + " image=frame,\n", + " task=FLORENCE_OPEN_VOCABULARY_DETECTION_TASK,\n", + " text=text\n", + " )\n", + " detections = sv.Detections.from_lmm(\n", + " lmm=sv.LMM.FLORENCE_2,\n", + " result=result,\n", + " resolution_wh=frame.size\n", + " )\n", + " detections = run_sam_inference(self.sam_image_model, frame, detections)\n", + " detections_list.append(detections)\n", + "\n", + " # Merge detections from all prompts\n", + " detections = sv.Detections.merge(detections_list)\n", + " detections = run_sam_inference(self.sam_image_model, frame, detections)\n", + "\n", + " # Check if any objects were detected\n", + " if len(detections.mask) == 0:\n", + " raise ValueError(f\"No objects of class {', '.join(texts)} found in the first frame of the video.\")\n", + "\n", + " # Generate unique name for video processing\n", + " name = generate_unique_name()\n", + " session_path = os.path.join(\"tmp\", name)\n", + " frame_directory_path = os.path.join(session_path, \"input_frames\")\n", + " create_directory(frame_directory_path)\n", + "\n", + " frames_sink = sv.ImageSink(\n", + " target_dir_path=frame_directory_path,\n", + " image_name_pattern=\"{:05d}.jpeg\"\n", + " )\n", + "\n", + " # Get video info and scale\n", + " video_info = sv.VideoInfo.from_video_path(video_path)\n", + " video_info.width = int(video_info.width * self.scale_factor)\n", + " video_info.height = int(video_info.height * self.scale_factor)\n", + "\n", + " # Split video into frames\n", + " frames_generator = sv.get_video_frames_generator(video_path)\n", + " with frames_sink:\n", + " for frame in tqdm(frames_generator, total=video_info.total_frames, desc=\"Splitting video into frames\"):\n", + " frame = sv.scale_image(frame, self.scale_factor)\n", + " frames_sink.save_image(frame)\n", + "\n", + " # Initialize SAM video model state\n", + " inference_state = self.sam_video_model.init_state(\n", + " video_path=frame_directory_path,\n", + " device=self.device\n", + " )\n", + "\n", + " # Add masks to inference state\n", + " for mask_index, mask in enumerate(detections.mask):\n", + " _, _, _ = self.sam_video_model.add_new_mask(\n", + " inference_state=inference_state,\n", + " frame_idx=0,\n", + " obj_id=mask_index,\n", + " mask=mask\n", + " )\n", + "\n", + " # Create output video path\n", + " output_video_path = os.path.join(\"tmp\", f\"{name}.mp4\")\n", + " frames_generator = sv.get_video_frames_generator(video_path)\n", + " masks_generator = self.sam_video_model.propagate_in_video(inference_state)\n", + "\n", + " # Process and annotate each frame\n", + " with sv.VideoSink(output_video_path, video_info=video_info) as sink:\n", + " for frame, (_, tracker_ids, mask_logits) in zip(frames_generator, masks_generator):\n", + " frame = sv.scale_image(frame, self.scale_factor)\n", + " masks = (mask_logits > 0.0).cpu().numpy().astype(bool)\n", + " if len(masks.shape) == 4:\n", + " masks = np.squeeze(masks, axis=1)\n", + "\n", + " detections = sv.Detections(\n", + " xyxy=sv.mask_to_xyxy(masks=masks),\n", + " mask=masks,\n", + " class_id=np.array(tracker_ids)\n", + " )\n", + "\n", + " annotated_frame = frame.copy()\n", + "\n", + " annotated_frame[:, :, :] = 0\n", + " \n", + " annotated_frame = self.mask_annotator.annotate(\n", + " scene=annotated_frame, detections=detections\n", + " )\n", + " annotated_frame = 
(annotated_frame > 0).astype(np.uint8) * 255\n", + " sink.write_frame(annotated_frame)\n", + "\n", + " return output_video_path, session_path\n", + "\n", + "\n", + "# Example usage:\n", + "video_processor = VideoProcessor()\n", + "output_video = video_processor.process_video(\n", + " video_path=\"videos/clip-07-camera-2.mp4\", \n", + " scale_factor=0.5, \n", + " prompt=\"players, basketball, rim, players shadow\"\n", + ")\n", + "print(f\"Processed video saved at: {output_video}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "vor", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/florence_sam/inference_propainter.py b/florence_sam/inference_propainter.py new file mode 100644 index 0000000000000000000000000000000000000000..d322be3903520fc5081aab5320baf1f49a6330ec --- /dev/null +++ b/florence_sam/inference_propainter.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +import os +import cv2 +import argparse +import imageio +import numpy as np +import scipy.ndimage +from PIL import Image +from tqdm import tqdm + +import torch +import torchvision + +from model.modules.flow_comp_raft import RAFT_bi +from model.recurrent_flow_completion import RecurrentFlowCompleteNet +from model.propainter import InpaintGenerator +from propainter_utils.download_util import load_file_from_url +from core.utils import to_tensors +from model.misc import get_device + +import warnings +warnings.filterwarnings("ignore") + +pretrain_model_url = 'https://github.com/sczhou/ProPainter/releases/download/v0.1.0/' + +def imwrite(img, file_path, params=None, auto_mkdir=True): + if auto_mkdir: + dir_name = os.path.abspath(os.path.dirname(file_path)) + os.makedirs(dir_name, exist_ok=True) + return cv2.imwrite(file_path, img, params) + + +# resize frames +def resize_frames(frames, size=None): + if size is not None: + out_size = size + process_size = (out_size[0]-out_size[0]%8, out_size[1]-out_size[1]%8) + frames = [f.resize(process_size) for f in frames] + else: + out_size = frames[0].size + process_size = (out_size[0]-out_size[0]%8, out_size[1]-out_size[1]%8) + if not out_size == process_size: + frames = [f.resize(process_size) for f in frames] + + return frames, process_size, out_size + + +# read frames from video +def read_frame_from_videos(frame_root): + if frame_root.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path + video_name = os.path.basename(frame_root)[:-4] + vframes, aframes, info = torchvision.io.read_video(filename=frame_root, pts_unit='sec') # RGB + frames = list(vframes.numpy()) + frames = [Image.fromarray(f) for f in frames] + fps = info['video_fps'] + else: + video_name = os.path.basename(frame_root) + frames = [] + fr_lst = sorted(os.listdir(frame_root)) + for fr in fr_lst: + frame = cv2.imread(os.path.join(frame_root, fr)) + frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + frames.append(frame) + fps = None + size = frames[0].size + + return frames, fps, size, video_name + + +def binary_mask(mask, th=0.1): + mask[mask>th] = 1 + mask[mask<=th] = 0 + return mask + + +# read frame-wise masks +def read_mask(mpath, length, size, 
flow_mask_dilates=8, mask_dilates=5): + masks_img = [] + masks_dilated = [] + flow_masks = [] + + if mpath.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path + masks_img = [Image.open(mpath)] + else: + mnames = sorted(os.listdir(mpath)) + for mp in mnames: + masks_img.append(Image.open(os.path.join(mpath, mp))) + + for mask_img in masks_img: + if size is not None: + mask_img = mask_img.resize(size, Image.NEAREST) + mask_img = np.array(mask_img.convert('L')) + + # Dilate 8 pixel so that all known pixel is trustworthy + if flow_mask_dilates > 0: + flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=flow_mask_dilates).astype(np.uint8) + else: + flow_mask_img = binary_mask(mask_img).astype(np.uint8) + # Close the small holes inside the foreground objects + # flow_mask_img = cv2.morphologyEx(flow_mask_img, cv2.MORPH_CLOSE, np.ones((21, 21),np.uint8)).astype(bool) + # flow_mask_img = scipy.ndimage.binary_fill_holes(flow_mask_img).astype(np.uint8) + flow_masks.append(Image.fromarray(flow_mask_img * 255)) + + if mask_dilates > 0: + mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=mask_dilates).astype(np.uint8) + else: + mask_img = binary_mask(mask_img).astype(np.uint8) + masks_dilated.append(Image.fromarray(mask_img * 255)) + + if len(masks_img) == 1: + flow_masks = flow_masks * length + masks_dilated = masks_dilated * length + + return flow_masks, masks_dilated + + +def extrapolation(video_ori, scale): + """Prepares the data for video outpainting. + """ + nFrame = len(video_ori) + imgW, imgH = video_ori[0].size + + # Defines new FOV. + imgH_extr = int(scale[0] * imgH) + imgW_extr = int(scale[1] * imgW) + imgH_extr = imgH_extr - imgH_extr % 8 + imgW_extr = imgW_extr - imgW_extr % 8 + H_start = int((imgH_extr - imgH) / 2) + W_start = int((imgW_extr - imgW) / 2) + + # Extrapolates the FOV for video. + frames = [] + for v in video_ori: + frame = np.zeros(((imgH_extr, imgW_extr, 3)), dtype=np.uint8) + frame[H_start: H_start + imgH, W_start: W_start + imgW, :] = v + frames.append(Image.fromarray(frame)) + + # Generates the mask for missing region. 
+ masks_dilated = [] + flow_masks = [] + + dilate_h = 4 if H_start > 10 else 0 + dilate_w = 4 if W_start > 10 else 0 + mask = np.ones(((imgH_extr, imgW_extr)), dtype=np.uint8) + + mask[H_start+dilate_h: H_start+imgH-dilate_h, + W_start+dilate_w: W_start+imgW-dilate_w] = 0 + flow_masks.append(Image.fromarray(mask * 255)) + + mask[H_start: H_start+imgH, W_start: W_start+imgW] = 0 + masks_dilated.append(Image.fromarray(mask * 255)) + + flow_masks = flow_masks * nFrame + masks_dilated = masks_dilated * nFrame + + return frames, flow_masks, masks_dilated, (imgW_extr, imgH_extr) + + +def get_ref_index(mid_neighbor_id, neighbor_ids, length, ref_stride=10, ref_num=-1): + ref_index = [] + if ref_num == -1: + for i in range(0, length, ref_stride): + if i not in neighbor_ids: + ref_index.append(i) + else: + start_idx = max(0, mid_neighbor_id - ref_stride * (ref_num // 2)) + end_idx = min(length, mid_neighbor_id + ref_stride * (ref_num // 2)) + for i in range(start_idx, end_idx, ref_stride): + if i not in neighbor_ids: + if len(ref_index) > ref_num: + break + ref_index.append(i) + return ref_index + + + +if __name__ == '__main__': + # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + device = get_device() + + parser = argparse.ArgumentParser() + parser.add_argument( + '-i', '--video', type=str, default='inputs/object_removal/bmx-trees', help='Path of the input video or image folder.') + parser.add_argument( + '-m', '--mask', type=str, default='inputs/object_removal/bmx-trees_mask', help='Path of the mask(s) or mask folder.') + parser.add_argument( + '-o', '--output', type=str, default='results', help='Output folder. Default: results') + parser.add_argument( + "--resize_ratio", type=float, default=1.0, help='Resize scale for processing video.') + parser.add_argument( + '--height', type=int, default=-1, help='Height of the processing video.') + parser.add_argument( + '--width', type=int, default=-1, help='Width of the processing video.') + parser.add_argument( + '--mask_dilation', type=int, default=4, help='Mask dilation for video and flow masking.') + parser.add_argument( + "--ref_stride", type=int, default=10, help='Stride of global reference frames.') + parser.add_argument( + "--neighbor_length", type=int, default=10, help='Length of local neighboring frames.') + parser.add_argument( + "--subvideo_length", type=int, default=80, help='Length of sub-video for long video inference.') + parser.add_argument( + "--raft_iter", type=int, default=20, help='Iterations for RAFT inference.') + parser.add_argument( + '--mode', default='video_inpainting', choices=['video_inpainting', 'video_outpainting'], help="Modes: video_inpainting / video_outpainting") + parser.add_argument( + '--scale_h', type=float, default=1.0, help='Outpainting scale of height for video_outpainting mode.') + parser.add_argument( + '--scale_w', type=float, default=1.2, help='Outpainting scale of width for video_outpainting mode.') + parser.add_argument( + '--save_fps', type=int, default=24, help='Frame per second. Default: 24') + parser.add_argument( + '--save_frames', action='store_true', help='Save output frames. Default: False') + parser.add_argument( + '--fp16', action='store_true', help='Use fp16 (half precision) during inference. 
Default: fp32 (single precision).') + + args = parser.parse_args() + + # Use fp16 precision during inference to reduce running memory cost + use_half = True if args.fp16 else False + if device == torch.device('cpu'): + use_half = False + + frames, fps, size, video_name = read_frame_from_videos(args.video) + if not args.width == -1 and not args.height == -1: + size = (args.width, args.height) + if not args.resize_ratio == 1.0: + size = (int(args.resize_ratio * size[0]), int(args.resize_ratio * size[1])) + + frames, size, out_size = resize_frames(frames, size) + + fps = args.save_fps if fps is None else fps + save_root = os.path.join(args.output, video_name) + if not os.path.exists(save_root): + os.makedirs(save_root, exist_ok=True) + + if args.mode == 'video_inpainting': + frames_len = len(frames) + flow_masks, masks_dilated = read_mask(args.mask, frames_len, size, + flow_mask_dilates=args.mask_dilation, + mask_dilates=args.mask_dilation) + w, h = size + elif args.mode == 'video_outpainting': + assert args.scale_h is not None and args.scale_w is not None, 'Please provide a outpainting scale (s_h, s_w).' + frames, flow_masks, masks_dilated, size = extrapolation(frames, (args.scale_h, args.scale_w)) + w, h = size + else: + raise NotImplementedError + + # for saving the masked frames or video + masked_frame_for_save = [] + for i in range(len(frames)): + mask_ = np.expand_dims(np.array(masks_dilated[i]),2).repeat(3, axis=2)/255. + img = np.array(frames[i]) + green = np.zeros([h, w, 3]) + green[:,:,1] = 255 + alpha = 0.6 + # alpha = 1.0 + fuse_img = (1-alpha)*img + alpha*green + fuse_img = mask_ * fuse_img + (1-mask_)*img + masked_frame_for_save.append(fuse_img.astype(np.uint8)) + + frames_inp = [np.array(f).astype(np.uint8) for f in frames] + frames = to_tensors()(frames).unsqueeze(0) * 2 - 1 + flow_masks = to_tensors()(flow_masks).unsqueeze(0) + masks_dilated = to_tensors()(masks_dilated).unsqueeze(0) + frames, flow_masks, masks_dilated = frames.to(device), flow_masks.to(device), masks_dilated.to(device) + + + ############################################## + # set up RAFT and flow competition model + ############################################## + ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'raft-things.pth'), + model_dir='weights', progress=True, file_name=None) + fix_raft = RAFT_bi(ckpt_path, device) + + ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'recurrent_flow_completion.pth'), + model_dir='weights', progress=True, file_name=None) + fix_flow_complete = RecurrentFlowCompleteNet(ckpt_path) + for p in fix_flow_complete.parameters(): + p.requires_grad = False + fix_flow_complete.to(device) + fix_flow_complete.eval() + + + ############################################## + # set up ProPainter model + ############################################## + ckpt_path = load_file_from_url(url=os.path.join(pretrain_model_url, 'ProPainter.pth'), + model_dir='weights', progress=True, file_name=None) + model = InpaintGenerator(model_path=ckpt_path).to(device) + model.eval() + + + ############################################## + # ProPainter inference + ############################################## + video_length = frames.size(1) + print(f'\nProcessing: {video_name} [{video_length} frames]...') + with torch.no_grad(): + # ---- compute flow ---- + if frames.size(-1) <= 640: + short_clip_len = 12 + elif frames.size(-1) <= 720: + short_clip_len = 8 + elif frames.size(-1) <= 1280: + short_clip_len = 4 + else: + short_clip_len = 2 + + # use fp32 for RAFT + if 
frames.size(1) > short_clip_len: + gt_flows_f_list, gt_flows_b_list = [], [] + for f in range(0, video_length, short_clip_len): + end_f = min(video_length, f + short_clip_len) + if f == 0: + flows_f, flows_b = fix_raft(frames[:,f:end_f], iters=args.raft_iter) + else: + flows_f, flows_b = fix_raft(frames[:,f-1:end_f], iters=args.raft_iter) + + gt_flows_f_list.append(flows_f) + gt_flows_b_list.append(flows_b) + torch.cuda.empty_cache() + + gt_flows_f = torch.cat(gt_flows_f_list, dim=1) + gt_flows_b = torch.cat(gt_flows_b_list, dim=1) + gt_flows_bi = (gt_flows_f, gt_flows_b) + else: + gt_flows_bi = fix_raft(frames, iters=args.raft_iter) + torch.cuda.empty_cache() + + + if use_half: + frames, flow_masks, masks_dilated = frames.half(), flow_masks.half(), masks_dilated.half() + gt_flows_bi = (gt_flows_bi[0].half(), gt_flows_bi[1].half()) + fix_flow_complete = fix_flow_complete.half() + model = model.half() + + + # ---- complete flow ---- + flow_length = gt_flows_bi[0].size(1) + if flow_length > args.subvideo_length: + pred_flows_f, pred_flows_b = [], [] + pad_len = 5 + for f in range(0, flow_length, args.subvideo_length): + s_f = max(0, f - pad_len) + e_f = min(flow_length, f + args.subvideo_length + pad_len) + pad_len_s = max(0, f) - s_f + pad_len_e = e_f - min(flow_length, f + args.subvideo_length) + pred_flows_bi_sub, _ = fix_flow_complete.forward_bidirect_flow( + (gt_flows_bi[0][:, s_f:e_f], gt_flows_bi[1][:, s_f:e_f]), + flow_masks[:, s_f:e_f+1]) + pred_flows_bi_sub = fix_flow_complete.combine_flow( + (gt_flows_bi[0][:, s_f:e_f], gt_flows_bi[1][:, s_f:e_f]), + pred_flows_bi_sub, + flow_masks[:, s_f:e_f+1]) + + pred_flows_f.append(pred_flows_bi_sub[0][:, pad_len_s:e_f-s_f-pad_len_e]) + pred_flows_b.append(pred_flows_bi_sub[1][:, pad_len_s:e_f-s_f-pad_len_e]) + torch.cuda.empty_cache() + + pred_flows_f = torch.cat(pred_flows_f, dim=1) + pred_flows_b = torch.cat(pred_flows_b, dim=1) + pred_flows_bi = (pred_flows_f, pred_flows_b) + else: + pred_flows_bi, _ = fix_flow_complete.forward_bidirect_flow(gt_flows_bi, flow_masks) + pred_flows_bi = fix_flow_complete.combine_flow(gt_flows_bi, pred_flows_bi, flow_masks) + torch.cuda.empty_cache() + + + # ---- image propagation ---- + masked_frames = frames * (1 - masks_dilated) + subvideo_length_img_prop = min(100, args.subvideo_length) # ensure a minimum of 100 frames for image propagation + if video_length > subvideo_length_img_prop: + updated_frames, updated_masks = [], [] + pad_len = 10 + for f in range(0, video_length, subvideo_length_img_prop): + s_f = max(0, f - pad_len) + e_f = min(video_length, f + subvideo_length_img_prop + pad_len) + pad_len_s = max(0, f) - s_f + pad_len_e = e_f - min(video_length, f + subvideo_length_img_prop) + + b, t, _, _, _ = masks_dilated[:, s_f:e_f].size() + pred_flows_bi_sub = (pred_flows_bi[0][:, s_f:e_f-1], pred_flows_bi[1][:, s_f:e_f-1]) + prop_imgs_sub, updated_local_masks_sub = model.img_propagation(masked_frames[:, s_f:e_f], + pred_flows_bi_sub, + masks_dilated[:, s_f:e_f], + 'nearest') + updated_frames_sub = frames[:, s_f:e_f] * (1 - masks_dilated[:, s_f:e_f]) + \ + prop_imgs_sub.view(b, t, 3, h, w) * masks_dilated[:, s_f:e_f] + updated_masks_sub = updated_local_masks_sub.view(b, t, 1, h, w) + + updated_frames.append(updated_frames_sub[:, pad_len_s:e_f-s_f-pad_len_e]) + updated_masks.append(updated_masks_sub[:, pad_len_s:e_f-s_f-pad_len_e]) + torch.cuda.empty_cache() + + updated_frames = torch.cat(updated_frames, dim=1) + updated_masks = torch.cat(updated_masks, dim=1) + else: + b, t, _, _, _ = 
masks_dilated.size() + prop_imgs, updated_local_masks = model.img_propagation(masked_frames, pred_flows_bi, masks_dilated, 'nearest') + updated_frames = frames * (1 - masks_dilated) + prop_imgs.view(b, t, 3, h, w) * masks_dilated + updated_masks = updated_local_masks.view(b, t, 1, h, w) + torch.cuda.empty_cache() + + + ori_frames = frames_inp + comp_frames = [None] * video_length + + neighbor_stride = args.neighbor_length // 2 + if video_length > args.subvideo_length: + ref_num = args.subvideo_length // args.ref_stride + else: + ref_num = -1 + + # ---- feature propagation + transformer ---- + for f in tqdm(range(0, video_length, neighbor_stride)): + neighbor_ids = [ + i for i in range(max(0, f - neighbor_stride), + min(video_length, f + neighbor_stride + 1)) + ] + ref_ids = get_ref_index(f, neighbor_ids, video_length, args.ref_stride, ref_num) + selected_imgs = updated_frames[:, neighbor_ids + ref_ids, :, :, :] + selected_masks = masks_dilated[:, neighbor_ids + ref_ids, :, :, :] + selected_update_masks = updated_masks[:, neighbor_ids + ref_ids, :, :, :] + selected_pred_flows_bi = (pred_flows_bi[0][:, neighbor_ids[:-1], :, :, :], pred_flows_bi[1][:, neighbor_ids[:-1], :, :, :]) + + with torch.no_grad(): + # 1.0 indicates mask + l_t = len(neighbor_ids) + + # pred_img = selected_imgs # results of image propagation + pred_img = model(selected_imgs, selected_pred_flows_bi, selected_masks, selected_update_masks, l_t) + + pred_img = pred_img.view(-1, 3, h, w) + + pred_img = (pred_img + 1) / 2 + pred_img = pred_img.cpu().permute(0, 2, 3, 1).numpy() * 255 + binary_masks = masks_dilated[0, neighbor_ids, :, :, :].cpu().permute( + 0, 2, 3, 1).numpy().astype(np.uint8) + for i in range(len(neighbor_ids)): + idx = neighbor_ids[i] + img = np.array(pred_img[i]).astype(np.uint8) * binary_masks[i] \ + + ori_frames[idx] * (1 - binary_masks[i]) + if comp_frames[idx] is None: + comp_frames[idx] = img + else: + comp_frames[idx] = comp_frames[idx].astype(np.float32) * 0.5 + img.astype(np.float32) * 0.5 + + comp_frames[idx] = comp_frames[idx].astype(np.uint8) + + torch.cuda.empty_cache() + + # save each frame + if args.save_frames: + for idx in range(video_length): + f = comp_frames[idx] + f = cv2.resize(f, out_size, interpolation = cv2.INTER_CUBIC) + f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB) + img_save_root = os.path.join(save_root, 'frames', str(idx).zfill(4)+'.png') + imwrite(f, img_save_root) + + + # if args.mode == 'video_outpainting': + # comp_frames = [i[10:-10,10:-10] for i in comp_frames] + # masked_frame_for_save = [i[10:-10,10:-10] for i in masked_frame_for_save] + + # save videos frame + masked_frame_for_save = [cv2.resize(f, out_size) for f in masked_frame_for_save] + comp_frames = [cv2.resize(f, out_size) for f in comp_frames] + imageio.mimwrite(os.path.join(save_root, 'masked_in.mp4'), masked_frame_for_save, fps=fps, quality=7) + imageio.mimwrite(os.path.join(save_root, 'inpaint_out.mp4'), comp_frames, fps=fps, quality=7) + + print(f'\nAll results are saved in {save_root}') + + torch.cuda.empty_cache() \ No newline at end of file diff --git a/florence_sam/main.py b/florence_sam/main.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5f35cd3d8611971235f349f266fd8bc2be7cd7 --- /dev/null +++ b/florence_sam/main.py @@ -0,0 +1,12 @@ +import os +from pipeline import video_processor +from propainter_pipeline import process_video + +session_path, fps = video_processor.process_video( + video_path="/home/ubuntu/ahmedghani/clip-07-camera-2.mp4", + scale_factor=0.5, + prompt="players, 
basketball, rim, players shadow" +) +print(f"Processed video saved at: {session_path}") +print(f"FPS: {fps}") +process_video(video=os.path.join(session_path, "input_frames"), mask=os.path.join(session_path, "output_frames"), resize_ratio=0.5, save_fps=int(fps), fp16=True) \ No newline at end of file diff --git a/florence_sam/model/__init__.py b/florence_sam/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/florence_sam/model/__init__.py @@ -0,0 +1 @@ + diff --git a/florence_sam/model/canny/canny_filter.py b/florence_sam/model/canny/canny_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..3d16195c9355b506e22a2ba527006adb9c541a7c --- /dev/null +++ b/florence_sam/model/canny/canny_filter.py @@ -0,0 +1,256 @@ +import math +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .gaussian import gaussian_blur2d +from .kernels import get_canny_nms_kernel, get_hysteresis_kernel +from .sobel import spatial_gradient + +def rgb_to_grayscale(image, rgb_weights = None): + if len(image.shape) < 3 or image.shape[-3] != 3: + raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}") + + if rgb_weights is None: + # 8 bit images + if image.dtype == torch.uint8: + rgb_weights = torch.tensor([76, 150, 29], device=image.device, dtype=torch.uint8) + # floating point images + elif image.dtype in (torch.float16, torch.float32, torch.float64): + rgb_weights = torch.tensor([0.299, 0.587, 0.114], device=image.device, dtype=image.dtype) + else: + raise TypeError(f"Unknown data type: {image.dtype}") + else: + # is tensor that we make sure is in the same device/dtype + rgb_weights = rgb_weights.to(image) + + # unpack the color image channels with RGB order + r = image[..., 0:1, :, :] + g = image[..., 1:2, :, :] + b = image[..., 2:3, :, :] + + w_r, w_g, w_b = rgb_weights.unbind() + return w_r * r + w_g * g + w_b * b + + +def canny( + input: torch.Tensor, + low_threshold: float = 0.1, + high_threshold: float = 0.2, + kernel_size: Tuple[int, int] = (5, 5), + sigma: Tuple[float, float] = (1, 1), + hysteresis: bool = True, + eps: float = 1e-6, +) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Find edges of the input image and filters them using the Canny algorithm. + + .. image:: _static/img/canny.png + + Args: + input: input image tensor with shape :math:`(B,C,H,W)`. + low_threshold: lower threshold for the hysteresis procedure. + high_threshold: upper threshold for the hysteresis procedure. + kernel_size: the size of the kernel for the gaussian blur. + sigma: the standard deviation of the kernel for the gaussian blur. + hysteresis: if True, applies the hysteresis edge tracking. + Otherwise, the edges are divided between weak (0.5) and strong (1) edges. + eps: regularization number to avoid NaN during backprop. + + Returns: + - the canny edge magnitudes map, shape of :math:`(B,1,H,W)`. + - the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`. + + .. note:: + See a working example `here