# This file is adapted from https://github.com/facebookresearch/CutLER/blob/077938c626341723050a1971107af552a6ca6697/cutler/demo/demo.py
# The original license file is the file named LICENSE.CutLER in this repo.
import argparse
import multiprocessing as mp
import pathlib
import shlex
import subprocess
import sys
import numpy as np
import torch
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
sys.path.append('CutLER/cutler/')
sys.path.append('CutLER/cutler/demo')
from config import add_cutler_config
from predictor import VisualizationDemo
mp.set_start_method('spawn', force=True)
UNSUPERVISED_MODELS = {
    'Unsupervised': {
        'config_path':
        'CutLER/cutler/model_zoo/configs/CutLER-ImageNet/cascade_mask_rcnn_R_50_FPN.yaml',
        'weight_url':
        'http://dl.fbaipublicfiles.com/cutler/checkpoints/cutler_cascade_final.pth',
    }
}
SEMI_SUPERVISED_MODELS = {
    f'Semi-supervised with COCO ({perc}%)': {
        'config_path':
        f'CutLER/cutler/model_zoo/configs/COCO-Semisupervised/cascade_mask_rcnn_R_50_FPN_{perc}perc.yaml',
        'weight_url':
        f'http://dl.fbaipublicfiles.com/cutler/checkpoints/cutler_semi_{perc}perc.pth',
    }
    for perc in [1, 2, 5, 10, 20, 30, 40, 50, 60, 80]
}
FULLY_SUPERVISED_MODELS = {
    'Fully-supervised with COCO': {
        'config_path':
        'CutLER/cutler/model_zoo/configs/COCO-Semisupervised/cascade_mask_rcnn_R_50_FPN_100perc.yaml',
        'weight_url':
        'http://dl.fbaipublicfiles.com/cutler/checkpoints/cutler_fully_100perc.pth',
    }
}
def setup_cfg(args):
    # Load config from file and command-line arguments.
    cfg = get_cfg()
    add_cutler_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Disable SyncBN normalization when running on a CPU: SyncBN is not
    # supported on CPU and can cause errors, so switch to plain BN instead.
    if cfg.MODEL.DEVICE == 'cpu' and cfg.MODEL.RESNETS.NORM == 'SyncBN':
        cfg.MODEL.RESNETS.NORM = 'BN'
        cfg.MODEL.FPN.NORM = 'BN'
    # Set score threshold for builtin models.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    cfg.freeze()
    return cfg
def get_parser():
    parser = argparse.ArgumentParser(
        description='Detectron2 demo for builtin configs')
    parser.add_argument(
        '--config-file',
        default=
        'model_zoo/configs/CutLER-ImageNet/cascade_mask_rcnn_R_50_FPN.yaml',
        metavar='FILE',
        help='path to config file',
    )
    parser.add_argument('--webcam',
                        action='store_true',
                        help='Take inputs from webcam.')
    parser.add_argument('--video-input', help='Path to video file.')
    parser.add_argument(
        '--input',
        nargs='+',
        help='A list of space separated input images; '
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument(
        '--output',
        help='A file or directory to save output visualizations. '
        'If not given, will show output in an OpenCV window.',
    )
    parser.add_argument(
        '--confidence-threshold',
        type=float,
        default=0.35,
        help='Minimum score for instance predictions to be shown',
    )
    parser.add_argument(
        '--opts',
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
class Model:
    MODEL_DICT = UNSUPERVISED_MODELS | SEMI_SUPERVISED_MODELS | FULLY_SUPERVISED_MODELS

    def __init__(self):
        self.model_dir = pathlib.Path('checkpoints')
        self.model_dir.mkdir(exist_ok=True)

    def load_model(self, model_name: str,
                   score_threshold: float) -> VisualizationDemo:
        model_info = self.MODEL_DICT[model_name]
        weight_url = model_info['weight_url']
        weight_path = self.model_dir / weight_url.split('/')[-1]
        # Download the checkpoint on first use.
        if not weight_path.exists():
            weight_path.parent.mkdir(exist_ok=True)
            subprocess.run(shlex.split(f'wget {weight_url} -O {weight_path}'))
        # Build the Detectron2 config through the same argparse path as the
        # original demo script, then wrap it in a VisualizationDemo.
        arg_list = [
            '--config-file', model_info['config_path'],
            '--confidence-threshold',
            str(score_threshold), '--opts', 'MODEL.WEIGHTS',
            weight_path.as_posix(), 'MODEL.DEVICE',
            'cuda:0' if torch.cuda.is_available() else 'cpu'
        ]
        args = get_parser().parse_args(arg_list)
        cfg = setup_cfg(args)
        return VisualizationDemo(cfg)

    def __call__(self,
                 image_path: str,
                 model_name: str,
                 score_threshold: float = 0.5) -> np.ndarray:
        model = self.load_model(model_name, score_threshold)
        image = read_image(image_path, format='BGR')
        _, res = model.run_on_image(image)
        return res.get_image()
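

# Example usage: a minimal sketch of how the Model wrapper above could be run
# directly. The image path 'examples/sample.jpg' is an assumption; substitute
# any local image file.
if __name__ == '__main__':
    model = Model()
    # Run CutLER with the unsupervised checkpoint; the result is the
    # visualized detections as an RGB numpy array of shape (H, W, 3).
    visualization = model('examples/sample.jpg',
                          'Unsupervised',
                          score_threshold=0.5)
    print(visualization.shape)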