Commit 7206ed3
Duplicate from DeepLabCut/MegaDetector_DeepLabCut
Co-authored-by: vic chiang <[email protected]>
- .gitattributes +32 -0
- .gitignore +3 -0
- DLC_models/__pycache__/models.cpython-310.pyc +0 -0
- DLC_models/download_utils.py +61 -0
- DLC_models/models.py +60 -0
- DLC_models/pretrained_model_urls.yaml +19 -0
- DLC_models/read.md +3 -0
- MD_models/md_v5a.0.0.pt +3 -0
- MD_models/md_v5b.0.0.pt +3 -0
- MD_models/read.md +0 -0
- README.md +13 -0
- app.py +179 -0
- detection_utils.py +116 -0
- examples/cat.jpg +0 -0
- examples/dog.jpeg +0 -0
- examples/monkey_face.jpeg +0 -0
- examples/monkey_full.jpg +0 -0
- examples/read.md +0 -0
- fonts/Amiko-Regular.ttf +0 -0
- fonts/LoveNature.otf +0 -0
- fonts/PainterDecorator.otf +0 -0
- fonts/UncialAnimals.ttf +0 -0
- fonts/ZEN.TTF +0 -0
- fonts/read.md +0 -0
- megadet_model/md_v5a.0.0.pt +3 -0
- megadet_model/md_v5b.0.0.pt +3 -0
- requirements.txt +17 -0
- save_results.py +56 -0
- ui_utils.py +81 -0
- viz_utils.py +190 -0
.gitattributes
ADDED
@@ -0,0 +1,32 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+example/monkey_face.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,3 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+model/__pycache__/
DLC_models/__pycache__/models.cpython-310.pyc
ADDED
Binary file (2.29 kB)
DLC_models/download_utils.py
ADDED
@@ -0,0 +1,61 @@
+import urllib.request
+import tarfile
+from tqdm import tqdm
+import os
+import yaml
+from ruamel.yaml import YAML
+
+def read_plainconfig(configname):
+    if not os.path.exists(configname):
+        raise FileNotFoundError(
+            f"Config {configname} is not found. Please make sure that the file exists."
+        )
+    with open(configname) as file:
+        return YAML().load(file)
+
+def DownloadModel(modelname,
+                  target_dir):
+    """
+    Downloads a DeepLabCut Model Zoo Project
+    """
+
+    def show_progress(count, block_size, total_size):
+        pbar.update(block_size)
+
+    def tarfilenamecutting(tarf):
+        """' auxfun to extract folder path
+        ie. /xyz-trainsetxyshufflez/
+        """
+        for memberid, member in enumerate(tarf.getmembers()):
+            if memberid == 0:
+                parent = str(member.path)
+                l = len(parent) + 1
+            if member.path.startswith(parent):
+                member.path = member.path[l:]
+                yield member
+
+    neturls = read_plainconfig("DLC_models/pretrained_model_urls.yaml")  #FIXME
+
+    if modelname in neturls.keys():
+        url = neturls[modelname]
+        print(url)
+        response = urllib.request.urlopen(url)
+        print(
+            "Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(
+                url
+            )
+        )
+        total_size = int(response.getheader("Content-Length"))
+        pbar = tqdm(unit="B", total=total_size, position=0)
+        filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
+        with tarfile.open(filename, mode="r:gz") as tar:
+            tar.extractall(target_dir, members=tarfilenamecutting(tar))
+    else:
+        models = [
+            fn
+            for fn in neturls.keys()
+            if "resnet_" not in fn and "mobilenet_" not in fn
+        ]
+        print("Model does not exist: ", modelname)
+        print("Pick one of the following: ", models)
+    return target_dir
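For reference, a minimal usage sketch for `DownloadModel` (assuming it runs from the repo root so the hard-coded YAML path resolves, and that the Hugging Face URLs are reachable; the model name and target directory below match the `primate_face` entry that app.py uses):

```python
# Sketch, not part of the commit: fetch and unpack one Model Zoo project.
from DLC_models.download_utils import DownloadModel

# "primate_face" is a key in DLC_models/pretrained_model_urls.yaml;
# the target dir matches DLC_models_dict in app.py.
model_dir = DownloadModel("primate_face", "DLC_models/DLC_FacialLandmarks/")
print("Model extracted to:", model_dir)  # DownloadModel returns target_dir
```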
DLC_models/models.py
ADDED
@@ -0,0 +1,60 @@
+import urllib.request
+import tarfile
+from tqdm import tqdm
+import os
+import yaml
+from ruamel.yaml import YAML
+
+def read_plainconfig(configname):
+    if not os.path.exists(configname):
+        raise FileNotFoundError(
+            f"Config {configname} is not found. Please make sure that the file exists."
+        )
+    with open(configname) as file:
+        return YAML().load(file)
+
+def DownloadModel(modelname, target_dir):
+    """
+    Downloads a DeepLabCut Model Zoo Project
+    """
+
+    def show_progress(count, block_size, total_size):
+        pbar.update(block_size)
+
+    def tarfilenamecutting(tarf):
+        """' auxfun to extract folder path
+        ie. /xyz-trainsetxyshufflez/
+        """
+        for memberid, member in enumerate(tarf.getmembers()):
+            if memberid == 0:
+                parent = str(member.path)
+                l = len(parent) + 1
+            if member.path.startswith(parent):
+                member.path = member.path[l:]
+                yield member
+
+    neturls = read_plainconfig("./model/pretrained_model_urls.yaml")  #FIXME
+
+    if modelname in neturls.keys():
+        url = neturls[modelname]
+        print(url)
+        response = urllib.request.urlopen(url)
+        print(
+            "Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(
+                url
+            )
+        )
+        total_size = int(response.getheader("Content-Length"))
+        pbar = tqdm(unit="B", total=total_size, position=0)
+        filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
+        with tarfile.open(filename, mode="r:gz") as tar:
+            tar.extractall(target_dir, members=tarfilenamecutting(tar))
+    else:
+        models = [
+            fn
+            for fn in neturls.keys()
+            if "resnet_" not in fn and "mobilenet_" not in fn
+        ]
+        print("Model does not exist: ", modelname)
+        print("Pick one of the following: ", models)
+    return target_dir
DLC_models/pretrained_model_urls.yaml
ADDED
@@ -0,0 +1,19 @@
+#Model Zoo from where the Charles flow(s)
+full_human: https://huggingface.co/mwmathis/DeepLabCutModelZoo-DLC_human_fullbody_resnet_101/resolve/main/DLC_human_fullbody_resnet_101.tar.gz
+#full_dog: https://huggingface.co/mwmathis/DeepLabCutModelZoo-DLC_human_fullbody_resnet_101/resolve/main/DLC_human_fullbody_resnet_101.tar.gz
+#full_cat: https://huggingface.co/mwmathis/DeepLabCutModelZoo-DLC_human_fullbody_resnet_101/resolve/main/DLC_human_fullbody_resnet_101.tar.gz
+primate_face: https://huggingface.co/mwmathis/DeepLabCutModelZoo-primate_face/resolve/main/DLC_primate_face_resnet_50_iteration-1_shuffle-1.tar.gz
+mouse_pupil_vclose: https://huggingface.co/mwmathis/DeepLabCutModelZoo-mouse_pupil_vclose/resolve/main/DLC_mouse_pupil_vclose_resnet_50_iteration-0_shuffle-1.tar.gz
+horse_sideview: https://huggingface.co/mwmathis/DeepLabCutModelZoo-horse_sideview/resolve/main/DLC_Horses_resnet_50_iteration-1_shuffle-1.tar.gz
+full_macaque: https://huggingface.co/mwmathis/DeepLabCutModelZoo-macaque_full/resolve/main/DLC_macaque_full_resnet50.tar.gz
+#full_cheetah: https://huggingface.co/mwmathis/DeepLabCutModelZoo-macaque_full/resolve/main/DLC_macaque_full_resnet50.tar.gz
+
+#Old URLs for the models
+#full_human: http://deeplabcut.rowland.harvard.edu/models/DLC_human_fullbody_resnet_101.tar.gz
+#full_dog: http://deeplabcut.rowland.harvard.edu/models/DLC_Dog_resnet_50_iteration-0_shuffle-0.tar.gz
+#full_cat: http://deeplabcut.rowland.harvard.edu/models/DLC_Cat_resnet_50_iteration-0_shuffle-0.tar.gz
+#primate_face: http://deeplabcut.rowland.harvard.edu/models/DLC_primate_face_resnet_50_iteration-1_shuffle-1.tar.gz
+#mouse_pupil_vclose: http://deeplabcut.rowland.harvard.edu/models/DLC_mouse_pupil_vclose_resnet_50_iteration-0_shuffle-1.tar.gz
+#horse_sideview: http://deeplabcut.rowland.harvard.edu/models/DLC_Horses_resnet_50_iteration-1_shuffle-1.tar.gz
+#full_macaque: http://deeplabcut.rowland.harvard.edu/models/DLC_macaque_full_resnet50.tar.gz
+#full_cheetah: http://deeplabcut.rowland.harvard.edu/models/DLC_full_cheetah_resnet_152.tar.
DLC_models/read.md
ADDED
@@ -0,0 +1,3 @@
+These models are part of the DeepLabCut Model zoo. For details, model citations, and further information, please see: modelzoo.deeplabcut.org
+
+https://arxiv.org/abs/2203.07436v1 & http://modelzoo.deeplabcut.org
MD_models/md_v5a.0.0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94e88fe97c8050f2e3d0cc4cb4f64729d639d74312dcbe2f74f8eecd3b01b276
+size 280766885
MD_models/md_v5b.0.0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7026a704c3e0566a199e38a09363200f0f39fb8f804190fd15c8a60ab3beabd
+size 280766885
MD_models/read.md
ADDED
File without changes
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: MegaDetector + DeepLabCut
+emoji: 🦣
+colorFrom: gray
+colorTo: purple
+sdk: gradio
+sdk_version: 3.1.4
+app_file: app.py
+pinned: false
+duplicated_from: DeepLabCut/MegaDetector_DeepLabCut
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,179 @@
+# Built from https://huggingface.co/spaces/hlydecker/MegaDetector_v5
+# Built from https://huggingface.co/spaces/sofmi/MegaDetector_DLClive/blob/main/app.py
+# Built from https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels/blob/main/app.py
+
+import os
+import yaml
+import numpy as np
+from matplotlib import cm
+import gradio as gr
+
+from PIL import Image, ImageColor, ImageFont, ImageDraw
+# check git lfs pull!!
+from DLC_models.download_utils import DownloadModel
+from dlclive import DLCLive, Processor
+
+from viz_utils import save_results_as_json, draw_keypoints_on_image, draw_bbox_w_text, save_results_only_dlc
+from detection_utils import predict_md, crop_animal_detections, predict_dlc
+from ui_utils import gradio_inputs_for_MD_DLC, gradio_outputs_for_MD_DLC, gradio_description_and_examples
+
+# import pdb
+#########################################
+# Input params - Global vars
+
+MD_models_dict = {'md_v5a': "MD_models/md_v5a.0.0.pt",
+                  'md_v5b': "MD_models/md_v5b.0.0.pt"}
+
+# DLC models target dirs
+DLC_models_dict = {#'full_cat': "DLC_models/DLC_Cat/",
+                   #'full_dog': "DLC_models/DLC_Dog/",
+                   'full_human': "DLC_models/DLC_human_dancing/",
+                   'full_macaque': 'DLC_models/DLC_monkey/',
+                   'primate_face': "DLC_models/DLC_FacialLandmarks/"}
+
+
+# FONTS = {'amiko': "fonts/Amiko-Regular.ttf",
+#          'nature': "fonts/LoveNature.otf",
+#          'painter': "fonts/PainterDecorator.otf",
+#          'animals': "fonts/UncialAnimals.ttf",
+#          'zen': "fonts/ZEN.TTF"}
+#####################################################
+def predict_pipeline(img_input,
+                     mega_model_input,
+                     dlc_model_input_str,
+                     flag_dlc_only,
+                     flag_show_str_labels,
+                     bbox_likelihood_th,
+                     kpts_likelihood_th,
+                     font_style,
+                     font_size,
+                     keypt_color,
+                     marker_size,
+                     ):
+
+    if not flag_dlc_only:
+        ############################################################
+        # ### Run Megadetector
+        md_results = predict_md(img_input,
+                                MD_models_dict[mega_model_input], #mega_model_input,
+                                size=640) #Image.fromarray(results.imgs[0])
+
+        ################################################################
+        # Obtain animal crops for bboxes with confidence above th
+        list_crops = crop_animal_detections(img_input,
+                                            md_results,
+                                            bbox_likelihood_th)
+
+    ############################################################
+    ## Get DLC model and label map
+
+    # If model is found: do not download (previous execution is likely within same day)
+    # TODO: can we ask the user whether to reload dlc model if a directory is found?
+    if os.path.isdir(DLC_models_dict[dlc_model_input_str]) and \
+       len(os.listdir(DLC_models_dict[dlc_model_input_str])) > 0:
+        path_to_DLCmodel = DLC_models_dict[dlc_model_input_str]
+    else:
+        path_to_DLCmodel = DownloadModel(dlc_model_input_str,
+                                         DLC_models_dict[dlc_model_input_str])
+
+    # extract map label ids to strings
+    pose_cfg_path = os.path.join(DLC_models_dict[dlc_model_input_str],
+                                 'pose_cfg.yaml')
+    with open(pose_cfg_path, "r") as stream:
+        pose_cfg_dict = yaml.safe_load(stream)
+    map_label_id_to_str = dict([(k, v) for k, v in zip([el[0] for el in pose_cfg_dict['all_joints']], # pose_cfg_dict['all_joints'] is a list of one-element lists
+                                                       pose_cfg_dict['all_joints_names'])])
+
+    ##############################################################
+    # Run DLC and visualise results
+    dlc_proc = Processor()
+
+    # if required: ignore MD crops and run DLC on full image [mostly for testing]
+    if flag_dlc_only:
+        # compute kpts on input img
+        list_kpts_per_crop = predict_dlc([np.asarray(img_input)],
+                                         kpts_likelihood_th,
+                                         path_to_DLCmodel,
+                                         dlc_proc)
+        # draw kpts on input img #fix!
+        draw_keypoints_on_image(img_input,
+                                list_kpts_per_crop[0], # a numpy array with shape [num_keypoints, 2].
+                                map_label_id_to_str,
+                                flag_show_str_labels,
+                                use_normalized_coordinates=False,
+                                font_style=font_style,
+                                font_size=font_size,
+                                keypt_color=keypt_color,
+                                marker_size=marker_size)
+
+        download_file = save_results_only_dlc(list_kpts_per_crop[0], map_label_id_to_str, dlc_model_input_str)
+
+        return img_input, download_file
+
+    else:
+        # Compute kpts for each crop
+        list_kpts_per_crop = predict_dlc(list_crops,
+                                         kpts_likelihood_th,
+                                         path_to_DLCmodel,
+                                         dlc_proc)
+
+        # resize input image to match megadetector output
+        img_background = img_input.resize((md_results.ims[0].shape[1],
+                                           md_results.ims[0].shape[0]))
+
+        # draw keypoints on each crop and paste to background img
+        for ic, (np_crop, kpts_crop) in enumerate(zip(list_crops,
+                                                      list_kpts_per_crop)):
+
+            img_crop = Image.fromarray(np_crop)
+
+            # Draw keypts on crop
+            draw_keypoints_on_image(img_crop,
+                                    kpts_crop, # a numpy array with shape [num_keypoints, 2].
+                                    map_label_id_to_str,
+                                    flag_show_str_labels,
+                                    use_normalized_coordinates=False, # if True, then I should use md_results.xyxyn for list_kpts_crop
+                                    font_style=font_style,
+                                    font_size=font_size,
+                                    keypt_color=keypt_color,
+                                    marker_size=marker_size)
+
+            # Paste crop in original image
+            img_background.paste(img_crop,
+                                 box=tuple([int(t) for t in md_results.xyxy[0][ic, :2]]))
+
+            # Plot bbox
+            bb_per_animal = md_results.xyxy[0].tolist()[ic]
+            pred = md_results.xyxy[0].tolist()[ic][4]
+            if bbox_likelihood_th < pred:
+                draw_bbox_w_text(img_background,
+                                 bb_per_animal,
+                                 font_style=font_style,
+                                 font_size=font_size) # TODO: add selectable color for bbox?
+
+
+        # Save detection results as json
+        download_file = save_results_as_json(md_results, list_kpts_per_crop, map_label_id_to_str, bbox_likelihood_th, dlc_model_input_str, mega_model_input)
+
+        return img_background, download_file
+
+#########################################################
+# Define user interface and launch
+inputs = gradio_inputs_for_MD_DLC(list(MD_models_dict.keys()),
+                                  list(DLC_models_dict.keys()))
+outputs = gradio_outputs_for_MD_DLC()
+[gr_title,
+ gr_description,
+ examples] = gradio_description_and_examples()
+
+# launch
+demo = gr.Interface(predict_pipeline,
+                    inputs=inputs,
+                    outputs=outputs,
+                    title=gr_title,
+                    description=gr_description,
+                    examples=examples,
+                    theme="huggingface")
+
+demo.launch(enable_queue=True, share=True)
detection_utils.py
ADDED
@@ -0,0 +1,116 @@
+
+from tkinter import W
+import gradio as gr
+from matplotlib import cm
+import torch
+import torchvision
+from dlclive import DLCLive, Processor
+import matplotlib
+from PIL import Image, ImageColor, ImageFont, ImageDraw
+import numpy as np
+import math
+
+
+import yaml
+import pdb
+
+############################################
+# Predict detections with MegaDetector v5a model
+def predict_md(im,
+               megadetector_model, #Megadet_Models[mega_model_input]
+               size=640):
+
+    # resize image
+    g = (size / max(im.size))  # multiplicative factor to make max size of the image equal to input size
+    im = im.resize((int(x * g) for x in im.size),
+                   Image.ANTIALIAS)  # resize
+    # device
+    if torch.cuda.is_available():
+        md_device = torch.device('cuda')
+    else:
+        md_device = torch.device('cpu')
+
+    # megadetector
+    MD_model = torch.hub.load('ultralytics/yolov5', # repo_or_dir
+                              'custom', # model
+                              megadetector_model, # args for callable model
+                              force_reload=True,
+                              device=md_device)
+
+    # send model to gpu if possible
+    if (md_device == torch.device('cuda')):
+        print('Sending model to GPU')
+        MD_model.to(md_device)
+
+    ## detect objects
+    results = MD_model(im)  # inference # vars(results).keys()= dict_keys(['imgs', 'pred', 'names', 'files', 'times', 'xyxy', 'xywh', 'xyxyn', 'xywhn', 'n', 't', 's'])
+
+    return results
+
+
+##########################################
+def crop_animal_detections(img_in,
+                           yolo_results,
+                           likelihood_th):
+
+    ## Extract animal crops
+    list_labels_as_str = [i for i in yolo_results.names.values()] # ['animal', 'person', 'vehicle']
+    list_np_animal_crops = []
+
+    # image to crop (scale as input for megadetector)
+    img_in = img_in.resize((yolo_results.ims[0].shape[1],
+                            yolo_results.ims[0].shape[0]))
+    # for every detection in the img
+    for det_array in yolo_results.xyxy:
+
+        # for every detection
+        for j in range(det_array.shape[0]):
+
+            # compute coords around bbox rounded to the nearest integer (for pasting later)
+            xmin_rd = int(math.floor(det_array[j, 0])) # int() should suffice?
+            ymin_rd = int(math.floor(det_array[j, 1]))
+
+            xmax_rd = int(math.ceil(det_array[j, 2]))
+            ymax_rd = int(math.ceil(det_array[j, 3]))
+
+            pred_llk = det_array[j, 4]
+            pred_label = det_array[j, 5]
+            # keep animal crops above threshold
+            if (pred_label == list_labels_as_str.index('animal')) and \
+               (pred_llk >= likelihood_th):
+                area = (xmin_rd, ymin_rd, xmax_rd, ymax_rd)
+
+                #pdb.set_trace()
+                crop = img_in.crop(area) #Image.fromarray(img_in).crop(area)
+                crop_np = np.asarray(crop)
+
+                # add to list
+                list_np_animal_crops.append(crop_np)
+
+    return list_np_animal_crops
+
+##########################################
+def predict_dlc(list_np_crops,
+                kpts_likelihood_th,
+                DLCmodel,
+                dlc_proc):
+
+    # run dlc thru list of crops
+    dlc_live = DLCLive(DLCmodel, processor=dlc_proc)
+    dlc_live.init_inference(list_np_crops[0])
+
+    list_kpts_per_crop = []
+    all_kypts = []
+    np_aux = np.empty((1, 3)) # can I avoid hardcoding here?
+    for crop in list_np_crops:
+        # scale crop here?
+        keypts_xyp = dlc_live.get_pose(crop) # third column is llk!
+        # set kpts below threshold to nan
+
+        #pdb.set_trace()
+        keypts_xyp[keypts_xyp[:, -1] < kpts_likelihood_th, :] = np_aux.fill(np.nan)
+        # add kpts of this crop to list
+        list_kpts_per_crop.append(keypts_xyp)
+        all_kypts.append(keypts_xyp)
+
+    return list_kpts_per_crop
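A minimal sketch of how these three helpers chain together outside the Gradio app (it mirrors what `predict_pipeline` in app.py does; it assumes the LFS weights are pulled, a DLC model has already been downloaded to `DLC_models/DLC_monkey/`, and the pinned pre-Pillow-10 environment, since `predict_md` uses `Image.ANTIALIAS`):

```python
# Sketch: MegaDetector -> animal crops -> DLC keypoints, as in app.py.
from PIL import Image
from dlclive import Processor
from detection_utils import predict_md, crop_animal_detections, predict_dlc

img = Image.open("examples/monkey_full.jpg")  # example image shipped in this repo
md_results = predict_md(img, "MD_models/md_v5a.0.0.pt", size=640)
crops = crop_animal_detections(img, md_results, 0.8)  # bbox confidence threshold
if crops:  # predict_dlc indexes list_np_crops[0], so guard against zero detections
    kpts_per_crop = predict_dlc(crops, 0.3, "DLC_models/DLC_monkey/", Processor())
    print(len(kpts_per_crop), "crops;", kpts_per_crop[0].shape[0], "keypoints each")
```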
examples/cat.jpg
ADDED
examples/dog.jpeg
ADDED
examples/monkey_face.jpeg
ADDED
examples/monkey_full.jpg
ADDED
examples/read.md
ADDED
File without changes
fonts/Amiko-Regular.ttf
ADDED
Binary file (194 kB)
fonts/LoveNature.otf
ADDED
Binary file (20.5 kB)
fonts/PainterDecorator.otf
ADDED
Binary file (182 kB)
fonts/UncialAnimals.ttf
ADDED
Binary file (68.9 kB)
fonts/ZEN.TTF
ADDED
Binary file (106 kB)
fonts/read.md
ADDED
File without changes
megadet_model/md_v5a.0.0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94e88fe97c8050f2e3d0cc4cb4f64729d639d74312dcbe2f74f8eecd3b01b276
+size 280766885
megadet_model/md_v5b.0.0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7026a704c3e0566a199e38a09363200f0f39fb8f804190fd15c8a60ab3beabd
+size 280766885
requirements.txt
ADDED
@@ -0,0 +1,17 @@
+ipython
+transformers
+Pillow
+gradio
+numpy
+torch
+torchvision
+timm
+seaborn
+deeplabcut[modelzoo]
+deeplabcut-live
+argparse
+humanfriendly
+pandas
+statistics
+tqdm
+psutil
save_results.py
ADDED
@@ -0,0 +1,56 @@
+import json
+import numpy as np
+import pdb
+
+dict_pred = {0: 'animal', 1: 'person', 2: 'vehicle'}
+
+
+def save_results(md_results, dlc_outputs, map_label_id_to_str, thr, output_file='dowload_predictions.json'):
+
+    """
+
+    write json
+
+    """
+    info = {}
+    ## info megaDetector
+    info['file'] = md_results.files[0]
+    number_bb = len(md_results.xyxy[0].tolist())
+    info['number_of_bb'] = number_bb
+    number_bb_thr = len(dlc_outputs)
+    labels = [n for n in map_label_id_to_str.values()]
+    #pdb.set_trace()
+    new_index = []
+    for i in range(number_bb):
+        corner_x1, corner_y1, corner_x2, corner_y2, confidence, _ = md_results.xyxy[0].tolist()[i]
+
+        if confidence > thr:
+            new_index.append(i)
+
+
+    for i in range(number_bb_thr):
+        aux = {}
+        corner_x1, corner_y1, corner_x2, corner_y2, confidence, _ = md_results.xyxy[0].tolist()[new_index[i]]
+        aux['corner_1'] = (corner_x1, corner_y1)
+        aux['corner_2'] = (corner_x2, corner_y2)
+        aux['predict MD'] = md_results.names[0]
+        aux['confidence MD'] = confidence
+
+        ## info dlc
+        kypts = []
+        for s in dlc_outputs[i]:
+            aux1 = []
+            for j in s:
+                aux1.append(float(j))
+
+            kypts.append(aux1)
+        aux['dlc_pred'] = dict(zip(labels, kypts))
+        info['bb_' + str(new_index[i])] = aux
+
+
+    with open(output_file, 'w') as f:
+        json.dump(info, f, indent=1)
+    print('Output file saved at {}'.format(output_file))
+
+    return output_file
+
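For orientation, the `info` dict that `save_results` serializes has roughly the shape below (placeholder values, not real model output; note that app.py actually imports the extended `save_results_as_json` from viz_utils.py rather than this module):

```python
# Illustrative layout of the JSON written by save_results (made-up values).
info = {
    "file": "image0.jpg",              # md_results.files[0]
    "number_of_bb": 2,                 # all MegaDetector boxes, incl. below-threshold
    "bb_0": {                          # one entry per box above the threshold
        "corner_1": (103.0, 44.0),     # (x1, y1)
        "corner_2": (420.0, 310.0),    # (x2, y2)
        "predict MD": "animal",
        "confidence MD": 0.94,
        "dlc_pred": {"nose": [17.0, 25.0, 0.91]},  # bodypart -> [x, y, likelihood]
    },
}
```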
ui_utils.py
ADDED
@@ -0,0 +1,81 @@
+import gradio as gr
+
+##############################
+def gradio_inputs_for_MD_DLC(md_models_list, # list(MD_models_dict.keys())
+                             dlc_models_list, # list(DLC_models_dict.keys())
+                             ):
+    # Input image
+    gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
+
+
+    # Models
+    gr_mega_model_input = gr.inputs.Dropdown(choices=md_models_list,
+                                             default='md_v5a', # default option
+                                             type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
+                                             label='Select MegaDetector model')
+    gr_dlc_model_input = gr.inputs.Dropdown(choices=dlc_models_list, # choices
+                                            default='full_cat', # default option
+                                            type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
+                                            label='Select DeepLabCut model')
+
+    # Other inputs
+    gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
+                                              label='Run DLClive only, directly on input image?')
+    gr_str_labels_checkbox = gr.inputs.Checkbox(True,
+                                                label='Show bodypart labels?')
+
+    gr_slider_conf_bboxes = gr.inputs.Slider(0, 1, .02, 0.8,
+                                             label='Set confidence threshold for animal detections')
+    gr_slider_conf_keypoints = gr.inputs.Slider(0, 1, .05, 0,
+                                                label='Set confidence threshold for keypoints')
+
+    # Data viz
+    gr_keypt_color = gr.ColorPicker(value="#ff0000", label="choose color for keypoint label")
+
+    gr_labels_font_style = gr.inputs.Dropdown(choices=['amiko', 'animals', 'nature', 'painter', 'zen'],
+                                              default='amiko',
+                                              type='value',
+                                              label='Select keypoint label font')
+    gr_slider_font_size = gr.inputs.Slider(5, 30, 1, 8,
+                                           label='Set font size')
+    gr_slider_marker_size = gr.inputs.Slider(1, 20, 1, 5,
+                                             label='Set marker size')
+
+    # list of inputs
+    return [gr_image_input,
+            gr_mega_model_input,
+            gr_dlc_model_input,
+            gr_dlc_only_checkbox,
+            gr_str_labels_checkbox,
+            gr_slider_conf_bboxes,
+            gr_slider_conf_keypoints,
+            gr_labels_font_style,
+            gr_slider_font_size,
+            gr_keypt_color,
+            gr_slider_marker_size]
+
+####################################################
+def gradio_outputs_for_MD_DLC():
+    # User interface: outputs
+    gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
+    gr_file_download = gr.File(label="Download JSON file")
+    return [gr_image_output,
+            gr_file_download]
+
+##############################################
+# User interface: description
+def gradio_description_and_examples():
+    title = "MegaDetector v5 + DeepLabCut!"
+    description = "Contributed by Sofia Minano, Neslihan Wittek, Nirel Kadzo, VicShaoChih Chiang, Sabrina Benas -- DLC AI Residents 2022.\
+                   This App detects and estimates the pose of animals in camera trap images using <a href='https://github.com/microsoft/CameraTraps'>MegaDetector v5a</a> + <a href='https://github.com/DeepLabCut/DeepLabCut-live'>DeepLabCut-live</a>. \
+                   We host models from the <a href='http://www.mackenziemathislab.org/dlc-modelzoo'>DeepLabCut ModelZoo Project</a>, and two <a href='https://github.com/microsoft/CameraTraps/blob/main/megadetector.md'>MegaDetector Models</a>. Please carefully check their licensing information if you use this project. The App additionally builds upon work from <a href='https://huggingface.co/spaces/hlydecker/MegaDetector_v5'>hlydecker/MegaDetector_v5</a> \
+                   <a href='https://huggingface.co/spaces/sofmi/MegaDetector_DLClive'>sofmi/MegaDetector_DLClive</a> \
+                   <a href='https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels'>Neslihan/megadetector_dlcmodels</a>."
+
+    # article = "<p style='text-align: center'>This app makes predictions using a YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. This app was built by Henry Lydecker but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
+
+    examples = [['examples/monkey_full.jpg', 'md_v5a', 'full_macaque', False, True, 0.5, 0.3, 'amiko', 9, 'blue', 3]]
+    #['examples/dog.jpeg', 'md_v5a', 'full_dog', False, True, 0.5, 0.00, 'amiko', 9, 'yellow', 3],
+    #['examples/cat.jpg', 'md_v5a', 'full_cat', False, True, 0.5, 0.05, 'amiko', 9, 'purple', 3]
+
+    return [title, description, examples]
viz_utils.py
ADDED
@@ -0,0 +1,190 @@
+import json
+import numpy as np
+
+from matplotlib import cm
+import matplotlib
+from PIL import Image, ImageColor, ImageFont, ImageDraw
+import numpy as np
+import pdb
+from datetime import date
+today = date.today()
+FONTS = {'amiko': "fonts/Amiko-Regular.ttf",
+         'nature': "fonts/LoveNature.otf",
+         'painter': "fonts/PainterDecorator.otf",
+         'animals': "fonts/UncialAnimals.ttf",
+         'zen': "fonts/ZEN.TTF"}
+
+#########################################
+# Draw keypoints on image
+def draw_keypoints_on_image(image,
+                            keypoints,
+                            map_label_id_to_str,
+                            flag_show_str_labels,
+                            use_normalized_coordinates=True,
+                            font_style='amiko',
+                            font_size=8,
+                            keypt_color="#ff0000",
+                            marker_size=2,
+                            ):
+    """Draws keypoints on an image.
+    Modified from:
+    https://www.programcreek.com/python/?code=fjchange%2Fobject_centric_VAD%2Fobject_centric_VAD-master%2Fobject_detection%2Futils%2Fvisualization_utils.py
+    Args:
+      image: a PIL.Image object.
+      keypoints: a numpy array with shape [num_keypoints, 2].
+      map_label_id_to_str: dict with keys=label number and values= label string
+      flag_show_str_labels: boolean to select whether or not to show string labels
+      color: color to draw the keypoints with. Default is red.
+      radius: keypoint radius. Default value is 2.
+      use_normalized_coordinates: if True (default), treat keypoint values as
+        relative to the image. Otherwise treat them as absolute.
+
+
+    """
+    # get a drawing context
+    draw = ImageDraw.Draw(image, "RGBA")
+
+    im_width, im_height = image.size
+    keypoints_x = [k[0] for k in keypoints]
+    keypoints_y = [k[1] for k in keypoints]
+    alpha = [k[2] for k in keypoints]
+    norm = matplotlib.colors.Normalize(vmin=0, vmax=255)
+
+    names_for_color = [i for i in map_label_id_to_str.keys()]
+    colores = np.linspace(0, 255, num=len(names_for_color), dtype=int)
+
+    # adjust keypoints coords if required
+    if use_normalized_coordinates:
+        keypoints_x = tuple([im_width * x for x in keypoints_x])
+        keypoints_y = tuple([im_height * y for y in keypoints_y])
+
+    #cmap = matplotlib.cm.get_cmap('hsv')
+    cmap2 = matplotlib.cm.get_cmap('Greys')
+    # draw ellipses around keypoints
+    for i, (keypoint_x, keypoint_y) in enumerate(zip(keypoints_x, keypoints_y)):
+        round_fill = list(cm.viridis(norm(colores[i]), bytes=True)) #[round(num*255) for num in list(cmap(i))[:3]] #check!
+        if np.isnan(alpha[i]) == False:
+            round_fill[3] = round(alpha[i] * 255)
+        #print(round_fill)
+        #round_outline = [round(num*255) for num in list(cmap2(alpha[i]))[:3]]
+        draw.ellipse([(keypoint_x - marker_size, keypoint_y - marker_size),
+                      (keypoint_x + marker_size, keypoint_y + marker_size)],
+                     fill=tuple(round_fill), outline='black', width=1) #fill and outline: [0,255]
+
+        # add string labels around keypoints
+        if flag_show_str_labels:
+            font = ImageFont.truetype(FONTS[font_style],
+                                      font_size)
+            draw.text((keypoint_x + marker_size, keypoint_y + marker_size), #(0.5*im_width, 0.5*im_height), #-------
+                      map_label_id_to_str[i],
+                      ImageColor.getcolor(keypt_color, "RGB"), # rgb
+                      font=font)
+
+#########################################
+# Draw bboxes on image
+def draw_bbox_w_text(img,
+                     results,
+                     font_style='amiko',
+                     font_size=8): #TODO: select color too?
+    #pdb.set_trace()
+    bbxyxy = results
+    w, h = bbxyxy[2], bbxyxy[3]
+    shape = [(bbxyxy[0], bbxyxy[1]), (w, h)]
+    imgR = ImageDraw.Draw(img)
+    imgR.rectangle(shape, outline="red", width=5) ##bb for animal
+
+    confidence = bbxyxy[4]
+    string_bb = 'animal ' + str(round(confidence, 2))
+    font = ImageFont.truetype(FONTS[font_style], font_size)
+
+    text_size = font.getsize(string_bb) # (h,w)
+    position = (bbxyxy[0], bbxyxy[1] - text_size[1] - 2)
+    left, top, right, bottom = imgR.textbbox(position, string_bb, font=font)
+    imgR.rectangle((left, top - 5, right + 5, bottom + 5), fill="red")
+    imgR.text((bbxyxy[0] + 3, bbxyxy[1] - text_size[1] - 2), string_bb, font=font, fill="black")
+
+    return imgR
+
+###########################################
+def save_results_as_json(md_results, dlc_outputs, map_dlc_label_id_to_str, thr, model, mega_model_input, path_to_output_file='download_predictions.json'):
+
+    """
+    Output detections as json file
+
+    """
+    # initialise dict to save to json
+    info = {}
+    info['date'] = str(today)
+    info['MD_model'] = str(mega_model_input)
+    # info from megaDetector
+    info['file'] = md_results.files[0]
+    number_bb = len(md_results.xyxy[0].tolist())
+    info['number_of_bb'] = number_bb
+    # info from DLC
+    number_bb_thr = len(dlc_outputs)
+    labels = [n for n in map_dlc_label_id_to_str.values()]
+
+    # create list of bboxes above th
+    new_index = []
+    for i in range(number_bb):
+        corner_x1, corner_y1, corner_x2, corner_y2, confidence, _ = md_results.xyxy[0].tolist()[i]
+
+        if confidence > thr:
+            new_index.append(i)
+
+    # define aux dict for every bounding box above threshold
+    for i in range(number_bb_thr):
+        aux = {}
+        # MD output
+        corner_x1, corner_y1, corner_x2, corner_y2, confidence, _ = md_results.xyxy[0].tolist()[new_index[i]]
+        aux['corner_1'] = (corner_x1, corner_y1)
+        aux['corner_2'] = (corner_x2, corner_y2)
+        aux['predict MD'] = md_results.names[0]
+        aux['confidence MD'] = confidence
+
+        # DLC output
+        info['dlc_model'] = model
+        kypts = []
+        for s in dlc_outputs[i]:
+            aux1 = []
+            for j in s:
+                aux1.append(float(j))
+
+            kypts.append(aux1)
+        aux['dlc_pred'] = dict(zip(labels, kypts))
+        info['bb_' + str(new_index[i])] = aux
+
+    # save dict as json
+    with open(path_to_output_file, 'w') as f:
+        json.dump(info, f, indent=1)
+    print('Output file saved at {}'.format(path_to_output_file))
+
+    return path_to_output_file
+
+
+def save_results_only_dlc(dlc_outputs, map_label_id_to_str, model, output_file='dowload_predictions_dlc.json'):
+
+    """
+    write json dlc output
+    """
+    info = {}
+    info['date'] = str(today)
+    labels = [n for n in map_label_id_to_str.values()]
+    info['dlc_model'] = model
+    kypts = []
+    for s in dlc_outputs:
+        aux1 = []
+        for j in s:
+            aux1.append(float(j))
+
+        kypts.append(aux1)
+    info['dlc_pred'] = dict(zip(labels, kypts))
+
+    with open(output_file, 'w') as f:
+        json.dump(info, f, indent=1)
+    print('Output file saved at {}'.format(output_file))
+
+    return output_file
+
+
+###########################################
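Finally, a self-contained sketch of `draw_keypoints_on_image` on a dummy image (the keypoint array and label map are invented; in the app they come from `predict_dlc` and the model's pose_cfg.yaml, and the relative font paths in `FONTS` assume the repo root as working directory):

```python
# Sketch: draw two fake keypoints plus labels on a blank image.
import numpy as np
from PIL import Image
from viz_utils import draw_keypoints_on_image

img = Image.new("RGB", (200, 200), "white")
kpts = np.array([[50.0, 60.0, 0.9],      # x, y, likelihood
                 [120.0, 140.0, 0.7]])
draw_keypoints_on_image(img, kpts,
                        map_label_id_to_str={0: "nose", 1: "tail"},
                        flag_show_str_labels=True,
                        use_normalized_coordinates=False)
img.save("kpts_demo.png")
```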