Update src/run/yolov3/config.py
src/run/yolov3/config.py  (CHANGED: +13, -7)
@@ -1,8 +1,9 @@
 import albumentations as A
 import cv2
 import torch
-from albumentations.pytorch import ToTensorV2

+from albumentations.pytorch import ToTensorV2
+from utils import seed_everything

 DATASET = "PASCAL_VOC"
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
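The second new import, seed_everything, comes from the project's own utils module, which is outside this diff, so its exact behavior is an assumption. Helpers with this name usually seed Python's, NumPy's, and PyTorch's RNGs in one call; a minimal sketch of that pattern looks like this:

    import os
    import random

    import numpy as np
    import torch


    def seed_everything(seed: int = 42) -> None:
        """Seed every RNG the training run touches so results are reproducible."""
        os.environ["PYTHONHASHSEED"] = str(seed)
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

Note that the import alone changes nothing; the function still has to be called somewhere (the call site is not part of this diff) for runs to be repeatable.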
@@ -10,6 +11,7 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 NUM_WORKERS = 0
 BATCH_SIZE = 32
 IMAGE_SIZE = 416
+MAX_IMAGE_SIZE = 416
 NUM_CLASSES = 20
 LEARNING_RATE = 1e-5
 WEIGHT_DECAY = 1e-4
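MAX_IMAGE_SIZE is added right next to IMAGE_SIZE but is not referenced anywhere in this diff, so how it will be used is an assumption. If the intent is to cap how large images may get during augmentation, a typical (hypothetical) use would be:

    import albumentations as A

    MAX_IMAGE_SIZE = 416
    # Hypothetical use: bound the longest image side before padding/cropping to IMAGE_SIZE.
    resize = A.LongestMaxSize(max_size=MAX_IMAGE_SIZE)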
@@ -18,9 +20,11 @@ CONF_THRESHOLD = 0.05
 MAP_IOU_THRESH = 0.5
 NMS_IOU_THRESH = 0.45
 S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
+
 PIN_MEMORY = True
 LOAD_MODEL = False
 SAVE_MODEL = True
+
 CHECKPOINT_FILE = "checkpoint.pth.tar"
 IMG_DIR = DATASET + "/images/"
 LABEL_DIR = DATASET + "/labels/"
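S lists the three output grid sizes of YOLOv3, one per detection scale (strides 32, 16, and 8). With IMAGE_SIZE = 416 the divisions work out to:

    IMAGE_SIZE = 416
    # One grid size per detection head, coarsest to finest.
    S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
    assert S == [13, 26, 52]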
@@ -31,7 +35,8 @@ ANCHORS = [
     [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
 ] # Note these have been rescaled to be between [0, 1]

-
+mean = [0.485, 0.456, 0.406]
+std = [0.229, 0.224, 0.225]

 scale = 1.1
 train_transforms = A.Compose(
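The new mean and std are the standard ImageNet normalization statistics, the usual choice when the backbone is pretrained on ImageNet. The comment in the hunk above notes that the anchors are stored relative to the image size; converting one back to pixels is plain arithmetic:

    IMAGE_SIZE = 416
    # The anchor (0.08, 0.06) is relative to the image; at 416 x 416 it is roughly 33 x 25 pixels.
    anchor_w_px = 0.08 * IMAGE_SIZE  # 33.28
    anchor_h_px = 0.06 * IMAGE_SIZE  # 24.96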
@@ -61,8 +66,8 @@ train_transforms = A.Compose(
         A.ToGray(p=0.1),
         A.ChannelShuffle(p=0.05),
         A.Normalize(
-            mean=
-            std=
+            mean=mean,
+            std=std,
             max_pixel_value=255,
         ),
         ToTensorV2(),
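With max_pixel_value=255, A.Normalize maps each uint8 pixel x to (x / 255 - mean) / std per channel, so swapping in the constants above shifts inputs onto the ImageNet distribution. A quick check of that arithmetic:

    import numpy as np

    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])

    pixel = np.array([128, 128, 128], dtype=np.float64)  # a mid-gray pixel
    normalized = (pixel / 255.0 - mean) / std
    print(normalized.round(2))  # approximately [0.07, 0.21, 0.43]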
@@ -80,8 +85,8 @@ test_transforms = A.Compose(
             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
         ),
         A.Normalize(
-            mean=
-            std=
+            mean=mean,
+            std=std,
             max_pixel_value=255,
         ),
         ToTensorV2(),
@@ -90,6 +95,7 @@ test_transforms = A.Compose(
 )

 PASCAL_CLASSES = [
+    # "background",
     "aeroplane",
     "bicycle",
     "bird",
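The "background" entry stays commented out, which keeps the list consistent with NUM_CLASSES = 20 (Pascal VOC has 20 foreground classes). If it were ever uncommented, NUM_CLASSES would need to change with it; a cheap guard against that drift, placed wherever the config is imported, could be:

    # Hypothetical sanity check; import path depends on how src/run/yolov3 is packaged.
    from config import NUM_CLASSES, PASCAL_CLASSES

    assert len(PASCAL_CLASSES) == NUM_CLASSES, (
        f"{len(PASCAL_CLASSES)} class names but NUM_CLASSES = {NUM_CLASSES}"
    )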
@@ -193,4 +199,4 @@ COCO_LABELS = [
     "teddy bear",
     "hair drier",
     "toothbrush",
-]
+]
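Both pipelines end in ToTensorV2, so they hand the model channel-first tensors. A rough usage sketch, assuming train_transforms was composed with A.BboxParams(format="yolo", ...) as is typical for this kind of config (the bbox_params line sits outside this diff, so that part is an assumption):

    import numpy as np

    from config import train_transforms  # import path depends on how src/run/yolov3 is packaged

    # A fake 500 x 400 RGB image and one YOLO-format box (x_center, y_center, w, h, class_id),
    # with all box coordinates relative to the image size.
    image = np.random.randint(0, 256, size=(500, 400, 3), dtype=np.uint8)
    bboxes = [(0.5, 0.5, 0.2, 0.3, 7)]

    out = train_transforms(image=image, bboxes=bboxes)
    print(out["image"].shape)  # expected torch.Size([3, 416, 416]) once the pipeline crops/pads to IMAGE_SIZE
    print(out["bboxes"])       # boxes re-expressed for the transformed image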